Class: Google::Cloud::AIPlatform::V1::Candidate

Inherits:
Object
  • Object
show all
Extended by:
Protobuf::MessageExts::ClassMethods
Includes:
Protobuf::MessageExts
Defined in:
proto_docs/google/cloud/aiplatform/v1/content.rb

Overview

A response candidate generated from the model.

Defined Under Namespace

Modules: FinishReason

Instance Attribute Summary collapse

Instance Attribute Details

#avg_logprobs::Float (readonly)

Returns Output only. Average log probability score of the candidate.

Returns:

  • (::Float)

    Output only. Average log probability score of the candidate.



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#citation_metadata::Google::Cloud::AIPlatform::V1::CitationMetadata (readonly)

Returns Output only. Source attribution of the generated content.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#content::Google::Cloud::AIPlatform::V1::Content (readonly)

Returns Output only. Content parts of the candidate.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#finish_message::String (readonly)

Returns Output only. Describes the reason the model stopped generating tokens in more detail. This is only filled when finish_reason is set.

Returns:

  • (::String)

    Output only. Describes the reason the model stopped generating tokens in more detail. This is only filled when finish_reason is set.



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#finish_reason::Google::Cloud::AIPlatform::V1::Candidate::FinishReason (readonly)

Returns Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#grounding_metadata::Google::Cloud::AIPlatform::V1::GroundingMetadata (readonly)

Returns Output only. Metadata specifies sources used to ground generated content.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#index::Integer (readonly)

Returns Output only. Index of the candidate.

Returns:

  • (::Integer)

    Output only. Index of the candidate.



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#logprobs_result::Google::Cloud::AIPlatform::V1::LogprobsResult (readonly)

Returns Output only. Log-likelihood scores for the response tokens and top tokens.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#safety_ratings::Array<::Google::Cloud::AIPlatform::V1::SafetyRating> (readonly)

Returns Output only. List of ratings for the safety of a response candidate.

There is at most one rating per category.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#score::Float (readonly)

Returns Output only. Confidence score of the candidate.

Returns:

  • (::Float)

    Output only. Confidence score of the candidate.



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end

#url_context_metadata::Google::Cloud::AIPlatform::V1::UrlContextMetadata (readonly)

Returns Output only. Metadata related to the URL context retrieval tool.

Returns:



744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
# File 'proto_docs/google/cloud/aiplatform/v1/content.rb', line 744

# A response candidate generated from the model.
class Candidate
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The reason why the model stopped generating tokens.
  # If empty, the model has not stopped generating the tokens.
  module FinishReason
    # The finish reason is unspecified.
    FINISH_REASON_UNSPECIFIED = 0

    # Token generation reached a natural stopping point or a configured stop
    # sequence.
    STOP = 1

    # Token generation reached the configured maximum output tokens.
    MAX_TOKENS = 2

    # Token generation stopped because the content potentially contains safety
    # violations. NOTE: When streaming,
    # {::Google::Cloud::AIPlatform::V1::Candidate#content content} is empty if
    # content filters block the output.
    SAFETY = 3

    # Token generation stopped because the content potentially contains
    # copyright violations.
    RECITATION = 4

    # All other reasons that stopped the token generation.
    OTHER = 5

    # Token generation stopped because the content contains forbidden terms.
    BLOCKLIST = 6

    # Token generation stopped for potentially containing prohibited content.
    PROHIBITED_CONTENT = 7

    # Token generation stopped because the content potentially contains
    # Sensitive Personally Identifiable Information (SPII).
    SPII = 8

    # The function call generated by the model is invalid.
    MALFORMED_FUNCTION_CALL = 9

    # The model response was blocked by Model Armor.
    MODEL_ARMOR = 10
  end
end