Class: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample

Inherits:
Internal::Type::BaseModel show all
Defined in:
lib/openai/models/evals/runs/output_item_retrieve_response.rb

Overview

See Also:

Defined Under Namespace

Classes: Input, Output, Usage

Instance Attribute Summary collapse

Instance Method Summary collapse

Methods inherited from Internal::Type::BaseModel

==, #==, #[], coerce, #deconstruct_keys, #deep_to_h, dump, fields, hash, #hash, inherited, inspect, #inspect, known_fields, optional, recursively_to_h, required, #to_h, #to_json, #to_s, to_sorbet_type, #to_yaml

Methods included from Internal::Type::Converter

#coerce, coerce, #dump, dump, #inspect, inspect, type_info

Methods included from Internal::Util::SorbetRuntimeSupport

#const_missing, #define_sorbet_constant!, #sorbet_constant_defined?, #to_sorbet_type, to_sorbet_type

Constructor Details

#initialize(error:, finish_reason:, input:, max_completion_tokens:, model:, output:, seed:, temperature:, top_p:, usage:) ⇒ Object

Parameters:

  • error (OpenAI::Models::Evals::EvalAPIError)

    An object representing an error response from the Eval API.

  • finish_reason (String)

    The reason why the sample generation was finished.

  • input (Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>)

    An array of input messages.

  • max_completion_tokens (Integer)

    The maximum number of tokens allowed for completion.

  • model (String)

    The model used for generating the sample.

  • output (Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>)

    An array of output messages.

  • seed (Integer)

    The seed used for generating the sample.

  • temperature (Float)

    The sampling temperature used.

  • top_p (Float)

    The top_p value used for sampling.

  • usage (OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage)

    Token usage details for the sample.



# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 197

Instance Attribute Details

#error ⇒ OpenAI::Models::Evals::EvalAPIError

An object representing an error response from the Eval API.

Returns:

  • (OpenAI::Models::Evals::EvalAPIError)



99
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 99

required :error, -> { OpenAI::Evals::EvalAPIError }

#finish_reason ⇒ String

The reason why the sample generation was finished.

Returns:

  • (String)


105
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 105

required :finish_reason, String

#input ⇒ Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>

An array of input messages.

Returns:

  • (Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>)



111
112
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 111

required :input,
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input] }

#max_completion_tokens ⇒ Integer

The maximum number of tokens allowed for completion.

Returns:

  • (Integer)


118
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 118

required :max_completion_tokens, Integer

#model ⇒ String

The model used for generating the sample.

Returns:

  • (String)


124
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 124

required :model, String

#output ⇒ Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>

An array of output messages.

Returns:

  • (Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>)



130
131
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 130

required :output,
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output] }

#seed ⇒ Integer

The seed used for generating the sample.

Returns:

  • (Integer)


137
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 137

required :seed, Integer

#temperature ⇒ Float

The sampling temperature used.

Returns:

  • (Float)


143
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 143

required :temperature, Float

#top_p ⇒ Float

The top_p value used for sampling.

Returns:

  • (Float)


149
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 149

required :top_p, Float

#usage ⇒ OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage

Token usage details for the sample.

Returns:

  • (OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage)



155
# File 'lib/openai/models/evals/runs/output_item_retrieve_response.rb', line 155

required :usage, -> { OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage }