Class: OpenAI::Models::Evals::RunCreateResponse

Inherits:
Internal::Type::BaseModel show all
Defined in:
lib/openai/models/evals/run_create_response.rb

Overview

Defined Under Namespace

Modules: DataSource Classes: PerModelUsage, PerTestingCriteriaResult, ResultCounts

Instance Attribute Summary collapse

Class Method Summary collapse

Instance Method Summary collapse

Methods inherited from Internal::Type::BaseModel

==, #==, #[], coerce, #deconstruct_keys, #deep_to_h, dump, fields, hash, #hash, inherited, inspect, #inspect, known_fields, optional, recursively_to_h, required, #to_h, #to_json, #to_s, to_sorbet_type, #to_yaml

Methods included from Internal::Type::Converter

#coerce, coerce, #dump, dump, #inspect, inspect, type_info

Methods included from Internal::Util::SorbetRuntimeSupport

#const_missing, #define_sorbet_constant!, #sorbet_constant_defined?, #to_sorbet_type, to_sorbet_type

Constructor Details

#initialize(failed:, passed:, testing_criteria:) ⇒ Object

Parameters:

  • failed (Integer) (defaults to: )

    Number of tests failed for this criteria.

  • passed (Integer) (defaults to: )

    Number of tests passed for this criteria.

  • testing_criteria (String) (defaults to: )

    A description of the testing criteria.



# File 'lib/openai/models/evals/run_create_response.rb', line 99

Instance Attribute Details

#created_at ⇒ Integer

Unix timestamp (in seconds) when the evaluation run was created.

Returns:

  • (Integer)


18
# File 'lib/openai/models/evals/run_create_response.rb', line 18

required :created_at, Integer

#data_source ⇒ OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, ...

Information about the run’s data source.



24
# File 'lib/openai/models/evals/run_create_response.rb', line 24

required :data_source, union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource }

#error ⇒ OpenAI::Models::Evals::EvalAPIError

An object representing an error response from the Eval API.



30
# File 'lib/openai/models/evals/run_create_response.rb', line 30

required :error, -> { OpenAI::Evals::EvalAPIError }

#eval_id ⇒ String

The identifier of the associated evaluation.

Returns:

  • (String)


36
# File 'lib/openai/models/evals/run_create_response.rb', line 36

required :eval_id, String

#id ⇒ String

Unique identifier for the evaluation run.

Returns:

  • (String)


12
# File 'lib/openai/models/evals/run_create_response.rb', line 12

required :id, String

#metadata ⇒ Hash{Symbol=>String}?

Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.

Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters.

Returns:

  • (Hash{Symbol=>String}, nil)


47
# File 'lib/openai/models/evals/run_create_response.rb', line 47

required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

#model ⇒ String

The model that is evaluated, if applicable.

Returns:

  • (String)


53
# File 'lib/openai/models/evals/run_create_response.rb', line 53

required :model, String

#name ⇒ String

The name of the evaluation run.

Returns:

  • (String)


59
# File 'lib/openai/models/evals/run_create_response.rb', line 59

required :name, String

#object ⇒ Symbol, :"eval.run"

The type of the object. Always “eval.run”.

Returns:

  • (Symbol, :"eval.run")


65
# File 'lib/openai/models/evals/run_create_response.rb', line 65

required :object, const: :"eval.run"

#per_model_usage ⇒ Array&lt;OpenAI::Models::Evals::RunCreateResponse::PerModelUsage&gt;

Usage statistics for each model during the evaluation run.



71
72
# File 'lib/openai/models/evals/run_create_response.rb', line 71

required :per_model_usage,
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage] }

#per_testing_criteria_results ⇒ Array&lt;OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult&gt;

Results per testing criteria applied during the evaluation run.



78
79
# File 'lib/openai/models/evals/run_create_response.rb', line 78

required :per_testing_criteria_results,
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult] }

#report_url ⇒ String

The URL to the rendered evaluation run report on the UI dashboard.

Returns:

  • (String)


85
# File 'lib/openai/models/evals/run_create_response.rb', line 85

required :report_url, String

#result_counts ⇒ OpenAI::Models::Evals::RunCreateResponse::ResultCounts

Counters summarizing the outcomes of the evaluation run.



91
# File 'lib/openai/models/evals/run_create_response.rb', line 91

required :result_counts, -> { OpenAI::Models::Evals::RunCreateResponse::ResultCounts }

#status ⇒ String

The status of the evaluation run.

Returns:

  • (String)


97
# File 'lib/openai/models/evals/run_create_response.rb', line 97

required :status, String