Module: LLM::OpenAI::ResponseAdapter::Responds

Includes:
Contract::Completion
Defined in:
lib/llm/providers/openai/response_adapter/responds.rb

Constant Summary

Constants included from Contract

Contract::ContractError

Instance Method Summary collapse

Methods included from Contract

#included

Instance Method Details

#annotations ⇒ Array<Hash>

Returns:

  • (Array<Hash>)


20
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 20

def annotations = messages[0].annotations

#cache_read_tokens ⇒ Integer

Returns the number of cached input tokens, or 0 when the provider does not report cache usage

Returns:

  • (Integer)

    Returns the number of cached input tokens, or 0 when the provider does not report cache usage



71
72
73
74
75
76
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 71

def cache_read_tokens
  body
    .usage
    &.input_tokens_details
    &.cached_tokens || 0
end

#cache_write_tokens ⇒ Integer

Returns the number of cache creation input tokens, or 0 when the provider does not report cache creation usage

Returns:

  • (Integer)

    Returns the number of cache creation input tokens, or 0 when the provider does not report cache creation usage



80
81
82
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 80

def cache_write_tokens
  0
end

#content ⇒ String

Returns the LLM response

Returns:

  • (String)

    Returns the LLM response



118
119
120
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 118

def content
  super || ""
end

#content! ⇒ Hash

Returns the LLM response after parsing it as JSON

Returns:

  • (Hash)

    Returns the LLM response after parsing it as JSON



124
125
126
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 124

def content!
  super
end

#input_audio_tokens ⇒ Integer

Returns the number of input audio tokens, or 0 when the provider does not report input audio usage

Returns:

  • (Integer)

    Returns the number of input audio tokens, or 0 when the provider does not report input audio usage



47
48
49
50
51
52
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 47

def input_audio_tokens
  body
    .usage
    &.input_tokens_details
    &.audio_tokens || 0
end

#input_image_tokens ⇒ Integer

Returns the number of input image tokens, or 0 when the provider does not report input image usage

Returns:

  • (Integer)

    Returns the number of input image tokens, or 0 when the provider does not report input image usage



65
66
67
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 65

def input_image_tokens
  super
end

#input_tokens ⇒ Integer Also known as: prompt_tokens

Returns the number of input tokens

Returns:

  • (Integer)

    Returns the number of input tokens



24
25
26
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 24

def input_tokens
  body.usage&.input_tokens || 0
end

#messages ⇒ Array<LLM::Message> Also known as: choices

Returns one or more messages

Returns:

  • (Array<LLM::Message>)

    Returns one or more messages



7
8
9
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 7

def messages
  [adapt_message]
end

#model ⇒ String

Returns the model name

Returns:

  • (String)

    Returns the model name



98
99
100
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 98

def model
  body.model
end

#output_audio_tokens ⇒ Integer

Returns the number of output audio tokens, or 0 when the provider does not report output audio usage

Returns:

  • (Integer)

    Returns the number of output audio tokens, or 0 when the provider does not report output audio usage



56
57
58
59
60
61
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 56

def output_audio_tokens
  body
    .usage
    &.output_tokens_details
    &.audio_tokens || 0
end

#output_text ⇒ String

Returns the aggregated text content from the response outputs.

Returns:

  • (String)


112
113
114
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 112

def output_text
  content
end

#output_tokens ⇒ Integer Also known as: completion_tokens

Returns the number of output tokens

Returns:

  • (Integer)

    Returns the number of output tokens



31
32
33
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 31

def output_tokens
  body.usage&.output_tokens || 0
end

#reasoning_content ⇒ String?

Returns the reasoning content when the provider exposes it

Returns:

  • (String, nil)

    Returns the reasoning content when the provider exposes it



130
131
132
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 130

def reasoning_content
  super
end

#reasoning_tokens ⇒ Integer

Returns the number of reasoning tokens

Returns:

  • (Integer)


38
39
40
41
42
43
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 38

def reasoning_tokens
  body
    .usage
    &.output_tokens_details
    &.reasoning_tokens || 0
end

#response_id ⇒ String

Returns:

  • (String)


14
15
16
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 14

def response_id
  respond_to?(:response) ? response["id"] : id
end

#system_fingerprint ⇒ nil

OpenAI’s Responses API does not expose a system fingerprint.

Returns:

  • (nil)


105
106
107
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 105

def system_fingerprint
  nil
end

#total_tokens ⇒ Integer

Returns the total number of tokens

Returns:

  • (Integer)

    Returns the total number of tokens



86
87
88
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 86

def total_tokens
  body.usage&.total_tokens || 0
end

#usage ⇒ LLM::Usage

Returns usage information

Returns:

  • (LLM::Usage)

    Returns usage information


92
93
94
# File 'lib/llm/providers/openai/response_adapter/responds.rb', line 92

def usage
  super
end