Module: LlmCostTracker::Integrations::Openai
- Extended by: Base
- Defined in: lib/llm_cost_tracker/integrations/openai.rb
Defined Under Namespace
Modules: ChatCompletionsPatch, ResponsesPatch
Constant Summary
- INPUT_DETAIL_KEYS =
%i[input_tokens_details input_token_details prompt_tokens_details].freeze
- OUTPUT_DETAIL_KEYS =
%i[output_tokens_details output_token_details completion_tokens_details].freeze
Class Method Summary
- .audio_input_tokens(usage) ⇒ Object
- .audio_output_tokens(usage) ⇒ Object
- .cache_read_input_tokens(usage) ⇒ Object
- .hidden_output_tokens(usage) ⇒ Object
- .input_detail(usage, key) ⇒ Object
- .integration_name ⇒ Object
- .minimum_version ⇒ Object
- .normalize_output_action(action) ⇒ Object
- .normalize_output_item(item) ⇒ Object
- .output_detail(usage, key) ⇒ Object
- .patch_targets ⇒ Object
- .record_response(response, request:, latency_ms:) ⇒ Object
- .regular_input_tokens(input_tokens, cache_read, audio_input) ⇒ Object
- .regular_output_tokens(output_tokens, audio_output) ⇒ Object
- .service_line_items_from(response) ⇒ Object
- .stream_pricing_mode(request) ⇒ Object
- .token_usage(usage:, input_tokens:, output_tokens:, cache_read:) ⇒ Object
- .version_constant ⇒ Object
Methods included from Base
active?, elapsed_ms, enforce_budget!, install, minimum_version, object_dig, object_value, patch_target, patch_targets, record_safely, request_params, status, stream_collector, stream_pricing_mode, track_stream, version_constant
Class Method Details
.audio_input_tokens(usage) ⇒ Object

# File 'lib/llm_cost_tracker/integrations/openai.rb', line 124
def audio_input_tokens(usage)
  input_detail(usage, :audio_tokens)
end
.audio_output_tokens(usage) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 128
def audio_output_tokens(usage)
  output_detail(usage, :audio_tokens)
end
.cache_read_input_tokens(usage) ⇒ Object

# File 'lib/llm_cost_tracker/integrations/openai.rb', line 116
def cache_read_input_tokens(usage)
  input_detail(usage, :cached_tokens)
end
.hidden_output_tokens(usage) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 120
def hidden_output_tokens(usage)
  output_detail(usage, :reasoning_tokens)
end
.input_detail(usage, key) ⇒ Object

# File 'lib/llm_cost_tracker/integrations/openai.rb', line 132
def input_detail(usage, key)
  INPUT_DETAIL_KEYS.each do |container|
    value = object_dig(usage, container, key)
    return value.to_i if value
  end
  0
end
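As an illustration of the fallback across detail containers, a hypothetical call (this assumes object_dig walks plain nested hashes the way Hash#dig does, and that the helper is publicly callable; the usage hashes are made up, not captured API output):

openai = LlmCostTracker::Integrations::Openai

# Responses API style container
openai.input_detail({ input_tokens_details: { cached_tokens: 128 } }, :cached_tokens)  # => 128
# Chat Completions style container
openai.input_detail({ prompt_tokens_details: { cached_tokens: 64 } }, :cached_tokens)  # => 64
# No detail container present
openai.input_detail({}, :cached_tokens)                                                # => 0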
.integration_name ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 13
def integration_name
  :openai
end
.minimum_version ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 21
def minimum_version
  "0.59.0"
end
.normalize_output_action(action) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 92
def normalize_output_action(action)
  return nil if action.nil?
  return action if action.is_a?(Hash)

  { "type" => object_value(action, :type) }
end
.normalize_output_item(item) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 79
def normalize_output_item(item)
  return item if item.is_a?(Hash)
  return nil if item.nil?

  {
    "type" => object_value(item, :type),
    "id" => object_value(item, :id),
    "status" => object_value(item, :status),
    "container_id" => object_value(item, :container_id),
    "action" => normalize_output_action(object_value(item, :action))
  }
end
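A hedged sketch of the normalization, using a Struct as a stand-in for an SDK output item (this assumes object_value falls back to the object's attribute readers; the Struct and its values are illustrative, not the SDK class):

Item = Struct.new(:type, :id, :status, :container_id, :action, keyword_init: true)
item = Item.new(type: "web_search_call", id: "ws_1", status: "completed")

LlmCostTracker::Integrations::Openai.normalize_output_item(item)
# => { "type" => "web_search_call", "id" => "ws_1", "status" => "completed",
#      "container_id" => nil, "action" => nil }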
.output_detail(usage, key) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 140
def output_detail(usage, key)
  OUTPUT_DETAIL_KEYS.each do |container|
    value = object_dig(usage, container, key)
    return value.to_i if value
  end
  0
end
.patch_targets ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 29
def patch_targets
  [
    patch_target(
      "OpenAI::Resources::Responses",
      with: ResponsesPatch,
      methods: %i[create stream stream_raw retrieve_streaming]
    ),
    patch_target(
      "OpenAI::Resources::Chat::Completions",
      with: ChatCompletionsPatch,
      methods: %i[create stream_raw]
    )
  ]
end
.record_response(response, request:, latency_ms:) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 44
def record_response(response, request:, latency_ms:)
  return unless active?

  record_safely do
    usage = object_value(response, :usage)
    next unless usage

    input_tokens = object_value(usage, :input_tokens, :prompt_tokens)
    output_tokens = object_value(usage, :output_tokens, :completion_tokens)
    next if input_tokens.nil? && output_tokens.nil?

    cache_read = cache_read_input_tokens(usage)
    LlmCostTracker::Tracker.record(
      capture: UsageCapture.build(
        provider: "openai",
        model: object_value(response, :model) || request[:model],
        pricing_mode: object_value(response, :service_tier) || request[:service_tier],
        token_usage: token_usage(usage:, input_tokens:, output_tokens:, cache_read:),
        usage_source: :sdk_response,
        provider_response_id: object_value(response, :id),
        service_line_items: service_line_items_from(response)
      ),
      latency_ms: latency_ms
    )
  end
end
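A hedged end-to-end sketch, using a plain hash in place of an SDK response object (this assumes object_value reads hash keys as well as SDK model attributes; the ids and numbers are made up):

response = {
  id: "resp_abc123",
  model: "gpt-4o-mini",
  service_tier: "default",
  usage: {
    input_tokens: 1_000,
    output_tokens: 200,
    input_tokens_details: { cached_tokens: 300, audio_tokens: 0 }
  }
}

# Records a capture for provider "openai" with 700 regular and 300 cached
# input tokens, 200 output tokens, and the measured latency.
LlmCostTracker::Integrations::Openai.record_response(
  response,
  request: { model: "gpt-4o-mini" },
  latency_ms: 812
)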
.regular_input_tokens(input_tokens, cache_read, audio_input) ⇒ Object

# File 'lib/llm_cost_tracker/integrations/openai.rb', line 148
def regular_input_tokens(input_tokens, cache_read, audio_input)
  [input_tokens.to_i - cache_read - audio_input, 0].max
end
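The clamp to zero guards against detail counts that exceed the reported total. For example (calling through the module, assuming the helper is publicly reachable):

LlmCostTracker::Integrations::Openai.regular_input_tokens(1_000, 300, 50)  # => 650
LlmCostTracker::Integrations::Openai.regular_input_tokens(100, 120, 0)     # => 0 (clamped rather than negative)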
.regular_output_tokens(output_tokens, audio_output) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 152
def regular_output_tokens(output_tokens, audio_output)
  [output_tokens.to_i - audio_output, 0].max
end
.service_line_items_from(response) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 71
def service_line_items_from(response)
  output = object_value(response, :output)
  return [] unless output.respond_to?(:each)

  LlmCostTracker::Parsers::OpenaiServiceCharges
    .line_items_from_output(output.map { |item| normalize_output_item(item) })
end
.stream_pricing_mode(request) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 17
def stream_pricing_mode(request)
  Pricing.normalize_mode((request || {})[:service_tier])
end
.token_usage(usage:, input_tokens:, output_tokens:, cache_read:) ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 99
def token_usage(usage:, input_tokens:, output_tokens:, cache_read:)
  audio_input = audio_input_tokens(usage)
  audio_output = audio_output_tokens(usage)

  TokenUsage.build(
    input_tokens: regular_input_tokens(input_tokens, cache_read, audio_input),
    output_tokens: regular_output_tokens(output_tokens, audio_output),
    cache_read_input_tokens: cache_read,
    audio_input_tokens: audio_input,
    audio_output_tokens: audio_output,
    hidden_output_tokens: hidden_output_tokens(usage)
  )
end
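A hedged worked example of the split (the usage hash below is hypothetical and the helper is assumed to be publicly callable):

usage = {
  input_tokens_details: { cached_tokens: 300, audio_tokens: 0 },
  output_tokens_details: { reasoning_tokens: 400, audio_tokens: 0 }
}

LlmCostTracker::Integrations::Openai.token_usage(
  usage: usage, input_tokens: 1_000, output_tokens: 600, cache_read: 300
)
# => TokenUsage with 700 regular input tokens (1_000 - 300 cached),
#    600 regular output tokens, 300 cached input tokens,
#    and 400 hidden (reasoning) output tokens reported alongside the total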
.version_constant ⇒ Object
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 25
def version_constant
  "OpenAI::VERSION"
end