Module: LlmCostTracker::Integrations::Openai
- Extended by:
- Base
- Defined in:
- lib/llm_cost_tracker/integrations/openai.rb
Defined Under Namespace
Modules: ChatCompletionsPatch, ResponsesPatch
Class Method Summary
collapse
Methods included from Base
active?, constant, elapsed_ms, enforce_budget!, install, minimum_version, patch_target, patch_targets, record_safely, request_params, status, version_constant
Class Method Details
.cache_read_input_tokens(usage) ⇒ Object
56
57
58
59
60
61
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 56
# Number of input tokens that were served from the prompt cache.
#
# Looks for the cached-token count under the Responses API shape
# (:input_tokens_details) first, then under the Chat Completions shape
# (:prompt_tokens_details), and coerces whichever was found to an integer.
def cache_read_input_tokens(usage)
  cached = ObjectReader.nested(usage, :input_tokens_details, :cached_tokens)
  cached ||= ObjectReader.nested(usage, :prompt_tokens_details, :cached_tokens)
  ObjectReader.integer(cached)
end
|
.hidden_output_tokens(usage) ⇒ Object
63
64
65
66
67
68
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 63
# Output tokens that were billed but not returned as visible text
# (reasoning tokens). Checks the Responses API shape
# (:output_tokens_details) before the Chat Completions shape
# (:completion_tokens_details) and coerces the result to an integer.
def hidden_output_tokens(usage)
  reasoning = ObjectReader.nested(usage, :output_tokens_details, :reasoning_tokens)
  reasoning ||= ObjectReader.nested(usage, :completion_tokens_details, :reasoning_tokens)
  ObjectReader.integer(reasoning)
end
|
.integration_name ⇒ Object
11
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 11
# Symbol under which this integration is registered.
def integration_name
  :openai
end
|
.minimum_version ⇒ Object
13
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 13
def minimum_version = "0.59.0"
|
.patch_targets ⇒ Object
17
18
19
20
21
22
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 17
# Declares which SDK resources get instrumented and with which patch
# module. Both the Responses API and the Chat Completions API are
# covered; only each resource's `create` method is wrapped.
def patch_targets
  patches = {
    "OpenAI::Resources::Responses" => ResponsesPatch,
    "OpenAI::Resources::Chat::Completions" => ChatCompletionsPatch
  }
  patches.map { |klass, mod| patch_target(klass, with: mod, methods: :create) }
end
|
.record_response(response, request:, latency_ms:) ⇒ Object
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 24
# Records token usage and cost data for a completed OpenAI API call.
#
# No-op when the integration is inactive or the response carries no
# usable usage data. Recognizes both the Responses API field names
# (input_tokens / output_tokens) and the Chat Completions names
# (prompt_tokens / completion_tokens).
#
# response   - SDK response object returned by the API call.
# request:   - request parameters; supplies the model as a fallback when
#              the response omits it.
# latency_ms - wall-clock duration of the call in milliseconds.
def record_response(response, request:, latency_ms:)
  return unless active?

  record_safely do
    usage = ObjectReader.first(response, :usage)
    next unless usage

    prompt_count = ObjectReader.first(usage, :input_tokens, :prompt_tokens)
    completion_count = ObjectReader.first(usage, :output_tokens, :completion_tokens)
    next if prompt_count.nil? && completion_count.nil?

    extra = usage_metadata(usage)
    LlmCostTracker::Tracker.record(
      provider: "openai",
      model: ObjectReader.first(response, :model) || request[:model],
      input_tokens: regular_input_tokens(prompt_count, extra[:cache_read_input_tokens]),
      output_tokens: ObjectReader.integer(completion_count),
      latency_ms: latency_ms,
      usage_source: :sdk_response,
      provider_response_id: ObjectReader.first(response, :id),
      metadata: extra
    )
  end
end
|
.regular_input_tokens(input_tokens, cache_read) ⇒ Object
70
71
72
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 70
# Input tokens billed at the regular (uncached) rate: the total input
# count minus cache reads, floored at zero so inconsistent counts never
# produce a negative value.
def regular_input_tokens(input_tokens, cache_read)
  uncached = ObjectReader.integer(input_tokens) - cache_read.to_i
  uncached.negative? ? 0 : uncached
end
|
.usage_metadata(usage) ⇒ Object
49
50
51
52
53
54
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 49
# Supplementary token counts (cache reads and hidden reasoning output)
# pulled from the usage object, keyed for the tracker's metadata field.
def usage_metadata(usage)
  metadata = { cache_read_input_tokens: cache_read_input_tokens(usage) }
  metadata[:hidden_output_tokens] = hidden_output_tokens(usage)
  metadata
end
|
.version_constant ⇒ Object
15
|
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 15
def version_constant = "OpenAI::VERSION"
|