Module: LlmCostTracker::Integrations::Openai
- Extended by: Base
- Defined in: lib/llm_cost_tracker/integrations/openai.rb
Defined Under Namespace
Modules: ChatCompletionsPatch, ResponsesPatch
Class Method Summary
- .cache_read_input_tokens(usage) ⇒ Object
- .finish_stream(collector, errored:) ⇒ Object
- .hidden_output_tokens(usage) ⇒ Object
- .integration_name ⇒ Object
- .minimum_version ⇒ Object
- .patch_targets ⇒ Object
- .record_response(response, request:, latency_ms:) ⇒ Object
- .regular_input_tokens(input_tokens, cache_read) ⇒ Object
- .stream_collector(request) ⇒ Object
- .track_stream(stream, collector:) ⇒ Object
- .version_constant ⇒ Object
Methods included from Base
active?, elapsed_ms, enforce_budget!, install, minimum_version, object_dig, object_value, patch_target, patch_targets, record_safely, request_params, status, version_constant
Class Method Details
.cache_read_input_tokens(usage) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 71

def cache_read_input_tokens(usage)
  (
    object_dig(usage, :input_tokens_details, :cached_tokens) ||
      object_dig(usage, :prompt_tokens_details, :cached_tokens)
  ).to_i
end
```
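The two dig paths cover both OpenAI API surfaces: the Responses API reports cached tokens under input_tokens_details, while Chat Completions reports them under prompt_tokens_details. A minimal sketch of the fallback, assuming object_dig traverses plain hashes the same way it traverses SDK response objects:

```ruby
# Hypothetical usage payloads for illustration only.
responses_usage = { input_tokens_details: { cached_tokens: 400 } }
chat_usage      = { prompt_tokens_details: { cached_tokens: 250 } }

LlmCostTracker::Integrations::Openai.cache_read_input_tokens(responses_usage) # => 400
LlmCostTracker::Integrations::Openai.cache_read_input_tokens(chat_usage)      # => 250
LlmCostTracker::Integrations::Openai.cache_read_input_tokens({})              # => 0 (nil.to_i)
```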
.finish_stream(collector, errored:) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 107

def finish_stream(collector, errored:)
  record_safely { collector.finish!(errored: errored) }
end
```
.hidden_output_tokens(usage) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 78

def hidden_output_tokens(usage)
  (
    object_dig(usage, :output_tokens_details, :reasoning_tokens) ||
      object_dig(usage, :completion_tokens_details, :reasoning_tokens)
  ).to_i
end
```
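Reasoning tokens are billed as output tokens but never appear in the visible completion, so they are extracted separately as hidden output. Same hash-traversal assumption as above:

```ruby
# Hypothetical Responses API usage payload.
usage = { output_tokens_details: { reasoning_tokens: 1_024 } }
LlmCostTracker::Integrations::Openai.hidden_output_tokens(usage) # => 1024
```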
.integration_name ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 13

def integration_name
  :openai
end
```
.minimum_version ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 17

def minimum_version
  "0.59.0"
end
```
.patch_targets ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 25

def patch_targets
  [
    patch_target(
      "OpenAI::Resources::Responses",
      with: ResponsesPatch,
      methods: %i[create stream stream_raw retrieve_streaming]
    ),
    patch_target(
      "OpenAI::Resources::Chat::Completions",
      with: ChatCompletionsPatch,
      methods: %i[create stream_raw]
    )
  ]
end
```
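Both SDK resource classes are patched, including their streaming entry points, so usage is captured whether a caller goes through the Responses API or Chat Completions. The actual patch modules are defined in the same file; the following is only an illustrative sketch of the prepend-style shape such a patch takes, assuming Base's install resolves each target constant and prepends the module (ExamplePatch and the timing logic are not the library's real code):

```ruby
# Illustrative only; the real patches are ResponsesPatch and
# ChatCompletionsPatch in lib/llm_cost_tracker/integrations/openai.rb.
module ExamplePatch
  def create(*args, **kwargs)
    started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    response = super
    LlmCostTracker::Integrations::Openai.record_response(
      response,
      request: kwargs, # the real patches presumably use Base's request_params
      latency_ms: ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - started) * 1000).round
    )
    response
  end
end
```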
.record_response(response, request:, latency_ms:) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 40

def record_response(response, request:, latency_ms:)
  return unless active?

  record_safely do
    usage = object_value(response, :usage)
    next unless usage

    input_tokens = object_value(usage, :input_tokens, :prompt_tokens)
    output_tokens = object_value(usage, :output_tokens, :completion_tokens)
    next if input_tokens.nil? && output_tokens.nil?

    cache_read = cache_read_input_tokens(usage)

    LlmCostTracker::Tracker.record(
      capture: UsageCapture.build(
        provider: "openai",
        model: object_value(response, :model) || request[:model],
        pricing_mode: object_value(response, :service_tier) || request[:service_tier],
        token_usage: TokenUsage.build(
          input_tokens: regular_input_tokens(input_tokens, cache_read),
          output_tokens: output_tokens.to_i,
          cache_read_input_tokens: cache_read,
          hidden_output_tokens: hidden_output_tokens(usage)
        ),
        usage_source: :sdk_response,
        provider_response_id: object_value(response, :id)
      ),
      latency_ms: latency_ms
    )
  end
end
```
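The method is defensive at every step: it no-ops when tracking is inactive, skips responses without usable usage, and wraps recording in record_safely. Model and pricing_mode prefer the response's own fields and fall back to the request parameters, and cached input tokens are split out of the total before recording. A sketch with a hypothetical Chat Completions-style response, again assuming object_value reads plain hashes like SDK objects:

```ruby
# Hypothetical response for illustration only.
response = {
  id: "chatcmpl-123",
  model: "gpt-4o-mini",
  usage: {
    prompt_tokens: 1_000,
    completion_tokens: 200,
    prompt_tokens_details: { cached_tokens: 400 }
  }
}

LlmCostTracker::Integrations::Openai.record_response(
  response,
  request: { model: "gpt-4o-mini" },
  latency_ms: 312
)
# Records input_tokens: 600 (1_000 minus 400 cached), cache_read_input_tokens: 400,
# output_tokens: 200, provider_response_id: "chatcmpl-123".
```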
.regular_input_tokens(input_tokens, cache_read) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 85

def regular_input_tokens(input_tokens, cache_read)
  [input_tokens.to_i - cache_read.to_i, 0].max
end
```
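OpenAI's reported input token total includes the cached portion, so the cached count is subtracted to isolate the regularly priced tokens, clamped at zero to guard against inconsistent counts:

```ruby
LlmCostTracker::Integrations::Openai.regular_input_tokens(1_000, 400) # => 600
LlmCostTracker::Integrations::Openai.regular_input_tokens(nil, 0)     # => 0
LlmCostTracker::Integrations::Openai.regular_input_tokens(100, 250)   # => 0 (clamped)
```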
.stream_collector(request) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 100

def stream_collector(request)
  LlmCostTracker::Capture::StreamCollector.new(
    provider: "openai",
    model: request[:model]
  )
end
```
.track_stream(stream, collector:) ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 89

def track_stream(stream, collector:)
  return stream unless active?

  LlmCostTracker::Capture::StreamTracker.new(
    stream,
    collector,
    -> { active? },
    ->(errored:) { finish_stream(collector, errored: errored) }
  ).wrap
end
```
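stream_collector and track_stream compose: the collector accumulates usage as events flow through the wrapped stream, and the second lambda invokes finish_stream when iteration completes or raises (errored: true/false). A hypothetical wiring, assuming the wrapped stream is consumed like the original enumerable:

```ruby
openai  = LlmCostTracker::Integrations::Openai
request = { model: "gpt-4o-mini" }

# Stand-in for an SDK stream; a real caller would get this from the OpenAI client.
raw_stream = [{ delta: "Hi" }, { usage: { input_tokens: 12, output_tokens: 3 } }].each

collector = openai.stream_collector(request)
wrapped   = openai.track_stream(raw_stream, collector: collector)

wrapped.each { |event| p event }
# When tracking is inactive, track_stream returns the stream unwrapped.
```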
.version_constant ⇒ Object
```ruby
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 21

def version_constant
  "OpenAI::VERSION"
end
```