Module: LlmCostTracker::Integrations::Openai

Extended by:
Base
Defined in:
lib/llm_cost_tracker/integrations/openai.rb

Defined Under Namespace

Modules: ChatCompletionsPatch, ResponsesPatch

Class Method Summary

Methods included from Base

active?, constant, elapsed_ms, enforce_budget!, install, minimum_version, patch_target, patch_targets, record_safely, request_params, status, version_constant

Class Method Details

.cache_read_input_tokens(usage) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 65

def cache_read_input_tokens(usage)
  ObjectReader.integer(
    ObjectReader.nested(usage, :input_tokens_details, :cached_tokens) ||
    ObjectReader.nested(usage, :prompt_tokens_details, :cached_tokens)
  )
end
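
For reference, the two nested lookups cover the SDK's two response shapes: the Responses API reports cached tokens under input_tokens_details, while Chat Completions reports them under prompt_tokens_details. A minimal sketch, assuming ObjectReader.nested accepts plain hashes and ObjectReader.integer returns an Integer unchanged (both are assumptions about internal helpers):

# Hypothetical usage payloads, hash form chosen for illustration
responses_usage = { input_tokens_details: { cached_tokens: 800 } }
chat_usage      = { prompt_tokens_details: { cached_tokens: 800 } }

LlmCostTracker::Integrations::Openai.cache_read_input_tokens(responses_usage) # => 800
LlmCostTracker::Integrations::Openai.cache_read_input_tokens(chat_usage)      # => 800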

.finish_stream(collector, errored:) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 101

def finish_stream(collector, errored:)
  record_safely { collector.finish!(errored: errored) }
end

.hidden_output_tokens(usage) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 72

def hidden_output_tokens(usage)
  ObjectReader.integer(
    ObjectReader.nested(usage, :output_tokens_details, :reasoning_tokens) ||
    ObjectReader.nested(usage, :completion_tokens_details, :reasoning_tokens)
  )
end

.integration_nameObject



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 12

def integration_name = :openai

.minimum_versionObject



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 14

def minimum_version = "0.59.0"

.patch_targetsObject



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 18

def patch_targets
  [
    patch_target(
      "OpenAI::Resources::Responses",
      with: ResponsesPatch,
      methods: %i[create stream stream_raw retrieve_streaming]
    ),
    patch_target(
      "OpenAI::Resources::Chat::Completions",
      with: ChatCompletionsPatch,
      methods: %i[create stream_raw]
    )
  ]
end
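
Each patch_target pairs an OpenAI SDK resource class with the module that wraps its request methods. A plausible sketch of the installed effect, assuming Base#install resolves each class name and prepends the patch module (the actual mechanism lives in Base and is not shown here):

# Assumed effect of install for the targets above (illustrative only)
OpenAI::Resources::Responses.prepend(
  LlmCostTracker::Integrations::Openai::ResponsesPatch
)
OpenAI::Resources::Chat::Completions.prepend(
  LlmCostTracker::Integrations::Openai::ChatCompletionsPatch
)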

.record_response(response, request:, latency_ms:) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 33

def record_response(response, request:, latency_ms:)
  return unless active?

  record_safely do
    usage = ObjectReader.first(response, :usage)
    next unless usage

    input_tokens = ObjectReader.first(usage, :input_tokens, :prompt_tokens)
    output_tokens = ObjectReader.first(usage, :output_tokens, :completion_tokens)
    next if input_tokens.nil? && output_tokens.nil?

    metadata = usage_metadata(usage)
    LlmCostTracker::Tracker.record(
      provider: "openai",
      model: ObjectReader.first(response, :model) || request[:model],
      input_tokens: regular_input_tokens(input_tokens, metadata[:cache_read_input_tokens]),
      output_tokens: ObjectReader.integer(output_tokens),
      latency_ms: latency_ms,
      usage_source: :sdk_response,
      provider_response_id: ObjectReader.first(response, :id),
      metadata: metadata
    )
  end
end
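
A patch would typically call record_response right after the SDK returns, passing the original request parameters and the measured latency. A minimal sketch as a standalone wrapper rather than the gem's actual patch code; the client call and timing approach are assumptions:

# Illustrative wrapper, not the gem's ChatCompletionsPatch
def create_with_tracking(client, params)
  started = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)
  response = client.chat.completions.create(**params)
  LlmCostTracker::Integrations::Openai.record_response(
    response,
    request: params,
    latency_ms: Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond) - started
  )
  response
end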

.regular_input_tokens(input_tokens, cache_read) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 79

def regular_input_tokens(input_tokens, cache_read)
  [ObjectReader.integer(input_tokens) - cache_read.to_i, 0].max
end
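
The subtraction strips cached tokens out of the billed input count, and the clamp keeps the result from going negative if the cache figure exceeds the reported input. Worked examples, assuming ObjectReader.integer returns an Integer argument unchanged:

regular_input_tokens(1_200, 800) # => 400  (400 uncached input tokens)
regular_input_tokens(500, nil)   # => 500  (nil.to_i is 0, nothing subtracted)
regular_input_tokens(300, 900)   # => 0    (clamped at zero)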

.stream_collector(request) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 94

def stream_collector(request)
  LlmCostTracker::StreamCollector.new(
    provider: "openai",
    model: request[:model] || request["model"]
  )
end

.track_stream(stream, collector:) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 83

def track_stream(stream, collector:)
  return stream unless active?

  StreamTracker.wrap(
    stream,
    collector: collector,
    active: -> { active? },
    finish: ->(errored:) { finish_stream(collector, errored: errored) }
  )
end
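
Streaming requests are tracked by building a collector before the call, wrapping the returned stream, and letting the wrapper invoke finish_stream once the stream is exhausted or raises. A minimal sketch of that flow; the client call and handle method are placeholders, not part of the gem:

# Illustrative streaming flow (handle is a hypothetical consumer)
collector = LlmCostTracker::Integrations::Openai.stream_collector(params)
stream    = client.responses.stream(**params)
tracked   = LlmCostTracker::Integrations::Openai.track_stream(stream, collector: collector)

tracked.each { |event| handle(event) }
# the wrapper's finish callback calls finish_stream(collector, errored: false) when iteration ends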

.usage_metadata(usage) ⇒ Object



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 58

def usage_metadata(usage)
  {
    cache_read_input_tokens: cache_read_input_tokens(usage),
    hidden_output_tokens: hidden_output_tokens(usage)
  }
end
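
Cache-read and reasoning ("hidden") token counts are carried as metadata rather than being folded into the billed totals. An illustrative input/output pair, assuming a plain-hash usage payload works with ObjectReader.nested:

usage = {
  input_tokens_details:  { cached_tokens: 800 },
  output_tokens_details: { reasoning_tokens: 64 }
}
usage_metadata(usage)
# => { cache_read_input_tokens: 800, hidden_output_tokens: 64 }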

.version_constantObject



# File 'lib/llm_cost_tracker/integrations/openai.rb', line 16

def version_constant = "OpenAI::VERSION"
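
version_constant names the SDK constant that Base presumably resolves to enforce minimum_version before patching. A plausible sketch of that gate, offered only as an assumption about how the two values combine:

# Assumed version check (actual logic lives in Base)
installed = Gem::Version.new(Object.const_get("OpenAI::VERSION"))
required  = Gem::Version.new("0.59.0")
patch_ok  = installed >= required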