Module: LlmCostTracker::Integrations::Openai

Extended by:
Base
Defined in:
lib/llm_cost_tracker/integrations/openai.rb

Defined Under Namespace

Modules: ChatCompletionsPatch, ResponsesPatch

Class Method Summary collapse

Methods included from Base

active?, constant, elapsed_ms, enforce_budget!, install, record_safely, request_params, status

Class Method Details

.cache_read_input_tokens(usage) ⇒ Object



51
52
53
54
55
56
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 51

# Reads the cached-input token count from a usage payload. Tries the
# Responses API field first, then falls back to the Chat Completions field.
#
# @param usage [Object] usage object from an OpenAI SDK response
# @return [Integer, nil] coerced token count, or nil when absent
def cache_read_input_tokens(usage)
  cached = ObjectReader.nested(usage, :input_tokens_details, :cached_tokens)
  cached ||= ObjectReader.nested(usage, :prompt_tokens_details, :cached_tokens)
  ObjectReader.integer(cached)
end

.hidden_output_tokens(usage) ⇒ Object



58
59
60
61
62
63
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 58

# Reads the hidden (reasoning) output token count from a usage payload.
# Checks the Responses API field first, then the Chat Completions field.
#
# @param usage [Object] usage object from an OpenAI SDK response
# @return [Integer, nil] coerced token count, or nil when absent
def hidden_output_tokens(usage)
  reasoning = ObjectReader.nested(usage, :output_tokens_details, :reasoning_tokens)
  reasoning ||= ObjectReader.nested(usage, :completion_tokens_details, :reasoning_tokens)
  ObjectReader.integer(reasoning)
end

.integration_nameObject



11
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 11

# Identifier under which this integration is registered.
#
# @return [Symbol] always +:openai+
def integration_name
  :openai
end

.record_response(response, request:, latency_ms:) ⇒ Object



20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 20

# Records token usage and latency for one OpenAI SDK response.
# No-ops when tracking is inactive, when the response carries no usage
# object, or when neither input nor output token counts are present.
#
# @param response [Object] SDK response (Responses or Chat Completions shape)
# @param request [Hash] request params; +:model+ is used as a fallback
# @param latency_ms [Numeric] wall-clock latency of the API call
# @return [void]
def record_response(response, request:, latency_ms:)
  return unless active?

  record_safely do
    usage = ObjectReader.first(response, :usage)
    next unless usage

    # Responses API uses input/output_tokens; Chat uses prompt/completion_tokens.
    input_tokens = ObjectReader.first(usage, :input_tokens, :prompt_tokens)
    output_tokens = ObjectReader.first(usage, :output_tokens, :completion_tokens)
    next if input_tokens.nil? && output_tokens.nil?

    LlmCostTracker::Tracker.record(
      provider: "openai",
      model: ObjectReader.first(response, :model) || request[:model],
      input_tokens: ObjectReader.integer(input_tokens),
      output_tokens: ObjectReader.integer(output_tokens),
      latency_ms: latency_ms,
      usage_source: :sdk_response,
      provider_response_id: ObjectReader.first(response, :id),
      # BUG FIX: was `metadata: (usage)` — pass the derived metadata hash,
      # not the raw usage object (see .usage_metadata).
      metadata: usage_metadata(usage)
    )
  end
end

.target_patchesObject



13
14
15
16
17
18
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 13

# Lists the OpenAI SDK resource classes to patch, each paired with the
# module that instruments it.
#
# @return [Array<Array(Object, Module)>] [resource class, patch module] pairs
def target_patches
  responses_pair = [constant("OpenAI::Resources::Responses"), ResponsesPatch]
  chat_pair = [constant("OpenAI::Resources::Chat::Completions"), ChatCompletionsPatch]
  [responses_pair, chat_pair]
end

.usage_metadata(usage) ⇒ Object



44
45
46
47
48
49
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 44

# Builds the metadata hash stored alongside a recorded OpenAI call.
#
# BUG FIX: the method name was missing (`def (usage)` is invalid Ruby);
# restored to +usage_metadata+ per the documented signature.
#
# @param usage [Object] usage object from an OpenAI SDK response
# @return [Hash] cached-input and hidden/reasoning token counts
def usage_metadata(usage)
  {
    cache_read_input_tokens: cache_read_input_tokens(usage),
    hidden_output_tokens: hidden_output_tokens(usage)
  }
end