Module: LlmCostTracker::Integrations::RubyLlm
- Extended by:
- Base
- Defined in:
- lib/llm_cost_tracker/integrations/ruby_llm.rb
Defined Under Namespace
Modules: ProviderPatch
Class Method Summary collapse
- .integration_name ⇒ Object
- .minimum_version ⇒ Object
- .model_id(object) ⇒ Object
- .patch_targets ⇒ Object
- .pricing_mode(response) ⇒ Object
- .provider_response_id(response) ⇒ Object
- .provider_slug(provider) ⇒ Object
- .record_completion(provider, response, request:, latency_ms:, stream:) ⇒ Object
- .record_embedding(provider, response, request:, latency_ms:) ⇒ Object
- .record_transcription(provider, response, request:, latency_ms:) ⇒ Object
- .record_usage(provider:, model:, response:, latency_ms:, stream:, output_tokens: nil) ⇒ Object
- .regular_input_tokens(input_tokens, cache_read) ⇒ Object
- .response_model_id(object) ⇒ Object
- .streaming_request?(request, has_block:) ⇒ Boolean
- .version_constant ⇒ Object
Methods included from Base
active?, elapsed_ms, enforce_budget!, install, minimum_version, object_dig, object_value, patch_target, patch_targets, record_safely, request_params, status, version_constant
Class Method Details
.integration_name ⇒ Object
11 12 13 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 11

# Symbolic name under which this integration registers itself.
#
# @return [Symbol] always +:ruby_llm+
def integration_name
  :ruby_llm
end
.minimum_version ⇒ Object
15 16 17 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 15

# Oldest RubyLLM release this integration supports patching.
#
# @return [String] a gem version string
def minimum_version
  "1.14.1"
end
.model_id(object) ⇒ Object
108 109 110 111 112 113 114 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 108

# Extracts a model identifier from an arbitrary request-side object.
#
# Tries the +:id+ / +:model_id+ / +:model+ accessors first; when none of
# those yields a value and the object itself is a bare String or Symbol,
# the object is used as the identifier.
#
# @param object [Object, String, Symbol, nil] model reference from the request
# @return [String, nil] stringified model id, or nil when none is found
def model_id(object)
  return nil if object.nil?

  candidate = object_value(object, :id, :model_id, :model)
  case object
  when String, Symbol
    candidate ||= object
  end
  candidate&.to_s
end
.patch_targets ⇒ Object
23 24 25 26 27 28 29 30 31 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 23

# Declares which RubyLLM class gets patched and with what.
#
# @return [Array] a single patch-target descriptor wrapping
#   RubyLLM::Provider's slug/complete/embed/transcribe methods
def patch_targets
  provider_target = patch_target(
    "RubyLLM::Provider",
    with: ProviderPatch,
    methods: %i[slug complete embed transcribe]
  )
  [provider_target]
end
.pricing_mode(response) ⇒ Object
125 126 127 128 129 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 125

# Resolves the pricing mode / service tier reported by the provider.
#
# Direct accessors on the response win; otherwise falls back to digging
# into the raw payload.
#
# @param response [Object] provider response object
# @return [Object, nil] pricing mode value, or nil when absent
def pricing_mode(response)
  direct = object_value(response, :pricing_mode, :service_tier)
  return direct if direct

  object_dig(response, :raw, :pricing_mode) ||
    object_dig(response, :raw, :service_tier)
end
.provider_response_id(response) ⇒ Object
121 122 123 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 121

# Finds the provider-assigned id for this response, preferring direct
# accessors over the raw payload.
#
# @param response [Object] provider response object
# @return [Object, nil] the response id, or nil when absent
def provider_response_id(response)
  direct = object_value(response, :id, :provider_response_id)
  direct || object_dig(response, :raw, :id)
end
.provider_slug(provider) ⇒ Object
104 105 106 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 104

# Reads the provider's slug and coerces it to a String ("" when absent).
#
# @param provider [Object] a RubyLLM provider instance
# @return [String] the provider slug
def provider_slug(provider)
  slug = object_value(provider, :slug)
  slug.to_s
end
.record_completion(provider, response, request:, latency_ms:, stream:) ⇒ Object
33 34 35 36 37 38 39 40 41 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 33

# Records usage for a chat-completion call.
#
# Model id comes from the response when available, falling back to the
# model named in the request.
#
# @param provider [Object] RubyLLM provider instance
# @param response [Object] completion response
# @param request [Hash] original request parameters
# @param latency_ms [Numeric] wall-clock latency in milliseconds
# @param stream [Boolean] whether the call was streamed
def record_completion(provider, response, request:, latency_ms:, stream:)
  resolved_model = response_model_id(response) || model_id(request[:model])
  record_usage(
    provider: provider_slug(provider),
    model: resolved_model,
    response: response,
    latency_ms: latency_ms,
    stream: stream
  )
end
.record_embedding(provider, response, request:, latency_ms:) ⇒ Object
47 48 49 50 51 52 53 54 55 56 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 47

# Records usage for an embedding call.
#
# NOTE: the rendered code had lost its method name (`def (provider, ...)`,
# a syntax error); restored to +record_embedding+ per the method summary
# and heading. Embeddings never stream and produce no output tokens, so
# those values are pinned here.
#
# @param provider [Object] RubyLLM provider instance
# @param response [Object] embedding response
# @param request [Hash] original request parameters
# @param latency_ms [Numeric] wall-clock latency in milliseconds
def record_embedding(provider, response, request:, latency_ms:)
  record_usage(
    provider: provider_slug(provider),
    model: response_model_id(response) || model_id(request[:model]),
    response: response,
    latency_ms: latency_ms,
    stream: false,
    output_tokens: 0
  )
end
.record_transcription(provider, response, request:, latency_ms:) ⇒ Object
58 59 60 61 62 63 64 65 66 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 58

# Records usage for an audio-transcription call (never streamed).
#
# @param provider [Object] RubyLLM provider instance
# @param response [Object] transcription response
# @param request [Hash] original request parameters
# @param latency_ms [Numeric] wall-clock latency in milliseconds
def record_transcription(provider, response, request:, latency_ms:)
  resolved_model = response_model_id(response) || model_id(request[:model])
  record_usage(
    provider: provider_slug(provider),
    model: resolved_model,
    response: response,
    latency_ms: latency_ms,
    stream: false
  )
end
.record_usage(provider:, model:, response:, latency_ms:, stream:, output_tokens: nil) ⇒ Object
68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 68

# Central recording path shared by the completion/embedding/transcription
# hooks. Reads token counts off the response, normalizes them, and hands a
# UsageCapture to the tracker. Skips entirely when tracking is inactive or
# when the response carries no token information at all.
#
# @param provider [String] provider slug
# @param model [String, nil] resolved model identifier
# @param response [Object] provider response object
# @param latency_ms [Numeric] wall-clock latency in milliseconds
# @param stream [Boolean] whether the request was streamed
# @param output_tokens [Integer, nil] explicit output-token override
#   (e.g. 0 for embeddings); nil means "read it from the response"
def record_usage(provider:, model:, response:, latency_ms:, stream:, output_tokens: nil)
  return unless active?

  record_safely do
    tokens_in = object_value(response, :input_tokens)
    tokens_out = output_tokens.nil? ? object_value(response, :output_tokens) : output_tokens
    next if tokens_in.nil? && tokens_out.nil?

    cached = object_value(response, :cached_tokens).to_i
    hidden = object_value(response, :thinking_tokens, :reasoning_tokens).to_i

    usage = TokenUsage.build(
      input_tokens: regular_input_tokens(tokens_in, cached),
      output_tokens: tokens_out.to_i,
      cache_read_input_tokens: cached,
      cache_write_input_tokens: object_value(response, :cache_creation_tokens).to_i,
      hidden_output_tokens: hidden
    )
    capture = UsageCapture.build(
      provider: provider,
      model: model,
      pricing_mode: pricing_mode(response),
      token_usage: usage,
      stream: stream,
      usage_source: :ruby_llm,
      provider_response_id: provider_response_id(response)
    )
    LlmCostTracker::Tracker.record(capture: capture, latency_ms: latency_ms)
  end
end
.regular_input_tokens(input_tokens, cache_read) ⇒ Object
100 101 102 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 100

# Non-cached portion of the input tokens, floored at zero.
#
# @param input_tokens [Integer, nil] total input tokens (nil treated as 0)
# @param cache_read [Integer, nil] cached input tokens (nil treated as 0)
# @return [Integer] input tokens minus cache reads, never negative
def regular_input_tokens(input_tokens, cache_read)
  uncached = input_tokens.to_i - cache_read.to_i
  uncached.negative? ? 0 : uncached
end
.response_model_id(object) ⇒ Object
116 117 118 119 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 116

# Model identifier as reported on the response object itself.
#
# @param object [Object, nil] provider response
# @return [String, nil] stringified model id, or nil when absent
def response_model_id(object)
  object_value(object, :model_id, :model)&.to_s
end
.streaming_request?(request, has_block:) ⇒ Boolean
43 44 45 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 43

# Whether this request should be treated as streaming: either a block was
# given to the call, or the request explicitly set +stream: true+.
#
# @param request [Hash] request parameters
# @param has_block [Boolean] whether the caller supplied a block
# @return [Boolean]
def streaming_request?(request, has_block:)
  return has_block if has_block

  request[:stream] == true
end
.version_constant ⇒ Object
19 20 21 |
# File 'lib/llm_cost_tracker/integrations/ruby_llm.rb', line 19

# Fully-qualified name of the constant holding the installed gem version.
#
# @return [String] "RubyLLM::VERSION"
def version_constant
  "RubyLLM::VERSION"
end