Module: LlmCostTracker::Integrations::Openai::ChatCompletionsPatch
- Defined in:
- lib/llm_cost_tracker/integrations/openai.rb
Instance Method Summary collapse
Instance Method Details
#create(*args, **kwargs) ⇒ Object
146 147 148 149 150 151 152 153 154 155 156 |
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 146

# Prepend-patched chat-completions entry point: enforces the configured
# budget before delegating, then records cost/latency for the response.
# Returns the untouched response from `super` so callers see no difference.
def create(*args, **kwargs)
  integration = LlmCostTracker::Integrations::Openai
  # Monotonic clock — wall-clock (Time.now) can jump; durations must not.
  started_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
  # May raise if the budget is exhausted; must happen before the API call.
  integration.enforce_budget!
  super.tap do |response|
    integration.record_response(
      response,
      request: integration.request_params(args, kwargs),
      latency_ms: integration.elapsed_ms(started_at)
    )
  end
end
#stream_raw(*args, **kwargs) ⇒ Object
158 159 160 161 162 163 164 |
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 158 def stream_raw(*args, **kwargs) request = LlmCostTracker::Integrations::Openai.request_params(args, kwargs) collector = LlmCostTracker::Integrations::Openai.stream_collector(request) LlmCostTracker::Integrations::Openai.enforce_budget! stream = super LlmCostTracker::Integrations::Openai.track_stream(stream, collector: collector) end |