Module: LlmCostTracker::Integrations::Openai::ResponsesPatch
- Defined in:
- lib/llm_cost_tracker/integrations/openai.rb
Instance Method Summary collapse
- #create(*args, **kwargs) ⇒ Object
- #retrieve_streaming(response_id, *args, **kwargs) ⇒ Object
- #stream(*args, **kwargs) ⇒ Object
- #stream_raw(*args, **kwargs) ⇒ Object
Instance Method Details
#create(*args, **kwargs) ⇒ Object
158 159 160 161 162 163 164 165 166 167 168 |
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 158

# Tracks a non-streaming Responses API call: enforces the configured
# budget before dispatch, times the request, and records usage for the
# returned response.
#
# @param args [Array] positional arguments forwarded to the patched client
# @param kwargs [Hash] keyword arguments forwarded to the patched client
# @return [Object] the provider response, returned unchanged
def create(*args, **kwargs)
  # Capture the request params up front, consistent with #stream,
  # #stream_raw and #retrieve_streaming, instead of re-extracting them
  # after `super` has already run.
  request = LlmCostTracker::Integrations::Openai.request_params(args, kwargs)
  LlmCostTracker::Integrations::Openai.enforce_budget!
  started_at = LlmCostTracker::Timing.now_monotonic
  response = super
  LlmCostTracker::Integrations::Openai.record_response(
    response,
    request: request,
    latency_ms: LlmCostTracker::Integrations::Openai.elapsed_ms(started_at)
  )
  response
end
#retrieve_streaming(response_id, *args, **kwargs) ⇒ Object
186 187 188 189 190 191 192 193 |
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 186

# Resumes streaming of an existing response while tracking its cost.
# Budget is enforced before dispatch; the known provider response id is
# attached to the collector so usage is attributed to the right response.
#
# @param response_id [String] identifier of the response being resumed
# @param args [Array] positional arguments forwarded to the patched client
# @param kwargs [Hash] keyword arguments forwarded to the patched client
# @return [Object] the tracked stream
def retrieve_streaming(response_id, *args, **kwargs)
  openai = LlmCostTracker::Integrations::Openai
  request = openai.request_params(args, kwargs)
  openai.enforce_budget!
  collector = openai.stream_collector(request)
  collector.provider_response_id = response_id
  openai.track_stream(super, collector: collector)
end
#stream(*args, **kwargs) ⇒ Object
170 171 172 173 174 175 176 |
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 170

# Starts a tracked streaming Responses API call: enforces the budget,
# builds a collector for the request, then wraps the underlying stream
# so usage events are captured as they arrive.
#
# @param args [Array] positional arguments forwarded to the patched client
# @param kwargs [Hash] keyword arguments forwarded to the patched client
# @return [Object] the tracked stream
def stream(*args, **kwargs)
  openai = LlmCostTracker::Integrations::Openai
  request = openai.request_params(args, kwargs)
  openai.enforce_budget!
  collector = openai.stream_collector(request)
  openai.track_stream(super, collector: collector)
end
#stream_raw(*args, **kwargs) ⇒ Object
178 179 180 181 182 183 184 |
# File 'lib/llm_cost_tracker/integrations/openai.rb', line 178

# Raw-stream variant of #stream with identical tracking behavior:
# budget enforcement, collector creation, and stream wrapping.
#
# @param args [Array] positional arguments forwarded to the patched client
# @param kwargs [Hash] keyword arguments forwarded to the patched client
# @return [Object] the tracked raw stream
def stream_raw(*args, **kwargs)
  tracker = LlmCostTracker::Integrations::Openai
  request = tracker.request_params(args, kwargs)
  tracker.enforce_budget!
  collector = tracker.stream_collector(request)
  tracker.track_stream(super, collector: collector)
end