Module: Braintrust::Contrib::RubyOpenAI::Instrumentation::Chat::InstanceMethods
- Defined in:
- lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb
Instance Method Summary collapse
-
#chat(parameters:) ⇒ Object
Wraps the `chat` method of the ruby-openai gem (API: `client.chat(parameters: …)`) in a tracing span.
Instance Method Details
#chat(parameters:) ⇒ Object
Wraps the `chat` method of the ruby-openai gem (API: `client.chat(parameters: …)`) in a tracing span.
42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 |
# File 'lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb', line 42

# Wrap the ruby-openai gem's chat method (API: client.chat(parameters: ...))
# in a Braintrust tracing span named "Chat Completion", recording the request
# input, the response output, usage metrics, and time-to-first-token for both
# streaming and non-streaming calls.
#
# @param parameters [Hash] the parameters hash passed to client.chat
# @return [Object] whatever the wrapped (super) chat call returns
def chat(parameters:)
  tracer = Braintrust::Contrib.tracer_for(self)
  tracer.in_span("Chat Completion") do |span|
    is_streaming = streaming?(parameters)
    # NOTE(review): the extracted source dropped the identifiers on this line
    # ("= (parameters)"); presumably request metadata is captured from the
    # parameters here — confirm the exact names against the original file.
    metadata = extract_metadata(parameters)
    set_input(span, parameters)

    aggregated_chunks = []
    time_to_first_token = nil
    response = nil
    response_data = {}

    if is_streaming
      # Set up a time measurement for the first chunk from the stream;
      # start_time is captured by the callback closure and assigned just
      # before initiating the stream.
      start_time = nil
      parameters = wrap_stream_callback(parameters, aggregated_chunks) do
        time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
      end
      start_time = Braintrust::Internal::Time.measure
      # Then initiate the stream.
      response = super(parameters: parameters)

      unless aggregated_chunks.empty?
        response_data = Common.aggregate_streaming_chunks(aggregated_chunks)
      end
    else
      # Make a time measurement synchronously around the API call.
      time_to_first_token = Braintrust::Internal::Time.measure do
        response = super(parameters: parameters)
        response_data = response if response
      end
    end

    set_output(span, response_data)
    set_metrics(span, response_data, time_to_first_token)
    # NOTE(review): the extracted source dropped the method name and middle
    # argument here ("(span, , response_data)"); presumably this records the
    # metadata captured above onto the span — confirm against the original.
    set_metadata(span, metadata, response_data)
    response
  end
end