Module: Braintrust::Contrib::RubyOpenAI::Instrumentation::Responses::InstanceMethods
- Defined in:
- lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb
Instance Method Summary collapse
-
#create(parameters:) ⇒ Object
Wraps the create method for the ruby-openai Responses API. ruby-openai call shape: client.responses.create(parameters: ...).
Instance Method Details
#create(parameters:) ⇒ Object
Wraps the create method for the ruby-openai Responses API. ruby-openai call shape: client.responses.create(parameters: ...)
# File 'lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb', line 42

# Wraps the +create+ method of the ruby-openai Responses API.
# ruby-openai call shape: client.responses.create(parameters: ...)
#
# Traces the call in an "openai.responses.create" span, recording the
# request input, the (possibly chunk-aggregated) output, metrics —
# including time-to-first-token — and metadata. Handles both streaming
# and non-streaming requests.
#
# @param parameters [Hash] parameters forwarded to the underlying client call
# @return [Object] the raw response returned by +super+
def create(parameters:)
  client = instance_variable_get(:@client)
  tracer = Braintrust::Contrib.tracer_for(client)

  tracer.in_span("openai.responses.create") do |span|
    is_streaming = streaming?(parameters)
    # NOTE(review): the identifier on this line was lost in doc
    # extraction — presumably `metadata = extract_metadata(parameters)`;
    # confirm against the original source file.
    metadata = extract_metadata(parameters)

    set_input(span, parameters)

    aggregated_chunks = []
    time_to_first_token = nil
    response = nil
    response_data = {}

    if is_streaming
      # Set up a time measurement for the first chunk from the stream.
      start_time = nil
      parameters = wrap_stream_callback(parameters, aggregated_chunks) do
        time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
      end
      start_time = Braintrust::Internal::Time.measure

      # Then initiate the stream.
      response = super(parameters: parameters)

      unless aggregated_chunks.empty?
        response_data = Common.aggregate_responses_chunks(aggregated_chunks)
      end
    else
      # Make a time measurement synchronously around the API call.
      time_to_first_token = Braintrust::Internal::Time.measure do
        response = super(parameters: parameters)
        response_data = response if response
      end
    end

    set_output(span, response_data)
    set_metrics(span, response_data, time_to_first_token)
    # NOTE(review): the call name and middle argument here were lost in
    # doc extraction — presumably `set_metadata(span, metadata,
    # response_data)`; verify against the original source file.
    set_metadata(span, metadata, response_data)

    response
  end
end