Module: Tracekit::LLM::OpenAIInstrumentation
- Defined in:
- lib/tracekit/llm/openai_instrumentation.rb
Defined Under Namespace
Classes: OpenAIStreamAccumulator
Class Method Summary
Class Method Details
.install(tracer) ⇒ Object
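Prepends a tracing module onto ::OpenAI::Client so that every #chat call is wrapped in a client span. Returns true on success, and false when the openai gem cannot be loaded and no client class is already defined. A minimal installation sketch (obtaining the tracer via Tracekit.tracer is an assumption, not confirmed by this page; any object responding to start_span as used in the source below should work):

  require "tracekit"
  require "tracekit/llm/openai_instrumentation"

  tracer = Tracekit.tracer # assumed entry point for obtaining a tracer
  unless Tracekit::LLM::OpenAIInstrumentation.install(tracer)
    warn "openai gem not found; OpenAI instrumentation skipped"
  end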
# File 'lib/tracekit/llm/openai_instrumentation.rb', line 10

def install(tracer)
  # Try to load the OpenAI gem
  begin
    require "openai"
  rescue LoadError
    # openai gem not available; check whether it is already defined (e.g. in tests)
    return false unless defined?(::OpenAI::Client)
  end

  client_class = ::OpenAI::Client
  return false unless client_class

  # Create the prepend module dynamically, closing over the tracer
  instrumentation_mod = Module.new do
    define_method(:chat) do |parameters: {}|
      model = parameters[:model] || parameters["model"] || "unknown"
      stream_proc = parameters[:stream] || parameters["stream"]
      is_streaming = stream_proc.is_a?(Proc)
      capture = Common.capture_content?

      span = tracer.start_span("chat #{model}", kind: :client)
      begin
        Common.set_request_attributes(
          span,
          provider: "openai",
          model: model,
          max_tokens: parameters[:max_tokens] || parameters["max_tokens"] ||
                      parameters[:max_completion_tokens] || parameters["max_completion_tokens"],
          temperature: parameters[:temperature] || parameters["temperature"],
          top_p: parameters[:top_p] || parameters["top_p"]
        )

        # Capture input content
        if capture
          messages = parameters[:messages] || parameters["messages"]
          if messages
            system_msgs = messages.select { |m| (m[:role] || m["role"]) == "system" }
            non_system  = messages.reject { |m| (m[:role] || m["role"]) == "system" }
            Common.capture_system_instructions(span, system_msgs) if system_msgs.any?
            Common.capture_input_messages(span, non_system)
          end
        end

        if is_streaming
          # ruby-openai handles streaming via a proc callback internally.
          # The chat method returns the final response hash, not an enumerator.
          # We wrap the user's proc to accumulate span data from each chunk.
          accumulator = OpenAIStreamAccumulator.new(span, capture)
          wrapper_proc = proc do |chunk, bytesize|
            accumulator.process_chunk(chunk)
            # Call the original proc with whichever arity it accepts
            if stream_proc.arity == 2 || stream_proc.arity < 0
              stream_proc.call(chunk, bytesize)
            else
              stream_proc.call(chunk)
            end
          end

          # Inject stream_options.include_usage for token counting
          params = parameters.dup
          so = params[:stream_options] || params["stream_options"] || {}
          unless so[:include_usage] || so["include_usage"]
            params[:stream_options] = so.merge(include_usage: true)
          end
          params[:stream] = wrapper_proc
          params.delete("stream") # drop any string-keyed copy of the user's proc

          result = super(parameters: params)
          accumulator.finalize
          result
        else
          result = super(parameters: parameters)
          # Non-streaming response handling
          handle_response(span, result, capture)
          result
        end
      rescue => e
        Common.set_error_attributes(span, e)
        span.finish
        raise
      end
    end

    private

    def handle_response(span, result, capture)
      choices = result.dig("choices") || []
      Common.set_response_attributes(
        span,
        model: result["model"],
        id: result["id"],
        finish_reasons: choices.map { |c| c["finish_reason"] }.compact,
        input_tokens: result.dig("usage", "prompt_tokens"),
        output_tokens: result.dig("usage", "completion_tokens")
      )

      # Tool calls
      choices.each do |choice|
        (choice.dig("message", "tool_calls") || []).each do |tc|
          Common.record_tool_call(
            span,
            name: tc.dig("function", "name") || "unknown",
            id: tc["id"],
            arguments: tc.dig("function", "arguments")
          )
        end
      end

      # Output content capture
      if capture && choices.any?
        output_msgs = choices.map { |c| c["message"] }.compact
        Common.capture_output_messages(span, output_msgs) if output_msgs.any?
      end
    rescue => _e
      # Never break user code
    ensure
      span.finish
    end
  end

  client_class.prepend(instrumentation_mod)
  true
end
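Once installed, callers use the ruby-openai client unchanged; the instrumentation wraps the user's stream proc and injects stream_options.include_usage so the final streamed chunk carries token counts. A short sketch of a traced streaming call (the client construction follows the public ruby-openai API; the span name and attributes follow the source above, and the model name is illustrative):

  client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

  client.chat(
    parameters: {
      model: "gpt-4o-mini",
      messages: [{ role: "user", content: "Say hello" }],
      # One-argument proc: the wrapper checks arity and forwards only the chunk
      stream: proc { |chunk| print chunk.dig("choices", 0, "delta", "content").to_s }
    }
  )
  # Records a client span named "chat gpt-4o-mini" with request and usage
  # attributes, plus message content when content capture is enabled.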