Module: LlmLogs::Instrumentation::RubyLlmChat

Defined in:
lib/llm_logs/instrumentation/ruby_llm_chat.rb

Instance Method Summary collapse

Instance Method Details

#complete(&block) ⇒ Object



4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
# File 'lib/llm_logs/instrumentation/ruby_llm_chat.rb', line 4

# Wraps chat completion with an LlmLogs "llm" span. When logging is
# disabled, delegates straight to the underlying implementation.
#
# Records the serialized conversation as span input, then the response
# and computed cost on success; on failure records the error and any
# partial token usage before re-raising. The span is always finished.
def complete(&block)
  return super unless LlmLogs.enabled?

  serialized_input = messages.map do |message|
    { role: message.role, content: llm_logs_serialize_content(message.content) }
  end

  trace_span = LlmLogs::Tracer.start_span(
    name: "chat.complete",
    span_type: "llm",
    model: @model&.id,
    provider: @model&.provider,
    input: serialized_input
  )

  # Snapshot taken so partial token usage can be attributed on error.
  initial_message_count = messages.size

  begin
    response = super(&block)
    trace_span.record_response(response)
    trace_span.cost = llm_logs_compute_cost(response)
    response
  rescue => error
    trace_span.record_error(error)
    llm_logs_capture_partial_tokens(trace_span, initial_message_count)
    raise
  ensure
    trace_span.finish
  end
end

#execute_tool(tool_call) ⇒ Object



80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# File 'lib/llm_logs/instrumentation/ruby_llm_chat.rb', line 80

# Wraps a single tool invocation with an LlmLogs "tool" span. When
# logging is disabled, delegates straight to the underlying
# implementation.
#
# The span captures the tool's arguments as input and the serialized
# result as output; a halt result (when RubyLLM defines Halt) is marked
# via the "tool.halted" attribute. The span is always finished.
def execute_tool(tool_call)
  return super unless LlmLogs.enabled?

  tool_span = LlmLogs::Tracer.start_span(
    name: "tool.#{tool_call.name}",
    span_type: "tool",
    input: tool_call.arguments,
    metadata: { tool_name: tool_call.name }
  )

  begin
    raw_result = super
    tool_span.output = llm_logs_serialize_tool_result(raw_result)
    if defined?(RubyLLM::Tool::Halt) && raw_result.is_a?(RubyLLM::Tool::Halt)
      tool_span.set_attribute("tool.halted", true)
    end
    # Serialize hash/array results as JSON so RubyLLM stores clean JSON
    # in message content rather than Ruby's Hash#to_s representation.
    if raw_result.is_a?(Hash) || raw_result.is_a?(Array)
      raw_result.to_json
    else
      raw_result
    end
  rescue => err
    tool_span.record_error(err)
    raise
  ensure
    tool_span.finish
  end
end