Class: Collavre::AiClient

Inherits:
Object
  • Object
show all
Defined in:
app/services/collavre/ai_client.rb

Constant Summary collapse

# Frozen default system prompt applied to AI conversations.
# NOTE(review): presumably combined with the per-instance @system_prompt when
# the conversation is built — confirm against build_conversation.
SYSTEM_INSTRUCTIONS =
<<~PROMPT.freeze
  You are a senior expert teammate. Respond:
  - Be concise and focus on the essentials (avoid unnecessary verbosity).
  - Use short bullet points only when helpful.
  - State only what you're confident about; briefly note any uncertainty.
  - Respond in the asker's language (prefer the latest user message). Keep code and error messages in their original form.
PROMPT

Instance Attribute Summary collapse

Instance Method Summary collapse

Constructor Details

#initialize(vendor:, model:, system_prompt:, llm_api_key: nil, gateway_url: nil, context: {}) ⇒ AiClient

Returns a new instance of AiClient.



13
14
15
16
17
18
19
20
21
22
# File 'app/services/collavre/ai_client.rb', line 13

# Builds a client bound to a single vendor/model pair.
#
# @param vendor [String, Symbol] LLM vendor identifier
# @param model [String] model name to request from the vendor
# @param system_prompt [String] per-instance system prompt
# @param llm_api_key [String, nil] API key, when not using a gateway default
# @param gateway_url [String, nil] optional gateway endpoint override
# @param context [Hash] arbitrary caller context carried alongside requests
def initialize(vendor:, model:, system_prompt:, llm_api_key: nil, gateway_url: nil, context: {})
  @vendor, @model, @system_prompt = vendor, model, system_prompt
  @llm_api_key, @gateway_url, @context = llm_api_key, gateway_url, context
  # Token counters are zero until the first chat call records real usage.
  @last_input_tokens = 0
  @last_output_tokens = 0
end

Instance Attribute Details

#last_input_tokens ⇒ Object (readonly)

Returns the value of attribute last_input_tokens.



11
12
13
# File 'app/services/collavre/ai_client.rb', line 11

# Input-token count reported by the most recent chat call (0 before any call).
def last_input_tokens = @last_input_tokens

#last_output_tokens ⇒ Object (readonly)

Returns the value of attribute last_output_tokens.



11
12
13
# File 'app/services/collavre/ai_client.rb', line 11

# Output-token count reported by the most recent chat call (0 before any call).
def last_output_tokens = @last_output_tokens

Instance Method Details

#ask(prompt) ⇒ Object

Ask a follow-up question using the existing conversation context. Used to generate approval summaries with full conversation history. Returns the response content string, or nil on failure.



88
89
90
91
92
93
94
95
96
97
98
# File 'app/services/collavre/ai_client.rb', line 88

# Asks a follow-up question on the conversation left behind by #chat (used to
# generate approval summaries with full history).
#
# @param prompt [String] the follow-up question
# @return [String, nil] stripped response text; nil when no conversation
#   exists yet, the response is blank, or the provider call fails
def ask(prompt)
  return nil unless @conversation

  # Replace the tool list with an empty one so the follow-up cannot trigger
  # another round of tool approval.
  @conversation.with_tools(replace: true)
  content = @conversation.ask(prompt)&.content
  content&.strip.presence
rescue StandardError => e
  Rails.logger.warn("AiClient#ask failed: #{e.class} #{e.message}")
  nil
end

#chat(contents, tools: [], &block) ⇒ Object



24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
# File 'app/services/collavre/ai_client.rb', line 24

# Runs one chat completion over +contents+, streaming deltas to the caller's
# block as they arrive. Token usage is captured into @last_input_tokens /
# @last_output_tokens and every interaction — success, error, or cancellation
# — is recorded via log_interaction in the ensure block.
#
# @param contents [Array] conversation messages handed to add_messages
# @param tools [Array] tool definitions handed to build_conversation
# @yield [String] each streamed content delta; on failure, an error notice
# @return [String, nil] the full response content, or nil on error/blank
# @raise [ApprovalPendingError, CancelledError] re-raised untouched
def chat(contents, tools: [], &block)
  response_content = +""
  error_message = nil
  input_tokens = nil
  output_tokens = nil

  # Read the ivar directly, consistent with the warning below; the bare
  # `vendor` call depended on a reader that is not part of the documented
  # interface and could raise NoMethodError.
  normalized_vendor = @vendor.to_s.downcase
  unless VENDOR_TO_PROVIDER.key?(normalized_vendor)
    Rails.logger.warn "Unsupported LLM vendor '#{@vendor}'. Attempting to use default (google)."
  end

  @conversation = build_conversation(tools)
  add_messages(@conversation, contents)

  # Accumulate streamed chunks locally in case the final response object
  # carries no usable content.
  response = @conversation.complete do |chunk|
    delta = extract_chunk_content(chunk)
    next if delta.blank?

    response_content << delta
    yield delta if block_given?
  end

  if response
    # Prefer the provider's final content over our streamed accumulation.
    response_content = response.content.to_s if response.content.present?

    # Extract token usage directly from response object (RubyLLM style)
    input_tokens = response.input_tokens if response.respond_to?(:input_tokens)
    output_tokens = response.output_tokens if response.respond_to?(:output_tokens)
  end

  response_content.presence
rescue ApprovalPendingError
  # Preserve conversation for follow-up (e.g. generating approval summary)
  raise
rescue CancelledError
  raise # Re-raise cancellation errors without catching them
rescue StandardError => e
  error_message = "[#{e.class.name}] #{e.message}"
  Rails.logger.error "AI Client error: #{error_message}"
  Rails.logger.error "Partial response length: #{response_content.length} chars" if response_content.present?
  Rails.logger.debug e.backtrace&.join("\n") # backtrace is nil for never-raised exceptions
  yield "\n\n⚠️ AI Error: #{error_message}" if block_given?
  nil
ensure
  # Always record usage and the interaction, even when an error was rescued
  # or a cancellation is propagating.
  @last_input_tokens = input_tokens || 0
  @last_output_tokens = output_tokens || 0
  log_interaction(
    messages: @conversation&.messages&.to_a || Array(contents),
    tools: @conversation&.tools&.to_a || [],
    response_content: response_content.presence,
    error_message: error_message,
    input_tokens: input_tokens,
    output_tokens: output_tokens
  )
end