Class: AIA::ChatProcessorService
Inherits: Object
Defined in: lib/aia/chat_processor_service.rb
Instance Method Summary
- #determine_operation_type ⇒ Object
- #initialize(ui_presenter, directive_processor = nil) ⇒ ChatProcessorService (constructor)
  A new instance of ChatProcessorService.
- #maybe_change_model ⇒ Object
- #output_response(response) ⇒ Object
- #process_next_prompts(response, prompt_handler) ⇒ Object
- #process_prompt(prompt) ⇒ Object
- #send_to_client(conversation_or_conversations) ⇒ Object
  conversation is an Array of Hashes (single model) or a Hash of Arrays (multi-model, per-model contexts); each entry is an interchange with the LLM.
- #speak(text) ⇒ Object
Constructor Details
#initialize(ui_presenter, directive_processor = nil) ⇒ ChatProcessorService
Returns a new instance of ChatProcessorService.
# File 'lib/aia/chat_processor_service.rb', line 5

def initialize(ui_presenter, directive_processor = nil)
  @ui_presenter        = ui_presenter
  @speaker             = AIA.speak? ? AiClient.new(AIA.config.audio.speech_model) : nil
  @directive_processor = directive_processor
end
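A minimal instantiation sketch, assuming the aia gem is installed and configured. The collaborator class names below (AIA::UIPresenter, AIA::DirectiveProcessor) are assumptions based on the parameter names; any duck-typed objects with the expected interfaces would work.

require 'aia' # assumes the aia gem is installed and AIA is configured

# Hypothetical collaborators; class names inferred from the parameter names.
service = AIA::ChatProcessorService.new(
  AIA::UIPresenter.new,
  AIA::DirectiveProcessor.new
)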
Instance Method Details
#determine_operation_type ⇒ Object
# File 'lib/aia/chat_processor_service.rb', line 166

def determine_operation_type
  # With multiple models, determine operation type from the first model
  # or provide a generic description
  models = AIA.config.models
  if models.is_a?(Array) && models.size > 1
    "MULTI-MODEL PROCESSING"
  else
    mode = AIA.client.model.modalities
    mode.input.join(',') + " TO " + mode.output.join(',')
  end
end
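The label this method builds is what #process_prompt shows in its spinner. A self-contained sketch of the two shapes it can return; the Modalities struct here is an illustrative stand-in for the object exposed by the client's model, not the real class.

# Stand-in for the modalities object on the client's model (assumption).
Modalities = Struct.new(:input, :output)

mode = Modalities.new(%w[text image], %w[text])
puts mode.input.join(',') + " TO " + mode.output.join(',')
# => "text,image TO text"

# With more than one configured model the method short-circuits to:
# => "MULTI-MODEL PROCESSING"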
#maybe_change_model ⇒ Object
# File 'lib/aia/chat_processor_service.rb', line 87

def maybe_change_model
  # With multiple models, we don't need to change the model in the same way.
  # The RubyLLMAdapter now handles multiple models internally.
  # This method is kept for backward compatibility but may not be needed.
  models = AIA.config.models
  return if models.is_a?(Array) && models.size > 1
  return unless AIA.client.respond_to?(:model) && AIA.client.model.respond_to?(:id)

  client_model = AIA.client.model.id

  # Get the first model name for comparison
  first_model = models.first
  model_name  = first_model.respond_to?(:name) ? first_model.name : first_model.to_s

  # client_model is the full resolved ID (e.g. "claude-sonnet-4-20250514"),
  # model_name is the configured alias (e.g. "claude-sonnet-4") or a
  # provider-prefixed name (e.g. "ollama/qwen3").
  #
  # The adapter strips provider prefixes when creating the RubyLLM chat
  # (e.g. "ollama/qwen3" → model: "qwen3"), so client_model has no prefix.
  # Strip the prefix from model_name before comparing so that
  # "qwen3".include?("qwen3") → true (was: "qwen3".include?("ollama/qwen3") → false).
  comparable_name = model_name.sub(%r{\A[^/]+/}, '')

  unless client_model.downcase.include?(comparable_name.downcase)
    # Never replace the adapter when conversation history exists — doing so
    # destroys all prior context. Role/prompt files can change AIA.config.models
    # via front matter metadata, but that must not evict an active chat session.
    if AIA.client.respond_to?(:chats) && AIA.client.chats.values.any? { |chat| chat.messages.any? }
      warn "Warning: Model config changed to '#{model_name}' but keeping '#{client_model}' to preserve conversation history."
      return
    end

    AIA.client = AIA.client.class.new
  end
end
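A self-contained illustration of the prefix-stripping comparison described in the comments above, using the example values from the source; runnable with plain Ruby.

client_model = "claude-sonnet-4-20250514" # resolved ID, no provider prefix
model_name   = "ollama/qwen3"             # configured, provider-prefixed

comparable_name = model_name.sub(%r{\A[^/]+/}, '')
puts comparable_name                                          # => "qwen3"
puts client_model.downcase.include?(comparable_name.downcase) # => false (model changed)

# Alias case: the resolved ID contains the configured alias, so no change is needed.
puts "claude-sonnet-4-20250514".include?("claude-sonnet-4")   # => true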
#output_response(response) ⇒ Object
# File 'lib/aia/chat_processor_service.rb', line 124

def output_response(response)
  speak(response)

  out_file = AIA.config.output.file

  # Output to STDOUT or file based on out_file configuration
  if out_file.nil? || 'STDOUT' == out_file.upcase
    print "\nAI:\n "
    puts response
  else
    mode = AIA.append? ? 'a' : 'w'
    File.open(out_file, mode) do |file|
      file.puts "\nAI: "
      # Handle multi-line responses by adding proper indentation
      response_lines = response.to_s.split("\n")
      response_lines.each do |line|
        file.puts " #{line}"
      end
    end
  end

  history_file = AIA.config.output.history_file
  if history_file
    File.open(history_file, 'a') do |f|
      f.puts "=== #{Time.now} ==="
      f.puts "Prompt: #{AIA.config.prompt_id}"
      f.puts "Response: #{response}"
      f.puts "==="
    end
  end
end
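A self-contained sketch of the history-file record this method appends; the file name and prompt ID below are illustrative stand-ins for AIA.config.output.history_file and AIA.config.prompt_id.

history_file = 'aia_history.log'          # illustrative path
response     = "Hello!\nSecond line."

File.open(history_file, 'a') do |f|
  f.puts "=== #{Time.now} ==="
  f.puts "Prompt: example_prompt"         # AIA.config.prompt_id in the real method
  f.puts "Response: #{response}"
  f.puts "==="
end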
#process_next_prompts(response, prompt_handler) ⇒ Object
# File 'lib/aia/chat_processor_service.rb', line 157

def process_next_prompts(response, prompt_handler)
  if @directive_processor.directive?(response)
    directive_result = @directive_processor.process(response, @history_manager.history)
    response = directive_result[:result]
    @history_manager.history = directive_result[:modified_history] if directive_result[:modified_history]
  end
end
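From the calls above, the directive processor is expected to return a Hash with a :result key and an optional :modified_history key. A minimal stub honoring that inferred contract (this is not the real DirectiveProcessor; the '//' prefix check is an assumption):

# Stub honoring the contract used by #process_next_prompts (assumption).
class StubDirectiveProcessor
  def directive?(text)
    text.start_with?('//')
  end

  def process(text, history)
    { result: "processed: #{text}", modified_history: history + [text] }
  end
end

processor = StubDirectiveProcessor.new
result    = processor.process('//help', [])
puts result[:result]           # => "processed: //help"
p    result[:modified_history] # => ["//help"]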
#process_prompt(prompt) ⇒ Object
# File 'lib/aia/chat_processor_service.rb', line 25

def process_prompt(prompt)
  result = nil
  @ui_presenter.with_spinner("Processing", determine_operation_type) do
    result = send_to_client(prompt)
  end

  # Debug output to understand what we're receiving
  logger.debug("Result received", result_class: result.class.name)
  logger.debug("Result details", inspect: result.inspect[0..500])

  # Preserve token information if available for metrics
  if result.is_a?(String)
    logger.debug("Processing result", type: "String")
    { content: result, metrics: nil }
  elsif result.respond_to?(:multi_model?) && result.multi_model?
    logger.debug("Processing result", type: "multi-model response")
    # Handle multi-model response with metrics
    {
      content: result.content,
      metrics: nil, # Individual model metrics handled separately
      multi_metrics: result.metrics_list
    }
  elsif result.respond_to?(:content)
    logger.debug("Processing result", type: "standard response with content method")
    # Standard response object with content method
    {
      content: result.content,
      metrics: {
        input_tokens: result.respond_to?(:input_tokens) ? result.input_tokens : nil,
        output_tokens: result.respond_to?(:output_tokens) ? result.output_tokens : nil,
        model_id: result.respond_to?(:model_id) ? result.model_id : nil
      }
    }
  else
    logger.debug("Processing result", type: "fallback (unexpected type)")
    # Fallback for unexpected response types
    { content: result.to_s, metrics: nil }
  end
end
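A self-contained sketch of the three normalized return shapes produced by the branches above. The Response struct is a stand-in for the real client response object, and the values are illustrative.

# Stand-in for the client response object (assumption).
Response = Struct.new(:content, :input_tokens, :output_tokens, :model_id)

# String branch
string_result = { content: "Hello!", metrics: nil }

# Standard response branch
r = Response.new("Hello!", 12, 34, "claude-sonnet-4")
standard_result = {
  content: r.content,
  metrics: {
    input_tokens:  r.input_tokens,
    output_tokens: r.output_tokens,
    model_id:      r.model_id
  }
}

# Multi-model branch: per-model metrics are collected under :multi_metrics
multi_result = { content: "combined output", metrics: nil,
                 multi_metrics: [{ model_id: "gpt-4o" }] } # illustrative entries

p string_result
p standard_result
p multi_result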
#send_to_client(conversation_or_conversations) ⇒ Object
conversation is an Array of Hashes (single model) or a Hash of Arrays (multi-model, per-model contexts); each entry is an interchange with the LLM.
# File 'lib/aia/chat_processor_service.rb', line 68

def send_to_client(conversation_or_conversations)
  maybe_change_model

  # Handle per-model conversations (Hash) or single conversation (Array) - ADR-002 revised
  if conversation_or_conversations.is_a?(Hash)
    # Multi-model with per-model contexts: pass Hash directly to adapter
    logger.debug("Sending per-model conversations to client")
    result = AIA.client.chat(conversation_or_conversations)
  else
    # Single conversation for single model
    logger.debug("Sending conversation to client", conversation: conversation_or_conversations.inspect[0..500])
    result = AIA.client.chat(conversation_or_conversations)
  end

  logger.debug("Client returned", result_class: result.class.name, result: result.inspect[0..500])
  result
end
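A self-contained sketch of the two conversation shapes this method dispatches on, per the description above. The role/content keys and model names are assumptions about the interchange format, not documented API.

# Single model: an Array of Hashes, each an interchange with the LLM.
single = [
  { role: 'user',      content: 'What is AIA?' },
  { role: 'assistant', content: 'A CLI tool for prompt management.' }
]

# Multi-model: a Hash of Arrays, one per-model context keyed by model name.
multi = {
  'gpt-4o'          => [{ role: 'user', content: 'What is AIA?' }],
  'claude-sonnet-4' => [{ role: 'user', content: 'What is AIA?' }]
}

# The method branches on the outer type, mirroring the is_a?(Hash) check:
puts multi.is_a?(Hash) ? 'per-model contexts' : 'single conversation'
# => per-model contexts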
#speak(text) ⇒ Object
# File 'lib/aia/chat_processor_service.rb', line 12

def speak(text)
  return unless AIA.speak?

  @speaker ||= AiClient.new(AIA.config.audio.speech_model) if AIA.config.audio.speech_model

  if @speaker
    system(AIA.config.audio.speak_command, @speaker.speak(text).path)
  else
    warn "Warning: Unable to speak. Speech model not configured properly."
  end
end
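From the code above, speech output requires two configured values: a speech model (used by AiClient to synthesize an audio file) and a speak command (run on the resulting file's path). A hedged sketch of that configuration shape; the keys are read directly from the method, but the values are examples and the real settings come from AIA's config files or CLI flags, not direct assignment.

# Inferred configuration shape (illustrative values; set via AIA config, not like this):
#   AIA.config.audio.speech_model  = 'tts-1'   # model used to synthesize audio
#   AIA.config.audio.speak_command = 'afplay'  # command run on the audio file (macOS example)
#
# With AIA.speak? true, #speak synthesizes `text` and plays the file:
#   system('afplay', @speaker.speak(text).path)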