Module: Legion::LLM::API::Translators::OpenAIResponse
- Extended by:
- Legion::Logging::Helper
- Defined in:
- lib/legion/llm/api/translators/openai_response.rb
Constant Summary collapse
- FINISH_REASON_MAP =
{ 'stop' => 'stop', 'length' => 'length', 'tool_calls' => 'tool_calls', 'content_filter' => 'content_filter' }.freeze
Class Method Summary collapse
- .build_tool_calls(pipeline_response) ⇒ Object
- .extract_token_count(tokens, key) ⇒ Object
- .format_chat_completion(pipeline_response, model:, request_id: nil) ⇒ Object
- .format_embeddings(vector, model:, input_text:) ⇒ Object
- .format_model_object(id, created: nil, owned_by: 'legion') ⇒ Object
- .format_stream_chunk(delta_text, model:, request_id:, finish_reason: nil) ⇒ Object
- .map_finish_reason(stop_reason) ⇒ Object
Class Method Details
.build_tool_calls(pipeline_response) ⇒ Object
102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 102

# Translates the pipeline response's tool invocations into OpenAI-style
# tool_call hashes. Entries without a resolvable name are dropped.
#
# @param pipeline_response [Object] may respond to #tools (Array of
#   tool-call objects or Hashes with symbol/string keys)
# @return [Array<Hash>] OpenAI chat tool_calls entries (may be empty)
def build_tool_calls(pipeline_response)
  candidates = pipeline_response.respond_to?(:tools) ? pipeline_response.tools : nil
  return [] unless candidates.is_a?(Array) && !candidates.empty?

  candidates.each_with_index.filter_map do |entry, position|
    # Duck-type each entry: prefer reader methods, fall back to Hash keys.
    fn_name = entry.respond_to?(:name) ? entry.name : (entry[:name] || entry['name'])
    fn_args = entry.respond_to?(:arguments) ? entry.arguments : (entry[:arguments] || entry['arguments'] || {})
    call_id =
      if entry.respond_to?(:id)
        entry.id
      else
        entry[:id] || entry['id'] || "call_#{SecureRandom.hex(8)}"
      end
    next unless fn_name

    serialized_args = fn_args.is_a?(String) ? fn_args : Legion::JSON.dump(fn_args)
    {
      id: call_id,
      type: 'function',
      index: position,
      function: { name: fn_name.to_s, arguments: serialized_args }
    }
  end
end
.extract_token_count(tokens, key) ⇒ Object
128 129 130 131 132 133 134 135 136 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 128

# Reads a token count from a usage container that may be a Hash (symbol or
# string keys) or an object exposing #input_tokens / #output_tokens.
#
# @param tokens [Hash, Object, nil] usage data from the pipeline
# @param key [Symbol] :input or :output
# @return [Integer, nil] the count, or nil when it cannot be resolved
def extract_token_count(tokens, key)
  return nil if tokens.nil?

  if tokens.is_a?(Hash)
    tokens[key] || tokens[key.to_s]
  else
    accessor = { input: :input_tokens, output: :output_tokens }[key]
    tokens.public_send(accessor) if accessor && tokens.respond_to?(accessor)
  end
end
.format_chat_completion(pipeline_response, model:, request_id: nil) ⇒ Object
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 23

# Builds an OpenAI-compatible chat.completion payload from an internal
# pipeline response.
#
# @param pipeline_response [Object] responds to #routing, #tokens, #stop,
#   a message accessor, and optionally #tools
# @param model [String, Symbol] fallback model name when routing carries none
# @param request_id [String, nil] correlation id; a UUID is generated when absent
# @return [Hash] OpenAI chat.completion hash (id, choices, usage, ...)
def format_chat_completion(pipeline_response, model:, request_id: nil)
  request_id ||= SecureRandom.uuid
  routing = pipeline_response.routing || {}
  tokens = pipeline_response.tokens || {}
  # NOTE(review): the accessor name was lost in doc extraction; #message is
  # inferred from the `raw_msg` local — confirm against the pipeline
  # response class before relying on this.
  raw_msg = pipeline_response.message
  content = raw_msg.is_a?(Hash) ? (raw_msg[:content] || raw_msg['content']) : raw_msg.to_s
  stop_reason = pipeline_response.stop&.dig(:reason)&.to_s
  tool_calls = build_tool_calls(pipeline_response)
  resolved_model = (routing[:model] || routing['model'] || model).to_s
  log.debug("[llm][translator][openai_response] action=format_chat_completion request_id=#{request_id} model=#{resolved_model}")
  # Any tool call forces the OpenAI 'tool_calls' finish reason.
  finish_reason = tool_calls.empty? ? map_finish_reason(stop_reason) : 'tool_calls'
  message = { role: 'assistant', content: content }
  message[:tool_calls] = tool_calls unless tool_calls.empty?
  # Resolve each count once instead of re-reading per usage field.
  prompt_tokens = extract_token_count(tokens, :input)
  completion_tokens = extract_token_count(tokens, :output)
  {
    id: "chatcmpl-#{request_id.delete('-')}",
    object: 'chat.completion',
    created: Time.now.to_i,
    model: resolved_model,
    choices: [
      { index: 0, message: message, finish_reason: finish_reason }
    ],
    usage: {
      prompt_tokens: prompt_tokens,
      completion_tokens: completion_tokens,
      total_tokens: prompt_tokens.to_i + completion_tokens.to_i
    }
  }
end
.format_embeddings(vector, model:, input_text:) ⇒ Object
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 73

# Builds an OpenAI-compatible embeddings list payload for a single vector.
# Method name restored from the class-method summary (doc extraction had
# stripped it from the `def` line).
#
# @param vector [Array<Numeric>] the embedding values
# @param model [String, Symbol] model identifier to echo back
# @param input_text [String, nil] original input; used only for the
#   whitespace-split token estimate (no tokenizer available here)
# @return [Hash] OpenAI embeddings response hash
def format_embeddings(vector, model:, input_text:)
  tokens = input_text.to_s.split.size
  {
    object: 'list',
    data: [
      { object: 'embedding', embedding: vector, index: 0 }
    ],
    model: model.to_s,
    usage: { prompt_tokens: tokens, total_tokens: tokens }
  }
end
.format_model_object(id, created: nil, owned_by: 'legion') ⇒ Object
93 94 95 96 97 98 99 100 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 93

# Builds an OpenAI-compatible model object entry.
#
# @param id [String, Symbol] model identifier
# @param created [Integer, nil] unix timestamp; defaults to now
# @param owned_by [String] owner label (defaults to 'legion')
# @return [Hash] model object hash
def format_model_object(id, created: nil, owned_by: 'legion')
  timestamp = created || Time.now.to_i
  {
    id: id.to_s,
    object: 'model',
    created: timestamp,
    owned_by: owned_by
  }
end
.format_stream_chunk(delta_text, model:, request_id:, finish_reason: nil) ⇒ Object
60 61 62 63 64 65 66 67 68 69 70 71 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 60

# Builds one OpenAI-compatible chat.completion.chunk for SSE streaming.
# The delta carries :content only when there is non-empty text, matching
# OpenAI's convention of empty deltas on the terminating chunk.
#
# @param delta_text [String, nil] incremental content for this chunk
# @param model [String, Symbol] model identifier to echo back
# @param request_id [String] correlation id (dashes stripped for the chunk id)
# @param finish_reason [String, nil] set on the final chunk only
# @return [Hash] chat.completion.chunk hash
def format_stream_chunk(delta_text, model:, request_id:, finish_reason: nil)
  delta = {}
  delta[:content] = delta_text if delta_text && !delta_text.empty?
  {
    id: "chatcmpl-#{request_id.delete('-')}",
    object: 'chat.completion.chunk',
    created: Time.now.to_i,
    model: model.to_s,
    choices: [
      { index: 0, delta: delta, finish_reason: finish_reason }
    ]
  }
end
.map_finish_reason(stop_reason) ⇒ Object
124 125 126 |
# File 'lib/legion/llm/api/translators/openai_response.rb', line 124

# Maps an internal stop reason onto an OpenAI finish_reason, defaulting to
# 'stop' for anything unrecognized (including nil).
#
# @param stop_reason [String, Symbol, nil] internal stop reason
# @return [String] OpenAI finish_reason
def map_finish_reason(stop_reason)
  normalized = stop_reason.to_s
  # All mapped values are truthy strings, so || is equivalent to fetch-with-default.
  FINISH_REASON_MAP[normalized] || 'stop'
end