Class: Legion::LLM::Call::LexLLMAdapter
- Inherits:
-
Object
- Object
- Legion::LLM::Call::LexLLMAdapter
- Includes:
- Legion::Logging::Helper
- Defined in:
- lib/legion/llm/call/lex_llm_adapter.rb
Overview
Adapts a lex-llm provider class to legion-llm’s native dispatch contract.
Defined Under Namespace
Classes: ToolShim
Constant Summary collapse
- METADATA_KEYS =
%i[tier capabilities enabled].freeze
Instance Method Summary collapse
- #chat(model:, messages:, **opts) ⇒ Object
- #count_tokens(model:, messages:) ⇒ Object
- #embed(model:, text:, dimensions: nil, **opts) ⇒ Object
- #health(live: false) ⇒ Object
- #image(model:, prompt:, size:, with: nil, mask: nil, **opts) ⇒ Object
-
#initialize(provider_name, provider_class, instance_config: {}) ⇒ LexLLMAdapter
constructor
A new instance of LexLLMAdapter.
- #offerings(live: false, **filters) ⇒ Object
- #stream(model:, messages:, **opts, &block) ⇒ Object
Constructor Details
#initialize(provider_name, provider_class, instance_config: {}) ⇒ LexLLMAdapter
Returns a new instance of LexLLMAdapter.
14 15 16 17 18 19 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 14 def initialize(provider_name, provider_class, instance_config: {}) @provider_name = provider_name.to_sym @provider_class = provider_class @instance_config = instance_config @lex_llm_namespace = resolve_lex_llm_namespace end |
Instance Method Details
#chat(model:, messages:, **opts) ⇒ Object
21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 21 def chat(model:, messages:, **opts) response = provider.chat( messages: normalize_messages(messages, system: opts[:system]), tools: normalize_tools(opts[:tools]), temperature: opts[:temperature], params: opts[:params] || {}, headers: opts[:headers] || {}, schema: opts[:schema], thinking: opts[:thinking], tool_prefs: opts[:tool_prefs], model: model_info(model, offering_metadata: opts[:offering_metadata]) ) chat_response(response, offering_metadata: opts[:offering_metadata]) end |
#count_tokens(model:, messages:) ⇒ Object
98 99 100 101 102 103 104 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 98 def count_tokens(model:, messages:, **) { result: provider.count_tokens(messages: normalize_messages(messages), model: model_info(model)), model: model, usage: {} } end |
#embed(model:, text:, dimensions: nil, **opts) ⇒ Object
61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 61 def embed(model:, text:, dimensions: nil, **opts) model_info = model_info(model, offering_metadata: opts[:offering_metadata]) response = provider.embed( text: text, model: model_info, dimensions: dimensions, params: opts[:params] || {}, headers: opts[:headers] || {} ) { result: response.vectors, model: response.model, usage: { input_tokens: response.input_tokens.to_i, output_tokens: 0 }, metadata: response_metadata(offering_metadata: opts[:offering_metadata]) } end |
#health(live: false) ⇒ Object
94 95 96 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 94 def health(live: false) provider.health(live: live) end |
#image(model:, prompt:, size:, with: nil, mask: nil, **opts) ⇒ Object
79 80 81 82 83 84 85 86 87 88 89 90 91 92 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 79 def image(model:, prompt:, size:, with: nil, mask: nil, **opts) model_info = model_info(model, offering_metadata: opts[:offering_metadata]) response = call_image_provider( prompt: prompt, model: model_info, size: size, with: with, mask: mask, params: opts[:params] || {}, headers: opts[:headers] || {} ) image_response(response, model: model_info, offering_metadata: opts[:offering_metadata]) end |
#offerings(live: false, **filters) ⇒ Object
106 107 108 109 110 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 106 def offerings(live: false, **filters) return [] unless provider.respond_to?(:discover_offerings) provider.discover_offerings(live: live, **filters) end |
#stream(model:, messages:, **opts, &block) ⇒ Object
37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
# File 'lib/legion/llm/call/lex_llm_adapter.rb', line 37 def stream(model:, messages:, **opts, &block) accumulator = build_stream_accumulator response = provider.stream_chat( messages: normalize_messages(messages, system: opts[:system]), tools: normalize_tools(opts[:tools]), temperature: opts[:temperature], params: opts[:params] || {}, headers: opts[:headers] || {}, schema: opts[:schema], thinking: opts[:thinking], tool_prefs: opts[:tool_prefs], model: model_info(model, offering_metadata: opts[:offering_metadata]) ) do |chunk| accumulate_stream_chunk(accumulator, chunk) block&.call(chunk) end if response chat_response(response, offering_metadata: opts[:offering_metadata]) else chunk_response(accumulator, offering_metadata: opts[:offering_metadata]) end end |