Module: Legion::LLM
- Extended by:
- Legion::Logging::Helper
- Defined in:
- lib/legion/llm.rb,
lib/legion/llm/api.rb,
lib/legion/llm/call.rb,
lib/legion/llm/audit.rb,
lib/legion/llm/cache.rb,
lib/legion/llm/fleet.rb,
lib/legion/llm/hooks.rb,
lib/legion/llm/tools.rb,
lib/legion/llm/types.rb,
lib/legion/llm/compat.rb,
lib/legion/llm/config.rb,
lib/legion/llm/errors.rb,
lib/legion/llm/helper.rb,
lib/legion/llm/router.rb,
lib/legion/llm/skills.rb,
lib/legion/llm/context.rb,
lib/legion/llm/quality.rb,
lib/legion/llm/version.rb,
lib/legion/llm/api/auth.rb,
lib/legion/llm/metering.rb,
lib/legion/llm/settings.rb,
lib/legion/llm/discovery.rb,
lib/legion/llm/inference.rb,
lib/legion/llm/inventory.rb,
lib/legion/llm/transport.rb,
lib/legion/llm/fleet/lane.rb,
lib/legion/llm/scheduling.rb,
lib/legion/llm/router/rule.rb,
lib/legion/llm/skills/base.rb,
lib/legion/llm/types/chunk.rb,
lib/legion/llm/call/dispatch.rb,
lib/legion/llm/call/registry.rb,
lib/legion/llm/fleet/handler.rb,
lib/legion/llm/skills/errors.rb,
lib/legion/llm/types/message.rb,
lib/legion/llm/cache/response.rb,
lib/legion/llm/call/providers.rb,
lib/legion/llm/discovery/vllm.rb,
lib/legion/llm/hooks/metering.rb,
lib/legion/llm/metering/usage.rb,
lib/legion/llm/api/native/chat.rb,
lib/legion/llm/call/embeddings.rb,
lib/legion/llm/context/curator.rb,
lib/legion/llm/hooks/rag_guard.rb,
lib/legion/llm/inference/steps.rb,
lib/legion/llm/metering/tokens.rb,
lib/legion/llm/quality/checker.rb,
lib/legion/llm/skills/registry.rb,
lib/legion/llm/types/tool_call.rb,
lib/legion/llm/discovery/ollama.rb,
lib/legion/llm/discovery/system.rb,
lib/legion/llm/fleet/dispatcher.rb,
lib/legion/llm/hooks/reflection.rb,
lib/legion/llm/inference/prompt.rb,
lib/legion/llm/metering/tracker.rb,
lib/legion/llm/router/arbitrage.rb,
lib/legion/llm/scheduling/batch.rb,
lib/legion/llm/tools/confidence.rb,
lib/legion/llm/tools/dispatcher.rb,
lib/legion/llm/api/native/models.rb,
lib/legion/llm/api/openai/models.rb,
lib/legion/llm/hooks/reciprocity.rb,
lib/legion/llm/inference/profile.rb,
lib/legion/llm/inference/request.rb,
lib/legion/llm/inference/tracing.rb,
lib/legion/llm/router/resolution.rb,
lib/legion/llm/tools/interceptor.rb,
lib/legion/llm/transport/message.rb,
lib/legion/llm/api/native/helpers.rb,
lib/legion/llm/call/daemon_client.rb,
lib/legion/llm/context/compressor.rb,
lib/legion/llm/hooks/budget_guard.rb,
lib/legion/llm/inference/executor.rb,
lib/legion/llm/inference/response.rb,
lib/legion/llm/inference/timeline.rb,
lib/legion/llm/metering/estimator.rb,
lib/legion/llm/skills/disk_loader.rb,
lib/legion/llm/skills/step_result.rb,
lib/legion/llm/hooks/cost_tracking.rb,
lib/legion/llm/quality/shadow_eval.rb,
lib/legion/llm/scheduling/off_peak.rb,
lib/legion/llm/types/content_block.rb,
lib/legion/llm/api/native/inference.rb,
lib/legion/llm/api/native/instances.rb,
lib/legion/llm/api/native/offerings.rb,
lib/legion/llm/api/native/providers.rb,
lib/legion/llm/call/lex_llm_adapter.rb,
lib/legion/llm/hooks/response_guard.rb,
lib/legion/llm/inference/steps/rbac.rb,
lib/legion/llm/api/openai/embeddings.rb,
lib/legion/llm/inference/gaia_caller.rb,
lib/legion/llm/router/health_tracker.rb,
lib/legion/llm/types/tool_definition.rb,
lib/legion/llm/api/anthropic/messages.rb,
lib/legion/llm/call/structured_output.rb,
lib/legion/llm/fleet/reply_dispatcher.rb,
lib/legion/llm/inference/conversation.rb,
lib/legion/llm/inference/steps/debate.rb,
lib/legion/llm/inference/steps/billing.rb,
lib/legion/llm/router/escalation/chain.rb,
lib/legion/llm/skills/skill_run_result.rb,
lib/legion/llm/call/codex_config_loader.rb,
lib/legion/llm/inference/steps/metering.rb,
lib/legion/llm/quality/confidence/score.rb,
lib/legion/llm/call/claude_config_loader.rb,
lib/legion/llm/inference/audit_publisher.rb,
lib/legion/llm/inference/steps/rag_guard.rb,
lib/legion/llm/inference/tool_dispatcher.rb,
lib/legion/llm/quality/confidence/scorer.rb,
lib/legion/llm/router/escalation/history.rb,
lib/legion/llm/router/escalation/tracker.rb,
lib/legion/llm/skills/external_discovery.rb,
lib/legion/llm/transport/exchanges/audit.rb,
lib/legion/llm/transport/exchanges/fleet.rb,
lib/legion/llm/inference/steps/tool_calls.rb,
lib/legion/llm/router/gateway_interceptor.rb,
lib/legion/llm/api/openai/chat_completions.rb,
lib/legion/llm/inference/steps/rag_context.rb,
lib/legion/llm/inference/steps/prompt_cache.rb,
lib/legion/llm/inference/steps/token_budget.rb,
lib/legion/llm/inference/steps/tool_history.rb,
lib/legion/llm/transport/exchanges/metering.rb,
lib/legion/llm/inference/enrichment_injector.rb,
lib/legion/llm/inference/steps/gaia_advisory.rb,
lib/legion/llm/inference/steps/mcp_discovery.rb,
lib/legion/llm/inference/steps/post_response.rb,
lib/legion/llm/inference/steps/tier_assigner.rb,
lib/legion/llm/inference/steps/trigger_match.rb,
lib/legion/llm/transport/messages/tool_event.rb,
lib/legion/llm/api/translators/openai_request.rb,
lib/legion/llm/inference/steps/classification.rb,
lib/legion/llm/inference/steps/skill_injector.rb,
lib/legion/llm/inference/steps/span_annotator.rb,
lib/legion/llm/inference/steps/sticky_helpers.rb,
lib/legion/llm/inference/steps/sticky_persist.rb,
lib/legion/llm/inference/steps/sticky_runners.rb,
lib/legion/llm/inference/steps/tool_discovery.rb,
lib/legion/llm/tools/interceptors/python_venv.rb,
lib/legion/llm/transport/exchanges/escalation.rb,
lib/legion/llm/transport/messages/audit_event.rb,
lib/legion/llm/transport/messages/fleet_error.rb,
lib/legion/llm/transport/messages/skill_event.rb,
lib/legion/llm/api/translators/openai_response.rb,
lib/legion/llm/transport/messages/prompt_event.rb,
lib/legion/llm/transport/messages/fleet_request.rb,
lib/legion/llm/api/translators/anthropic_request.rb,
lib/legion/llm/inference/steps/knowledge_capture.rb,
lib/legion/llm/transport/messages/fleet_response.rb,
lib/legion/llm/transport/messages/metering_event.rb,
lib/legion/llm/api/translators/anthropic_response.rb,
lib/legion/llm/inference/steps/confidence_scoring.rb,
lib/legion/llm/transport/messages/escalation_event.rb
Defined Under Namespace
Modules: API, Audit, Cache, Call, CompatWarning, Config, Context, Discovery, EscalationHistory, EscalationTracker, Fleet, Helper, Hooks, Inference, Inventory, Metering, Quality, Router, Scheduling, Settings, Skills, Tools, Transport, Types
Classes: AuthError, ContextOverflow, DaemonDeniedError, DaemonRateLimitedError, DaemonUnavailableError, EmbeddingUnavailableError, EscalationExhausted, LLMError, PipelineError, PrivacyModeError, ProviderDown, ProviderError, RateLimitError, TokenBudgetExceeded, UnsupportedCapability, Usage
Constant Summary
collapse
- Routes =
API
- VERSION =
'0.8.47'
Class Method Summary
collapse
Class Method Details
.agent(agent_class) ⇒ Object
176
|
# File 'lib/legion/llm.rb', line 176
# Instantiates the given agent class, forwarding all keyword arguments
# to its constructor.
def agent(agent_class, **kwargs)
  agent_class.new(**kwargs)
end
|
.ask ⇒ Object
130
|
# File 'lib/legion/llm.rb', line 130
# Delegates fully (positional, keyword, and block arguments) to
# Inference.ask.
def ask(...)
  Inference.ask(...)
end
|
.can_embed? ⇒ Boolean
These methods check Discovery first, then fall back to instance ivars set directly on LLM (ivar fallback preserves backwards compat for specs that do Legion::LLM.instance_variable_set)
160
161
162
|
# File 'lib/legion/llm.rb', line 160
# Capability probe: prefers live Discovery data, then falls back to the
# @can_embed ivar (kept for specs that set it via instance_variable_set).
def can_embed?
  discovered = Discovery.can_embed?
  discovered || @can_embed == true
end
|
.chat ⇒ Object
129
|
# File 'lib/legion/llm.rb', line 129
# Delegates fully (positional, keyword, and block arguments) to
# Inference.chat.
def chat(...)
  Inference.chat(...)
end
|
.const_missing(name) ⇒ Object
rubocop:disable Metrics/MethodLength
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
|
# File 'lib/legion/llm/compat.rb', line 24
# Maps deprecated constant names to the path (relative to Legion::LLM)
# of their replacement. Values are strings so the replacement constant is
# resolved lazily inside const_missing, preserving the original load order
# (referencing the constants here directly would force eager loading).
COMPAT_CONSTANTS = {
  Pipeline: 'Inference',
  ConversationStore: 'Inference::Conversation',
  NativeDispatch: 'Call::Dispatch',
  NativeResponseAdapter: 'Call::NativeResponseAdapter',
  ProviderRegistry: 'Call::Registry',
  CostEstimator: 'Metering::Pricing',
  CostTracker: 'Metering::Recorder',
  TokenTracker: 'Metering::Tokens',
  QualityChecker: 'Quality::Checker',
  ConfidenceScorer: 'Quality::Confidence::Scorer',
  ConfidenceScore: 'Quality::Confidence::Score',
  OverrideConfidence: 'Tools::Confidence',
  ResponseCache: 'Cache::Response',
  Compressor: 'Context::Compressor',
  ClaudeConfigLoader: 'Call::ClaudeConfigLoader',
  CodexConfigLoader: 'Call::CodexConfigLoader',
  DaemonClient: 'Call::DaemonClient',
  Providers: 'Call::Providers',
  Prompt: 'Inference::Prompt',
  ShadowEval: 'Quality::ShadowEval',
  Arbitrage: 'Router::Arbitrage',
  Batch: 'Scheduling::Batch',
  ContextCurator: 'Context::Curator',
  Embeddings: 'Call::Embeddings',
  OffPeak: 'Scheduling::OffPeak',
  InferenceError: 'PipelineError'
}.freeze

# Resolves legacy constant names: emits a one-time deprecation warning and
# returns the replacement constant. :Routes/:API lazily load the API layer.
# Unknown names fall through to the default NameError via super.
#
# NOTE: the original rendering had the body merged onto the signature line
# (`def self.const_missing(name) case name`), which is not valid Ruby.
def self.const_missing(name)
  if (replacement = COMPAT_CONSTANTS[name])
    CompatWarning.warn_once("Legion::LLM::#{name}", "Legion::LLM::#{replacement}")
    # Module#const_get accepts qualified paths like 'Inference::Conversation'.
    const_get(replacement)
  elsif %i[Routes API].include?(name)
    require_relative '../llm/api'
    const_get(name)
  else
    super
  end
end
|
.embed(text) ⇒ Object
133
134
135
136
137
138
139
140
141
|
# File 'lib/legion/llm.rb', line 133
# Generates an embedding for +text+, wrapping the call in an
# OpenInference embedding span when the telemetry extension is loaded;
# otherwise calls the embeddings backend directly.
def embed(text, **)
  unless defined?(Legion::Telemetry::OpenInference)
    return Call::Embeddings.generate(text: text, **)
  end

  model_name = (Settings.value(:default_model) || 'unknown').to_s
  Legion::Telemetry::OpenInference.embedding_span(model: model_name) do |_span|
    Call::Embeddings.generate(text: text, **)
  end
end
|
.embed_batch(texts) ⇒ Object
.embed_direct(text) ⇒ Object
143
|
# File 'lib/legion/llm.rb', line 143
# Embedding generation without the telemetry wrapper.
def embed_direct(text, **)
  Call::Embeddings.generate(text: text, **)
end
|
.embedding_fallback_chain ⇒ Object
.embedding_model ⇒ Object
.embedding_provider ⇒ Object
.shutdown ⇒ Object
107
108
109
110
111
112
113
114
115
116
117
118
119
|
# File 'lib/legion/llm.rb', line 107
# Tears down LLM state: marks the subsystem disconnected, resets the
# discovery and provider-registry caches, and clears all cached
# embedding-related ivars so a later start re-detects capabilities.
def shutdown
  log.debug '[llm] shutdown.enter'
  Settings.set_value(:connected, value: false)
  @started = false
  Discovery.reset!
  Call::Registry.reset!
  # Clear cached capability/embedding state in one pass.
  %i[@can_embed @embedding_provider @embedding_model @embedding_fallback_chain].each do |ivar|
    instance_variable_set(ivar, nil)
  end
  log.info '[llm] shut down'
end
|
.started? ⇒ Boolean
121
122
123
|
# File 'lib/legion/llm.rb', line 121
# True only when @started holds the boolean true; any other value
# (nil, truthy sentinel, etc.) counts as not started.
def started?
  @started.equal?(true)
end
|
.structured(messages:, schema:) ⇒ Object
146
147
148
149
150
151
152
153
154
|
# File 'lib/legion/llm.rb', line 146
# Generates schema-constrained output, wrapping the call in an
# OpenInference LLM span when the telemetry extension is loaded;
# otherwise calls the structured-output backend directly.
def structured(messages:, schema:, **)
  unless defined?(Legion::Telemetry::OpenInference)
    return Call::StructuredOutput.generate(messages: messages, schema: schema, **)
  end

  model_name = (Settings.value(:default_model) || 'unknown').to_s
  Legion::Telemetry::OpenInference.llm_span(model: model_name, input: messages.to_s) do |_span|
    Call::StructuredOutput.generate(messages: messages, schema: schema, **)
  end
end
|
.structured_direct(messages:, schema:) ⇒ Object
156
|
# File 'lib/legion/llm.rb', line 156
# Structured-output generation without the telemetry wrapper.
def structured_direct(messages:, schema:, **)
  Call::StructuredOutput.generate(messages: messages, schema: schema, **)
end
|