Module: Legion::LLM::Settings
- Extended by:
- Legion::Logging::Helper
- Defined in:
- lib/legion/llm/settings.rb
Class Method Summary
- .api_defaults ⇒ Object
- .arbitrage_defaults ⇒ Object
- .batch_defaults ⇒ Object
- .budget_defaults ⇒ Object
- .claude_cli_defaults ⇒ Object
- .compliance_defaults ⇒ Object
- .confidence_defaults ⇒ Object
- .context_curation_defaults ⇒ Object
- .conversation_defaults ⇒ Object
- .daemon_defaults ⇒ Object
- .debate_defaults ⇒ Object
- .default ⇒ Object
- .discovery_defaults ⇒ Object
- .embedding_defaults ⇒ Object
- .gateway_defaults ⇒ Object
- .prompt_caching_defaults ⇒ Object
- .provider_layer_defaults ⇒ Object
- .providers ⇒ Object
- .rag_defaults ⇒ Object
- .routing_defaults ⇒ Object
- .scheduling_defaults ⇒ Object
- .skills_defaults ⇒ Object
- .system_baseline_default ⇒ Object
- .telemetry_defaults ⇒ Object
- .tool_trigger_defaults ⇒ Object
Class Method Details
.api_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 305

def self.api_defaults
  {
    auth: {
      enabled: false,
      api_keys: [],
      pass_through: false
    }
  }
end
.arbitrage_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 177

def self.arbitrage_defaults
  {
    enabled: false,
    prefer_cheapest: true,
    quality_floor: 0.7,
    cost_table_refresh: 86_400,
    cost_table: {}
  }
end
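A minimal sketch of how these defaults could drive provider selection. The cost table contents and quality scores below are made up for illustration; neither comes from this module:

# Hypothetical: pick the cheapest provider whose quality score
# clears the configured floor. All numbers below are invented.
settings   = { prefer_cheapest: true, quality_floor: 0.7 }
cost_table = { 'ollama' => 0.0, 'gemini' => 0.5, 'anthropic' => 3.0 } # USD per 1M tokens (made up)
quality    = { 'ollama' => 0.6, 'gemini' => 0.8, 'anthropic' => 0.95 }

eligible = cost_table.keys.select { |p| quality[p] >= settings[:quality_floor] }
choice = settings[:prefer_cheapest] ? eligible.min_by { |p| cost_table[p] } : eligible.first
puts choice # => "gemini"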
.batch_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 187

def self.batch_defaults
  {
    enabled: false,
    window_seconds: 300,
    max_batch_size: 100,
    eligible_intents: %w[batch background low_priority]
  }
end
.budget_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 157

def self.budget_defaults
  {
    session_max_tokens: nil,
    session_warn_tokens: nil,
    daily_max_tokens: nil
  }
end
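All three limits default to nil, so any consumer has to treat nil as "no cap". A hedged sketch of that check; the helper is illustrative, not Legion's API:

# Illustrative only: nil means "no limit", so guard before comparing.
def over_budget?(used_tokens, limit)
  !limit.nil? && used_tokens >= limit
end

budget = { session_max_tokens: nil, session_warn_tokens: nil, daily_max_tokens: nil }
over_budget?(50_000, budget[:session_max_tokens]) # => false (unlimited)
over_budget?(50_000, 40_000)                      # => true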
.claude_cli_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 47

def self.claude_cli_defaults
  {
    settings_path: '~/.claude/settings.json',
    config_path: '~/.claude.json'
  }
end
.compliance_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 328

def self.compliance_defaults
  {
    classification_scan: false,
    encrypt_audit: false,
    phi_block_cloud: false,
    cloud_providers: %w[bedrock anthropic openai gemini azure],
    redact_pii: false,
    redaction_placeholder: '[REDACTED]',
    strict_hipaa: false,
    default_level: :public
  }
end
.confidence_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 78

def self.confidence_defaults
  {
    bands: {
      low: 0.3,
      medium: 0.5,
      high: 0.7,
      very_high: 0.9
    }
  }
end
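The band values read naturally as lower thresholds. A sketch of mapping a raw score to a band under that assumption; the mapping itself is not defined in this file:

# Assumption: each band value is the minimum score for that band.
bands = { low: 0.3, medium: 0.5, high: 0.7, very_high: 0.9 }

def band_for(score, bands)
  bands.sort_by { |_, min| -min }.find { |_, min| score >= min }&.first || :none
end

band_for(0.85, bands) # => :high
band_for(0.2, bands)  # => :none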
.context_curation_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 252

def self.context_curation_defaults
  {
    enabled: true,
    mode: 'heuristic',
    llm_assisted: false,
    llm_model: nil,
    tool_result_max_chars: 2000,
    thinking_eviction: true,
    exchange_folding: true,
    superseded_eviction: true,
    dedup_enabled: true,
    dedup_threshold: 0.85,
    target_context_tokens: 40_000
  }
end
.conversation_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 268

def self.conversation_defaults
  {
    summarize_threshold: 50_000,
    target_tokens: 20_000,
    preserve_recent: 10,
    auto_compact: true
  }
end
.daemon_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 89

def self.daemon_defaults
  {
    url: 'http://127.0.0.1:4567',
    enabled: true
  }
end
.debate_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 292

def self.debate_defaults
  {
    enabled: false,
    gaia_auto_trigger: false,
    default_rounds: 1,
    max_rounds: 3,
    advocate_model: nil,
    challenger_model: nil,
    judge_model: nil,
    model_selection_strategy: 'rotate'
  }
end
.default ⇒ Object
# File 'lib/legion/llm/settings.rb', line 10

def self.default
  model_override = ENV.fetch('ANTHROPIC_MODEL', nil)
  {
    enabled: true,
    connected: false,
    pipeline_enabled: true,
    pipeline_async_post_steps: true,
    max_tool_rounds: 200,
    default_model: model_override,
    default_provider: nil,
    system_baseline: system_baseline_default,
    providers: providers,
    routing: routing_defaults,
    budget: budget_defaults,
    confidence: confidence_defaults,
    discovery: discovery_defaults,
    gateway: gateway_defaults,
    daemon: daemon_defaults,
    prompt_caching: prompt_caching_defaults,
    arbitrage: arbitrage_defaults,
    batch: batch_defaults,
    scheduling: scheduling_defaults,
    rag: rag_defaults,
    embedding: embedding_defaults,
    conversation: conversation_defaults,
    telemetry: telemetry_defaults,
    context_curation: context_curation_defaults,
    debate: debate_defaults,
    provider_layer: provider_layer_defaults,
    tool_trigger: tool_trigger_defaults,
    api: api_defaults,
    compliance: compliance_defaults,
    skills: skills_defaults,
    claude_cli: claude_cli_defaults
  }
end
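Note that ANTHROPIC_MODEL, when set, becomes default_model before anything else happens. Because `default` returns a plain nested Hash, user settings can be layered over it with a deep merge. A sketch, assuming the caller (not this module) supplies the merge:

# Illustrative deep merge of user overrides onto the defaults.
def deep_merge(base, override)
  base.merge(override) do |_key, old, new|
    old.is_a?(Hash) && new.is_a?(Hash) ? deep_merge(old, new) : new
  end
end

overrides = { max_tool_rounds: 50, daemon: { enabled: false } }
settings = deep_merge(Legion::LLM::Settings.default, overrides)
settings[:daemon] # => { url: "http://127.0.0.1:4567", enabled: false }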
.discovery_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 114

def self.discovery_defaults
  {
    enabled: true,
    refresh_seconds: 60,
    memory_floor_mb: 2048
  }
end
.embedding_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 218

def self.embedding_defaults
  {
    dimension: 1024,
    enforce_dimension: true,
    provider_fallback: %w[ollama bedrock openai],
    provider_models: {
      bedrock: 'amazon.titan-embed-text-v2:0',
      anthropic: nil,
      openai: 'text-embedding-3-small',
      gemini: 'text-embedding-004',
      azure: 'text-embedding-3-small',
      ollama: 'mxbai-embed-large'
    },
    ollama_preferred: %w[mxbai-embed-large nomic-embed-text bge-large snowflake-arctic-embed],
    ollama_context_chars: {
      'mxbai-embed-large' => 1400,
      'bge-large' => 1400,
      'snowflake-arctic-embed' => 1400,
      'nomic-embed-text' => 24_000
    },
    ollama_default_context_chars: 1400,
    prefix_registry: {
      'nomic-embed-text' => { document: 'search_document: ', query: 'search_query: ' },
      'mxbai-embed-large' => { query: 'Represent this sentence for searching relevant passages: ' }
    }
  }
end
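One way to read `prefix_registry`: some embedding models expect task-specific prefixes on the input text. A sketch of applying the registry; the helper below is hypothetical, not part of this module:

# Hypothetical helper: prepend the model's task prefix when one is registered.
def apply_prefix(text, model, kind, registry)
  prefix = registry.dig(model, kind)
  prefix ? "#{prefix}#{text}" : text
end

registry = Legion::LLM::Settings.embedding_defaults[:prefix_registry]
apply_prefix('how do I reset my password?', 'nomic-embed-text', :query, registry)
# => "search_query: how do I reset my password?"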
.gateway_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 165

def self.gateway_defaults
  {
    enabled: true,
    endpoint: nil,
    api_key: nil,
    timeout_seconds: 30,
    model_policy: {},
    headers: {},
    fallback_to_direct: true
  }
end
.prompt_caching_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 96

def self.prompt_caching_defaults
  {
    enabled: true,
    min_tokens: 1024,
    scope: 'ephemeral',
    cache_system_prompt: true,
    cache_tools: true,
    cache_conversation: true,
    sort_tools: true,
    response_cache: {
      enabled: true,
      ttl_seconds: 300,
      spool_dir: '~/.legionio/data/spool/llm_responses',
      spool_threshold_bytes: 8 * 1024 * 1024
    }
  }
end
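The `response_cache` sub-hash implies that large cached responses are spooled to disk rather than held inline. A sketch of that size gate; the actual spooling code is not shown in this file, so this is an assumption about how the threshold is used:

# Illustrative: responses above spool_threshold_bytes would go to spool_dir.
cache = Legion::LLM::Settings.prompt_caching_defaults[:response_cache]

body = 'x' * (10 * 1024 * 1024) # a 10 MiB response
if body.bytesize > cache[:spool_threshold_bytes]
  path = File.expand_path(cache[:spool_dir]) # the "~/..." default needs expansion
  puts "would spool #{body.bytesize} bytes under #{path}"
end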
.provider_layer_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 277

def self.provider_layer_defaults
  {
    mode: 'ruby_llm',
    native_providers: %w[claude bedrock],
    fallback_to_ruby_llm: true
  }
end
.providers ⇒ Object
# File 'lib/legion/llm/settings.rb', line 341

def self.providers
  {
    bedrock: {
      enabled: false,
      default_model: 'us.anthropic.claude-sonnet-4-6',
      api_key: nil,
      secret_key: nil,
      session_token: nil,
      bearer_token: 'env://AWS_BEARER_TOKEN_BEDROCK',
      region: 'us-east-2'
    },
    anthropic: {
      enabled: false,
      default_model: 'claude-sonnet-4-6',
      api_key: 'env://ANTHROPIC_API_KEY'
    },
    openai: {
      enabled: false,
      default_model: 'gpt-4o',
      api_key: ['env://OPENAI_API_KEY', 'env://CODEX_API_KEY']
    },
    gemini: {
      enabled: false,
      default_model: 'gemini-2.0-flash',
      api_key: 'env://GEMINI_API_KEY'
    },
    azure: {
      enabled: false,
      default_model: nil,
      api_base: nil,
      api_key: nil,
      auth_token: nil
    },
    ollama: {
      enabled: false,
      default_model: 'qwen3.5:latest',
      base_url: 'http://localhost:11434'
    }
  }
end
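Several credentials use an `env://NAME` indirection, and openai accepts an array, suggesting first-match-wins. A hedged sketch of resolving those references; the resolver below is a guess at the convention, not Legion's implementation:

# Assumption: "env://FOO" means "read ENV['FOO']"; an array means
# "use the first reference that resolves to a non-empty value".
def resolve_credential(ref)
  case ref
  when Array then ref.lazy.map { |r| resolve_credential(r) }.find { |v| v && !v.empty? }
  when %r{\Aenv://(.+)\z} then ENV.fetch(Regexp.last_match(1), nil)
  else ref
  end
end

openai = Legion::LLM::Settings.providers[:openai]
resolve_credential(openai[:api_key]) # ENV['OPENAI_API_KEY'], else ENV['CODEX_API_KEY']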
.rag_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 205

def self.rag_defaults
  {
    enabled: true,
    full_limit: 10,
    compact_limit: 5,
    min_confidence: 0.5,
    utilization_compact_threshold: 0.7,
    utilization_skip_threshold: 0.9,
    trivial_max_chars: 20,
    trivial_patterns: %w[hello hi hey ping pong test ok okay yes no thanks thank]
  }
end
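`trivial_max_chars` and `trivial_patterns` suggest that short greetings skip retrieval entirely. A sketch of that gate; the exact matching rule is an assumption:

# Assumption: a message is trivial if it is short AND matches a known pattern.
rag = Legion::LLM::Settings.rag_defaults

def trivial?(message, rag)
  text = message.strip.downcase
  text.length <= rag[:trivial_max_chars] && rag[:trivial_patterns].include?(text)
end

trivial?('Hello', rag)                    # => true, skip RAG
trivial?('how do I rotate my keys?', rag) # => false, run retrieval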
.routing_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 122

def self.routing_defaults
  {
    enabled: true,
    tier_priority: %w[local fleet openai_compat cloud frontier],
    default_intent: { privacy: 'normal', capability: 'moderate', cost: 'normal' },
    tiers: {
      local: { provider: 'ollama' },
      fleet: {
        queue: 'llm.request',
        timeout_seconds: 30,
        timeouts: { embed: 10, chat: 30, generate: 30, default: 30 }
      },
      openai_compat: { gateways: [] },
      cloud: { providers: %w[bedrock azure gemini] },
      frontier: { providers: %w[anthropic openai] }
    },
    health: {
      window_seconds: 300,
      circuit_breaker: { failure_threshold: 3, cooldown_seconds: 60 },
      latency_penalty_threshold_ms: 5000,
      budget: { daily_limit_usd: nil, monthly_limit_usd: nil }
    },
    escalation: {
      enabled: true,
      pipeline_enabled: true,
      max_attempts: 3,
      quality_threshold: 0
    },
    rules: [],
    tier_mappings: []
  }
end
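`tier_priority` reads as an ordered fallback chain. A minimal sketch of walking it, assuming a caller-supplied availability check that is not part of this module:

# Illustrative: try tiers in priority order until one is usable.
routing = Legion::LLM::Settings.routing_defaults

def pick_tier(routing, &available)
  routing[:tier_priority].find { |tier| available.call(tier, routing[:tiers][tier.to_sym]) }
end

# Pretend only the cloud tiers are reachable right now.
pick_tier(routing) { |name, _config| %w[cloud frontier].include?(name) }
# => "cloud"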
.scheduling_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 196

def self.scheduling_defaults
  {
    enabled: false,
    peak_hours_utc: '14-22',
    defer_intents: %w[batch background],
    max_defer_hours: 8
  }
end
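`peak_hours_utc: '14-22'` is a compact range string. A sketch of parsing it and deciding whether a deferrable intent should wait; the interpretation of the endpoints (14:00 to 22:00 UTC, end exclusive) is an assumption:

# Assumption: '14-22' means the peak window 14:00-22:00 UTC, end exclusive.
sched = Legion::LLM::Settings.scheduling_defaults

def defer?(intent, now, sched)
  return false unless sched[:enabled] && sched[:defer_intents].include?(intent)

  start_h, end_h = sched[:peak_hours_utc].split('-').map(&:to_i)
  (start_h...end_h).cover?(now.utc.hour)
end

defer?('batch', Time.now, sched) # => false here: scheduling is disabled by default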
.skills_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 315

def self.skills_defaults
  {
    enabled: true,
    auto_inject: true,
    on_demand: true,
    max_active_skills: 1,
    directories: ['.legion/skills', '~/.legionio/skills'],
    auto_discover: { claude: false, codex: false },
    enabled_skills: [],
    disabled_skills: []
  }
end
.system_baseline_default ⇒ Object
# File 'lib/legion/llm/settings.rb', line 54

def self.system_baseline_default
  <<~PROMPT.strip
    You are Legion, an agentic AI partner running on the LegionIO framework.
    LegionIO is a governed, production-oriented cognitive task and orchestration platform.
    Your role is to help the user accomplish real work quickly, directly, and safely.

    Core behavior:
    - Honor user intent and constraints.
    - Prefer execution over prompt ceremony: do the task when possible, don't just describe it.
    - Be concise by default; expand only when the user asks for depth.
    - Be transparent: never claim you ran something you did not run, and never hide uncertainty.
    - Minimize blast radius: make the smallest effective change and preserve existing behavior unless asked otherwise.
    - Do not YOLO risky actions. For destructive, irreversible, security-sensitive, or high-impact actions, pause and get explicit confirmation.
    - When risk or ambiguity is high, ask focused clarifying questions before acting.
    - Validate outcomes when practical, and report what changed and why.
    - Prefer solving work directly in-session; only produce handoff artifacts (including prompts for other AI tools) when the user explicitly asks for that format.

    Trust model:
    - Trust is earned through reliable outcomes, clarity, and safe execution.
    - Speed matters, but never at the expense of integrity or user trust.
  PROMPT
end
.telemetry_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 246

def self.telemetry_defaults
  {
    pipeline_spans: true
  }
end
.tool_trigger_defaults ⇒ Object
# File 'lib/legion/llm/settings.rb', line 285

def self.tool_trigger_defaults
  {
    scan_depth: 10,
    tool_limit: 10
  }
end