Module: Legion::Extensions::Agentic::Social::MoralReasoning::Runners::MoralReasoning
- Includes:
- Helpers::Lex
- Included in:
- Client
- Defined in:
- lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb
Instance Method Summary collapse
- #add_moral_principle(name:, description:, foundation:, weight: Helpers::Constants::DEFAULT_WEIGHT) ⇒ Object
- #apply_ethical_framework(dilemma_id:, framework:) ⇒ Object
- #check_moral_development ⇒ Object
- #evaluate_moral_action(action:, affected_foundations:, domain: :general, description: nil) ⇒ Object
- #moral_foundation_profile ⇒ Object
- #moral_reasoning_stats ⇒ Object
- #moral_stage_info ⇒ Object
- #pose_moral_dilemma(description:, options:, domain: :general, severity: 0.5) ⇒ Object
- #resolve_moral_dilemma(dilemma_id:, option_id:, reasoning:, framework:) ⇒ Object
- #update_moral_reasoning ⇒ Object
Instance Method Details
#add_moral_principle(name:, description:, foundation:, weight: Helpers::Constants::DEFAULT_WEIGHT) ⇒ Object
78 79 80 81 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 78

# Registers a named moral principle with the engine under the given foundation.
# Weight defaults to the extension-wide constant when not supplied.
def add_moral_principle(name:, description:, foundation:, weight: Helpers::Constants::DEFAULT_WEIGHT, **)
  message = "[moral_reasoning] add_principle: name=#{name} foundation=#{foundation}"
  log.info(message)
  engine.add_principle(name: name, description: description, foundation: foundation, weight: weight)
end
#apply_ethical_framework(dilemma_id:, framework:) ⇒ Object
73 74 75 76 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 73

# Applies a single ethical framework to an existing dilemma and returns the
# engine's result untouched.
def apply_ethical_framework(dilemma_id:, framework:, **)
  log.debug("[moral_reasoning] apply_framework: id=#{dilemma_id} framework=#{framework}")
  engine.apply_framework(dilemma_id: dilemma_id, framework: framework)
end
#check_moral_development ⇒ Object
83 84 85 86 87 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 83

# Reports the engine's current moral-development state, tagged as a success.
# Keys from the engine result override the :success flag if they collide,
# matching the original merge order.
def check_moral_development(**)
  log.debug('[moral_reasoning] check_moral_development')
  development = engine.moral_development
  { success: true, **development }
end
#evaluate_moral_action(action:, affected_foundations:, domain: :general, description: nil) ⇒ Object
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 13

# Evaluates the morality of an action. When the LLM enhancer is available it
# asks the LLM for reasoning/foundation impacts, still runs the mechanical
# engine evaluation, and layers the engine result over the LLM fields
# (engine keys win on collision, matching the original merge order). When no
# LLM result is produced, falls back to a purely mechanical evaluation
# tagged source: :mechanical.
def evaluate_moral_action(action:, affected_foundations:, domain: :general, description: nil, **)
  log.debug "[moral_reasoning] evaluate_action: action=#{action} domain=#{domain}"

  if Helpers::LlmEnhancer.available?
    # Current per-foundation weights feed the LLM prompt.
    weights = engine.foundation_profile.transform_values { |profile| profile[:weight] }
    llm = Helpers::LlmEnhancer.evaluate_action(
      action: action,
      description: description.to_s,
      foundations: weights
    )

    if llm
      log.debug "[moral_reasoning] using LLM evaluation for action=#{action}"
      mechanical = engine.evaluate_action(
        action: action,
        affected_foundations: affected_foundations,
        domain: domain
      )
      return {
        success: true,
        source: :llm,
        reasoning: llm[:reasoning],
        foundation_impacts: llm[:foundation_impacts]
      }.merge(mechanical)
    end
  end

  mechanical = engine.evaluate_action(action: action, affected_foundations: affected_foundations, domain: domain)
  { success: true, source: :mechanical }.merge(mechanical)
end
#moral_foundation_profile ⇒ Object
89 90 91 92 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 89

# Returns the engine's foundation profile wrapped in a success envelope.
def moral_foundation_profile(**)
  log.debug('[moral_reasoning] foundation_profile')
  profile = engine.foundation_profile
  { success: true, foundations: profile }
end
#moral_reasoning_stats ⇒ Object
105 106 107 108 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 105

# Serializes the full engine state, tagged as a success. Engine keys override
# the :success flag on collision, matching the original merge order.
def moral_reasoning_stats(**)
  log.debug('[moral_reasoning] stats')
  { success: true, **engine.to_h }
end
#moral_stage_info ⇒ Object
94 95 96 97 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 94

# Returns the engine's current stage information, tagged as a success.
# Stage keys override the :success flag on collision, matching the
# original merge order.
def moral_stage_info(**)
  log.debug('[moral_reasoning] stage_info')
  { success: true, **engine.stage_info }
end
#pose_moral_dilemma(description:, options:, domain: :general, severity: 0.5) ⇒ Object
39 40 41 42 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 39

# Poses a new moral dilemma to the engine.
#
# Fix: the extracted source read `options: ,` — the keyword argument value
# was lost, which is a SyntaxError. Restored to `options: options`, matching
# the method's required `options:` parameter and every sibling runner's
# pass-through style.
def pose_moral_dilemma(description:, options:, domain: :general, severity: 0.5, **)
  log.info "[moral_reasoning] pose_dilemma: domain=#{domain} severity=#{severity}"
  engine.pose_dilemma(description: description, options: options, domain: domain, severity: severity)
end
#resolve_moral_dilemma(dilemma_id:, option_id:, reasoning:, framework:) ⇒ Object
44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 44

# Resolves a previously posed dilemma. When the LLM enhancer is available and
# the dilemma is still unresolved, asks the LLM to reason over the dilemma's
# options under the chosen framework; if it answers, the engine resolution is
# performed with the LLM's reasoning and the result is annotated with the
# LLM's chosen option and confidence. Otherwise falls back to a purely
# mechanical resolution using the caller-supplied reasoning.
#
# Fix: the extracted source read `options: dilemma.,` — the attribute call
# was lost, which is a SyntaxError. Restored to `dilemma.options`, consistent
# with the `dilemma.description` read on the preceding line.
def resolve_moral_dilemma(dilemma_id:, option_id:, reasoning:, framework:, **)
  log.info "[moral_reasoning] resolve_dilemma: id=#{dilemma_id} framework=#{framework}"

  if Helpers::LlmEnhancer.available?
    dilemma = engine.dilemmas[dilemma_id]

    if dilemma && !dilemma.resolved?
      llm_result = Helpers::LlmEnhancer.resolve_dilemma(
        dilemma_description: dilemma.description,
        options: dilemma.options,
        framework: framework
      )

      if llm_result
        log.debug "[moral_reasoning] using LLM resolution for dilemma=#{dilemma_id}"
        result = engine.resolve_dilemma(
          dilemma_id: dilemma_id,
          option_id: option_id,
          reasoning: llm_result[:reasoning],
          framework: framework
        )
        return result.merge(source: :llm,
                            llm_chosen: llm_result[:chosen_option],
                            llm_confidence: llm_result[:confidence])
      end
    end
  end

  engine.resolve_dilemma(dilemma_id: dilemma_id, option_id: option_id, reasoning: reasoning, framework: framework)
end
#update_moral_reasoning ⇒ Object
99 100 101 102 103 |
# File 'lib/legion/extensions/agentic/social/moral_reasoning/runners/moral_reasoning.rb', line 99

# Applies decay to all moral foundations, then reports the refreshed profile.
def update_moral_reasoning(**)
  log.debug('[moral_reasoning] decay_all')
  engine.decay_all
  refreshed = engine.foundation_profile
  { success: true, foundations: refreshed }
end