Module: Legion::LLM::Pipeline::Steps::Debate
- Includes:
- Legion::Logging::Helper
- Included in:
- Executor
- Defined in:
- lib/legion/llm/pipeline/steps/debate.rb
Constant Summary
# Prompt for the challenger role: produces a critique of the advocate's
# response. Filled in via Kernel#format using the named references
# %<question>s and %<advocate>s.
# NOTE(review): original heredoc line breaks were lost in extraction;
# paragraph breaks below are reconstructed — confirm against VCS.
CHALLENGER_PROMPT = <<~PROMPT.freeze
  You are a critical analyst reviewing the following response. Your job is to identify weaknesses, logical flaws, unsupported assumptions, missing context, or alternative perspectives that were not considered. Be specific and constructive.

  Original question/request:
  %<question>s

  Advocate's response:
  %<advocate>s

  Provide a thorough critique. What is wrong, incomplete, or could be improved?
PROMPT
# Prompt for the advocate's rebuttal round. Filled in via Kernel#format
# using %<question>s, %<advocate>s and %<challenger>s.
# NOTE(review): original heredoc line breaks were lost in extraction;
# paragraph breaks below are reconstructed — confirm against VCS.
REBUTTAL_PROMPT = <<~PROMPT.freeze
  You originally provided a response to a question. A challenger has critiqued your response. Address the critique directly, defending valid points and conceding where the challenger identified genuine weaknesses.

  Original question/request:
  %<question>s

  Your original response:
  %<advocate>s

  Challenger's critique:
  %<challenger>s

  Provide a rebuttal that strengthens your position or acknowledges valid criticisms.
PROMPT
# Prompt for the judge role: synthesizes the debate into a final answer.
# Filled in via Kernel#format using %<question>s, %<advocate>s,
# %<challenger>s and %<rebuttal>s.
# NOTE(review): original heredoc line breaks were lost in extraction;
# paragraph breaks below are reconstructed — confirm against VCS.
JUDGE_PROMPT = <<~PROMPT.freeze
  You are an impartial judge evaluating a multi-round debate about the following question. Your task is to synthesize the strongest arguments from both sides and produce the most accurate, balanced, and complete answer possible.

  Original question/request:
  %<question>s

  Advocate's position:
  %<advocate>s

  Challenger's critique:
  %<challenger>s

  Advocate's rebuttal:
  %<rebuttal>s

  Synthesize these perspectives into a final, authoritative answer. Incorporate valid points from the critique while preserving what the advocate got right. Be direct and definitive.
PROMPT
Instance Method Summary
- #debate_enabled?(request) ⇒ Boolean
- #gaia_debate_trigger?(enrichments) ⇒ Boolean
- #run_debate(advocate_response, request) ⇒ Object
- #step_debate ⇒ Object
Instance Method Details
#debate_enabled?(request) ⇒ Boolean
# File 'lib/legion/llm/pipeline/steps/debate.rb', line 91

# Decides whether the debate step should run for this request.
#
# Precedence order:
#   1. an explicit per-request override in request.extra[:debate]
#      (true or false — only nil falls through),
#   2. a GAIA advisory trigger derived from @enrichments,
#   3. the global :llm/:debate/:enabled setting.
#
# @param request [Object] pipeline request exposing #extra (a Hash)
# @return [Boolean]
def debate_enabled?(request)
  override = request.extra[:debate]
  # An explicit true/false on the request always wins; nil means "not set".
  return override unless override.nil?

  return true if gaia_debate_trigger?(@enrichments)

  Legion::Settings.dig(:llm, :debate, :enabled) == true
end
#gaia_debate_trigger?(enrichments) ⇒ Boolean
# File 'lib/legion/llm/pipeline/steps/debate.rb', line 101

# True when a GAIA advisory enrichment recommends running a debate.
#
# Requires the :gaia_auto_trigger debate setting to be enabled and a
# 'gaia:advisory' enrichment whose :data hash flags the request as
# high-stakes or explicitly recommends a debate.
#
# @param enrichments [Hash, nil] pipeline enrichments keyed by string
# @return [Boolean]
def gaia_debate_trigger?(enrichments)
  return false unless debate_settings[:gaia_auto_trigger] == true

  advisory = enrichments&.dig('gaia:advisory', :data)
  # Anything other than a Hash (nil, string content, etc.) cannot carry flags.
  return false unless advisory.is_a?(Hash)

  advisory[:high_stakes] == true || advisory[:debate_recommended] == true
end
#run_debate(advocate_response, request) ⇒ Object
# File 'lib/legion/llm/pipeline/steps/debate.rb', line 110

# Runs a multi-round advocate/challenger debate over an initial LLM
# response, then asks a judge model to synthesize a final answer.
#
# Each round: the challenger critiques the current advocate position, the
# advocate rebuts, and the rebuttal becomes the advocate position for the
# next round. The judge sees the ORIGINAL advocate text plus the final
# critique and rebuttal.
#
# @param advocate_response [Object] the raw LLM response to debate
# @param request [Object] the pipeline request
# @return [Hash] :synthetic_response, :rounds, and :metadata about the debate
def run_debate(advocate_response, request)
  rounds = resolve_debate_rounds(request)
  question = extract_question(request)
  advocate_text = extract_content(advocate_response)

  models = select_debate_models(request)
  @warnings << models[:warning] if models[:warning]
  advocate_model = models[:advocate]
  challenger_model = models[:challenger]
  judge_model = models[:judge]

  current_advocate = advocate_text
  current_challenger = nil
  current_rebuttal = nil

  rounds.times do
    current_challenger = call_debate_role(
      prompt: format(CHALLENGER_PROMPT, question: question, advocate: current_advocate),
      model: challenger_model
    )

    current_rebuttal = call_debate_role(
      prompt: format(REBUTTAL_PROMPT, question: question, advocate: current_advocate,
                     challenger: current_challenger),
      model: advocate_model
    )

    # The rebuttal becomes the advocate's position for the next round.
    current_advocate = current_rebuttal
  end

  judge_synthesis = call_debate_role(
    prompt: format(JUDGE_PROMPT, question: question, advocate: advocate_text,
                   challenger: current_challenger || '', rebuttal: current_rebuttal || ''),
    model: judge_model
  )

  synthetic_response = SyntheticResponse.new(judge_synthesis)

  {
    synthetic_response: synthetic_response,
    rounds: rounds,
    metadata: {
      enabled: true,
      rounds: rounds,
      advocate_model: advocate_model,
      challenger_model: challenger_model,
      judge_model: judge_model,
      # NOTE(review): the extracted source showed "(advocate_text)" /
      # "(current_challenger)" in parentheses — a truncation/summary helper
      # call may have been stripped during doc generation; confirm in VCS.
      advocate_summary: advocate_text,
      challenger_summary: current_challenger,
      judge_confidence: nil
    }
  }
end
#step_debate ⇒ Object
# File 'lib/legion/llm/pipeline/steps/debate.rb', line 65

# Pipeline step: optionally runs a debate over @raw_response and replaces
# it with the judge's synthesis. Records the outcome as a 'debate:result'
# enrichment and a timeline event. Any error is downgraded to a warning so
# the pipeline continues with the original response.
#
# @return [void]
def step_debate
  return unless debate_enabled?(@request)
  return unless @raw_response

  debate_result = run_debate(@raw_response, @request)
  return unless debate_result

  @raw_response = debate_result[:synthetic_response]
  @enrichments['debate:result'] = {
    content: "debate completed: #{debate_result[:rounds]} rounds, judge synthesis produced",
    data: debate_result[:metadata],
    timestamp: Time.now
  }
  @timeline.record(
    category: :internal,
    key: 'debate:completed',
    direction: :internal,
    detail: "rounds=#{debate_result[:rounds]} advocate=#{debate_result[:metadata][:advocate_model]} " \
            "challenger=#{debate_result[:metadata][:challenger_model]} judge=#{debate_result[:metadata][:judge_model]}",
    from: 'pipeline',
    to: 'pipeline'
  )
rescue StandardError => e
  # FIX: extracted source had the invalid "#{e.}" — restore e.message.
  @warnings << "debate step error: #{e.message}"
  handle_exception(e, level: :warn, operation: 'llm.pipeline.steps.debate')
end