Module: Legion::LLM::Inference::Steps::Debate
- Includes:
- Logging, Legion::Logging::Helper
- Included in:
- Executor
- Defined in:
- lib/legion/llm/inference/steps/debate.rb
Constant Summary collapse
- CHALLENGER_PROMPT =
<<~PROMPT You are a critical analyst reviewing the following response. Your job is to identify weaknesses, logical flaws, unsupported assumptions, missing context, or alternative perspectives that were not considered. Be specific and constructive. Original question/request: %<question>s Advocate's response: %<advocate>s Provide a thorough critique. What is wrong, incomplete, or could be improved? PROMPT
- REBUTTAL_PROMPT =
<<~PROMPT You originally provided a response to a question. A challenger has critiqued your response. Address the critique directly, defending valid points and conceding where the challenger identified genuine weaknesses. Original question/request: %<question>s Your original response: %<advocate>s Challenger's critique: %<challenger>s Provide a rebuttal that strengthens your position or acknowledges valid criticisms. PROMPT
- JUDGE_PROMPT =
<<~PROMPT You are an impartial judge evaluating a multi-round debate about the following question. Your task is to evaluate the strength of each position, state which argument was stronger and why, assign confidence to the final position, then produce the most accurate, balanced, and complete answer possible. Original question/request: %<question>s Advocate's position: %<advocate>s Challenger's critique: %<challenger>s Advocate's rebuttal: %<rebuttal>s Respond using exactly these section labels: Evaluation: compare the advocate and challenger, state which side was stronger, and give confidence. Final answer: synthesize the final answer. Incorporate valid points from the critique while preserving what the advocate got right. Be direct and definitive. PROMPT
Instance Method Summary collapse
- #debate_enabled?(request) ⇒ Boolean
- #gaia_debate_trigger?(enrichments) ⇒ Boolean
- #run_debate(advocate_response, request) ⇒ Object
- #step_debate ⇒ Object
Instance Method Details
#debate_enabled?(request) ⇒ Boolean
113 114 115 116 117 118 119 120 121 |
# Decides whether the debate step should run for this request.
#
# Precedence: an explicit per-request override (request.extra[:debate])
# wins outright; otherwise a GAIA advisory auto-trigger can force it on;
# otherwise fall back to the global :debate/:enabled setting.
#
# @param request [Object] the inference request (read for :debate override)
# @return [Boolean]
def debate_enabled?(request)
  override = Legion::LLM::Settings.config_value(request.extra, :debate)
  return override unless override.nil?

  # No explicit override — a GAIA advisory may still opt us in.
  return true if gaia_debate_trigger?(@enrichments)

  settings_value(:debate, :enabled) == true
end
#gaia_debate_trigger?(enrichments) ⇒ Boolean
123 124 125 126 127 128 129 130 |
# True when a GAIA advisory enrichment flags this exchange as worth
# debating. Requires the :gaia_auto_trigger debate setting to be on.
#
# @param enrichments [Hash, nil] pipeline enrichments, keyed by string
# @return [Boolean]
def gaia_debate_trigger?(enrichments)
  return false unless debate_setting(:gaia_auto_trigger) == true

  advisory_payload = enrichments&.fetch('gaia:advisory', nil)
  advisory = Legion::LLM::Settings.config_value(advisory_payload, :data)
  return false unless advisory.is_a?(Hash)

  # Either flag on the advisory payload is enough to trigger a debate.
  %i[high_stakes debate_recommended].any? do |flag|
    Legion::LLM::Settings.config_value(advisory, flag) == true
  end
end
#run_debate(advocate_response, request) ⇒ Object
132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 |
# Runs a full advocate/challenger/judge debate over an initial model
# response and returns the judged synthesis.
#
# @param advocate_response [Object] the raw LLM response taken as the
#   advocate's opening position
# @param request [Object] the originating request, used for round count,
#   question extraction, and model selection
# @return [Hash, nil] keys :synthetic_response, :rounds, :metadata on
#   success; nil when model selection asks to skip
def run_debate(advocate_response, request)
  rounds = resolve_debate_rounds(request)
  question = extract_question(request)
  advocate_text = extract_content(advocate_response)

  models = select_debate_models(request)
  @warnings << models[:warning] if models[:warning]
  if models[:skip]
    log_step_info(:debate, :skipped, reason: models[:skip])
    return nil
  end

  advocate_model = models[:advocate]
  challenger_model = models[:challenger]
  judge_model = models[:judge]
  current_advocate = advocate_text

  log_debate_models(rounds, advocate_model, challenger_model, judge_model)

  current_challenger, current_rebuttal = run_debate_rounds(
    rounds: rounds,
    question: question,
    advocate_model: advocate_model,
    challenger_model: challenger_model,
    current_advocate: current_advocate
  )

  judge_synthesis = judge_debate(
    question: question,
    advocate_text: advocate_text,
    current_challenger: current_challenger,
    current_rebuttal: current_rebuttal,
    judge_model: judge_model
  )

  judge_sections = parse_judge_output(judge_synthesis)
  synthetic_response = SyntheticResponse.new(judge_sections[:final_answer])

  {
    synthetic_response: synthetic_response,
    rounds: rounds,
    # BUG FIX: the metadata value was wrapped in parentheses — a bare
    # keyword-label list inside `(...)` is a Ruby syntax error; it must
    # be a Hash literal.
    metadata: {
      rounds: rounds,
      advocate_model: advocate_model,
      challenger_model: challenger_model,
      judge_model: judge_model,
      advocate_text: advocate_text,
      current_challenger: current_challenger,
      judge_sections: judge_sections,
      judge_synthesis: judge_synthesis
    }
  }
end
#step_debate ⇒ Object
69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 |
# Pipeline step: when debate is enabled and a raw response exists,
# replaces @raw_response with the debate-judged synthesis, records a
# 'debate:result' enrichment and a timeline entry, and logs completion.
# Any StandardError is downgraded to a warning so the pipeline continues.
#
# @return [void]
def step_debate
  unless debate_enabled?(@request)
    log_step_debug(:debate, :skipped, reason: :disabled)
    return
  end
  unless @raw_response
    log_step_debug(:debate, :skipped, reason: :no_response)
    return
  end

  log_step_debug(:debate, :start)
  debate_result = run_debate(@raw_response, @request)
  unless debate_result
    log_step_debug(:debate, :skipped, reason: :no_result)
    return
  end

  # Swap the pipeline's working response for the judge's synthesis.
  @raw_response = debate_result[:synthetic_response]
  @enrichments['debate:result'] = {
    content: "debate completed: #{debate_result[:rounds]} rounds, judge synthesis produced",
    data: debate_result[:metadata],
    timestamp: Time.now
  }
  @timeline.record(
    category: :internal,
    key: 'debate:completed',
    direction: :internal,
    detail: "rounds=#{debate_result[:rounds]} advocate=#{debate_result[:metadata][:advocate_model]} " \
            "challenger=#{debate_result[:metadata][:challenger_model]} judge=#{debate_result[:metadata][:judge_model]}",
    from: 'pipeline',
    to: 'pipeline'
  )
  log_step_info(
    :debate, :complete,
    rounds: debate_result[:rounds],
    advocate_model: debate_result[:metadata][:advocate_model],
    challenger_model: debate_result[:metadata][:challenger_model],
    judge_model: debate_result[:metadata][:judge_model]
  )
rescue StandardError => e
  # BUG FIX: the interpolation was "#{e.}" — a dangling method call
  # (syntax error); the intended value is the exception message.
  @warnings << "debate step error: #{e.message}"
  handle_exception(e, level: :warn, operation: 'llm.pipeline.steps.debate')
end