Module: Legion::Extensions::Rfp::Generate::Runners::Drafts
- Extended by:
- Helpers::Client
- Includes:
- Helpers::Lex
- Included in:
- Client
- Defined in:
- lib/legion/extensions/rfp/generate/runners/drafts.rb
Instance Method Summary collapse
- #generate_full_draft(rfp_text:, context: {}, model: nil) ⇒ Object
- #generate_response(question:, context: {}, model: nil, scope: :all) ⇒ Object
- #regenerate(question:, previous_answer:, feedback:, context: {}, model: nil) ⇒ Object
Methods included from Helpers::Client
Instance Method Details
#generate_full_draft(rfp_text:, context: {}, model: nil) ⇒ Object
11 12 13 14 15 16 17 18 19 20 |
# File 'lib/legion/extensions/rfp/generate/runners/drafts.rb', line 11

# Produces a full RFP draft: parses the RFP text into question/section
# pairs, answers each one, and stitches the answers together.
#
# @param rfp_text [String] raw RFP document text to parse
# @param context [Hash] caller-supplied context forwarded to each section response
# @param model [Object, nil] optional model identifier forwarded to generation
# @return [Hash] :result (joined draft), :sections and :questions_answered counts
def generate_full_draft(rfp_text:, context: {}, model: nil, **)
  section_answers = parse_rfp(rfp_text).map do |parsed|
    generate_section_response(
      question: parsed[:question],
      section: parsed[:section],
      context: context,
      model: model
    )
  end
  # Sections are separated by a horizontal-rule style divider.
  combined = section_answers.map { |answer| answer[:result] }.join("\n\n---\n\n")
  { result: combined, sections: section_answers.length, questions_answered: section_answers.length }
end
#generate_response(question:, context: {}, model: nil, scope: :all) ⇒ Object
22 23 24 25 26 27 28 |
# File 'lib/legion/extensions/rfp/generate/runners/drafts.rb', line 22

# Answers a single question: retrieves supporting context, builds the
# prompt, and calls the LLM.
#
# @param question [String] the question to answer
# @param context [Hash] caller-supplied context merged into the prompt
# @param model [Object, nil] optional model identifier forwarded to the LLM call
# @param scope [Symbol] retrieval scope passed to retrieve_context (default :all)
# @return [Hash] :result (answer), :context_used (retrieved item count), :question
def generate_response(question:, context: {}, model: nil, scope: :all, **)
  matches = retrieve_context(question: question, scope: scope)
  answer = call_llm(
    prompt: build_prompt(question: question, context: context, retrieved: matches),
    model: model
  )
  { result: answer, context_used: matches.length, question: question }
end
#regenerate(question:, previous_answer:, feedback:, context: {}, model: nil) ⇒ Object
30 31 32 33 34 35 36 37 38 39 40 |
# File 'lib/legion/extensions/rfp/generate/runners/drafts.rb', line 30

# Regenerates an answer, feeding the previous answer and reviewer
# feedback into a revision prompt.
#
# @param question [String] the question being revised
# @param previous_answer [String] the answer being replaced
# @param feedback [String] reviewer feedback guiding the revision
# @param context [Hash] caller-supplied context forwarded to the prompt builder
# @param model [Object, nil] optional model identifier forwarded to the LLM call
# @return [Hash] :result (revised answer), :question, :revision => true
def regenerate(question:, previous_answer:, feedback:, context: {}, model: nil, **)
  revision_prompt = build_revision_prompt(
    question: question,
    previous: previous_answer,
    feedback: feedback,
    context: context
  )
  revised = call_llm(prompt: revision_prompt, model: model)
  { result: revised, question: question, revision: true }
end