Module: Legion::Extensions::Rfp::Generate::Runners::Sections
- Extended by:
- Helpers::Client
- Includes:
- Helpers::Lex
- Included in:
- Client
- Defined in:
- lib/legion/extensions/rfp/generate/runners/sections.rb
Instance Method Summary collapse
- #generate_compliance_matrix(requirements:, capabilities: {}, model: nil) ⇒ Object
- #generate_executive_summary(rfp_text:, company_context: {}, model: nil) ⇒ Object
- #generate_section_response(question:, section: nil, context: {}, model: nil, scope: :all) ⇒ Object
Methods included from Helpers::Client
Instance Method Details
#generate_compliance_matrix(requirements:, capabilities: {}, model: nil) ⇒ Object
31 32 33 34 35 |
# File 'lib/legion/extensions/rfp/generate/runners/sections.rb', line 31

# Builds a compliance-matrix response for a set of RFP requirements.
# Delegates prompt construction and the LLM call to module helpers.
#
# @param requirements [Array] the RFP requirements to cover
# @param capabilities [Hash] optional capability data folded into the prompt
# @param model [Object, nil] optional model override passed to the LLM helper
# @return [Hash] :result (LLM answer), :type, and :requirements_count
def generate_compliance_matrix(requirements:, capabilities: {}, model: nil, **)
  matrix_prompt = build_compliance_prompt(requirements: requirements, capabilities: capabilities)
  llm_answer = call_section_llm(prompt: matrix_prompt, model: model)
  {
    result: llm_answer,
    type: :compliance_matrix,
    requirements_count: requirements.length
  }
end
#generate_executive_summary(rfp_text:, company_context: {}, model: nil) ⇒ Object
25 26 27 28 29 |
# File 'lib/legion/extensions/rfp/generate/runners/sections.rb', line 25

# Produces an executive-summary draft for the supplied RFP text.
# Prompt assembly and the LLM round-trip are handled by module helpers.
#
# @param rfp_text [String] the source RFP text to summarize
# @param company_context [Hash] optional company details woven into the prompt
# @param model [Object, nil] optional model override passed to the LLM helper
# @return [Hash] :result (LLM answer) and :type
def generate_executive_summary(rfp_text:, company_context: {}, model: nil, **)
  summary_prompt = build_executive_summary_prompt(rfp_text: rfp_text, company_context: company_context)
  llm_answer = call_section_llm(prompt: summary_prompt, model: model)
  {
    result: llm_answer,
    type: :executive_summary
  }
end
#generate_section_response(question:, section: nil, context: {}, model: nil, scope: :all) ⇒ Object
11 12 13 14 15 16 17 18 19 20 21 22 23 |
# File 'lib/legion/extensions/rfp/generate/runners/sections.rb', line 11

# Answers a single RFP question, optionally scoped to one section,
# enriching the prompt with retrieved context before calling the LLM.
#
# @param question [String] the RFP question to answer
# @param section [Object, nil] optional section identifier for the response
# @param context [Hash] caller-supplied context merged into the prompt
# @param model [Object, nil] optional model override passed to the LLM helper
# @param scope [Symbol] retrieval scope passed to the context helper (default :all)
# @return [Hash] :result, :section, :question, and :context_used (count of
#   retrieved context items)
def generate_section_response(question:, section: nil, context: {}, model: nil, scope: :all, **)
  retrieved_context = retrieve_section_context(question: question, section: section, scope: scope)
  section_prompt = build_section_prompt(
    question: question,
    section: section,
    context: context,
    retrieved: retrieved_context
  )
  llm_answer = call_section_llm(prompt: section_prompt, model: model)
  {
    result: llm_answer,
    section: section,
    question: question,
    context_used: retrieved_context.length
  }
end