Module: Legion::Extensions::Rfp::Analytics::Runners::Quality
- Extended by:
- Helpers::Client
- Includes:
- Helpers::Lex
- Included in:
- Client
- Defined in:
- lib/legion/extensions/rfp/analytics/runners/quality.rb
Constant Summary collapse
- QUALITY_DIMENSIONS =
%i[completeness relevance clarity compliance].freeze
Instance Method Summary collapse
- #quality_report(proposals:) ⇒ Object
- #score_proposal(sections:) ⇒ Object
- #score_response(response_text:, question: nil, requirements: []) ⇒ Object
Methods included from Helpers::Client
Instance Method Details
#quality_report(proposals:) ⇒ Object
44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 |
# Aggregates quality scores across a collection of already-scored proposals.
#
# Each proposal is expected to carry per-dimension scores under
# proposal[:quality][:scores][<dimension>]; proposals missing a dimension
# are simply skipped for that dimension's average (via filter_map).
#
# @param proposals [Array<Hash>] scored proposals
# @return [Hash] { result: { count: Integer } } when proposals is empty,
#   otherwise { result: { count:, average_scores: Hash, overall: Float } }
def quality_report(proposals:, **)
  # Guard clause: nothing to aggregate.
  return { result: { count: 0 } } if proposals.empty?

  avg_scores = QUALITY_DIMENSIONS.to_h do |dim|
    scores = proposals.filter_map { |p| p.dig(:quality, :scores, dim) }
    avg = scores.empty? ? 0.0 : (scores.sum.to_f / scores.length).round(2)
    [dim, avg]
  end

  {
    result: {
      count: proposals.length,
      average_scores: avg_scores,
      # Round to two decimals for consistency with score_response and
      # score_proposal, which both round their :overall values.
      overall: (avg_scores.values.sum / avg_scores.length).round(2)
    }
  }
end
#score_proposal(sections:) ⇒ Object
25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
# Scores every section of a proposal and averages the per-section
# :overall values into a proposal-level overall score.
#
# @param sections [Array<Hash>] sections with :name, :content, :question,
#   and :requirements keys (content/requirements default when absent)
# @return [Hash] { result: { sections: Array<Hash>, overall: Float } }
def score_proposal(sections:, **)
  section_scores = sections.map do |section|
    response = score_response(
      response_text: section[:content] || '',
      question: section[:question],
      requirements: section[:requirements] || []
    )
    { name: section[:name], scores: response[:result] }
  end

  # Empty-array sum is 0, but keep the explicit 0.0 branch so an empty
  # proposal still yields a Float overall, as before.
  total = section_scores.sum { |entry| entry[:scores][:overall] }
  avg_overall = section_scores.empty? ? 0.0 : total / section_scores.length

  { result: { sections: section_scores, overall: avg_overall.round(2) } }
end
#score_response(response_text:, question: nil, requirements: []) ⇒ Object
13 14 15 16 17 18 19 20 21 22 23 |
# Scores a single response text along the four quality dimensions and
# reports the arithmetic mean as :overall.
#
# Delegates the per-dimension work to score_completeness, score_relevance,
# score_clarity, and score_compliance (defined elsewhere in this file).
#
# @param response_text [String] the answer text to evaluate
# @param question [String, nil] the question being answered, if known
# @param requirements [Array] requirement entries for compliance scoring
# @return [Hash] { result: { scores: Hash, overall: Float } }
def score_response(response_text:, question: nil, requirements: [], **)
  dimension_scores = {
    completeness: score_completeness(response_text),
    relevance: score_relevance(response_text, question),
    clarity: score_clarity(response_text),
    compliance: score_compliance(response_text, requirements)
  }

  mean = dimension_scores.sum { |_dim, value| value }.to_f / dimension_scores.length

  { result: { scores: dimension_scores, overall: mean.round(2) } }
end