Class: CompletionKit::Run

Inherits:
ApplicationRecord show all
Includes:
Taggable, Turbo::Broadcastable
Defined in:
app/models/completion_kit/run.rb

Constant Summary collapse

STATUSES =
%w[pending running completed failed].freeze

Constants inherited from ApplicationRecord

ApplicationRecord::TenantScopedUniquenessValidator

Instance Method Summary collapse

Methods included from Taggable

#tag_names, #tag_names=

Instance Method Details

#as_json(options = {}) ⇒ Object



193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
# File 'app/models/completion_kit/run.rb', line 193

# Serializes the run for JSON consumers: identity/config attributes,
# progress counters derived from #progress_snapshot, aggregate scores,
# failure details, and associated tag data.
#
# @param options [Hash] accepted for interface compatibility (unused here)
# @return [Hash] the run's serializable attributes
def as_json(options = {})
  snapshot = progress_snapshot

  payload = {
    id: id,
    name: name,
    status: status,
    prompt_id: prompt_id,
    dataset_id: dataset_id,
    judge_model: judge_model,
    temperature: temperature,
    output_column: output_column,
    created_at: created_at,
    updated_at: updated_at,
    responses_count: responses.count,
    avg_score: avg_score
  }

  # The flat counters duplicate the nested :progress "generated" totals.
  payload[:progress_current] = snapshot[:generated_done]
  payload[:progress_total] = snapshot[:generated_total]
  payload[:progress] = {
    generated: { done: snapshot[:generated_done], total: snapshot[:generated_total], failed: snapshot[:generated_failed] },
    judged:    { done: snapshot[:judged_done],    total: snapshot[:judged_total],    failed: snapshot[:judged_failed] }
  }

  payload[:failed_response_ids] = responses.where(status: "failed").pluck(:id)
  payload[:failure_summary] = failure_summary
  payload[:error_message] = error_message
  payload[:metric_ids] = metric_ids
  payload[:tags] = tags.as_json

  payload
end

#avg_scoreObject



76
77
78
79
80
81
82
# File 'app/models/completion_kit/run.rb', line 76

# Mean AI score across every review of every response, rounded to two
# decimal places.
#
# @return [Float, nil] nil when no review carries a score
def avg_score
  scores = responses
    .flat_map(&:reviews)
    .filter_map { |review| review.ai_score&.to_f }
  return nil if scores.empty?

  (scores.sum / scores.size).round(2)
end

#generate_responses!Object



150
151
152
# File 'app/models/completion_kit/run.rb', line 150

# Thin delegator to #start! (presumably retained so existing callers of
# #generate_responses! keep working — confirm call sites before removing).
def generate_responses!
  start!
end

#judge_configured?Boolean

Returns:

  • (Boolean)


64
65
66
# File 'app/models/completion_kit/run.rb', line 64

# True when the run is able to judge responses: a judge model is set,
# at least one metric is attached, and the API config accepts that model.
#
# @return [Boolean]
def judge_configured?
  return false unless judge_model.present?
  return false unless metrics.any?

  ApiConfig.valid_for_model?(judge_model)
end

#judge_only?Boolean

A judge-only run grades a pre-existing column on the dataset instead of generating new outputs. No prompt is attached; the response text is read from the dataset row, and no LLM generation happens.

Returns:

  • (Boolean)


27
28
29
# File 'app/models/completion_kit/run.rb', line 27

# A judge-only run grades a pre-existing dataset column rather than
# generating new LLM output; such runs are identified by having no prompt.
#
# @return [Boolean] true when no prompt is associated with this run
def judge_only?
  prompt.nil?
end

#mark_completed!Object



40
41
42
43
# File 'app/models/completion_kit/run.rb', line 40

# Moves the run to its terminal "completed" status and broadcasts the
# updated state so subscribed UIs refresh.
def mark_completed!
  update!(status: "completed")
  broadcast_ui
end

#metric_averagesObject



84
85
86
87
88
89
90
# File 'app/models/completion_kit/run.rb', line 84

# Per-metric mean AI score across all reviewed responses.
#
# @return [Array<Hash>] one { name:, avg: } entry per metric name, with
#   avg rounded to one decimal place
def metric_averages
  scored_reviews = responses.flat_map(&:reviews).select { |review| review.ai_score.present? }

  scored_reviews.group_by(&:metric_name).map do |metric_name, group|
    values = group.map { |review| review.ai_score.to_f }
    { name: metric_name, avg: (values.sum / values.length).round(1) }
  end
end

#missing_dataset_variablesObject



31
32
33
34
35
36
37
38
# File 'app/models/completion_kit/run.rb', line 31

# Prompt template variables that the attached dataset cannot supply.
#
# @return [Array<String>] empty when there is no prompt or the prompt has
#   no variables; every variable when no dataset is attached; otherwise
#   the variables absent from the dataset's headers
def missing_dataset_variables
  return [] if prompt.nil?

  required = prompt.variables
  return [] if required.empty?
  return required if dataset.nil?

  required - dataset.headers
end

#outstanding_work_zero?Boolean

Returns:

  • (Boolean)


45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
# File 'app/models/completion_kit/run.rb', line 45

# True when no generation or judging work remains for this run.
#
# @return [Boolean]
def outstanding_work_zero?
  # Any response still in a non-terminal state means generation is ongoing.
  return false if responses.where.not(status: Response::TERMINAL_STATUSES).exists?

  # Renamed from the original local `metric_ids` to avoid shadowing the
  # association reader of the same name.
  attached_metric_ids = metrics.pluck(:id)
  return true if attached_metric_ids.empty?

  succeeded_ids = responses.where(status: "succeeded").pluck(:id)
  expected = succeeded_ids.size * attached_metric_ids.size
  return true if expected.zero?

  # Every succeeded response needs one terminal review per metric.
  terminal = Review.where(
    response_id: succeeded_ids,
    metric_id: attached_metric_ids,
    status: Review::TERMINAL_STATUSES
  ).count

  terminal >= expected
end

#progress_snapshotObject



154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
# File 'app/models/completion_kit/run.rb', line 154

# Builds a point-in-time progress report for the run's two phases:
# generation (one Response per dataset row) and judging (one Review per
# succeeded Response per attached metric).
#
# A response counts as "judged" only once ALL of its metric reviews are
# terminal, and as judged_failed when any of those reviews failed.
#
# @return [Hash] Integer values under :generated_done, :generated_total,
#   :generated_failed, :judged_done, :judged_total, :judged_failed
def progress_snapshot
  generated_done = responses.where(status: "succeeded").count
  generated_failed = responses.where(status: "failed").count
  generated_total = progress_total

  # Judging only applies when metrics are attached; the judged total is
  # the number of successfully generated responses (each must be reviewed
  # once per metric).
  metric_count = metrics.count
  judged_total = metric_count > 0 ? generated_done : 0
  judged_done = 0
  judged_failed = 0

  if metric_count > 0 && judged_total > 0
    succeeded_response_ids = responses.where(status: "succeeded").pluck(:id)
    metric_ids = metrics.pluck(:id)
    # Single grouped query: counts keyed by [response_id, status].
    review_counts = Review
      .where(response_id: succeeded_response_ids, metric_id: metric_ids)
      .group(:response_id, :status)
      .count
    succeeded_response_ids.each do |rid|
      ok = review_counts[[rid, "succeeded"]] || 0
      bad = review_counts[[rid, "failed"]] || 0
      # Skip responses whose reviews are not yet all terminal.
      next unless ok + bad == metric_count
      if bad > 0
        judged_failed += 1
      else
        judged_done += 1
      end
    end
  end

  {
    generated_done: generated_done,
    generated_total: generated_total,
    generated_failed: generated_failed,
    judged_done: judged_done,
    judged_total: judged_total,
    judged_failed: judged_failed
  }
end

#replace_metrics!(metric_ids) ⇒ Object



68
69
70
71
72
73
74
# File 'app/models/completion_kit/run.rb', line 68

# Replaces the run's metric associations with the given IDs, preserving
# their order as 1-based positions. A nil argument is a no-op; blank
# entries in the list are dropped.
#
# @param metric_ids [Array, nil] metric IDs in the desired order
def replace_metrics!(metric_ids)
  return unless metric_ids

  run_metrics.delete_all
  chosen = Array(metric_ids).reject(&:blank?)
  chosen.each.with_index(1) do |metric_id, position|
    run_metrics.create!(metric_id: metric_id, position: position)
  end
end

#start!Object



92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
# File 'app/models/completion_kit/run.rb', line 92

# Kicks off a run: expands the dataset into rows, validates configuration,
# resets any prior responses, then enqueues background jobs for generation
# and/or judging. Returns true on success; on a validation failure it
# returns the result of fail_with_summary! instead.
def start!
  rows = if dataset
           CsvProcessor.process_self(self)
         else
           # No dataset: a single empty row drives exactly one generation.
           [{}]
         end

  return fail_with_summary!("Dataset has no rows") if rows.empty?

  if judge_only?
    # Judge-only runs read pre-existing output from a dataset column.
    column = output_column.presence || "actual_output"
    return fail_with_summary!("Dataset has no \"#{column}\" column") unless dataset && dataset.headers.include?(column)
  else
    # Generation runs need a configured LLM client for the prompt's model.
    client = LlmClient.for_model(prompt.llm_model, ApiConfig.for_model(prompt.llm_model))
    unless client.configured?
      return fail_with_summary!("LLM API not configured: #{client.configuration_errors.join(', ')}")
    end
  end

  transaction do
    # Restart from scratch: drop previous responses and reset progress.
    responses.destroy_all
    update!(
      status: "running",
      progress_current: 0,
      progress_total: rows.length,
      failure_summary: nil,
      error_message: nil
    )
    rows.each_with_index do |row, index|
      input = row.empty? ? nil : row.to_json
      attrs = {
        status: "pending",
        row_index: index,
        input_data: input,
        expected_output: row["expected_output"]
      }
      if judge_only?
        # The "generated" text already exists in the dataset column, so
        # the response is created directly in the succeeded state.
        attrs[:status] = "succeeded"
        attrs[:response_text] = row[output_column.presence || "actual_output"].to_s
      end

      response = responses.create!(attrs)

      # NOTE(review): jobs are enqueued inside the transaction; with a
      # non-transaction-aware queue adapter they could start before commit
      # and miss the rows — confirm the adapter in use here.
      if judge_only?
        metrics.each { |m| JudgeReviewJob.perform_later(response.id, m.id) } if judge_configured?
      else
        GenerateRowJob.perform_later(id, response.id)
      end
    end

    # Judge-only runs create no generation jobs, so schedule a completion
    # check directly.
    RunCompletionCheckJob.perform_later(id) if judge_only?
  end

  broadcast_ui
  broadcast_clear_responses
  true
end