Class: Delayed::Worker

Inherits: Object
Includes: Runnable
Defined in: lib/delayed/worker.rb


Methods included from Runnable

#start

Constructor Details

#initialize ⇒ Worker

Returns a new instance of Worker.



# File 'lib/delayed/worker.rb', line 45

def initialize
  @failed_reserve_count = 0

  # Reset lifecycle on the offhand chance that something lazily
  # triggered its creation before all plugins had been registered.
  Delayed.setup_lifecycle
end
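
The lifecycle reset above exists so that plugins registered late are still picked up. Below is a minimal sketch of such a plugin, assuming this gem follows the classic delayed_job plugin API (Delayed::Plugin, a callbacks block, and a plugins registry on the worker class; check the gem's configuration docs for the exact accessor):

class TimingPlugin < Delayed::Plugin
  callbacks do |lifecycle|
    # Wrap every job invocation and report how long it took.
    lifecycle.around(:invoke_job) do |job, *args, &block|
      started = Time.now
      begin
        block.call(job, *args)
      ensure
        puts "#{job.name} took #{(Time.now - started).round(2)}s"
      end
    end
  end
end

Delayed::Worker.plugins << TimingPlugin # assumed registry, as in classic delayed_job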

Instance Attribute Details

#name ⇒ Object

Every worker has a unique name, which by default is the PID of the process. There are some advantages to overriding this with something that survives worker restarts: a worker can then safely resume working on tasks it locked itself, on the assumption that it simply crashed before.



# File 'lib/delayed/worker.rb', line 57

def name
  return @name unless @name.nil?

  begin
    "#{@name_prefix}host:#{Socket.gethostname} pid:#{Process.pid}"
  rescue StandardError
    "#{@name_prefix}pid:#{Process.pid}"
  end
end
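
For deployments that want that lock recovery across restarts, the name can be assigned directly (the attribute listing above implies a writer); the host and index values here are hypothetical:

worker = Delayed::Worker.new
worker.name = 'host:app-server-1 worker:0' # stable across restarts, unlike the default pid-based name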

#name_prefix ⇒ Object

name_prefix is ignored if name is set directly.



# File 'lib/delayed/worker.rb', line 30

def name_prefix
  @name_prefix
end

Class Method Details

.delay_job?(job) ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/delayed/worker.rb', line 37

def self.delay_job?(job)
  if delay_jobs.is_a?(Proc)
    delay_jobs.arity == 1 ? delay_jobs.call(job) : delay_jobs.call
  else
    delay_jobs
  end
end
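
A sketch of driving this per job, assuming delay_jobs is a writable class-level setting as in classic delayed_job (Delayed::Worker.delay_jobs); the 'inline' queue name is made up for the example:

# Enqueue everything except jobs pushed onto a hypothetical 'inline' queue.
Delayed::Worker.delay_jobs = ->(job) { job.queue != 'inline' }

# A zero-arity proc is also supported, since delay_job? checks the arity.
Delayed::Worker.delay_jobs = -> { ENV['INLINE_JOBS'].nil? }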

Instance Method Details

#failed(job) ⇒ Object



# File 'lib/delayed/worker.rb', line 168

def failed(job)
  self.class.lifecycle.run_callbacks(:failure, self, job) do
    job.hook(:failure)
  rescue StandardError => e
    say "Error when running failure callback: #{e}", 'error'
    say e.backtrace.join("\n"), 'error'
  ensure
    job.destroy_failed_jobs? ? job.destroy : job.fail!
  end
end
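
The job.hook(:failure) call above gives the payload object a chance to react to a permanent failure. A sketch of a job class defining that hook, following the conventional delayed_job hook signature (NotifierService is a made-up collaborator):

class SyncAccountsJob
  def perform
    # ... the actual work ...
  end

  # Invoked via job.hook(:failure) after the job has permanently failed.
  def failure(job)
    NotifierService.alert("job #{job.id} failed permanently") # hypothetical notifier
  end
end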

#job_say(job, text, level = Delayed.default_log_level) ⇒ Object



# File 'lib/delayed/worker.rb', line 179

def job_say(job, text, level = Delayed.default_log_level)
  text = "Job #{job.name} (id=#{job.id})#{say_queue(job.queue)} #{text}"
  say text, level
end

#max_attempts(job) ⇒ Object



# File 'lib/delayed/worker.rb', line 189

def max_attempts(job)
  job.max_attempts || self.class.max_attempts
end

#max_run_time(job) ⇒ Object



# File 'lib/delayed/worker.rb', line 193

def max_run_time(job)
  job.max_run_time || self.class.max_run_time
end
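
Both lookups prefer a per-job value over the worker-wide class default. A sketch of a payload object supplying its own limits, assuming the job record delegates these readers to the payload as classic delayed_job does:

class ImportCatalogJob
  def perform
    # ... the actual work ...
  end

  # Preferred by max_attempts(job) over Delayed::Worker.max_attempts.
  def max_attempts
    3
  end

  # Preferred by max_run_time(job) over Delayed::Worker.max_run_time.
  def max_run_time
    600 # seconds
  end
end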

#on_exit! ⇒ Object



# File 'lib/delayed/worker.rb', line 84

def on_exit!
  Delayed::Job.clear_locks!(name)
end

#reschedule(job, time = nil) ⇒ Object

Reschedule the job to run in the future (called when a job fails). Uses an exponential scale depending on the number of failed attempts.



# File 'lib/delayed/worker.rb', line 156

def reschedule(job, time = nil)
  if (job.attempts += 1) < max_attempts(job)
    time ||= job.reschedule_at
    job.run_at = time
    job.unlock
    job.save!
  else
    job_say job, "FAILED permanently because of #{job.attempts} consecutive failures", 'error'
    failed(job)
  end
end
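
The backoff comes from job.reschedule_at, which by default grows exponentially with the attempt count; payload objects can usually override it. A sketch using the conventional delayed_job hook, assumed to be honored by this gem's backend:

class PollUpstreamJob
  def perform
    # ... the actual work ...
  end

  # Replace the default exponential backoff with a flat 30-second retry interval.
  def reschedule_at(current_time, _attempts)
    current_time + 30
  end
end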

#run(job) ⇒ Object



# File 'lib/delayed/worker.rb', line 124

def run(job)
  metadata = {
    status: 'RUNNING',
    name: job.name,
    run_at: job.run_at,
    created_at: job.created_at,
    priority: job.priority,
    queue: job.queue,
    attempts: job.attempts,
    enqueued_for: (Time.current - job.created_at).round,
  }
  job_say job, metadata.to_json
  run_time = Benchmark.realtime do
    Timeout.timeout(max_run_time(job).to_i, WorkerTimeout) do
      job.invoke_job
    end
    job.destroy
  end
  job_say job, format('COMPLETED after %.4f seconds', run_time)
  true # did work
rescue DeserializationError => e
  job_say job, "FAILED permanently with #{e.class.name}: #{e.message}", 'error'

  job.error = e
  failed(job)
rescue Exception => e # rubocop:disable Lint/RescueException
  self.class.lifecycle.run_callbacks(:error, self, job) { handle_failed_job(job, e) }
  false # work failed
end

#run! ⇒ Object



# File 'lib/delayed/worker.rb', line 71

def run!
  @realtime = Benchmark.realtime do
    @result = work_off
  end

  count = @result[0] + @result[1]

  say format("#{count} jobs processed at %.4f j/s, %d failed", count / @realtime, @result.last) if count.positive?
  interruptable_sleep(self.class.sleep_delay) if count < max_claims

  reload! unless stop?
end
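
The pause between passes (sleep_delay) and the claim size compared against count (max_claims) are class-level settings; a sketch of tuning them, assuming writers exist on Delayed::Worker as the classic delayed_job-style configuration suggests:

Delayed::Worker.sleep_delay = 5 # seconds to wait after a pass that claimed fewer than max_claims jobs
Delayed::Worker.max_claims = 3  # jobs reserved (and worked concurrently) per pass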

#run_thread_callbacks(job, &block) ⇒ Object



# File 'lib/delayed/worker.rb', line 120

def run_thread_callbacks(job, &block)
  self.class.lifecycle.run_callbacks(:thread, self, job, &block)
end

#say(text, level = Delayed.default_log_level) ⇒ Object



# File 'lib/delayed/worker.rb', line 184

def say(text, level = Delayed.default_log_level)
  text = "[Worker(#{name})] #{text}"
  Delayed.say("#{Time.now.strftime('%FT%T%z')}: #{text}", level)
end

#work_off(num = 100) ⇒ Object

Do num jobs and return stats on success/failure. Exit early if interrupted.



# File 'lib/delayed/worker.rb', line 90

def work_off(num = 100)
  success = Concurrent::AtomicFixnum.new(0)
  failure = Concurrent::AtomicFixnum.new(0)

  num.times do
    jobs = reserve_jobs
    break if jobs.empty?

    pool = Concurrent::FixedThreadPool.new(jobs.length)
    jobs.each do |job|
      pool.post do
        run_thread_callbacks(job) do
          if run_job(job)
            success.increment
          else
            failure.increment
          end
        end
      end
    end

    pool.shutdown
    pool.wait_for_termination

    break if stop? # leave if we're exiting
  end

  [success, failure].map(&:value)
end
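
work_off is also useful on its own, e.g. to drain a queue from a console or test without starting the full worker loop (a sketch):

worker = Delayed::Worker.new
successes, failures = worker.work_off(10) # up to 10 reservation passes
puts "#{successes} jobs succeeded, #{failures} failed"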