Class: RailsErrorDashboard::Commands::LogError
- Inherits: Object
- RailsErrorDashboard::Commands::LogError
- Defined in:
- lib/rails_error_dashboard/commands/log_error.rb
Overview
Command: Log an error to the database. This is a write operation that creates an ErrorLog record.
Class Method Summary collapse
- .call(exception, context = {}) ⇒ Object
-
.call_async(exception, context = {}) ⇒ Object
Queue error logging as a background job.
Instance Method Summary collapse
- #call ⇒ Object
-
#initialize(exception, context = {}) ⇒ LogError
constructor
A new instance of LogError.
Constructor Details
#initialize(exception, context = {}) ⇒ LogError
Returns a new instance of LogError.
66 67 68 69 |
# File 'lib/rails_error_dashboard/commands/log_error.rb', line 66 def initialize(exception, context = {}) @exception = exception @context = context end |
Class Method Details
.call(exception, context = {}) ⇒ Object
8 9 10 11 12 13 14 15 16 17 18 |
# File 'lib/rails_error_dashboard/commands/log_error.rb', line 8 def self.call(exception, context = {}) # Check if async logging is enabled if RailsErrorDashboard.configuration.async_logging # For async logging, just enqueue the job # All filtering happens when the job runs call_async(exception, context) else # For sync logging, execute immediately new(exception, context).call end end |
.call_async(exception, context = {}) ⇒ Object
Queue error logging as a background job
21 22 23 24 25 26 27 28 29 30 31 32 33 |
# File 'lib/rails_error_dashboard/commands/log_error.rb', line 21 def self.call_async(exception, context = {}) # Serialize exception data for the job exception_data = { class_name: exception.class.name, message: exception., backtrace: exception.backtrace, cause_chain: serialize_cause_chain(exception) } # Enqueue the async job using ActiveJob # The queue adapter (:sidekiq, :solid_queue, :async) is configured separately AsyncErrorLoggingJob.perform_later(exception_data, context) end |
Instance Method Details
#call ⇒ Object
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 |
# File 'lib/rails_error_dashboard/commands/log_error.rb', line 71

# Logs the exception: builds the attribute hash, deduplicates by error hash,
# records the occurrence, and dispatches notifications/plugin hooks.
#
# @return [ErrorLog, nil] the created/updated log record, or nil when the
#   exception is filtered out or logging itself fails (never raises)
def call
  # Check if this exception should be logged (ignore list + sampling)
  return nil unless Services::ExceptionFilter.should_log?(@exception)

  error_context = ValueObjects::ErrorContext.new(@context, @context[:source])

  # Find or create application (cached lookup)
  application = find_or_create_application

  # Build error attributes
  truncated_backtrace = Services::BacktraceProcessor.truncate(@exception.backtrace)
  attributes = {
    application_id: application.id,
    error_type: @exception.class.name,
    message: @exception.message,
    backtrace: truncated_backtrace,
    user_id: error_context.user_id,
    request_url: error_context.request_url,
    request_params: error_context.request_params,
    user_agent: error_context.user_agent,
    ip_address: error_context.ip_address,
    platform: error_context.platform,
    controller_name: error_context.controller_name,
    action_name: error_context.action_name,
    occurred_at: Time.current
  }

  # Enriched request context (if columns exist)
  enrich_with_request_context(attributes, error_context)

  # Extract exception cause chain (if column exists)
  if ErrorLog.column_names.include?("exception_cause")
    cause_json = Services::CauseChainExtractor.call(@exception)
    # Fall back to pre-serialized cause chain from async job context
    cause_json ||= build_cause_json_from_context
    attributes[:exception_cause] = cause_json
  end

  # Generate error hash for deduplication (including controller/action context and application)
  error_hash = Services::ErrorHashGenerator.call(
    @exception,
    controller_name: error_context.controller_name,
    action_name: error_context.action_name,
    application_id: application.id,
    context: @context
  )

  # Calculate backtrace signature for fuzzy matching (if column exists)
  if ErrorLog.column_names.include?("backtrace_signature")
    attributes[:backtrace_signature] = Services::BacktraceProcessor.calculate_signature(
      truncated_backtrace,
      locations: @exception.backtrace_locations
    )
  end

  # Add git/release info if columns exist
  if ErrorLog.column_names.include?("git_sha")
    attributes[:git_sha] = RailsErrorDashboard.configuration.git_sha ||
                           ENV["GIT_SHA"] ||
                           ENV["HEROKU_SLUG_COMMIT"] ||
                           ENV["RENDER_GIT_COMMIT"] ||
                           detect_git_sha_from_command
  end

  if ErrorLog.column_names.include?("app_version")
    attributes[:app_version] = RailsErrorDashboard.configuration.app_version ||
                               ENV["APP_VERSION"] ||
                               detect_version_from_file
  end

  # Add environment snapshot (if column exists)
  if ErrorLog.column_names.include?("environment_info")
    attributes[:environment_info] = Services::EnvironmentSnapshot.snapshot.to_json
  end

  # Apply sensitive data filtering (on by default)
  attributes = Services::SensitiveDataFilter.filter_attributes(attributes)

  # Find existing error or create new one.
  # This ensures accurate occurrence tracking.
  error_log = ErrorLog.find_or_increment_by_hash(error_hash, attributes.merge(error_hash: error_hash))

  # Track individual error occurrence for co-occurrence analysis (if table exists)
  if defined?(ErrorOccurrence) && ErrorOccurrence.table_exists?
    begin
      ErrorOccurrence.create(
        error_log: error_log,
        occurred_at: attributes[:occurred_at],
        user_id: attributes[:user_id],
        request_id: error_context.request_id,
        session_id: error_context.session_id
      )
    rescue => e
      RailsErrorDashboard::Logger.error("Failed to create error occurrence: #{e.message}")
    end
  end

  # Send notifications for new errors and reopened errors (with throttling)
  if error_log.occurrence_count == 1
    # Brand new error — notify if severity meets minimum
    if Services::NotificationThrottler.severity_meets_minimum?(error_log)
      Services::ErrorNotificationDispatcher.call(error_log)
      Services::NotificationThrottler.record_notification(error_log)
    end
    PluginRegistry.dispatch(:on_error_logged, error_log)
    trigger_callbacks(error_log)
    emit_instrumentation_events(error_log)
  elsif error_log.just_reopened
    # Reopened error — notify if meets severity + not in cooldown
    if Services::NotificationThrottler.should_notify?(error_log)
      Services::ErrorNotificationDispatcher.call(error_log)
      Services::NotificationThrottler.record_notification(error_log)
    end
    PluginRegistry.dispatch(:on_error_reopened, error_log)
    trigger_callbacks(error_log)
    emit_instrumentation_events(error_log)
  else
    # Recurring unresolved error — check threshold milestones
    if Services::NotificationThrottler.threshold_reached?(error_log)
      Services::ErrorNotificationDispatcher.call(error_log)
      Services::NotificationThrottler.record_notification(error_log)
    end
    PluginRegistry.dispatch(:on_error_recurred, error_log)
  end

  # Check for baseline anomalies
  check_baseline_anomaly(error_log)

  error_log
rescue => e
  # Don't let error logging cause more errors - fail silently.
  # CRITICAL: Log but never propagate exception.
  RailsErrorDashboard::Logger.error("[RailsErrorDashboard] LogError command failed: #{e.class} - #{e.message}")
  RailsErrorDashboard::Logger.error("Original exception: #{@exception.class} - #{@exception.message}") if @exception
  RailsErrorDashboard::Logger.error("Context: #{@context.inspect}") if @context
  RailsErrorDashboard::Logger.error(e.backtrace&.first(5)&.join("\n")) if e.backtrace
  nil # Explicitly return nil, never raise
end