Class: DeadBro::Subscriber
- Inherits:
-
Object
- Object
- DeadBro::Subscriber
- Defined in:
- lib/dead_bro/subscriber.rb
Constant Summary collapse
- EVENT_NAME =
"process_action.action_controller"
Class Method Summary collapse
- .cache_hits(data) ⇒ Object
- .cache_misses(data) ⇒ Object
-
.drain_request_tracking ⇒ Object
Release per-subscriber thread-local state when we’ve decided not to build a payload (disabled / excluded / sampled out).
- .extract_user_id(data) ⇒ Object
- .gc_stats ⇒ Object
- .memory_usage_mb ⇒ Object
- .safe_host ⇒ Object
- .safe_params(data) ⇒ Object
- .safe_path(data) ⇒ Object
- .safe_user_agent(data) ⇒ Object
- .sql_count(data) ⇒ Object
- .subscribe!(client: Client.new) ⇒ Object
-
.truncate_value(value, max_str: 200, max_array: 20, max_hash_keys: 30) ⇒ Object
Recursively truncate values to reasonable sizes to avoid huge payloads.
Class Method Details
.cache_hits(data) ⇒ Object
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 |
# Best-effort count of cache hits for this request.
#
# Prefers an explicit +:cache_hits+ entry in the notification payload,
# then falls back to the Rails cache store's +stats+ when the store
# exposes them. Any failure yields 0 rather than raising.
def self.cache_hits(data)
  return data[:cache_hits] if data[:cache_hits]

  if defined?(Rails) && Rails.cache.respond_to?(:stats)
    begin
      Rails.cache.stats[:hits]
    rescue
      0
    end
  else
    0
  end
rescue
  0
end
.cache_misses(data) ⇒ Object
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 |
# Best-effort count of cache misses for this request.
#
# Mirrors .cache_hits: an explicit +:cache_misses+ payload entry wins,
# otherwise Rails cache store +stats+ are consulted when available.
# Any failure yields 0 rather than raising.
def self.cache_misses(data)
  return data[:cache_misses] if data[:cache_misses]

  if defined?(Rails) && Rails.cache.respond_to?(:stats)
    begin
      Rails.cache.stats[:misses]
    rescue
      0
    end
  else
    0
  end
rescue
  0
end
.drain_request_tracking ⇒ Object
Release per-subscriber thread-local state when we’ve decided not to build a payload (disabled / excluded / sampled out). Without this, a subsequent request reusing the same Puma thread would see stale queries/events.
172 173 174 175 176 177 178 179 180 181 182 183 184 185 |
# Release per-subscriber thread-local state when we've decided not to
# build a payload (disabled / excluded / sampled out). Without this, a
# subsequent request reusing the same Puma thread would see stale
# queries/events.
def self.drain_request_tracking
  DeadBro::SqlSubscriber.stop_request_tracking if defined?(DeadBro::SqlSubscriber)
  DeadBro::CacheSubscriber.stop_request_tracking if defined?(DeadBro::CacheSubscriber)
  DeadBro::RedisSubscriber.stop_request_tracking if defined?(DeadBro::RedisSubscriber)
  DeadBro::ElasticsearchSubscriber.stop_request_tracking if defined?(DeadBro::ElasticsearchSubscriber)
  DeadBro::ViewRenderingSubscriber.stop_request_tracking if defined?(DeadBro::ViewRenderingSubscriber)
  DeadBro::LightweightMemoryTracker.stop_request_tracking if defined?(DeadBro::LightweightMemoryTracker)
  if DeadBro.configuration.allocation_tracking_enabled && defined?(DeadBro::MemoryTrackingSubscriber)
    DeadBro::MemoryTrackingSubscriber.stop_request_tracking
  end
rescue
  # Best effort — draining must never raise from the notifications callback.
ensure
  # Clear the HTTP-event buffer unconditionally. Previously this ran as the
  # last statement of the method body, so any exception above (e.g. the
  # unguarded DeadBro.configuration lookup) was swallowed by the rescue and
  # left stale HTTP events on the thread — the very bug this method exists
  # to prevent.
  Thread.current[:dead_bro_http_events] = nil
end
.extract_user_id(data) ⇒ Object
370 371 372 373 374 |
# Best-effort extraction of the signed-in user's id via Warden, read
# from the request headers' Rack env. Returns nil when Warden, the env,
# or a current user is absent — any failure is swallowed on purpose.
def self.extract_user_id(data)
  warden = data[:headers].env["warden"]
  warden.user.id
rescue
  nil
end
.gc_stats ⇒ Object
304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 |
# Compact snapshot of GC statistics for the metric payload.
#
# Returns a hash with :count, :heap_allocated_pages, :heap_sorted_pages,
# :total_allocated_objects and :heap_live_slots (each defaulting to 0 when
# the running Ruby does not report that key), or {} when GC.stat is
# unavailable or raises.
#
# NOTE: :heap_live_slots was added because subscribe! reads
# gc_stats[:heap_live_slots] for the leak detector's object_count — the
# previous four-key hash made that value permanently nil.
def self.gc_stats
  if defined?(GC) && GC.respond_to?(:stat)
    stats = GC.stat
    {
      count: stats[:count] || 0,
      heap_allocated_pages: stats[:heap_allocated_pages] || 0,
      heap_sorted_pages: stats[:heap_sorted_pages] || 0,
      total_allocated_objects: stats[:total_allocated_objects] || 0,
      heap_live_slots: stats[:heap_live_slots] || 0
    }
  else
    {}
  end
rescue
  {}
end
.memory_usage_mb ⇒ Object
298 299 300 301 302 |
# Resident set size of the current process in megabytes, delegated to
# DeadBro::MemoryHelpers. Falls back to 0 when the helper is missing
# or raises, so payload construction never fails on a memory read.
def self.memory_usage_mb
  begin
    DeadBro::MemoryHelpers.rss_mb
  rescue
    0
  end
end
.safe_host ⇒ Object
194 195 196 197 198 199 200 201 202 203 204 |
# Application identifier used as the payload "host": the Rails
# application's top-level module name (e.g. "MyApp"). Returns "" when
# Rails is absent or anything goes wrong while reading it.
def self.safe_host
  return "" unless defined?(Rails) && Rails.respond_to?(:application)

  Rails.application.class.module_parent_name
rescue
  ""
end
.safe_params(data) ⇒ Object
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 |
# Builds a safe, compact copy of the request parameters for the payload:
# unwraps ActionController::Parameters, drops router-provided keys that
# are already sent at top level, strips obviously sensitive keys
# (string or symbol form), and recursively truncates the remainder.
# Returns {} for missing/non-hash params or on any failure.
def self.safe_params(data)
  raw = data[:params]
  return {} unless raw

  begin
    raw = raw.to_unsafe_h if raw.respond_to?(:to_unsafe_h)
  rescue
  end
  return {} unless raw.is_a?(Hash)

  filtered = raw.dup

  # Remove router-provided keys that we already send at top-level.
  %w[controller action format].each do |key|
    filtered.delete(key) || filtered.delete(key.to_sym)
  end

  # Filter out sensitive parameters (Hash#except needs Ruby 3.0+, hence the guard).
  sensitive = %w[password password_confirmation token secret key]
  if filtered.respond_to?(:except)
    filtered = filtered.except(*sensitive, *sensitive.map(&:to_sym))
  end

  # Truncate deeply to keep payload small and safe.
  truncate_value(filtered)
rescue
  {}
end
.safe_path(data) ⇒ Object
187 188 189 190 191 192 |
# Request path as a String, read from the notification payload's :path
# or, failing that, from the request object. Returns "" on any error.
def self.safe_path(data)
  request = data[:request]
  (data[:path] || (request && request.path)).to_s
rescue
  ""
end
.safe_user_agent(data) ⇒ Object
254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 |
# Extracts the client User-Agent (capped at 201 chars via [0..200]) from
# whichever source the notification exposes, in priority order:
# the request object, a headers object/hash, then a raw Rack env hash.
# Returns "" when nothing is found or on any error.
def self.safe_user_agent(data)
  # Prefer request object if available.
  request = data[:request]
  if request
    ua =
      if request.respond_to?(:user_agent)
        request.user_agent
      elsif request.respond_to?(:env)
        request.env && request.env["HTTP_USER_AGENT"]
      end
    return ua.to_s[0..200]
  end

  # Fallback to headers object/hash if present in notification data.
  headers = data[:headers]
  if headers
    if headers.respond_to?(:[])
      ua = headers["HTTP_USER_AGENT"] || headers["User-Agent"] || headers["user-agent"]
      return ua.to_s[0..200]
    elsif headers.respond_to?(:to_h)
      hash =
        begin
          headers.to_h
        rescue
          {}
        end
      ua = hash["HTTP_USER_AGENT"] || hash["User-Agent"] || hash["user-agent"]
      return ua.to_s[0..200]
    end
  end

  # Fallback to env hash if present in notification data.
  env = data[:env]
  return env["HTTP_USER_AGENT"].to_s[0..200] if env.is_a?(Hash)

  ""
rescue
  ""
end
.sql_count(data) ⇒ Object
320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 |
# Best-effort number of SQL queries for this request: an explicit
# :sql_count payload entry wins; otherwise the ActiveRecord connection's
# query-cache size is used as a rough proxy. Any failure yields 0.
def self.sql_count(data)
  # Count SQL queries from the payload if available.
  return data[:sql_count] if data[:sql_count]

  if defined?(ActiveRecord) && ActiveRecord::Base.connection
    # Try to get from ActiveRecord connection.
    begin
      ActiveRecord::Base.connection.query_cache.size
    rescue
      0
    end
  else
    0
  end
rescue
  0
end
.subscribe!(client: Client.new) ⇒ Object
# Subscribes to the "process_action.action_controller" notification and
# ships one metric per tracked request. Errors attached to the action are
# always posted (force: true) regardless of sampling; normal requests go
# through enable/exclusion/sampling gates, and every skipped request drains
# per-thread tracking state so the next request on the same thread starts
# clean.
#
# @param client [Client] transport used to post heartbeats and metrics
def self.subscribe!(client: Client.new)
  ActiveSupport::Notifications.subscribe(EVENT_NAME) do |name, started, finished, _unique_id, data|
    # When disabled remotely, fire a heartbeat at most once per minute so the gem
    # can detect when tracking has been re-enabled, then skip all tracking.
    unless DeadBro.configuration.enabled
      client.post_heartbeat if DeadBro.configuration.heartbeat_due?
      drain_request_tracking
      next
    end

    # Skip excluded controllers or controller#action pairs
    # Also check exclusive_controller_actions - if defined, only track those
    notification = data.is_a?(Hash) ? data : {}
    controller_name = notification[:controller].to_s
    action_name = notification[:action].to_s
    begin
      if DeadBro.configuration.excluded_controller?(controller_name, action_name)
        drain_request_tracking
        next
      end
      unless DeadBro.configuration.exclusive_controller?(controller_name, action_name)
        drain_request_tracking
        next
      end
    rescue
      drain_request_tracking
      next
    end

    has_error = data[:exception] || data[:exception_object]

    # Errors always ship regardless of sampling (this is what the docs promise).
    unless has_error || DeadBro.configuration.should_sample?
      drain_request_tracking
      next
    end

    duration_ms = ((finished - started) * 1000.0).round(2)

    # Stop SQL tracking and get collected queries (this was started by the request)
    sql_queries = DeadBro::SqlSubscriber.stop_request_tracking

    # Stop cache, redis, and elasticsearch tracking
    cache_events = defined?(DeadBro::CacheSubscriber) ? DeadBro::CacheSubscriber.stop_request_tracking : []
    redis_events = defined?(DeadBro::RedisSubscriber) ? DeadBro::RedisSubscriber.stop_request_tracking : []
    elasticsearch_events = defined?(DeadBro::ElasticsearchSubscriber) ? DeadBro::ElasticsearchSubscriber.stop_request_tracking : []

    # Stop view rendering tracking and get collected view events
    view_events = DeadBro::ViewRenderingSubscriber.stop_request_tracking
    view_performance = DeadBro::ViewRenderingSubscriber.analyze_view_performance(view_events)

    # Stop memory tracking and get collected memory data
    if DeadBro.configuration.allocation_tracking_enabled && defined?(DeadBro::MemoryTrackingSubscriber)
      detailed_memory = DeadBro::MemoryTrackingSubscriber.stop_request_tracking
      memory_performance = DeadBro::MemoryTrackingSubscriber.analyze_memory_performance(detailed_memory)
      # Keep memory_events compact and user-friendly (no large raw arrays)
      memory_events = {
        memory_before: detailed_memory[:memory_before],
        memory_after: detailed_memory[:memory_after],
        duration_seconds: detailed_memory[:duration_seconds],
        allocations_count: (detailed_memory[:allocations] || []).length,
        memory_snapshots_count: (detailed_memory[:memory_snapshots] || []).length,
        large_objects_count: (detailed_memory[:large_objects] || []).length
      }
    else
      lightweight_memory = DeadBro::LightweightMemoryTracker.stop_request_tracking
      # Separate raw readings from derived performance metrics to avoid duplicating data
      memory_events = {
        memory_before: lightweight_memory[:memory_before],
        memory_after: lightweight_memory[:memory_after]
      }
      memory_performance = {
        memory_growth_mb: lightweight_memory[:memory_growth_mb],
        gc_count_increase: lightweight_memory[:gc_count_increase],
        heap_pages_increase: lightweight_memory[:heap_pages_increase],
        duration_seconds: lightweight_memory[:duration_seconds]
      }
    end

    # Record memory sample for leak detection (only if memory tracking enabled)
    if DeadBro.configuration.memory_tracking_enabled
      # Snapshot GC stats once instead of re-reading GC.stat for each field.
      current_gc = gc_stats
      DeadBro::MemoryLeakDetector.record_memory_sample({
        memory_usage: memory_usage_mb,
        gc_count: current_gc[:count],
        heap_pages: current_gc[:heap_allocated_pages],
        object_count: current_gc[:heap_live_slots],
        request_id: data[:request_id],
        controller: data[:controller],
        action: data[:action]
      })
    end

    # Report exceptions attached to this action (e.g. controller/view errors)
    if data[:exception] || data[:exception_object]
      begin
        # data[:exception] is a [class_name, message] pair per ActiveSupport.
        exception_class, exception_message = data[:exception] if data[:exception]
        exception_obj = data[:exception_object]
        backtrace = Array(exception_obj&.backtrace).first(50)
        error_payload = {
          controller: data[:controller],
          action: data[:action],
          format: data[:format],
          method: data[:method],
          path: safe_path(data),
          status: data[:status],
          duration_ms: duration_ms,
          rails_env: DeadBro.env,
          host: safe_host,
          params: safe_params(data),
          user_agent: safe_user_agent(data),
          user_id: extract_user_id(data),
          exception_class: exception_class || exception_obj&.class&.name,
          message: (exception_message || exception_obj&.message).to_s[0, 1000],
          backtrace: backtrace,
          error: true,
          logs: DeadBro.logger.logs
        }
        event_name = (exception_class || exception_obj&.class&.name || "exception").to_s
        client.post_metric(event_name: event_name, payload: error_payload, force: true)
      rescue
      ensure
        # Error requests ship only the error payload — skip the normal one.
        next
      end
    end

    payload = {
      controller: data[:controller],
      action: data[:action],
      format: data[:format],
      method: data[:method],
      path: safe_path(data),
      status: data[:status],
      duration_ms: duration_ms,
      view_runtime_ms: data[:view_runtime],
      db_runtime_ms: data[:db_runtime],
      host: safe_host,
      rails_env: DeadBro.env,
      params: safe_params(data),
      user_agent: safe_user_agent(data),
      user_id: extract_user_id(data),
      memory_usage: memory_usage_mb,
      gc_stats: gc_stats,
      sql_count: sql_count(data),
      sql_queries: sql_queries,
      http_outgoing: Thread.current[:dead_bro_http_events] || [],
      cache_events: cache_events,
      redis_events: redis_events,
      elasticsearch_events: elasticsearch_events,
      cache_hits: cache_hits(data),
      cache_misses: cache_misses(data),
      view_events: view_events,
      view_performance: view_performance,
      memory_events: memory_events,
      memory_performance: memory_performance,
      logs: DeadBro.logger.logs
    }

    client.post_metric(event_name: name, payload: payload)
  end
end
.truncate_value(value, max_str: 200, max_array: 20, max_hash_keys: 30) ⇒ Object
Recursively truncate values to reasonable sizes to avoid huge payloads
236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 |
# Recursively truncate values to reasonable sizes to avoid huge payloads.
#
# Strings longer than +max_str+ are cut and suffixed with "…"; arrays keep
# the first +max_array+ elements; hashes keep the first +max_hash_keys+
# entries; scalars pass through; anything else is stringified and capped
# like a String.
def self.truncate_value(value, max_str: 200, max_array: 20, max_hash_keys: 30)
  case value
  when Numeric, TrueClass, FalseClass, NilClass
    value
  when String
    value.length > max_str ? value[0, max_str] + "…" : value
  when Array
    value.first(max_array).map do |element|
      truncate_value(element, max_str: max_str, max_array: max_array, max_hash_keys: max_hash_keys)
    end
  when Hash
    value.first(max_hash_keys).each_with_object({}) do |(key, nested), out|
      out[key] = truncate_value(nested, max_str: max_str, max_array: max_array, max_hash_keys: max_hash_keys)
    end
  else
    text = value.to_s
    text.length > max_str ? text[0, max_str] + "…" : text
  end
end