Class: DirtyTrackingIntegrationTest

Inherits:
Minitest::Test
  • Object
show all
Includes:
ResourceHelper
Defined in:
lib/kube/cluster/resource/dirty_tracking.rb

Overview

Integration tests — exercises DirtyTracking through the Persistence layer, driving the full Resource → Persistence → kubectl → DirtyTracking cycle.

Instance Method Summary collapse

Methods included from ResourceHelper

#build_resource, #server_state

Instance Method Details

#test_apply_snapshots_server_responseObject


Apply snapshots after the server round-trip




344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 344

# The dirty-tracking snapshot must come from the server's apply response
# (including server-added metadata), not from the locally built payload.
def test_apply_snapshots_server_response
  resource, ctl = build_resource(metadata: { name: "my-config" }, spec: { key: "v1" })

  # Server adds metadata on apply
  reply = server_state(
    metadata: { name: "my-config", resourceVersion: "1", uid: "abc-123" },
    spec: { key: "v1" }
  )
  ctl.stub_response("get", reply)

  resource.apply

  refute resource.changed?

  # The snapshot should include server-added fields, so mutating
  # the original field shows the correct old value
  resource.instance_variable_get(:@data).spec.key = "v2"
  diff = resource.changes

  # changes[:spec] is [old_hash, new_hash]
  old_spec, new_spec = diff[:spec]
  assert_equal "v1", old_spec[:key]
  assert_equal "v2", new_spec[:key]

  # The resource should also have the server-added metadata
  assert resource.to_h.key?(:metadata)
end

#test_attr_changed_through_apply_mutate_patch_cycleObject


Dynamic attr_changed? tracks through full lifecycle




522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 522

# Dynamic *_changed? predicates (spec_changed?, metadata_changed?) must stay
# accurate through the full apply → mutate → patch lifecycle.
def test_attr_changed_through_apply_mutate_patch_cycle
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v1" }
  ))

  resource.apply

  refute resource.spec_changed?, "spec should not be changed after apply"
  # FIX: the extracted source was garbled here ("refute resource.,");
  # restored to metadata_changed? per the assertion message and the
  # dynamic predicates exercised elsewhere in this suite.
  refute resource.metadata_changed?, "metadata should not be changed after apply"

  resource.instance_variable_get(:@data).spec.key = "v2"

  assert resource.spec_changed?, "spec should be changed after mutation"
  refute resource.metadata_changed?, "metadata should still not be changed"

  ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v2" }
  ))

  resource.patch

  refute resource.spec_changed?, "spec should not be changed after patch"
end

#test_changes_applied_resets_baseline_without_server_roundtripObject


changes_applied mid-workflow resets the baseline




465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 465

# changes_applied accepts pending changes locally (no kubectl round-trip)
# and re-baselines, so later mutations diff against the accepted state.
def test_changes_applied_resets_baseline_without_server_roundtrip
  resource, _ctl = build_resource(metadata: { name: "my-config" }, spec: { key: "v1" })

  resource.instance_variable_get(:@data).spec.key = "v2"
  assert resource.changed?
  assert_equal([:spec], resource.changed)

  # Accept changes locally (no kubectl call)
  resource.changes_applied

  refute resource.changed?
  assert_equal({}, resource.changes)

  # Further mutation is tracked from the new baseline
  resource.instance_variable_get(:@data).spec.key = "v3"
  assert resource.changed?

  diff = resource.changes
  # Old value should be v2 (the accepted baseline), not v1.
  # The shape of changes[:spec] (nested hash vs. [old, new] tuple)
  # is handled defensively, same as the original assertions.
  if diff[:spec].is_a?(Hash)
    assert_equal "v2", diff[:spec][:key]&.first,
      "baseline should be v2 after changes_applied"
  end
  if diff[:spec].is_a?(Array)
    assert_equal({ spec: [{ key: "v2" }, { key: "v3" }] }, diff)
  end
end

#test_changes_applied_then_patch_sends_only_subsequent_changesObject



489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 489

# After changes_applied, a subsequent patch must contain only the changes
# made since the new baseline — not the previously accepted ones.
def test_changes_applied_then_patch_sends_only_subsequent_changes
  resource, ctl = build_resource(
    metadata: { name: "my-config", namespace: "default" },
    data: { a: "1", b: "2", c: "3" }
  )

  # First wave of changes
  resource.instance_variable_get(:@data).data.a = "changed-a"
  resource.changes_applied

  # Second wave — only b changes from the new baseline
  resource.instance_variable_get(:@data).data.b = "changed-b"

  reply = server_state(
    metadata: { name: "my-config", namespace: "default" },
    data: { a: "changed-a", b: "changed-b", c: "3" }
  )
  ctl.stub_response("get", reply)

  resource.patch

  cmd = ctl.commands.detect { |c| c.include?("patch") }
  sent = JSON.parse(cmd.split("-p ").last)

  # Only b should be in the patch, not a (already accepted via changes_applied)
  # deep_diff produces [old, new] tuples
  assert_equal ["2", "changed-b"], sent["data"]["b"]
  refute sent["data"].key?("a"), "already-accepted change 'a' should not be in patch"
end

#test_changes_does_not_raise_name_errorObject


Regression: the original bug — build_changes used `result` instead of `hash`




710
711
712
713
714
715
716
717
718
719
720
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 710

# Regression guard: calling #changes must not raise NameError
# (the original build_changes bug referenced an undefined local).
def test_changes_does_not_raise_name_error
  resource, _ctl = build_resource(metadata: { name: "my-config" }, spec: { key: "v1" })

  resource.instance_variable_get(:@data).spec.key = "v2"

  # This would raise NameError with the original bug
  diff = resource.changes

  assert_kind_of Hash, diff
  refute diff.empty?
end

#test_deeply_nested_no_change_produces_empty_patchObject



419
420
421
422
423
424
425
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 419

# An untouched resource — even one with nested structures — must
# produce an empty patch payload.
def test_deeply_nested_no_change_produces_empty_patch
  nested = { name: "my-config", labels: { app: "web" } }
  resource, _ctl = build_resource(metadata: nested)

  assert_equal({}, resource.patch_data)
end

#test_delete_issues_kubectl_deleteObject


Edge case: delete on persisted resource issues command




693
694
695
696
697
698
699
700
701
702
703
704
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 693

# Deleting a persisted resource issues a kubectl delete with the
# resource kind, name, and namespace.
def test_delete_issues_kubectl_delete
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" })

  assert_equal true, resource.delete

  cmd = ctl.commands.detect { |c| c.include?("delete") }
  refute_nil cmd
  assert_includes cmd, "configmap"
  assert_includes cmd, "my-config"
  assert_includes cmd, "--namespace default"
end

#test_delete_raises_on_unpersisted_resourceObject



383
384
385
386
387
388
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 383

# Deleting a resource that was never persisted raises a CommandError.
def test_delete_raises_on_unpersisted_resource
  resource, _ctl = build_resource(spec: { key: "value" })

  err = assert_raises(Kube::CommandError) { resource.delete }
  assert_match(/cannot delete/, err.message)
end

#test_empty_resource_tracks_all_additionsObject


Edge case: resource with no initial spec data




610
611
612
613
614
615
616
617
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 610

# Edge case: a resource built with no spec still tracks additions —
# setting a brand-new field marks the resource dirty.
def test_empty_resource_tracks_all_additions
  resource, _ctl = build_resource(metadata: { name: "empty-config" })

  # Add a field that did not exist at build time
  resource.instance_variable_get(:@data).spec.key = "added"

  assert resource.changed?
  assert_includes resource.changed, :spec
end

#test_full_apply_mutate_patch_lifecycleObject


Full lifecycle: apply → mutate → detect changes → patch → clean




213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 213

# End-to-end lifecycle: apply → mutate → detect changes → patch → clean.
def test_full_apply_mutate_patch_lifecycle
  resource, ctl = build_resource(metadata: { name: "app-config", namespace: "production" }, spec: { key: "original" })

  # Stub the reload after apply — server echoes back what we sent
  echoed = server_state(
    metadata: { name: "app-config", namespace: "production", resourceVersion: "100" },
    spec: { key: "original" }
  )
  ctl.stub_response("get", echoed)

  resource.apply

  # Post-apply the resource should be clean (reload calls snapshot!)
  refute resource.changed?, "resource should be clean after apply + reload"
  assert_equal({}, resource.changes)
  assert_equal [], resource.changed

  # Mutate
  resource.instance_variable_get(:@data).spec.key = "updated"

  # Now dirty
  assert resource.changed?

  # Stub reload after patch
  updated = server_state(
    metadata: { name: "app-config", namespace: "production", resourceVersion: "101" },
    spec: { key: "updated" }
  )
  ctl.stub_response("get", updated)

  assert_equal true, resource.patch

  # Post-patch the resource should be clean again
  refute resource.changed?
  assert_equal({}, resource.changes)
end

#test_multiple_mutations_coalesce_in_single_patchObject


Multiple mutations before patch coalesce into a single diff




431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 431

# Several mutations made before a patch coalesce into one kubectl patch
# whose payload carries an [old, new] tuple per changed leaf.
def test_multiple_mutations_coalesce_in_single_patch
  resource, ctl = build_resource(
    metadata: { name: "my-config", namespace: "default" },
    data: { host: "db-1", port: "5432", pool: "5" }
  )

  data = resource.instance_variable_get(:@data).data
  data.host = "db-2"
  data.port = "5433"
  data.pool = "10"

  ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    data: { host: "db-2", port: "5433", pool: "10" }
  ))

  resource.patch

  # Exactly one patch command
  issued = ctl.commands.select { |c| c.include?("patch") }
  assert_equal 1, issued.size

  payload = JSON.parse(issued.first.split("-p ").last)

  # deep_diff produces [old, new] tuples for each changed leaf
  expected = {
    "host" => ["db-1", "db-2"],
    "port" => ["5432", "5433"],
    "pool" => ["5", "10"]
  }
  expected.each { |field, tuple| assert_equal tuple, payload["data"][field] }
end

#test_nested_mutation_produces_nested_patchObject


Nested mutation flows through patch_data correctly




401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 401

# A mutation of a deeply nested field must surface in patch_data as a
# nested hash down to the changed leaf, omitting unchanged siblings.
def test_nested_mutation_produces_nested_patch
  # ctl is unused here (patch_data is computed locally) — underscore it,
  # matching the convention used by the other local-only tests.
  resource, _ctl = build_resource(
    metadata: { name: "my-config", namespace: "default", labels: { app: "web", tier: "frontend" } }
  )

  # Mutate only a nested field
  # FIX: the extracted source had a doubled dot that dropped the receiver
  # ("@data)..labels.tier"); the labels live under metadata, as the
  # patch assertions below confirm.
  resource.instance_variable_get(:@data).metadata.labels.tier = "backend"

  patch = resource.patch_data
  assert_kind_of Hash, patch[:metadata], "patch_data should nest into metadata"
  assert_kind_of Hash, patch[:metadata][:labels], "patch_data should nest into labels"
  assert_equal ["frontend", "backend"], patch[:metadata][:labels][:tier]

  # Unchanged sibling should not appear
  refute patch[:metadata][:labels].key?(:app), "unchanged label should not appear in patch"
  refute patch.key?(:spec), "unchanged top-level key should not appear in patch"
end

#test_patch_defaults_to_strategic_typeObject



639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 639

# When no type is given, patch uses kubectl's strategic merge type.
def test_patch_defaults_to_strategic_type
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  resource.instance_variable_get(:@data).spec.key = "v2"

  reply = server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v2" }
  )
  ctl.stub_response("get", reply)

  resource.patch

  cmd = ctl.commands.detect { |c| c.include?("patch") }
  assert_includes cmd, "--type strategic"
end

#test_patch_forwards_type_parameterObject


Edge case: patch type parameter is forwarded




623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 623

# An explicit patch type is passed through to the kubectl command line.
def test_patch_forwards_type_parameter
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  resource.instance_variable_get(:@data).spec.key = "v2"

  reply = server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v2" }
  )
  ctl.stub_response("get", reply)

  resource.patch(type: "merge")

  cmd = ctl.commands.detect { |c| c.include?("patch") }
  assert_includes cmd, "--type merge", "patch type should be forwarded to kubectl"
end

#test_patch_includes_namespace_flagsObject


Edge case: namespace flags are included correctly




659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 659

# The resource's namespace flows into the kubectl patch invocation.
def test_patch_includes_namespace_flags
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "kube-system" }, spec: { key: "v1" })

  resource.instance_variable_get(:@data).spec.key = "v2"

  reply = server_state(
    metadata: { name: "my-config", namespace: "kube-system" },
    spec: { key: "v2" }
  )
  ctl.stub_response("get", reply)

  resource.patch

  cmd = ctl.commands.detect { |c| c.include?("patch") }
  assert_includes cmd, "--namespace kube-system"
end

#test_patch_raises_on_unpersisted_resourceObject


Error cases: unpersisted resources




375
376
377
378
379
380
381
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 375

# Patching a resource that was never persisted raises a CommandError.
def test_patch_raises_on_unpersisted_resource
  resource, _ctl = build_resource(spec: { key: "value" })
  # No name → not persisted

  err = assert_raises(Kube::CommandError) { resource.patch }
  assert_match(/cannot patch/, err.message)
end

#test_patch_returns_false_when_cleanObject


Patch returns false when nothing changed




253
254
255
256
257
258
259
260
261
262
263
264
265
266
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 253

# A clean resource short-circuits: patch returns false and no kubectl
# patch command is issued.
def test_patch_returns_false_when_clean
  resource, ctl = build_resource(metadata: { name: "app-config", namespace: "default" }, spec: { key: "value" })

  ctl.stub_response("get", server_state(
    metadata: { name: "app-config", namespace: "default" }, spec: { key: "value" }
  ))

  assert_equal false, resource.patch, "patch should return false when nothing changed"

  # No patch command should have been issued
  issued = ctl.commands.select { |c| c.include?("patch") }
  assert_empty issued, "no kubectl patch should be issued when resource is clean"
end

#test_patch_sends_only_changed_fieldsObject


Patch sends only the diff, not the full resource




272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 272

# Patch sends only the changed subtree, never the full resource.
def test_patch_sends_only_changed_fields
  resource, ctl = build_resource(
    metadata: { name: "my-config", namespace: "staging" },
    spec: { db_host: "old-db.internal", db_port: "5432", cache_ttl: "300" }
  )

  # Mutate one field
  resource.instance_variable_get(:@data).spec.db_host = "new-db.internal"

  reply = server_state(
    metadata: { name: "my-config", namespace: "staging" },
    spec: { db_host: "new-db.internal", db_port: "5432", cache_ttl: "300" }
  )
  ctl.stub_response("get", reply)

  resource.patch

  # Find the patch command
  cmd = ctl.commands.detect { |c| c.include?("patch") }
  refute_nil cmd, "a kubectl patch command should have been issued"

  # Extract the JSON payload from the command (last arg after -p)
  payload = JSON.parse(cmd[(cmd.index("-p ") + 3)..])

  # The payload should contain the spec subtree but NOT metadata
  assert payload.key?("spec"), "patch payload should include changed subtree"
  refute payload.key?("metadata"), "patch payload should not include unchanged top-level keys"
end

#test_reload_does_not_corrupt_previously_captured_changesObject


Snapshot isolation: reload doesn’t leak into captured references




563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 563

# Snapshot isolation: hashes returned by #changes / #patch_data before a
# reload must not be mutated by the reload.
def test_reload_does_not_corrupt_previously_captured_changes
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  resource.instance_variable_get(:@data).spec.key = "v2"

  # Capture changes before reload
  captured_changes = resource.changes
  captured_patch = resource.patch_data

  # Reload with different server state
  ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v3-from-server" }
  ))
  resource.reload

  # Previously captured hashes should be unaffected
  assert_equal "v2", extract_nested_value(captured_changes, :spec, :key, 1),
    "previously captured changes should not be corrupted by reload"
  assert_equal "v2", extract_nested_value(captured_patch, :spec, :key, 1),
    "previously captured patch_data should not be corrupted by reload"
end

#test_reload_includes_namespace_flagsObject



675
676
677
678
679
680
681
682
683
684
685
686
687
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 675

# The resource's namespace flows into the kubectl get used by reload.
def test_reload_includes_namespace_flags
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "monitoring" }, spec: { key: "v1" })

  reply = server_state(
    metadata: { name: "my-config", namespace: "monitoring" },
    spec: { key: "v1" }
  )
  ctl.stub_response("get", reply)

  resource.reload

  cmd = ctl.commands.detect { |c| c.include?("get") }
  assert_includes cmd, "--namespace monitoring"
end

#test_reload_picks_up_server_side_changesObject



324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 324

# Reload adopts external server-side edits and leaves the resource clean.
def test_reload_picks_up_server_side_changes
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  # Server has been mutated externally
  external = server_state(
    metadata: { name: "my-config", namespace: "default", resourceVersion: "200" },
    spec: { key: "server-updated" }
  )
  ctl.stub_response("get", external)

  resource.reload

  # Resource reflects server state and is clean
  refute resource.changed?
  assert_equal "server-updated", resource.to_h[:spec][:key]
end

#test_reload_raises_on_unpersisted_resourceObject



390
391
392
393
394
395
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 390

# Reloading a resource that was never persisted raises a CommandError.
def test_reload_raises_on_unpersisted_resource
  resource, _ctl = build_resource(spec: { key: "value" })

  err = assert_raises(Kube::CommandError) { resource.reload }
  assert_match(/cannot reload/, err.message)
end

#test_reload_resets_dirty_stateObject


Reload resets dirty state from server response




305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 305

# Reload discards local mutations in favor of server state and resets
# the dirty flags.
def test_reload_resets_dirty_state
  resource, ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  # Local mutation
  resource.instance_variable_get(:@data).spec.key = "local-change"
  assert resource.changed?

  # Server still has original
  original = server_state(
    metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" }
  )
  ctl.stub_response("get", original)

  resource.reload

  # After reload, local changes are gone and resource is clean
  refute resource.changed?
  assert_equal "v1", resource.to_h[:spec][:key]
end

#test_respond_to_for_dynamic_changed_predicatesObject



550
551
552
553
554
555
556
557
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 550

# respond_to? must report true for any dynamic *_changed? predicate
# and false for unrelated method names.
def test_respond_to_for_dynamic_changed_predicates
  resource, _ctl = build_resource(metadata: { name: "test" })

  %i[metadata_changed? spec_changed? anything_at_all_changed?].each do |predicate|
    assert resource.respond_to?(predicate)
  end
  refute resource.respond_to?(:some_random_method)
end

#test_snapshot_isolation_across_multiple_changes_appliedObject



587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
# File 'lib/kube/cluster/resource/dirty_tracking.rb', line 587

# Each call to #changes yields an independent snapshot; a later
# changes_applied + mutation must not alter earlier captures.
def test_snapshot_isolation_across_multiple_changes_applied
  resource, _ctl = build_resource(metadata: { name: "test" }, data: { counter: "1" })

  resource.instance_variable_get(:@data).data.counter = "2"
  first_diff = resource.changes

  resource.changes_applied

  resource.instance_variable_get(:@data).data.counter = "3"
  second_diff = resource.changes

  # Each snapshot's changes should be independent
  assert_equal "1", extract_nested_value(first_diff, :data, :counter, 0)
  assert_equal "2", extract_nested_value(first_diff, :data, :counter, 1)

  assert_equal "2", extract_nested_value(second_diff, :data, :counter, 0)
  assert_equal "3", extract_nested_value(second_diff, :data, :counter, 1)
end