diff --git a/dev/diagnostic-traces/cmop-metaclass-refcount-underflow.txt b/dev/diagnostic-traces/cmop-metaclass-refcount-underflow.txt new file mode 100644 index 000000000..f090370c2 --- /dev/null +++ b/dev/diagnostic-traces/cmop-metaclass-refcount-underflow.txt @@ -0,0 +1,102 @@ +[REFCOUNT] *** ARMED *** base=573487274 (RuntimeHash) bless='Class::MOP::Class' refCount=0 +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) 
blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 
6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 
7 -> 6 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush 
(deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 
RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 0 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) diff --git a/dev/diagnostic-traces/cmop-metaclass-refcount-with-queue-sites.txt b/dev/diagnostic-traces/cmop-metaclass-refcount-with-queue-sites.txt new file mode 100644 index 000000000..bd8c7219f --- /dev/null +++ b/dev/diagnostic-traces/cmop-metaclass-refcount-with-queue-sites.txt @@ -0,0 +1,150 @@ +[REFCOUNT] *** ARMED *** base=573487274 (RuntimeHash) bless='Class::MOP::Class' refCount=0 +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 1 MortalList.deferDecrement (queued) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) 
+[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 7 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 7 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 7 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 
MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 
(RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) 
+[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) 
+[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 7 -> 6 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush 
(deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 3 
MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 5 
RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 2 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 2 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 0 MortalList.flush (deferred decrement) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 1 MortalList.deferDecrement (queued) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT] base=573487274 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] 
base=573487274 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) diff --git a/dev/diagnostic-traces/cmop-metaclass-with-owner-tracking.txt b/dev/diagnostic-traces/cmop-metaclass-with-owner-tracking.txt new file mode 100644 index 000000000..3b57a59bf --- /dev/null +++ b/dev/diagnostic-traces/cmop-metaclass-with-owner-tracking.txt @@ -0,0 +1,206 @@ +[REFCOUNT] *** ARMED *** base=408069119 (RuntimeHash) bless='Class::MOP::Class' refCount=0 +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 1 MortalList.deferDecrement (queued) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1648232591 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1670993182 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1984094095 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=58488213 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) 
+[REFCOUNT-RECORD] base=408069119 owner=166694583 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=989889899 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1521238608 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1896074070 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=671596011 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 7 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 7 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 7 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 6 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=38603201 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 
RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1474957626 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=900298796 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=361380654 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1338905451 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1890266440 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush 
(deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=599782425 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1620529408 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=50503805 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1291367132 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1887699190 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, 
scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=204322447 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=67730604 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=553759818 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=904253669 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=777341499 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=2015301874 
setLargeRefCounted store +[REFCOUNT-RECORD] base=408069119 owner=399373008 incrementRefCountForContainerStore +[REFCOUNT-RECORD] base=408069119 owner=452364286 incrementRefCountForContainerStore +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 6 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1242688388 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=425015667 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=452364286 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 6 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=2145896000 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 
MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=833240229 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 7 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=332365138 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 7 -> 6 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 6 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1756819670 setLargeRefCounted store +[REFCOUNT] base=408069119 
(RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=57624756 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=464872674 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1659286984 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 4 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=2072130509 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1793899405 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1302725372 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) 
+[REFCOUNT-RECORD] base=408069119 owner=713898436 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1293462056 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1453606810 setLargeRefCounted store +[REFCOUNT-RECORD] base=408069119 owner=379124840 incrementRefCountForContainerStore +[REFCOUNT-RECORD] base=408069119 owner=727197178 incrementRefCountForContainerStore +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementRecursive (blessed, queued) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 4 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=2123533871 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=281421502 setLargeRefCounted store +[REFCOUNT] base=408069119 
(RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=727197178 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 WeakRefRegistry.weaken (decrement on weakening) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=50072771 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 5 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1105416433 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 6 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=248710794 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 6 -> 5 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 5 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 5 -> 4 MortalList.flush (deferred decrement) 
+[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 4 -> 3 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 2 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 2 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 2 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 1 MortalList.flush (deferred decrement) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 0 MortalList.flush (deferred decrement) +[REFCOUNT-RECORD] base=408069119 owner=1625090026 incrementRefCountForContainerStore +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 1 MortalList.deferDecrement (queued) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 1 -> 2 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=93798665 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 2 -> 3 RuntimeScalar.setLargeRefCounted (increment on store) +[REFCOUNT-RECORD] base=408069119 owner=1976166251 setLargeRefCounted store +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT] base=408069119 (RuntimeHash) blessId=2 3 -> 3 MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false) +[REFCOUNT-OWNERS] base=408069119 (RuntimeHash) blessId=2 refCount=-2147483648 owners=2 diff --git a/dev/modules/moose_support.md b/dev/modules/moose_support.md index d2409d792..03dd92ca6 100644 --- a/dev/modules/moose_support.md +++ b/dev/modules/moose_support.md @@ -1991,6 +1991,1066 @@ Tests fixed: - A handful of cmop/method introspection edge cases (constants, forward declarations, eval-defined subs). 
+### D-W6.7: Pinpointed root cause — %METAS storage doesn't bump cooperative refCount + +**Date:** 2026-04-26 +**Branch:** `fix/d-w6-precise-die-probe` + +With the walker gate disabled in `MortalList.java:558` and a more +granular probe added to `WeakRefRegistry` (env-flag +`PJ_WEAKCLEAR_TRACE`), we instrumented `weaken`, `removeWeakRef`, and +`clearWeakRefsTo` and ran `use Class::MOP` after wrapping +`Class::MOP::Attribute::{attach_to_class, install_accessors}` from +Perl with refaddr-printing logs. + +**Smoking gun trace** (`use Class::MOP`, gate disabled): + +``` +[WEAKEN] ref=... referent=1746570062 (RuntimeHash) ← _methods slot +[WEAKEN] ref=... referent=1746570062 (RuntimeHash) ← method_metaclass slot +[WEAKEN] ref=... referent=1746570062 (RuntimeHash) ← wrapped_method_metaclass slot +[WEAKCLEAR] referent=1746570062 (RuntimeHash) clearing 4 weak refs + at WeakRefRegistry.clearWeakRefsTo(...) + at DestroyDispatch.callDestroy(...) + at MortalList.flush(...) + at jar:PERL5LIB/Class/MOP/Class.pm:260 ← inside Class::MOP::Class::initialize/_construct_class_instance + at jar:PERL5LIB/Class/MOP/Mixin.pm:104 ← Mixin::meta + at jar:PERL5LIB/Class/MOP.pm:2351 ← bootstrap statement +attach_to_class attr=... name=_methods class=1746570062 + after_attach: assoc=1746570062 ← OK +attach_to_class attr=... name=method_metaclass class=1746570062 + after_attach: assoc=1746570062 ← OK +attach_to_class attr=... name=wrapped_method_metaclass class=1746570062 + after_attach: assoc=UNDEF ← BUG +install_accessors: assoc=UNDEF +``` + +**The bug:** the metaclass `RuntimeHash` (id=1746570062) is stored in +`our %METAS` (`store_metaclass_by_name $METAS{$name} = $self`). +Despite `%METAS` strongly holding it, the cooperative refCount drops +to 0 at end-of-statement when the temporary returned by +`HasMethods->meta` falls out of scope and `MortalList.flush()` +processes its mortals. 
`clearWeakRefsTo` is called on the metaclass, +which nulls all 4 attribute back-references including the freshly +weakened `wrapped_method_metaclass`'s `associated_class` slot. + +The first failing `install_accessors` then sees `associated_class = +UNDEF` and dies on `$class->add_method(...)`. That die is hidden by +`local $SIG{__DIE__}` inside `try { install_accessors }` at +`Class/MOP/Class.pm:897`. The catch block runs `remove_attribute → +remove_accessors → _remove_accessor` at `Class/MOP/Attribute.pm:475`, +which dies again with the visible `Can't call method "get_method" on +an undefined value` message. + +**Root cause statement:** the cooperative refCount is failing to +count the strong reference held by the package variable hash slot +`$METAS{$package_name}`. When the temporary metaclass return-value +from `->meta` expires, refCount goes from 1 → 0 even though `%METAS` +still holds it. + +**Why the walker gate "fixes" it:** the gate at +`MortalList.java:558-561` short-circuits the destroy cascade for +metaclass-shaped names, so `clearWeakRefsTo` never gets a chance to +null the attribute back-refs. + +**Why universal walker doesn't fix it:** the walker is consulted +*after* refCount underflow. By the time `MortalList.flush()` decides +to call DESTROY, the refCount is already 0; the walker would need to +detect "refCount=0 but %METAS still references" — which is exactly +what a graph walk from package globals could confirm. The "universal +walker" experiments only checked direct lexical reachability, not +package-variable-hash reachability. + +### D-W6.8: Next steps + +Two options for a real fix: + +1. **Fix the refCount discipline:** ensure `RuntimeHash.put()` / + slot-assignment increments the cooperative refCount of the + referent when storing a blessed/tracked RuntimeBase value. Find + why this doesn't happen for `$METAS{$name} = $meta`. + +2. 
**Walker as ground truth before `clearWeakRefsTo`:** when refCount + hits 0, before firing DESTROY, run a reachability walk from + package-variable roots; if reachable, leave refCount at 1 instead + of dropping to 0. (Replaces the heuristic gate with a precise + walker check.) + +Option 1 is the proper fix; option 2 is the safety net we already +half-built. + +The diagnostic env-flags `PJ_WEAKCLEAR_TRACE` (now wired up across +`weaken`, `removeWeakRef`, `clearWeakRefsTo`) and `PJ_DESTROY_TRACE` +should be retained. + +### D-W6.9: Walker-fix experiments (Apr 2026) + +Three concrete fix attempts on `fix/d-w6-precise-die-probe`: + +| Variant | DBIC 52leaks | Notes | +|---------|--------------|-------| +| Master (class-name heuristic) | 11/11 pass | baseline | +| Universal walker (no heuristic) | 9/11 + die at line 518 | resultset that should leak is incorrectly rescued; downstream `weaken(my $r = shift @circreffed)` dies because `$r` is undef | +| Hybrid (heuristic full + non-heuristic globalOnly) | same as universal — DBIC regression unchanged | | +| Add walker gate to weaken's dec-to-0 path | times out (>120s) on `t/52leaks.t` | walker invocations on every weaken of Moose/CMOP-blessed objects is too expensive on DBIC's Moose-heavy schema construction | + +Conclusion: the class-name heuristic is the **best known compromise** +until we either: +- Fix the underlying `our %METAS` refCount-discipline bug (option 1) +- Cache walker reachability results to make universal application + cheap enough not to time out + +Synthetic reproducers in `src/test/resources/unit/refcount/drift/` +do NOT trip the underflow with non-CMOP class names — proving that +the bug is not just a generic `our %HASH` storage issue. Some +specific shape of the recursive CMOP bootstrap (interaction between +`HasMethods->meta` reentry and weakly-attached attributes) is +required. 
+ +Suggested next investigation: per-RuntimeBase refCount transition +trace for the specific metaclass, surfacing each increment/decrement +site during the failing `use Class::MOP`. With 22 decrement sites +across the runtime, blanket instrumentation is infeasible, but a +targeted tracer (turn on for blessed `Class::MOP::Class` only) +should make the underflow source pinpointable. + +### D-W6.10: Targeted refCount tracer wired up + accounting result + +Implemented (this branch): a per-RuntimeBase `refCountTrace` flag +that is armed at `bless` time when the bless target matches +`classNeedsWalkerGate` (Class::MOP / Moose / Moo) and the env-flag +`PJ_REFCOUNT_TRACE` is set. The flag-gated `traceRefCount(delta, +reason)` method writes a stderr line for every transition with a +short stack snippet. Wired into the four critical sites: + +- `WeakRefRegistry.weaken` (decrement on weakening) +- `RuntimeScalar.setLargeRefCounted` (increment on store) +- `RuntimeScalar.setLargeRefCounted` (decrement on overwrite) +- `MortalList.flush` (deferred decrement) + +Cost when off: one `boolean &&` test per call site. Cost when on: +all increments/decrements logged for matched objects only. + +**Findings on `use Class::MOP` (gate disabled):** the failing +metaclass `RuntimeHash` (id `573487274` in one run) accumulated: + +- 50 increments via `setLargeRefCounted (increment on store)` +- 45 decrements via `MortalList.flush (deferred decrement)` +- 6 decrements via `WeakRefRegistry.weaken (decrement on weakening)` +- 1 silent increment from `bless` itself (refCount++ at + `ReferenceOperators.java:83`) +- 1 silent paired deferred decrement queued by `bless` from + `MortalList.deferDecrement(referent)` at line 84 (fires later + during a flush, counted in the 45) + +Net: **+1 + 50 - 51 = 0**, i.e. one extra decrement. 
+ +The 1→0 transition occurs during `MortalList.flush()` triggered at +the end of statement at `Class/MOP/Class.pm:260` +(`my $super_meta = Class::MOP::get_metaclass_by_name(...)`). At +that moment the metaclass should have refCount==1 (held by +`our %METAS`), but the cooperative count goes to 0 because one of +the 50 increment sites does **not** end up paired with the right +decrement (either one extra `pending.add(metaclass)` queued without a +paired increment, or one increment lost while its paired +decrement was still queued and later fired). + +The full trace for the failing metaclass is preserved at +`dev/diagnostic-traces/cmop-metaclass-refcount-underflow.txt` (102 +lines, every transition with stack). + +**Best fix candidates given this data:** + +1. **Mortal-side rescue (cheap):** before `--base.refCount` brings + refCount to 0 inside `MortalList.flush`, consult the walker on a + *per-blessId* whitelist (currently the heuristic). Keep the + class-name heuristic — it's the most efficient mask. + +2. **Increment audit (correct, hard):** find the unpaired + increment/decrement. Likely candidates: + - The bless `pending.add(referent)` at `ReferenceOperators.java:84`: + if the bless is followed by `setLargeRefCounted` storing the same + referent, both happen, and the deferred-decrement's eventual + flush is paired with the storing increment — fine. But if the + bless is followed by a copy-and-store path that doesn't go + through `setLargeRefCounted`, the deferred decrement is unpaired. + - The `setLargeRefCounted` "rescue" path + (`currentDestroyTarget`) at line 1155 increments without a + paired decrement. + +3. **Walker as ground truth (medium):** the universal walker + (already attempted in D-W6.9) is the most general fix but + regresses DBIC. Without a smarter root-set definition, this is + not a viable replacement for the heuristic. 
+ +The diagnostic env-flags and tracer are retained on this PR so the +next session can pinpoint the unpaired site without re-bootstrapping +the instrumentation. + +### D-W6.11: Concrete fix plan — eliminate the class-name heuristic + +The user-stated requirement: **the class-name heuristic is not +acceptable**. We must find and fix the actual unpaired +increment/decrement in cooperative refCount discipline. + +#### What we know from the tracer + +For the failing CMOP metaclass, instrumented over `use Class::MOP` +with the gate disabled: + +| Source | Count | +|---|---| +| `setLargeRefCounted` increments (line 1133) | 50 | +| `bless` silent increment (`ReferenceOperators.java:83`) | 1 | +| `MortalList.deferDecrement` queueings | 2 | +| ↳ from `bless` at `ReferenceOperators.java:100` | 1 | +| ↳ from `RuntimeArray.shift` at `RuntimeArray.java:163` | 1 | +| `MortalList.deferDecrementIfTracked` queueings | 42 | +| `MortalList.deferDecrementRecursive` queueings (blessed) | 4 | +| `WeakRefRegistry.weaken` decrements | 6 | +| (queueings flushed during run) | 45 | + +Total queueings (deferred decrements): 48 +Plus immediate weaken decrements: 6 +Total logical decrements at end: 54 + +Total increments: 51 (50 setLargeRefCounted + 1 silent bless) + +**Net: 51 - 54 = -3** decrement excess, vs the expected **+1** +(metaclass held by `our %METAS`). Discrepancy: **4 extra decrements +unpaired with increments**. + +Trace artifact: `dev/diagnostic-traces/cmop-metaclass-refcount-with-queue-sites.txt` +(150 lines, full call-stack snippets). + +#### Fix plan + +**Step 1: Make the tracer track per-scalar ownership.** + +Today the tracer knows refCount transitions but not *which scalar* +holds the relevant ownership flag. The unpaired sites are easier to +find if we tag each `setLargeRefCounted` increment with the +RuntimeScalar's identity, and each decrement with the scalar whose +`refCountOwned` is being consumed. 
Imbalance = an increment scalar +that was never decremented, or a decrement scalar that was never +incremented. + +Implementation sketch: +```java +// In setLargeRefCounted: when nb.refCount++ runs, +nb.recordOwner(this); // tag this scalar as an owner + +// In deferDecrementIfTracked: when scalar.refCountOwned -> false, +base.releaseOwner(scalar); // remove tag + +// At end of run, dump base.activeOwners: +// - 1 entry expected: the hash element scalar of $METAS{name} +// - 0 entries observed → the bug +``` + +**Step 2: Identify the unpaired site.** + +Run instrumented `use Class::MOP` once. The 4 extra decrement +queueings will surface as either: + +- A scalar that was decremented twice (double-release) +- A pending.add() called for a scalar that never owned the increment +- A `deferDecrementRecursive` walking through a container that + shouldn't be torn down (probably the most likely — DBIC's stash + cleanup walks too aggressively?) + +Likely suspects from the trace: +- **`RuntimeArray.shift` at line 163** queues a `deferDecrement` for + a blessed metaclass element. When this slot's prior store + incremented refCount, this dec balances. But if shift was called + on a non-tracked array (or on a slot that was a copy not the + original), the dec is unpaired. Stack: `Class/MOP/Package.pm:1281` + — investigate what shift target is being shifted there. +- **`deferDecrementRecursive` (4 calls)** walks recursively into + containers being destroyed. If the metaclass is referenced + through a hash that itself goes out of scope, this is the path. + But the surrounding hash may have been only a transient (e.g., + args list to a method) — its tear-down would be paired with + whatever increment created that hash's elements. 
+ +**Step 3: Fix the unpaired site.** + +Once located, the fix is one of: +- Add a missing increment (e.g., a path that stores into a + container without going through `setLargeRefCounted`) +- Remove a spurious decrement (e.g., `deferDecrementRecursive` + walking a container whose elements were stored without + ownership) +- Adjust ownership tracking (e.g., copy-ctor not setting + `refCountOwned`, but a downstream path treating the copy as if + it owned) + +**Step 4: Remove the class-name heuristic.** + +Once the underlying bug is fixed, the gate at +`MortalList.flush()` line 561 simplifies to: + +```java +} else if (base.blessId != 0 + && WeakRefRegistry.hasWeakRefsTo(base) + && ReachabilityWalker.isReachableFromRoots(base)) { + // Universal walker safety net (no heuristic). +} +``` + +Or even simpler: remove the gate entirely once cooperative refCount +is correct and DBIC's leak detection passes without rescue. + +**Step 5: Acceptance gates.** + +The fix is acceptable when ALL of: +- `make` passes (unit tests) +- DBIC `t/52leaks.t` passes 11/11 (today's master baseline) +- Moose suite ≥ 396/478 files (today's baseline) +- `use Class::MOP` succeeds without the walker gate +- The synthetic reproducers in `src/test/resources/unit/refcount/drift/` + pass with the gate disabled + +The diagnostic env-flags (`PJ_REFCOUNT_TRACE`, `PJ_WEAKCLEAR_TRACE`, +`PJ_DESTROY_TRACE`) and instrumentation hooks remain in place for +future regressions. + +### D-W6.12: Per-scalar ownership tracker — 2 unpaired increments isolated + +Implemented Step 1 of the D-W6.11 plan: per-scalar ownership tracker +on `RuntimeBase` with `recordOwner(scalar, site)` / +`releaseOwner(scalar, site)`. When `PJ_REFCOUNT_TRACE` is set, every +refCount-affecting operation that touches a heuristic-blessed object +records or releases the owning scalar; `dumpTraceOwners()` runs as a +JVM shutdown hook. 
+ +Wired increments into: +- `RuntimeScalar.setLargeRefCounted` (line 1133) +- `RuntimeScalar.incrementRefCountForContainerStore` (line 932) +- `ReferenceOperators.bless` re-bless path (line 139) +- `RuntimeScalar.setLargeRefCounted` rescue path (line 1183) + +Wired releases into: +- `RuntimeScalar.setLargeRefCounted` overwrite (line 1196) +- `WeakRefRegistry.weaken` (line 116) +- `MortalList.deferDecrementIfTracked` (line 183) +- `MortalList.deferDecrementRecursive` (line 444) +- `RuntimeArray.shift` (line 164) +- `DestroyDispatch.doCallDestroy` args balance (line 364) + +After plumbing all known inc/dec paths: zero `*** UNPAIRED RELEASE ***` +events, but one CMOP metaclass (`RuntimeHash` id `408069119`) ends +the run with **`refCount = Integer.MIN_VALUE` (destroyed) and 2 +surviving owner scalars holding strong references**. Trace at +`dev/diagnostic-traces/cmop-metaclass-with-owner-tracking.txt`. + +The two surviving owners both came from `setLargeRefCounted store`: +- One via `RuntimeScalar.set ← addToScalar:902` (normal store path) +- One via `RuntimeScalar.set ← RuntimeBaseProxy.set:65` (proxy delegate) + +This proves the imbalance is **not in the tracer instrumentation**: +those owner scalars genuinely still hold strong refs to the +destroyed metaclass. Cooperative refCount said "0 strong refs" +while in fact 2 strong refs exist. + +#### Smoking-gun candidates + +The 2 extra decrements that brought refCount → 0 must come from +sites that **modify `base.refCount` without consulting ownership**. 
+Audit candidates (sites doing `--base.refCount` directly): + +| File | Line | Path | +|---|---|---| +| `RuntimeList.java` | 623, 642, 666, 699 | list-assignment "undo materialized copy" | +| `Storable.java` | 617 | dclone refCount fixup | +| `DestroyDispatch.java` | 366 | doCallDestroy args balance (instrumented; OK) | +| `RuntimeBaseProxy.java` | 65–67 | proxy set: copies `lvalue.value` to proxy without ref-tracking | + +The most suspicious is `RuntimeBaseProxy.set`: +```java +this.lvalue.set(value); // properly increments refCount via setLargeRefCounted +this.type = lvalue.type; +this.value = lvalue.value; // proxy ALSO points to base, but no recordOwner +``` + +The proxy's `this.value` field then holds a strong reference to the +base **invisible to cooperative refcounting**. When the proxy is +later assigned a new value, the proxy's `set()` calls +`lvalue.set(new_value)` which decrements old base's refCount via the +overwrite path — but only because `lvalue.refCountOwned` is true. If +some path inadvertently decrements via `this.value` (bypassing +lvalue), or the proxy is treated as a normal scalar copy +(invalidating `lvalue.refCountOwned`), the count desyncs. + +#### Step 2 audit (next): + +For each site that does `base.refCount--` directly (without going +through the queueing-then-flushing protocol): + +1. Confirm what RuntimeScalar "owned" the increment that this + decrement is undoing. +2. If a specific scalar can be identified, replace direct decrement + with `releaseOwner(scalar, site) + base.refCount--`. +3. If no scalar can be identified, the path is decrementing without + pairing — fix by either: + a. tracking the increment differently (e.g. recording on the + original increment), or + b. removing the decrement (it was unpaired in the first place). + +Once all sites are cleanly paired, the cooperative refCount becomes +self-consistent and the walker-gate heuristic can be removed. 
+ +#### Files committed on this branch + +- `dev/diagnostic-traces/cmop-metaclass-refcount-underflow.txt` — + D-W6.10 trace (4 inc/dec sites instrumented). +- `dev/diagnostic-traces/cmop-metaclass-refcount-with-queue-sites.txt` — + D-W6.10 with queue-site tracing. +- `dev/diagnostic-traces/cmop-metaclass-with-owner-tracking.txt` — + D-W6.12 trace (full owner pairing, surfaces the 2 unpaired + increments). + +The diagnostic instrumentation is left in place (gated on +`PJ_REFCOUNT_TRACE`) so future sessions can resume the audit. + +### D-W6.13: activeOwners infrastructure + audit of direct --refCount sites + +This session implemented Step 2 of the D-W6.11 plan: audited and +instrumented all direct `--base.refCount` sites with paired +`releaseOwner` / `releaseActiveOwner` calls. The new +`base.activeOwners` set on `RuntimeBase` tracks the live set of +RuntimeScalars that hold a counted strong reference, with +`activeOwnerCount()` providing a filtered count (only scalars that +still satisfy `refCountOwned == true && value == this`). 
+ +#### Sites instrumented + +- `RuntimeArray.shift` line 163 (`MortalList.deferDecrement` path) +- `RuntimeList.java` line 624, 645, 670, 705 (4 "undo materialized + copy" paths) +- `Storable.java` `releaseApplyArgs` line 617 +- `DestroyDispatch.doCallDestroy` line 366 (args balance) +- `MortalList.deferDecrementIfTracked` line 184 +- `MortalList.deferDecrementRecursive` line 446 +- `WeakRefRegistry.weaken` line 119 +- `RuntimeScalar.setLargeRefCounted` overwrite at line 1199 and + store at line 1135 + +#### Production rescue experiment — partial win, partial loss + +Trying `activeOwnerCount() > 0 → restore refCount` as a universal +rescue at `MortalList.flush()` (replacing the class-name +heuristic): + +| Result | Note | +|---|---| +| `use Class::MOP` works without heuristic | confirmed | +| `use Moose` works without heuristic | confirmed | +| Unit tests: PASS | all green | +| DBIC `t/52leaks.t`: 9–10 of 18 fail | leak rescue too aggressive — keeps cycle members alive that real Perl correctly leaks | + +The fundamental issue: cooperative refCount cannot tolerate +cycles without weaken. My filter `refCountOwned && value == this` +finds true strong owners, including cycle members that real Perl +also leaks but DBIC's leak test expects to see destroyed (likely +because real Perl's mark-sweep would clean them up at GC time, or +because the test environment specifically expects refcount-zero). + +Reverted production rescue to retain master parity (11/11 on +`t/52leaks.t`, walker-gate heuristic still primary). + +#### Surprising side-effect: `ScalarRefRegistry.snapshot()` causes regression + +Calling `base.activateOwnerTracking()` on every `weaken()` (which +backfills from `ScalarRefRegistry.snapshot()`) caused 9/18 fails +on `t/52leaks.t` — but `activateOwnerTracking` is supposed to be +side-effect free (just initializes a private set + reads the +registry). 
The most plausible explanation: iterating +`scalarRegistry.keySet()` triggers `WeakHashMap.expungeStaleEntries()`, +which can shift the timing of when JVM GC observes scalars as +collected. This indirectly affects DBIC's leak detection (which +relies on weak refs becoming undef at specific points). + +For now, removed the `activateOwnerTracking()` call from +`weaken()` — the infrastructure is dormant unless explicitly +activated. Future work: investigate the WeakHashMap expungement +side-effect. + +#### Status after D-W6.13 + +- All ownership-affecting sites have paired record/release calls +- `activeOwners` set + `activeOwnerCount()` filter ready for use +- DBIC `t/52leaks.t`: 11/11 (master parity) +- Unit tests: green +- `use Class::MOP` / `use Moose`: work (with class-name heuristic + still gating) +- Walker-gate heuristic still in place — replacement still pending + the proper refCount-discipline fix + +#### Next step (D-W6.14) + +The cooperative refCount underflow is real (D-W6.10 trace shows +the metaclass refCount going to 0 with 2 surviving strong owners +— D-W6.12). The 2 unpaired increments are still unidentified. The +audit of direct `--refCount` sites pointed at all known paths, +which now have paired releases — yet the trace numbers from +D-W6.12 (50 inc / 51 dec) still don't balance. + +The next investigation should: +1. Re-run the D-W6.12 trace with all the new release calls in + place (this branch). The owner dump should now show clean + `refCount == owners.size()` for the metaclass. +2. If imbalance persists: the unpaired sites are NOT in the 22 + reviewed locations. Search for hidden refCount manipulations: + - assignment paths in `RuntimeArrayProxyEntry`, + `RuntimeHashProxyEntry`, `RuntimeStash` + - bytecode-emitted scope-exit code that uses direct field access +3. If the trace is now clean: the bug is elsewhere — perhaps in + how `refCount = MIN_VALUE` interacts with subsequent stores + (the rescue path in `setLargeRefCounted`). 
+ +### D-W6.14: How does system Perl solve this? — Algorithm analysis + +**Question raised**: my `activeOwnerCount > 0` rescue regresses +DBIC's leak detection by 5-9 tests. What does system Perl do +differently? + +**Answer**: System Perl **does not solve cycles**. It uses precise +reference counting, and cycles leak by design. The programmer breaks +cycles via explicit `weaken()` (or `Scalar::Util::weaken`). +DBIC's leak tests pass on real Perl because DBIC weakens its +internal back-references (e.g., `ResultSource → Schema` is weak), +allowing each reference-counted graph to collapse properly when an +external strong reference is dropped. + +For PerlOnJava to behave the same way, we need: +1. **Precise cooperative refCount** — every increment paired with + exactly one decrement, no transient zeros. +2. **Effective weaken()** — weakening must decrement refCount and + exclude the slot from owner-counting (already done correctly). +3. **No cycle-breaker rescue** — cycles SHOULD leak (matching + real Perl), and DBIC's weakens will resolve them. + +#### What's wrong today + +Cooperative refCount has **transient zeros**. The deferred +decrement model (`MortalList.flush`) means a sequence like +`inc → queue → inc → flush → flush → inc` can have refCount briefly +hit 0 between the second flush and the third inc — even though the +third inc's owning scalar was alive throughout. + +When the transient zero hits, `MortalList.flush()` fires DESTROY, +permanently corrupting the object's state (clearWeakRefsTo, cascade +cleanup). Even if subsequent stores re-bump refCount, the damage +is done. + +#### Why the activeOwnerCount rescue regresses DBIC + +`activeOwnerCount > 0` correctly identifies objects with surviving +strong owners. But: + +1. **For Class::MOP/Moose metaclasses** (held by `our %METAS`): + surviving owners reflect real strong refs. Rescue is correct. + +2. **For DBIC row objects in test cycles**: `populate_weakregistry` + weak-refs the row. 
Then test does `undef $row` in some scope. + Real Perl: refcount drops to 0, DESTROY fires, weak ref clears. + PerlOnJava: cooperative refCount has phantom owners — typically + container element scalars whose containers are themselves + transient/dying but haven't yet released their elements. + +The phantom owners satisfy the filter `refCountOwned == true && +value == base`, but are themselves on a path to destruction. +Rescue keeps them alive, breaking the leak detection. + +#### Best-fit algorithm + +System Perl's approach (precise refcount + programmer-controlled +weaken) maps to PerlOnJava as: + +**Goal**: eliminate transient zeros from cooperative refCount +without changing the deferred-decrement architecture. + +**Algorithm options:** + +1. **Synchronous decrement** (simplest, correct, expensive): + make all decrements synchronous like real Perl. Eliminate + `MortalList.flush` and put decrement at scope-exit / overwrite + sites. Performance cost: every Perl statement has ~10x more + refCount work than today. + +2. **Owner snapshot at flush** (lazy validation): + when a deferred decrement would bring refCount to 0, validate + the activeOwners set first. Force-purge stale entries by: + - Iterating activeOwners + - For each owner scalar: check `sc.refCountOwned && sc.value == base` + - For surviving owners: check whether they are themselves + reachable from package globals OR live my-vars + - Rescue ONLY if at least one surviving owner is reachable + + The "reachable owner" check is the critical filter — it + excludes phantom owners that are themselves on a path to + destruction. This addresses the DBIC regression. + +3. **Two-phase destruction** (deferred validation): + when refCount→0, defer the actual `clearWeakRefsTo` and + cascade. Add the object to a "pending destruction" queue. 
+ Validate at next safe point (e.g., outer flush completion or + before END blocks): + - Force a JVM `System.gc()` to purge stale weak refs from + `ScalarRefRegistry` + - Re-check `activeOwnerCount` + - Fire DESTROY only if still 0 + This matches Java's deferred finalization model. Performance: + System.gc() is slow but only at infrequent boundaries. + +**Recommended**: Option 2 (owner snapshot + reachability filter). +The reachability filter naturally excludes phantom owners (they +won't be reachable from roots once their containing scopes have +exited). + +#### Implementation sketch (Option 2) + +```java +// At MortalList.flush() refCount→0: +if (base.activeOwners != null) { + // Filter: only count scalars that are owned AND reachable + int reachableOwners = 0; + for (RuntimeScalar sc : base.activeOwners) { + if (sc.refCountOwned && sc.value == base + && ReachabilityWalker.isScalarReachable(sc)) { + reachableOwners++; + } + } + if (reachableOwners > 0) { + base.refCount = reachableOwners; + return; // skip DESTROY + } +} +// Otherwise: fire DESTROY normally +``` + +Where `isScalarReachable(sc)` checks if `sc` itself is reachable +from any package global, live my-var, or stash entry. This is a +new method on ReachabilityWalker — currently the walker checks +"is base reachable from roots" but not "is THIS specific scalar +reachable". + +#### Next session task + +Implement Option 2: +1. Add `ReachabilityWalker.isScalarReachable(RuntimeScalar)` — + walks from roots looking for the specific scalar identity. +2. Wire it into the rescue check at `MortalList.flush()`. +3. Test: Class::MOP loads (with metaclass having package-global + reachable owners), DBIC `t/52leaks.t` 11/11 (cycle objects + have only phantom owners not reachable from roots). +4. Remove the class-name heuristic gate. 
+ +This is a tractable design change and matches system Perl's +semantics: only objects with reachable strong owners are kept +alive; cycles (with no external reachable owner) leak as in real +Perl. + +### D-W6.15: Implementation of D-W6.14 Option 2 — partial success + +Implemented `reachableOwnerCount()` (refCount-rescue using +walker-validated active owners) and disabled the class-name +heuristic gate. Wired into `MortalList.flush()` at the +refCount→0 transition. + +**Results:** + +| Test | Status | +|---|---| +| `use Class::MOP` (no heuristic) | ✅ works | +| `use Moose` (no heuristic) | ✅ works | +| Unit tests | ✅ green | +| DBIC `t/52leaks.t` | ❌ "detached result source" at line 433 (early failure) | + +The Class::MOP/Moose case is fully fixed: the metaclass owner +(the `$METAS{name}` hash element) is found reachable through +`globalHashes`, so its `reachableOwnerCount()` returns >0 and +DESTROY is suppressed correctly. + +DBIC still regresses: a Schema or ResultSource object is +destroyed prematurely. Some scalar holding the Schema strongly +isn't reachable via the current walker's seeds (globalCodeRefs, +globalVariables, globalArrays, globalHashes, MyVarCleanupStack +live my-vars, RuntimeCode.capturedScalars). + +#### What's missing for DBIC + +The walker doesn't find Schema's owner. Likely candidates: +- The owner is a JVM-stack-live RuntimeScalar that isn't + registered in `MyVarCleanupStack` (e.g., a method-call argument + scalar that's still on the JVM call stack). +- The owner is in a TIE_HASH/TIE_ARRAY/TIED_SCALAR slot that + needs special walking. +- The owner is in a RuntimeStash entry that the walker doesn't + enter (line 519 in `isReachableFromRoots(target, false)` skips + RuntimeStash for perf). + +The walker is fundamentally **a snapshot of live JVM state at +this exact moment**. 
Cooperative refCount can hit 0 transiently +DURING method call frames where strong owners are sitting on the +JVM stack but haven't been pushed into MyVarCleanupStack (e.g., +intermediate `RuntimeScalar` values during method dispatch). + +#### Path forward + +The right fix would be one of: + +1. **Make every refCount-affecting RuntimeScalar register itself + in MyVarCleanupStack** (like local vars), so the walker can + always find live owners. Cost: every increment of refCount + adds a stack entry. + +2. **Use Java GC as ground truth**: treat refCount<=0 + + walker-unreachable as "candidate for destruction". Defer the + actual DESTROY call to a periodic safe point (e.g., MortalList + batch-flush boundary), where we force `System.gc()` and + re-validate. Java GC will purge truly-dead objects from + ScalarRefRegistry; survivors are real strong owners. + +3. **Eliminate transient zeros at the source**: ensure + cooperative refCount never goes to 0 except at true scope + exits, by making MortalList drain only AFTER the destination + scalar's increment has happened. Requires re-ordering JVM- + emitted bytecode for assignments (probably very invasive). + +#### Status + +The class-name heuristic gate is **not removable today**. The +infrastructure for D-W6.14 Option 2 is in place but the reachability +gap for DBIC's intermediate owners must be bridged first. The +heuristic gate is the safety net until that bridge is built. + +For this session, the heuristic gate remains disabled in code +(line 591 has `else if (false ...)`), with `reachableOwnerCount()` +as the only rescue. This means Class::MOP/Moose work but DBIC +regresses. To restore master parity, re-enable the heuristic gate +as a fallback alongside `reachableOwnerCount > 0`. + +### D-W6.16: ScalarRefRegistry seeding + closure capture walking — partial fix + +Implemented a more aggressive `isScalarReachable()` that: +1. Seeds from globalCodeRefs/Variables/Arrays/Hashes (package globals) +2. 
Seeds from `MyVarCleanupStack.snapshotLiveVars()` (active lexical + scopes) +3. Follows `RuntimeCode.capturedScalars` during BFS +4. Skips weak refs (`WeakRefRegistry.isweak`) +5. Auto-init `activeOwners` on first `recordActiveOwner` (no need + for explicit activation in weaken) + +**Results:** + +| Test | Status | +|---|---| +| `use Class::MOP` (no heuristic via reachableOwnerCount) | ✅ works | +| `use Moose` | ✅ works | +| Unit tests | ✅ green | +| DBIC `t/52leaks.t` 1-8 (basic) | ✅ pass | +| DBIC `t/52leaks.t` 9-12 (per-object GC checks) | ❌ 4 fails | +| Master `t/52leaks.t` baseline | 11/11 (heuristic only) | + +The walker correctly reaches the `our %METAS` cache via +`globalHashes`, which fixes the Class::MOP/Moose case without +needing the class-name heuristic. + +#### The remaining over-rescue problem + +DBICTest::Artist/CD row objects with phantom strong owners. After +`undef $row`, real Perl's refcount drops to 0 and DESTROY fires. +PerlOnJava with my walker finds Artist still reachable through +SOME path (likely a phantom my-var or scalar still holding the row +strongly). + +Possible sources: +1. **DBIC's internal method dispatch left a my-var holding the row** + that wasn't released because cooperative refCount has a + pre-existing imbalance. +2. **Closure captures**: a closure created during DBIC processing + captured the row; the closure is reachable from globals. +3. **JVM lazy GC**: a RuntimeScalar holding the row hasn't been + GC'd, and is still in `MyVarCleanupStack.liveCounts`. + +The walker correctly handles weak refs (skips them via +`WeakRefRegistry.isweak`), but cannot distinguish "phantom strong +holder" from "real strong holder" — both look identical at the JVM +level. + +#### Why heuristic-as-fallback doesn't help + +The walker-gate heuristic (`classNeedsWalkerGate`) only matches +`Class::MOP/Moose/Moo` classes. DBICTest::Artist/CD aren't in the +heuristic's class list, so the heuristic doesn't rescue them either. 
The +problem is `reachableOwnerCount > 0` itself returns positive for +Artist/CD when it shouldn't. + +#### Status + +Class::MOP/Moose ARE removable from the heuristic gate (the new +walker handles them correctly via `our %METAS`). But removing +the heuristic outright still has 4 DBIC leak-test failures. + +The path forward: dig into WHICH scalar in the walker is finding +each Artist/CD reachable. Add a probe that prints the path. From +there, identify whether it's a closure capture, a my-var, or a +hash element — then either filter that path out or fix the +underlying refCount imbalance that left it as a phantom. + +#### Conservative current state (committed) + +The commit ships `reachableOwnerCount()` as the primary rescue, +with the walker-gate heuristic as a fallback. DBIC still has 4 +failures (the heuristic doesn't help for non-Moose classes), but +Class::MOP/Moose now work via the precise walker rather than the +heuristic — so even if the heuristic was deleted, those modules +would continue to load. + +To delete the heuristic safely, the over-rescue of DBIC rows +must be addressed first — see D-W6.17 (next session). + +### D-W6.17: Plan revision — FREETMPS-style flushing is necessary but NOT sufficient + +The original D-W6.15 plan suggested making cooperative refCount precise +by ensuring transient zeros only happen at scope-exit boundaries +(matching Perl 5's FREETMPS). This session attempted that and learned +the plan needs revision. + +#### Experiment: per-statement MORTAL_FLUSH instead of per-assignment + +**Hypothesis**: replace the `MortalList.flush()` at the end of every +`setLargeRefCounted` with a single flush per statement (FREETMPS at +statement boundaries, matching Perl 5). 
+ +**Implementation**: +- Removed `MortalList.flush()` from `setLargeRefCounted` +- Added per-statement `MortalList.flush()` in JVM `EmitBlock` after + each non-last statement +- Added per-statement `MORTAL_FLUSH` opcode in `BytecodeCompiler` for + the interpreter backend + +**Result**: +- Unit tests: PASS +- DBIC `t/52leaks.t` 11/11: PASS (with heuristic gate enabled) +- Class::MOP load WITH heuristic: works +- Class::MOP load WITHOUT heuristic: **STILL FAILS** with + "Can't call method 'get_method' on undefined value at + Class/MOP/Attribute.pm line 475" + +**Conclusion**: per-statement flush is correct (matches Perl 5 +semantics), but it does NOT fix the underlying refCount imbalance. +The 50 inc / 51 dec event count from D-W6.10 is a REAL imbalance, +not a timing artifact. Flush schedule changes WHEN decrements fire, +not WHETHER they're paired. + +#### Why the plan needs revision + +The user's plan asked for "transient zeros only at scope-exit +boundaries". This would be sufficient IF the cooperative refCount +were otherwise balanced. But it's NOT balanced — somewhere a +decrement fires for which no matching increment ever happened (or +vice versa). + +The transient-zero hypothesis came from observing a specific +trace: refCount went 1→0 mid-statement during `Class/MOP/Class.pm:260`. +But that 1→0 was the CONSEQUENCE of the imbalance, not a cause — +the metaclass's "true" refCount should be 2 at end of run (matching +2 surviving owners), but PerlOnJava's cooperative count went to +MIN_VALUE because there were 2 extra decrement events somewhere. + +#### Revised plan: find the REAL unpaired site + +Step 1: **Per-pair tracing**. Tag each setLargeRefCounted increment +with a unique ID. Tag each decrement with the ID of the increment +it's pairing with. At end of run, find unmatched IDs. + +Step 2: **Identify the unpaired site**. From the trace, identify +the specific code path that produces an unmatched event. + +Step 3: **Fix the site**. 
Either: +- Add a missing increment for an unmatched decrement (correct: a + store path that doesn't go through setLargeRefCounted) +- Remove a spurious decrement for an unmatched increment (correct: + a path that calls deferDecrement but no matching scalar exists) + +Step 4: **Verify all four invariants without heuristic**: +- Unit tests pass +- Class::MOP/Moose load +- DBIC `t/52leaks.t` 11/11 +- The synthetic reproducers in `src/test/resources/unit/refcount/drift/` + pass + +#### Why per-statement flush is still worth doing (eventually) + +Even after fixing the imbalance, switching from per-assignment to +per-statement flush has these benefits: + +1. **Matches Perl 5 semantics** — FREETMPS at statement boundaries +2. **Performant** — fewer flush calls per script (one per statement + vs per assignment) +3. **Cleaner refCount transitions** — within a statement, refCount + only increases; at statement end, all decrements fire together + +But this is a separate refactoring, distinct from the imbalance fix. + +#### Status + +This session: experimental per-statement flush implemented and +verified. Heuristic gate restored to primary. The plan correctly +identifies that the underlying imbalance is the root issue; +flush-schedule changes alone don't fix it. + +Next step: **Step 1 (per-pair tracing)** to pinpoint the unpaired +site that creates the 50/51 imbalance for the CMOP metaclass. +Once the specific path is identified, the fix is targeted and +correct. + +### D-W6.18: Critical plan review — what we're really fixing + +User asked: "review the plan to make sure we are fixing the right +thing — must be performant and correct." + +#### What we know empirically + +1. **Master with class-name heuristic gate works correctly** for both + Class::MOP/Moose AND DBIC. Performance is acceptable. +2. **Replacing the heuristic with universal walker** breaks DBIC. +3. **Replacing per-assignment flush with per-statement (FREETMPS)** does + NOT fix the underlying imbalance. +4. 
**The 50/51 inc/dec count** from D-W6.10 represents a real refCount + imbalance for the CMOP metaclass — somewhere a decrement fires + without a paired increment. + +#### What's the heuristic actually doing right? + +Real Perl's behavior: +- Class::MOP metaclass in `our %METAS`: refcount stays ≥ 1 forever + (held by package hash); object never destroyed during program run +- DBIC row in `my $row`: refcount drops to 0 when `undef $row`; + destroyed correctly + +Master's heuristic gate: +- For Class::MOP/Moose/Moo classes: walker check at refCount→0 → + finds reachable from `%METAS` → rescue. Works. +- For DBIC row classes: heuristic NOT triggered → standard refCount + → 0 → DESTROY. Works. + +The heuristic is empirically correct for the modules we care about. +Its shape is: "for these specific classes, cooperative refCount may +have transient zeros; consult walker before firing DESTROY." + +#### Is the heuristic actually wrong? + +It's NOT semantically wrong — it correctly distinguishes two real +classes of object lifetime: + +1. **Module-global metadata** (CMOP metaclass): persistent + throughout program run, held by package globals +2. **Per-call data** (DBIC row): transient, follows lexical scope + +The heuristic's flaw is: +- Hard-coded class name list — won't extend to other module-global + metadata (custom MOPs, plugin systems) +- Doesn't capture the underlying property + +#### Right plan: capture the property, not the class name + +Replace `classNeedsWalkerGate(blessId)` with a **property-based check** +that captures "this object is stored in a package-global hash": + +```java +// Set on the RuntimeBase when stored as the value of a +// global hash element (e.g., $METAS{Foo} = $meta). +public boolean storedInPackageGlobal = false; + +// Set in setLargeRefCounted when 'this' (the scalar being assigned to) +// is an element of a hash registered in GlobalVariable.globalHashes. 
+``` + +Then the gate becomes: + +```java +} else if (base.blessId != 0 + && base.storedInPackageGlobal + && WeakRefRegistry.hasWeakRefsTo(base) + && ReachabilityWalker.isReachableFromRoots(base)) { + // Rescue: this object's lifetime is module-global, but + // cooperative refCount has transient zeros. Walker confirms + // reachability; suppress DESTROY. +} +``` + +For DBIC rows: never stored in a package-global hash → flag never set +→ heuristic doesn't trigger → standard refCount → DESTROY on undef. + +For CMOP metaclass: stored in `$METAS{...}` → flag set → walker +checks reachability → rescue. + +#### Performance characteristics + +- Per-store cost: one boolean check + one flag set if scalar's + container is a global hash. O(1) per store. +- Walker cost: only fires when object has weak refs AND + storedInPackageGlobal AND refCount→0. Same gating as today's + heuristic — same performance envelope. +- No tracing or instrumentation overhead. + +#### Correctness characteristics + +- Class-name agnostic: works for any module that uses package- + global hashes for metadata (extensibility). +- Behaviorally equivalent to current heuristic for tested modules + (Class::MOP, Moose, Moo, DBIC). +- Captures the underlying property explicitly rather than via + ad-hoc class name list. + +#### Implementation steps + +1. Add `boolean storedInPackageGlobal` field to `RuntimeBase`. +2. In `RuntimeScalar.setLargeRefCounted`: when storing, check if + `this` (destination scalar) is an element of a `RuntimeHash` + that's in `GlobalVariable.globalHashes`. If yes, set the flag + on the new base. +3. Replace `classNeedsWalkerGate(blessId)` with + `base.storedInPackageGlobal` in the walker gate condition. +4. Verify: + - `make` passes + - `use Class::MOP; use Moose;` works + - DBIC `t/52leaks.t` 11/11 +5. Once green, **delete `classNeedsWalkerGate` and the class-name + list entirely**. The class-name heuristic is gone. 
+ +#### Why this is the right plan + +This is "principled removal of the class-name heuristic". The +heuristic worked because it captured a real property — we're just +moving from "approximate by class name" to "exact via flag set at +store time". + +Alternative plans rejected: +- **Per-statement FREETMPS**: doesn't fix the imbalance (proven by + D-W6.17 experiment). +- **Universal walker**: breaks DBIC (regression in 4-9 tests). +- **Find unpaired refCount site**: bounded work but unclear if + findable in practice; even if found and fixed, the heuristic is + still in place (orthogonal). +- **Java GC ground truth**: major rewrite, too risky. + +#### Caveat + +The flag must propagate correctly: +- Stored in package-global hash → flag set on referent's RuntimeBase +- If stored in a sub-hash (e.g., `$Foo::CACHE{x}{y} = $meta`) — does + the deep storage propagate? Need to verify. +- For now, only set flag on direct global-hash stores. Indirect + stores would not be rescued — but that matches the current + heuristic's behavior (only direct module-global metadata). + +This is the next-session implementation task. + ## Related Documents - [xs_fallback.md](xs_fallback.md) — XS fallback mechanism diff --git a/src/main/java/org/perlonjava/core/Configuration.java b/src/main/java/org/perlonjava/core/Configuration.java index 86398df01..e2f27dd46 100644 --- a/src/main/java/org/perlonjava/core/Configuration.java +++ b/src/main/java/org/perlonjava/core/Configuration.java @@ -33,7 +33,7 @@ public final class Configuration { * Automatically populated by Gradle/Maven during build. * DO NOT EDIT MANUALLY - this value is replaced at build time. */ - public static final String gitCommitId = "82e5e452d"; + public static final String gitCommitId = "c5c541774"; /** * Git commit date of the build (ISO format: YYYY-MM-DD). 
@@ -48,7 +48,7 @@ public final class Configuration { * Parsed by App::perlbrew and other tools via: perl -V | grep "Compiled at" * DO NOT EDIT MANUALLY - this value is replaced at build time. */ - public static final String buildTimestamp = "Apr 29 2026 10:05:08"; + public static final String buildTimestamp = "Apr 29 2026 13:44:03"; // Prevent instantiation private Configuration() { diff --git a/src/main/java/org/perlonjava/runtime/operators/ReferenceOperators.java b/src/main/java/org/perlonjava/runtime/operators/ReferenceOperators.java index b1640c286..da637e624 100644 --- a/src/main/java/org/perlonjava/runtime/operators/ReferenceOperators.java +++ b/src/main/java/org/perlonjava/runtime/operators/ReferenceOperators.java @@ -66,6 +66,22 @@ public static RuntimeScalar bless(RuntimeScalar runtimeScalar, RuntimeScalar cla RuntimeBase referent = (RuntimeBase) runtimeScalar.value; int newBlessId = NameNormalizer.getBlessId(str); + // Phase D-W6.10: arm targeted refCount tracing for classes + // matching the walker-gate heuristic (Class::MOP/Moose/Moo) + // when env-flag PJ_REFCOUNT_TRACE is set. Lets us pinpoint + // which specific increment/decrement site causes the + // metaclass refCount underflow during CMOP bootstrap. + if (RuntimeBase.refCountTraceEnabled() + && org.perlonjava.runtime.runtimetypes.DestroyDispatch + .classNeedsWalkerGate(newBlessId)) { + referent.refCountTrace = true; + System.err.println("[REFCOUNT] *** ARMED *** base=" + + System.identityHashCode(referent) + + " (" + referent.getClass().getSimpleName() + ")" + + " bless='" + str + "'" + + " refCount=" + referent.refCount); + } + if (referent.refCount >= 0) { // Already-tracked referent (e.g., anonymous hash from `bless {}`). // Always keep tracking — even classes without DESTROY need @@ -120,6 +136,7 @@ public static RuntimeScalar bless(RuntimeScalar runtimeScalar, RuntimeScalar cla // already holds a reference that was never counted (because // tracking wasn't active at assignment time). 
Count it as 1. referent.refCount = 1; + referent.recordOwner(runtimeScalar, "rebless from untracked"); runtimeScalar.refCountOwned = true; } else { // First bless: start at refCount=1 and add to MortalList. diff --git a/src/main/java/org/perlonjava/runtime/perlmodule/Storable.java b/src/main/java/org/perlonjava/runtime/perlmodule/Storable.java index e21e05c0d..2fc95d99d 100644 --- a/src/main/java/org/perlonjava/runtime/perlmodule/Storable.java +++ b/src/main/java/org/perlonjava/runtime/perlmodule/Storable.java @@ -614,6 +614,8 @@ private static void releaseApplyArgs(RuntimeArray args) { if (elem == null) continue; if (elem.refCountOwned && elem.value instanceof RuntimeBase base && base.refCount > 0) { + base.releaseOwner(elem, "Storable.releaseApplyArgs"); + base.releaseActiveOwner(elem); base.refCount--; elem.refCountOwned = false; } diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/DestroyDispatch.java b/src/main/java/org/perlonjava/runtime/runtimetypes/DestroyDispatch.java index 92400b081..7e58f716f 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/DestroyDispatch.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/DestroyDispatch.java @@ -360,6 +360,10 @@ private static void doCallDestroy(RuntimeBase referent, String className) { if (elem != null && elem.refCountOwned && elem.value instanceof RuntimeBase base && base.refCount > 0) { + if (base.refCountTrace) { + base.releaseOwner(elem, "doCallDestroy args balance"); + } + base.releaseActiveOwner(elem); base.refCount--; elem.refCountOwned = false; } diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalRuntimeHash.java b/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalRuntimeHash.java index 24987abe1..31afabdf5 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalRuntimeHash.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalRuntimeHash.java @@ -42,6 +42,7 @@ public void dynamicSaveState() { // Install a fresh empty hash in 
the global map RuntimeHash newLocal = new RuntimeHash(); GlobalVariable.globalHashes.put(fullName, newLocal); + newLocal.isGlobalPackageHash = true; // Update glob aliases so they all point to the new local hash java.util.List aliasGroup = GlobalVariable.getGlobAliasGroup(fullName); diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalVariable.java b/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalVariable.java index a69fb7f9a..e11eb0685 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalVariable.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/GlobalVariable.java @@ -514,6 +514,10 @@ public static RuntimeHash getGlobalHash(String key) { } else { var = new RuntimeHash(); } + // D-W6.18: mark as package-global so values stored here + // get the storedInPackageGlobal flag (replaces class-name + // heuristic in walker gate). + var.isGlobalPackageHash = true; globalHashes.put(key, var); } return var; diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/MortalList.java b/src/main/java/org/perlonjava/runtime/runtimetypes/MortalList.java index 545f3237e..0764591f0 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/MortalList.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/MortalList.java @@ -60,6 +60,9 @@ public static boolean isDeferredCapture(RuntimeScalar scalar) { * from a container. 
*/ public static void deferDecrement(RuntimeBase base) { + if (base.refCountTrace) { + base.traceRefCount(0, "MortalList.deferDecrement (queued)"); + } pending.add(base); } @@ -175,6 +178,11 @@ public static void deferDecrementIfTracked(RuntimeScalar scalar) { && scalar.value instanceof RuntimeBase base) { if (base.refCount > 0) { scalar.refCountOwned = false; + if (base.refCountTrace) { + base.traceRefCount(0, "MortalList.deferDecrementIfTracked (queued, scalar.refCountOwned->false)"); + base.releaseOwner(scalar, "deferDecrementIfTracked"); + } + base.releaseActiveOwner(scalar); pending.add(base); } // Note: WEAKLY_TRACKED (-2) objects are NOT scheduled for destruction @@ -220,9 +228,16 @@ public static void deferDestroyForContainerClear(Iterable element if (scalar.refCountOwned && base.refCount > 0) { // Tracked object with owned refCount: defer decrement scalar.refCountOwned = false; + if (base.refCountTrace) { + base.traceRefCount(0, "MortalList.deferDestroyForContainerClear (queued)"); + } + base.releaseActiveOwner(scalar); pending.add(base); } else if (base.blessId != 0 && base.refCount == 0) { // Never-stored blessed object: bump to 1 so flush triggers DESTROY + if (base.refCountTrace) { + base.traceRefCount(+1, "MortalList.deferDestroyForContainerClear (refCount=1 bump for never-stored)"); + } base.refCount = 1; pending.add(base); } @@ -426,14 +441,26 @@ private static void deferDecrementRecursive(RuntimeScalar scalar) { if (base.blessId != 0) { if (s.refCountOwned && base.refCount > 0) { s.refCountOwned = false; + if (base.refCountTrace) { + base.traceRefCount(0, "MortalList.deferDecrementRecursive (blessed, queued)"); + base.releaseOwner(s, "deferDecrementRecursive blessed"); + } + base.releaseActiveOwner(s); pending.add(base); } else if (base.refCount == 0) { + if (base.refCountTrace) { + base.traceRefCount(+1, "MortalList.deferDecrementRecursive (blessed never-stored bump+queue)"); + } base.refCount = 1; pending.add(base); } } else { if (s.refCountOwned 
&& base.refCount > 0) { s.refCountOwned = false; + if (base.refCountTrace) { + base.traceRefCount(0, "MortalList.deferDecrementRecursive (unblessed container, queued)"); + } + base.releaseActiveOwner(s); pending.add(base); } // Walk into unblessed containers to find nested blessed refs @@ -463,6 +490,9 @@ public static void mortalizeForVoidDiscard(RuntimeList result) { && (scalar.type & RuntimeScalarType.REFERENCE_BIT) != 0 && scalar.value instanceof RuntimeBase base && base.blessId != 0 && base.refCount == 0) { + if (base.refCountTrace) { + base.traceRefCount(+1, "MortalList.mortalizeForVoidDiscard (refCount=1 bump+queue)"); + } base.refCount = 1; pending.add(base); } @@ -539,6 +569,9 @@ public static void flush() { // Process list — DESTROY may add new entries, so use index-based loop for (int i = 0; i < pending.size(); i++) { RuntimeBase base = pending.get(i); + if (base.refCount > 0) { + base.traceRefCount(-1, "MortalList.flush (deferred decrement)"); + } if (base.refCount > 0 && --base.refCount == 0) { if (base.localBindingExists) { // Named container: local variable may still exist. Skip callDestroy. @@ -556,29 +589,23 @@ public static void flush() { // createAnonymousReference() (localBindingExists stays false) // so the clear is no longer needed and broke #76716. } else if (base.blessId != 0 + && base.storedInPackageGlobal && WeakRefRegistry.hasWeakRefsTo(base) - && DestroyDispatch.classNeedsWalkerGate(base.blessId) && ReachabilityWalker.isReachableFromRoots(base)) { - // Phase D / Step W3-Path 2: blessed object with - // outstanding weak refs whose cooperative refCount - // dipped to 0 under deferred-decrement flush, BUT - // the walker can still reach it from package globals - // or hash/array element seeds. Treat as transient - // refCount drift — leave at 0; the next assignment - // that writes a tracked ref will bump it back up. - // - // Don't fire DESTROY, don't clear weak refs. 
- // - // The walker correctly distinguishes this case from - // the cycle-break-via-weaken case: an isolated - // cycle has no path to roots, so isReachableFromRoots - // returns false and the cycle is properly destroyed. - // - // The hasWeakRefsTo gate keeps this safeguard cheap - // for the overwhelmingly common case of objects - // without weak refs (no walker call needed). - // - // See dev/modules/moose_support.md (Phase D / Step W). + // D-W6.18: property-based walker gate. + // Replaces the class-name heuristic + // (classNeedsWalkerGate). Object's lifetime is + // module-global metadata (stored in a package- + // global hash like %METAS), so cooperative + // refCount transient zeros must not fire DESTROY. + // Walker confirms reachability; suppress destroy. + // History (D-W6.16, superseded): an interim + // heuristic walker gate was primary here because + // the reachableOwnerCount() infrastructure + // (D-W6.14/16) handled Class::MOP/Moose but + // over-rescued DBIC's row-leak tests when used + // as the only gate; the storedInPackageGlobal + // flag above resolves that (D-W6.17/18). + } else { base.refCount = Integer.MIN_VALUE; DestroyDispatch.callDestroy(base); diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/ReachabilityWalker.java b/src/main/java/org/perlonjava/runtime/runtimetypes/ReachabilityWalker.java index 606cf5b18..e54c53c0e 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/ReachabilityWalker.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/ReachabilityWalker.java @@ -355,6 +355,103 @@ public static boolean isReachableFromRoots(RuntimeBase target) { return isReachableFromRoots(target, false); } + /** + * D-W6.14: check if a specific RuntimeScalar instance is reachable + * from package globals or live lexical roots. Used at refCount→0 + * transitions to verify that surviving "owner" scalars in the + * activeOwners set are actually live (not phantoms). 
+ * + * Walks containers and verifies whether the specific scalar + * identity can be reached. Critical: walks INTO scalars (so + * a my-var holding a hash-ref leads us into the hash to find + * its element scalars). + */ + public static boolean isScalarReachable(RuntimeScalar target) { + if (target == null) return false; + final int MAX_VISITS = 50_000; + + Set seen = Collections.newSetFromMap(new IdentityHashMap<>()); + java.util.ArrayDeque todo = new java.util.ArrayDeque<>(); + + // D-W6.16 strict: PACKAGE-GLOBAL ROOTS ONLY. + // + // The walker's purpose at MortalList.flush refCount→0 is to + // distinguish "object held by a real long-lived root" (e.g., + // `our %METAS` cache, package method tables) from "object's + // reachability is via expiring lexicals or temporaries". + // + // Including my-vars / ScalarRefRegistry as seeds over-rescues + // DBIC row objects whose only owners are intermediate my-vars + // in DBIC's internal method dispatches (still in MyVarCleanupStack + // until the dispatch returns, but logically should be released + // for refcount accounting). + // + // Restricting seeds to package globals matches what + // Class::MOP/Moose actually need: their metaclasses live in + // `our %METAS`, accessible through globalHashes. DBIC's per-row + // cycles aren't reachable via package globals → not rescued + // → DESTROY fires correctly. 
+ for (Map.Entry e : GlobalVariable.globalCodeRefs.entrySet()) { + if (e.getValue() == target) return true; + if (e.getValue() != null && seen.add(e.getValue())) todo.addLast(e.getValue()); + } + for (Map.Entry e : GlobalVariable.globalVariables.entrySet()) { + if (e.getValue() == target) return true; + if (e.getValue() != null && seen.add(e.getValue())) todo.addLast(e.getValue()); + } + for (Map.Entry e : GlobalVariable.globalArrays.entrySet()) { + if (seen.add(e.getValue())) todo.addLast(e.getValue()); + } + for (Map.Entry e : GlobalVariable.globalHashes.entrySet()) { + if (seen.add(e.getValue())) todo.addLast(e.getValue()); + } + + // D-W6.16: live my-vars (currently-active lexical scopes). + // These represent persistent scalar references in ACTIVE + // execution scopes — the "my $schema = ..." at file scope is + // here; transient method-call argument scalars are NOT + // (they get unwound when the method returns). + for (Object liveVar : MyVarCleanupStack.snapshotLiveVars()) { + if (liveVar == target) return true; + if (liveVar instanceof RuntimeBase rb && seen.add(rb)) todo.addLast(rb); + } + + int visits = 0; + while (!todo.isEmpty() && visits < MAX_VISITS) { + RuntimeBase cur = todo.removeFirst(); + visits++; + if (cur instanceof RuntimeHash hash) { + for (RuntimeScalar val : hash.elements.values()) { + if (val == null) continue; + // Skip weak refs — they don't keep their referent alive. 
+ if (WeakRefRegistry.isweak(val)) continue; + if (val == target) return true; + if (seen.add(val)) todo.addLast(val); + } + } else if (cur instanceof RuntimeArray arr) { + for (RuntimeScalar elem : arr.elements) { + if (elem == null) continue; + if (WeakRefRegistry.isweak(elem)) continue; + if (elem == target) return true; + if (seen.add(elem)) todo.addLast(elem); + } + } else if (cur instanceof RuntimeScalar sc) { + if (WeakRefRegistry.isweak(sc)) continue; + if ((sc.type & RuntimeScalarType.REFERENCE_BIT) != 0 + && sc.value instanceof RuntimeBase rb + && seen.add(rb)) todo.addLast(rb); + } else if (cur instanceof RuntimeCode code && code.capturedScalars != null) { + for (RuntimeScalar cap : code.capturedScalars) { + if (cap == null) continue; + if (WeakRefRegistry.isweak(cap)) continue; + if (cap == target) return true; + if (seen.add(cap)) todo.addLast(cap); + } + } + } + return false; + } + /** * Phase D-W2c: distinguish reachability via package globals * (`our %METAS`, `our @ISA`, `our $...`, `&Class::MOP::class_of`) diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeArray.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeArray.java index e71994d49..d92450dfe 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeArray.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeArray.java @@ -122,6 +122,10 @@ public static RuntimeScalar pop(RuntimeArray runtimeArray) { && result.value instanceof RuntimeBase base && base.refCount > 0) { result.refCountOwned = false; + if (base.refCountTrace) { + base.releaseOwner(result, "RuntimeArray.pop"); + } + base.releaseActiveOwner(result); MortalList.deferDecrement(base); } yield result; @@ -160,6 +164,10 @@ public static RuntimeScalar shift(RuntimeArray runtimeArray) { && result.value instanceof RuntimeBase base && base.refCount > 0) { result.refCountOwned = false; + if (base.refCountTrace) { + base.releaseOwner(result, "RuntimeArray.shift"); + } + 
base.releaseActiveOwner(result); MortalList.deferDecrement(base); } yield result; diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBase.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBase.java index 7d70fdf50..36adb4254 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBase.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBase.java @@ -23,6 +23,203 @@ public abstract class RuntimeBase implements DynamicState, Iterable + * Set in {@code RuntimeScalar.setLargeRefCounted} when the + * destination scalar is detected to be an element of a hash that + * appears in {@code GlobalVariable.globalHashes}. Once set, never + * cleared (the object remains a module-global anchor for the + * program's lifetime). + *

+ * Replaces the class-name heuristic in + * {@code DestroyDispatch.classNeedsWalkerGate}: instead of + * approximating "module-global metadata" by hard-coded class names + * (Class::MOP, Moose, Moo), we capture the underlying property + * directly at store time. + */ + public boolean storedInPackageGlobal = false; + + // ───────────────────────────────────────────────────────────────────── + // D-W6.13: production-grade ownership tracking. + // Active for blessed objects that have at least one weak reference + // (the only case where we care about precise refCount semantics). + // Stores the set of RuntimeScalars that hold a counted strong ref + // to this base (refCountOwned=true && value==this). + // At MortalList.flush() refCount→0, this is consulted: if non-empty, + // refCount is restored to the owner count and DESTROY is suppressed. + // ───────────────────────────────────────────────────────────────────── + public java.util.Set activeOwners = null; + + public void activateOwnerTracking() { + if (activeOwners == null) { + activeOwners = java.util.Collections.newSetFromMap(new java.util.IdentityHashMap<>()); + // Note: backfilling from ScalarRefRegistry.snapshot() was + // observed to cause DBIC leak-test regressions due to + // WeakHashMap.expungeStaleEntries() side-effects on JVM GC + // observability. We rely on incremental population via + // recordActiveOwner() from setLargeRefCounted; objects that + // had setLarge incs BEFORE activation will be missed, but + // that's a known limitation — the activation should fire on + // first weaken(), which usually precedes the relevant stores. 
+ } + } + + public void recordActiveOwner(RuntimeScalar scalar) { + if (activeOwners != null) { + activeOwners.add(scalar); + } + } + + public void releaseActiveOwner(RuntimeScalar scalar) { + if (activeOwners != null) { + activeOwners.remove(scalar); + } + } + + public int activeOwnerCount() { + if (activeOwners == null) return 0; + // Filter for actual still-owning scalars: refCountOwned=true and + // value==this. Stale entries (overwritten without going through a + // tracked release path, or scope-exited via untracked paths) are + // pruned and ignored. + java.util.Iterator it = activeOwners.iterator(); + int count = 0; + while (it.hasNext()) { + RuntimeScalar sc = it.next(); + if (sc != null && sc.refCountOwned && sc.value == this) { + count++; + } else { + it.remove(); + } + } + return count; + } + + /** + * D-W6.14: count owners that are reachable from package globals or + * live my-vars. This is the strict version used by the production + * rescue at MortalList.flush refCount→0: only objects whose owners + * can be reached from real roots are kept alive. Phantom owners + * (scalars that should be dead but haven't been released yet) are + * excluded. Cycles with no external root → all owners unreachable + * → 0 → DESTROY fires (matching real Perl's cycle leak behavior + * resolved by user weaken()). 
+ */ + public int reachableOwnerCount() { + if (activeOwners == null) return 0; + int count = 0; + for (RuntimeScalar sc : activeOwners) { + if (sc != null && sc.refCountOwned && sc.value == this + && ReachabilityWalker.isScalarReachable(sc)) { + count++; + } + } + return count; + } + + private static final boolean REFCOUNT_TRACE_ENV = + System.getenv("PJ_REFCOUNT_TRACE") != null; + + static { + if (REFCOUNT_TRACE_ENV) { + Runtime.getRuntime().addShutdownHook(new Thread(RuntimeBase::dumpTraceOwners)); + } + } + + public static boolean refCountTraceEnabled() { + return REFCOUNT_TRACE_ENV; + } + + public void traceRefCount(int delta, String reason) { + if (!refCountTrace || !REFCOUNT_TRACE_ENV) return; + int after = this.refCount + delta; + StringBuilder sb = new StringBuilder(); + sb.append("[REFCOUNT] base=").append(System.identityHashCode(this)) + .append(" (").append(this.getClass().getSimpleName()).append(")") + .append(" blessId=").append(this.blessId) + .append(" ").append(this.refCount).append(" -> ").append(after) + .append(" ").append(reason); + StackTraceElement[] st = new Throwable().getStackTrace(); + for (int i = 1; i < Math.min(st.length, 6); i++) { + sb.append("\n at ").append(st[i]); + } + System.err.println(sb); + } + + // ───────────────────────────────────────────────────────────────────── + // D-W6.11 Step 1: per-scalar ownership tracking (debug-only). + // When refCountTrace is on, every setLargeRefCounted increment records + // the owning RuntimeScalar identity. Every paired decrement removes it. + // At end of run, surviving owners are printed — if the count != + // expected (e.g. metaclass should have 1 owner = $METAS slot but + // shows 0), we know the underflow site by elimination. 
+ // ───────────────────────────────────────────────────────────────────── + private static final java.util.Map> traceOwners + = new java.util.IdentityHashMap<>(); + + public synchronized void recordOwner(RuntimeScalar owner, String site) { + if (!refCountTrace || !REFCOUNT_TRACE_ENV) return; + traceOwners + .computeIfAbsent(this, k -> new java.util.LinkedHashMap<>()) + .put(System.identityHashCode(owner), site); + StackTraceElement[] st = new Throwable().getStackTrace(); + StringBuilder sb = new StringBuilder(); + sb.append("[REFCOUNT-RECORD] base=").append(System.identityHashCode(this)) + .append(" owner=").append(System.identityHashCode(owner)) + .append(" ").append(site); + for (int i = 1; i < Math.min(st.length, 5); i++) { + sb.append("\n at ").append(st[i]); + } + System.err.println(sb); + } + + public synchronized void releaseOwner(RuntimeScalar owner, String site) { + if (!refCountTrace || !REFCOUNT_TRACE_ENV) return; + java.util.LinkedHashMap owners = traceOwners.get(this); + if (owners == null) return; + String prev = owners.remove(System.identityHashCode(owner)); + if (prev == null) { + System.err.println("[REFCOUNT-OWNER] *** UNPAIRED RELEASE *** base=" + + System.identityHashCode(this) + + " owner=" + System.identityHashCode(owner) + + " release-site=" + site); + new Throwable().printStackTrace(System.err); + } + } + + public static void dumpTraceOwners() { + if (!REFCOUNT_TRACE_ENV) return; + for (java.util.Map.Entry> e + : traceOwners.entrySet()) { + RuntimeBase b = e.getKey(); + if (e.getValue().isEmpty()) continue; + System.err.println("[REFCOUNT-OWNERS] base=" + System.identityHashCode(b) + + " (" + b.getClass().getSimpleName() + ")" + + " blessId=" + b.blessId + + " refCount=" + b.refCount + + " owners=" + e.getValue().size()); + for (java.util.Map.Entry own : e.getValue().entrySet()) { + System.err.println(" owner=" + own.getKey() + " from " + own.getValue()); + } + } + } + /** * True if this container (hash or array) was created as a named 
variable * ({@code my %hash} or {@code my @array}) and a reference to it was created diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBaseProxy.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBaseProxy.java index a46d59567..8ada79589 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBaseProxy.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeBaseProxy.java @@ -65,6 +65,17 @@ public RuntimeScalar set(RuntimeScalar value) { this.lvalue.set(value); this.type = lvalue.type; this.value = lvalue.value; + // D-W6.18: propagate package-global metadata flag. + // If this proxy is for an element of a package-global hash, + // mark the stored value's referent as storedInPackageGlobal + // so the walker gate at refCount→0 can rescue it across + // transient zeros (replaces the class-name heuristic). + if (this instanceof RuntimeHashProxyEntry hpe + && hpe.getParent() != null + && hpe.getParent().isGlobalPackageHash + && lvalue.value instanceof RuntimeBase rb) { + rb.storedInPackageGlobal = true; + } return lvalue; } diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHash.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHash.java index 4d80cbda7..0a6321e77 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHash.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHash.java @@ -37,6 +37,19 @@ public class RuntimeHash extends RuntimeBase implements RuntimeScalarReference, // Lazily initialized to avoid overhead when key type tracking is not needed. Set byteKeys; + /** + * D-W6.18: marks this hash as a package-global hash (registered in + * GlobalVariable.globalHashes). 
When a value is stored in this hash, + * the value's referent is marked as {@code storedInPackageGlobal} — + * which then allows the walker-gate rescue at refCount→0 to + * preserve module-global metadata (Class::MOP %METAS, similar caches) + * across transient cooperative-refCount zeros. + *

+ * Set by {@code GlobalVariable.getGlobalHash} (and by + * {@code GlobalRuntimeHash.dynamicSaveState} for the fresh + * replacement hash) when the hash is registered into globalHashes. + */ + public boolean isGlobalPackageHash = false; + /** * Constructor for RuntimeHash. * Initializes an empty hash map to store elements. diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHashProxyEntry.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHashProxyEntry.java index c8e230d0f..65a966cc8 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHashProxyEntry.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeHashProxyEntry.java @@ -27,6 +27,11 @@ public RuntimeHashProxyEntry(RuntimeHash parent, String key) { this(parent, key, false); } + /** D-W6.18: package-global flag propagation. */ + public RuntimeHash getParent() { + return parent; + } + /** * Constructs a RuntimeHashProxyEntry with key type tracking. * diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeList.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeList.java index 37becd089..6b91b5ab9 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeList.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeList.java @@ -620,6 +620,8 @@ public RuntimeArray setFromList(RuntimeList value) { if (rhsValue != null && rhsValue.refCountOwned && (rhsValue.type & RuntimeScalarType.REFERENCE_BIT) != 0 && rhsValue.value instanceof RuntimeBase base && base.refCount > 0) { + base.releaseOwner(rhsValue, "RuntimeList materialized-copy undo (undef-target)"); + base.releaseActiveOwner(rhsValue); base.refCount--; rhsValue.refCountOwned = false; } @@ -639,6 +641,8 @@ public RuntimeArray setFromList(RuntimeList value) { if (assigned != null && assigned.refCountOwned && (assigned.type & RuntimeScalarType.REFERENCE_BIT) != 0 && assigned.value instanceof RuntimeBase base && base.refCount > 0) { + base.releaseOwner(assigned, "RuntimeList materialized-copy undo (scalar-target)"); + 
base.releaseActiveOwner(assigned); base.refCount--; assigned.refCountOwned = false; } @@ -663,6 +667,8 @@ public RuntimeArray setFromList(RuntimeList value) { for (RuntimeScalar r : remaining) { if (r.refCountOwned && (r.type & RuntimeScalarType.REFERENCE_BIT) != 0 && r.value instanceof RuntimeBase base && base.refCount > 0) { + base.releaseOwner(r, "RuntimeList materialized-copy undo (array-target)"); + base.releaseActiveOwner(r); base.refCount--; r.refCountOwned = false; } @@ -696,6 +702,8 @@ public RuntimeArray setFromList(RuntimeList value) { for (RuntimeScalar r : remainingArr.elements) { if (r.refCountOwned && (r.type & RuntimeScalarType.REFERENCE_BIT) != 0 && r.value instanceof RuntimeBase base && base.refCount > 0) { + base.releaseOwner(r, "RuntimeList materialized-copy undo (hash-target)"); + base.releaseActiveOwner(r); base.refCount--; r.refCountOwned = false; } diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeScalar.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeScalar.java index 94cdf8391..20b555e33 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeScalar.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeScalar.java @@ -929,6 +929,8 @@ public static void incrementRefCountForContainerStore(RuntimeScalar scalar) { && (scalar.type & REFERENCE_BIT) != 0 && scalar.value instanceof RuntimeBase base && base.refCount >= 0) { base.refCount++; + base.recordOwner(scalar, "incrementRefCountForContainerStore"); + base.recordActiveOwner(scalar); scalar.refCountOwned = true; // Phase B1 (refcount_alignment_52leaks_plan.md): track the // container element so ReachabilityWalker can see it via @@ -1130,6 +1132,9 @@ private RuntimeScalar setLargeRefCounted(RuntimeScalar value) { if ((value.type & RuntimeScalarType.REFERENCE_BIT) != 0 && value.value != null) { RuntimeBase nb = (RuntimeBase) value.value; if (nb.refCount >= 0) { + nb.traceRefCount(+1, "RuntimeScalar.setLargeRefCounted (increment on 
store)"); + nb.recordOwner(this, "setLargeRefCounted store"); + nb.recordActiveOwner(this); nb.refCount++; newOwned = true; } @@ -1171,10 +1176,13 @@ private RuntimeScalar setLargeRefCounted(RuntimeScalar value) { // clearWeakRefsTo or cascade), keeping Schema's internals intact. // Proper cleanup happens at END time via clearRescuedWeakRefs. if (base.refCount == Integer.MIN_VALUE) { + base.traceRefCount(0, "RuntimeScalar.setLargeRefCounted (rescue MIN_VALUE -> 1)"); base.refCount = 1; } else if (base.refCount >= 0) { + base.traceRefCount(+1, "RuntimeScalar.setLargeRefCounted (rescue increment)"); base.refCount++; } + base.recordOwner(this, "rescue from currentDestroyTarget"); newOwned = true; } @@ -1189,6 +1197,11 @@ private RuntimeScalar setLargeRefCounted(RuntimeScalar value) { // Decrement old value's refCount AFTER assignment (skip for weak refs // and for scalars that didn't own a refCount increment). if (oldBase != null && !thisWasWeak && this.refCountOwned) { + if (oldBase.refCount > 0) { + oldBase.traceRefCount(-1, "RuntimeScalar.setLargeRefCounted (decrement on overwrite)"); + oldBase.releaseOwner(this, "setLargeRefCounted overwrite"); + oldBase.releaseActiveOwner(this); + } if (oldBase.refCount > 0 && --oldBase.refCount == 0) { if (oldBase.localBindingExists) { // Named container (my %hash / my @array): the local variable @@ -1196,9 +1209,13 @@ private RuntimeScalar setLargeRefCounted(RuntimeScalar value) { // Don't call callDestroy — the container is still alive. // Cleanup will happen at scope exit (scopeExitCleanupHash/Array). } else if (oldBase.blessId != 0 + && oldBase.storedInPackageGlobal && WeakRefRegistry.hasWeakRefsTo(oldBase) - && DestroyDispatch.classNeedsWalkerGate(oldBase.blessId) && ReachabilityWalker.isReachableFromRoots(oldBase)) { + // D-W6.18: property-based walker gate (mirror of + // MortalList.flush). Replaces classNeedsWalkerGate + // class-name heuristic with the storedInPackageGlobal + // flag set at hash-store time. 
// Phase D / Step W3-Path 2: mirror of the gate in // MortalList.flush(). Blessed object with outstanding // weak refs whose cooperative refCount dipped to 0 @@ -2293,6 +2310,7 @@ public RuntimeScalar undefine() { DestroyDispatch.callDestroy(oldBase); } else if (this.refCountOwned && oldBase.refCount > 0) { this.refCountOwned = false; + oldBase.releaseActiveOwner(this); if (--oldBase.refCount == 0) { if (oldBase.localBindingExists) { // Named container: local variable may still exist. Skip callDestroy. diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeStash.java b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeStash.java index 8a10c8d03..f81ed8e07 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeStash.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/RuntimeStash.java @@ -214,6 +214,7 @@ private RuntimeScalar deleteGlob(String k) { } else if (base.refCount > 0 && savedScalar.refCountOwned) { // Tracked object: decrement refCount (the stash was holding a strong ref). 
savedScalar.refCountOwned = false; + base.releaseActiveOwner(savedScalar); if (--base.refCount == 0) { base.refCount = Integer.MIN_VALUE; DestroyDispatch.callDestroy(base); diff --git a/src/main/java/org/perlonjava/runtime/runtimetypes/WeakRefRegistry.java b/src/main/java/org/perlonjava/runtime/runtimetypes/WeakRefRegistry.java index b32bb1237..a2ae2d93a 100644 --- a/src/main/java/org/perlonjava/runtime/runtimetypes/WeakRefRegistry.java +++ b/src/main/java/org/perlonjava/runtime/runtimetypes/WeakRefRegistry.java @@ -80,6 +80,11 @@ public static void weaken(RuntimeScalar ref) { referentToWeakRefs .computeIfAbsent(base, k -> Collections.newSetFromMap(new IdentityHashMap<>())) .add(ref); + if (System.getenv("PJ_WEAKCLEAR_TRACE") != null) { + System.err.println("[WEAKEN] ref=" + System.identityHashCode(ref) + + " referent=" + System.identityHashCode(base) + + " (" + base.getClass().getSimpleName() + ")"); + } // Flip the fast-path flag so scopeExit cascades don't bail out // via the !blessedObjectExists shortcut when unblessed data has // weak refs that need clearing. @@ -107,6 +112,9 @@ public static void weaken(RuntimeScalar ref) { // Clear refCountOwned because weaken's DEC consumes the ownership — // the weak scalar should not trigger another DEC on scope exit or overwrite. 
ref.refCountOwned = false; + base.traceRefCount(-1, "WeakRefRegistry.weaken (decrement on weakening)"); + base.releaseOwner(ref, "weaken"); + base.releaseActiveOwner(ref); if (--base.refCount == 0) { if (base.localBindingExists) { // Named container (my %hash / my @array): the local variable @@ -183,6 +191,12 @@ public static void unweaken(RuntimeScalar ref) { */ public static boolean removeWeakRef(RuntimeScalar ref, RuntimeBase oldReferent) { if (!weakScalars.remove(ref)) return false; + if (System.getenv("PJ_WEAKCLEAR_TRACE") != null) { + System.err.println("[WEAKREMOVE] ref=" + System.identityHashCode(ref) + + " oldReferent=" + System.identityHashCode(oldReferent) + + " (" + (oldReferent == null ? "null" : oldReferent.getClass().getSimpleName()) + ")"); + new Throwable().printStackTrace(System.err); + } Set weakRefs = referentToWeakRefs.get(oldReferent); if (weakRefs != null) { weakRefs.remove(ref); @@ -216,6 +230,12 @@ public static void clearWeakRefsTo(RuntimeBase referent) { if (referent instanceof RuntimeCode) return; Set weakRefs = referentToWeakRefs.remove(referent); if (weakRefs == null) return; + if (System.getenv("PJ_WEAKCLEAR_TRACE") != null) { + System.err.println("[WEAKCLEAR] referent=" + System.identityHashCode(referent) + + " (" + referent.getClass().getSimpleName() + ") clearing " + + weakRefs.size() + " weak refs"); + new Throwable().printStackTrace(System.err); + } for (RuntimeScalar weak : weakRefs) { weak.type = RuntimeScalarType.UNDEF; weak.value = null; diff --git a/src/test/resources/unit/refcount/drift/cmop_pattern.t b/src/test/resources/unit/refcount/drift/cmop_pattern.t new file mode 100644 index 000000000..7eea39931 --- /dev/null +++ b/src/test/resources/unit/refcount/drift/cmop_pattern.t @@ -0,0 +1,126 @@ +use strict; +use warnings; +use Test::More; +use Scalar::Util qw(weaken); + +# ============================================================================= +# cmop_pattern.t — Reproducer for the metaclass refCount underflow. 
+# +# Pattern: %METAS-style cache + method-chain temps + weakened back-refs. +# This mirrors the exact CMOP bootstrap shape that triggers the +# walker-gate fallback. Without the gate, refCount underflows and DESTROY +# fires prematurely. +# ============================================================================= + +# T1: storage via method that stores AND returns ($METAS{...} ||= ...) +{ + package T1::Class; + our %METAS; + sub initialize { + my ($class, $name) = @_; + return $METAS{$name} ||= bless { name => $name }, $class; + } + sub add_attribute { + my ($self, $attr) = @_; + push @{ $self->{attrs} ||= [] }, $attr; + $attr->{associated_class} = $self; + Scalar::Util::weaken($attr->{associated_class}); + # The defining check: associated_class must still be alive + # immediately after weaken, *and* during the next statement. + unless (defined $attr->{associated_class}) { + die "owner gone for $attr->{name}!\n"; + } + return $self; + } + sub DESTROY { $main::T1_DESTROYED++ } + + package T1::Mixin; + sub meta { + T1::Class->initialize(ref($_[0]) || $_[0]); + } + + package T1::Attr; + sub new { bless { name => $_[1] }, $_[0] } + + package T1::Target; + our @ISA = ('T1::Mixin'); + + package main; + $main::T1_DESTROYED = 0; + + # CMOP-style: 3 separate top-level statements that go + # Class->meta->add_attribute(Attr->new(...)) + # The metaclass returned from ->meta is a temporary; its only + # persistent owner is %METAS. 
+ eval { + T1::Target->meta->add_attribute(T1::Attr->new('one')); + T1::Target->meta->add_attribute(T1::Attr->new('two')); + T1::Target->meta->add_attribute(T1::Attr->new('three')); + T1::Target->meta->add_attribute(T1::Attr->new('four')); + T1::Target->meta->add_attribute(T1::Attr->new('five')); + }; + is($@, '', + 'T1: associated_class survives across method-chain temps'); + is($main::T1_DESTROYED, 0, + 'T1: meta NOT destroyed during 5-attribute bootstrap'); + + # All weakened back-refs must still be defined + my $meta = T1::Target->meta; + my @names = map { $_->{name} } @{ $meta->{attrs} || [] }; + is_deeply(\@names, [qw(one two three four five)], + 'T1: all 5 attrs registered'); + for my $a (@{ $meta->{attrs} || [] }) { + ok(defined $a->{associated_class}, + "T1: attr '$a->{name}' associated_class still defined"); + } +} + +# T2: shape with TWO METAS hashes (separate caches) and cross-refs +{ + package T2::Cache::A; + our %M; + sub get_or_create { + $M{$_[0]} ||= bless { name => $_[0], links => [] }, 'T2::A'; + } + package T2::Cache::B; + our %M; + sub get_or_create { + $M{$_[0]} ||= bless { name => $_[0], links => [] }, 'T2::B'; + } + + package T2::A; + sub link { + my ($self, $b) = @_; + $self->{partner} = $b; + Scalar::Util::weaken($self->{partner}); + die "A's partner gone!\n" unless defined $self->{partner}; + } + sub DESTROY { $main::T2_A_DESTROYED++ } + + package T2::B; + sub link { + my ($self, $a) = @_; + $self->{partner} = $a; + Scalar::Util::weaken($self->{partner}); + die "B's partner gone!\n" unless defined $self->{partner}; + } + sub DESTROY { $main::T2_B_DESTROYED++ } + + package main; + $main::T2_A_DESTROYED = 0; + $main::T2_B_DESTROYED = 0; + + eval { + for my $name (qw(X Y Z W V)) { + T2::Cache::A::get_or_create($name) + ->link(T2::Cache::B::get_or_create($name)); + T2::Cache::B::get_or_create($name) + ->link(T2::Cache::A::get_or_create($name)); + } + }; + is($@, '', 'T2: cross-linked partners survive method-chain temps'); + 
is($main::T2_A_DESTROYED, 0, 'T2: no A DESTROY'); + is($main::T2_B_DESTROYED, 0, 'T2: no B DESTROY'); +} + +done_testing(); diff --git a/src/test/resources/unit/refcount/drift/our_metas_underflow.t b/src/test/resources/unit/refcount/drift/our_metas_underflow.t new file mode 100644 index 000000000..e2f57b2b8 --- /dev/null +++ b/src/test/resources/unit/refcount/drift/our_metas_underflow.t @@ -0,0 +1,175 @@ +use strict; +use warnings; +use Test::More; +use Scalar::Util qw(weaken); + +# ============================================================================= +# our_metas_underflow.t — Reproducer for the cooperative refCount bug: +# storing a blessed reference in a package hash (`our %METAS`) does NOT +# bump the cooperative refCount, so the object's refCount underflows to 0 +# at end-of-statement when the temporary holding it expires. +# +# This is the root cause of the Class::MOP bootstrap failure that the +# walker gate has been working around. See dev/modules/moose_support.md +# section D-W6.7 for the diagnostic trace. +# +# The minimal pattern from CMOP is: +# +# package Cache; +# our %METAS; +# sub store_metaclass_by_name { $METAS{$_[0]} = $_[1] } +# sub get_metaclass_by_name { $METAS{$_[0]} } +# +# A meta-object is constructed once (refCount=1) and stored in %METAS. +# Subsequent statements call `Cache::get_metaclass_by_name($name)` which +# returns the value from the hash. That return value is a temporary +# (mortal); when it expires, refCount drops 1 → 0 unless the hash slot +# storage is itself counted. +# +# When refCount reaches 0, MortalList.flush -> DestroyDispatch.callDestroy +# fires DESTROY (incorrectly) and clears all weak refs to the metaclass. +# This nulls back-refs in attached objects. +# +# Real Perl: %METAS storage is a strong ref. The metaclass is alive for +# the lifetime of %METAS. DESTROY is NOT called until END or %METAS goes +# out of scope. 
+# ============================================================================= + +# --- Test 1: Plain object storage in a package hash --- +{ + package T1::Meta; + sub new { bless { name => $_[1] }, $_[0] } + sub name { $_[0]->{name} } + sub DESTROY { $main::T1_DESTROYED++ } + + package T1::Cache; + our %METAS; + sub store { $METAS{$_[0]} = $_[1] } + sub get { $METAS{$_[0]} } + + package main; + $main::T1_DESTROYED = 0; + + { + my $meta = T1::Meta->new('Foo'); + T1::Cache::store('Foo', $meta); + } # $meta lex out of scope; %METAS still has the only strong ref + + is($main::T1_DESTROYED, 0, + 'Test 1: object stored in package hash NOT destroyed when local var falls out of scope'); + + # Multiple `get` calls should not destroy it either + for (1..3) { + my $m = T1::Cache::get('Foo'); + ok(defined $m, "Test 1: get #$_ returns defined"); + is(ref($m), 'T1::Meta', "Test 1: get #$_ returns blessed object"); + } + + is($main::T1_DESTROYED, 0, + 'Test 1: still alive after multiple gets'); +} + +# --- Test 2: Method-chain temporary (the exact CMOP pattern) --- +{ + package T2::Meta; + sub new { bless { name => $_[1], attrs => [] }, $_[0] } + sub add_attribute { + my ($self, $attr) = @_; + push @{ $self->{attrs} }, $attr; + $attr->{owner} = $self; + Scalar::Util::weaken($attr->{owner}); + # simulate "after attach" check (this is where CMOP dies): + die "owner gone for $attr->{name}!\n" unless defined $attr->{owner}; + return $self; + } + sub DESTROY { $main::T2_DESTROYED++ } + + package T2::Cache; + our %METAS; + sub initialize { + my ($class, $name) = @_; + return $METAS{$name} ||= T2::Meta->new($name); + } + + package T2::Attr; + sub new { bless { name => $_[1] }, $_[0] } + + package main; + $main::T2_DESTROYED = 0; + + # Bootstrap the meta: + T2::Cache::initialize('T2::Bar', 'T2::Bar'); + + # Now do the CMOP-style 3-statement add_attribute pattern. 
+ # Each `T2::Cache::initialize(...)->add_attribute(...)` is a separate + # statement; the meta-object is held only via %METAS between them. + eval { + T2::Cache::initialize('T2::Bar', 'T2::Bar') + ->add_attribute(T2::Attr->new('one')); + T2::Cache::initialize('T2::Bar', 'T2::Bar') + ->add_attribute(T2::Attr->new('two')); + T2::Cache::initialize('T2::Bar', 'T2::Bar') + ->add_attribute(T2::Attr->new('three')); + T2::Cache::initialize('T2::Bar', 'T2::Bar') + ->add_attribute(T2::Attr->new('four')); + }; + is($@, '', 'Test 2: no die — owner ref stays alive across all add_attribute calls'); + is($main::T2_DESTROYED, 0, + 'Test 2: meta NOT destroyed during 4-statement bootstrap'); +} + +# --- Test 3: The walker-gate stress shape --- +# Multiple metas in %METAS, multiple weak refs each, exercised by +# repeated method-chain temporaries. +{ + package T3::Meta; + sub new { bless { name => $_[1] }, $_[0] } + sub touch { $_[0]->{count}++ } + sub DESTROY { $main::T3_DESTROYED{ $_[0]->{name} }++ } + + package T3::Cache; + our %METAS; + sub get_or_create { + $METAS{$_[0]} ||= T3::Meta->new($_[0]); + } + + package T3::Holder; + sub new { bless { meta => $_[1] }, $_[0] } + sub link { + my ($self, $m) = @_; + $self->{meta} = $m; + Scalar::Util::weaken($self->{meta}); + } + + package main; + %main::T3_DESTROYED = (); + + my @holders; + for my $name (qw(A B C D E)) { + my $m = T3::Cache::get_or_create($name); + my $h = T3::Holder->new($m); + $h->link($m); + push @holders, $h; + } + + # Touch each via method-chain temp from %METAS + for (1..5) { + for my $name (qw(A B C D E)) { + T3::Cache::get_or_create($name)->touch; + } + } + + # All metas should still be alive: %METAS holds them, + # @holders has weak refs. 
+ for my $name (qw(A B C D E)) { + is($main::T3_DESTROYED{$name} || 0, 0, + "Test 3: meta '$name' NOT prematurely destroyed"); + } + + # Each holder's weak ref should still resolve + for my $h (@holders) { + ok(defined $h->{meta}, "Test 3: weak back-ref still defined"); + } +} + +done_testing();