Memory leak in HybridV2RenderSystem.AddNewChunks(0.50.0-preview.44)

There is a memory leak when AddNewBatch fails.
This is my temporary solution:

        // Groups the given new chunks into batches and registers each batch via
        // AddNewBatch. Returns the number of chunks that were successfully added.
        // NOTE(review): the sorted chunk array is handed back to the caller via the
        // out parameter so that the follow-up job iterates the SORTED order (the
        // unsorted input could make the job read chunks whose HybridChunkInfo was
        // never initialized) and so the caller's job can own its deallocation.
        private int AddNewChunks(NativeArray<ArchetypeChunk> newChunksX,
            // Changed: expose the sorted chunks to the caller instead of keeping
            // them local to this method.
            out NativeArray<ArchetypeChunk> sortedNewChunks)
        {
            int numValidNewChunks = 0;

            Debug.Assert(newChunksX.Length > 0, "Attempted to add new chunks, but list of new chunks was empty");

            var hybridChunkInfoType = GetComponentTypeHandle<HybridChunkInfo>();
            // Sort new chunks by RenderMesh so we can put
            // all compatible chunks inside one batch.
            var batchCreateInfoFactory = new BatchCreateInfoFactory
            {
                EntityManager = EntityManager,
                RenderMeshTypeHandle = GetSharedComponentTypeHandle<RenderMesh>(),
                EditorRenderDataTypeHandle = GetSharedComponentTypeHandle<EditorRenderData>(),
                HybridBatchPartitionHandle = GetSharedComponentTypeHandle<HybridBatchPartition>(),
                RenderMeshFlippedWindingTagTypeHandle = GetComponentTypeHandle<RenderMeshFlippedWindingTag>(),
#if UNITY_EDITOR
                DefaultEditorRenderData = new EditorRenderData
                {SceneCullingMask = UnityEditor.SceneManagement.EditorSceneManager.DefaultSceneCullingMask},
#else
                DefaultEditorRenderData = new EditorRenderData { SceneCullingMask = ~0UL },
#endif
            };
            var batchCompatibility = new BatchCompatibility(this, batchCreateInfoFactory, newChunksX);
            // This also sorts invalid chunks to the back.
            sortedNewChunks = batchCompatibility.SortChunks();

            // Greedily grow a batch [batchBegin, i) until a chunk is incompatible
            // or the entity-capacity limit would be exceeded, then flush it.
            int batchBegin = 0;
            int numInstances = sortedNewChunks[0].Capacity;

            // Deliberately runs one past the end (i == Length) so the final batch
            // is flushed by the `else` branch below.
            for (int i = 1; i <= sortedNewChunks.Length; ++i)
            {
                int instancesInChunk = 0;
                bool breakBatch = false;

                if (i < sortedNewChunks.Length)
                {
                    var chunk = sortedNewChunks[i];
                    breakBatch = !batchCompatibility.IsCompatible(EntityManager, this, batchBegin, i);
                    instancesInChunk = chunk.Capacity;
                }
                else
                {
                    breakBatch = true;
                }

                if (numInstances + instancesInChunk > kMaxEntitiesPerBatch)
                    breakBatch = true;

                if (breakBatch)
                {
                    int numChunks = i - batchBegin;

                    var createInfo = batchCompatibility.CreateInfoFor(batchBegin);
                    bool valid = AddNewBatch(ref createInfo, ref hybridChunkInfoType,
                        sortedNewChunks.GetSubArray(batchBegin, numChunks), numInstances);

                    // As soon as we encounter an invalid chunk, we know that all the rest are invalid
                    // too.
                    if (valid)
                        numValidNewChunks += numChunks;
                    else
                    {
                        // Early-out on failure: clean up the compatibility helper,
                        // but do NOT dispose sortedNewChunks — the caller passes it
                        // to a job marked [DeallocateOnJobCompletion], which frees it.
                        batchCompatibility.Dispose();
                        //sortedNewChunks.Dispose();

                        return numValidNewChunks;
                    }

                    batchBegin = i;
                    numInstances = instancesInChunk;
                }
                else
                {
                    numInstances += instancesInChunk;
                }
            }

            batchCompatibility.Dispose();

            // sortedNewChunks intentionally not disposed here; ownership transfers
            // to the caller's UpdateNewHybridChunksJob (see [DeallocateOnJobCompletion]).
            //sortedNewChunks.Dispose();

            return numValidNewChunks;
        }
            // Returns a new array containing the chunks reordered by batch
            // compatibility (this type is the comparer), with incompatible or
            // invalid chunks pushed toward the end.
            public NativeArray<ArchetypeChunk> SortChunks()
            {
                // Key-value sort of the index list; the chunk storage itself is
                // left untouched and read through SortedIndex below.
                m_SortIndices.Sort(this);

                // Allocator.TempJob (instead of Temp) so the result may be passed
                // into a job and deallocated on its completion.
                var result = new NativeArray<ArchetypeChunk>(
                    m_Chunks.Length,
                    Allocator.TempJob,
                    NativeArrayOptions.UninitializedMemory);

                // Every slot is written below, so uninitialized memory is safe.
                int chunkCount = m_Chunks.Length;
                for (int pos = 0; pos < chunkCount; ++pos)
                    result[pos] = m_Chunks[SortedIndex(pos)];

                return result;
            }
            if (numNewChunks > 0)
            {
                Profiler.BeginSample("AddNewChunks");
                int numValidNewChunks = AddNewChunks(newChunks.GetSubArray(0, numNewChunks), out var sortedNewChunks);
                Profiler.EndSample();

                hybridChunkUpdater.PreviousBatchIndex = -1;

                var updateNewChunksJob = new UpdateNewHybridChunksJob
                {
                    // Changed: feed the job the SORTED chunks returned by
                    // AddNewChunks. The first numValidNewChunks entries of the
                    // sorted array are exactly the chunks that were added, so
                    // scheduling over numValidNewChunks below never touches a
                    // chunk with uninitialized HybridChunkInfo.
                    NewChunks = sortedNewChunks,
                    HybridChunkInfo = hybridRenderedChunkType,
                    ChunkWorldRenderBounds = chunkWorldRenderBoundsRO,
                    HybridChunkUpdater = hybridChunkUpdater,
                };

#if DEBUG_LOG_INVALID_CHUNKS
                if (numValidNewChunks != numNewChunks)
                    Debug.Log($"Tried to add {numNewChunks} new chunks, but only {numValidNewChunks} were valid, {numNewChunks - numValidNewChunks} were invalid");
#endif

                // Only the valid prefix of the sorted array is processed.
                hybridCompleted = updateNewChunksJob.Schedule(numValidNewChunks, kNumNewChunksPerThread);
            }
    // Per-chunk initialization pass for freshly added Hybrid Renderer chunks:
    // marks each chunk's batch for update and processes its bounds.
    [BurstCompile]
    internal struct UpdateNewHybridChunksJob : IJobParallelFor
    {
        public ComponentTypeHandle<HybridChunkInfo> HybridChunkInfo;
        [ReadOnly] public ComponentTypeHandle<ChunkWorldRenderBounds> ChunkWorldRenderBounds;

        // Changed: NewChunks is the TempJob-allocated SORTED array produced by
        // AddNewChunks; [DeallocateOnJobCompletion] makes this job own its
        // cleanup, which is why AddNewChunks no longer disposes it.
        [DeallocateOnJobCompletion]
        public NativeArray<ArchetypeChunk> NewChunks;
        public HybridChunkUpdater HybridChunkUpdater;

        public void Execute(int index)
        {
            var chunk = NewChunks[index];
            var chunkInfo = chunk.GetChunkComponentData(HybridChunkInfo);

            ChunkWorldRenderBounds chunkBounds = chunk.GetChunkComponentData(ChunkWorldRenderBounds);

            // Only chunks that AddNewBatch accepted are scheduled, so Valid
            // should always hold here (the job is scheduled over numValidNewChunks).
            Debug.Assert(chunkInfo.Valid, "Attempted to process a chunk with uninitialized Hybrid chunk info");
            HybridChunkUpdater.MarkBatchForUpdates(chunkInfo.InternalIndex, true);
            HybridChunkUpdater.ProcessValidChunk(ref chunkInfo, chunk, chunkBounds.Value, true);
            chunk.SetChunkComponentData(HybridChunkInfo, chunkInfo);
            HybridChunkUpdater.FinishExecute();
        }
    }

8154509–1059542–HybridV2RenderSystem.cs (169 KB)

batchCompatibility and sortedNewChunks both appear to be allocated with Allocator.Temp, meaning that calling Dispose on them is optional and is essentially a no-op. I don't see how there is a memory leak in the original code.

And even if there were, the better solution would be to replace the allocations with World.UpdateAllocator. Like temp, those also don't need to be Disposed, but unlike Temp, you can pass them in and out of jobs and they have a lifecycle of 2 world updates (2 frames).

Maybe calling Dispose on Allocator.Temp allocations is optional under Burst, but it crashed in my test.
Also, UpdateNewHybridChunksJob.NewChunks is not the sorted chunk list, so its HybridChunkInfo can end up with an invalid value.

My English is poor, but trust the code — this bug has existed since 0.11.0-preview.42.

1 Like

I see now. Let me make a few things clear.
1) This is a bug that can cause crashes.
2) This is not a memory leak.
3) This only happens when the Mesh, Material, or Shader of a RenderMesh is null.

Here's how I am choosing to fix it. Rather than juggle a new container, I just update the existing container to reflect the assumptions made on it. https://github.com/Dreaming381/Kinemation-Skinning-Prototype/commit/79383f9754461a72291c971bdb9a8e2496189243

That fix is OK in my case.
I also found another crash bug when AddNewChunks runs after RemoveBatch. I can't explain why with my poor English, but I can show you how to fix it:

        // Changed: InternalIndexRange was previously computed as
        // m_SortedInternalIds.Max + 1, which SHRINKS when a high-id batch is
        // removed. It is now a monotonically growing high-water mark, so arrays
        // sized from it always cover every live batch's InternalIndex.
        internal int InternalIndexRange = 1;// => m_SortedInternalIds.Max + 1;

        // Pops an id off the freelist, records it as in use, and returns it.
        internal int AllocateInternalId()
        {
            EnsureHaveSpaceForNewBatch();

            // Take the last freelist entry...
            int id = m_InternalIdFreelist[m_InternalIdFreelist.Length - 1];

            // ...and shrink the freelist by one (the tail value was just read,
            // so UninitializedMemory is fine).
            m_InternalIdFreelist.Resize(m_InternalIdFreelist.Length - 1, NativeArrayOptions.UninitializedMemory);

            Debug.Assert(!m_SortedInternalIds.Contains(id), "Freshly allocated batch id found in list of used ids");

            m_SortedInternalIds.Add(id);

            // Changed: grow (never shrink) the high-water mark, because the
            // freelist can hand back an id larger than the current max after
            // batches have been removed.
            InternalIndexRange = math.max(InternalIndexRange, id + 1);

            return id;
        }

Can you at least tell me what line the crash happens on so I can figure out why?

in public JobHandle UpdateAllBatches(JobHandle inputDependencies)

            // Size the per-batch bitfields. NOTE(review): this must be an upper
            // bound on every live batch's InternalIndex; if InternalIndexRange
            // can shrink after RemoveBatch (see AllocateInternalId), a reused
            // high id makes maxBatchCount too small and MarkBatchForUpdates
            // asserts/crashes with "Batch index out of bounds".
            var maxBatchCount = math.max(kInitialMaxBatchCount, InternalIndexRange + numNewChunks);

            // Integer division with round up
            var maxBatchLongCount = (maxBatchCount + kNumBitsPerLong - 1) / kNumBitsPerLong;

            // One bit per batch; ClearMemory so all batches start unmarked.
            var batchRequiresUpdates = new NativeArray<long>(
                maxBatchLongCount,
                Allocator.TempJob,
                NativeArrayOptions.ClearMemory);

            var batchHadMovingEntities = new NativeArray<long>(
                maxBatchLongCount,
                Allocator.TempJob,
                NativeArrayOptions.ClearMemory);

Sometimes maxBatchCount < chunkInfo.InternalIndex, and it crashes with "Batch index out of bounds":

        // Flags the batch containing the given internal index as requiring an
        // update (and, when entities moved, as having had moving entities),
        // using atomic OR writes into shared bitfields so it can be called from
        // parallel job threads.
        public unsafe void MarkBatchForUpdates(int internalIndex, bool entitiesMoved)
        {
            // Map the batch index to a qword slot + bit mask within the bitfields.
            AtomicHelpers.IndexToQwIndexAndMask(internalIndex, out int qw, out long mask);

            // Removed a redundant brace-less `if` that wrapped this assert with
            // the exact negation of its condition — the assert already only
            // fires when the condition is false. If this fires, the bitfields
            // were sized from a maxBatchCount smaller than this batch's
            // InternalIndex and the writes below would go out of bounds.
            Debug.Assert(qw < BatchRequiresUpdates.Length && qw < BatchHadMovingEntities.Length,
                "Batch index out of bounds");

            var motionInfo = BatchMotionInfos[internalIndex];
            bool mustDisableMotionVectors = motionInfo.MotionVectorFlagSet && !entitiesMoved;

            // If entities moved, we always update the batch since bounds must be updated.
            // If no entities moved, we only update the batch if it requires motion vector disable.
            if (entitiesMoved || mustDisableMotionVectors)
                AtomicHelpers.AtomicOr((long*)BatchRequiresUpdates.GetUnsafePtr(), qw, mask);

            if (entitiesMoved)
                AtomicHelpers.AtomicOr((long*)BatchHadMovingEntities.GetUnsafePtr(), qw, mask);
        }
1 Like