Mirror of https://github.com/Ryujinx/Ryujinx.git, synced 2024-12-27 02:21:21 -08:00
22b2cb39af
* Turn `MemoryOperand` into a struct
* Remove `IntrinsicOperation`
* Remove `PhiNode`
* Remove `Node`
* Turn `Operand` into a struct
* Turn `Operation` into a struct
* Clean up pool management methods
* Add `Arena` allocator
* Move `OperationHelper` to `Operation.Factory`
* Move `OperandHelper` to `Operand.Factory`
* Optimize `Operation` a bit
* Fix `Arena` initialization
* Rename `NativeList<T>` to `ArenaList<T>`
* Reduce `Operand` size from 88 to 56 bytes
* Reduce `Operation` size from 56 to 40 bytes
* Add optimistic interning of Register & Constant operands
* Optimize `RegisterUsage` pass a bit
* Optimize `RemoveUnusedNodes` pass a bit

  Iterating in reverse order allows killing dependency chains in a single pass.

* Fix PPTC symbols
* Optimize `BasicBlock` a bit

  Reduce allocations from `_successor` & `DominanceFrontiers`.

* Fix `Operation` resize
* Make `Arena` expandable

  Change the arena allocator to be expandable by allocating in pages, with some of them being pooled. Currently 32 pages are pooled. An LRU removal mechanism should probably be added to it.

  Apparently MHR can allocate bitmaps large enough to exceed the 16MB limit for the type.

* Move `Arena` & `ArenaList` to `Common`
* Remove `ThreadStaticPool` & co
* Add `PhiOperation`
* Reduce `Operand` size from 56 to 48 bytes
* Add linear probing to `Operand` intern table
* Optimize `HybridAllocator` a bit
* Add `Allocators` class
* Tune `ArenaAllocator` sizes
* Add page removal mechanism to `ArenaAllocator`

  Remove pages which have not been used for more than 5s after each reset.

  I am on the fence about whether this would be better done with a Gen2 callback object like the one in System.Buffers.ArrayPool<T> to trim the pool, because right now if a large translation happens, the pages will be freed only after a reset. That reset may not happen for a while because no new translation is hit, but the arena base sizes are rather small.

* Fix `OOM` when allocating larger than page size in `ArenaAllocator`

  Tweak the resizing mechanism for Operand.Uses and Assignments.

* Optimize `Optimizer` a bit
* Optimize `Operand.Add<T>/Remove<T>` a bit
* Clean up `PreAllocator`
* Fix phi insertion order

  Reduce codegen diffs.

* Fix code alignment
* Use new heuristics for degree of parallelism
* Suppress warnings
* Address gdkchan's feedback

  Renamed `GetValue()` to `GetValueUnsafe()` to make it clearer that `Operand.Value` should usually not be modified directly.

* Add fast path to `ArenaAllocator`
* Assembly for `ArenaAllocator.Allocate(ulong)`:

      .L0:
          mov rax, [rcx+0x18]
          lea r8, [rax+rdx]
          cmp r8, [rcx+0x10]
          ja short .L2
      .L1:
          mov rdx, [rcx+8]
          add rax, [rdx+8]
          mov [rcx+0x18], r8
          ret
      .L2:
          jmp ArenaAllocator.AllocateSlow(UInt64)

  A few variables/fields had to be changed to ulong so that RyuJIT avoids emitting zero-extends.

* Implement a new heuristic to free pooled pages.

  If an arena is used often, it is more likely that its pages will be needed, so the pages are kept for longer (e.g. during PPTC rebuild or bursts of compilations). If it is not used often, then it is more likely that its pages will not be needed (e.g. after PPTC rebuild or bursts of compilations).

* Address riperiperi's feedback
* Use `EqualityComparer<T>` in `IntrusiveList<T>`

  Avoids a potential GC hole in `Equals(T, T)`.
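As context for the source below: `ArenaAllocator` is a bump-pointer arena, so callers allocate freely and reclaim everything at once with `Reset()`. A minimal usage sketch, with illustrative page size, page count, and element count (these values are not taken from Ryujinx's tuning):

    // Hypothetical usage of the allocator in this file: nothing is freed
    // per-allocation; Reset() reclaims every page in one step.
    var arena = new ArenaAllocator(pageSize: 64 * 1024, pageCount: 32);

    Span<int> temps = arena.AllocateSpan<int>(128); // fast path: one pointer bump
    temps.Fill(0);

    arena.Reset(); // pools/frees pages; 'temps' must not be used after this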
189 lines
5.0 KiB
C#
using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading;

namespace ARMeilleure.Common
{
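    // Bump-pointer arena backed by a pool of fixed-size pages. Allocation is a
    // pointer add on the fast path; all memory is reclaimed at once by Reset(),
    // which also trims pages that stay unused across consecutive resets.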
    unsafe sealed class ArenaAllocator : Allocator
    {
        private class PageInfo
        {
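            // Unused is set to 1 on every Reset() and cleared to 0 when the page is
            // handed out again; UnusedCounter counts consecutive resets without use.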
            public byte* Pointer;
            public byte Unused;
            public int UnusedCounter;
        }

        private int _lastReset;
        private ulong _index;
        private int _pageIndex;
        private PageInfo _page;
        private List<PageInfo> _pages;
        private readonly ulong _pageSize;
        private readonly uint _pageCount;
        private readonly List<IntPtr> _extras;

        public ArenaAllocator(uint pageSize, uint pageCount)
        {
            _lastReset = Environment.TickCount;

            // Set _index to pageSize so that the first allocation goes through the slow path.
            _index = pageSize;
            _pageIndex = -1;

            _page = null;
            _pages = new List<PageInfo>();
            _pageSize = pageSize;
            _pageCount = pageCount;

            _extras = new List<IntPtr>();
        }
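        // Typed view over a raw arena allocation. The span must not be used after
        // the next Reset(), since the backing page may be reused or freed.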
        public Span<T> AllocateSpan<T>(ulong count) where T : unmanaged
        {
            return new Span<T>(Allocate<T>(count), (int)count);
        }
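        // Fast path: bump _index within the current page. This compiles down to the
        // assembly listing in the commit message; the slow path is kept out-of-line
        // so this method stays small enough to inline.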
        public override void* Allocate(ulong size)
        {
            if (_index + size <= _pageSize)
            {
                byte* result = _page.Pointer + _index;

                _index += size;

                return result;
            }

            return AllocateSlow(size);
        }
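        // Slow path: requests larger than a page are serviced by the native
        // allocator and tracked in _extras; otherwise advance to the next page,
        // reusing a pooled one when available or allocating a fresh page.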
        [MethodImpl(MethodImplOptions.NoInlining)]
        private void* AllocateSlow(ulong size)
        {
            if (size > _pageSize)
            {
                void* extra = NativeAllocator.Instance.Allocate(size);

                _extras.Add((IntPtr)extra);

                return extra;
            }

            if (_index + size > _pageSize)
            {
                _index = 0;
                _pageIndex++;
            }

            if (_pageIndex < _pages.Count)
            {
                _page = _pages[_pageIndex];
                _page.Unused = 0;
            }
            else
            {
                _page = new PageInfo();
                _page.Pointer = (byte*)NativeAllocator.Instance.Allocate(_pageSize);

                _pages.Add(_page);
            }

            byte* result = _page.Pointer + _index;

            _index += size;

            return result;
        }
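        // Individual frees are no-ops; all arena memory is reclaimed in bulk by Reset().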
        public override void Free(void* block) { }
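        // Rewinds the arena: oversized blocks and pages beyond _pageCount are freed
        // immediately, and pooled pages that went unused for enough consecutive
        // resets (a threshold scaled by how recently the arena was last reset) are trimmed.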
        public void Reset()
        {
            _index = _pageSize;
            _pageIndex = -1;
            _page = null;

            // Free excess pages that were allocated.
            while (_pages.Count > _pageCount)
            {
                NativeAllocator.Instance.Free(_pages[_pages.Count - 1].Pointer);

                _pages.RemoveAt(_pages.Count - 1);
            }

            // Free extra blocks that are not page-sized.
            foreach (IntPtr ptr in _extras)
            {
                NativeAllocator.Instance.Free((void*)ptr);
            }

            _extras.Clear();

            // Free pooled pages that have not been used in a while. Remove pages at the back first,
            // because we try to keep the pages at the front alive, since they're more likely to be
            // hot and in the d-cache.
            bool removing = true;

            // If the arena is used frequently, keep pages for longer; otherwise keep them for a
            // shorter amount of time.
            int now = Environment.TickCount;
            int count = (now - _lastReset) switch {
                >= 5000 => 0,
                >= 2500 => 50,
                >= 1000 => 100,
                >= 10 => 1500,
                _ => 5000
            };

            for (int i = _pages.Count - 1; i >= 0; i--)
            {
                PageInfo page = _pages[i];

                if (page.Unused == 0)
                {
                    page.UnusedCounter = 0;
                }

                page.UnusedCounter += page.Unused;
                page.Unused = 1;

                // If the page has not been used for `count` resets, remove it.
                if (removing && page.UnusedCounter >= count)
                {
                    NativeAllocator.Instance.Free(page.Pointer);

                    _pages.RemoveAt(i);
                }
                else
                {
                    removing = false;
                }
            }

            _lastReset = now;
        }
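        // Releases all native memory; also invoked by the finalizer below, since
        // pages and extras are unmanaged and would otherwise leak.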
        protected override void Dispose(bool disposing)
        {
            if (_pages != null)
            {
                foreach (PageInfo info in _pages)
                {
                    NativeAllocator.Instance.Free(info.Pointer);
                }

                foreach (IntPtr ptr in _extras)
                {
                    NativeAllocator.Instance.Free((void*)ptr);
                }

                _pages = null;
            }
        }

        ~ArenaAllocator()
        {
            Dispose(false);
        }
    }
}