diff --git a/ARMeilleure/Memory/IJitMemoryAllocator.cs b/ARMeilleure/Memory/IJitMemoryAllocator.cs
index 5745a4bfe..19b696b0a 100644
--- a/ARMeilleure/Memory/IJitMemoryAllocator.cs
+++ b/ARMeilleure/Memory/IJitMemoryAllocator.cs
@@ -4,5 +4,7 @@
     {
         IJitMemoryBlock Allocate(ulong size);
         IJitMemoryBlock Reserve(ulong size);
+
+        ulong GetPageSize();
     }
 }
diff --git a/ARMeilleure/Signal/NativeSignalHandler.cs b/ARMeilleure/Signal/NativeSignalHandler.cs
index da02f76a8..e8dc6ddaf 100644
--- a/ARMeilleure/Signal/NativeSignalHandler.cs
+++ b/ARMeilleure/Signal/NativeSignalHandler.cs
@@ -71,8 +71,8 @@ namespace ARMeilleure.Signal
 
         private const uint EXCEPTION_ACCESS_VIOLATION = 0xc0000005;
 
-        private static ulong _pageSize = GetPageSize();
-        private static ulong _pageMask = _pageSize - 1;
+        private static ulong _pageSize;
+        private static ulong _pageMask;
 
         private static IntPtr _handlerConfig;
         private static IntPtr _signalHandlerPtr;
@@ -81,19 +81,6 @@ namespace ARMeilleure.Signal
         private static readonly object _lock = new object();
         private static bool _initialized;
 
-        private static ulong GetPageSize()
-        {
-            // TODO: This needs to be based on the current memory manager configuration.
-            if (OperatingSystem.IsMacOS() && RuntimeInformation.ProcessArchitecture == Architecture.Arm64)
-            {
-                return 1UL << 14;
-            }
-            else
-            {
-                return 1UL << 12;
-            }
-        }
-
         static NativeSignalHandler()
         {
             _handlerConfig = Marshal.AllocHGlobal(Unsafe.SizeOf<SignalHandlerConfig>());
@@ -102,12 +89,12 @@ namespace ARMeilleure.Signal
             config = new SignalHandlerConfig();
         }
 
-        public static void InitializeJitCache(IJitMemoryAllocator allocator)
+        public static void Initialize(IJitMemoryAllocator allocator)
        {
             JitCache.Initialize(allocator);
         }
 
-        public static void InitializeSignalHandler(Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
+        public static void InitializeSignalHandler(ulong pageSize, Func<IntPtr, IntPtr, IntPtr> customSignalHandlerFactory = null)
         {
             if (_initialized) return;
 
@@ -115,6 +102,9 @@ namespace ARMeilleure.Signal
             {
                 if (_initialized) return;
 
+                _pageSize = pageSize;
+                _pageMask = pageSize - 1;
+
                 ref SignalHandlerConfig config = ref GetConfigRef();
 
                 if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
diff --git a/ARMeilleure/Translation/Translator.cs b/ARMeilleure/Translation/Translator.cs
index 75c4df23e..cbf6baa00 100644
--- a/ARMeilleure/Translation/Translator.cs
+++ b/ARMeilleure/Translation/Translator.cs
@@ -81,7 +81,7 @@ namespace ARMeilleure.Translation
 
             if (memory.Type.IsHostMapped())
             {
-                NativeSignalHandler.InitializeSignalHandler();
+                NativeSignalHandler.InitializeSignalHandler(allocator.GetPageSize());
             }
         }
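The page size used by the fault handler now flows from the JIT memory allocator instead of a hard-coded OS/architecture probe. A minimal sketch of the resulting initialization order, assuming the parameterless `JitMemoryAllocator` constructor from this codebase:

```csharp
// Illustrative only: page-size plumbing after this change.
IJitMemoryAllocator allocator = new JitMemoryAllocator();

NativeSignalHandler.Initialize(allocator);                            // was InitializeJitCache
NativeSignalHandler.InitializeSignalHandler(allocator.GetPageSize()); // 0x1000, or 0x4000 on Apple silicon
```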
diff --git a/Ryujinx.Cpu/AddressSpace.cs b/Ryujinx.Cpu/AddressSpace.cs
new file mode 100644
index 000000000..cea3b56d2
--- /dev/null
+++ b/Ryujinx.Cpu/AddressSpace.cs
@@ -0,0 +1,470 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu
+{
+    class AddressSpace : IDisposable
+    {
+        private const ulong PageSize = 0x1000;
+
+        private const int DefaultBlockAlignment = 1 << 20;
+
+        private enum MappingType : byte
+        {
+            None,
+            Private,
+            Shared
+        }
+
+        private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
+        {
+            public ulong Address { get; private set; }
+            public ulong Size { get; private set; }
+            public ulong EndAddress => Address + Size;
+            public MappingType Type { get; private set; }
+
+            public Mapping(ulong address, ulong size, MappingType type)
+            {
+                Address = address;
+                Size = size;
+                Type = type;
+            }
+
+            public Mapping Split(ulong splitAddress)
+            {
+                ulong leftSize = splitAddress - Address;
+                ulong rightSize = EndAddress - splitAddress;
+
+                Mapping left = new Mapping(Address, leftSize, Type);
+
+                Address = splitAddress;
+                Size = rightSize;
+
+                return left;
+            }
+
+            public void UpdateState(MappingType newType)
+            {
+                Type = newType;
+            }
+
+            public void Extend(ulong sizeDelta)
+            {
+                Size += sizeDelta;
+            }
+
+            public int CompareTo(Mapping other)
+            {
+                if (Address < other.Address)
+                {
+                    return -1;
+                }
+                else if (Address <= other.EndAddress - 1UL)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return 1;
+                }
+            }
+        }
+
+        private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>
+        {
+            public ulong Address { get; private set; }
+            public ulong Size { get; private set; }
+            public ulong EndAddress => Address + Size;
+            public PrivateMemoryAllocation PrivateAllocation { get; private set; }
+
+            public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
+            {
+                Address = address;
+                Size = size;
+                PrivateAllocation = privateAllocation;
+            }
+
+            public PrivateMapping Split(ulong splitAddress)
+            {
+                ulong leftSize = splitAddress - Address;
+                ulong rightSize = EndAddress - splitAddress;
+
+                (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
+
+                PrivateMapping left = new PrivateMapping(Address, leftSize, leftAllocation);
+
+                Address = splitAddress;
+                Size = rightSize;
+
+                return left;
+            }
+
+            public void Map(MemoryBlock baseBlock, MemoryBlock mirrorBlock, PrivateMemoryAllocation newAllocation)
+            {
+                baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
+                mirrorBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address, Size);
+                PrivateAllocation = newAllocation;
+            }
+
+            public void Unmap(MemoryBlock baseBlock, MemoryBlock mirrorBlock)
+            {
+                if (PrivateAllocation.IsValid)
+                {
+                    baseBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
+                    mirrorBlock.UnmapView(PrivateAllocation.Memory, Address, Size);
+                    PrivateAllocation.Dispose();
+                }
+
+                PrivateAllocation = default;
+            }
+
+            public void Extend(ulong sizeDelta)
+            {
+                Size += sizeDelta;
+            }
+
+            public int CompareTo(PrivateMapping other)
+            {
+                if (Address < other.Address)
+                {
+                    return -1;
+                }
+                else if (Address <= other.EndAddress - 1UL)
+                {
+                    return 0;
+                }
+                else
+                {
+                    return 1;
+                }
+            }
+        }
+
+        private readonly MemoryBlock _backingMemory;
+        private readonly PrivateMemoryAllocator _privateMemoryAllocator;
+        private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
+        private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
+
+        private readonly object _treeLock;
+
+        private readonly bool _supports4KBPages;
+
+        public MemoryBlock Base { get; }
+        public MemoryBlock Mirror { get; }
+
+        public AddressSpace(MemoryBlock backingMemory, ulong asSize, bool supports4KBPages)
+        {
+            if (!supports4KBPages)
+            {
+                _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable | MemoryAllocationFlags.NoMap);
+                _mappingTree = new IntrusiveRedBlackTree<Mapping>();
+                _privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
+                _treeLock = new object();
+
+                _mappingTree.Add(new Mapping(0UL, asSize, MappingType.None));
+                _privateTree.Add(new PrivateMapping(0UL, asSize, default));
+            }
+
+            _backingMemory = backingMemory;
+            _supports4KBPages = supports4KBPages;
+
+            MemoryAllocationFlags asFlags = MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible;
+
+            Base = new MemoryBlock(asSize, asFlags);
+            Mirror = new MemoryBlock(asSize, asFlags);
+        }
+
+        public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+        {
+            if (_supports4KBPages)
+            {
+                Base.MapView(_backingMemory, pa, va, size);
+                Mirror.MapView(_backingMemory, pa, va, size);
+
+                return;
+            }
+
+            lock (_treeLock)
+            {
+                ulong alignment = MemoryBlock.GetPageSize();
+                bool isAligned = ((va | pa | size) & (alignment - 1)) == 0;
+
+                if (flags.HasFlag(MemoryMapFlags.Private) && !isAligned)
+                {
+                    Update(va, pa, size, MappingType.Private);
+                }
+                else
+                {
+                    // The update method assumes that shared mappings are already aligned.
+
+                    if (!flags.HasFlag(MemoryMapFlags.Private))
+                    {
+                        if ((va & (alignment - 1)) != (pa & (alignment - 1)))
+                        {
+                            throw new InvalidMemoryRegionException($"Virtual address 0x{va:X} and physical address 0x{pa:X} are misaligned and can't be aligned.");
+                        }
+
+                        ulong endAddress = va + size;
+                        va = BitUtils.AlignDown(va, alignment);
+                        pa = BitUtils.AlignDown(pa, alignment);
+                        size = BitUtils.AlignUp(endAddress, alignment) - va;
+                    }
+
+                    Update(va, pa, size, MappingType.Shared);
+                }
+            }
+        }
+
+        public void Unmap(ulong va, ulong size)
+        {
+            if (_supports4KBPages)
+            {
+                Base.UnmapView(_backingMemory, va, size);
+                Mirror.UnmapView(_backingMemory, va, size);
+
+                return;
+            }
+
+            lock (_treeLock)
+            {
+                Update(va, 0UL, size, MappingType.None);
+            }
+        }
+
+        private void Update(ulong va, ulong pa, ulong size, MappingType type)
+        {
+            Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
+
+            Update(map, va, pa, size, type);
+        }
+
+        private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
+        {
+            ulong endAddress = va + size;
+
+            for (; map != null; map = map.Successor)
+            {
+                if (map.Address < va)
+                {
+                    _mappingTree.Add(map.Split(va));
+                }
+
+                if (map.EndAddress > endAddress)
+                {
+                    Mapping newMap = map.Split(endAddress);
+                    _mappingTree.Add(newMap);
+                    map = newMap;
+                }
+
+                switch (type)
+                {
+                    case MappingType.None:
+                        if (map.Type == MappingType.Shared)
+                        {
+                            ulong startOffset = map.Address - va;
+                            ulong mapVa = va + startOffset;
+                            ulong mapSize = Math.Min(size - startOffset, map.Size);
+                            ulong mapEndAddress = mapVa + mapSize;
+                            ulong alignment = MemoryBlock.GetPageSize();
+
+                            mapVa = BitUtils.AlignDown(mapVa, alignment);
+                            mapEndAddress = BitUtils.AlignUp(mapEndAddress, alignment);
+
+                            mapSize = mapEndAddress - mapVa;
+
+                            Base.UnmapView(_backingMemory, mapVa, mapSize);
+                            Mirror.UnmapView(_backingMemory, mapVa, mapSize);
+                        }
+                        else
+                        {
+                            UnmapPrivate(va, size);
+                        }
+                        break;
+                    case MappingType.Private:
+                        if (map.Type == MappingType.Shared)
+                        {
+                            throw new InvalidMemoryRegionException($"Private mapping request at 0x{va:X} with size 0x{size:X} overlaps shared mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
+                        }
+                        else
+                        {
+                            MapPrivate(va, size);
+                        }
+                        break;
+                    case MappingType.Shared:
+                        if (map.Type != MappingType.None)
+                        {
+                            throw new InvalidMemoryRegionException($"Shared mapping request at 0x{va:X} with size 0x{size:X} overlaps mapping at 0x{map.Address:X} with size 0x{map.Size:X}.");
+                        }
+                        else
+                        {
+                            ulong startOffset = map.Address - va;
+                            ulong mapPa = pa + startOffset;
+                            ulong mapVa = va + startOffset;
+                            ulong mapSize = Math.Min(size - startOffset, map.Size);
+
+                            Base.MapView(_backingMemory, mapPa, mapVa, mapSize);
+                            Mirror.MapView(_backingMemory, mapPa, mapVa, mapSize);
+                        }
+                        break;
+                }
+
+                map.UpdateState(type);
+                map = TryCoalesce(map);
+
+                if (map.EndAddress >= endAddress)
+                {
+                    break;
+                }
+            }
+
+            return map;
+        }
+
+        private Mapping TryCoalesce(Mapping map)
+        {
+            Mapping previousMap = map.Predecessor;
+            Mapping nextMap = map.Successor;
+
+            if (previousMap != null && CanCoalesce(previousMap, map))
+            {
+                previousMap.Extend(map.Size);
+                _mappingTree.Remove(map);
+                map = previousMap;
+            }
+
+            if (nextMap != null && CanCoalesce(map, nextMap))
+            {
+                map.Extend(nextMap.Size);
+                _mappingTree.Remove(nextMap);
+            }
+
+            return map;
+        }
+
+        private static bool CanCoalesce(Mapping left, Mapping right)
+        {
+            return left.Type == right.Type;
+        }
+
+        private void MapPrivate(ulong va, ulong size)
+        {
+            ulong endAddress = va + size;
+
+            ulong alignment = MemoryBlock.GetPageSize();
+
+            // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
+            ulong vaAligned = BitUtils.AlignDown(va, alignment);
+            ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
+
+            ulong sizeAligned = endAddressAligned - vaAligned;
+
+            PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+            for (; map != null; map = map.Successor)
+            {
+                if (!map.PrivateAllocation.IsValid)
+                {
+                    if (map.Address < vaAligned)
+                    {
+                        _privateTree.Add(map.Split(vaAligned));
+                    }
+
+                    if (map.EndAddress > endAddressAligned)
+                    {
+                        PrivateMapping newMap = map.Split(endAddressAligned);
+                        _privateTree.Add(newMap);
+                        map = newMap;
+                    }
+
+                    map.Map(Base, Mirror, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize()));
+                }
+
+                if (map.EndAddress >= endAddressAligned)
+                {
+                    break;
+                }
+            }
+        }
+
+        private void UnmapPrivate(ulong va, ulong size)
+        {
+            ulong endAddress = va + size;
+
+            ulong alignment = MemoryBlock.GetPageSize();
+
+            // Shrink the range inwards based on page size to ensure we won't unmap memory that might be still in use.
+            ulong vaAligned = BitUtils.AlignUp(va, alignment);
+            ulong endAddressAligned = BitUtils.AlignDown(endAddress, alignment);
+
+            if (endAddressAligned <= vaAligned)
+            {
+                return;
+            }
+
+            ulong alignedSize = endAddressAligned - vaAligned;
+
+            PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+            for (; map != null; map = map.Successor)
+            {
+                if (map.PrivateAllocation.IsValid)
+                {
+                    if (map.Address < vaAligned)
+                    {
+                        _privateTree.Add(map.Split(vaAligned));
+                    }
+
+                    if (map.EndAddress > endAddressAligned)
+                    {
+                        PrivateMapping newMap = map.Split(endAddressAligned);
+                        _privateTree.Add(newMap);
+                        map = newMap;
+                    }
+
+                    map.Unmap(Base, Mirror);
+                    map = TryCoalesce(map);
+                }
+
+                if (map.EndAddress >= endAddressAligned)
+                {
+                    break;
+                }
+            }
+        }
+
+        private PrivateMapping TryCoalesce(PrivateMapping map)
+        {
+            PrivateMapping previousMap = map.Predecessor;
+            PrivateMapping nextMap = map.Successor;
+
+            if (previousMap != null && CanCoalesce(previousMap, map))
+            {
+                previousMap.Extend(map.Size);
+                _privateTree.Remove(map);
+                map = previousMap;
+            }
+
+            if (nextMap != null && CanCoalesce(map, nextMap))
+            {
+                map.Extend(nextMap.Size);
+                _privateTree.Remove(nextMap);
+            }
+
+            return map;
+        }
+
+        private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
+        {
+            return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
+        }
+
+        public void Dispose()
+        {
+            _privateMemoryAllocator.Dispose();
+            Base.Dispose();
+            Mirror.Dispose();
+        }
+    }
+}
\ No newline at end of file
diff --git a/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs b/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
index 0cf35c17b..4aa78d06c 100644
--- a/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
+++ b/Ryujinx.Cpu/Jit/JitMemoryAllocator.cs
@@ -7,5 +7,7 @@ namespace Ryujinx.Cpu.Jit
     {
         public IJitMemoryBlock Allocate(ulong size) => new JitMemoryBlock(size, MemoryAllocationFlags.None);
         public IJitMemoryBlock Reserve(ulong size) => new JitMemoryBlock(size, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.Jit);
+
+        public ulong GetPageSize() => MemoryBlock.GetPageSize();
     }
 }
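A hedged usage sketch of the new `AddressSpace` class above; the sizes and addresses are made up. On hosts without 4KB page support, `Map` goes through the mapping/private trees instead of aliasing the backing block directly:

```csharp
// Sketch, not part of the diff.
var backing = new MemoryBlock(0x10000000, MemoryAllocationFlags.Mirrorable);
bool supports4KBPages = MemoryBlock.GetPageSize() == 0x1000;

var addressSpace = new AddressSpace(backing, 1UL << 39, supports4KBPages);

// Private mappings tolerate 4KB alignment even on a 16KB-page host, because the
// implementation may back them with freshly allocated private storage.
addressSpace.Map(va: 0x21000, pa: 0x5000, size: 0x3000, MemoryMapFlags.Private);
addressSpace.Unmap(0x21000, 0x3000);

addressSpace.Dispose();
backing.Dispose();
```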
diff --git a/Ryujinx.Cpu/Jit/MemoryManager.cs b/Ryujinx.Cpu/Jit/MemoryManager.cs
index 21c50d51f..014d843b5 100644
--- a/Ryujinx.Cpu/Jit/MemoryManager.cs
+++ b/Ryujinx.Cpu/Jit/MemoryManager.cs
@@ -28,6 +28,9 @@ namespace Ryujinx.Cpu.Jit
         private readonly MemoryBlock _backingMemory;
         private readonly InvalidAccessHandler _invalidAccessHandler;
 
+        /// <inheritdoc/>
+        public bool Supports4KBPages => true;
+
         /// <summary>
         /// Address space width in bits.
         /// </summary>
@@ -76,7 +79,7 @@ namespace Ryujinx.Cpu.Jit
         }
 
         /// <inheritdoc/>
-        public void Map(ulong va, ulong pa, ulong size)
+        public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
         {
             AssertValidAddressAndSize(va, size);
 
@@ -91,9 +94,16 @@ namespace Ryujinx.Cpu.Jit
                 pa += PageSize;
                 remainingSize -= PageSize;
             }
+
             Tracking.Map(oVa, size);
         }
 
+        /// <inheritdoc/>
+        public void MapForeign(ulong va, nuint hostPointer, ulong size)
+        {
+            throw new NotSupportedException();
+        }
+
         /// <inheritdoc/>
         public void Unmap(ulong va, ulong size)
         {
@@ -378,6 +388,32 @@ namespace Ryujinx.Cpu.Jit
             return true;
         }
 
+        /// <inheritdoc/>
+        public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+        {
+            if (size == 0)
+            {
+                return Enumerable.Empty<HostMemoryRange>();
+            }
+
+            var guestRegions = GetPhysicalRegionsImpl(va, size);
+            if (guestRegions == null)
+            {
+                return null;
+            }
+
+            var regions = new HostMemoryRange[guestRegions.Count];
+
+            for (int i = 0; i < regions.Length; i++)
+            {
+                var guestRegion = guestRegions[i];
+                IntPtr pointer = _backingMemory.GetPointer(guestRegion.Address, guestRegion.Size);
+                regions[i] = new HostMemoryRange((nuint)(ulong)pointer, guestRegion.Size);
+            }
+
+            return regions;
+        }
+
         /// <inheritdoc/>
         public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
         {
@@ -386,6 +422,11 @@ namespace Ryujinx.Cpu.Jit
                 return Enumerable.Empty<MemoryRange>();
             }
 
+            return GetPhysicalRegionsImpl(va, size);
+        }
+
+        private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
+        {
             if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
             {
                 return null;
diff --git a/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs b/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
index c4e59db9f..856b6b9b0 100644
--- a/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
+++ b/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
@@ -5,6 +5,7 @@ using Ryujinx.Memory.Range;
 using Ryujinx.Memory.Tracking;
 using System;
 using System.Collections.Generic;
+using System.Linq;
 using System.Runtime.CompilerServices;
 using System.Threading;
 
@@ -37,20 +38,21 @@ namespace Ryujinx.Cpu.Jit
         private readonly InvalidAccessHandler _invalidAccessHandler;
         private readonly bool _unsafeMode;
 
-        private readonly MemoryBlock _addressSpace;
-        private readonly MemoryBlock _addressSpaceMirror;
+        private readonly AddressSpace _addressSpace;
         private readonly ulong _addressSpaceSize;
 
-        private readonly MemoryBlock _backingMemory;
         private readonly PageTable<ulong> _pageTable;
 
         private readonly MemoryEhMeilleure _memoryEh;
 
         private readonly ulong[] _pageBitmap;
 
+        /// <inheritdoc/>
+        public bool Supports4KBPages => MemoryBlock.GetPageSize() == PageSize;
+
         public int AddressSpaceBits { get; }
 
-        public IntPtr PageTablePointer => _addressSpace.Pointer;
+        public IntPtr PageTablePointer => _addressSpace.Base.Pointer;
 
         public MemoryManagerType Type => _unsafeMode ? MemoryManagerType.HostMappedUnsafe : MemoryManagerType.HostMapped;
@@ -67,7 +69,6 @@ namespace Ryujinx.Cpu.Jit
         /// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
         public MemoryManagerHostMapped(MemoryBlock backingMemory, ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler = null)
         {
-            _backingMemory = backingMemory;
             _pageTable = new PageTable<ulong>();
             _invalidAccessHandler = invalidAccessHandler;
             _unsafeMode = unsafeMode;
@@ -86,13 +87,10 @@ namespace Ryujinx.Cpu.Jit
 
             _pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
 
-            MemoryAllocationFlags asFlags = MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible;
+            _addressSpace = new AddressSpace(backingMemory, asSize, Supports4KBPages);
 
-            _addressSpace = new MemoryBlock(asSize, asFlags);
-            _addressSpaceMirror = new MemoryBlock(asSize, asFlags);
-
-            Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler);
-            _memoryEh = new MemoryEhMeilleure(_addressSpace, _addressSpaceMirror, Tracking);
+            Tracking = new MemoryTracking(this, (int)MemoryBlock.GetPageSize(), invalidAccessHandler);
+            _memoryEh = new MemoryEhMeilleure(_addressSpace.Base, _addressSpace.Mirror, Tracking);
         }
 
         /// <inheritdoc/>
@@ -145,18 +143,23 @@ namespace Ryujinx.Cpu.Jit
         }
 
         /// <inheritdoc/>
-        public void Map(ulong va, ulong pa, ulong size)
+        public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
         {
             AssertValidAddressAndSize(va, size);
 
-            _addressSpace.MapView(_backingMemory, pa, va, size);
-            _addressSpaceMirror.MapView(_backingMemory, pa, va, size);
+            _addressSpace.Map(va, pa, size, flags);
             AddMapping(va, size);
             PtMap(va, pa, size);
 
             Tracking.Map(va, size);
         }
 
+        /// <inheritdoc/>
+        public void MapForeign(ulong va, nuint hostPointer, ulong size)
+        {
+            throw new NotSupportedException();
+        }
+
         /// <inheritdoc/>
         public void Unmap(ulong va, ulong size)
         {
@@ -167,8 +170,7 @@ namespace Ryujinx.Cpu.Jit
             RemoveMapping(va, size);
             PtUnmap(va, size);
-            _addressSpace.UnmapView(_backingMemory, va, size);
-            _addressSpaceMirror.UnmapView(_backingMemory, va, size);
+            _addressSpace.Unmap(va, size);
         }
 
         private void PtMap(ulong va, ulong pa, ulong size)
@@ -201,7 +203,7 @@ namespace Ryujinx.Cpu.Jit
             {
                 AssertMapped(va, (ulong)Unsafe.SizeOf<T>());
 
-                return _addressSpaceMirror.Read<T>(va);
+                return _addressSpace.Mirror.Read<T>(va);
             }
             catch (InvalidMemoryRegionException)
             {
@@ -241,7 +243,7 @@ namespace Ryujinx.Cpu.Jit
             {
                 AssertMapped(va, (ulong)data.Length);
 
-                _addressSpaceMirror.Read(va, data);
+                _addressSpace.Mirror.Read(va, data);
             }
             catch (InvalidMemoryRegionException)
             {
@@ -260,7 +262,7 @@ namespace Ryujinx.Cpu.Jit
             {
                 SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), write: true);
 
-                _addressSpaceMirror.Write(va, value);
+                _addressSpace.Mirror.Write(va, value);
             }
             catch (InvalidMemoryRegionException)
             {
@@ -278,7 +280,7 @@ namespace Ryujinx.Cpu.Jit
             {
                 SignalMemoryTracking(va, (ulong)data.Length, write: true);
 
-                _addressSpaceMirror.Write(va, data);
+                _addressSpace.Mirror.Write(va, data);
             }
             catch (InvalidMemoryRegionException)
             {
@@ -296,7 +298,7 @@ namespace Ryujinx.Cpu.Jit
             {
                 AssertMapped(va, (ulong)data.Length);
 
-                _addressSpaceMirror.Write(va, data);
+                _addressSpace.Mirror.Write(va, data);
             }
             catch (InvalidMemoryRegionException)
             {
@@ -314,7 +316,7 @@ namespace Ryujinx.Cpu.Jit
             {
                 SignalMemoryTracking(va, (ulong)data.Length, false);
 
-                Span<byte> target = _addressSpaceMirror.GetSpan(va, data.Length);
+                Span<byte> target = _addressSpace.Mirror.GetSpan(va, data.Length);
                 bool changed = !data.SequenceEqual(target);
 
                 if (changed)
@@ -347,7 +349,7 @@ namespace Ryujinx.Cpu.Jit
                 AssertMapped(va, (ulong)size);
             }
 
-            return _addressSpaceMirror.GetSpan(va, size);
+            return _addressSpace.Mirror.GetSpan(va, size);
         }
 
         /// <inheritdoc/>
@@ -362,7 +364,7 @@ namespace Ryujinx.Cpu.Jit
                 AssertMapped(va, (ulong)size);
             }
 
-            return _addressSpaceMirror.GetWritableRegion(va, size);
+            return _addressSpace.Mirror.GetWritableRegion(va, size);
         }
 
         /// <inheritdoc/>
@@ -370,7 +372,7 @@ namespace Ryujinx.Cpu.Jit
         {
             SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
 
-            return ref _addressSpaceMirror.GetRef<T>(va);
+            return ref _addressSpace.Mirror.GetRef<T>(va);
         }
 
         /// <inheritdoc/>
@@ -454,6 +456,14 @@ namespace Ryujinx.Cpu.Jit
             return true;
         }
 
+        /// <inheritdoc/>
+        public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+        {
+            AssertValidAddressAndSize(va, size);
+
+            return Enumerable.Repeat(new HostMemoryRange((nuint)(ulong)_addressSpace.Mirror.GetPointer(va, size), size), 1);
+        }
+
         /// <inheritdoc/>
         public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
         {
@@ -692,7 +702,7 @@ namespace Ryujinx.Cpu.Jit
                 _ => MemoryPermission.None
             };
 
-            _addressSpace.Reprotect(va, size, protection, false);
+            _addressSpace.Base.Reprotect(va, size, protection, false);
         }
 
         /// <inheritdoc/>
@@ -799,7 +809,6 @@ namespace Ryujinx.Cpu.Jit
         protected override void Destroy()
         {
             _addressSpace.Dispose();
-            _addressSpaceMirror.Dispose();
             _memoryEh.Dispose();
         }
diff --git a/Ryujinx.Cpu/PrivateMemoryAllocation.cs b/Ryujinx.Cpu/PrivateMemoryAllocation.cs
new file mode 100644
index 000000000..1327880e2
--- /dev/null
+++ b/Ryujinx.Cpu/PrivateMemoryAllocation.cs
@@ -0,0 +1,41 @@
+using Ryujinx.Memory;
+using System;
+
+namespace Ryujinx.Cpu
+{
+    struct PrivateMemoryAllocation : IDisposable
+    {
+        private readonly PrivateMemoryAllocator _owner;
+        private readonly PrivateMemoryAllocator.Block _block;
+
+        public bool IsValid => _owner != null;
+        public MemoryBlock Memory => _block?.Memory;
+        public ulong Offset { get; }
+        public ulong Size { get; }
+
+        public PrivateMemoryAllocation(
+            PrivateMemoryAllocator owner,
+            PrivateMemoryAllocator.Block block,
+            ulong offset,
+            ulong size)
+        {
+            _owner = owner;
+            _block = block;
+            Offset = offset;
+            Size = size;
+        }
+
+        public (PrivateMemoryAllocation, PrivateMemoryAllocation) Split(ulong splitOffset)
+        {
+            PrivateMemoryAllocation left = new PrivateMemoryAllocation(_owner, _block, Offset, splitOffset);
+            PrivateMemoryAllocation right = new PrivateMemoryAllocation(_owner, _block, Offset + splitOffset, Size - splitOffset);
+
+            return (left, right);
+        }
+
+        public void Dispose()
+        {
+            _owner.Free(_block, Offset, Size);
+        }
+    }
+}
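The `GetHostRegions` additions in the two memory managers above share one contract, sketched here with hypothetical variables (`memoryManager`, `va`, `size`): a null result means part of the range is unmapped; otherwise the guest range is decomposed into contiguous host-pointer ranges.

```csharp
// Hedged sketch of the GetHostRegions contract introduced in this diff.
IEnumerable<HostMemoryRange> regions = memoryManager.GetHostRegions(va, size);

if (regions == null)
{
    // Some page in [va, va + size) was unmapped.
    throw new InvalidMemoryRegionException();
}

foreach (HostMemoryRange region in regions)
{
    Console.WriteLine($"host 0x{(ulong)region.Address:X} size 0x{region.Size:X}");
}
```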
diff --git a/Ryujinx.Cpu/PrivateMemoryAllocator.cs b/Ryujinx.Cpu/PrivateMemoryAllocator.cs
new file mode 100644
index 000000000..cbf1f1d9c
--- /dev/null
+++ b/Ryujinx.Cpu/PrivateMemoryAllocator.cs
@@ -0,0 +1,268 @@
+using Ryujinx.Common;
+using Ryujinx.Memory;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu
+{
+    class PrivateMemoryAllocator : PrivateMemoryAllocatorImpl<PrivateMemoryAllocator.Block>
+    {
+        public const ulong InvalidOffset = ulong.MaxValue;
+
+        public class Block : IComparable<Block>
+        {
+            public MemoryBlock Memory { get; private set; }
+            public ulong Size { get; }
+
+            private struct Range : IComparable<Range>
+            {
+                public ulong Offset { get; }
+                public ulong Size { get; }
+
+                public Range(ulong offset, ulong size)
+                {
+                    Offset = offset;
+                    Size = size;
+                }
+
+                public int CompareTo(Range other)
+                {
+                    return Offset.CompareTo(other.Offset);
+                }
+            }
+
+            private readonly List<Range> _freeRanges;
+
+            public Block(MemoryBlock memory, ulong size)
+            {
+                Memory = memory;
+                Size = size;
+                _freeRanges = new List<Range>
+                {
+                    new Range(0, size)
+                };
+            }
+
+            public ulong Allocate(ulong size, ulong alignment)
+            {
+                for (int i = 0; i < _freeRanges.Count; i++)
+                {
+                    var range = _freeRanges[i];
+
+                    ulong alignedOffset = BitUtils.AlignUp(range.Offset, alignment);
+                    ulong sizeDelta = alignedOffset - range.Offset;
+                    ulong usableSize = range.Size - sizeDelta;
+
+                    if (sizeDelta < range.Size && usableSize >= size)
+                    {
+                        _freeRanges.RemoveAt(i);
+
+                        if (sizeDelta != 0)
+                        {
+                            InsertFreeRange(range.Offset, sizeDelta);
+                        }
+
+                        ulong endOffset = range.Offset + range.Size;
+                        ulong remainingSize = endOffset - (alignedOffset + size);
+                        if (remainingSize != 0)
+                        {
+                            InsertFreeRange(endOffset - remainingSize, remainingSize);
+                        }
+
+                        return alignedOffset;
+                    }
+                }
+
+                return InvalidOffset;
+            }
+
+            public void Free(ulong offset, ulong size)
+            {
+                InsertFreeRangeComingled(offset, size);
+            }
+
+            private void InsertFreeRange(ulong offset, ulong size)
+            {
+                var range = new Range(offset, size);
+                int index = _freeRanges.BinarySearch(range);
+                if (index < 0)
+                {
+                    index = ~index;
+                }
+
+                _freeRanges.Insert(index, range);
+            }
+
+            private void InsertFreeRangeComingled(ulong offset, ulong size)
+            {
+                ulong endOffset = offset + size;
+                var range = new Range(offset, size);
+                int index = _freeRanges.BinarySearch(range);
+                if (index < 0)
+                {
+                    index = ~index;
+                }
+
+                if (index < _freeRanges.Count && _freeRanges[index].Offset == endOffset)
+                {
+                    endOffset = _freeRanges[index].Offset + _freeRanges[index].Size;
+                    _freeRanges.RemoveAt(index);
+                }
+
+                if (index > 0 && _freeRanges[index - 1].Offset + _freeRanges[index - 1].Size == offset)
+                {
+                    offset = _freeRanges[index - 1].Offset;
+                    _freeRanges.RemoveAt(--index);
+                }
+
+                range = new Range(offset, endOffset - offset);
+
+                _freeRanges.Insert(index, range);
+            }
+
+            public bool IsTotallyFree()
+            {
+                if (_freeRanges.Count == 1 && _freeRanges[0].Size == Size)
+                {
+                    Debug.Assert(_freeRanges[0].Offset == 0);
+                    return true;
+                }
+
+                return false;
+            }
+
+            public int CompareTo(Block other)
+            {
+                return Size.CompareTo(other.Size);
+            }
+
+            public virtual void Destroy()
+            {
+                Memory.Dispose();
+            }
+        }
+
+        public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
+        {
+        }
+
+        public PrivateMemoryAllocation Allocate(ulong size, ulong alignment)
+        {
+            var allocation = Allocate(size, alignment, CreateBlock);
+
+            return new PrivateMemoryAllocation(this, allocation.Block, allocation.Offset, allocation.Size);
+        }
+
+        private Block CreateBlock(MemoryBlock memory, ulong size)
+        {
+            return new Block(memory, size);
+        }
+    }
+
+    class PrivateMemoryAllocatorImpl<T> : IDisposable where T : PrivateMemoryAllocator.Block
+    {
+        private const ulong InvalidOffset = ulong.MaxValue;
+
+        public struct Allocation
+        {
+            public T Block { get; }
+            public ulong Offset { get; }
+            public ulong Size { get; }
+
+            public Allocation(T block, ulong offset, ulong size)
+            {
+                Block = block;
+                Offset = offset;
+                Size = size;
+            }
+        }
+
+        private readonly List<T> _blocks;
+
+        private readonly int _blockAlignment;
+        private readonly MemoryAllocationFlags _allocationFlags;
+
+        public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
+        {
+            _blocks = new List<T>();
+            _blockAlignment = blockAlignment;
+            _allocationFlags = allocationFlags;
+        }
+
+        protected Allocation Allocate(ulong size, ulong alignment, Func<MemoryBlock, ulong, T> createBlock)
+        {
+            // Ensure we have a sane alignment value.
+            if ((ulong)(int)alignment != alignment || (int)alignment <= 0)
+            {
+                throw new ArgumentOutOfRangeException(nameof(alignment), $"Invalid alignment 0x{alignment:X}.");
+            }
+
+            for (int i = 0; i < _blocks.Count; i++)
+            {
+                var block = _blocks[i];
+
+                if (block.Size >= size)
+                {
+                    ulong offset = block.Allocate(size, alignment);
+                    if (offset != InvalidOffset)
+                    {
+                        return new Allocation(block, offset, size);
+                    }
+                }
+            }
+
+            ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
+
+            var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
+            var newBlock = createBlock(memory, blockAlignedSize);
+
+            InsertBlock(newBlock);
+
+            ulong newBlockOffset = newBlock.Allocate(size, alignment);
+            Debug.Assert(newBlockOffset != InvalidOffset);
+
+            return new Allocation(newBlock, newBlockOffset, size);
+        }
+
+        public void Free(PrivateMemoryAllocator.Block block, ulong offset, ulong size)
+        {
+            block.Free(offset, size);
+
+            if (block.IsTotallyFree())
+            {
+                for (int i = 0; i < _blocks.Count; i++)
+                {
+                    if (_blocks[i] == block)
+                    {
+                        _blocks.RemoveAt(i);
+                        break;
+                    }
+                }
+
+                block.Destroy();
+            }
+        }
+
+        private void InsertBlock(T block)
+        {
+            int index = _blocks.BinarySearch(block);
+            if (index < 0)
+            {
+                index = ~index;
+            }
+
+            _blocks.Insert(index, block);
+        }
+
+        public void Dispose()
+        {
+            for (int i = 0; i < _blocks.Count; i++)
+            {
+                _blocks[i].Destroy();
+            }
+
+            _blocks.Clear();
+        }
+    }
+}
\ No newline at end of file
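A worked example of `Block.Allocate`'s first-fit alignment arithmetic above, using made-up numbers:

```csharp
// Free range [0x1800, 0x4800), request: size 0x1000 aligned to 0x1000.
ulong rangeOffset = 0x1800, rangeSize = 0x3000, alignment = 0x1000, size = 0x1000;

ulong alignedOffset = BitUtils.AlignUp(rangeOffset, alignment); // 0x2000: returned to the caller
ulong sizeDelta = alignedOffset - rangeOffset;                  // 0x0800: re-inserted as a free range
ulong usableSize = rangeSize - sizeDelta;                       // 0x2800: enough, so this range is used

ulong endOffset = rangeOffset + rangeSize;                      // 0x4800
ulong remainingSize = endOffset - (alignedOffset + size);       // 0x1800: re-inserted after the allocation
```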
diff --git a/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs b/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
index c167dc0d3..896e11a5d 100644
--- a/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
+++ b/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs
@@ -1420,6 +1420,14 @@ namespace Ryujinx.Graphics.Gpu.Image
         /// <param name="size">The size of the flushing memory access</param>
         public void FlushAction(TextureGroupHandle handle, ulong address, ulong size)
         {
+            // If the page size is larger than 4KB, we will have a lot of false positives for flushing.
+            // Let's avoid flushing textures that are unlikely to be read from CPU to improve performance
+            // on those platforms.
+            if (!_physicalMemory.Supports4KBPages && !Storage.Info.IsLinear && !_context.IsGpuThread())
+            {
+                return;
+            }
+
             // There is a small gap here where the action is removed but _actionRegistered is still 1.
             // In this case it will skip registering the action, but here we are already handling it,
             // so there shouldn't be any issue as it's the same handler for all actions.
diff --git a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs
index 842249f34..a624386ed 100644
--- a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs
+++ b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs
@@ -470,19 +470,16 @@ namespace Ryujinx.Graphics.Gpu.Memory
                 return false;
             }
 
-            if (address < Address)
+            ulong maxAddress = Math.Max(address, Address);
+            ulong minEndAddress = Math.Min(address + size, Address + Size);
+
+            if (maxAddress >= minEndAddress)
             {
-                address = Address;
+                // Access doesn't overlap.
+                return false;
             }
 
-            ulong maxSize = Address + Size - address;
-
-            if (size > maxSize)
-            {
-                size = maxSize;
-            }
-
-            ForceDirty(address, size);
+            ForceDirty(maxAddress, minEndAddress - maxAddress);
 
             return true;
         }
diff --git a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
index 051838f1f..c1fc0c5cd 100644
--- a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
+++ b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs
@@ -21,6 +21,11 @@ namespace Ryujinx.Graphics.Gpu.Memory
         private IVirtualMemoryManagerTracked _cpuMemory;
         private int _referenceCount;
 
+        /// <summary>
+        /// Indicates whether the memory manager supports 4KB pages.
+        /// </summary>
+        public bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+
         /// <summary>
         /// In-memory shader cache.
         /// </summary>
diff --git a/Ryujinx.HLE/HOS/Kernel/KernelContext.cs b/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
index 6c58e1972..ccc5c0f0b 100644
--- a/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
+++ b/Ryujinx.HLE/HOS/Kernel/KernelContext.cs
@@ -84,7 +84,7 @@ namespace Ryujinx.HLE.HOS.Kernel
                 KernelConstants.UserSlabHeapItemSize,
                 KernelConstants.UserSlabHeapSize);
 
-            memory.Commit(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize);
+            CommitMemory(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize);
 
             CriticalSection = new KCriticalSection(this);
             Schedulers = new KScheduler[KScheduler.CpuCoresCount];
@@ -119,6 +119,17 @@ namespace Ryujinx.HLE.HOS.Kernel
             new Thread(PreemptionThreadStart) { Name = "HLE.PreemptionThread" }.Start();
         }
 
+        public void CommitMemory(ulong address, ulong size)
+        {
+            ulong alignment = MemoryBlock.GetPageSize();
+            ulong endAddress = address + size;
+
+            address &= ~(alignment - 1);
+            endAddress = (endAddress + (alignment - 1)) & ~(alignment - 1);
+
+            Memory.Commit(address, endAddress - address);
+        }
+
         public ulong NewThreadUid()
         {
             return Interlocked.Increment(ref _threadUid) - 1;
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
index 5e6273b86..4596b15d5 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs
@@ -64,7 +64,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             if (address != 0)
             {
                 IncrementPagesReferenceCount(address, pagesCount);
-                context.Memory.Commit(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
+                context.CommitMemory(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize);
             }
 
             return address;
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
index 9b7c99ba1..28e9f90aa 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -1,6 +1,8 @@
 using Ryujinx.Horizon.Common;
 using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
 using System;
+using System.Collections.Generic;
 using System.Diagnostics;
 
 namespace Ryujinx.HLE.HOS.Kernel.Memory
@@ -9,11 +11,19 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
     {
         private readonly IVirtualMemoryManager _cpuMemory;
 
+        protected override bool Supports4KBPages => _cpuMemory.Supports4KBPages;
+
         public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
         {
             _cpuMemory = cpuMemory;
         }
 
+        /// <inheritdoc/>
+        protected override IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+        {
+            return _cpuMemory.GetHostRegions(va, size);
+        }
+
         /// <inheritdoc/>
         protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
         {
@@ -43,7 +53,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 return result;
             }
 
-            result = MapPages(dst, pageList, newDstPermission, false, 0);
+            result = MapPages(dst, pageList, newDstPermission, MemoryMapFlags.Private, false, 0);
 
             if (result != Result.Success)
             {
@@ -81,7 +91,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
 
             if (result != Result.Success)
             {
-                Result mapResult = MapPages(dst, dstPageList, oldDstPermission, false, 0);
+                Result mapResult = MapPages(dst, dstPageList, oldDstPermission, MemoryMapFlags.Private, false, 0);
                 Debug.Assert(mapResult == Result.Success);
             }
 
@@ -89,13 +99,20 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         }
 
         /// <inheritdoc/>
-        protected override Result MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
+        protected override Result MapPages(
+            ulong dstVa,
+            ulong pagesCount,
+            ulong srcPa,
+            KMemoryPermission permission,
+            MemoryMapFlags flags,
+            bool shouldFillPages,
+            byte fillValue)
         {
             ulong size = pagesCount * PageSize;
 
-            Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
+            Context.CommitMemory(srcPa - DramMemoryMap.DramBase, size);
 
-            _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
+            _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size, flags);
 
             if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
             {
@@ -111,7 +128,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         }
 
         /// <inheritdoc/>
-        protected override Result MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
+        protected override Result MapPages(
+            ulong address,
+            KPageList pageList,
+            KMemoryPermission permission,
+            MemoryMapFlags flags,
+            bool shouldFillPages,
+            byte fillValue)
         {
             using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
 
@@ -122,9 +145,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 ulong addr = pageNode.Address - DramMemoryMap.DramBase;
                 ulong size = pageNode.PagesCount * PageSize;
 
-                Context.Memory.Commit(addr, size);
+                Context.CommitMemory(addr, size);
 
-                _cpuMemory.Map(currentVa, addr, size);
+                _cpuMemory.Map(currentVa, addr, size, flags);
 
                 if (shouldFillPages)
                 {
@@ -139,6 +162,21 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             return Result.Success;
         }
 
+        /// <inheritdoc/>
+        protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
+        {
+            ulong offset = 0;
+
+            foreach (var region in regions)
+            {
+                _cpuMemory.MapForeign(va + offset, region.Address, region.Size);
+
+                offset += region.Size;
+            }
+
+            return Result.Success;
+        }
+
         /// <inheritdoc/>
         protected override Result Unmap(ulong address, ulong pagesCount)
         {
@@ -188,4 +226,4 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             _cpuMemory.Write(va, data);
         }
     }
-}
+}
\ No newline at end of file
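The kernel now routes all backing-store commits through `KernelContext.CommitMemory`, which widens the range to host page granularity. A worked example with a 16KB host page:

```csharp
// Committing guest range [0x5000, 0x7000) on a host with 16KB pages:
ulong alignment = 0x4000; // MemoryBlock.GetPageSize() on such a host
ulong address = 0x5000, size = 0x2000;
ulong endAddress = address + size;                              // 0x7000

address &= ~(alignment - 1);                                    // 0x4000 (round down)
endAddress = (endAddress + (alignment - 1)) & ~(alignment - 1); // 0x8000 (round up)

// Memory.Commit(0x4000, 0x4000) — exactly one whole host page.
```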
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
index e19e22c87..bd7d5725b 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
@@ -1,6 +1,8 @@
 using Ryujinx.Common;
 using Ryujinx.HLE.HOS.Kernel.Common;
 using Ryujinx.HLE.HOS.Kernel.Process;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
 using Ryujinx.Horizon.Common;
 using System;
 using System.Collections.Generic;
@@ -29,6 +31,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         private const int MaxBlocksNeededForInsertion = 2;
 
         protected readonly KernelContext Context;
+        protected virtual bool Supports4KBPages => true;
 
         public ulong AddrSpaceStart { get; private set; }
         public ulong AddrSpaceEnd { get; private set; }
@@ -366,7 +369,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 return KernelResult.OutOfResource;
             }
 
-            Result result = MapPages(address, pageList, permission);
+            Result result = MapPages(address, pageList, permission, MemoryMapFlags.None);
 
             if (result == Result.Success)
             {
@@ -502,7 +505,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
 
             if (paIsValid)
             {
-                result = MapPages(address, pagesCount, srcPa, permission);
+                result = MapPages(address, pagesCount, srcPa, permission, MemoryMapFlags.Private);
             }
             else
             {
@@ -565,7 +568,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
 
             using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager));
 
-            return MapPages(address, pageList, permission);
+            return MapPages(address, pageList, permission, MemoryMapFlags.Private);
         }
 
         public Result MapProcessCodeMemory(ulong dst, ulong src, ulong size)
@@ -746,7 +749,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 return KernelResult.InvalidMemState;
             }
 
-            result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite, true, (byte)_heapFillValue);
+            result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private, true, (byte)_heapFillValue);
 
             if (result != Result.Success)
             {
@@ -1334,7 +1337,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
 
                     ulong currentPagesCount = Math.Min(srcPaPages, dstVaPages);
 
-                    MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite);
+                    MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite, MemoryMapFlags.Private);
 
                     dstVa += currentPagesCount * PageSize;
                     srcPa += currentPagesCount * PageSize;
@@ -1878,7 +1881,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                     Context.Memory.Fill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter, (byte)_ipcFillValue);
                 }
 
-                Result result = MapPages(currentVa, 1, dstFirstPagePa, permission);
+                Result result = MapPages(currentVa, 1, dstFirstPagePa, permission, MemoryMapFlags.Private);
 
                 if (result != Result.Success)
                 {
@@ -1894,10 +1897,19 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             {
                 ulong alignedSize = endAddrTruncated - addressRounded;
 
-                KPageList pageList = new KPageList();
-                srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
+                Result result;
 
-                Result result = MapPages(currentVa, pageList, permission);
+                if (srcPageTable.Supports4KBPages)
+                {
+                    KPageList pageList = new KPageList();
+                    srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
+
+                    result = MapPages(currentVa, pageList, permission, MemoryMapFlags.None);
+                }
+                else
+                {
+                    result = MapForeign(srcPageTable.GetHostRegions(addressRounded, alignedSize), currentVa, alignedSize);
+                }
 
                 if (result != Result.Success)
                 {
@@ -1932,7 +1944,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
 
                 Context.Memory.Fill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter, (byte)_ipcFillValue);
 
-                Result result = MapPages(currentVa, 1, dstLastPagePa, permission);
+                Result result = MapPages(currentVa, 1, dstLastPagePa, permission, MemoryMapFlags.Private);
 
                 if (result != Result.Success)
                 {
@@ -2884,6 +2896,16 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             return StackRegionStart > address || address + size - 1 > StackRegionEnd - 1;
         }
 
+        /// <summary>
+        /// Gets the host regions that make up the given virtual address region.
+        /// If any part of the virtual region is unmapped, null is returned.
+        /// </summary>
+        /// <param name="va">Virtual address of the range</param>
+        /// <param name="size">Size of the range</param>
+        /// <returns>The host regions</returns>
+        /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
+        protected abstract IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
+
         /// <summary>
         /// Gets the physical regions that make up the given virtual address region.
         /// If any part of the virtual region is unmapped, null is returned.
@@ -2936,10 +2958,18 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         /// <param name="pagesCount">Number of pages to map</param>
         /// <param name="srcPa">Physical address where the pages should be mapped. May be ignored if aliasing is not supported</param>
         /// <param name="permission">Permission of the region to be mapped</param>
+        /// <param name="flags">Flags controlling the memory map operation</param>
         /// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
        /// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
         /// <returns>Result of the mapping operation</returns>
-        protected abstract Result MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
+        protected abstract Result MapPages(
+            ulong dstVa,
+            ulong pagesCount,
+            ulong srcPa,
+            KMemoryPermission permission,
+            MemoryMapFlags flags,
+            bool shouldFillPages = false,
+            byte fillValue = 0);
 
         /// <summary>
         /// Maps a region of memory into the specified physical memory region.
         /// </summary>
@@ -2947,10 +2977,26 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         /// <param name="address">Destination virtual address that should be mapped</param>
         /// <param name="pageList">List of physical memory pages where the pages should be mapped. May be ignored if aliasing is not supported</param>
         /// <param name="permission">Permission of the region to be mapped</param>
+        /// <param name="flags">Flags controlling the memory map operation</param>
         /// <param name="shouldFillPages">Indicate if the pages should be filled with the <paramref name="fillValue"/> value</param>
         /// <param name="fillValue">The value used to fill pages when <paramref name="shouldFillPages"/> is set to true</param>
         /// <returns>Result of the mapping operation</returns>
-        protected abstract Result MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
+        protected abstract Result MapPages(
+            ulong address,
+            KPageList pageList,
+            KMemoryPermission permission,
+            MemoryMapFlags flags,
+            bool shouldFillPages = false,
+            byte fillValue = 0);
+
+        /// <summary>
+        /// Maps pages into an arbitrary host memory location.
+        /// </summary>
+        /// <param name="regions">Host regions to be mapped into the specified virtual memory region</param>
+        /// <param name="va">Destination virtual address of the range on this page table</param>
+        /// <param name="size">Size of the range</param>
+        /// <returns>Result of the mapping operation</returns>
+        protected abstract Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size);
 
         /// <summary>
         /// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
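For IPC, the destination page table can no longer assume the source hands out 4KB physical pages; when it can't, the new `MapForeign` path maps the source's host ranges back-to-back. An illustrative walk (regions and addresses hypothetical):

```csharp
// What KPageTable.MapForeign does with the regions returned by GetHostRegions:
var regions = new[]
{
    new HostMemoryRange((nuint)0x7F0000000000UL, 0x4000),
    new HostMemoryRange((nuint)0x7F0000100000UL, 0x8000),
};

ulong va = 0x20000000UL, offset = 0;

foreach (var region in regions)
{
    cpuMemory.MapForeign(va + offset, region.Address, region.Size); // maps at 0x20000000, then 0x20004000
    offset += region.Size;
}
```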
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
index 2dbaf3cd8..5ec3cd724 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs
@@ -2,6 +2,7 @@ using Ryujinx.Common;
 using Ryujinx.HLE.HOS.Kernel.Common;
 using Ryujinx.HLE.HOS.Kernel.Process;
 using Ryujinx.Horizon.Common;
+using Ryujinx.Memory;
 
 namespace Ryujinx.HLE.HOS.Kernel.Memory
 {
@@ -48,7 +49,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
                 return KernelResult.InvalidPermission;
             }
 
-            return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+            // On platforms with page size > 4KB, this can fail due to the address not being page aligned,
+            // so we return an error to force the application to retry with a different address.
+
+            try
+            {
+                return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
+            }
+            catch (InvalidMemoryRegionException)
+            {
+                return KernelResult.InvalidMemState;
+            }
         }
 
         public Result UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
index 167e0aa90..c68b73695 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs
@@ -18,7 +18,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
             {
                 ulong address = pageNode.Address - DramMemoryMap.DramBase;
                 ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
-                context.Memory.Commit(address, size);
+                context.CommitMemory(address, size);
             }
         }
diff --git a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs
index 6c4422829..06eb4729e 100644
--- a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs
+++ b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs
@@ -6,6 +6,8 @@ namespace Ryujinx.Memory.Tests
 {
     public class MockVirtualMemoryManager : IVirtualMemoryManager
     {
+        public bool Supports4KBPages => true;
+
         public bool NoMappings = false;
 
         public event Action<ulong, ulong, MemoryPermission> OnProtect;
@@ -14,7 +16,12 @@ namespace Ryujinx.Memory.Tests
         {
         }
 
-        public void Map(ulong va, ulong pa, ulong size)
+        public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+        {
+            throw new NotImplementedException();
+        }
+
+        public void MapForeign(ulong va, nuint hostAddress, ulong size)
         {
             throw new NotImplementedException();
         }
@@ -64,6 +71,11 @@ namespace Ryujinx.Memory.Tests
             throw new NotImplementedException();
         }
 
+        IEnumerable<HostMemoryRange> IVirtualMemoryManager.GetHostRegions(ulong va, ulong size)
+        {
+            throw new NotImplementedException();
+        }
+
         IEnumerable<MemoryRange> IVirtualMemoryManager.GetPhysicalRegions(ulong va, ulong size)
         {
             return NoMappings ? new MemoryRange[0] : new MemoryRange[] { new MemoryRange(va, size) };
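Why KSharedMemory's map can now throw: the guest picks 4KB-aligned addresses, but on a 16KB-page host a shared mapping must also be host-page aligned. A hedged illustration of the failing precondition:

```csharp
// Illustrative check only; the real failure surfaces as InvalidMemoryRegionException
// from the host mapping layer and is converted to KernelResult.InvalidMemState above.
ulong hostPageSize = MemoryBlock.GetPageSize();         // e.g. 0x4000
ulong address = 0x10001000;                             // 4KB aligned, guest-chosen

bool hostAligned = (address & (hostPageSize - 1)) == 0; // false -> the map attempt may fail
```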
diff --git a/Ryujinx.Memory/AddressSpaceManager.cs b/Ryujinx.Memory/AddressSpaceManager.cs
index ffe880bf8..b532ce5e0 100644
--- a/Ryujinx.Memory/AddressSpaceManager.cs
+++ b/Ryujinx.Memory/AddressSpaceManager.cs
@@ -13,9 +13,12 @@ namespace Ryujinx.Memory
     /// </summary>
     public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock
     {
-        public const int PageBits = PageTable.PageBits;
-        public const int PageSize = PageTable.PageSize;
-        public const int PageMask = PageTable.PageMask;
+        public const int PageBits = PageTable.PageBits;
+        public const int PageSize = PageTable.PageSize;
+        public const int PageMask = PageTable.PageMask;
+
+        /// <inheritdoc/>
+        public bool Supports4KBPages => true;
 
         /// <summary>
         /// Address space width in bits.
         /// </summary>
@@ -25,7 +28,7 @@ namespace Ryujinx.Memory
         private readonly ulong _addressSpaceSize;
 
         private readonly MemoryBlock _backingMemory;
-        private readonly PageTable<ulong> _pageTable;
+        private readonly PageTable<nuint> _pageTable;
 
         /// <summary>
         /// Creates a new instance of the memory manager.
@@ -46,17 +49,17 @@ namespace Ryujinx.Memory
             AddressSpaceBits = asBits;
             _addressSpaceSize = asSize;
             _backingMemory = backingMemory;
-            _pageTable = new PageTable<ulong>();
+            _pageTable = new PageTable<nuint>();
         }
 
         /// <inheritdoc/>
-        public void Map(ulong va, ulong pa, ulong size)
+        public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
         {
             AssertValidAddressAndSize(va, size);
 
             while (size != 0)
             {
-                _pageTable.Map(va, pa);
+                _pageTable.Map(va, (nuint)(ulong)_backingMemory.GetPointer(pa, PageSize));
 
                 va += PageSize;
                 pa += PageSize;
@@ -64,6 +67,21 @@ namespace Ryujinx.Memory
             }
         }
 
+        /// <inheritdoc/>
+        public void MapForeign(ulong va, nuint hostPointer, ulong size)
+        {
+            AssertValidAddressAndSize(va, size);
+
+            while (size != 0)
+            {
+                _pageTable.Map(va, hostPointer);
+
+                va += PageSize;
+                hostPointer += PageSize;
+                size -= PageSize;
+            }
+        }
+
         /// <inheritdoc/>
         public void Unmap(ulong va, ulong size)
         {
@@ -108,7 +126,7 @@ namespace Ryujinx.Memory
 
             if (IsContiguousAndMapped(va, data.Length))
             {
-                data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
+                data.CopyTo(GetHostSpanContiguous(va, data.Length));
             }
             else
             {
@@ -116,22 +134,18 @@ namespace Ryujinx.Memory
 
                 if ((va & PageMask) != 0)
                 {
-                    ulong pa = GetPhysicalAddressInternal(va);
-
                     size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
 
-                    data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
+                    data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
 
                     offset += size;
                 }
 
                 for (; offset < data.Length; offset += size)
                 {
-                    ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
-
                     size = Math.Min(data.Length - offset, PageSize);
 
-                    data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
+                    data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
                 }
             }
         }
@@ -154,7 +168,7 @@ namespace Ryujinx.Memory
 
             if (IsContiguousAndMapped(va, size))
             {
-                return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
+                return GetHostSpanContiguous(va, size);
             }
             else
             {
@@ -176,7 +190,7 @@ namespace Ryujinx.Memory
 
             if (IsContiguousAndMapped(va, size))
             {
-                return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
+                return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
             }
             else
             {
@@ -189,14 +203,14 @@ namespace Ryujinx.Memory
         }
 
         /// <inheritdoc/>
-        public ref T GetRef<T>(ulong va) where T : unmanaged
+        public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
         {
             if (!IsContiguous(va, Unsafe.SizeOf<T>()))
             {
                 ThrowMemoryNotContiguous();
             }
 
-            return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
+            return ref *(T*)GetHostAddress(va);
         }
 
         /// <inheritdoc/>
@@ -210,7 +224,7 @@ namespace Ryujinx.Memory
             return (int)(vaSpan / PageSize);
         }
 
-        private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
+        private void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
 
         [MethodImpl(MethodImplOptions.AggressiveInlining)]
         private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
@@ -232,7 +246,7 @@ namespace Ryujinx.Memory
                     return false;
                 }
 
-                if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+                if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
                 {
                     return false;
                 }
@@ -243,6 +257,17 @@ namespace Ryujinx.Memory
             return true;
         }
 
+        /// <inheritdoc/>
+        public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+        {
+            if (size == 0)
+            {
+                return Enumerable.Empty<HostMemoryRange>();
+            }
+
+            return GetHostRegionsImpl(va, size);
+        }
+
         /// <inheritdoc/>
         public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
         {
@@ -251,6 +276,39 @@ namespace Ryujinx.Memory
                 return Enumerable.Empty<MemoryRange>();
             }
 
+            var hostRegions = GetHostRegionsImpl(va, size);
+            if (hostRegions == null)
+            {
+                return null;
+            }
+
+            var regions = new MemoryRange[hostRegions.Count];
+
+            ulong backingStart = (ulong)_backingMemory.Pointer;
+            ulong backingEnd = backingStart + _backingMemory.Size;
+
+            int count = 0;
+
+            for (int i = 0; i < regions.Length; i++)
+            {
+                var hostRegion = hostRegions[i];
+
+                if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
+                {
+                    regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
+                }
+            }
+
+            if (count != regions.Length)
+            {
+                return new ArraySegment<MemoryRange>(regions, 0, count);
+            }
+
+            return regions;
+        }
+
+        private List<HostMemoryRange> GetHostRegionsImpl(ulong va, ulong size)
+        {
             if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
             {
                 return null;
@@ -258,9 +316,9 @@ namespace Ryujinx.Memory
 
             int pages = GetPagesCount(va, (uint)size, out va);
 
-            var regions = new List<MemoryRange>();
+            var regions = new List<HostMemoryRange>();
 
-            ulong regionStart = GetPhysicalAddressInternal(va);
+            nuint regionStart = GetHostAddress(va);
             ulong regionSize = PageSize;
 
             for (int page = 0; page < pages - 1; page++)
@@ -270,12 +328,12 @@ namespace Ryujinx.Memory
                     return null;
                 }
 
-                ulong newPa = GetPhysicalAddressInternal(va + PageSize);
+                nuint newHostAddress = GetHostAddress(va + PageSize);
 
-                if (GetPhysicalAddressInternal(va) + PageSize != newPa)
+                if (GetHostAddress(va) + PageSize != newHostAddress)
                 {
-                    regions.Add(new MemoryRange(regionStart, regionSize));
-                    regionStart = newPa;
+                    regions.Add(new HostMemoryRange(regionStart, regionSize));
+                    regionStart = newHostAddress;
                     regionSize = 0;
                 }
 
@@ -283,7 +341,7 @@ namespace Ryujinx.Memory
                 regionSize += PageSize;
             }
 
-            regions.Add(new MemoryRange(regionStart, regionSize));
+            regions.Add(new HostMemoryRange(regionStart, regionSize));
 
             return regions;
         }
@@ -301,22 +359,18 @@ namespace Ryujinx.Memory
 
                 if ((va & PageMask) != 0)
                 {
-                    ulong pa = GetPhysicalAddressInternal(va);
-
                     size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
 
-                    _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
+                    GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
 
                     offset += size;
                 }
 
                 for (; offset < data.Length; offset += size)
                 {
-                    ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
-
                    size = Math.Min(data.Length - offset, PageSize);
 
-                    _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
+                    GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
                 }
             }
 
@@ -391,22 +445,23 @@ namespace Ryujinx.Memory
             }
         }
 
-        private ulong GetPhysicalAddressInternal(ulong va)
+        private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
         {
-            return _pageTable.Read(va) + (va & PageMask);
+            return new Span<byte>((void*)GetHostAddress(va), size);
         }
 
-        /// <summary>
-        /// Reprotect a region of virtual memory for tracking. Sets software protection bits.
-        /// </summary>
-        /// <param name="va">Virtual address base</param>
-        /// <param name="size">Size of the region to protect</param>
-        /// <param name="protection">Memory protection to set</param>
+        private nuint GetHostAddress(ulong va)
+        {
+            return _pageTable.Read(va) + (nuint)(va & PageMask);
+        }
+
+        /// <inheritdoc/>
         public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
         {
             throw new NotImplementedException();
         }
 
+        /// <inheritdoc/>
         public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false)
         {
             // Only the ARM Memory Manager has tracking for now.
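`AddressSpaceManager`'s software page table now stores host pointers instead of physical offsets, so translation is one table read plus the page offset, and physical regions are recovered by subtracting the backing block's base (dropping foreign mappings that fall outside it). A condensed sketch of that reasoning:

```csharp
// Condensed restatement of the new translation path (see GetHostAddress above).
nuint hostAddress = _pageTable.Read(va) + (nuint)(va & PageMask);

// A host range only corresponds to a physical range if it lives inside the backing block.
ulong backingStart = (ulong)_backingMemory.Pointer;
bool isBackingMemory = (ulong)hostAddress >= backingStart &&
                       (ulong)hostAddress < backingStart + _backingMemory.Size;
ulong pa = isBackingMemory ? (ulong)hostAddress - backingStart : 0UL; // foreign mapping otherwise
```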
diff --git a/Ryujinx.Memory/IVirtualMemoryManager.cs b/Ryujinx.Memory/IVirtualMemoryManager.cs
index c8a74f665..390371ad2 100644
--- a/Ryujinx.Memory/IVirtualMemoryManager.cs
+++ b/Ryujinx.Memory/IVirtualMemoryManager.cs
@@ -6,6 +6,12 @@ namespace Ryujinx.Memory
 {
     public interface IVirtualMemoryManager
     {
+        /// <summary>
+        /// Indicates whether the memory manager supports aliasing pages at 4KB granularity.
+        /// </summary>
+        /// <returns>True if 4KB pages are supported by the memory manager, false otherwise</returns>
+        bool Supports4KBPages { get; }
+
         /// <summary>
         /// Maps a virtual memory range into a physical memory range.
         /// </summary>
         /// <remarks>
         /// Addresses and size must be page aligned.
         /// </remarks>
@@ -15,7 +21,20 @@ namespace Ryujinx.Memory
         /// <param name="va">Virtual memory address</param>
         /// <param name="pa">Physical memory address where the region should be mapped to</param>
         /// <param name="size">Size to be mapped</param>
-        void Map(ulong va, ulong pa, ulong size);
+        /// <param name="flags">Flags controlling memory mapping</param>
+        void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags);
+
+        /// <summary>
+        /// Maps a virtual memory range into an arbitrary host memory range.
+        /// </summary>
+        /// <remarks>
+        /// Addresses and size must be page aligned.
+        /// Not all memory managers support this feature.
+        /// </remarks>
+        /// <param name="va">Virtual memory address</param>
+        /// <param name="hostPointer">Host pointer where the virtual region should be mapped</param>
+        /// <param name="size">Size to be mapped</param>
+        void MapForeign(ulong va, nuint hostPointer, ulong size);
 
         /// <summary>
         /// Unmaps a previously mapped range of virtual memory.
@@ -115,6 +134,15 @@ namespace Ryujinx.Memory
         /// <exception cref="MemoryNotContiguousException">Thrown if the specified memory region is not contiguous in physical memory</exception>
         ref T GetRef<T>(ulong va) where T : unmanaged;
 
+        /// <summary>
+        /// Gets the host regions that make up the given virtual address region.
+        /// If any part of the virtual region is unmapped, null is returned.
+        /// </summary>
+        /// <param name="va">Virtual address of the range</param>
+        /// <param name="size">Size of the range</param>
+        /// <returns>Array of host regions</returns>
+        IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
+
         /// <summary>
         /// Gets the physical regions that make up the given virtual address region.
         /// If any part of the virtual region is unmapped, null is returned.
diff --git a/Ryujinx.Memory/MemoryBlock.cs b/Ryujinx.Memory/MemoryBlock.cs
index 2df7ea9bd..885ef4569 100644
--- a/Ryujinx.Memory/MemoryBlock.cs
+++ b/Ryujinx.Memory/MemoryBlock.cs
@@ -440,4 +440,4 @@ namespace Ryujinx.Memory
 
         private static void ThrowInvalidMemoryRegionException() => throw new InvalidMemoryRegionException();
     }
-}
+}
\ No newline at end of file
diff --git a/Ryujinx.Memory/MemoryMapFlags.cs b/Ryujinx.Memory/MemoryMapFlags.cs
new file mode 100644
index 000000000..b4c74c8c9
--- /dev/null
+++ b/Ryujinx.Memory/MemoryMapFlags.cs
@@ -0,0 +1,23 @@
+using System;
+
+namespace Ryujinx.Memory
+{
+    /// <summary>
+    /// Flags that indicate how the host memory should be mapped.
+    /// </summary>
+    [Flags]
+    public enum MemoryMapFlags
+    {
+        /// <summary>
+        /// No mapping flags.
+        /// </summary>
+        None = 0,
+
+        /// <summary>
+        /// Indicates that the implementation is free to ignore the specified backing memory offset
+        /// and allocate its own private storage for the mapping.
+        /// This allows some mappings that would otherwise fail due to host platform restrictions to succeed.
+        /// </summary>
+        Private = 1 << 0
+    }
+}
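How callers are expected to pick the new flag (this mirrors the kernel-side call sites earlier in the diff; variable names are hypothetical): memory that never has to alias another mapping is marked `Private`, everything that must stay coherent with the backing block is not.

```csharp
// Hedged recap of flag selection as used by KPageTableBase in this diff.
memoryManager.Map(heapVa, heapPa, heapSize, MemoryMapFlags.Private); // heap: aliasing not required
memoryManager.Map(ipcVa, ipcPa, ipcSize, MemoryMapFlags.None);       // IPC/shared: must alias backing memory
```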
diff --git a/Ryujinx.Memory/Range/HostMemoryRange.cs b/Ryujinx.Memory/Range/HostMemoryRange.cs
new file mode 100644
index 000000000..79c649d85
--- /dev/null
+++ b/Ryujinx.Memory/Range/HostMemoryRange.cs
@@ -0,0 +1,71 @@
+using System;
+
+namespace Ryujinx.Memory.Range
+{
+    /// <summary>
+    /// Range of memory composed of an address and size.
+    /// </summary>
+    public struct HostMemoryRange : IEquatable<HostMemoryRange>
+    {
+        /// <summary>
+        /// An empty memory range, with a null address and zero size.
+        /// </summary>
+        public static HostMemoryRange Empty => new HostMemoryRange(0, 0);
+
+        /// <summary>
+        /// Start address of the range.
+        /// </summary>
+        public nuint Address { get; }
+
+        /// <summary>
+        /// Size of the range in bytes.
+        /// </summary>
+        public ulong Size { get; }
+
+        /// <summary>
+        /// Address where the range ends (exclusive).
+        /// </summary>
+        public nuint EndAddress => Address + (nuint)Size;
+
+        /// <summary>
+        /// Creates a new memory range with the specified address and size.
+        /// </summary>
+        /// <param name="address">Start address</param>
+        /// <param name="size">Size in bytes</param>
+        public HostMemoryRange(nuint address, ulong size)
+        {
+            Address = address;
+            Size = size;
+        }
+
+        /// <summary>
+        /// Checks if the range overlaps with another.
+        /// </summary>
+        /// <param name="other">The other range to check for overlap</param>
+        /// <returns>True if the ranges overlap, false otherwise</returns>
+        public bool OverlapsWith(HostMemoryRange other)
+        {
+            nuint thisAddress = Address;
+            nuint thisEndAddress = EndAddress;
+            nuint otherAddress = other.Address;
+            nuint otherEndAddress = other.EndAddress;
+
+            return thisAddress < otherEndAddress && otherAddress < thisEndAddress;
+        }
+
+        public override bool Equals(object obj)
+        {
+            return obj is HostMemoryRange other && Equals(other);
+        }
+
+        public bool Equals(HostMemoryRange other)
+        {
+            return Address == other.Address && Size == other.Size;
+        }
+
+        public override int GetHashCode()
+        {
+            return HashCode.Combine(Address, Size);
+        }
+    }
+}
diff --git a/Ryujinx.Memory/Tracking/MemoryTracking.cs b/Ryujinx.Memory/Tracking/MemoryTracking.cs
index 9aa7c7ff3..9a35cfb6c 100644
--- a/Ryujinx.Memory/Tracking/MemoryTracking.cs
+++ b/Ryujinx.Memory/Tracking/MemoryTracking.cs
@@ -139,8 +139,6 @@ namespace Ryujinx.Memory.Tracking
         /// <returns>The memory tracking handle</returns>
         public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity)
         {
-            (address, size) = PageAlign(address, size);
-
             return new MultiRegionHandle(this, address, size, handles, granularity);
         }
 
@@ -166,11 +164,11 @@ namespace Ryujinx.Memory.Tracking
         /// <returns>The memory tracking handle</returns>
         public RegionHandle BeginTracking(ulong address, ulong size)
         {
-            (address, size) = PageAlign(address, size);
+            var (paAddress, paSize) = PageAlign(address, size);
 
             lock (TrackingLock)
             {
-                RegionHandle handle = new RegionHandle(this, address, size, _memoryManager.IsRangeMapped(address, size));
+                RegionHandle handle = new RegionHandle(this, paAddress, paSize, address, size, _memoryManager.IsRangeMapped(address, size));
 
                 return handle;
             }
@@ -186,11 +184,11 @@ namespace Ryujinx.Memory.Tracking
         /// <returns>The memory tracking handle</returns>
         internal RegionHandle BeginTrackingBitmap(ulong address, ulong size, ConcurrentBitmap bitmap, int bit)
         {
-            (address, size) = PageAlign(address, size);
+            var (paAddress, paSize) = PageAlign(address, size);
 
             lock (TrackingLock)
             {
-                RegionHandle handle = new RegionHandle(this, address, size, bitmap, bit, _memoryManager.IsRangeMapped(address, size));
+                RegionHandle handle = new RegionHandle(this, paAddress, paSize, address, size, bitmap, bit, _memoryManager.IsRangeMapped(address, size));
 
                 return handle;
             }
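The tracking entry points above now page align only for protection purposes and thread the caller's original bounds through to the handle. Assuming the usual mask-based rounding and 4KB pages (the helper itself is not shown in this patch), PageAlign behaves like this sketch:

static class PageAlignSketch
{
    private const ulong PageSize = 0x1000; // assumed 4KB pages
    private const ulong PageMask = PageSize - 1;

    // Rounds the start down and the end up to page boundaries; the original
    // address/size are what the handle now keeps as RealAddress/RealSize.
    public static (ulong Address, ulong Size) PageAlign(ulong address, ulong size)
    {
        ulong alignedAddress = address & ~PageMask;
        ulong alignedSize = ((address + size + PageMask) & ~PageMask) - alignedAddress;

        return (alignedAddress, alignedSize);
    }
}

// Example: PageAlign(0x1FFF, 3) == (0x1000, 0x2000), a two-page tracked
// span, while RealAddress = 0x1FFF and RealSize = 3 are preserved.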
diff --git a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
index 6cbea7f31..6ea2b7845 100644
--- a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs
@@ -32,7 +32,7 @@ namespace Ryujinx.Memory.Tracking
 
         internal MultiRegionHandle(MemoryTracking tracking, ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity)
         {
-            _handles = new RegionHandle[size / granularity];
+            _handles = new RegionHandle[(size + granularity - 1) / granularity];
             Granularity = granularity;
 
             _dirtyBitmap = new ConcurrentBitmap(_handles.Length, true);
@@ -50,7 +50,7 @@ namespace Ryujinx.Memory.Tracking
 
             foreach (RegionHandle handle in handles)
             {
-                int startIndex = (int)((handle.Address - address) / granularity);
+                int startIndex = (int)((handle.RealAddress - address) / granularity);
 
                 // Fill any gap left before this handle.
                 while (i < startIndex)
@@ -72,7 +72,7 @@ namespace Ryujinx.Memory.Tracking
                 }
                 else
                 {
-                    int endIndex = (int)((handle.EndAddress - address) / granularity);
+                    int endIndex = (int)((handle.RealEndAddress - address) / granularity);
 
                     while (i < endIndex)
                     {
@@ -171,12 +171,13 @@ namespace Ryujinx.Memory.Tracking
                         modifiedAction(rgStart, rgSize);
                         rgSize = 0;
                     }
-                    rgStart = handle.Address;
+
+                    rgStart = handle.RealAddress;
                 }
 
                 if (handle.Dirty)
                 {
-                    rgSize += handle.Size;
+                    rgSize += handle.RealSize;
                     handle.Reprotect();
                 }
 
@@ -191,7 +192,7 @@ namespace Ryujinx.Memory.Tracking
             int startHandle = (int)((address - Address) / Granularity);
             int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
 
-            ulong rgStart = _handles[startHandle].Address;
+            ulong rgStart = Address + (ulong)startHandle * Granularity;
 
             if (startHandle == lastHandle)
             {
@@ -200,7 +201,7 @@ namespace Ryujinx.Memory.Tracking
                 if (handle.Dirty)
                 {
                     handle.Reprotect();
-                    modifiedAction(rgStart, handle.Size);
+                    modifiedAction(rgStart, handle.RealSize);
                 }
 
                 return;
@@ -273,10 +274,10 @@ namespace Ryujinx.Memory.Tracking
                         modifiedAction(rgStart, rgSize);
                         rgSize = 0;
                     }
-                    rgStart = handle.Address;
+                    rgStart = handle.RealAddress;
                 }
 
-                rgSize += handle.Size;
+                rgSize += handle.RealSize;
                 handle.Reprotect(false, (checkMasks[index] & bitValue) == 0);
 
                 checkMasks[index] &= ~bitValue;
@@ -320,7 +321,7 @@ namespace Ryujinx.Memory.Tracking
             {
                 handle.Reprotect();
 
-                modifiedAction(rgStart, handle.Size);
+                modifiedAction(rgStart, handle.RealSize);
             }
         }
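The handle-count change above swaps truncating division for ceiling division, so a trailing partial granule still receives its own handle now that sizes arriving here are no longer page aligned up front. A worked example with hypothetical numbers:

static class HandleCountSketch
{
    // Old: size / granularity truncates, dropping a trailing partial granule.
    // New: ceiling division gives that tail its own handle.
    public static ulong HandleCount(ulong size, ulong granularity)
    {
        return (size + granularity - 1) / granularity;
    }
}

// Example: with granularity 0x1000, a size of 0x2800 yields 3 handles
// ((0x2800 + 0xFFF) / 0x1000 == 3), where the old division gave only 2.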
diff --git a/Ryujinx.Memory/Tracking/RegionHandle.cs b/Ryujinx.Memory/Tracking/RegionHandle.cs
index 86c77abc3..580f94a51 100644
--- a/Ryujinx.Memory/Tracking/RegionHandle.cs
+++ b/Ryujinx.Memory/Tracking/RegionHandle.cs
@@ -42,6 +42,10 @@ namespace Ryujinx.Memory.Tracking
         public ulong Size { get; }
         public ulong EndAddress { get; }
 
+        public ulong RealAddress { get; }
+        public ulong RealSize { get; }
+        public ulong RealEndAddress { get; }
+
         internal IMultiRegionHandle Parent { get; set; }
 
         private event Action _onDirty;
@@ -89,10 +93,12 @@ namespace Ryujinx.Memory.Tracking
         /// <param name="tracking">Tracking object for the target memory block</param>
         /// <param name="address">Virtual address of the region to track</param>
         /// <param name="size">Size of the region to track</param>
+        /// <param name="realAddress">The real, unaligned address of the handle</param>
+        /// <param name="realSize">The real, unaligned size of the handle</param>
         /// <param name="bitmap">The bitmap the dirty flag for this handle is stored in</param>
         /// <param name="bit">The bit index representing the dirty flag for this handle</param>
         /// <param name="mapped">True if the region handle starts mapped</param>
-        internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ConcurrentBitmap bitmap, int bit, bool mapped = true)
+        internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong realAddress, ulong realSize, ConcurrentBitmap bitmap, int bit, bool mapped = true)
         {
             Bitmap = bitmap;
             DirtyBit = bit;
@@ -104,6 +110,10 @@ namespace Ryujinx.Memory.Tracking
             Size = size;
             EndAddress = address + size;
 
+            RealAddress = realAddress;
+            RealSize = realSize;
+            RealEndAddress = realAddress + realSize;
+
             _tracking = tracking;
             _regions = tracking.GetVirtualRegionsForHandle(address, size);
             foreach (var region in _regions)
@@ -119,16 +129,23 @@ namespace Ryujinx.Memory.Tracking
         /// <param name="tracking">Tracking object for the target memory block</param>
         /// <param name="address">Virtual address of the region to track</param>
         /// <param name="size">Size of the region to track</param>
+        /// <param name="realAddress">The real, unaligned address of the handle</param>
+        /// <param name="realSize">The real, unaligned size of the handle</param>
         /// <param name="mapped">True if the region handle starts mapped</param>
-        internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, bool mapped = true)
+        internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong realAddress, ulong realSize, bool mapped = true)
        {
             Bitmap = new ConcurrentBitmap(1, mapped);
 
             Unmapped = !mapped;
+
             Address = address;
             Size = size;
             EndAddress = address + size;
 
+            RealAddress = realAddress;
+            RealSize = realSize;
+            RealEndAddress = realAddress + realSize;
+
             _tracking = tracking;
             _regions = tracking.GetVirtualRegionsForHandle(address, size);
             foreach (var region in _regions)
@@ -199,6 +216,10 @@ namespace Ryujinx.Memory.Tracking
 
             if (_preAction != null)
             {
+                // Limit the range to within this handle.
+                ulong maxAddress = Math.Max(address, RealAddress);
+                ulong minEndAddress = Math.Min(address + size, RealAddress + RealSize);
+
                 // Copy the handles list in case it changes when we're out of the lock.
                 if (handleIterable is List<RegionHandle>)
                 {
@@ -212,7 +233,7 @@ namespace Ryujinx.Memory.Tracking
                 {
                     lock (_preActionLock)
                     {
-                        _preAction?.Invoke(address, size);
+                        _preAction?.Invoke(maxAddress, minEndAddress - maxAddress);
 
                         // The action is removed after it returns, to ensure that the null check above succeeds when
                         // it's still in progress rather than continuing and possibly missing a required data flush.
diff --git a/Ryujinx.Tests/Cpu/CpuTest.cs b/Ryujinx.Tests/Cpu/CpuTest.cs
index cafed37da..b64f74668 100644
--- a/Ryujinx.Tests/Cpu/CpuTest.cs
+++ b/Ryujinx.Tests/Cpu/CpuTest.cs
@@ -53,7 +53,7 @@ namespace Ryujinx.Tests.Cpu
             _ram = new MemoryBlock(Size * 2);
             _memory = new MemoryManager(_ram, 1ul << 16);
             _memory.IncrementReferenceCount();
-            _memory.Map(CodeBaseAddress, 0, Size * 2);
+            _memory.Map(CodeBaseAddress, 0, Size * 2, MemoryMapFlags.Private);
 
             _context = CpuContext.CreateExecutionContext();
             Translator.IsReadyForTranslation.Set();
diff --git a/Ryujinx.Tests/Cpu/CpuTest32.cs b/Ryujinx.Tests/Cpu/CpuTest32.cs
index 53fea943d..46ae3c771 100644
--- a/Ryujinx.Tests/Cpu/CpuTest32.cs
+++ b/Ryujinx.Tests/Cpu/CpuTest32.cs
@@ -48,7 +48,7 @@ namespace Ryujinx.Tests.Cpu
             _ram = new MemoryBlock(Size * 2);
             _memory = new MemoryManager(_ram, 1ul << 16);
             _memory.IncrementReferenceCount();
-            _memory.Map(CodeBaseAddress, 0, Size * 2);
+            _memory.Map(CodeBaseAddress, 0, Size * 2, MemoryMapFlags.Private);
 
             _context = CpuContext.CreateExecutionContext();
             _context.IsAarch32 = true;
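The maxAddress/minEndAddress clamp added to the signal path above ensures a whole-page fault only flushes the bytes a sub-page handle actually covers. A standalone sketch of the same arithmetic, with hypothetical values:

using System;

static class SignalClampSketch
{
    // Clamps a signalled range [address, address + size) to the handle's
    // real bounds [realAddress, realAddress + realSize) before flushing.
    public static (ulong Address, ulong Size) Clamp(ulong address, ulong size, ulong realAddress, ulong realSize)
    {
        ulong maxAddress = Math.Max(address, realAddress);
        ulong minEndAddress = Math.Min(address + size, realAddress + realSize);

        return (maxAddress, minEndAddress - maxAddress);
    }
}

// Example: a full-page signal at 0x1000 (size 0x1000) against a handle with
// RealAddress = 0x1010 and RealSize = 0x20 flushes only (0x1010, 0x20).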