From cbaf3fb433a351f7d9509f17f88d4896ba66afd1 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow <fsahmkow27@gmail.com>
Date: Thu, 11 Nov 2021 21:24:40 +0100
Subject: [PATCH] VideoCore: Update MemoryManager

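Rework the GPU MemoryManager to track page state explicitly instead
of encoding it in reserved PageEntry values. The flat
std::vector<PageEntry> page table is replaced by a
Common::MultiLevelPageTable<u32> that stores, for each mapped GPU
page, its CPU page number (cpu_addr >> 12). Page state (Free,
Reserved or Mapped) lives in a separate bitfield with 2 bits per
page, packed 32 entries to a u64, and Map, Unmap and AllocateFixed
all funnel through the templated PageTableOp.

Address space and page sizes become constructor parameters instead
of compile-time constants. With the defaults (address_space_bits =
40, page_bits = 16) this works out to, roughly:

    page_size          = 1ULL << 16;   // 64 KiB GPU pages
    page table entries = 1ULL << 24;   // one u32 per page, 64 MiB reserved
    first_level_bits   = 40 + 16 - 38; // 18

FindFreeRange now always starts scanning at allocate_start (4 GiB
for an address space wider than 32 bits). On the MultiLevelPageTable
side, first_level_chunk_size is now measured in bytes rather than in
entries, so the range committed by AllocateLevel matches the memory
actually addressed, and ReserveRange derives the last touched
first-level entry directly from start + size. The map_ranges
bookkeeping, BytesToMapEnd and the commented-out page lock/unlock
helpers are removed.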
---
 src/common/multi_level_page_table.cpp |   1 +
 src/common/multi_level_page_table.inc |   7 +-
 src/video_core/memory_manager.cpp     | 147 ++++++++++----------------
 src/video_core/memory_manager.h       |  98 +++++------------
 4 files changed, 86 insertions(+), 167 deletions(-)
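
Note for reviewers: a minimal, self-contained sketch of the 2-bit
page-state packing introduced below (names mirror GetEntry/SetEntry;
illustrative only, this block is not part of the applied diff):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 };

    int main() {
        // 2 bits per page state -> 32 entries per u64 word.
        std::vector<std::uint64_t> entries(64 / 32, 0);

        const auto set_entry = [&](std::size_t page, EntryType entry) {
            const std::uint64_t word = entries[page / 32];
            const std::size_t sub_index = page % 32;
            // Clear the 2-bit slot, then store the new state.
            entries[page / 32] = (~(3ULL << (sub_index * 2)) & word) |
                                 (static_cast<std::uint64_t>(entry) << (sub_index * 2));
        };
        const auto get_entry = [&](std::size_t page) {
            return static_cast<EntryType>((entries[page / 32] >> (2 * (page % 32))) & 0x03ULL);
        };

        set_entry(0, EntryType::Mapped);
        set_entry(33, EntryType::Reserved);
        // Prints 2 1 0: page 5 was never touched, so it stays Free.
        std::printf("%d %d %d\n", static_cast<int>(get_entry(0)),
                    static_cast<int>(get_entry(33)), static_cast<int>(get_entry(5)));
    }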

diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
index 561785ca7..aed04d0b5 100644
--- a/src/common/multi_level_page_table.cpp
+++ b/src/common/multi_level_page_table.cpp
@@ -4,4 +4,5 @@ namespace Common {
 template class Common::MultiLevelPageTable<GPUVAddr>;
 template class Common::MultiLevelPageTable<VAddr>;
 template class Common::MultiLevelPageTable<PAddr>;
+template class Common::MultiLevelPageTable<u32>;
 } // namespace Common
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
index a75e61f9d..7fbcb908a 100644
--- a/src/common/multi_level_page_table.inc
+++ b/src/common/multi_level_page_table.inc
@@ -20,7 +20,7 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
     : address_space_bits{address_space_bits_},
       first_level_bits{first_level_bits_}, page_bits{page_bits_} {
     first_level_shift = address_space_bits - first_level_bits;
-    first_level_chunk_size = 1ULL << (first_level_shift - page_bits);
+    first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
     alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
     std::size_t first_level_size = 1ULL << first_level_bits;
     first_level_map.resize(first_level_size, nullptr);
@@ -53,8 +53,7 @@ MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept {
 template <typename BaseAddr>
 void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
     const u64 new_start = start >> first_level_shift;
-    const u64 new_end =
-        (start + size + (first_level_chunk_size << page_bits) - 1) >> first_level_shift;
+    const u64 new_end = (start + size) >> first_level_shift;
     for (u64 i = new_start; i <= new_end; i++) {
         if (!first_level_map[i]) {
             AllocateLevel(i);
@@ -64,7 +63,7 @@ void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
 
 template <typename BaseAddr>
 void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
-    void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size;
+    void* ptr = reinterpret_cast<char*>(base_ptr) + level * first_level_chunk_size;
 #ifdef _WIN32
     void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
 #else
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index a3efd365e..1e090279f 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -16,36 +16,63 @@
 
 namespace Tegra {
 
-MemoryManager::MemoryManager(Core::System& system_)
-    : system{system_}, page_table(page_table_size) {}
+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 page_bits_)
+    : system{system_}, address_space_bits{address_space_bits_}, page_bits{page_bits_}, entries{},
+      page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits} {
+    address_space_size = 1ULL << address_space_bits;
+    allocate_start = address_space_bits > 32 ? 1ULL << 32 : 0;
+    page_size = 1ULL << page_bits;
+    page_mask = page_size - 1ULL;
+    const u64 page_table_bits = address_space_bits - cpu_page_bits;
+    const u64 page_table_size = 1ULL << page_table_bits;
+    page_table_mask = page_table_size - 1;
+
+    entries.resize(page_table_size / 32, 0);
+}
 
 MemoryManager::~MemoryManager() = default;
 
-void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
-    rasterizer = rasterizer_;
+MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
+    position = position >> page_bits;
+    const u64 entry_mask = entries[position / 32];
+    const size_t sub_index = position % 32;
+    return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
 }
 
-GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
+void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
+    position = position >> page_bits;
+    const u64 entry_mask = entries[position / 32];
+    const size_t sub_index = position % 32;
+    entries[position / 32] =
+        (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
+}
+
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+                                    size_t size) {
     u64 remaining_size{size};
+    if constexpr (entry_type == EntryType::Mapped) {
+        page_table.ReserveRange(gpu_addr, size);
+    }
     for (u64 offset{}; offset < size; offset += page_size) {
-        if (remaining_size < page_size) {
-            SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
-        } else {
-            SetPageEntry(gpu_addr + offset, page_entry + offset);
+        const GPUVAddr current_gpu_addr = gpu_addr + offset;
+        SetEntry(current_gpu_addr, entry_type);
+        if constexpr (entry_type == EntryType::Mapped) {
+            const VAddr current_cpu_addr = cpu_addr + offset;
+            const auto index = PageEntryIndex(current_gpu_addr);
+            page_table[index] = static_cast<u32>(current_cpu_addr >> 12ULL);
         }
         remaining_size -= page_size;
     }
     return gpu_addr;
 }
 
+void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+    rasterizer = rasterizer_;
+}
+
 GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
-    const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
-    if (it != map_ranges.end() && it->first == gpu_addr) {
-        it->second = size;
-    } else {
-        map_ranges.insert(it, MapRange{gpu_addr, size});
-    }
-    return UpdateRange(gpu_addr, cpu_addr, size);
+    return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
 }
 
 GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
@@ -62,13 +89,6 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     if (size == 0) {
         return;
     }
-    const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
-    if (it != map_ranges.end()) {
-        ASSERT(it->first == gpu_addr);
-        map_ranges.erase(it);
-    } else {
-        ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
-    }
     const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
 
     for (const auto& [map_addr, map_size] : submapped_ranges) {
@@ -79,63 +99,23 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
         rasterizer->UnmapMemory(*cpu_addr, map_size);
     }
 
-    UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
+    PageTableOp<EntryType::Free>(gpu_addr, 0, size);
 }
 
 std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
     for (u64 offset{}; offset < size; offset += page_size) {
-        if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
+        if (GetEntry(gpu_addr + offset) != EntryType::Free) {
             return std::nullopt;
         }
     }
 
-    return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
+    return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
 }
 
 GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
     return *AllocateFixed(*FindFreeRange(size, align), size);
 }
 
-void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
-    if (!page_entry.IsValid()) {
-        return;
-    }
-
-    ASSERT(system.CurrentProcess()
-               ->PageTable()
-               .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
-               .IsSuccess());
-}
-
-void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
-    if (!page_entry.IsValid()) {
-        return;
-    }
-
-    ASSERT(system.CurrentProcess()
-               ->PageTable()
-               .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
-               .IsSuccess());
-}
-
-PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
-    return page_table[PageEntryIndex(gpu_addr)];
-}
-
-void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
-    // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
-    // improper tracking, but should be fixed in the future.
-
-    //// Unlock the old page
-    // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
-
-    //// Lock the new page
-    // TryLockPage(page_entry, size);
-    auto& current_page = page_table[PageEntryIndex(gpu_addr)];
-
-    current_page = page_entry;
-}
-
 std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align,
                                                      bool start_32bit_address) const {
     if (!align) {
@@ -145,9 +125,9 @@ std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size
     }
 
     u64 available_size{};
-    GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start};
+    GPUVAddr gpu_addr{allocate_start};
     while (gpu_addr + available_size < address_space_size) {
-        if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
+        if (GetEntry(gpu_addr + available_size) == EntryType::Free) {
             available_size += page_size;
 
             if (available_size >= size) {
@@ -168,15 +148,12 @@ std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
-    if (gpu_addr == 0) {
-        return std::nullopt;
-    }
-    const auto page_entry{GetPageEntry(gpu_addr)};
-    if (!page_entry.IsValid()) {
+    if (GetEntry(gpu_addr) != EntryType::Mapped) {
         return std::nullopt;
     }
 
-    return page_entry.ToAddress() + (gpu_addr & page_mask);
+    const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex(gpu_addr)]) << 12ULL;
+    return cpu_addr_base + (gpu_addr & page_mask);
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -227,10 +204,6 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
 template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
 
 u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
-    if (!GetPageEntry(gpu_addr).IsValid()) {
-        return {};
-    }
-
     const auto address{GpuToCpuAddress(gpu_addr)};
     if (!address) {
         return {};
@@ -240,10 +213,6 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
 }
 
 const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
-    if (!GetPageEntry(gpu_addr).IsValid()) {
-        return {};
-    }
-
     const auto address{GpuToCpuAddress(gpu_addr)};
     if (!address) {
         return {};
@@ -252,12 +221,6 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
     return system.Memory().GetPointer(*address);
 }
 
-size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
-    auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first);
-    --it;
-    return it->second - (gpu_addr - it->first);
-}
-
 void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
                                   bool is_safe) const {
     std::size_t remaining_size{size};
@@ -268,7 +231,7 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std:
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
         const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (page_addr && *page_addr != 0) {
+        if (page_addr) {
             const auto src_addr{*page_addr + page_offset};
             if (is_safe) {
                 // Flush must happen on the rasterizer interface, such that memory is always
@@ -307,7 +270,7 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
         const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (page_addr && *page_addr != 0) {
+        if (page_addr) {
             const auto dest_addr{*page_addr + page_offset};
 
             if (is_safe) {
@@ -392,7 +355,7 @@ bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) cons
     size_t page_index{gpu_addr >> page_bits};
     const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
     while (page_index < page_last) {
-        if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) {
+        if (GetEntry(page_index << page_bits) == EntryType::Free) {
             return false;
         }
         ++page_index;
@@ -408,7 +371,7 @@ std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     size_t page_offset{gpu_addr & page_mask};
     std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
     std::optional<VAddr> old_page_addr{};
-    const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) {
+    const auto extend_size = [this, &last_segment, &page_index, &page_offset](std::size_t bytes) {
         if (!last_segment) {
             const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset;
             last_segment = {new_base_addr, bytes};
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 74f9ce175..0a763fd19 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "common/multi_level_page_table.h"
 
 namespace VideoCore {
 class RasterizerInterface;
@@ -19,55 +20,10 @@ class System;
 
 namespace Tegra {
 
-class PageEntry final {
-public:
-    enum class State : u32 {
-        Unmapped = static_cast<u32>(-1),
-        Allocated = static_cast<u32>(-2),
-    };
-
-    constexpr PageEntry() = default;
-    constexpr PageEntry(State state_) : state{state_} {}
-    constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
-
-    [[nodiscard]] constexpr bool IsUnmapped() const {
-        return state == State::Unmapped;
-    }
-
-    [[nodiscard]] constexpr bool IsAllocated() const {
-        return state == State::Allocated;
-    }
-
-    [[nodiscard]] constexpr bool IsValid() const {
-        return !IsUnmapped() && !IsAllocated();
-    }
-
-    [[nodiscard]] constexpr VAddr ToAddress() const {
-        if (!IsValid()) {
-            return {};
-        }
-
-        return static_cast<VAddr>(state) << ShiftBits;
-    }
-
-    [[nodiscard]] constexpr PageEntry operator+(u64 offset) const {
-        // If this is a reserved value, offsets do not apply
-        if (!IsValid()) {
-            return *this;
-        }
-        return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
-    }
-
-private:
-    static constexpr std::size_t ShiftBits{12};
-
-    State state{State::Unmapped};
-};
-static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
-
 class MemoryManager final {
 public:
-    explicit MemoryManager(Core::System& system_);
+    explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
+                           u64 page_bits_ = 16);
     ~MemoryManager();
 
     /// Binds a renderer to the memory manager.
@@ -86,9 +42,6 @@ public:
     [[nodiscard]] u8* GetPointer(GPUVAddr addr);
     [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
 
-    /// Returns the number of bytes until the end of the memory map containing the given GPU address
-    [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
-
     /**
      * ReadBlock and WriteBlock are full read and write operations over virtual
      * GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -145,44 +98,47 @@ public:
     void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
 
 private:
-    [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
-    void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
-    GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size);
     [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align,
                                                         bool start_32bit_address = false) const;
 
-    void TryLockPage(PageEntry page_entry, std::size_t size);
-    void TryUnlockPage(PageEntry page_entry, std::size_t size);
-
     void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
                        bool is_safe) const;
     void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
                         bool is_safe);
 
-    [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
+    [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
         return (gpu_addr >> page_bits) & page_table_mask;
     }
 
-    static constexpr u64 address_space_size = 1ULL << 40;
-    static constexpr u64 address_space_start = 1ULL << 32;
-    static constexpr u64 address_space_start_low = 1ULL << 16;
-    static constexpr u64 page_bits{16};
-    static constexpr u64 page_size{1 << page_bits};
-    static constexpr u64 page_mask{page_size - 1};
-    static constexpr u64 page_table_bits{24};
-    static constexpr u64 page_table_size{1 << page_table_bits};
-    static constexpr u64 page_table_mask{page_table_size - 1};
-
     Core::System& system;
 
+    const u64 address_space_bits;
+    const u64 page_bits;
+    u64 address_space_size;
+    u64 allocate_start;
+    u64 page_size;
+    u64 page_mask;
+    u64 page_table_mask;
+    static constexpr u64 cpu_page_bits{12};
+
     VideoCore::RasterizerInterface* rasterizer = nullptr;
 
-    std::vector<PageEntry> page_table;
+    enum class EntryType : u64 {
+        Free = 0,
+        Reserved = 1,
+        Mapped = 2,
+    };
 
-    using MapRange = std::pair<GPUVAddr, size_t>;
-    std::vector<MapRange> map_ranges;
+    std::vector<u64> entries;
 
-    std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue;
+    template <EntryType entry_type>
+    GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
+
+    EntryType GetEntry(size_t position) const;
+
+    void SetEntry(size_t position, EntryType entry);
+
+    Common::MultiLevelPageTable<u32> page_table;
 };
 
 } // namespace Tegra