Memory: move states into class
This commit is contained in:
parent d18cda5a5d
commit 42edab01d9
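Every hunk below applies the same refactor: byte arrays and the page-table pointer that used to be file-scope globals in the Memory namespace (fcram, vram, n3ds_extra_ram, current_page_table) become members of MemorySystem, and call sites reach them through an owning object instead (kernel.memory inside Process, memory inside KernelSystem). A minimal, self-contained sketch of that pattern, assuming nothing beyond the standard library; the names OwnedMemory and Consumer are illustrative stand-ins and are not part of this commit:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for Memory::MemorySystem: the buffer is owned
// state of an object instead of a mutable file-scope global.
struct OwnedMemory {
    std::array<std::uint8_t, 64> fcram{}; // value-initialized member, was a global array
};

// Hypothetical stand-in for a kernel-side object that used to touch the
// global directly; it now holds a reference to the memory system it was given.
class Consumer {
public:
    explicit Consumer(OwnedMemory& memory) : memory(memory) {}

    // Mirrors the call-site change in the diff: Memory::fcram -> memory.fcram
    void ZeroRegion(std::size_t lower, std::size_t upper) {
        std::fill(memory.fcram.begin() + lower, memory.fcram.begin() + upper, 0);
    }

private:
    OwnedMemory& memory;
};

int main() {
    OwnedMemory memory;      // the state now has a single, explicit owner
    Consumer consumer{memory};
    consumer.ZeroRegion(0, 32);
    return 0;
}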
@@ -188,10 +188,11 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
         u32 interval_size = interval.upper() - interval.lower();
         LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
                   interval.upper());
-        std::fill(Memory::fcram.begin() + interval.lower(),
-                  Memory::fcram.begin() + interval.upper(), 0);
-        auto vma = vm_manager.MapBackingMemory(
-            interval_target, Memory::fcram.data() + interval.lower(), interval_size, memory_state);
+        std::fill(kernel.memory.fcram.begin() + interval.lower(),
+                  kernel.memory.fcram.begin() + interval.upper(), 0);
+        auto vma = vm_manager.MapBackingMemory(interval_target,
+                                               kernel.memory.fcram.data() + interval.lower(),
+                                               interval_size, memory_state);
         ASSERT(vma.Succeeded());
         vm_manager.Reprotect(vma.Unwrap(), perms);
         interval_target += interval_size;
@@ -262,7 +263,7 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
         }
     }
 
-    u8* backing_memory = Memory::fcram.data() + physical_offset;
+    u8* backing_memory = kernel.memory.fcram.data() + physical_offset;
 
     std::fill(backing_memory, backing_memory + size, 0);
     auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
@@ -43,8 +43,8 @@ ResultVal<SharedPtr<SharedMemory>> KernelSystem::CreateSharedMemory(
 
         ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
 
-        std::fill(Memory::fcram.data() + *offset, Memory::fcram.data() + *offset + size, 0);
-        shared_memory->backing_blocks = {{Memory::fcram.data() + *offset, size}};
+        std::fill(memory.fcram.data() + *offset, memory.fcram.data() + *offset + size, 0);
+        shared_memory->backing_blocks = {{memory.fcram.data() + *offset, size}};
         shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
         shared_memory->linear_heap_phys_offset = *offset;
 
@@ -86,8 +86,8 @@ SharedPtr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
     shared_memory->other_permissions = other_permissions;
     for (const auto& interval : backing_blocks) {
         shared_memory->backing_blocks.push_back(
-            {Memory::fcram.data() + interval.lower(), interval.upper() - interval.lower()});
-        std::fill(Memory::fcram.data() + interval.lower(), Memory::fcram.data() + interval.upper(),
+            {memory.fcram.data() + interval.lower(), interval.upper() - interval.lower()});
+        std::fill(memory.fcram.data() + interval.lower(), memory.fcram.data() + interval.upper(),
                   0);
     }
     shared_memory->base_address = Memory::HEAP_VADDR + offset;
@@ -355,7 +355,7 @@ ResultVal<SharedPtr<Thread>> KernelSystem::CreateThread(std::string name, VAddr
 
         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
-                                    Memory::fcram.data() + *offset, Memory::PAGE_SIZE,
+                                    memory.fcram.data() + *offset, Memory::PAGE_SIZE,
                                     MemoryState::Locked);
     }
 
@@ -21,12 +21,6 @@
 
 namespace Memory {
 
-static std::array<u8, Memory::VRAM_SIZE> vram;
-static std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
-std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram;
-
-static PageTable* current_page_table = nullptr;
-
 void MemorySystem::SetCurrentPageTable(PageTable* page_table) {
     current_page_table = page_table;
     if (Core::System::GetInstance().IsPoweredOn()) {
@@ -78,13 +72,7 @@ void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
-/**
- * Gets the pointer for virtual memory where the page is marked as RasterizerCachedMemory.
- * This is used to access the memory where the page pointer is nullptr due to rasterizer cache.
- * Since the cache only happens on linear heap or VRAM, we know the exact physical address and
- * pointer of such virtual address
- */
-static u8* GetPointerForRasterizerCache(VAddr addr) {
+u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
     if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
         return fcram.data() + (addr - LINEAR_HEAP_VADDR);
     }
@@ -178,8 +178,6 @@ enum : VAddr {
     NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE,
 };
 
-extern std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram;
-
 /**
  * Flushes any externally cached rasterizer resources touching the given region.
  */
@@ -258,12 +256,27 @@ public:
      */
     void RasterizerMarkRegionCached(PAddr start, u32 size, bool cached);
 
+    std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram{};
+
 private:
     template <typename T>
     T Read(const VAddr vaddr);
 
     template <typename T>
     void Write(const VAddr vaddr, const T data);
+
+    /**
+     * Gets the pointer for virtual memory where the page is marked as RasterizerCachedMemory.
+     * This is used to access the memory where the page pointer is nullptr due to rasterizer cache.
+     * Since the cache only happens on linear heap or VRAM, we know the exact physical address and
+     * pointer of such virtual address
+     */
+    u8* GetPointerForRasterizerCache(VAddr addr);
+
+    std::array<u8, Memory::VRAM_SIZE> vram{};
+    std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram{};
+
+    PageTable* current_page_table = nullptr;
 };
 
 } // namespace Memory