Mirror of https://github.com/Ryujinx/Ryujinx.git, synced 2024-11-09 01:58:32 +00:00
ec3e848d79
* Initial Implementation
About as fast as Nvidia's GL multithreading; can be improved with faster command queuing.
* Struct based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally we should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. 😌
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup
Profiling shows this takes less time than the previous approach.
* Bring up to date
* Add settings toggle. Fix Multithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded GAL
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on Nvidia, and only before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use the stutter workaround when using the OpenGL renderer non-threaded
* Add host-conditional reservation of counter events
There has always been a possibility that conditional rendering could use a query object just as the counter queue disposes it. With this change, when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, since the threaded implementation can reserve them before the backend creates them; without this, there would be a short window in which the counter queue could dispose the event before it could be reserved. (See the sketch after this list.)
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread).
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
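
The reservation pattern described in the counter events entry above boils down to two things: disposal is deferred while a reservation is held, and an event can be created already reserved so the queue never gets a chance to dispose it first. The following is a minimal sketch of that idea; ReservableCounterEvent and its members are hypothetical names for illustration, not Ryujinx's actual counter event API.

class ReservableCounterEvent
{
    private readonly object _lock = new object();
    private int _reserveCount;      // Outstanding reservations.
    private bool _disposeRequested; // Set when the counter queue wants to free the event.

    public ReservableCounterEvent(bool startReserved)
    {
        // Starting reserved closes the window where the counter queue could
        // dispose the event before the consumer manages to reserve it.
        _reserveCount = startReserved ? 1 : 0;
    }

    public bool TryReserve()
    {
        lock (_lock)
        {
            if (_disposeRequested)
            {
                return false; // Already being torn down; caller must not use it.
            }

            _reserveCount++;
            return true;
        }
    }

    public void Release()
    {
        lock (_lock)
        {
            if (--_reserveCount == 0 && _disposeRequested)
            {
                DestroyQueryObject(); // Run the deferred disposal.
            }
        }
    }

    public void Dispose()
    {
        lock (_lock)
        {
            if (_reserveCount > 0)
            {
                _disposeRequested = true; // Defer until the last Release().
            }
            else
            {
                DestroyQueryObject();
            }
        }
    }

    private void DestroyQueryObject()
    {
        // Placeholder: the backend query object would be freed here.
    }
}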
90 lines
3.3 KiB
C#
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Threading;

namespace Ryujinx.Graphics.GAL.Multithreading.Model
{
    /// <summary>
    /// A memory pool for passing through Span<T> resources with one producer and consumer.
    /// Data is copied on creation to part of the pool, then that region is reserved until it is disposed by the consumer.
    /// Similar to the command queue, this pool assumes that data is created and disposed in the same order.
    /// </summary>
    class CircularSpanPool
    {
        private ThreadedRenderer _renderer;
        private byte[] _pool;
        private int _size;

        private int _producerPtr;
        private int _producerSkipPosition = -1;
        private int _consumerPtr;

        public CircularSpanPool(ThreadedRenderer renderer, int size)
        {
            _renderer = renderer;
            _size = size;
            _pool = new byte[size];
        }

        public SpanRef<T> Insert<T>(ReadOnlySpan<T> data) where T : unmanaged
        {
            int size = data.Length * Unsafe.SizeOf<T>();

            // Wrapping aware circular queue.
            // If there's no space at the end of the pool for this span, we can't fragment it.
            // So just loop back around to the start. Remember the last skipped position.

            bool wraparound = _producerPtr + size >= _size;
            int index = wraparound ? 0 : _producerPtr;

            // _consumerPtr is from another thread, and we're taking it without a lock, so treat this as a snapshot in the past.
            // We know that it will always be before or equal to the producer pointer, and it cannot pass it.
            // This is enough to reason about if there is space in the queue for the data, even if we're checking against an outdated value.

            int consumer = _consumerPtr;
            bool beforeConsumer = _producerPtr < consumer;

            if (size > _size - 1 || (wraparound && beforeConsumer) || ((index < consumer || wraparound) && index + size >= consumer))
            {
                // Just get an array in the following situations:
                // - The data is too large to fit in the pool.
                // - A wraparound would happen but the consumer would be covered by it.
                // - The producer would catch up to the consumer as a result.

                return new SpanRef<T>(_renderer, data.ToArray());
            }

            data.CopyTo(MemoryMarshal.Cast<byte, T>(new Span<byte>(_pool).Slice(index, size)));

            if (wraparound)
            {
                _producerSkipPosition = _producerPtr;
            }

            _producerPtr = index + size;

            return new SpanRef<T>(data.Length);
        }

        public Span<T> Get<T>(int length) where T : unmanaged
        {
            int size = length * Unsafe.SizeOf<T>();

            if (_consumerPtr == Interlocked.CompareExchange(ref _producerSkipPosition, -1, _consumerPtr))
            {
                _consumerPtr = 0;
            }

            return MemoryMarshal.Cast<byte, T>(new Span<byte>(_pool).Slice(_consumerPtr, size));
        }

        public void Dispose<T>(int length) where T : unmanaged
        {
            int size = length * Unsafe.SizeOf<T>();

            _consumerPtr = _consumerPtr + size;
        }
    }
}
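
To make the producer/consumer contract above concrete, here is a minimal usage sketch. It assumes a ThreadedRenderer instance named renderer and a consumer helper ProcessData, both hypothetical stand-ins; it also assumes the data fits in the pool (in the real code, SpanRef<T> transparently covers the fallback-array case). Insert runs on the producer thread; Get and Dispose run on the consumer thread, in the same order the data was inserted.

// Producer thread: copy the caller's data into the pool and keep only a
// lightweight SpanRef on the queued command.
var pool = new CircularSpanPool(renderer, 4 * 1024 * 1024);
ReadOnlySpan<int> input = stackalloc int[] { 1, 2, 3, 4 };
SpanRef<int> handle = pool.Insert(input);

// Consumer thread, later, in the same order the data was inserted:
Span<int> view = pool.Get<int>(4); // View into the reserved pool region.
ProcessData(view);                 // Hypothetical consumer of the data.
pool.Dispose<int>(4);              // Release the region for the producer to reuse.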