Mirror of https://github.com/Ryujinx/libsoundio.git (synced 2024-12-22 22:15:29 +00:00)
WASAPI: microphone example works
parent 90fa377c99
commit b3dfcb4526
README.md (17 lines changed)
@@ -277,9 +277,11 @@ view `coverage/index.html` in a browser.

## Roadmap

 0. implement WASAPI (Windows) backend, get examples working
    - sine wave (raw device)
    - microphone
    - set display name of output stream
    - move the bulk of the `outstream_open_wasapi` code to the thread and
      have the two communicate back and forth, because the thread has to do
      thread-local COM setup and all of that COM work really needs to be
      called from the same thread (see the sketch below)
 0. Make sure PulseAudio can handle refresh devices crashing before
    `block_until_have_devices`
 0. Integrate into libgroove and test with Groove Basin
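The roadmap item about moving the open code onto the stream thread comes down to COM apartment state being thread-local. Here is a minimal sketch of that pattern in plain Win32 C (the `com_worker` type and function names are made up for illustration; this is not libsoundio code): the worker thread runs `CoInitializeEx` on itself, signals an event so the opener can learn whether initialization succeeded, does all of its COM work, and finishes with `CoUninitialize`.

```c
/* Sketch only: all COM work for a stream stays on one dedicated thread,
 * because COM apartment state is per-thread. Names here are hypothetical. */
#include <windows.h>
#include <objbase.h>

struct com_worker {
    HANDLE thread;
    HANDLE ready_event;   /* signaled once CoInitializeEx has run on the worker */
    HRESULT init_result;
};

static DWORD WINAPI com_worker_run(LPVOID arg) {
    struct com_worker *w = (struct com_worker *)arg;
    /* Must happen on this thread; the opener's COM init does not carry over. */
    w->init_result = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
    SetEvent(w->ready_event);
    if (FAILED(w->init_result))
        return 1;
    /* ...all IMMDevice / IAudioClient calls would happen here... */
    CoUninitialize();
    return 0;
}

static int com_worker_start(struct com_worker *w) {
    w->ready_event = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!w->ready_event)
        return -1;
    w->thread = CreateThread(NULL, 0, com_worker_run, w, 0, NULL);
    if (!w->thread)
        return -1;
    /* Block until the worker has attempted COM init, then report its result. */
    WaitForSingleObject(w->ready_event, INFINITE);
    return FAILED(w->init_result) ? -1 : 0;
}
```

Whether the opener blocks on an event, as here, or exchanges messages with the thread is a design choice; the constraint is only that every COM call for the stream happens on the thread that called `CoInitializeEx`.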
@@ -289,6 +291,8 @@ view `coverage/index.html` in a browser.

   If not, might need to have the xrun callback set a flag and have the process
   callback call the underflow callback.
 0. Create a test for pausing and resuming input and output streams.
    - Should pause/resume be callable from outside the callbacks?
    - Ensure double pausing / double resuming works fine.
 0. Create a test for the latency / synchronization API.
    - Input is an audio file and some events indexed at particular frames; when
      listening, the events should line up exactly with a beat or visual
@@ -299,16 +303,11 @@ view `coverage/index.html` in a browser.

 0. Create a test for input stream overflow handling.
 0. Allow calling functions from outside the callbacks as long as they first
    call lock and then unlock when done.
 0. Should pause/resume be callable from outside the callbacks?
 0. clean up API and improve documentation
    - make sure every function which can return an error documents which errors
      it can return
 0. use a documentation generator and host the docs somewhere
 0. add len arguments to APIs that have char *
    - replace strdup with `soundio_str_dupe`
 0. Support PulseAudio proplist properties for main context and streams
 0. Expose JACK options in `jack_client_open`
 0. custom allocator support
 0. mlock memory which is accessed in the real time path
 0. make rtprio warning a callback and have existing behavior be the default callback
 0. write detailed docs on buffer underflows explaining when they occur, what state
@@ -317,10 +316,14 @@ view `coverage/index.html` in a browser.

 0. In ALSA do we need to wake up the poll when destroying the in or out stream?
 0. Detect PulseAudio server going offline and emit `on_backend_disconnect`.
 0. Add [sndio](http://www.sndio.org/) backend to support OpenBSD.
 0. Custom allocator support
 0. Support for stream icon.
    - PulseAudio: XDG icon name
    - WASAPI: path to .exe, .dll, or .ico
    - CoreAudio: CFURLRef image file
 0. clean up API and improve documentation
    - make sure every function which can return an error documents which errors
      it can return

## Planned Uses for libsoundio
@@ -313,8 +313,10 @@ int main(int argc, char **argv) {
     instream->software_latency = microphone_latency;
     instream->read_callback = read_callback;

-    if ((err = soundio_instream_open(instream)))
-        panic("unable to open input stream: %s", soundio_strerror(err));
+    if ((err = soundio_instream_open(instream))) {
+        fprintf(stderr, "unable to open input stream: %s", soundio_strerror(err));
+        return 1;
+    }

     struct SoundIoOutStream *outstream = soundio_outstream_create(out_device);
     if (!outstream)
@@ -326,8 +328,10 @@ int main(int argc, char **argv) {
     outstream->write_callback = write_callback;
     outstream->underflow_callback = underflow_callback;

-    if ((err = soundio_outstream_open(outstream)))
-        panic("unable to open output stream: %s", soundio_strerror(err));
+    if ((err = soundio_outstream_open(outstream))) {
+        fprintf(stderr, "unable to open output stream: %s", soundio_strerror(err));
+        return 1;
+    }

     int capacity = microphone_latency * 2 * instream->sample_rate * instream->bytes_per_frame;
     ring_buffer = soundio_ring_buffer_create(soundio, capacity);
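For a concrete feel of the capacity line above (the numbers are assumed for illustration, not taken from the example's defaults): with a 0.2 second microphone latency, a 48000 Hz sample rate, and 8 bytes per frame (stereo float), the ring buffer holds two latencies' worth of audio, 0.2 × 2 × 48000 × 8 = 153600 bytes.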
@@ -765,6 +765,8 @@ SOUNDIO_EXPORT int soundio_outstream_clear_buffer(struct SoundIoOutStream *outstream);
 // prevents `write_callback` from being called. Otherwise this returns
 // `SoundIoErrorIncompatibleDevice`.
 // You must call this function only from the `write_callback` thread context.
+// Pausing when already paused or unpausing when already unpaused has no
+// effect and always returns SoundIoErrorNone.
 SOUNDIO_EXPORT int soundio_outstream_pause(struct SoundIoOutStream *outstream, bool pause);

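A short usage sketch of the pause semantics documented above (the callback body and the `should_pause` flag are illustrative assumptions, not taken from libsoundio's examples): pausing from inside `write_callback` is legal, and calling it again while already paused is a documented no-op.

```c
#include <soundio/soundio.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag set elsewhere in the program. */
static volatile bool should_pause = false;

static void write_callback(struct SoundIoOutStream *outstream,
        int frame_count_min, int frame_count_max)
{
    /* ...normally soundio_outstream_begin_write / end_write would go here... */

    if (should_pause) {
        /* Called from the write_callback thread context, as required; repeating
         * the call while already paused returns SoundIoErrorNone. */
        int err = soundio_outstream_pause(outstream, true);
        if (err)
            fprintf(stderr, "pause failed: %s\n", soundio_strerror(err));
    }
}
```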
@@ -820,6 +822,8 @@ SOUNDIO_EXPORT int soundio_instream_end_read(struct SoundIoInStream *instream);
 // prevents `read_callback` from being called. Otherwise this returns
 // `SoundIoErrorIncompatibleDevice`.
 // You must call this function only from the `read_callback` thread context.
+// Pausing when already paused or unpausing when already unpaused has no
+// effect and always returns SoundIoErrorNone.
 SOUNDIO_EXPORT int soundio_instream_pause(struct SoundIoInStream *instream, bool pause);

src/wasapi.cpp (311 lines changed)
@@ -788,7 +788,9 @@ static int refresh_devices(SoundIoPrivate *si) {
        rd.device_shared->current_format = from_wave_format_format(rd.wave_format);


        // WASAPI performs resampling in shared mode, so any value is valid.
        if (rd.device_shared->aim == SoundIoDeviceAimOutput) {
            // For output streams in shared mode,
            // WASAPI performs resampling, so any value is valid.
            // Let's pick some reasonable min and max values.
            rd.device_shared->sample_rate_count = 1;
            rd.device_shared->sample_rates = &dev_shared->prealloc_sample_rate_range;
@@ -796,6 +798,13 @@ static int refresh_devices(SoundIoPrivate *si) {
                rd.device_shared->sample_rate_current);
            rd.device_shared->sample_rates[0].max = max(SOUNDIO_MAX_SAMPLE_RATE,
                rd.device_shared->sample_rate_current);
        } else {
            // Shared mode input stream: mix format is all we can do.
            rd.device_shared->sample_rate_count = 1;
            rd.device_shared->sample_rates = &dev_shared->prealloc_sample_rate_range;
            rd.device_shared->sample_rates[0].min = rd.device_shared->sample_rate_current;
            rd.device_shared->sample_rates[0].max = rd.device_shared->sample_rate_current;
        }

        if ((err = detect_valid_formats(&rd, rd.wave_format, dev_shared,
                AUDCLNT_SHAREMODE_SHARED)))
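Through the public API, the comments above mean that a shared-mode output device ends up advertising one wide sample-rate range (WASAPI resamples), while a shared-mode input device advertises a single rate equal to the mix format's. A sketch of how a client might use those ranges follows; the `pick_sample_rate` helper is made up, but the `SoundIoDevice` fields are from the public header.

```c
#include <soundio/soundio.h>

/* Hypothetical helper: keep the preferred rate if the device advertises a
 * range containing it, otherwise fall back to the first advertised rate. */
static int pick_sample_rate(struct SoundIoDevice *device, int preferred) {
    for (int i = 0; i < device->sample_rate_count; i += 1) {
        struct SoundIoSampleRateRange range = device->sample_rates[i];
        if (preferred >= range.min && preferred <= range.max)
            return preferred;   /* e.g. shared-mode output: wide min/max range */
    }
    /* e.g. shared-mode input: exactly one range with min == max (mix format) */
    return device->sample_rate_count > 0 ? device->sample_rates[0].min : preferred;
}
```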
@@ -1014,7 +1023,10 @@ static int outstream_open_wasapi(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {

    if (FAILED(hr = CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED))) {
        outstream_destroy_wasapi(si, os);
        if (hr == E_OUTOFMEMORY)
            return SoundIoErrorNoMem;
        else
            return SoundIoErrorOpeningDevice;
    }

    osw->is_raw = device->is_raw;
@@ -1174,8 +1186,15 @@ static int outstream_open_wasapi(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {
static int outstream_pause_wasapi(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os, bool pause) {
    SoundIoOutStreamWasapi *osw = &os->backend_data.wasapi;
    HRESULT hr;
    if (pause && !osw->is_paused) {
        if (FAILED(hr = IAudioClient_Stop(osw->audio_client)))
            return SoundIoErrorStreaming;
        osw->is_paused = true;
    } else if (!pause && osw->is_paused) {
        if (FAILED(hr = IAudioClient_Start(osw->audio_client)))
            return SoundIoErrorStreaming;
        osw->is_paused = false;
    }
    return 0;
}
@@ -1320,29 +1339,307 @@ static int outstream_clear_buffer_wasapi(struct SoundIoPrivate *si, struct SoundIoOutStreamPrivate *os) {


 static void instream_destroy_wasapi(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
-    soundio_panic("TODO");
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+
+    if (isw->thread) {
+        isw->thread_exit_flag.clear();
+        if (isw->is_raw) {
+            if (isw->h_event)
+                SetEvent(isw->h_event);
+        } else {
+            soundio_os_mutex_lock(isw->mutex);
+            soundio_os_cond_signal(isw->cond, isw->mutex);
+            soundio_os_mutex_unlock(isw->mutex);
+        }
+        soundio_os_thread_destroy(isw->thread);
+    }
+
+    if (isw->audio_capture_client)
+        IUnknown_Release(isw->audio_capture_client);
+    if (isw->audio_client)
+        IUnknown_Release(isw->audio_client);
+    if (isw->h_event)
+        CloseHandle(isw->h_event);
+
+    soundio_os_cond_destroy(isw->cond);
+    soundio_os_mutex_destroy(isw->mutex);
+
+    CoUninitialize();
 }

 static int instream_open_wasapi(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
-    soundio_panic("TODO");
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+    SoundIoInStream *instream = &is->pub;
+    SoundIoDevice *device = instream->device;
+    SoundIoDevicePrivate *dev = (SoundIoDevicePrivate *)device;
+    SoundIoDeviceWasapi *dw = &dev->backend_data.wasapi;
+    HRESULT hr;
+
+    if (FAILED(hr = CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED))) {
+        instream_destroy_wasapi(si, is);
+        if (hr == E_OUTOFMEMORY)
+            return SoundIoErrorNoMem;
+        else
+            return SoundIoErrorOpeningDevice;
+    }
+
+    isw->is_raw = device->is_raw;
+
+    if (!(isw->cond = soundio_os_cond_create())) {
+        instream_destroy_wasapi(si, is);
+        return SoundIoErrorNoMem;
+    }
+
+    if (!(isw->mutex = soundio_os_mutex_create())) {
+        instream_destroy_wasapi(si, is);
+        return SoundIoErrorNoMem;
+    }
+
+    if (FAILED(hr = IMMDevice_Activate(dw->mm_device, IID_IAudioClient,
+            CLSCTX_ALL, nullptr, (void**)&isw->audio_client)))
+    {
+        instream_destroy_wasapi(si, is);
+        return SoundIoErrorOpeningDevice;
+    }
+
+    AUDCLNT_SHAREMODE share_mode;
+    DWORD flags;
+    REFERENCE_TIME buffer_duration;
+    REFERENCE_TIME periodicity;
+    WAVEFORMATEXTENSIBLE wave_format = {0};
+    wave_format.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+    wave_format.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+    if (isw->is_raw) {
+        wave_format.Format.nSamplesPerSec = instream->sample_rate;
+        flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+        share_mode = AUDCLNT_SHAREMODE_EXCLUSIVE;
+        periodicity = to_reference_time(dw->period_duration);
+        buffer_duration = periodicity;
+    } else {
+        WAVEFORMATEXTENSIBLE *mix_format;
+        if (FAILED(hr = IAudioClient_GetMixFormat(isw->audio_client, (WAVEFORMATEX **)&mix_format))) {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorOpeningDevice;
+        }
+        wave_format.Format.nSamplesPerSec = mix_format->Format.nSamplesPerSec;
+        CoTaskMemFree(mix_format);
+        mix_format = nullptr;
+        if (wave_format.Format.nSamplesPerSec != (DWORD)instream->sample_rate) {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorIncompatibleDevice;
+        }
+        flags = 0;
+        share_mode = AUDCLNT_SHAREMODE_SHARED;
+        periodicity = 0;
+        buffer_duration = to_reference_time(4.0);
+    }
+    to_wave_format_layout(&instream->layout, &wave_format);
+    to_wave_format_format(instream->format, &wave_format);
+    complete_wave_format_data(&wave_format);
+
+    if (FAILED(hr = IAudioClient_Initialize(isw->audio_client, share_mode, flags,
+            buffer_duration, periodicity, (WAVEFORMATEX*)&wave_format, nullptr)))
+    {
+        if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
+            if (FAILED(hr = IAudioClient_GetBufferSize(isw->audio_client, &isw->buffer_frame_count))) {
+                instream_destroy_wasapi(si, is);
+                return SoundIoErrorOpeningDevice;
+            }
+            IUnknown_Release(isw->audio_client);
+            isw->audio_client = nullptr;
+            if (FAILED(hr = IMMDevice_Activate(dw->mm_device, IID_IAudioClient,
+                    CLSCTX_ALL, nullptr, (void**)&isw->audio_client)))
+            {
+                instream_destroy_wasapi(si, is);
+                return SoundIoErrorOpeningDevice;
+            }
+            if (!isw->is_raw) {
+                WAVEFORMATEXTENSIBLE *mix_format;
+                if (FAILED(hr = IAudioClient_GetMixFormat(isw->audio_client, (WAVEFORMATEX **)&mix_format))) {
+                    instream_destroy_wasapi(si, is);
+                    return SoundIoErrorOpeningDevice;
+                }
+                wave_format.Format.nSamplesPerSec = mix_format->Format.nSamplesPerSec;
+                CoTaskMemFree(mix_format);
+                mix_format = nullptr;
+                flags = 0;
+                to_wave_format_layout(&instream->layout, &wave_format);
+                to_wave_format_format(instream->format, &wave_format);
+                complete_wave_format_data(&wave_format);
+            }
+
+            buffer_duration = to_reference_time(isw->buffer_frame_count / (double)instream->sample_rate);
+            if (isw->is_raw)
+                periodicity = buffer_duration;
+            if (FAILED(hr = IAudioClient_Initialize(isw->audio_client, share_mode, flags,
+                    buffer_duration, periodicity, (WAVEFORMATEX*)&wave_format, nullptr)))
+            {
+                if (hr == AUDCLNT_E_UNSUPPORTED_FORMAT) {
+                    instream_destroy_wasapi(si, is);
+                    return SoundIoErrorIncompatibleDevice;
+                } else if (hr == E_OUTOFMEMORY) {
+                    instream_destroy_wasapi(si, is);
+                    return SoundIoErrorNoMem;
+                } else {
+                    instream_destroy_wasapi(si, is);
+                    return SoundIoErrorOpeningDevice;
+                }
+            }
+        } else if (hr == AUDCLNT_E_UNSUPPORTED_FORMAT) {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorIncompatibleDevice;
+        } else if (hr == E_OUTOFMEMORY) {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorNoMem;
+        } else {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorOpeningDevice;
+        }
+    }
+    if (FAILED(hr = IAudioClient_GetBufferSize(isw->audio_client, &isw->buffer_frame_count))) {
+        instream_destroy_wasapi(si, is);
+        return SoundIoErrorOpeningDevice;
+    }
+    if (isw->is_raw)
+        instream->software_latency = isw->buffer_frame_count / (double)instream->sample_rate;
+
+    if (isw->is_raw) {
+        isw->h_event = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+        if (!isw->h_event) {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorOpeningDevice;
+        }
+        if (FAILED(hr = IAudioClient_SetEventHandle(isw->audio_client, isw->h_event))) {
+            instream_destroy_wasapi(si, is);
+            return SoundIoErrorOpeningDevice;
+        }
+    }
+
+    if (FAILED(hr = IAudioClient_GetService(isw->audio_client, IID_IAudioCaptureClient,
+            (void **)&isw->audio_capture_client)))
+    {
+        instream_destroy_wasapi(si, is);
+        return SoundIoErrorOpeningDevice;
+    }
+
+    return 0;
 }

 static int instream_pause_wasapi(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is, bool pause) {
-    soundio_panic("TODO");
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+    HRESULT hr;
+    if (pause && !isw->is_paused) {
+        if (FAILED(hr = IAudioClient_Stop(isw->audio_client)))
+            return SoundIoErrorStreaming;
+        isw->is_paused = true;
+    } else if (!pause && isw->is_paused) {
+        if (FAILED(hr = IAudioClient_Start(isw->audio_client)))
+            return SoundIoErrorStreaming;
+        isw->is_paused = false;
+    }
+    return 0;
 }

+static void instream_raw_run(void *arg) {
+    soundio_panic("TODO instream_raw_run");
+}
+
+static void instream_shared_run(void *arg) {
+    SoundIoInStreamPrivate *is = (SoundIoInStreamPrivate *) arg;
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+    SoundIoInStream *instream = &is->pub;
+
+    HRESULT hr;
+
+    if (FAILED(hr = IAudioClient_Start(isw->audio_client))) {
+        instream->error_callback(instream, SoundIoErrorStreaming);
+        return;
+    }
+
+    for (;;) {
+        soundio_os_mutex_lock(isw->mutex);
+        soundio_os_cond_timed_wait(isw->cond, isw->mutex, instream->software_latency / 2.0);
+        if (!isw->thread_exit_flag.test_and_set()) {
+            soundio_os_mutex_unlock(isw->mutex);
+            return;
+        }
+        soundio_os_mutex_unlock(isw->mutex);
+
+        UINT32 frames_available;
+        if (FAILED(hr = IAudioClient_GetCurrentPadding(isw->audio_client, &frames_available))) {
+            instream->error_callback(instream, SoundIoErrorStreaming);
+            return;
+        }
+
+        isw->readable_frame_count = frames_available;
+        if (isw->readable_frame_count > 0)
+            instream->read_callback(instream, 0, isw->readable_frame_count);
+    }
+}
+
 static int instream_start_wasapi(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
-    soundio_panic("TODO");
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+    int err;
+
+    assert(!isw->thread);
+    isw->thread_exit_flag.test_and_set();
+
+    if (isw->is_raw) {
+        if ((err = soundio_os_thread_create(instream_raw_run, is, true, &isw->thread)))
+            return err;
+    } else {
+        if ((err = soundio_os_thread_create(instream_shared_run, is, true, &isw->thread)))
+            return err;
+    }
+
+    return 0;
 }

 static int instream_begin_read_wasapi(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is,
         SoundIoChannelArea **out_areas, int *frame_count)
 {
-    soundio_panic("TODO");
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+    SoundIoInStream *instream = &is->pub;
+    HRESULT hr;
+
+    if (isw->read_buf_frames_left <= 0) {
+        UINT32 frames_to_read;
+        DWORD flags;
+        if (FAILED(hr = IAudioCaptureClient_GetBuffer(isw->audio_capture_client,
+                (BYTE**)&isw->read_buf, &frames_to_read, &flags, nullptr, nullptr)))
+        {
+            return SoundIoErrorStreaming;
+        }
+        isw->read_buf_frames_left = frames_to_read;
+        if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
+            isw->read_buf = nullptr;
+    }
+
+    isw->read_frame_count = min(*frame_count, isw->read_buf_frames_left);
+    *frame_count = isw->read_frame_count;
+
+    if (isw->read_buf) {
+        for (int ch = 0; ch < instream->layout.channel_count; ch += 1) {
+            isw->areas[ch].ptr = isw->read_buf + ch * instream->bytes_per_sample;
+            isw->areas[ch].step = instream->bytes_per_frame;
+        }
+
+        *out_areas = isw->areas;
+    } else {
+        *out_areas = nullptr;
+    }
+
+    return 0;
 }

 static int instream_end_read_wasapi(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) {
-    soundio_panic("TODO");
+    SoundIoInStreamWasapi *isw = &is->backend_data.wasapi;
+    HRESULT hr;
+    if (FAILED(hr = IAudioCaptureClient_ReleaseBuffer(isw->audio_capture_client, isw->read_frame_count))) {
+        return SoundIoErrorStreaming;
+    }
+    isw->read_buf_frames_left -= isw->read_frame_count;
+    return 0;
 }

@@ -63,10 +63,26 @@ struct SoundIoOutStreamWasapi {
    UINT32 buffer_frame_count;
    int write_frame_count;
    HANDLE h_event;
    bool is_paused;
    SoundIoChannelArea areas[SOUNDIO_MAX_CHANNELS];
};

struct SoundIoInStreamWasapi {
    IAudioClient *audio_client;
    IAudioCaptureClient *audio_capture_client;
    SoundIoOsThread *thread;
    SoundIoOsMutex *mutex;
    SoundIoOsCond *cond;
    atomic_flag thread_exit_flag;
    bool is_raw;
    int readable_frame_count;
    UINT32 buffer_frame_count;
    int read_frame_count;
    HANDLE h_event;
    bool is_paused;
    char *read_buf;
    int read_buf_frames_left;
    SoundIoChannelArea areas[SOUNDIO_MAX_CHANNELS];
};

#endif