diff --git a/CHANGES.md b/CHANGES.md index 2d0211b6..90fc1a42 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,8 +1,13 @@ v0.11.22 - TBD ===================== +* Fix a bug relating to node detachment. +* Fix a bug where amplification with `ma_device_set_master_volume()` does not work. * ALSA: Fix some warnings relating to unhandled return value of `read()`. * DirectSound: Add support for specifying an explicit window handle for SetCooperativeLevel(). * Web: Fix ScriptProcessorNode path when compiling with `--closure=1`. Note that the Audio Worklets path is not currently working due to the callback specified in `emscripten_create_wasm_audio_worklet_processor_async` never getting fired. +* Web: Fix an error with the unlocked notification when compiling as C++. +* Web: Fix a JavaScript error when initializing and then uninitializing a context before any interactivity. +* AAudio: The default minimum SDK version has been increased from 26 to 27 when enabling AAudio. If you need to support version 26, you can use `#define MA_AAUDIO_MIN_ANDROID_SDK_VERSION 26`. v0.11.21 - 2023-11-15 diff --git a/examples/custom_backend.c b/examples/custom_backend.c index db8ef455..d4c4d68a 100644 --- a/examples/custom_backend.c +++ b/examples/custom_backend.c @@ -34,15 +34,8 @@ Main program starts here. 
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->playback.channels == DEVICE_CHANNELS); - if (pDevice->type == ma_device_type_playback) { - ma_waveform* pSineWave; - - pSineWave = (ma_waveform*)pDevice->pUserData; - MA_ASSERT(pSineWave != NULL); - - ma_waveform_read_pcm_frames(pSineWave, pOutput, frameCount, NULL); + ma_waveform_read_pcm_frames((ma_waveform*)pDevice->pUserData, pOutput, frameCount, NULL); } if (pDevice->type == ma_device_type_duplex) { diff --git a/examples/data_source_chaining.c b/examples/data_source_chaining.c index 6d019903..c04b4534 100644 --- a/examples/data_source_chaining.c +++ b/examples/data_source_chaining.c @@ -49,7 +49,9 @@ ma_decoder* g_pDecoders; static ma_data_source* next_callback_tail(ma_data_source* pDataSource) { - MA_ASSERT(g_decoderCount > 0); /* <-- We check for this in main() so should never happen. */ + if (g_decoderCount == 0) { /* <-- We check for this in main() so should never happen. */ + return NULL; + } /* This will be fired when the last item in the chain has reached the end. In this example we want diff --git a/examples/duplex_effect.c b/examples/duplex_effect.c index 2c2a5459..6a07fd25 100644 --- a/examples/duplex_effect.c +++ b/examples/duplex_effect.c @@ -12,7 +12,7 @@ effect. #include -#define DEVICE_FORMAT ma_format_f32; /* Must always be f32 for this example because the node graph system only works with this. */ +#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */ #define DEVICE_CHANNELS 1 /* For this example, always set to 1. */ static ma_waveform g_sourceData; /* The underlying data source of the source node. 
*/ @@ -24,8 +24,13 @@ static ma_node_graph g_nodeGraph; void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. + */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/examples/node_graph.c b/examples/node_graph.c index aecccfe7..83aa8945 100644 --- a/examples/node_graph.c +++ b/examples/node_graph.c @@ -81,8 +81,6 @@ static int g_soundNodeCount; void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->playback.channels == CHANNELS); - /* Hearing the output of the node graph is as easy as reading straight into the output buffer. You just need to make sure you use a consistent data format or else you'll need to do your own conversion. diff --git a/examples/resource_manager.c b/examples/resource_manager.c index d9a9697f..27d7fd51 100644 --- a/examples/resource_manager.c +++ b/examples/resource_manager.c @@ -33,7 +33,6 @@ set, each sound will have their own formats and you'll need to do the necessary void main_loop__em(void* pUserData) { ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData; - MA_ASSERT(pResourceManager != NULL); /* The Emscripten build does not support threading which means we need to process jobs manually. 
If diff --git a/examples/resource_manager_advanced.c b/examples/resource_manager_advanced.c index 19236c33..d757b027 100644 --- a/examples/resource_manager_advanced.c +++ b/examples/resource_manager_advanced.c @@ -32,8 +32,6 @@ static ma_result ma_data_source_read_pcm_frames_f32_ex(ma_data_source* pDataSour This function is intended to be used when the format and channel count of the data source is known beforehand. The idea is to avoid overhead due to redundant calls to ma_data_source_get_data_format(). */ - MA_ASSERT(pDataSource != NULL); - if (dataSourceFormat == ma_format_f32) { /* Fast path. No conversion necessary. */ return ma_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, pFramesRead); @@ -136,10 +134,6 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin */ ma_uint32 iDataSource; - MA_ASSERT(pDevice->playback.format == ma_format_f32); - - (void)pInput; /* Unused. */ - /* If the device was configured with noPreSilencedOutputBuffer then you would need to silence the buffer here, or make sure the first data source to be mixed is copied rather than mixed. @@ -150,12 +144,15 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin for (iDataSource = 0; iDataSource < g_dataSourceCount; iDataSource += 1) { ma_data_source_read_pcm_frames_and_mix_f32(&g_dataSources[iDataSource], (float*)pOutput, frameCount, NULL, /* volume = */1); } + + /* Unused. */ + (void)pInput; + (void)pDevice; } static ma_thread_result MA_THREADCALL custom_job_thread(void* pUserData) { ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData; - MA_ASSERT(pResourceManager != NULL); for (;;) { ma_result result; @@ -191,8 +188,8 @@ static ma_thread_result MA_THREADCALL custom_job_thread(void* pUserData) event is received which means the `result != MA_SUCCESS` logic above will catch it. 
If you do not check the return value of ma_resource_manager_next_job() you will want to check for MA_RESOURCE_MANAGER_JOB_QUIT like the code below. */ - if (job.toc.breakup.code == MA_RESOURCE_MANAGER_JOB_QUIT) { - printf("CUSTOM JOB THREAD TERMINATING VIA MA_RESOURCE_MANAGER_JOB_QUIT... "); + if (job.toc.breakup.code == MA_JOB_TYPE_QUIT) { + printf("CUSTOM JOB THREAD TERMINATING VIA MA_JOB_TYPE_QUIT... "); break; } diff --git a/examples/simple_capture.c b/examples/simple_capture.c index e3b18248..6c08dc15 100644 --- a/examples/simple_capture.c +++ b/examples/simple_capture.c @@ -16,10 +16,7 @@ data received by the microphone straight to a WAV file. void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - ma_encoder* pEncoder = (ma_encoder*)pDevice->pUserData; - MA_ASSERT(pEncoder != NULL); - - ma_encoder_write_pcm_frames(pEncoder, pInput, frameCount, NULL); + ma_encoder_write_pcm_frames((ma_encoder*)pDevice->pUserData, pInput, frameCount, NULL); (void)pOutput; } diff --git a/examples/simple_duplex.c b/examples/simple_duplex.c index c604db51..69cc6d84 100644 --- a/examples/simple_duplex.c +++ b/examples/simple_duplex.c @@ -23,8 +23,10 @@ void main_loop__em() void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* This example assumes the playback and capture sides use the same format and channel count. */ + if (pDevice->capture.format != pDevice->playback.format || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* In this example the format and channel count are the same for both input and output which means we can just memcpy(). 
*/ MA_COPY_MEMORY(pOutput, pInput, frameCount * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); diff --git a/examples/simple_loopback.c b/examples/simple_loopback.c index c8318d6d..df5fb0ab 100644 --- a/examples/simple_loopback.c +++ b/examples/simple_loopback.c @@ -18,10 +18,7 @@ properties. The output buffer in the callback will be null whereas the input buf void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - ma_encoder* pEncoder = (ma_encoder*)pDevice->pUserData; - MA_ASSERT(pEncoder != NULL); - - ma_encoder_write_pcm_frames(pEncoder, pInput, frameCount, NULL); + ma_encoder_write_pcm_frames((ma_encoder*)pDevice->pUserData, pInput, frameCount, NULL); (void)pOutput; } diff --git a/examples/simple_mixing.c b/examples/simple_mixing.c index e4c81b05..30e906e6 100644 --- a/examples/simple_mixing.c +++ b/examples/simple_mixing.c @@ -87,8 +87,7 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin float* pOutputF32 = (float*)pOutput; ma_uint32 iDecoder; - MA_ASSERT(pDevice->playback.format == SAMPLE_FORMAT); /* <-- Important for this example. */ - + /* This example assumes the device was configured to use ma_format_f32. 
*/ for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) { if (!g_pDecodersAtEnd[iDecoder]) { ma_uint32 framesRead = read_and_mix_pcm_frames_f32(&g_pDecoders[iDecoder], pOutputF32, frameCount); @@ -107,6 +106,7 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin } (void)pInput; + (void)pDevice; } int main(int argc, char** argv) diff --git a/examples/simple_playback_sine.c b/examples/simple_playback_sine.c index ab1f25fb..d053f5a0 100644 --- a/examples/simple_playback_sine.c +++ b/examples/simple_playback_sine.c @@ -33,14 +33,7 @@ void main_loop__em() void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - ma_waveform* pSineWave; - - MA_ASSERT(pDevice->playback.channels == DEVICE_CHANNELS); - - pSineWave = (ma_waveform*)pDevice->pUserData; - MA_ASSERT(pSineWave != NULL); - - ma_waveform_read_pcm_frames(pSineWave, pOutput, frameCount, NULL); + ma_waveform_read_pcm_frames((ma_waveform*)pDevice->pUserData, pOutput, frameCount, NULL); (void)pInput; /* Unused. */ } diff --git a/extras/nodes/ma_delay_node/ma_delay_node_example.c b/extras/nodes/ma_delay_node/ma_delay_node_example.c index 3fcb4aa3..6f102c81 100644 --- a/extras/nodes/ma_delay_node/ma_delay_node_example.c +++ b/extras/nodes/ma_delay_node/ma_delay_node_example.c @@ -15,8 +15,13 @@ static ma_node_graph g_nodeGraph; /* The main node graph that we'l void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format && pDevice->capture.format == ma_format_f32); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. 
+ */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/extras/nodes/ma_reverb_node/ma_reverb_node_example.c b/extras/nodes/ma_reverb_node/ma_reverb_node_example.c index 55ec444f..5102ab07 100644 --- a/extras/nodes/ma_reverb_node/ma_reverb_node_example.c +++ b/extras/nodes/ma_reverb_node/ma_reverb_node_example.c @@ -15,8 +15,13 @@ static ma_node_graph g_nodeGraph; /* The main node graph that we'l void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format && pDevice->capture.format == ma_format_f32); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. + */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c b/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c index 57700982..9e501005 100644 --- a/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c +++ b/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c @@ -24,8 +24,13 @@ static ma_node_graph g_nodeGraph; void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. 
+ */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/miniaudio.h b/miniaudio.h index dc27dfd8..76f2501c 100644 --- a/miniaudio.h +++ b/miniaudio.h @@ -11306,12 +11306,12 @@ MA_API ma_engine_config ma_engine_config_init(void); struct ma_engine { - ma_node_graph nodeGraph; /* An engine is a node graph. It should be able to be plugged into any ma_node_graph API (with a cast) which means this must be the first member of this struct. */ + ma_node_graph nodeGraph; /* An engine is a node graph. It should be able to be plugged into any ma_node_graph API (with a cast) which means this must be the first member of this struct. */ #if !defined(MA_NO_RESOURCE_MANAGER) ma_resource_manager* pResourceManager; #endif #if !defined(MA_NO_DEVICE_IO) - ma_device* pDevice; /* Optionally set via the config, otherwise allocated by the engine in ma_engine_init(). */ + ma_device* pDevice; /* Optionally set via the config, otherwise allocated by the engine in ma_engine_init(). */ #endif ma_log* pLog; ma_uint32 sampleRate; @@ -11320,10 +11320,10 @@ struct ma_engine ma_allocation_callbacks allocationCallbacks; ma_bool8 ownsResourceManager; ma_bool8 ownsDevice; - ma_spinlock inlinedSoundLock; /* For synchronizing access so the inlined sound list. */ - ma_sound_inlined* pInlinedSoundHead; /* The first inlined sound. Inlined sounds are tracked in a linked list. */ - MA_ATOMIC(4, ma_uint32) inlinedSoundCount; /* The total number of allocated inlined sound objects. Used for debugging. */ - ma_uint32 gainSmoothTimeInFrames; /* The number of frames to interpolate the gain of spatialized sounds across. */ + ma_spinlock inlinedSoundLock; /* For synchronizing access so the inlined sound list. */ + ma_sound_inlined* pInlinedSoundHead; /* The first inlined sound. 
Inlined sounds are tracked in a linked list. */ + MA_ATOMIC(4, ma_uint32) inlinedSoundCount; /* The total number of allocated inlined sound objects. Used for debugging. */ + ma_uint32 gainSmoothTimeInFrames; /* The number of frames to interpolate the gain of spatialized sounds across. */ ma_uint32 defaultVolumeSmoothTimeInPCMFrames; ma_mono_expansion_mode monoExpansionMode; ma_engine_process_proc onProcess; @@ -16201,19 +16201,34 @@ static ma_result ma_thread_create__posix(ma_thread* pThread, ma_thread_priority if (priority == ma_thread_priority_idle) { sched.sched_priority = priorityMin; } else if (priority == ma_thread_priority_realtime) { - sched.sched_priority = priorityMax; - } else { - sched.sched_priority += ((int)priority + 5) * priorityStep; /* +5 because the lowest priority is -5. */ - if (sched.sched_priority < priorityMin) { - sched.sched_priority = priorityMin; + #if defined(MA_PTHREAD_REALTIME_THREAD_PRIORITY) + { + sched.sched_priority = MA_PTHREAD_REALTIME_THREAD_PRIORITY; } - if (sched.sched_priority > priorityMax) { + #else + { sched.sched_priority = priorityMax; } + #endif + } else { + sched.sched_priority += ((int)priority + 5) * priorityStep; /* +5 because the lowest priority is -5. */ } - /* I'm not treating a failure of setting the priority as a critical error so not checking the return value here. */ - pthread_attr_setschedparam(&attr, &sched); + if (sched.sched_priority < priorityMin) { + sched.sched_priority = priorityMin; + } + if (sched.sched_priority > priorityMax) { + sched.sched_priority = priorityMax; + } + + /* I'm not treating a failure of setting the priority as a critical error so not aborting on failure here. */ + if (pthread_attr_setschedparam(&attr, &sched) == 0) { + #if !defined(MA_ANDROID) || (defined(__ANDROID_API__) && __ANDROID_API__ >= 28) + { + pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED); + } + #endif + } } } } @@ -17998,6 +18013,10 @@ DEVICE I/O #endif #endif +/* This must be set to at least 26. 
*/ +#ifndef MA_AAUDIO_MIN_ANDROID_SDK_VERSION +#define MA_AAUDIO_MIN_ANDROID_SDK_VERSION 27 +#endif @@ -18354,7 +18373,7 @@ MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend) #if defined(MA_HAS_AAUDIO) #if defined(MA_ANDROID) { - return ma_android_sdk_version() >= 26; + return ma_android_sdk_version() >= MA_AAUDIO_MIN_ANDROID_SDK_VERSION; } #else return MA_FALSE; @@ -19071,7 +19090,7 @@ static void ma_device__handle_data_callback(ma_device* pDevice, void* pFramesOut unsigned int prevDenormalState = ma_device_disable_denormals(pDevice); { /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. */ - if (pFramesIn != NULL && masterVolumeFactor < 1) { + if (pFramesIn != NULL && masterVolumeFactor != 1) { ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); @@ -19094,7 +19113,7 @@ static void ma_device__handle_data_callback(ma_device* pDevice, void* pFramesOut /* Volume control and clipping for playback devices. */ if (pFramesOut != NULL) { - if (masterVolumeFactor < 1) { + if (masterVolumeFactor != 1) { if (pFramesIn == NULL) { /* <-- In full-duplex situations, the volume will have been applied to the input samples before the data callback. Applying it again post-callback will incorrectly compound it. 
*/ ma_apply_volume_factor_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels, masterVolumeFactor); } @@ -23922,6 +23941,13 @@ DirectSound Backend #define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010 #define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020 +#define MA_DSBSTATUS_PLAYING 0x00000001 +#define MA_DSBSTATUS_BUFFERLOST 0x00000002 +#define MA_DSBSTATUS_LOOPING 0x00000004 +#define MA_DSBSTATUS_LOCHARDWARE 0x00000008 +#define MA_DSBSTATUS_LOCSOFTWARE 0x00000010 +#define MA_DSBSTATUS_TERMINATED 0x00000020 + #define MA_DSCBSTART_LOOPING 0x00000001 typedef struct @@ -25084,6 +25110,7 @@ static ma_result ma_device_data_loop__dsound(ma_device* pDevice) ma_bool32 isPlaybackDeviceStarted = MA_FALSE; ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */ ma_uint32 waitTimeInMilliseconds = 1; + DWORD playbackBufferStatus = 0; MA_ASSERT(pDevice != NULL); @@ -25412,6 +25439,20 @@ static ma_result ma_device_data_loop__dsound(ma_device* pDevice) break; } + hr = ma_IDirectSoundBuffer_GetStatus((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &playbackBufferStatus); + if (SUCCEEDED(hr) && (playbackBufferStatus & MA_DSBSTATUS_PLAYING) == 0 && isPlaybackDeviceStarted) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[DirectSound] Attempting to resume audio due to state: %d.", (int)playbackBufferStatus); + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed after attempting to resume from state %d.", (int)playbackBufferStatus); + return ma_result_from_HRESULT(hr); + } + + isPlaybackDeviceStarted = MA_TRUE; + ma_sleep(waitTimeInMilliseconds); + continue; + } + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { physicalPlayCursorLoopFlagPlayback = 
!physicalPlayCursorLoopFlagPlayback; } @@ -39894,7 +39935,7 @@ TODO: Version 0.12: Swap this logic around so that AudioWorklets are used by def /* The thread stack size must be a multiple of 16. */ #ifndef MA_AUDIO_WORKLETS_THREAD_STACK_SIZE -#define MA_AUDIO_WORKLETS_THREAD_STACK_SIZE 16384 +#define MA_AUDIO_WORKLETS_THREAD_STACK_SIZE 131072 #endif #if defined(MA_USE_AUDIO_WORKLETS) @@ -40637,6 +40678,10 @@ static ma_result ma_context_uninit__webaudio(ma_context* pContext) /* Remove the global miniaudio object from window if there are no more references to it. */ EM_ASM({ if (typeof(window.miniaudio) !== 'undefined') { + miniaudio.unlock_event_types.map(function(event_type) { + document.removeEventListener(event_type, miniaudio.unlock, true); + }); + window.miniaudio.referenceCount -= 1; if (window.miniaudio.referenceCount === 0) { delete window.miniaudio; @@ -59336,7 +59381,7 @@ static ma_result ma_default_vfs_seek__win32(ma_vfs* pVFS, ma_vfs_file file, ma_i result = ma_SetFilePointerEx((HANDLE)file, liDistanceToMove, NULL, dwMoveMethod); } else if (ma_SetFilePointer != NULL) { /* No SetFilePointerEx() so restrict to 31 bits. */ - if (origin > 0x7FFFFFFF) { + if (offset > 0x7FFFFFFF) { return MA_OUT_OF_RANGE; } @@ -59939,7 +59984,7 @@ extern "C" { #define MA_DR_WAV_XSTRINGIFY(x) MA_DR_WAV_STRINGIFY(x) #define MA_DR_WAV_VERSION_MAJOR 0 #define MA_DR_WAV_VERSION_MINOR 13 -#define MA_DR_WAV_VERSION_REVISION 13 +#define MA_DR_WAV_VERSION_REVISION 14 #define MA_DR_WAV_VERSION_STRING MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MAJOR) "." MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MINOR) "." MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_REVISION) #include #define MA_DR_WAVE_FORMAT_PCM 0x1 @@ -72457,7 +72502,7 @@ static ma_result ma_node_detach_full(ma_node* pNode) linked list logic. We don't need to worry about the audio thread referencing these because the step above severed the connection to the graph. 
*/ - for (pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); pOutputBus != NULL; pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pNext)) { + for (pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); pOutputBus != NULL; pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext)) { ma_node_detach_output_bus(pOutputBus->pNode, pOutputBus->outputBusIndex); /* This won't do any waiting in practice and should be efficient. */ } } @@ -78720,7 +78765,6 @@ MA_PRIVATE ma_bool32 ma_dr_wav_init__internal(ma_dr_wav* pWav, ma_dr_wav_chunk_p } if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) { if (ma_dr_wav_bytes_to_u32_ex(chunkSizeBytes, pWav->container) < 36) { - return MA_FALSE; } } else if (pWav->container == ma_dr_wav_container_rf64) { if (ma_dr_wav_bytes_to_u32_le(chunkSizeBytes) != 0xFFFFFFFF) { @@ -79047,9 +79091,7 @@ MA_PRIVATE ma_bool32 ma_dr_wav_init__internal(ma_dr_wav* pWav, ma_dr_wav_chunk_p } } if (isProcessingMetadata) { - ma_uint64 metadataBytesRead; - metadataBytesRead = ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown); - MA_DR_WAV_ASSERT(metadataBytesRead <= header.sizeInBytes); + ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown); if (ma_dr_wav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == MA_FALSE) { break; } diff --git a/tests/test_deviceio/ma_test_deviceio.c b/tests/test_deviceio/ma_test_deviceio.c index 6c8ceaf5..f7b049b6 100644 --- a/tests/test_deviceio/ma_test_deviceio.c +++ b/tests/test_deviceio/ma_test_deviceio.c @@ -41,7 +41,6 @@ will receive the captured audio. If multiple backends are specified, the priority will be based on the order in which you specify them. If multiple waveform or noise types are specified the last one on the command line will have priority. 
*/ -#define MA_FORCE_UWP #include "../test_common/ma_test_common.c" typedef enum