diff --git a/examples/custom_backend.c b/examples/custom_backend.c index e30cd463..5c0b4d71 100644 --- a/examples/custom_backend.c +++ b/examples/custom_backend.c @@ -180,9 +180,6 @@ static ma_result ma_context_enumerate_devices__sdl(ma_context* pContext, ma_enum ma_bool32 cbResult; int iDevice; - MA_ASSERT(pContext != NULL); - MA_ASSERT(callback != NULL); - /* Playback */ if (!isTerminated) { int deviceCount = ((MA_PFN_SDL_GetNumAudioDevices)pContextEx->sdl.SDL_GetNumAudioDevices)(0); @@ -241,8 +238,6 @@ static ma_result ma_context_get_device_info__sdl(ma_context* pContext, ma_device const char* pDeviceName; #endif - MA_ASSERT(pContext != NULL); - if (pDeviceID == NULL) { if (deviceType == ma_device_type_playback) { pDeviceInfo->id.custom.i = 0; @@ -322,8 +317,6 @@ void ma_audio_callback_capture__sdl(void* pUserData, ma_uint8* pBuffer, int buff { ma_device_ex* pDeviceEx = (ma_device_ex*)pUserData; - MA_ASSERT(pDeviceEx != NULL); - ma_device_handle_backend_data_callback((ma_device*)pDeviceEx, NULL, pBuffer, (ma_uint32)bufferSizeInBytes / ma_get_bytes_per_frame(pDeviceEx->device.capture.internalFormat, pDeviceEx->device.capture.internalChannels)); } @@ -331,8 +324,6 @@ void ma_audio_callback_playback__sdl(void* pUserData, ma_uint8* pBuffer, int buf { ma_device_ex* pDeviceEx = (ma_device_ex*)pUserData; - MA_ASSERT(pDeviceEx != NULL); - ma_device_handle_backend_data_callback((ma_device*)pDeviceEx, pBuffer, NULL, (ma_uint32)bufferSizeInBytes / ma_get_bytes_per_frame(pDeviceEx->device.playback.internalFormat, pDeviceEx->device.playback.internalChannels)); } @@ -344,9 +335,6 @@ static ma_result ma_device_init_internal__sdl(ma_device_ex* pDeviceEx, const ma_ const char* pDeviceName; int deviceID; - MA_ASSERT(pDeviceEx != NULL); - MA_ASSERT(pDescriptor != NULL); - /* SDL is a little bit awkward with specifying the buffer size, You need to specify the size of the buffer in frames, but since we may have requested a period size in milliseconds we'll need to convert, which 
depends on the sample rate. But there's a possibility that @@ -430,8 +418,6 @@ static ma_result ma_device_init__sdl(ma_device* pDevice, const ma_device_config* ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext; ma_result result; - MA_ASSERT(pDevice != NULL); - /* SDL does not support loopback mode, so must return MA_DEVICE_TYPE_NOT_SUPPORTED if it's requested. */ if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -463,8 +449,6 @@ static ma_result ma_device_uninit__sdl(ma_device* pDevice) ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice; ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext; - MA_ASSERT(pDevice != NULL); - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((MA_PFN_SDL_CloseAudioDevice)pContextEx->sdl.SDL_CloseAudioDevice)(pDeviceEx->sdl.deviceIDCapture); } @@ -481,8 +465,6 @@ static ma_result ma_device_start__sdl(ma_device* pDevice) ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice; ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext; - MA_ASSERT(pDevice != NULL); - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((MA_PFN_SDL_PauseAudioDevice)pContextEx->sdl.SDL_PauseAudioDevice)(pDeviceEx->sdl.deviceIDCapture, 0); } @@ -499,8 +481,6 @@ static ma_result ma_device_stop__sdl(ma_device* pDevice) ma_device_ex* pDeviceEx = (ma_device_ex*)pDevice; ma_context_ex* pContextEx = (ma_context_ex*)pDevice->pContext; - MA_ASSERT(pDevice != NULL); - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((MA_PFN_SDL_PauseAudioDevice)pContextEx->sdl.SDL_PauseAudioDevice)(pDeviceEx->sdl.deviceIDCapture, 1); } @@ -516,8 +496,6 @@ static ma_result ma_context_uninit__sdl(ma_context* pContext) { ma_context_ex* pContextEx = (ma_context_ex*)pContext; - MA_ASSERT(pContext != NULL); - ((MA_PFN_SDL_QuitSubSystem)pContextEx->sdl.SDL_QuitSubSystem)(MA_SDL_INIT_AUDIO); /* Close the handle to the 
SDL shared object last. */ @@ -545,8 +523,6 @@ static ma_result ma_context_init__sdl(ma_context* pContext, const ma_context_con #endif }; - MA_ASSERT(pContext != NULL); - (void)pConfig; /* Check if we have SDL2 installed somewhere. If not it's not usable and we need to abort. */ @@ -641,15 +617,8 @@ Main program starts here. void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->playback.channels == DEVICE_CHANNELS); - if (pDevice->type == ma_device_type_playback) { - ma_waveform* pSineWave; - - pSineWave = (ma_waveform*)pDevice->pUserData; - MA_ASSERT(pSineWave != NULL); - - ma_waveform_read_pcm_frames(pSineWave, pOutput, frameCount, NULL); + ma_waveform_read_pcm_frames((ma_waveform*)pDevice->pUserData, pOutput, frameCount, NULL); } if (pDevice->type == ma_device_type_duplex) { diff --git a/examples/data_source_chaining.c b/examples/data_source_chaining.c index 6d019903..c04b4534 100644 --- a/examples/data_source_chaining.c +++ b/examples/data_source_chaining.c @@ -49,7 +49,9 @@ ma_decoder* g_pDecoders; static ma_data_source* next_callback_tail(ma_data_source* pDataSource) { - MA_ASSERT(g_decoderCount > 0); /* <-- We check for this in main() so should never happen. */ + if (g_decoderCount == 0) { /* <-- We check for this in main() so should never happen. */ + return NULL; + } /* This will be fired when the last item in the chain has reached the end. In this example we want diff --git a/examples/duplex_effect.c b/examples/duplex_effect.c index 2c2a5459..6a07fd25 100644 --- a/examples/duplex_effect.c +++ b/examples/duplex_effect.c @@ -12,7 +12,7 @@ effect. #include -#define DEVICE_FORMAT ma_format_f32; /* Must always be f32 for this example because the node graph system only works with this. */ +#define DEVICE_FORMAT ma_format_f32 /* Must always be f32 for this example because the node graph system only works with this. */ #define DEVICE_CHANNELS 1 /* For this example, always set to 1. 
*/ static ma_waveform g_sourceData; /* The underlying data source of the source node. */ @@ -24,8 +24,13 @@ static ma_node_graph g_nodeGraph; void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. + */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/examples/node_graph.c b/examples/node_graph.c index aecccfe7..83aa8945 100644 --- a/examples/node_graph.c +++ b/examples/node_graph.c @@ -81,8 +81,6 @@ static int g_soundNodeCount; void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->playback.channels == CHANNELS); - /* Hearing the output of the node graph is as easy as reading straight into the output buffer. You just need to make sure you use a consistent data format or else you'll need to do your own conversion. diff --git a/examples/resource_manager.c b/examples/resource_manager.c index d9a9697f..27d7fd51 100644 --- a/examples/resource_manager.c +++ b/examples/resource_manager.c @@ -33,7 +33,6 @@ set, each sound will have their own formats and you'll need to do the necessary void main_loop__em(void* pUserData) { ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData; - MA_ASSERT(pResourceManager != NULL); /* The Emscripten build does not support threading which means we need to process jobs manually. 
If diff --git a/examples/resource_manager_advanced.c b/examples/resource_manager_advanced.c index 19236c33..d757b027 100644 --- a/examples/resource_manager_advanced.c +++ b/examples/resource_manager_advanced.c @@ -32,8 +32,6 @@ static ma_result ma_data_source_read_pcm_frames_f32_ex(ma_data_source* pDataSour This function is intended to be used when the format and channel count of the data source is known beforehand. The idea is to avoid overhead due to redundant calls to ma_data_source_get_data_format(). */ - MA_ASSERT(pDataSource != NULL); - if (dataSourceFormat == ma_format_f32) { /* Fast path. No conversion necessary. */ return ma_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, pFramesRead); @@ -136,10 +134,6 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin */ ma_uint32 iDataSource; - MA_ASSERT(pDevice->playback.format == ma_format_f32); - - (void)pInput; /* Unused. */ - /* If the device was configured with noPreSilencedOutputBuffer then you would need to silence the buffer here, or make sure the first data source to be mixed is copied rather than mixed. @@ -150,12 +144,15 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin for (iDataSource = 0; iDataSource < g_dataSourceCount; iDataSource += 1) { ma_data_source_read_pcm_frames_and_mix_f32(&g_dataSources[iDataSource], (float*)pOutput, frameCount, NULL, /* volume = */1); } + + /* Unused. */ + (void)pInput; + (void)pDevice; } static ma_thread_result MA_THREADCALL custom_job_thread(void* pUserData) { ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData; - MA_ASSERT(pResourceManager != NULL); for (;;) { ma_result result; @@ -191,8 +188,8 @@ static ma_thread_result MA_THREADCALL custom_job_thread(void* pUserData) event is received which means the `result != MA_SUCCESS` logic above will catch it. 
If you do not check the return value of ma_resource_manager_next_job() you will want to check for MA_RESOURCE_MANAGER_JOB_QUIT like the code below. */ - if (job.toc.breakup.code == MA_RESOURCE_MANAGER_JOB_QUIT) { - printf("CUSTOM JOB THREAD TERMINATING VIA MA_RESOURCE_MANAGER_JOB_QUIT... "); + if (job.toc.breakup.code == MA_JOB_TYPE_QUIT) { + printf("CUSTOM JOB THREAD TERMINATING VIA MA_JOB_TYPE_QUIT... "); break; } diff --git a/examples/simple_capture.c b/examples/simple_capture.c index e3b18248..6c08dc15 100644 --- a/examples/simple_capture.c +++ b/examples/simple_capture.c @@ -16,10 +16,7 @@ data received by the microphone straight to a WAV file. void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - ma_encoder* pEncoder = (ma_encoder*)pDevice->pUserData; - MA_ASSERT(pEncoder != NULL); - - ma_encoder_write_pcm_frames(pEncoder, pInput, frameCount, NULL); + ma_encoder_write_pcm_frames((ma_encoder*)pDevice->pUserData, pInput, frameCount, NULL); (void)pOutput; } diff --git a/examples/simple_duplex.c b/examples/simple_duplex.c index c604db51..69cc6d84 100644 --- a/examples/simple_duplex.c +++ b/examples/simple_duplex.c @@ -23,8 +23,10 @@ void main_loop__em() void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* This example assumes the playback and capture sides use the same format and channel count. */ + if (pDevice->capture.format != pDevice->playback.format || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* In this example the format and channel count are the same for both input and output which means we can just memcpy(). 
*/ MA_COPY_MEMORY(pOutput, pInput, frameCount * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); diff --git a/examples/simple_loopback.c b/examples/simple_loopback.c index c8318d6d..df5fb0ab 100644 --- a/examples/simple_loopback.c +++ b/examples/simple_loopback.c @@ -18,10 +18,7 @@ properties. The output buffer in the callback will be null whereas the input buf void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - ma_encoder* pEncoder = (ma_encoder*)pDevice->pUserData; - MA_ASSERT(pEncoder != NULL); - - ma_encoder_write_pcm_frames(pEncoder, pInput, frameCount, NULL); + ma_encoder_write_pcm_frames((ma_encoder*)pDevice->pUserData, pInput, frameCount, NULL); (void)pOutput; } diff --git a/examples/simple_mixing.c b/examples/simple_mixing.c index e4c81b05..30e906e6 100644 --- a/examples/simple_mixing.c +++ b/examples/simple_mixing.c @@ -87,8 +87,7 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin float* pOutputF32 = (float*)pOutput; ma_uint32 iDecoder; - MA_ASSERT(pDevice->playback.format == SAMPLE_FORMAT); /* <-- Important for this example. */ - + /* This example assumes the device was configured to use ma_format_f32. 
*/ for (iDecoder = 0; iDecoder < g_decoderCount; ++iDecoder) { if (!g_pDecodersAtEnd[iDecoder]) { ma_uint32 framesRead = read_and_mix_pcm_frames_f32(&g_pDecoders[iDecoder], pOutputF32, frameCount); @@ -107,6 +106,7 @@ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uin } (void)pInput; + (void)pDevice; } int main(int argc, char** argv) diff --git a/examples/simple_playback_sine.c b/examples/simple_playback_sine.c index ab1f25fb..d053f5a0 100644 --- a/examples/simple_playback_sine.c +++ b/examples/simple_playback_sine.c @@ -33,14 +33,7 @@ void main_loop__em() void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - ma_waveform* pSineWave; - - MA_ASSERT(pDevice->playback.channels == DEVICE_CHANNELS); - - pSineWave = (ma_waveform*)pDevice->pUserData; - MA_ASSERT(pSineWave != NULL); - - ma_waveform_read_pcm_frames(pSineWave, pOutput, frameCount, NULL); + ma_waveform_read_pcm_frames((ma_waveform*)pDevice->pUserData, pOutput, frameCount, NULL); (void)pInput; /* Unused. */ } diff --git a/extras/nodes/ma_delay_node/ma_delay_node_example.c b/extras/nodes/ma_delay_node/ma_delay_node_example.c index 3fcb4aa3..6f102c81 100644 --- a/extras/nodes/ma_delay_node/ma_delay_node_example.c +++ b/extras/nodes/ma_delay_node/ma_delay_node_example.c @@ -15,8 +15,13 @@ static ma_node_graph g_nodeGraph; /* The main node graph that we'l void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format && pDevice->capture.format == ma_format_f32); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. 
+ */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/extras/nodes/ma_reverb_node/ma_reverb_node_example.c b/extras/nodes/ma_reverb_node/ma_reverb_node_example.c index 55ec444f..5102ab07 100644 --- a/extras/nodes/ma_reverb_node/ma_reverb_node_example.c +++ b/extras/nodes/ma_reverb_node/ma_reverb_node_example.c @@ -15,8 +15,13 @@ static ma_node_graph g_nodeGraph; /* The main node graph that we'l void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format && pDevice->capture.format == ma_format_f32); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. + */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a diff --git a/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c b/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c index 57700982..9e501005 100644 --- a/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c +++ b/extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c @@ -24,8 +24,13 @@ static ma_node_graph g_nodeGraph; void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { - MA_ASSERT(pDevice->capture.format == pDevice->playback.format); - MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels); + /* + This example assumes the playback and capture sides use the same format and channel count. The + format must be f32. 
+ */ + if (pDevice->capture.format != DEVICE_FORMAT || pDevice->playback.format != DEVICE_FORMAT || pDevice->capture.channels != pDevice->playback.channels) { + return; + } /* The node graph system is a pulling style of API. At the lowest level of the chain will be a