Use a heap allocation for the duplex ring buffer in ma_device.

The heap allocation is aligned to MA_CACHE_LINE_SIZE which is an
optimal alignment for ring buffers.

This also reduces the size of the `ma_device` struct for non-duplex
devices, which is the most common setup.
This commit is contained in:
David Reid
2026-02-16 09:50:10 +10:00
parent dbc955fb0d
commit ebbe9707e2
+16 -5
View File
@@ -7962,7 +7962,7 @@ struct ma_device
ma_bool8 noDisableDenormals;
ma_bool8 noFixedSizedCallback;
ma_atomic_float masterVolumeFactor; /* Linear 0..1. Can be read and written simultaneously by different threads. Must be used atomically. */
ma_duplex_rb duplexRB; /* Intermediary buffer for duplex devices on asynchronous backends. */
ma_duplex_rb* pDuplexRB; /* Intermediary buffer for duplex devices. Allocated on the heap to ensure it is aligned to MA_CACHE_LINE_SIZE. Will be null for non-duplex devices. */
struct
{
ma_resample_algorithm algorithm;
@@ -49202,7 +49202,15 @@ MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pC
after ma_device_post_init().
*/
if (pConfig->deviceType == ma_device_type_duplex) {
result = ma_duplex_rb_init(pDevice->capture.format, pDevice->capture.channels, pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames, ma_device_get_allocation_callbacks(pDevice), &pDevice->duplexRB);
/* TODO: Allocate one block of memory big enough for the ma_duplex_rb struct *and* the internal buffer. Then pass in the pre-allocated buffer to the ring buffer init routine. */
pDevice->pDuplexRB = (ma_duplex_rb*)ma_aligned_malloc(sizeof(ma_duplex_rb), MA_CACHE_LINE_SIZE, ma_device_get_allocation_callbacks(pDevice));
if (pDevice->pDuplexRB == NULL) {
ma_device_uninit(pDevice);
return MA_OUT_OF_MEMORY;
}
result = ma_duplex_rb_init(pDevice->capture.format, pDevice->capture.channels, pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames, ma_device_get_allocation_callbacks(pDevice), pDevice->pDuplexRB);
if (result != MA_SUCCESS) {
ma_device_uninit(pDevice);
return result;
@@ -49415,7 +49423,8 @@ MA_API void ma_device_uninit(ma_device* pDevice)
}
if (pDevice->type == ma_device_type_duplex) {
ma_duplex_rb_uninit(&pDevice->duplexRB);
ma_duplex_rb_uninit(pDevice->pDuplexRB);
ma_aligned_free(pDevice->pDuplexRB, ma_device_get_allocation_callbacks(pDevice));
}
if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
@@ -49993,12 +50002,14 @@ MA_API ma_result ma_device_handle_backend_data_callback(ma_device* pDevice, void
}
if (pDevice->type == ma_device_type_duplex) {
MA_ASSERT(pDevice->pDuplexRB != NULL);
if (pInput != NULL) {
ma_device__handle_duplex_callback_capture(pDevice, frameCount, pInput, &pDevice->duplexRB.rb);
ma_device__handle_duplex_callback_capture(pDevice, frameCount, pInput, &pDevice->pDuplexRB->rb);
}
if (pOutput != NULL) {
ma_device__handle_duplex_callback_playback(pDevice, frameCount, pOutput, &pDevice->duplexRB.rb);
ma_device__handle_duplex_callback_playback(pDevice, frameCount, pOutput, &pDevice->pDuplexRB->rb);
}
} else {
if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_loopback) {