mirror of
https://github.com/mackron/miniaudio.git
synced 2026-04-22 00:06:59 +02:00
Web Audio: Fix ScriptProcessNode path when compiling with --closure=1.
Audio Worklets do not work with --closure=1 because the callback used with emscripten_create_wasm_audio_worklet_processor_async never gets fired, which means miniaudio will never be able to escape from its busy wait loop. Public issue https://github.com/mackron/miniaudio/issues/778
This commit is contained in:
+21
-22
@@ -39791,7 +39791,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
|
||||
#if defined(MA_USE_AUDIO_WORKLETS)
|
||||
{
|
||||
EM_ASM({
|
||||
var device = miniaudio.get_device_by_index($0);
|
||||
var device = window.miniaudio.get_device_by_index($0);
|
||||
|
||||
if (device.streamNode !== undefined) {
|
||||
device.streamNode.disconnect();
|
||||
@@ -39806,7 +39806,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
|
||||
#else
|
||||
{
|
||||
EM_ASM({
|
||||
var device = miniaudio.get_device_by_index($0);
|
||||
var device = window.miniaudio.get_device_by_index($0);
|
||||
|
||||
/* Make sure all nodes are disconnected and marked for collection. */
|
||||
if (device.scriptNode !== undefined) {
|
||||
@@ -39833,7 +39833,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
|
||||
|
||||
/* Clean up the device on the JS side. */
|
||||
EM_ASM({
|
||||
miniaudio.untrack_device_by_index($0);
|
||||
window.miniaudio.untrack_device_by_index($0);
|
||||
}, pDevice->webaudio.deviceIndex);
|
||||
|
||||
ma_free(pDevice->webaudio.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks);
|
||||
@@ -39998,7 +39998,6 @@ static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T a
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
pParameters->pDevice->webaudio.audioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "miniaudio", &audioWorkletOptions, &ma_audio_worklet_process_callback__webaudio, pParameters->pDevice);
|
||||
|
||||
/* With the audio worklet initialized we can now attach it to the graph. */
|
||||
@@ -40138,7 +40137,6 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
/* It's not clear if this can return an error. None of the tests in the Emscripten repository check for this, so neither am I for now. */
|
||||
pDevice->webaudio.audioContext = emscripten_create_audio_context(&audioContextAttributes);
|
||||
|
||||
|
||||
/*
|
||||
With the context created we can now create the worklet. We can only have a single worklet per audio
|
||||
context which means we'll need to craft this appropriately to handle duplex devices correctly.
|
||||
@@ -40187,7 +40185,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
|
||||
/* We need to add an entry to the miniaudio.devices list on the JS side so we can do some JS/C interop. */
|
||||
pDevice->webaudio.deviceIndex = EM_ASM_INT({
|
||||
return miniaudio.track_device({
|
||||
return window.miniaudio.track_device({
|
||||
webaudio: emscriptenGetAudioObject($0),
|
||||
state: 1 /* 1 = ma_device_state_stopped */
|
||||
});
|
||||
@@ -40272,11 +40270,11 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
/* The node processing callback. */
|
||||
device.scriptNode.onaudioprocess = function(e) {
|
||||
if (device.intermediaryBufferView == null || device.intermediaryBufferView.length == 0) {
|
||||
device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
|
||||
device.intermediaryBufferView = new Float32Array(HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
|
||||
}
|
||||
|
||||
/* Do the capture side first. */
|
||||
if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) {
|
||||
if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
|
||||
/* The data must be interleaved before being processed miniaudio. */
|
||||
for (var iChannel = 0; iChannel < channels; iChannel += 1) {
|
||||
var inputBuffer = e.inputBuffer.getChannelData(iChannel);
|
||||
@@ -40290,7 +40288,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
_ma_device_process_pcm_frames_capture__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
|
||||
}
|
||||
|
||||
if (deviceType == miniaudio.device_type.playback || deviceType == miniaudio.device_type.duplex) {
|
||||
if (deviceType == window.miniaudio.device_type.playback || deviceType == window.miniaudio.device_type.duplex) {
|
||||
_ma_device_process_pcm_frames_playback__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
|
||||
|
||||
for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
|
||||
@@ -40310,7 +40308,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
};
|
||||
|
||||
/* Now we need to connect our node to the graph. */
|
||||
if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) {
|
||||
if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
|
||||
navigator.mediaDevices.getUserMedia({audio:true, video:false})
|
||||
.then(function(stream) {
|
||||
device.streamNode = device.webaudio.createMediaStreamSource(stream);
|
||||
@@ -40322,13 +40320,13 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
});
|
||||
}
|
||||
|
||||
if (deviceType == miniaudio.device_type.playback) {
|
||||
if (deviceType == window.miniaudio.device_type.playback) {
|
||||
device.scriptNode.connect(device.webaudio.destination);
|
||||
}
|
||||
|
||||
device.pDevice = pDevice;
|
||||
|
||||
return miniaudio.track_device(device);
|
||||
return window.miniaudio.track_device(device);
|
||||
}, pConfig->deviceType, channels, sampleRate, periodSizeInFrames, pDevice->webaudio.pIntermediaryBuffer, pDevice);
|
||||
|
||||
if (deviceIndex < 0) {
|
||||
@@ -40338,7 +40336,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
|
||||
pDevice->webaudio.deviceIndex = deviceIndex;
|
||||
|
||||
/* Grab the sample rate from the audio context directly. */
|
||||
sampleRate = (ma_uint32)EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
|
||||
sampleRate = (ma_uint32)EM_ASM_INT({ return window.miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
|
||||
|
||||
if (pDescriptorCapture != NULL) {
|
||||
pDescriptorCapture->format = ma_format_f32;
|
||||
@@ -40368,9 +40366,9 @@ static ma_result ma_device_start__webaudio(ma_device* pDevice)
|
||||
MA_ASSERT(pDevice != NULL);
|
||||
|
||||
EM_ASM({
|
||||
var device = miniaudio.get_device_by_index($0);
|
||||
var device = window.miniaudio.get_device_by_index($0);
|
||||
device.webaudio.resume();
|
||||
device.state = miniaudio.device_state.started;
|
||||
device.state = window.miniaudio.device_state.started;
|
||||
}, pDevice->webaudio.deviceIndex);
|
||||
|
||||
return MA_SUCCESS;
|
||||
@@ -40390,9 +40388,9 @@ static ma_result ma_device_stop__webaudio(ma_device* pDevice)
|
||||
do any kind of explicit draining.
|
||||
*/
|
||||
EM_ASM({
|
||||
var device = miniaudio.get_device_by_index($0);
|
||||
var device = window.miniaudio.get_device_by_index($0);
|
||||
device.webaudio.suspend();
|
||||
device.state = miniaudio.device_state.stopped;
|
||||
device.state = window.miniaudio.device_state.stopped;
|
||||
}, pDevice->webaudio.deviceIndex);
|
||||
|
||||
ma_device__on_notification_stopped(pDevice);
|
||||
@@ -40451,6 +40449,7 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex
|
||||
window.miniaudio.device_state.started = $4;
|
||||
|
||||
/* Device cache for mapping devices to indexes for JavaScript/C interop. */
|
||||
let miniaudio = window.miniaudio;
|
||||
miniaudio.devices = [];
|
||||
|
||||
miniaudio.track_device = function(device) {
|
||||
@@ -40502,13 +40501,13 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex
|
||||
var device = miniaudio.devices[i];
|
||||
if (device != null &&
|
||||
device.webaudio != null &&
|
||||
device.state === window.miniaudio.device_state.started) {
|
||||
device.state === miniaudio.device_state.started) {
|
||||
|
||||
device.webaudio.resume().then(() => {
|
||||
Module._ma_device__on_notification_unlocked(device.pDevice);
|
||||
},
|
||||
(error) => {console.error("Failed to resume audiocontext", error);
|
||||
});
|
||||
_ma_device__on_notification_unlocked(device.pDevice);
|
||||
},
|
||||
(error) => {console.error("Failed to resume audiocontext", error);
|
||||
});
|
||||
}
|
||||
}
|
||||
miniaudio.unlock_event_types.map(function(event_type) {
|
||||
|
||||
Reference in New Issue
Block a user