diff --git a/docs/examples/custom_backend.html b/docs/examples/custom_backend.html index 2637042a..582a1352 100644 --- a/docs/examples/custom_backend.html +++ b/docs/examples/custom_backend.html @@ -281,7 +281,7 @@ remaining callbacks if it is successfully initialized. Custom backends are identified with the ma_backend_custom backend type. For the purpose of demonstration, this example only uses the ma_backend_custom backend type because otherwise the built-in backends would always get chosen first and none of the code for the custom backends would actually get hit. By default, the ma_backend_custom backend is the lowest priority backend, except for ma_backend_null.
-+#include "../miniaudio.c" #ifdef __EMSCRIPTEN__ diff --git a/docs/examples/custom_decoder.html b/docs/examples/custom_decoder.html index 3a553d1e..45a8989e 100644 --- a/docs/examples/custom_decoder.html +++ b/docs/examples/custom_decoder.html @@ -274,7 +274,7 @@ The custom decoding data sources (ma_libvor the decoder via the decoder config (ma_decoder_config). You need to implement a vtable for each of your custom decoders. See ma_decoding_backend_vtable for the functions you need to implement. The onInitFile, onInitFileW and onInitMemory functions are optional. -+#include "../miniaudio.c" #include "../extras/decoders/libvorbis/miniaudio_libvorbis.c" #include "../extras/decoders/libopus/miniaudio_libopus.c" diff --git a/docs/examples/custom_decoder_engine.html b/docs/examples/custom_decoder_engine.html index 66f747dc..5df08ab1 100644 --- a/docs/examples/custom_decoder_engine.html +++ b/docs/examples/custom_decoder_engine.html @@ -257,7 +257,7 @@ Demonstrates how to implement a custom decoder and use it with the high level AP This is the same as the custom_decoder example, only it's used with the high level engine API rather than the low level decoding API. You can use this to add support for Opus to your games, for example (via libopus). -+#include "../miniaudio.c" #include "../extras/decoders/libvorbis/miniaudio_libvorbis.c" #include "../extras/decoders/libopus/miniaudio_libopus.c" diff --git a/docs/examples/data_source_chaining.html b/docs/examples/data_source_chaining.html index 68ffa3dd..55950bea 100644 --- a/docs/examples/data_source_chaining.html +++ b/docs/examples/data_source_chaining.html @@ -283,7 +283,7 @@ consistently read from the head data source this state will become inconsistent work correctly. 
When using a chain, this pointer needs to be reset if you need to play the chain again from the start: -+ma_data_source_set_current(&headDataSource, &headDataSource); ma_data_source_seek_to_pcm_frame(&headDataSource, 0);@@ -292,7 +292,7 @@ The code above is setting the "current" data source in the chain to th starting the chain from the start again. It is also seeking the head data source back to the start so that playback starts from the start as expected. You do not need to seek non-head items back to the start as miniaudio will do that for you internally.
-+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/duplex_effect.html b/docs/examples/duplex_effect.html index e6d44e4e..c319ee24 100644 --- a/docs/examples/duplex_effect.html +++ b/docs/examples/duplex_effect.html @@ -258,7 +258,7 @@ This example applies a vocoder effect to the input stream before outputting it. called ma_vocoder_node is used to achieve the effect which can be found in the extras folder in the miniaudio repository. The vocoder node uses https://github.com/blastbay/voclib to achieve the effect. -+#include "../miniaudio.c" #include "../extras/nodes/ma_vocoder_node/ma_vocoder_node.c" diff --git a/docs/examples/engine_advanced.html b/docs/examples/engine_advanced.html index 2c7bed74..5344b46a 100644 --- a/docs/examples/engine_advanced.html +++ b/docs/examples/engine_advanced.html @@ -275,7 +275,7 @@ This example will play the sound that's passed in on the command line. Using a shared resource manager, as we do in this example, is useful for when you want to user multiple engines so that you can output to multiple playback devices simultaneoulys. An example might be a local co-op multiplayer game where each player has their own headphones. -+#include "../miniaudio.c" #define MAX_DEVICES 2 diff --git a/docs/examples/engine_effects.html b/docs/examples/engine_effects.html index ed76e689..c9c2347f 100644 --- a/docs/examples/engine_effects.html +++ b/docs/examples/engine_effects.html @@ -269,7 +269,7 @@ sound's output into the effect's input. See the Node Graph example for h This example is playing only a single sound at a time which means only a single ma_sound object it being used. If you want to play multiple sounds at the same time, even if they're for the same sound file, you need multiple ma_sound objects. 
-+#include "../miniaudio.c" #define DELAY_IN_SECONDS 0.2f diff --git a/docs/examples/engine_hello_world.html b/docs/examples/engine_hello_world.html index bc44ea34..b7b645d8 100644 --- a/docs/examples/engine_hello_world.html +++ b/docs/examples/engine_hello_world.html @@ -255,7 +255,7 @@ This example demonstrates how to initialize an audio engine and play a sound.This will play the sound specified on the command line.
-+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/engine_sdl.html b/docs/examples/engine_sdl.html index 1c9f4cef..e8fcf62f 100644 --- a/docs/examples/engine_sdl.html +++ b/docs/examples/engine_sdl.html @@ -262,7 +262,7 @@ audio output instead of miniaudio. This example will load the sound specified on the command line and rotate it around the listener's head. -+#define MA_NO_DEVICE_IO /* <-- Disables the ma_device API. We don't need that in this example since SDL will be doing that part for us. */ #include "../miniaudio.c" diff --git a/docs/examples/engine_steamaudio.html b/docs/examples/engine_steamaudio.html index e7ba9f0d..d23380ff 100644 --- a/docs/examples/engine_steamaudio.html +++ b/docs/examples/engine_steamaudio.html @@ -270,7 +270,7 @@ consistent, you must set the period size in the engine config to be consistent w you specify in your IPLAudioSettings object. If for some reason you want the period size of the engine to be different to that of your Steam Audio configuration, you'll need to implement a sort of buffering solution to your node. -+#include "../miniaudio.c" #include <stdint.h> /* Required for uint32_t which is used by STEAMAUDIO_VERSION, and a random use of uint8_t. If there's a Steam Audio maintainer reading this, that needs to be fixed to use IPLuint32 and IPLuint8. */ diff --git a/docs/examples/hilo_interop.html b/docs/examples/hilo_interop.html index d680c052..2c6134a8 100644 --- a/docs/examples/hilo_interop.html +++ b/docs/examples/hilo_interop.html @@ -271,7 +271,7 @@ A more robust example would probably not want to use a ring buffer directly as t Instead you would probably want to do a custom data source that handles underruns and overruns of the ring buffer and deals with desyncs between capture and playback. In the future this example may be updated to make use of a more advanced data source that handles all of this. 
-+#include "../miniaudio.c" static ma_pcm_rb rb; diff --git a/docs/examples/node_graph.html b/docs/examples/node_graph.html index 111d4829..6cd4e79e 100644 --- a/docs/examples/node_graph.html +++ b/docs/examples/node_graph.html @@ -280,7 +280,7 @@ node graph. This example will be using the following node graph set up: -+>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Data flows left to right >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> +---------------+ +-----------------+ @@ -317,7 +317,7 @@ pass and echo effects so that one of them becomes more obvious than the other.When you want to read from the graph, you simply call ma_node_graph_read_pcm_frames().
-+#include "../miniaudio.c" /* Data Format */ diff --git a/docs/examples/resource_manager.html b/docs/examples/resource_manager.html index b4f1a499..d67d1df4 100644 --- a/docs/examples/resource_manager.html +++ b/docs/examples/resource_manager.html @@ -287,7 +287,7 @@ for processing jobs. That is more advanced, and beyond the scope of this example When you initialize a resource manager you can specify the sample format, channels and sample rate to use when reading data from the data source. This means the resource manager will ensure all sounds will have a standard format. When not set, each sound will have their own formats and you'll need to do the necessary data conversion yourself. -+#define MA_NO_ENGINE /* We're intentionally not using the ma_engine API here. */ #include "../miniaudio.c" diff --git a/docs/examples/resource_manager_advanced.html b/docs/examples/resource_manager_advanced.html index 32c91fbf..1a788f13 100644 --- a/docs/examples/resource_manager_advanced.html +++ b/docs/examples/resource_manager_advanced.html @@ -272,7 +272,7 @@ threads. You can also implement your own custom job threads which this example a In this example we show how you can create a data source, mix them with other data sources, configure the number of job threads to manage internally and how to implement your own custom job thread. -+#define MA_NO_ENGINE /* We're intentionally not using the ma_engine API here. */ #include "../miniaudio.c" diff --git a/docs/examples/simple_capture.html b/docs/examples/simple_capture.html index 25a6792e..ddc29575 100644 --- a/docs/examples/simple_capture.html +++ b/docs/examples/simple_capture.html @@ -262,7 +262,7 @@ specified on the command line. Capturing works in a very similar way to playback. The only difference is the direction of data movement. Instead of the application sending data to the device, the device will send data to the application. This example just writes the data received by the microphone straight to a WAV file. 
-+#include "../miniaudio.c" #include <stdlib.h> diff --git a/docs/examples/simple_duplex.html b/docs/examples/simple_duplex.html index 60e172e1..897b2e20 100644 --- a/docs/examples/simple_duplex.html +++ b/docs/examples/simple_duplex.html @@ -264,7 +264,7 @@ Note that the microphone and playback device must run in lockstep. Any kind of t glitching which the backend may not be able to recover from. For this reason, miniaudio forces you to use the same sample rate for both capture and playback. If internally the native sample rates differ, miniaudio will perform the sample rate conversion for you automatically. -+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/simple_enumeration.html b/docs/examples/simple_enumeration.html index 54035fb4..e53f30fa 100644 --- a/docs/examples/simple_enumeration.html +++ b/docs/examples/simple_enumeration.html @@ -261,7 +261,7 @@ context sits above a device. You can have many devices to one context. If you use device enumeration, you should explicitly specify the same context you used for enumeration in the call to ma_device_init() when you initialize your devices. -+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/simple_loopback.html b/docs/examples/simple_loopback.html index c31c6f3e..d76c83f2 100644 --- a/docs/examples/simple_loopback.html +++ b/docs/examples/simple_loopback.html @@ -266,7 +266,7 @@ used indirectly with PulseAudio by choosing the appropriate loopback device afte To use loopback mode you just need to set the device type to ma_device_type_loopback and set the capture device config properties. The output buffer in the callback will be null whereas the input buffer will be valid. 
-+#include "../miniaudio.c" #include <stdlib.h> diff --git a/docs/examples/simple_looping.html b/docs/examples/simple_looping.html index 6f3ebfe1..d0f55f1f 100644 --- a/docs/examples/simple_looping.html +++ b/docs/examples/simple_looping.html @@ -257,7 +257,7 @@ Shows one way to handle looping of a sound. This example uses a decoder as the data source. Decoders can be used with the ma_data_source API which, conveniently, supports looping via the ma_data_source_read_pcm_frames() API. To use it, all you need to do is pass a pointer to the decoder straight into ma_data_source_read_pcm_frames() and it will just work. -+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/simple_mixing.html b/docs/examples/simple_mixing.html index d988fb20..c838e4f6 100644 --- a/docs/examples/simple_mixing.html +++ b/docs/examples/simple_mixing.html @@ -259,10 +259,10 @@ device and then mix your sounds together which you can do by simply summing thei do this is to use floating point samples and use miniaudio's built-in clipper to handling clipping for you. (Clipping is when sample are clamped to their minimum and maximum range, which for floating point is -1..1.) -+Usage: simple_mixing [input file 0] [input file 1] ... [input file n] Example: simple_mixing file1.wav file2.flac -+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/simple_playback.html b/docs/examples/simple_playback.html index 59005809..3c267345 100644 --- a/docs/examples/simple_playback.html +++ b/docs/examples/simple_playback.html @@ -264,7 +264,7 @@ This example uses the ma_decoder API device and can be used independently of it. This example only plays back a single sound file, but it's possible to play back multiple files by simple loading multiple decoders and mixing them (do not create multiple devices to do this). See the simple_mixing example for how best to do this. 
-+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/examples/simple_playback_sine.html b/docs/examples/simple_playback_sine.html index db8641fd..7862e438 100644 --- a/docs/examples/simple_playback_sine.html +++ b/docs/examples/simple_playback_sine.html @@ -270,7 +270,7 @@ the ma_waveform_read_pcm_frames() APThis example works with Emscripten.
-+#define MA_NO_DECODING #define MA_NO_ENCODING #include "../miniaudio.c" diff --git a/docs/examples/simple_spatialization.html b/docs/examples/simple_spatialization.html index 714dd4ec..c297128e 100644 --- a/docs/examples/simple_spatialization.html +++ b/docs/examples/simple_spatialization.html @@ -272,7 +272,7 @@ information on the available features. To use this example, pass in the path of a sound as the first argument. The sound will be positioned in front of the listener, while the listener rotates on the the spot to create an orbiting effect. Terminate the program with Ctrl+C. -+#include "../miniaudio.c" #include <stdio.h> diff --git a/docs/manual/index.html b/docs/manual/index.html index be40c986..41fbe4d6 100644 --- a/docs/manual/index.html +++ b/docs/manual/index.html @@ -307,7 +307,7 @@ device you need to allocate memory for the device object beforehand. This gives complete control over how the memory is allocated. In the example below we initialize a playback device on the stack, but you could allocate it on the heap if that suits your situation better. -+void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { // In playback mode copy data to pOutput. In capture mode read data from pInput. In full-duplex mode, both @@ -460,7 +460,7 @@ result in a deadlock. Instead you set a variable or signal an event indicating t needs to stop and handle it in a different thread. The following APIs must never be called inside the callback: -+ma_device_init() ma_device_init_ex() ma_device_uninit() @@ -479,7 +479,7 @@ The example above demonstrates the initialization of a playback device, but it w same for capture. 
All you need to do is change the device type from ma_device_type_playback to ma_device_type_capture when setting up the config, like so: -+ma_device_config config = ma_device_config_init(ma_device_type_capture); config.capture.format = MY_FORMAT; config.capture.channels = MY_CHANNEL_COUNT; @@ -548,7 +548,7 @@ The example above did not specify a physical device to connect to which means it operating system's default device. If you have multiple physical devices connected and you want to use a specific one you will need to specify the device ID in the configuration, like so: -+config.playback.pDeviceID = pMyPlaybackDeviceID; // Only if requesting a playback or duplex device. config.capture.pDeviceID = pMyCaptureDeviceID; // Only if requesting a capture, duplex or loopback device.@@ -560,7 +560,7 @@ more global level and to perform operations outside the scope of an individual d used for performing run-time linking against backend libraries, initializing backends and enumerating devices. The example below shows how to enumerate devices.
-+ma_context context; if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) { // Error. @@ -668,7 +668,7 @@ this manual. The code below shows how you can initialize an engine using it's default configuration. -+ma_result result; ma_engine engine; @@ -693,7 +693,7 @@ this will result in the struct being invalidated once the function encapsulating allocating the engine on the heap is more appropriate, you can easily do so with a standard call to malloc() or whatever heap allocation routine you like: -+ma_engine* pEngine = malloc(sizeof(*pEngine));@@ -701,7 +701,7 @@ The ma_engine API uses the same conf an engine, you can fill out a ma_engine_config object and pass it into the first parameter of ma_engine_init():
-+ma_result result; ma_engine engine; ma_engine_config engineConfig; @@ -728,7 +728,7 @@ The engine must be uninitialized with ma_en By default the engine will be started, but nothing will be playing because no sounds have been initialized. The easiest but least flexible way of playing a sound is like so: -+ma_engine_play_sound(&engine, "my_sound.wav", NULL);@@ -738,7 +738,7 @@ should be associated with which will be explained later. This particular way of simple, but lacks flexibility and features. A more flexible way of playing a sound is to first initialize a sound:
-+ma_result result; ma_sound sound; @@ -766,7 +766,7 @@ Sounds are not started by default. Start a sound with+ma_sound_set_start_time_in_pcm_frames() ma_sound_set_start_time_in_milliseconds() ma_sound_set_stop_time_in_pcm_frames() @@ -779,7 +779,7 @@ engine. The current global time in PCM frames can be retrieved with ma_engine_set_time_in_pcm_frames() for synchronization purposes if required. Note that scheduling a start time still requires an explicit call to ma_sound_start() before anything will play: -+ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 2); ma_sound_start(&sound);@@ -897,7 +897,7 @@ AudioToolbox, try with -framework AudioUnit of iOS. Alternatively, if you would rather keep using runtime linking you can add the following to your entitlements.xcent file:
-+<key>com.apple.security.cs.allow-dyld-environment-variables</key> <true/> <key>com.apple.security.cs.allow-unsigned-executable-memory</key> @@ -1311,7 +1311,7 @@ MA_NO_RESOURCE_MANAGER Disables the resource manager. When using the engine this will also disable the following functions: -+ma_sound_init_from_file() ma_sound_init_from_file_w() ma_sound_init_copy() @@ -1353,7 +1353,7 @@ miniaudio for data conversion, decoding and/or encoding. Some families of APIs require threading which means the following options must also be set: -+@@ -1608,7 +1608,7 @@ implements the data source interface can be plugged into anyMA_NO_DEVICE_IO+ma_result result; ma_uint64 framesRead; @@ -1629,7 +1629,7 @@ When calling any data source function, with the exception of ma_data_source_uninit(), you can pass in any object that implements a data source. For example, you could plug in a decoder like so: -+ma_result result; ma_uint64 framesRead; ma_decoder decoder; // <-- This would be initialized with ma_decoder_init_*(). @@ -1647,7 +1647,7 @@ can use ma_data_source_seek_pcm_frames() -+result = ma_data_source_seek_to_pcm_frame(pDataSource, frameIndex); if (result != MA_SUCCESS) { return result; // Failed to seek to PCM frame. @@ -1658,7 +1658,7 @@ You can retrieve the total length of a data source in PCM frames, but note that may not have the notion of a length, such as noise and waveforms, and others may just not have a way of determining the length such as some decoders. To retrieve the length: -+ma_uint64 length; result = ma_data_source_get_length_in_pcm_frames(pDataSource, &length); @@ -1675,7 +1675,7 @@ broadcast. If you do this, ma_data_source_g The current position of the cursor in PCM frames can also be retrieved: -+ma_uint64 cursor; result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursor); @@ -1687,7 +1687,7 @@ result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursor); You will often need to know the data format that will be returned after reading. 
This can be retrieved like so: -+ma_format format; ma_uint32 channels; ma_uint32 sampleRate; @@ -1706,7 +1706,7 @@ If you do not need a specific data format property, just pass in NULL to the res There may be cases where you want to implement something like a sound bank where you only want to read data within a certain range of the underlying data. To do this you can use a range: -+result = ma_data_source_set_range_in_pcm_frames(pDataSource, rangeBegInFrames, rangeEndInFrames); if (result != MA_SUCCESS) { return result; // Failed to set the range. @@ -1724,7 +1724,7 @@ Custom loop points can also be used with data sources. By default, data sources they reach the end of the data source, but if you need to loop at a specific location, you can do the following: -+result = ma_data_set_loop_point_in_pcm_frames(pDataSource, loopBegInFrames, loopEndInFrames); if (result != MA_SUCCESS) { return result; // Failed to set the loop point. @@ -1738,7 +1738,7 @@ The loop point is relative to the current range. It's sometimes useful to chain data sources together so that a seamless transition can be achieved. To do this, you can use chaining: -+ma_decoder decoder1; ma_decoder decoder2; @@ -1765,7 +1765,7 @@ gaps. Note that when looping is enabled, only the current data source will be looped. You can loop the entire chain by linking in a loop like so: -+ma_data_source_set_next(&decoder1, &decoder2); // decoder1 -> decoder2 ma_data_source_set_next(&decoder2, &decoder1); // decoder2 -> decoder1 (loop back to the start).@@ -1785,7 +1785,7 @@ Instead, initialize multiple data sources for each instance. You can implement a custom data source by implementing the functions in ma_data_source_vtable. Your custom object must have ma_data_source_base as it's first member:
-+struct my_data_source { ma_data_source_base base; @@ -1796,7 +1796,7 @@ Your custom object must have ma_data_source In your initialization routine, you need to call ma_data_source_init() in order to set up the base object (ma_data_source_base): -+static ma_result my_data_source_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) { // Read data here. Output in the same format returned by my_data_source_get_data_format(). @@ -1885,7 +1885,7 @@ configured via the engine config. The most basic way to initialize the engine is with a default config, like so: -+ma_result result; ma_engine engine; @@ -1899,7 +1899,7 @@ This will result in the engine initializing a playback device using the operatin device. This will be sufficient for many use cases, but if you need more flexibility you'll want to configure the engine with an engine config: -+ma_result result; ma_engine engine; ma_engine_config engineConfig; @@ -1917,7 +1917,7 @@ In the example above we're passing in a pre-initialized device. Since the ca control of the device's data callback, it's their responsibility to manually call ma_engine_read_pcm_frames() from inside their data callback: -+void playback_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount) { ma_engine_read_pcm_frames(&g_Engine, pOutput, frameCount, NULL); @@ -1926,7 +1926,7 @@ control of the device's data callback, it's their responsibility to manu You can also use the engine independent of a device entirely: -+ma_result result; ma_engine engine; ma_engine_config engineConfig; @@ -1953,7 +1953,7 @@ processing or want to use a different audio system for playback such as SDL. When a sound is loaded it goes through a resource manager. 
By default the engine will initialize a resource manager internally, but you can also specify a pre-initialized resource manager: -+ma_result result; ma_engine engine1; ma_engine engine2; @@ -1979,7 +1979,7 @@ is using their own set of headphones. By default an engine will be in a started state. To make it so the engine is not automatically started you can configure it as such: -+engineConfig.noAutoStart = MA_TRUE; // The engine will need to be started manually. @@ -2004,7 +2004,7 @@ prefer decibel based volume control, use ma When a sound is spatialized, it is done so relative to a listener. An engine can be configured to have multiple listeners which can be configured via the config: -+engineConfig.listenerCount = 2;@@ -2015,14 +2015,14 @@ and velocity (for doppler effect). A listener is referenced by an index, the mea to the caller (the index is 0 based and cannot go beyond the listener count, minus 1). The position, direction and velocity are all specified in absolute terms:
-+ma_engine_listener_set_position(&engine, listenerIndex, worldPosX, worldPosY, worldPosZ);The direction of the listener represents its forward vector. The listener's up vector can also be specified and defaults to +1 on the Y axis.
-+ma_engine_listener_set_direction(&engine, listenerIndex, forwardX, forwardY, forwardZ); ma_engine_listener_set_world_up(&engine, listenerIndex, 0, 1, 0);@@ -2031,7 +2031,7 @@ The engine supports directional attenuation. The listener can have a cone the co attenuated based on the listener's direction. When a sound is between the inner and outer cones, it will be attenuated between 1 and the cone's outer gain:
-+ma_engine_listener_set_cone(&engine, listenerIndex, innerAngleInRadians, outerAngleInRadians, outerGain);@@ -2049,7 +2049,7 @@ positive Y points up and negative Z points forward. The simplest and least flexible way to play a sound is like so:
-+ma_engine_play_sound(&engine, "my_sound.wav", pGroup);@@ -2057,7 +2057,7 @@ This is a "fire and forget" style of function. The engine will manage internally. When the sound finishes playing, it'll be put up for recycling. For more flexibility you'll want to initialize a sound object:
-+ma_sound sound; result = ma_sound_init_from_file(&engine, "my_sound.wav", flags, pGroup, NULL, &sound); @@ -2074,7 +2074,7 @@ The example above loads a sound from a file. If the resource manager has been di be able to use this function and instead you'll need to initialize a sound directly from a data source: -+ma_sound sound; result = ma_sound_init_from_data_source(&engine, &dataSource, flags, pGroup, &sound); @@ -2091,7 +2091,7 @@ sound multiple times at the same time, you need to initialize a separate ma_sound_init_ex(). This uses miniaudio's standard config/init pattern: -+ma_sound sound; ma_sound_config soundConfig; @@ -2123,7 +2123,7 @@ allocate a block of memory and then load the file directly into it. When reading will be decoded dynamically on the fly. In order to save processing time on the audio thread, it might be beneficial to pre-decode the sound. You can do this with the MA_SOUND_FLAG_DECODE flag: -+ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE, pGroup, NULL, &sound);@@ -2131,7 +2131,7 @@ By default, sounds will be loaded synchronously, meaning MA_SOUND_FLAG_ASYNC flag:
-+ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, pGroup, NULL, &sound);@@ -2146,7 +2146,7 @@ If you need to wait for an asynchronously loaded sound to be fully loaded, you c fence in miniaudio is a simple synchronization mechanism which simply blocks until its internal counter hits zero. You can specify a fence like so:
-+ma_result result; ma_fence fence; ma_sound sounds[4]; @@ -2170,7 +2170,7 @@ ma_fence_wait(&fence); If loading the entire sound into memory is prohibitive, you can also configure the engine to stream the audio data: -