From 27acc82695f1b64ec56ae96bdf6fd0cf371b1222 Mon Sep 17 00:00:00 2001 From: David Reid Date: Mon, 8 Sep 2025 12:05:49 +1000 Subject: [PATCH] Remove vertical scroll bars from code sections. --- docs/examples/custom_backend.html | 2 +- docs/examples/custom_decoder.html | 2 +- docs/examples/custom_decoder_engine.html | 2 +- docs/examples/data_source_chaining.html | 4 +- docs/examples/duplex_effect.html | 2 +- docs/examples/engine_advanced.html | 2 +- docs/examples/engine_effects.html | 2 +- docs/examples/engine_hello_world.html | 2 +- docs/examples/engine_sdl.html | 2 +- docs/examples/engine_steamaudio.html | 2 +- docs/examples/hilo_interop.html | 2 +- docs/examples/node_graph.html | 4 +- docs/examples/resource_manager.html | 2 +- docs/examples/resource_manager_advanced.html | 2 +- docs/examples/simple_capture.html | 2 +- docs/examples/simple_duplex.html | 2 +- docs/examples/simple_enumeration.html | 2 +- docs/examples/simple_loopback.html | 2 +- docs/examples/simple_looping.html | 2 +- docs/examples/simple_mixing.html | 4 +- docs/examples/simple_playback.html | 2 +- docs/examples/simple_playback_sine.html | 2 +- docs/examples/simple_spatialization.html | 2 +- docs/manual/index.html | 256 +++++++++---------- 24 files changed, 154 insertions(+), 154 deletions(-) diff --git a/docs/examples/custom_backend.html b/docs/examples/custom_backend.html index 2637042a..582a1352 100644 --- a/docs/examples/custom_backend.html +++ b/docs/examples/custom_backend.html @@ -281,7 +281,7 @@ remaining callbacks if it is successfully initialized. Custom backends are identified with the ma_backend_custom backend type. For the purpose of demonstration, this example only uses the ma_backend_custom backend type because otherwise the built-in backends would always get chosen first and none of the code for the custom backends would actually get hit. By default, the ma_backend_custom backend is the lowest priority backend, except for ma_backend_null.

-
+
 #include "../miniaudio.c"
 
 #ifdef __EMSCRIPTEN__
diff --git a/docs/examples/custom_decoder.html b/docs/examples/custom_decoder.html
index 3a553d1e..45a8989e 100644
--- a/docs/examples/custom_decoder.html
+++ b/docs/examples/custom_decoder.html
@@ -274,7 +274,7 @@ The custom decoding data sources (ma_libvor
 the decoder via the decoder config (ma_decoder_config). You need to implement a vtable for each
 of your custom decoders. See ma_decoding_backend_vtable for the functions you need to implement.
 The onInitFile, onInitFileW and onInitMemory functions are optional.

-
+
 #include "../miniaudio.c"
 #include "../extras/decoders/libvorbis/miniaudio_libvorbis.c"
 #include "../extras/decoders/libopus/miniaudio_libopus.c"
diff --git a/docs/examples/custom_decoder_engine.html b/docs/examples/custom_decoder_engine.html
index 66f747dc..5df08ab1 100644
--- a/docs/examples/custom_decoder_engine.html
+++ b/docs/examples/custom_decoder_engine.html
@@ -257,7 +257,7 @@ Demonstrates how to implement a custom decoder and use it with the high level AP
 This is the same as the custom_decoder example, only it's used with the high level engine API
 rather than the low level decoding API. You can use this to add support for Opus to your games, for
 example (via libopus).

-
+
 #include "../miniaudio.c"
 #include "../extras/decoders/libvorbis/miniaudio_libvorbis.c"
 #include "../extras/decoders/libopus/miniaudio_libopus.c"
diff --git a/docs/examples/data_source_chaining.html b/docs/examples/data_source_chaining.html
index 68ffa3dd..55950bea 100644
--- a/docs/examples/data_source_chaining.html
+++ b/docs/examples/data_source_chaining.html
@@ -283,7 +283,7 @@ consistently read from the head data source this state will become inconsistent
 work correctly. When using a chain, this pointer needs to be reset if you need to play the
 chain again from the start:
 

-
+
 ma_data_source_set_current(&headDataSource, &headDataSource);
 ma_data_source_seek_to_pcm_frame(&headDataSource, 0);
 

@@ -292,7 +292,7 @@ The code above is setting the "current" data source in the chain to th starting the chain from the start again. It is also seeking the head data source back to the start so that playback starts from the start as expected. You do not need to seek non-head items back to the start as miniaudio will do that for you internally.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/duplex_effect.html b/docs/examples/duplex_effect.html
index e6d44e4e..c319ee24 100644
--- a/docs/examples/duplex_effect.html
+++ b/docs/examples/duplex_effect.html
@@ -258,7 +258,7 @@ This example applies a vocoder effect to the input stream before outputting it.
 called ma_vocoder_node is used to achieve the effect which can be found in the extras folder in
 the miniaudio repository. The vocoder node uses https://github.com/blastbay/voclib to achieve the
 effect.

-
+
 #include "../miniaudio.c"
 #include "../extras/nodes/ma_vocoder_node/ma_vocoder_node.c"
 
diff --git a/docs/examples/engine_advanced.html b/docs/examples/engine_advanced.html
index 2c7bed74..5344b46a 100644
--- a/docs/examples/engine_advanced.html
+++ b/docs/examples/engine_advanced.html
@@ -275,7 +275,7 @@ This example will play the sound that's passed in on the command line.
 Using a shared resource manager, as we do in this example, is useful for when you want to user
 multiple engines so that you can output to multiple playback devices simultaneoulys. An example
 might be a local co-op multiplayer game where each player has their own headphones.

-
+
 #include "../miniaudio.c"
 
 #define MAX_DEVICES 2
diff --git a/docs/examples/engine_effects.html b/docs/examples/engine_effects.html
index ed76e689..c9c2347f 100644
--- a/docs/examples/engine_effects.html
+++ b/docs/examples/engine_effects.html
@@ -269,7 +269,7 @@ sound's output into the effect's input. See the Node Graph example for h
 This example is playing only a single sound at a time which means only a single ma_sound object
 it being used. If you want to play multiple sounds at the same time, even if they're for the same
 sound file, you need multiple ma_sound objects.

-
+
 #include "../miniaudio.c"
 
 #define DELAY_IN_SECONDS    0.2f
diff --git a/docs/examples/engine_hello_world.html b/docs/examples/engine_hello_world.html
index bc44ea34..b7b645d8 100644
--- a/docs/examples/engine_hello_world.html
+++ b/docs/examples/engine_hello_world.html
@@ -255,7 +255,7 @@ This example demonstrates how to initialize an audio engine and play a sound.
 

This will play the sound specified on the command line.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/engine_sdl.html b/docs/examples/engine_sdl.html
index 1c9f4cef..e8fcf62f 100644
--- a/docs/examples/engine_sdl.html
+++ b/docs/examples/engine_sdl.html
@@ -262,7 +262,7 @@ audio output instead of miniaudio.
 
 This example will load the sound specified on the command line and rotate it around the listener's
 head.

-
+
 #define MA_NO_DEVICE_IO /* <-- Disables the ma_device API. We don't need that in this example since SDL will be doing that part for us. */
 #include "../miniaudio.c"
 
diff --git a/docs/examples/engine_steamaudio.html b/docs/examples/engine_steamaudio.html
index e7ba9f0d..d23380ff 100644
--- a/docs/examples/engine_steamaudio.html
+++ b/docs/examples/engine_steamaudio.html
@@ -270,7 +270,7 @@ consistent, you must set the period size in the engine config to be consistent w
 you specify in your IPLAudioSettings object. If for some reason you want the period size of the
 engine to be different to that of your Steam Audio configuration, you'll need to implement a sort
 of buffering solution to your node.

-
+
 #include "../miniaudio.c"
 
 #include <stdint.h> /* Required for uint32_t which is used by STEAMAUDIO_VERSION, and a random use of uint8_t. If there's a Steam Audio maintainer reading this, that needs to be fixed to use IPLuint32 and IPLuint8. */
diff --git a/docs/examples/hilo_interop.html b/docs/examples/hilo_interop.html
index d680c052..2c6134a8 100644
--- a/docs/examples/hilo_interop.html
+++ b/docs/examples/hilo_interop.html
@@ -271,7 +271,7 @@ A more robust example would probably not want to use a ring buffer directly as t
 Instead you would probably want to do a custom data source that handles underruns and overruns of
 the ring buffer and deals with desyncs between capture and playback. In the future this example
 may be updated to make use of a more advanced data source that handles all of this.

-
+
 #include "../miniaudio.c"
 
 static ma_pcm_rb rb;
diff --git a/docs/examples/node_graph.html b/docs/examples/node_graph.html
index 111d4829..6cd4e79e 100644
--- a/docs/examples/node_graph.html
+++ b/docs/examples/node_graph.html
@@ -280,7 +280,7 @@ node graph.
 
 This example will be using the following node graph set up:
 

-
+
 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Data flows left to right >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
 
 +---------------+                              +-----------------+
@@ -317,7 +317,7 @@ pass and echo effects so that one of them becomes more obvious than the other.
 

When you want to read from the graph, you simply call ma_node_graph_read_pcm_frames().

-
+
 #include "../miniaudio.c"
 
 /* Data Format */
diff --git a/docs/examples/resource_manager.html b/docs/examples/resource_manager.html
index b4f1a499..d67d1df4 100644
--- a/docs/examples/resource_manager.html
+++ b/docs/examples/resource_manager.html
@@ -287,7 +287,7 @@ for processing jobs. That is more advanced, and beyond the scope of this example
 When you initialize a resource manager you can specify the sample format, channels and sample rate to use when reading
 data from the data source. This means the resource manager will ensure all sounds will have a standard format. When not
 set, each sound will have their own formats and you'll need to do the necessary data conversion yourself.

-
+
 #define MA_NO_ENGINE        /* We're intentionally not using the ma_engine API here. */
 #include "../miniaudio.c"
 
diff --git a/docs/examples/resource_manager_advanced.html b/docs/examples/resource_manager_advanced.html
index 32c91fbf..1a788f13 100644
--- a/docs/examples/resource_manager_advanced.html
+++ b/docs/examples/resource_manager_advanced.html
@@ -272,7 +272,7 @@ threads. You can also implement your own custom job threads which this example a
 
 In this example we show how you can create a data source, mix them with other data sources, configure the number of job
 threads to manage internally and how to implement your own custom job thread.

-
+
 #define MA_NO_ENGINE        /* We're intentionally not using the ma_engine API here. */
 #include "../miniaudio.c"
 
diff --git a/docs/examples/simple_capture.html b/docs/examples/simple_capture.html
index 25a6792e..ddc29575 100644
--- a/docs/examples/simple_capture.html
+++ b/docs/examples/simple_capture.html
@@ -262,7 +262,7 @@ specified on the command line.
 Capturing works in a very similar way to playback. The only difference is the direction of data movement. Instead of
 the application sending data to the device, the device will send data to the application. This example just writes the
 data received by the microphone straight to a WAV file.

-
+
 #include "../miniaudio.c"
 
 #include <stdlib.h>
diff --git a/docs/examples/simple_duplex.html b/docs/examples/simple_duplex.html
index 60e172e1..897b2e20 100644
--- a/docs/examples/simple_duplex.html
+++ b/docs/examples/simple_duplex.html
@@ -264,7 +264,7 @@ Note that the microphone and playback device must run in lockstep. Any kind of t
 glitching which the backend may not be able to recover from. For this reason, miniaudio forces you to use the same
 sample rate for both capture and playback. If internally the native sample rates differ, miniaudio will perform the
 sample rate conversion for you automatically.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/simple_enumeration.html b/docs/examples/simple_enumeration.html
index 54035fb4..e53f30fa 100644
--- a/docs/examples/simple_enumeration.html
+++ b/docs/examples/simple_enumeration.html
@@ -261,7 +261,7 @@ context sits above a device. You can have many devices to one context.
 
 If you use device enumeration, you should explicitly specify the same context you used for enumeration in the call to
 ma_device_init() when you initialize your devices.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/simple_loopback.html b/docs/examples/simple_loopback.html
index c31c6f3e..d76c83f2 100644
--- a/docs/examples/simple_loopback.html
+++ b/docs/examples/simple_loopback.html
@@ -266,7 +266,7 @@ used indirectly with PulseAudio by choosing the appropriate loopback device afte
 
 To use loopback mode you just need to set the device type to ma_device_type_loopback and set the capture device config
 properties. The output buffer in the callback will be null whereas the input buffer will be valid.

-
+
 #include "../miniaudio.c"
 
 #include <stdlib.h>
diff --git a/docs/examples/simple_looping.html b/docs/examples/simple_looping.html
index 6f3ebfe1..d0f55f1f 100644
--- a/docs/examples/simple_looping.html
+++ b/docs/examples/simple_looping.html
@@ -257,7 +257,7 @@ Shows one way to handle looping of a sound.
 This example uses a decoder as the data source. Decoders can be used with the ma_data_source API which, conveniently,
 supports looping via the ma_data_source_read_pcm_frames() API. To use it, all you need to do is pass a pointer to the
 decoder straight into ma_data_source_read_pcm_frames() and it will just work.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/simple_mixing.html b/docs/examples/simple_mixing.html
index d988fb20..c838e4f6 100644
--- a/docs/examples/simple_mixing.html
+++ b/docs/examples/simple_mixing.html
@@ -259,10 +259,10 @@ device and then mix your sounds together which you can do by simply summing thei
 do this is to use floating point samples and use miniaudio's built-in clipper to handling clipping for you. (Clipping
 is when sample are clamped to their minimum and maximum range, which for floating point is -1..1.)
 

-
+
 Usage:   simple_mixing [input file 0] [input file 1] ... [input file n]
 Example: simple_mixing file1.wav file2.flac
-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/simple_playback.html b/docs/examples/simple_playback.html
index 59005809..3c267345 100644
--- a/docs/examples/simple_playback.html
+++ b/docs/examples/simple_playback.html
@@ -264,7 +264,7 @@ This example uses the ma_decoder API
 device and can be used independently of it. This example only plays back a single sound file, but it's possible to play
 back multiple files by simple loading multiple decoders and mixing them (do not create multiple devices to do this). See
 the simple_mixing example for how best to do this.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/examples/simple_playback_sine.html b/docs/examples/simple_playback_sine.html
index db8641fd..7862e438 100644
--- a/docs/examples/simple_playback_sine.html
+++ b/docs/examples/simple_playback_sine.html
@@ -270,7 +270,7 @@ the ma_waveform_read_pcm_frames() AP
 

This example works with Emscripten.

-
+
 #define MA_NO_DECODING
 #define MA_NO_ENCODING
 #include "../miniaudio.c"
diff --git a/docs/examples/simple_spatialization.html b/docs/examples/simple_spatialization.html
index 714dd4ec..c297128e 100644
--- a/docs/examples/simple_spatialization.html
+++ b/docs/examples/simple_spatialization.html
@@ -272,7 +272,7 @@ information on the available features.
 To use this example, pass in the path of a sound as the first argument. The sound will be
 positioned in front of the listener, while the listener rotates on the the spot to create an
 orbiting effect. Terminate the program with Ctrl+C.

-
+
 #include "../miniaudio.c"
 
 #include <stdio.h>
diff --git a/docs/manual/index.html b/docs/manual/index.html
index be40c986..41fbe4d6 100644
--- a/docs/manual/index.html
+++ b/docs/manual/index.html
@@ -307,7 +307,7 @@ device you need to allocate memory for the device object beforehand. This gives
 complete control over how the memory is allocated. In the example below we initialize a playback
 device on the stack, but you could allocate it on the heap if that suits your situation better.
 

-
+
 void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
 {
     // In playback mode copy data to pOutput. In capture mode read data from pInput. In full-duplex mode, both
@@ -460,7 +460,7 @@ result in a deadlock. Instead you set a variable or signal an event indicating t
 needs to stop and handle it in a different thread. The following APIs must never be called inside
 the callback:
 

-
+
 ma_device_init()
 ma_device_init_ex()
 ma_device_uninit()
@@ -479,7 +479,7 @@ The example above demonstrates the initialization of a playback device, but it w
 same for capture. All you need to do is change the device type from ma_device_type_playback to
 ma_device_type_capture when setting up the config, like so:
 

-
+
 ma_device_config config = ma_device_config_init(ma_device_type_capture);
 config.capture.format   = MY_FORMAT;
 config.capture.channels = MY_CHANNEL_COUNT;
@@ -548,7 +548,7 @@ The example above did not specify a physical device to connect to which means it
 operating system's default device. If you have multiple physical devices connected and you want to
 use a specific one you will need to specify the device ID in the configuration, like so:
 

-
+
 config.playback.pDeviceID = pMyPlaybackDeviceID;    // Only if requesting a playback or duplex device.
 config.capture.pDeviceID = pMyCaptureDeviceID;      // Only if requesting a capture, duplex or loopback device.
 

@@ -560,7 +560,7 @@ more global level and to perform operations outside the scope of an individual d used for performing run-time linking against backend libraries, initializing backends and enumerating devices. The example below shows how to enumerate devices.

-
+
 ma_context context;
 if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) {
     // Error.
@@ -668,7 +668,7 @@ this manual.
 
 The code below shows how you can initialize an engine using it's default configuration.
 

-
+
 ma_result result;
 ma_engine engine;
 
@@ -693,7 +693,7 @@ this will result in the struct being invalidated once the function encapsulating
 allocating the engine on the heap is more appropriate, you can easily do so with a standard call
 to malloc() or whatever heap allocation routine you like:
 

-
+
 ma_engine* pEngine = malloc(sizeof(*pEngine));
 

@@ -701,7 +701,7 @@ The ma_engine API uses the same conf an engine, you can fill out a ma_engine_config object and pass it into the first parameter of ma_engine_init():

-
+
 ma_result result;
 ma_engine engine;
 ma_engine_config engineConfig;
@@ -728,7 +728,7 @@ The engine must be uninitialized with ma_en
 By default the engine will be started, but nothing will be playing because no sounds have been
 initialized. The easiest but least flexible way of playing a sound is like so:
 

-
+
 ma_engine_play_sound(&engine, "my_sound.wav", NULL);
 

@@ -738,7 +738,7 @@ should be associated with which will be explained later. This particular way of simple, but lacks flexibility and features. A more flexible way of playing a sound is to first initialize a sound:

-
+
 ma_result result;
 ma_sound sound;
 
@@ -766,7 +766,7 @@ Sounds are not started by default. Start a sound with 
+
 ma_sound_set_start_time_in_pcm_frames()
 ma_sound_set_start_time_in_milliseconds()
 ma_sound_set_stop_time_in_pcm_frames()
@@ -779,7 +779,7 @@ engine. The current global time in PCM frames can be retrieved with
 ma_engine_set_time_in_pcm_frames() for synchronization purposes if required. Note that scheduling
 a start time still requires an explicit call to ma_sound_start() before anything will play:
 

-
+
 ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 2);
 ma_sound_start(&sound);
 

@@ -897,7 +897,7 @@ AudioToolbox, try with -framework AudioUnit of iOS. Alternatively, if you would rather keep using runtime linking you can add the following to your entitlements.xcent file:

-
+
 <key>com.apple.security.cs.allow-dyld-environment-variables</key>
 <true/>
 <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
@@ -1311,7 +1311,7 @@ MA_NO_RESOURCE_MANAGER
 Disables the resource manager. When using the engine this will
 also disable the following functions:
 

-
+
 ma_sound_init_from_file()
 ma_sound_init_from_file_w()
 ma_sound_init_copy()
@@ -1353,7 +1353,7 @@ miniaudio for data conversion, decoding and/or encoding. Some
 families of APIs require threading which means the following
 options must also be set:
 

-
+
 MA_NO_DEVICE_IO
 
@@ -1608,7 +1608,7 @@ implements the data source interface can be plugged into any
+
 ma_result result;
 ma_uint64 framesRead;
 
@@ -1629,7 +1629,7 @@ When calling any data source function, with the exception of ma_data_source_uninit(), you can pass in any object that implements a data source. For example,
 you could plug in a decoder like so:
 

-
+
 ma_result result;
 ma_uint64 framesRead;
 ma_decoder decoder;   // <-- This would be initialized with ma_decoder_init_*().
@@ -1647,7 +1647,7 @@ can use ma_data_source_seek_pcm_frames()
-
+
 result = ma_data_source_seek_to_pcm_frame(pDataSource, frameIndex);
 if (result != MA_SUCCESS) {
     return result;  // Failed to seek to PCM frame.
@@ -1658,7 +1658,7 @@ You can retrieve the total length of a data source in PCM frames, but note that
 may not have the notion of a length, such as noise and waveforms, and others may just not have a
 way of determining the length such as some decoders. To retrieve the length:
 

-
+
 ma_uint64 length;
 
 result = ma_data_source_get_length_in_pcm_frames(pDataSource, &length);
@@ -1675,7 +1675,7 @@ broadcast. If you do this, ma_data_source_g
 
 The current position of the cursor in PCM frames can also be retrieved:
 

-
+
 ma_uint64 cursor;
 
 result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursor);
@@ -1687,7 +1687,7 @@ result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursor);
 You will often need to know the data format that will be returned after reading. This can be
 retrieved like so:
 

-
+
 ma_format format;
 ma_uint32 channels;
 ma_uint32 sampleRate;
@@ -1706,7 +1706,7 @@ If you do not need a specific data format property, just pass in NULL to the res
 There may be cases where you want to implement something like a sound bank where you only want to
 read data within a certain range of the underlying data. To do this you can use a range:
 

-
+
 result = ma_data_source_set_range_in_pcm_frames(pDataSource, rangeBegInFrames, rangeEndInFrames);
 if (result != MA_SUCCESS) {
     return result;  // Failed to set the range.
@@ -1724,7 +1724,7 @@ Custom loop points can also be used with data sources. By default, data sources
 they reach the end of the data source, but if you need to loop at a specific location, you can do
 the following:
 

-
+
 result = ma_data_set_loop_point_in_pcm_frames(pDataSource, loopBegInFrames, loopEndInFrames);
 if (result != MA_SUCCESS) {
     return result;  // Failed to set the loop point.
@@ -1738,7 +1738,7 @@ The loop point is relative to the current range.
 It's sometimes useful to chain data sources together so that a seamless transition can be achieved.
 To do this, you can use chaining:
 

-
+
 ma_decoder decoder1;
 ma_decoder decoder2;
 
@@ -1765,7 +1765,7 @@ gaps.
 Note that when looping is enabled, only the current data source will be looped. You can loop the
 entire chain by linking in a loop like so:
 

-
+
 ma_data_source_set_next(&decoder1, &decoder2);  // decoder1 -> decoder2
 ma_data_source_set_next(&decoder2, &decoder1);  // decoder2 -> decoder1 (loop back to the start).
 

@@ -1785,7 +1785,7 @@ Instead, initialize multiple data sources for each instance. You can implement a custom data source by implementing the functions in ma_data_source_vtable. Your custom object must have ma_data_source_base as it's first member:

-
+
 struct my_data_source
 {
     ma_data_source_base base;
@@ -1796,7 +1796,7 @@ Your custom object must have ma_data_source
 In your initialization routine, you need to call ma_data_source_init() in order to set up the
 base object (ma_data_source_base):
 

-
+
 static ma_result my_data_source_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
 {
     // Read data here. Output in the same format returned by my_data_source_get_data_format().
@@ -1885,7 +1885,7 @@ configured via the engine config.
 
 The most basic way to initialize the engine is with a default config, like so:
 

-
+
 ma_result result;
 ma_engine engine;
 
@@ -1899,7 +1899,7 @@ This will result in the engine initializing a playback device using the operatin
 device. This will be sufficient for many use cases, but if you need more flexibility you'll want to
 configure the engine with an engine config:
 

-
+
 ma_result result;
 ma_engine engine;
 ma_engine_config engineConfig;
@@ -1917,7 +1917,7 @@ In the example above we're passing in a pre-initialized device. Since the ca
 control of the device's data callback, it's their responsibility to manually call
 ma_engine_read_pcm_frames() from inside their data callback:
 

-
+
 void playback_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
 {
     ma_engine_read_pcm_frames(&g_Engine, pOutput, frameCount, NULL);
@@ -1926,7 +1926,7 @@ control of the device's data callback, it's their responsibility to manu
 
 You can also use the engine independent of a device entirely:
 

-
+
 ma_result result;
 ma_engine engine;
 ma_engine_config engineConfig;
@@ -1953,7 +1953,7 @@ processing or want to use a different audio system for playback such as SDL.
 When a sound is loaded it goes through a resource manager. By default the engine will initialize a
 resource manager internally, but you can also specify a pre-initialized resource manager:
 

-
+
 ma_result result;
 ma_engine engine1;
 ma_engine engine2;
@@ -1979,7 +1979,7 @@ is using their own set of headphones.
 By default an engine will be in a started state. To make it so the engine is not automatically
 started you can configure it as such:
 

-
+
 engineConfig.noAutoStart = MA_TRUE;
 
 // The engine will need to be started manually.
@@ -2004,7 +2004,7 @@ prefer decibel based volume control, use ma
 When a sound is spatialized, it is done so relative to a listener. An engine can be configured to
 have multiple listeners which can be configured via the config:
 

-
+
 engineConfig.listenerCount = 2;
 

@@ -2015,14 +2015,14 @@ and velocity (for doppler effect). A listener is referenced by an index, the mea to the caller (the index is 0 based and cannot go beyond the listener count, minus 1). The position, direction and velocity are all specified in absolute terms:

-
+
 ma_engine_listener_set_position(&engine, listenerIndex, worldPosX, worldPosY, worldPosZ);
 

The direction of the listener represents it's forward vector. The listener's up vector can also be specified and defaults to +1 on the Y axis.

-
+
 ma_engine_listener_set_direction(&engine, listenerIndex, forwardX, forwardY, forwardZ);
 ma_engine_listener_set_world_up(&engine, listenerIndex, 0, 1, 0);
 

@@ -2031,7 +2031,7 @@ The engine supports directional attenuation. The listener can have a cone the co attenuated based on the listener's direction. When a sound is between the inner and outer cones, it will be attenuated between 1 and the cone's outer gain:

-
+
 ma_engine_listener_set_cone(&engine, listenerIndex, innerAngleInRadians, outerAngleInRadians, outerGain);
 

@@ -2049,7 +2049,7 @@ positive Y points up and negative Z points forward. The simplest and least flexible way to play a sound is like so:

-
+
 ma_engine_play_sound(&engine, "my_sound.wav", pGroup);
 

@@ -2057,7 +2057,7 @@ This is a "fire and forget" style of function. The engine will manage internally. When the sound finishes playing, it'll be put up for recycling. For more flexibility you'll want to initialize a sound object:

-
+
 ma_sound sound;
 
 result = ma_sound_init_from_file(&engine, "my_sound.wav", flags, pGroup, NULL, &sound);
@@ -2074,7 +2074,7 @@ The example above loads a sound from a file. If the resource manager has been di
 be able to use this function and instead you'll need to initialize a sound directly from a data
 source:
 

-
+
 ma_sound sound;
 
 result = ma_sound_init_from_data_source(&engine, &dataSource, flags, pGroup, &sound);
@@ -2091,7 +2091,7 @@ sound multiple times at the same time, you need to initialize a separate ma_sound_init_ex(). This uses miniaudio's
 standard config/init pattern:
 

-
+
 ma_sound sound;
 ma_sound_config soundConfig;
 
@@ -2123,7 +2123,7 @@ allocate a block of memory and then load the file directly into it. When reading
 will be decoded dynamically on the fly. In order to save processing time on the audio thread, it
 might be beneficial to pre-decode the sound. You can do this with the MA_SOUND_FLAG_DECODE flag:
 

-
+
 ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE, pGroup, NULL, &sound);
 

@@ -2131,7 +2131,7 @@ By default, sounds will be loaded synchronously, meaning MA_SOUND_FLAG_ASYNC flag:

-
+
 ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, pGroup, NULL, &sound);
 

@@ -2146,7 +2146,7 @@ If you need to wait for an asynchronously loaded sound to be fully loaded, you c fence in miniaudio is a simple synchronization mechanism which simply blocks until it's internal counter hit's zero. You can specify a fence like so:

-
+
 ma_result result;
 ma_fence fence;
 ma_sound sounds[4];
@@ -2170,7 +2170,7 @@ ma_fence_wait(&fence);
 If loading the entire sound into memory is prohibitive, you can also configure the engine to stream
 the audio data:
 

-
+
 ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_STREAM, pGroup, NULL, &sound);
 

@@ -2225,7 +2225,7 @@ The engine supports 3D spatialization of sounds. By default sounds will have spa enabled, but if a sound does not need to be spatialized it's best to disable it. There are two ways to disable spatialization of a sound:

-
+
 // Disable spatialization at initialization time via a flag:
 ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_NO_SPATIALIZATION, NULL, NULL, &sound);
 
@@ -2236,40 +2236,40 @@ ma_sound_set_spatialization_enabled(&sound, isSpatializationEnabled);
 By default sounds will be spatialized based on the closest listener. If a sound should always be
 spatialized relative to a specific listener it can be pinned to one:
 

-
+
 ma_sound_set_pinned_listener_index(&sound, listenerIndex);
 

Like listeners, sounds have a position. By default, the position of a sound is in absolute space, but it can be changed to be relative to a listener:

-
+
 ma_sound_set_positioning(&sound, ma_positioning_relative);
 

Note that relative positioning of a sound only makes sense if there is either only one listener, or the sound is pinned to a specific listener. To set the position of a sound:

-
+
 ma_sound_set_position(&sound, posX, posY, posZ);
 

The direction works the same way as a listener and represents the sound's forward direction:

-
+
 ma_sound_set_direction(&sound, forwardX, forwardY, forwardZ);
 

Sound's also have a cone for controlling directional attenuation. This works exactly the same as listeners:

-
+
 ma_sound_set_cone(&sound, innerAngleInRadians, outerAngleInRadians, outerGain);
 

The velocity of a sound is used for doppler effect and can be set as such:

-
+
 ma_sound_set_velocity(&sound, velocityX, velocityY, velocityZ);
 

@@ -2277,7 +2277,7 @@ The engine supports different attenuation models which can be configured on a pe default the attenuation model is set to ma_attenuation_model_inverse which is the equivalent to OpenAL's AL_INVERSE_DISTANCE_CLAMPED. Configure the attenuation model like so:

-
+
 ma_sound_set_attenuation_model(&sound, ma_attenuation_model_inverse);
 

@@ -2319,13 +2319,13 @@ Exponential attenuation.

To control how quickly a sound rolls off as it moves away from the listener, you need to configure the rolloff:

-
+
 ma_sound_set_rolloff(&sound, rolloff);
 

You can control the minimum and maximum gain to apply from spatialization:

-
+
 ma_sound_set_min_gain(&sound, minGain);
 ma_sound_set_max_gain(&sound, maxGain);
 

@@ -2335,7 +2335,7 @@ the attenuation calculation. This is useful if you want to ensure sounds don' volume after the listener moves further away and to have sounds play a maximum volume when the listener is within a certain distance:

-
+
 ma_sound_set_min_distance(&sound, minDistance);
 ma_sound_set_max_distance(&sound, maxDistance);
 

@@ -2343,7 +2343,7 @@ ma_sound_set_max_distance(&sound, maxDistance); The engine's spatialization system supports doppler effect. The doppler factor can be configure on a per-sound basis like so:

-
+
 ma_sound_set_doppler_factor(&sound, dopplerFactor);
 

@@ -2351,7 +2351,7 @@ You can fade sounds in and out with ma_soun ma_sound_set_fade_in_milliseconds(). Set the volume to -1 to use the current volume as the starting volume:

-
+
 // Fade in over 1 second.
 ma_sound_set_fade_in_milliseconds(&sound, 0, 1, 1000);
 
@@ -2364,7 +2364,7 @@ ma_sound_set_fade_in_milliseconds(&sound, -1, 0, 1000);
 By default sounds will start immediately, but sometimes for timing and synchronization purposes it
 can be useful to schedule a sound to start or stop:
 

-
+
 // Start the sound in 1 second from now.
 ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 1));
 
@@ -2401,14 +2401,14 @@ you cannot be uninitializing sound from the callback. To set the callback you ca
 ma_sound_set_end_callback(). Alternatively, if you're using ma_sound_init_ex(), you can pass it
 into the config like so:
 

-
+
 soundConfig.endCallback = my_end_callback;
 soundConfig.pEndCallbackUserData = pMyEndCallbackUserData;
 

The end callback is declared like so:

-
+
 void my_end_callback(void* pUserData, ma_sound* pSound)
 {
     ...
@@ -2418,7 +2418,7 @@ The end callback is declared like so:
 Internally a sound wraps around a data source. Some APIs exist to control the underlying data
 source, mainly for convenience:
 

-
+
 ma_sound_seek_to_pcm_frame(&sound, frameIndex);
 ma_sound_get_data_format(&sound, &format, &channels, &sampleRate, pChannelMap, channelMapCapacity);
 ma_sound_get_cursor_in_pcm_frames(&sound, &cursor);
@@ -2464,7 +2464,7 @@ the data to be loaded asynchronously.
 
 The example below shows how you can initialize a resource manager using its default configuration:
 

-
+
 ma_resource_manager_config config;
 ma_resource_manager resourceManager;
 
@@ -2483,7 +2483,7 @@ is useful for offloading the cost of data conversion to load time rather than dy
 converting at mixing time. To do this, you configure the decoded format, channels and sample rate
 like the code below:
 

-
+
 config = ma_resource_manager_config_init();
 config.decodedFormat     = device.playback.format;
 config.decodedChannels   = device.playback.channels;
@@ -2503,7 +2503,7 @@ only supports decoders that are built into miniaudio. It's possible to suppo
 formats through the use of custom decoders. To do so, pass in your ma_decoding_backend_vtable
 vtables into the resource manager config:
 

-
+
 ma_decoding_backend_vtable* pCustomBackendVTables[] =
 {
     &g_ma_decoding_backend_vtable_libvorbis,
@@ -2527,7 +2527,7 @@ Asynchronicity is achieved via a job system. When an operation needs to be perfo
 decoding of a page, a job will be posted to a queue which will then be processed by a job thread.
 By default there will be only one job thread running, but this can be configured, like so:
 

-
+
 config = ma_resource_manager_config_init();
 config.jobThreadCount = MY_JOB_THREAD_COUNT;
 

@@ -2539,7 +2539,7 @@ do this, just set the job thread count to 0 and process jobs manually. To proces need to retrieve a job using ma_resource_manager_next_job() and then process it using ma_job_process():

-
+
 config = ma_resource_manager_config_init();
 config.jobThreadCount = 0;                            // Don't manage any job threads internally.
 config.flags = MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING; // Optional. Makes ma_resource_manager_next_job() non-blocking.
@@ -2583,7 +2583,7 @@ When loading a file, it's sometimes convenient to be able to customize how f
 read instead of using standard fopen(), fclose(), etc. which is what miniaudio will use by
 default. This can be done by setting the pVFS member of the resource manager's config:
 

-
+
 // Initialize your custom VFS object. See documentation for VFS for information on how to do this.
 my_custom_vfs vfs = my_custom_vfs_init();
 
@@ -2603,7 +2603,7 @@ By default a sound will be loaded synchronously. The returned data source is own
 which means the caller is responsible for the allocation and freeing of the data source. Below is
 an example for initializing a data source:
 

-
+
 ma_resource_manager_data_source dataSource;
 ma_result result = ma_resource_manager_data_source_init(pResourceManager, pFilePath, flags, &dataSource);
 if (result != MA_SUCCESS) {
@@ -2627,7 +2627,7 @@ ma_resource_manager_data_source_uninit(&dataSource);
 The flags parameter specifies how you want to perform loading of the sound file. It can be a
 combination of the following flags:
 

-
+
 MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM
 MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE
 MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC
@@ -2720,7 +2720,7 @@ sample rate of the file.
 
 The example below shows how you could use a fence when loading a number of sounds:
 

-
+
 // This fence will be released when all sounds are finished loading entirely.
 ma_fence fence;
 ma_fence_init(&fence);
@@ -2744,14 +2744,14 @@ In the example above we used a fence for waiting until the entire file has been
 you only need to wait for the initialization of the internal decoder to complete, you can use the
 init member of the ma_resource_manager_pipeline_notifications object:
 

-
+
 notifications.init.pFence = &fence;
 

If a fence is not appropriate for your situation, you can instead use a callback that is fired on an individual sound basis. This is done in a very similar way to fences:

-
+
 typedef struct
 {
     ma_async_notification_callbacks cb;
@@ -2864,7 +2864,7 @@ need to use more than one job thread. There are plans to remove this lock in a f
 In addition, posting a job will release a semaphore, which on Win32 is implemented with
 ReleaseSemaphore and on POSIX platforms via a condition variable:
 

-
+
 pthread_mutex_lock(&pSemaphore->lock);
 {
     pSemaphore->value += 1;
@@ -2889,7 +2889,7 @@ will be unloaded. This is a detail to keep in mind because it could result in ex
 unloading of a sound. For example, the following sequence will result in a file being loaded twice,
 once after the other:
 

-
+
 ma_resource_manager_data_source_init(pResourceManager, "my_file", ..., &myDataBuffer0); // Refcount = 1. Initial load.
 ma_resource_manager_data_source_uninit(&myDataBuffer0);                                 // Refcount = 0. Unloaded.
 
@@ -3009,7 +3009,7 @@ attached to an input bus of another. Multiple nodes can connect their output bus
 node's input bus, in which case their outputs will be mixed before processing by the node. Below is
 a diagram that illustrates a hypothetical node graph setup:
 

-
+
 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Data flows left to right >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
 
 +---------------+                              +-----------------+
@@ -3045,7 +3045,7 @@ container around the entire graph. The ma_n
 issues which will be explained later. A ma_node_graph object is initialized using miniaudio's
 standard config/init system:
 

-
+
 ma_node_graph_config nodeGraphConfig = ma_node_graph_config_init(myChannelCount);
 
 result = ma_node_graph_init(&nodeGraphConfig, NULL, &nodeGraph);    // Second parameter is a pointer to allocation callbacks.
@@ -3061,7 +3061,7 @@ endpoint must be configured such that their output buses have the same channel c
 audio data from the node graph, it'll have the channel count you specified in the config. To read
 data from the graph:
 

-
+
 ma_uint32 framesRead;
 result = ma_node_graph_read_pcm_frames(&nodeGraph, pFramesOut, frameCount, &framesRead);
 if (result != MA_SUCCESS) {
@@ -3086,7 +3086,7 @@ miniaudio includes a few stock nodes for common functionality. This is how you w
 node which reads directly from a data source (ma_data_source_node) which is an example of one
 of the stock nodes that comes with miniaudio:
 

-
+
 ma_data_source_node_config config = ma_data_source_node_config_init(pMyDataSource);
 
 ma_data_source_node dataSourceNode;
@@ -3105,7 +3105,7 @@ returned from ma_data_source_node_init()ma_node_attach_output_bus():
 

-
+
 result = ma_node_attach_output_bus(&dataSourceNode, 0, ma_node_graph_get_endpoint(&nodeGraph), 0);
 if (result != MA_SUCCESS) {
     // Failed to attach node.
@@ -3130,7 +3130,7 @@ your own processing callback to apply a custom effect of some kind. This is simi
 one of the stock node types, only this time you need to specify a pointer to a vtable containing a
 pointer to the processing function and the number of input and output buses. Example:
 

-
+
 static void my_custom_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
 {
     // Do some processing of ppFramesIn (one stream of audio data per input bus)
@@ -3188,7 +3188,7 @@ static space. The number of input and output buses are specified as part of the
 a variable number of buses on a per-node basis, the vtable should have the relevant bus count set
 to MA_NODE_BUS_COUNT_UNKNOWN. In this case, the bus count should be set in the node config:
 

-
+
 static ma_node_vtable my_custom_node_vtable =
 {
     my_custom_node_process_pcm_frames, // The function that will be called process your custom node. This is where you'd implement your effect processing.
@@ -3216,7 +3216,7 @@ set if the vtable specifies MA_NODE_BUS_COUNT_UNKNOWN in the relevant bus count.
 Most often you'll want to create a structure to encapsulate your node with some extra data. You
 need to make sure the ma_node_base object is your first member of the structure:
 

-
+
 typedef struct
 {
     ma_node_base base; // <-- Make sure this is always the first member.
@@ -3337,7 +3337,7 @@ If you need to make a copy of an audio stream for effect processing you can use
 called ma_splitter_node. This has 1 input bus and splits the stream into 2 output buses.
 You can use it like this:
 

-
+
 ma_splitter_node_config splitterNodeConfig = ma_splitter_node_config_init(channels);
 
 ma_splitter_node splitterNode;
@@ -3353,7 +3353,7 @@ ma_node_attach_output_bus(&splitterNode, 1, &myEffectNode,
 
 The volume of an output bus can be configured on a per-bus basis:
 

-
+
 ma_node_set_output_bus_volume(&splitterNode, 0, 0.5f);
 ma_node_set_output_bus_volume(&splitterNode, 1, 0.5f);
 

@@ -3365,7 +3365,7 @@ copied streams. You can start and stop a node with the following:

-
+
 ma_node_set_state(&splitterNode, ma_node_state_started);    // The default state.
 ma_node_set_state(&splitterNode, ma_node_state_stopped);
 

@@ -3379,7 +3379,7 @@ atomically. You can configure the initial state of a node in it's config:

-
+
 nodeConfig.initialState = ma_node_state_stopped;
 

@@ -3387,7 +3387,7 @@ Note that for the stock specialized nodes, all of their configs will have a -

+
 dataSourceNodeConfig.nodeConfig.initialState = ma_node_state_stopped;
 

@@ -3425,21 +3425,21 @@ state in a frame-exact manner. Without this mechanism, starting and stopping of to the resolution of a call to ma_node_graph_read_pcm_frames() which would typically be in blocks of several milliseconds. The following APIs can be used for scheduling node states:

-
+
 ma_node_set_state_time()
 ma_node_get_state_time()
 

The time is absolute and must be based on the global clock. An example is below:

-
+
 ma_node_set_state_time(&myNode, ma_node_state_started, sampleRate*1);   // Delay starting to 1 second.
 ma_node_set_state_time(&myNode, ma_node_state_stopped, sampleRate*5);   // Delay stopping to 5 seconds.
 

An example of changing the state using a relative time:

-
+
 ma_node_set_state_time(&myNode, ma_node_state_started, sampleRate*1 + ma_node_graph_get_time(&myNodeGraph));
 ma_node_set_state_time(&myNode, ma_node_state_stopped, sampleRate*5 + ma_node_graph_get_time(&myNodeGraph));
 

@@ -3571,7 +3571,7 @@ FLAC

You can disable the built-in decoders by specifying one or more of the following options before the miniaudio implementation:

-
+
 #define MA_NO_WAV
 #define MA_NO_MP3
 #define MA_NO_FLAC
@@ -3586,7 +3586,7 @@ A decoder can be initialized from a file with ma_decoder_init_memory(), or from data delivered via callbacks with ma_decoder_init(). Here is
 an example for loading a decoder from a file:
 

-
+
 ma_decoder decoder;
 ma_result result = ma_decoder_init_file("MySong.mp3", NULL, &decoder);
 if (result != MA_SUCCESS) {
@@ -3602,7 +3602,7 @@ When initializing a decoder, you can optionally pass in a pointer to a NULL argument in the example above) which allows you to configure the output format, channel
 count, sample rate and channel map:
 

-
+
 ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000);
 

@@ -3615,7 +3615,7 @@ Data is read from the decoder as PCM frames. This will output the number of PCM read. If this is less than the requested number of PCM frames it means you've reached the end. The return value will be MA_AT_END if no samples have been read and the end has been reached.

-
+
 ma_result result = ma_decoder_read_pcm_frames(pDecoder, pFrames, framesToRead, &framesRead);
 if (framesRead < framesToRead) {
     // Reached the end.
@@ -3624,7 +3624,7 @@ return value will be MA_AT_END if no
 
 You can also seek to a specific frame like so:
 

-
+
 ma_result result = ma_decoder_seek_to_pcm_frame(pDecoder, targetFrame);
 if (result != MA_SUCCESS) {
     return false;   // An error occurred.
@@ -3633,7 +3633,7 @@ You can also seek to a specific frame like so:
 
 If you want to loop back to the start, you can simply seek back to the first PCM frame:
 

-
+
 ma_decoder_seek_to_pcm_frame(pDecoder, 0);
 

@@ -3642,7 +3642,7 @@ backend. This can be unnecessarily inefficient if the type is already known. In use encodingFormat variable in the device config to specify a specific encoding format you want to decode:

-
+
 decoderConfig.encodingFormat = ma_encoding_format_wav;
 

@@ -3667,7 +3667,7 @@ Opus decoder in the "extras" folder of the miniaudio repository which A custom decoder must implement a data source. A vtable called ma_decoding_backend_vtable needs to be implemented which is then passed into the decoder config:

-
+
 ma_decoding_backend_vtable* pCustomBackendVTables[] =
 {
     &g_ma_decoding_backend_vtable_libvorbis,
@@ -3684,7 +3684,7 @@ decoderConfig.customBackendCount     = sizeof
 
 The ma_decoding_backend_vtable vtable has the following functions:
 

-
+
 onInit
 onInitFile
 onInitFileW
@@ -3739,7 +3739,7 @@ opportunity to clean up and internal data.
 The ma_encoding API is used for writing audio files. The only supported output format is WAV.
 This can be disabled by specifying the following option before the implementation of miniaudio:
 

-
+
 #define MA_NO_WAV
 

@@ -3747,7 +3747,7 @@ An encoder can be initialized to write to a file with ma_encoder_init(). Below is an example for initializing an encoder to output to a file.

-
+
 ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, FORMAT, CHANNELS, SAMPLE_RATE);
 ma_encoder encoder;
 ma_result result = ma_encoder_init_file("my_file.wav", &config, &encoder);
@@ -3786,7 +3786,7 @@ be returned. The encoder will not perform data conversion so you will need to co
 outputting any audio data. To output audio data, use ma_encoder_write_pcm_frames(), like in the
 example below:
 

-
+
 ma_uint64 framesWritten;
 result = ma_encoder_write_pcm_frames(&encoder, pPCMFramesToWrite, framesToWrite, &framesWritten);
 if (result != MA_SUCCESS) {
@@ -3860,7 +3860,7 @@ Note that even if the dither mode is set to something other than 
+
 s16 -> u8
 s24 -> u8
 s32 -> u8
@@ -3879,7 +3879,7 @@ Channel conversion is used for channel rearrangement and conversion from one cha
 another. The ma_channel_converter API is used for channel conversion. Below is an example of
 initializing a simple channel converter which converts from mono to stereo.
 

-
+
 ma_channel_converter_config config = ma_channel_converter_config_init(
     ma_format,                      // Sample format
     1,                              // Input channels
@@ -3896,7 +3896,7 @@ result = ma_channel_converter_init(&config, NULL, &converter);
 
 To perform the conversion simply call ma_channel_converter_process_pcm_frames() like so:
 

-
+
 ma_result result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount);
 if (result != MA_SUCCESS) {
     // Error.
@@ -4158,7 +4158,7 @@ mapping as the device.

Resampling is achieved with the ma_resampler object. To create a resampler object, do something like the following:

-
+
 ma_resampler_config config = ma_resampler_config_init(
     ma_format_s16,
     channels,
@@ -4175,13 +4175,13 @@ like the following:
 
 Do the following to uninitialize the resampler:
 

-
+
 ma_resampler_uninit(&resampler);
 

The following example shows how data can be processed:

-
+
 ma_uint64 frameCountIn  = 1000;
 ma_uint64 frameCountOut = 2000;
 ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
@@ -4308,7 +4308,7 @@ The API for the linear resampler is the same as the main resampler API, only it&
 You can implement a custom resampler by using the ma_resample_algorithm_custom resampling
 algorithm and setting a vtable in the resampler config:
 

-
+
 ma_resampler_config config = ma_resampler_config_init(..., ma_resample_algorithm_custom);
 config.pBackendVTable = &g_customResamplerVTable;
 

@@ -4361,7 +4361,7 @@ requested when the device was initialized and the format of the backend's na for general data conversion is very similar to the resampling API. Create a ma_data_converter object like this:

-
+
 ma_data_converter_config config = ma_data_converter_config_init(
     inputFormat,
     outputFormat,
@@ -4382,7 +4382,7 @@ In the example above we use ma_data_convert
 there's many more properties that can be configured, such as channel maps and resampling quality.
 Something like the following may be more suitable depending on your requirements:
 

-
+
 ma_data_converter_config config = ma_data_converter_config_init_default();
 config.formatIn = inputFormat;
 config.formatOut = outputFormat;
@@ -4396,13 +4396,13 @@ config.resampling.linear.lpfOrder = MA_MAX_FILTER_ORDER;
 
 Do the following to uninitialize the data converter:
 

-
+
 ma_data_converter_uninit(&converter, NULL);
 

The following example shows how data can be processed:

-
+
 ma_uint64 frameCountIn  = 1000;
 ma_uint64 frameCountOut = 2000;
 ma_result result = ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
@@ -4456,7 +4456,7 @@ is required. This can be retrieved in terms of both the input rate and the outpu
 

Biquad filtering is achieved with the ma_biquad API. Example:

-
+
 ma_biquad_config config = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2);
 ma_result result = ma_biquad_init(&config, NULL, &biquad);
 if (result != MA_SUCCESS) {
@@ -4487,7 +4487,7 @@ Input and output frames are always interleaved.
 Filtering can be applied in-place by passing in the same pointer for both the input and output
 buffers, like so:
 

-
+
 ma_biquad_process_pcm_frames(&biquad, pMyData, pMyData, frameCount);
 

@@ -4537,7 +4537,7 @@ High order low-pass filter (Butterworth)

Low-pass filter example:

-
+
 ma_lpf_config config = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order);
 ma_result result = ma_lpf_init(&config, &lpf);
 if (result != MA_SUCCESS) {
@@ -4557,14 +4557,14 @@ you need to convert it yourself beforehand. Input and output frames are always i
 Filtering can be applied in-place by passing in the same pointer for both the input and output
 buffers, like so:
 

-
+
 ma_lpf_process_pcm_frames(&lpf, pMyData, pMyData, frameCount);
 

The maximum filter order is limited to MA_MAX_FILTER_ORDER which is set to 8. If you need more, you can chain first and second order filters together.

-
+
 for (iFilter = 0; iFilter < filterCount; iFilter += 1) {
     ma_lpf2_process_pcm_frames(&lpf2[iFilter], pMyData, pMyData, frameCount);
 }
@@ -4756,7 +4756,7 @@ the high shelf filter does the same thing for high frequencies.
 miniaudio supports generation of sine, square, triangle and sawtooth waveforms. This is achieved
 with the ma_waveform API. Example:
 

-
+
 ma_waveform_config config = ma_waveform_config_init(
     FORMAT,
     CHANNELS,
@@ -4818,7 +4818,7 @@ ma_waveform_type_sawtooth

miniaudio supports generation of white, pink and Brownian noise via the ma_noise API. Example:

-
+
 ma_noise_config config = ma_noise_config_init(
     FORMAT,
     CHANNELS,
@@ -4852,7 +4852,7 @@ By default, the noise API will use different values for different channels. So,
 left side in a stereo stream will be different to the right side. To instead have each channel use
 the same random value, set the duplicateChannels member of the noise config to true, like so:
 

-
+
 config.duplicateChannels = MA_TRUE;
 

@@ -4888,7 +4888,7 @@ you internally. Memory management is flexible and should support most use cases. Audio buffers are initialized using the standard configuration system used everywhere in miniaudio:

-
+
 ma_audio_buffer_config config = ma_audio_buffer_config_init(
     format,
     channels,
@@ -4918,7 +4918,7 @@ raw audio data in a contiguous block of memory. That is, the raw audio data will
 immediately after the ma_audio_buffer structure. To do this, use
 ma_audio_buffer_alloc_and_init():
 

-
+
 ma_audio_buffer_config config = ma_audio_buffer_config_init(
     format,
     channels,
@@ -4952,7 +4952,7 @@ parameter is set to true. If you want to manually loop back to the start, you ca
 ma_audio_buffer_seek_to_pcm_frame(pAudioBuffer, 0). Below is an example for reading data from an
 audio buffer.
 

-
+
 ma_uint64 framesRead = ma_audio_buffer_read_pcm_frames(pAudioBuffer, pFramesOut, desiredFrameCount, isLooping);
 if (framesRead < desiredFrameCount) {
     // If not looping, this means the end has been reached. This should never happen in looping mode with valid input.
@@ -4962,7 +4962,7 @@ audio buffer.
 Sometimes you may want to avoid the cost of data movement between the internal buffer and the
 output buffer. Instead you can use memory mapping to retrieve a pointer to a segment of data:
 

-
+
 void* pMappedFrames;
 ma_uint64 frameCount = frameCountToTryMapping;
 ma_result result = ma_audio_buffer_map(pAudioBuffer, &pMappedFrames, &frameCount);
@@ -5003,7 +5003,7 @@ you.
 The examples below use the PCM frame variant of the ring buffer since that's most likely the one
 you will want to use. To initialize a ring buffer, do something like the following:
 

-
+
 ma_pcm_rb rb;
 ma_result result = ma_pcm_rb_init(FORMAT, CHANNELS, BUFFER_SIZE_IN_FRAMES, NULL, NULL, &rb);
 if (result != MA_SUCCESS) {
@@ -5309,7 +5309,7 @@ UWP only supports default playback and capture devices.
 
  • UWP requires the Microphone capability to be enabled in the application's manifest (Package.appxmanifest):
  • -
    +
     <Package ...>
         ...
         <Capabilities>