Commit c2d33b90a4cc52d69bde0bb9c56ba78760ed9196
Update OS X sample with CoreAudio output.
Authored by Christopher Snowhill on 1/12/2016, 8:04:45 AM; committed by Christopher Snowhill on 6/13/2018, 12:10:58 AM
Parent: 6f1fcbac380c012b333c655624363e6c14ac7f72
Files changed
src/main_osx.cpp      changed
src/audio_output.cpp  added
src/audio_output.h    added
src/main_osx.cpp

@@ -41,13 +41,13 @@
 
 void render(void * unused, short * samples, uint32_t sampleCount)
 {
     syntrax_info info;
-    mixChunk(player, sample_buffer, sampleCount);
+    mixChunk(player, samples, sampleCount);
     if (playerGetSongEnded(player)) running = 0;
     if (playerGetLoopCount(player) >= 2)
     {
-        fade_buffer( sample_buffer, sampleCount, fade_start, fade_length );
+        fade_buffer( samples, sampleCount, fade_start, fade_length );
         fade_start += sampleCount;
     }
     playerGetInfo(player, &info);
     fprintf(stderr, "\ro: %3u - r: %2u - c: %2u (%2u)", info.coarse, info.fine, info.channelsPlaying, info.channelsPlaying > max_channels ? max_channels = info.channelsPlaying : max_channels);
@@ -90,9 +90,9 @@
     initSubsong(player, 0);
 
     signal(SIGINT, signal_handler);
 
-    output = new CoreAudioPlayer(render, SAMPLE_RATE);
+    output = new CoreAudioStream(render, 0, SAMPLE_RATE);
 
     if ( output )
     {
         fade_start = 0; fade_length = SAMPLE_RATE * 10;
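In the hunks above, render() now writes straight into the buffer supplied by the audio output callback, so the intermediate sample_buffer is no longer needed; the added 0 in the CoreAudioStream constructor call is the user-data pointer the new class hands back to the callback, which render() ignores via its unused first parameter.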
src/audio_output.cpp

@@ -1,0 +1,72 @@
+
+
+static const uint32_t DEFAULT_CHUNK_MS = 60;
+
+CoreAudioStream::CoreAudioStream(callback useCb, void * userData, const uint32_t useSampleRate) :
+    playerCallback(useCb), playerCallbackUserData(userData),
+    sampleRate(useSampleRate), audioQueue(NULL)
+{
+    const uint32_t bufferSize = 2048;
+    const uint32_t audioLatencyFrames = sampleRate * DEFAULT_CHUNK_MS / 1000;
+    bufferByteSize = bufferSize << 2;
+    // Number of buffers should be ceil(audioLatencyFrames / bufferSize)
+    numberOfBuffers = (audioLatencyFrames + bufferSize - 1) / bufferSize;
+    buffers = new AudioQueueBufferRef[numberOfBuffers];
+}
+
+CoreAudioStream::~CoreAudioStream() {
+    close();
+    delete[] buffers;
+}
+
+void CoreAudioStream::renderOutputBuffer(void *userData, AudioQueueRef queue, AudioQueueBufferRef buffer) {
+    CoreAudioStream *stream = (CoreAudioStream *)userData;
+    if (queue == NULL) {
+        // Priming the buffers, skip timestamp handling
+        queue = stream->audioQueue;
+    }
+
+    uint frameCount = buffer->mAudioDataByteSize >> 2;
+    stream->playerCallback(stream->playerCallbackUserData, (short*)buffer->mAudioData, frameCount);
+
+    AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
+}
+
+bool CoreAudioStream::start() {
+    if (audioQueue != NULL) {
+        return true;
+    }
+
+    AudioStreamBasicDescription dataFormat = {sampleRate, kAudioFormatLinearPCM, kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian, 4, 1, 4, 2, 16, 0};
+    OSStatus res = AudioQueueNewOutput(&dataFormat, renderOutputBuffer, this, NULL, NULL, 0, &audioQueue);
+    if (res || audioQueue == NULL) {
+        return false;
+    }
+
+    for (uint i = 0; i < numberOfBuffers; i++) {
+        res = AudioQueueAllocateBuffer(audioQueue, bufferByteSize, buffers + i);
+        if (res || buffers[i] == NULL) {
+            res = AudioQueueDispose(audioQueue, true);
+            audioQueue = NULL;
+            return false;
+        }
+        buffers[i]->mAudioDataByteSize = bufferByteSize;
+        // Prime the buffer allocated
+        renderOutputBuffer(this, NULL, buffers[i]);
+    }
+
+    res = AudioQueueStart(audioQueue, NULL);
+    if (res) {
+        res = AudioQueueDispose(audioQueue, true);
+        audioQueue = NULL;
+        return false;
+    }
+
+    return true;
+}
+
+void CoreAudioStream::close() {
+    if (audioQueue == NULL) return;
+    OSStatus res = AudioQueueDispose(audioQueue, true);
+    audioQueue = NULL;
+}
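To put numbers on the constructor above: assuming a 44100 Hz SAMPLE_RATE (the constant's value is not shown in this diff), audioLatencyFrames = 44100 * 60 / 1000 = 2646, so numberOfBuffers = ceil(2646 / 2048) = 2 and bufferByteSize = 2048 << 2 = 8192 bytes, i.e. two queue buffers of 2048 interleaved 16-bit stereo frames, roughly 93 ms of buffered audio.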
src/audio_output.h

@@ -1,0 +1,34 @@
+
+
+
+
+
+
+
+class CoreAudioStream {
+public:
+    typedef void (*callback)(void * context, short * samples, uint32_t count);
+
+private:
+    AudioQueueRef audioQueue;
+    AudioQueueBufferRef *buffers;
+    uint32_t numberOfBuffers;
+    uint32_t bufferByteSize;
+
+    uint32_t sampleRate;
+
+    callback playerCallback;
+    void * playerCallbackUserData;
+
+    static void renderOutputBuffer(void *userData, AudioQueueRef queue, AudioQueueBufferRef buffer);
+
+public:
+
+    CoreAudioStream(callback cb, void * userData, const uint32_t sampleRate);
+    ~CoreAudioStream();
+    bool start();
+    void close();
+};
+
+
+
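For reference, a minimal usage sketch of the new class, modeled on the main_osx.cpp change above. It is not part of the commit: playerRender, SAMPLE_RATE, and the fixed five-second run are placeholders assumed for illustration, whereas the real sample keeps pumping audio until the song ends or SIGINT fires.

// Usage sketch: drive CoreAudioStream the way main_osx.cpp does.
// playerRender and SAMPLE_RATE are placeholder names for this example;
// build against the AudioToolbox framework alongside audio_output.cpp.
#include <stdint.h>
#include <unistd.h>
#include "audio_output.h"

static const uint32_t SAMPLE_RATE = 44100;

// Matches CoreAudioStream::callback: fill `count` interleaved 16-bit stereo frames.
static void playerRender(void * context, short * samples, uint32_t count)
{
    (void)context;                    // unused, like render()'s first parameter
    for (uint32_t i = 0; i < count * 2; i++)
        samples[i] = 0;               // silence; the real sample mixes the song here
}

int main()
{
    CoreAudioStream * output = new CoreAudioStream(playerRender, 0, SAMPLE_RATE);
    if (output && output->start())
    {
        sleep(5);                     // playback runs on the AudioQueue's own thread
        output->close();
    }
    delete output;
    return 0;
}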