author | Mikhail Burakov <mburakov@mailbox.org> | 2024-05-19 10:14:23 +0200
---|---|---
committer | Mikhail Burakov <mburakov@mailbox.org> | 2024-05-19 10:15:12 +0200
commit | 3a4e9a0818e2e352eb87b13a250c9ad6e67524b0 |
tree | 305545a50aaa19a05cd657b5cc249eabc627a3f2 |
parent | 191dd1097cb9b8827e53ecd450c31d60e63e91f3 |
Change alsa audio output to pipewire
-rw-r--r-- | audio.c | 199
-rw-r--r-- | audio.h | 4
-rw-r--r-- | main.c | 19
-rw-r--r-- | makefile | 2
4 files changed, 141 insertions, 83 deletions
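
In practical terms the `--audio` option no longer names an ALSA device: it now sets the size of the receiver's playback queue, counted in stereo S16 frames (the queue is allocated as `queue_size * sizeof(int16_t) * 2` bytes). As a quick sizing example derived from the diff below rather than stated in the commit, at 48 kHz a value of 4800 buffers 100 ms of audio, matching the 4800-frame queue that was previously hard-coded.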
```diff
diff --git a/audio.c b/audio.c
@@ -17,123 +17,176 @@
 #include "audio.h"

-#include <alsa/asoundlib.h>
 #include <errno.h>
-#include <stdatomic.h>
-#include <stddef.h>
-#include <stdint.h>
+#include <pipewire/pipewire.h>
+#include <spa/param/audio/raw-utils.h>
+#include <spa/utils/result.h>
+#include <stdio.h>
+#include <stdlib.h>
 #include <string.h>
-#include <threads.h>

 #include "atomic_queue.h"
 #include "toolbox/utils.h"

 struct AudioContext {
-  const char* device;
-  atomic_bool running;
   struct AtomicQueue queue;
-  atomic_uint_fast64_t latency;
-  thrd_t thread;
-};
+  struct pw_thread_loop* pw_thread_loop;
+  struct pw_stream* pw_stream;

-static int AudioContextThreadProc(void* arg) {
-  struct AudioContext* context = arg;
+  size_t queue_samples_sum;
+  size_t queue_samples_count;
+};

-  snd_pcm_t* pcm = NULL;
-  int err = snd_pcm_open(&pcm, context->device, SND_PCM_STREAM_PLAYBACK, 0);
-  if (err) {
-    LOG("Failed to open pcm (%s)", snd_strerror(err));
-    atomic_store_explicit(&context->running, 0, memory_order_relaxed);
-    return 0;
+static void OnStreamProcess(void* data) {
+  struct AudioContext* audio_context = data;
+  struct pw_buffer* pw_buffer =
+      pw_stream_dequeue_buffer(audio_context->pw_stream);
+  if (!pw_buffer) {
+    LOG("Failed to dequeue stream buffer");
+    return;
   }

-  // TODO(mburakov): Read audio configuration from the server.
-  err = snd_pcm_set_params(pcm, SND_PCM_FORMAT_S16_LE,
-                           SND_PCM_ACCESS_RW_INTERLEAVED, 2, 48000, 1, 10000);
-  if (err) {
-    LOG("Failed to set pcm params (%s)", snd_strerror(err));
-    atomic_store_explicit(&context->running, 0, memory_order_relaxed);
-    goto rollback_pcm;
-  }
+  static const size_t stride = sizeof(int16_t) * 2;
+  struct spa_data* spa_data = &pw_buffer->buffer->datas[0];
+  size_t requested =
+      MIN(pw_buffer->requested, spa_data->maxsize / stride) * stride;
+  size_t available =
+      AtomicQueueRead(&audio_context->queue, spa_data->data, requested);

-  while (atomic_load_explicit(&context->running, memory_order_relaxed)) {
-    // TODO(mburakov): Frame size depends on dynamic audio configuration.
-    static const unsigned frame_size = sizeof(int16_t) * 2;
-    uint8_t buffer[480 * frame_size];
-
-    size_t size = AtomicQueueRead(&context->queue, buffer, sizeof(buffer));
-    if (size < sizeof(buffer)) {
-      // LOG("Audio queue underflow!");
-      memset(buffer + size, 0, sizeof(buffer) - size);
-      uint64_t micros = (sizeof(buffer) - size) * 1000 / frame_size / 48;
-      atomic_fetch_add_explicit(&context->latency, micros,
-                                memory_order_relaxed);
-    }
-
-    for (snd_pcm_uframes_t offset = 0; offset < sizeof(buffer) / frame_size;) {
-      snd_pcm_sframes_t nframes =
-          snd_pcm_writei(pcm, buffer + offset * frame_size,
-                         sizeof(buffer) / frame_size - offset);
-      if (nframes < 0) {
-        LOG("Failed to write pcm (%s)", snd_strerror((int)nframes));
-        atomic_store_explicit(&context->running, 0, memory_order_relaxed);
-        goto rollback_pcm;
-      }
-      offset += (snd_pcm_uframes_t)nframes;
-    }
+  if (available < requested) {
+    // LOG("Audio queue underflow (%zu < %zu)!", available, requested);
+    memset((uint8_t*)spa_data->data + available, 0, requested - available);
   }

-rollback_pcm:
-  snd_pcm_close(pcm);
-  return 0;
+  spa_data->chunk->offset = 0;
+  spa_data->chunk->stride = stride;
+  spa_data->chunk->size = (uint32_t)requested;
+  pw_stream_queue_buffer(audio_context->pw_stream, pw_buffer);
+  return;
 }

-struct AudioContext* AudioContextCreate(const char* device) {
+struct AudioContext* AudioContextCreate(size_t queue_size) {
+  pw_init(0, NULL);
   struct AudioContext* audio_context = malloc(sizeof(struct AudioContext));
   if (!audio_context) {
-    LOG("Failed to allocate context (%s)", strerror(errno));
+    LOG("Failed to allocate audio context (%s)", strerror(errno));
     return NULL;
   }

-  audio_context->device = device;
-  atomic_init(&audio_context->running, 1);
-  if (!AtomicQueueCreate(&audio_context->queue, 4800 * sizeof(int16_t) * 2)) {
-    LOG("Failed to create queue (%s)", strerror(errno));
-    goto rollback_context;
+  if (!AtomicQueueCreate(&audio_context->queue,
+                         queue_size * sizeof(int16_t) * 2)) {
+    LOG("Failed to create buffer queue (%s)", strerror(errno));
+    goto rollback_audio_context;
   }

-  if (thrd_create(&audio_context->thread, AudioContextThreadProc,
-                  audio_context) != thrd_success) {
-    LOG("Failed to create thread (%s)", strerror(errno));
+  audio_context->pw_thread_loop = pw_thread_loop_new("audio-playback", NULL);
+  if (!audio_context->pw_thread_loop) {
+    LOG("Failed to create pipewire thread loop");
     goto rollback_queue;
   }
+
+  pw_thread_loop_lock(audio_context->pw_thread_loop);
+  int err = pw_thread_loop_start(audio_context->pw_thread_loop);
+  if (err) {
+    LOG("Failed to start pipewire thread loop (%s)", spa_strerror(err));
+    pw_thread_loop_unlock(audio_context->pw_thread_loop);
+    goto rollback_thread_loop;
+  }
+
+  // TOOD(mburakov): Read these from the commandline?
+  struct pw_properties* pw_properties = pw_properties_new(
+#define _(...) __VA_ARGS__
+      _(PW_KEY_MEDIA_TYPE, "Audio"), _(PW_KEY_MEDIA_CATEGORY, "Playback"),
+      _(PW_KEY_MEDIA_ROLE, "Game"), _(PW_KEY_NODE_LATENCY, "128/48000"), NULL
+#undef _
+  );
+  if (!pw_properties) {
+    LOG("Failed to create pipewire properties");
+    pw_thread_loop_unlock(audio_context->pw_thread_loop);
+    goto rollback_thread_loop;
+  }
+
+  static const struct pw_stream_events kPwStreamEvents = {
+      .version = PW_VERSION_STREAM_EVENTS,
+      .process = OnStreamProcess,
+  };
+  audio_context->pw_stream = pw_stream_new_simple(
+      pw_thread_loop_get_loop(audio_context->pw_thread_loop), "audio-playback",
+      pw_properties, &kPwStreamEvents, audio_context);
+  if (!audio_context->pw_stream) {
+    LOG("Failed to create pipewire stream");
+    pw_thread_loop_unlock(audio_context->pw_thread_loop);
+    goto rollback_thread_loop;
+  }
+
+  uint8_t buffer[1024];
+  struct spa_pod_builder spa_pod_builder =
+      SPA_POD_BUILDER_INIT(buffer, sizeof(buffer));
+  const struct spa_pod* params[] = {
+      spa_format_audio_raw_build(
+          &spa_pod_builder, SPA_PARAM_EnumFormat,
+          &SPA_AUDIO_INFO_RAW_INIT(.format = SPA_AUDIO_FORMAT_S16_LE,
+                                   .rate = 48000, .channels = 2,
+                                   .position = {SPA_AUDIO_CHANNEL_FL,
+                                                SPA_AUDIO_CHANNEL_FR})),
+  };
+  static const enum pw_stream_flags kPwStreamFlags =
+      PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_MAP_BUFFERS |
+      PW_STREAM_FLAG_RT_PROCESS;
+  if (pw_stream_connect(audio_context->pw_stream, PW_DIRECTION_OUTPUT,
+                        PW_ID_ANY, kPwStreamFlags, params, LENGTH(params))) {
+    LOG("Failed to connect pipewire stream");
+    pw_stream_destroy(audio_context->pw_stream);
+    pw_thread_loop_unlock(audio_context->pw_thread_loop);
+    goto rollback_thread_loop;
+  }
+
+  audio_context->queue_samples_sum = 0;
+  audio_context->queue_samples_count = 0;
+  pw_thread_loop_unlock(audio_context->pw_thread_loop);
   return audio_context;

+rollback_thread_loop:
+  pw_thread_loop_destroy(audio_context->pw_thread_loop);
 rollback_queue:
   AtomicQueueDestroy(&audio_context->queue);
-rollback_context:
+rollback_audio_context:
   free(audio_context);
+  pw_deinit();
   return NULL;
 }

 bool AudioContextDecode(struct AudioContext* audio_context, const void* buffer,
                         size_t size) {
-  if (!atomic_load_explicit(&audio_context->running, memory_order_relaxed)) {
-    LOG("Audio thread was stopped early!");
-    return false;
-  }
   if (AtomicQueueWrite(&audio_context->queue, buffer, size) < size)
     LOG("Audio queue overflow!");
+  size_t queue_size =
+      atomic_load_explicit(&audio_context->queue.size, memory_order_relaxed);
+  static const size_t stride = sizeof(int16_t) * 2;
+  audio_context->queue_samples_sum += queue_size / stride;
+  audio_context->queue_samples_count++;
   return true;
 }

-uint64_t AudioContextGetLatency(const struct AudioContext* audio_context) {
-  return atomic_load_explicit(&audio_context->latency, memory_order_relaxed);
+uint64_t AudioContextGetLatency(struct AudioContext* audio_context) {
+  size_t queue_latency = 0;
+  if (audio_context->queue_samples_count) {
+    queue_latency =
+        audio_context->queue_samples_sum / audio_context->queue_samples_count;
+    audio_context->queue_samples_sum = 0;
+    audio_context->queue_samples_count = 0;
+  }
+  // TODO(mburakov): This number is extremely optimistic, i.e. Bluetooth delays
+  // are not accounted for. Is it anyhow possible to get this information?
+  return (128 + queue_latency) * 1000000 / 48000;
 }

 void AudioContextDestroy(struct AudioContext* audio_context) {
-  atomic_store_explicit(&audio_context->running, 0, memory_order_relaxed);
-  thrd_join(audio_context->thread, NULL);
+  pw_thread_loop_lock(audio_context->pw_thread_loop);
+  pw_stream_destroy(audio_context->pw_stream);
+  pw_thread_loop_unlock(audio_context->pw_thread_loop);
+  pw_thread_loop_destroy(audio_context->pw_thread_loop);
   AtomicQueueDestroy(&audio_context->queue);
   free(audio_context);
+  pw_deinit();
 }
```
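
To make the new latency estimate concrete, a worked example (my numbers, not from the commit): `AudioContextGetLatency()` averages the queue fill level sampled at every `AudioContextDecode()` call and adds the 128 frames requested via `PW_KEY_NODE_LATENCY`. With an average of 480 queued stereo frames the estimate is `(128 + 480) * 1000000 / 48000 = 12666` microseconds, roughly 12.7 ms. As the in-code TODO notes, sink-side delays such as Bluetooth buffering are not included.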
```diff
diff --git a/audio.h b/audio.h
@@ -24,10 +24,10 @@

 struct AudioContext;

-struct AudioContext* AudioContextCreate(const char* device);
+struct AudioContext* AudioContextCreate(size_t queue_size);
 bool AudioContextDecode(struct AudioContext* audio_context, const void* buffer,
                         size_t size);
-uint64_t AudioContextGetLatency(const struct AudioContext* audio_context);
+uint64_t AudioContextGetLatency(struct AudioContext* audio_context);
 void AudioContextDestroy(struct AudioContext* audio_context);

 #endif  // RECEIVER_AUDIO_H_
```

```diff
diff --git a/main.c b/main.c
@@ -149,7 +149,7 @@ static void GetMaxOverlaySize(size_t* width, size_t* height) {
 }

 static struct Context* ContextCreate(int sock, bool no_input, bool stats,
-                                     const char* audio_device) {
+                                     const char* audio_buffer) {
   struct Context* context = calloc(1, sizeof(struct Context));
   if (!context) {
     LOG("Failed to allocate context (%s)", strerror(errno));
@@ -198,8 +198,13 @@ static struct Context* ContextCreate(int sock, bool no_input, bool stats,
     goto rollback_overlay;
   }

-  if (audio_device) {
-    context->audio_context = AudioContextCreate(audio_device);
+  if (audio_buffer) {
+    int buffer_size = atoi(audio_buffer);
+    if (buffer_size <= 0) {
+      LOG("Invalid audio buffer size");
+      goto rollback_decode_context;
+    }
+    context->audio_context = AudioContextCreate((size_t)buffer_size);
     if (!context->audio_context) {
       LOG("Failed to create audio context");
       goto rollback_decode_context;
@@ -445,7 +450,7 @@ static void ContextDestroy(struct Context* context) {

 int main(int argc, char* argv[]) {
   if (argc < 2) {
-    LOG("Usage: %s <ip>:<port> [--no-input] [--stats] [--audio <device>]",
+    LOG("Usage: %s <ip>:<port> [--no-input] [--stats] [--audio <buffer_size>]",
         argv[0]);
     return EXIT_FAILURE;
   }
@@ -458,14 +463,14 @@ int main(int argc, char* argv[]) {

   bool no_input = false;
   bool stats = false;
-  const char* audio_device = NULL;
+  const char* audio_buffer = NULL;
   for (int i = 2; i < argc; i++) {
     if (!strcmp(argv[i], "--no-input")) {
       no_input = true;
     } else if (!strcmp(argv[i], "--stats")) {
       stats = true;
     } else if (!strcmp(argv[i], "--audio")) {
-      audio_device = argv[++i];
+      audio_buffer = argv[++i];
       if (i == argc) {
         LOG("Audio argument requires a value");
         return EXIT_FAILURE;
@@ -473,7 +478,7 @@ int main(int argc, char* argv[]) {
     }
   }

-  struct Context* context = ContextCreate(sock, no_input, stats, audio_device);
+  struct Context* context = ContextCreate(sock, no_input, stats, audio_buffer);
   if (!context) {
     LOG("Failed to create context");
     goto rollback_socket;
```

```diff
diff --git a/makefile b/makefile
@@ -10,7 +10,7 @@ obj+=\
 	toolbox/perf.o

 libs:=\
-	alsa \
+	libpipewire-0.3 \
 	libva \
 	libva-drm \
 	mfx \
```
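
For reference, below is a minimal caller of the reworked `audio.h` interface, condensed from what `main.c` now does. It is a sketch rather than code from this commit: the 4800-frame queue and the silent 10 ms packets are illustrative values, and the real receiver feeds the queue with audio decoded from the network.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include "audio.h"

int main(void) {
  // 4800 stereo S16 frames is 100 ms at 48 kHz, the previously hard-coded size.
  struct AudioContext* audio = AudioContextCreate(4800);
  if (!audio) return 1;

  // The receiver pushes decoded network audio here; this sketch pushes silence.
  int16_t packet[480 * 2] = {0};  // 10 ms of interleaved stereo S16 samples
  for (int i = 0; i < 5; i++) {
    if (!AudioContextDecode(audio, packet, sizeof(packet))) break;
  }

  // Average playback latency estimate, in microseconds, since the last query.
  printf("estimated latency: %" PRIu64 " us\n", AudioContextGetLatency(audio));

  AudioContextDestroy(audio);
  return 0;
}
```

On the command line the equivalent configuration is selected by passing `--audio 4800` alongside the server address.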