feature/analog_input (#12)

Co-authored-by: Paul Obernesser <paul.obernesser@inncubator.at>
Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/12
This commit was merged in pull request #12.
This commit is contained in:
2025-12-03 12:28:30 +01:00
parent 98dd00e653
commit 6c7b74a0b2
19 changed files with 1497 additions and 924 deletions

6
.gitignore vendored
View File

@@ -44,3 +44,9 @@ src/auracast/server/certs/per_device/
src/auracast/.env
src/auracast/server/certs/ca/ca_cert.srl
src/auracast/server/credentials.json
pcm1862-i2s.dtbo
ch1.wav
ch2.wav
src/auracast/available_samples.txt
src/auracast/server/stream_settings2.json
src/scripts/temperature_log*

View File

@@ -218,6 +218,31 @@ sudo ldconfig # refresh linker cache
- echo i2c-dev | sudo tee -a /etc/modules
- read temp /src/scripts/temp
# configure the pcm1862 i2s interface
bash misc/build_pcm1862_dts.sh
bash misc/install_asoundconf.sh
- configure differential inputs
sudo modprobe i2c-dev
i2cdetect -y 1 | grep -i 4a || true
i2cset -f -y 1 0x4a 0x00 0x00 # Page 0
i2cset -f -y 1 0x4a 0x06 0x10 # Left = VIN1P/M [DIFF]
i2cset -f -y 1 0x4a 0x07 0x10 # Right = VIN2P/M [DIFF]
# test recording
arecord -f cd -c 1 -D record_left left.wav -r48000
arecord -f cd -c 1 -D record_right right.wav -r48000
# Run with realtime priority
- For the feedback loop to work correctly, realtime priority is absolutely necessary.
chrt -f 99 python src/auracast/multicast.py
- give the user realtime priority:
sudo tee /etc/security/limits.d/99-realtime.conf >/dev/null <<'EOF'
caster - rtprio 99
caster - memlock unlimited
EOF
# Known issues:
- When running on a laptop, there might be issues switching between USB and browser audio input, since they use the same audio device.

28
poetry.lock generated
View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
[[package]]
name = "aioconsole"
@@ -2443,6 +2443,30 @@ files = [
{file = "rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3"},
]
[[package]]
name = "samplerate"
version = "0.2.2"
description = "Monolithic python wrapper for libsamplerate based on pybind11 and NumPy"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "samplerate-0.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:99b47c238ef7216b87ccf5e8860b94b527cceef7a8add38f146e75f6efec257f"},
{file = "samplerate-0.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:0aa6ae933cb85eac5ffdebc38abc198be890c2bcbac263c30301699d651e9513"},
{file = "samplerate-0.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a41fe7a8c68101bf9900ba415cf2a0a58199bba9cac15e0a3b22b70006705b29"},
{file = "samplerate-0.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:86fb8eb9a6c75d4c17f8125e203d29bf2d87bf5ce0e671184ba5111f015c9264"},
{file = "samplerate-0.2.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3f30fea3e42b51e2441cf464e24c4744fa0b9a837b7beefb6a8eb6cc72af1e51"},
{file = "samplerate-0.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:1170c5e4f68d9c1bbec2fce1549108838a473058f69cca7bc377e053ee43457b"},
{file = "samplerate-0.2.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:567dfe3888634435b8da1ac4bc06ad289ba777876f446760249e923e6b3585c5"},
{file = "samplerate-0.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:6c819b0360e9632be0391ec3eecc15510e30775632f4022e384e28908f59648c"},
{file = "samplerate-0.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d072b658e438d55fed1224da9b226be1328ff9aea4268d02dbc7d864a72ce4f4"},
{file = "samplerate-0.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:bdae4f21890378f3886816800c8ef3395dabaa13fcac07bb0de7ad413703bfef"},
{file = "samplerate-0.2.2.tar.gz", hash = "sha256:40964bfa28d33bc948389d958c2e742585f21891d8372ebba89260f491a15caa"},
]
[package.dependencies]
numpy = "*"
[[package]]
name = "six"
version = "1.17.0"
@@ -2952,4 +2976,4 @@ test = ["pytest", "pytest-asyncio"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.11"
content-hash = "6b5300c349ed045e8fd3e617e6262bbd7e5c48c518e4c62cedf7c17da50ce8c0"
content-hash = "3c9f92c7a5af40f98da9c7824d9c2a6f7eb809e91e43cfef4995761b2e887256"

View File

@@ -16,7 +16,8 @@ dependencies = [
"aiortc (>=1.13.0,<2.0.0)",
"sounddevice (>=0.5.2,<0.6.0)",
"python-dotenv (>=1.1.1,<2.0.0)",
"smbus2 (>=0.5.0,<0.6.0)"
"smbus2 (>=0.5.0,<0.6.0)",
"samplerate (>=0.2.2,<0.3.0)"
]
[project.optional-dependencies]

View File

@@ -40,8 +40,6 @@ class AuracastGlobalConfig(BaseModel):
# so receivers may render earlier than the presentation delay for lower latency.
immediate_rendering: bool = False
assisted_listening_stream: bool = False
# Adaptive frame dropping: discard sub-frame samples when buffer exceeds threshold
enable_adaptive_frame_dropping: bool = False
# "Audio input. "
# "'device' -> use the host's default sound input device, "

View File

@@ -27,6 +27,10 @@ from typing import cast, Any, AsyncGenerator, Coroutine, List
import itertools
import glob
import time
import threading
import numpy as np # for audio down-mix
import os
import lc3 # type: ignore # pylint: disable=E0401
@@ -42,7 +46,6 @@ from bumble.profiles import bass
import bumble.device
import bumble.transport
import bumble.utils
import numpy as np # for audio down-mix
from bumble.device import Host, AdvertisingChannelMap
from bumble.audio import io as audio_io
@@ -54,22 +57,20 @@ from auracast.utils.webrtc_audio_input import WebRTCAudioInput
# Patch sounddevice.InputStream globally to use low-latency settings
import sounddevice as sd
from collections import deque
class ModSoundDeviceAudioInput(audio_io.SoundDeviceAudioInput):
"""Patched SoundDeviceAudioInput that creates RawInputStream with low-latency parameters."""
"""Patched SoundDeviceAudioInput with low-latency capture and adaptive resampling."""
def _open(self):
"""Patched _open method that creates RawInputStream with low-latency parameters."""
try:
"""Create RawInputStream with low-latency parameters and initialize ring buffer."""
dev_info = sd.query_devices(self._device)
hostapis = sd.query_hostapis()
api_index = dev_info.get('hostapi')
api_name = hostapis[api_index]['name'] if isinstance(api_index, int) and 0 <= api_index < len(hostapis) else 'unknown'
pa_ver = None
try:
pa_ver = sd.get_portaudio_version()
except Exception:
pass
logging.info(
"SoundDevice backend=%s device='%s' (id=%s) ch=%s default_low_input_latency=%.4f default_high_input_latency=%.4f portaudio=%s",
api_name,
@@ -80,29 +81,74 @@ class ModSoundDeviceAudioInput(audio_io.SoundDeviceAudioInput):
float(dev_info.get('default_high_input_latency') or 0.0),
pa_ver[1] if isinstance(pa_ver, tuple) and len(pa_ver) >= 2 else pa_ver,
)
except Exception as e:
logging.warning("Failed to query sounddevice backend/device info: %s", e)
# Create RawInputStream with injected low-latency parameters
# Target ~2 ms blocksize (48 kHz -> 96 frames). For other rates, keep ~2 ms.
_sr = int(self._pcm_format.sample_rate)
self.counter=0
self.max_avail=0
self.logfile_name="available_samples.txt"
self.blocksize = 120
if os.path.exists(self.logfile_name):
os.remove(self.logfile_name)
self._stream = sd.RawInputStream(
samplerate=self._pcm_format.sample_rate,
device=self._device,
channels=self._pcm_format.channels,
dtype='int16',
blocksize=240, # Match frame size
latency=0.010,
blocksize=self.blocksize,
latency=0.004,
)
self._stream.start()
logging.info(f"SoundDeviceAudioInput: Opened with blocksize=240, latency=0.010 (10ms)")
return audio_io.PcmFormat(
audio_io.PcmFormat.Endianness.LITTLE,
audio_io.PcmFormat.SampleType.INT16,
self._pcm_format.sample_rate,
2,
1,
)
def _read(self, frame_size: int) -> bytes:
"""Read PCM samples from the stream."""
#if self.counter % 50 == 0:
frame_size = frame_size + 1 # consume samples a little faster to avoid latency akkumulation
pcm_buffer, overflowed = self._stream.read(frame_size)
if overflowed:
logging.warning("SoundDeviceAudioInput: overflowed")
n_available = self._stream.read_available
# adapt = n_available > 20
# if adapt:
# pcm_extra, overflowed = self._stream.read(3)
# logging.info('consuming extra samples, available was %d', n_available)
# if overflowed:
# logging.warning("SoundDeviceAudioInput: overflowed")
# out = bytes(pcm_buffer) + bytes(pcm_extra)
# else:
out = bytes(pcm_buffer)
self.max_avail = max(self.max_avail, n_available)
#Diagnostics
#with open(self.logfile_name, "a", encoding="utf-8") as f:
# f.write(f"{n_available}, {adapt}, {round(self._runavg, 2)}, {overflowed}\n")
if self.counter % 500 == 0:
logging.info(
"read available=%d, max=%d, latency:%d",
n_available, self.max_avail, self._stream.latency
)
self.max_avail = 0
self.counter += 1
return out
audio_io.SoundDeviceAudioInput = ModSoundDeviceAudioInput
# modified from bumble
@@ -590,58 +636,7 @@ class Streamer():
if hasattr(audio_input, "rewind"):
audio_input.rewind = big_config[i].loop
# Retry logic ALSA sometimes keeps the device busy for a short time after the
# previous stream has closed. Handle PortAudioError -9985 with back-off retries.
import sounddevice as _sd
max_attempts = 3
for attempt in range(1, max_attempts + 1):
try:
pcm_format = await audio_input.open()
break # success
except _sd.PortAudioError as err:
# -9985 == paDeviceUnavailable
logging.error('Could not open audio device %s with error %s', audio_source, err)
code = None
if hasattr(err, 'errno'):
code = err.errno
elif len(err.args) > 1 and isinstance(err.args[1], int):
code = err.args[1]
if code == -9985 and attempt < max_attempts:
backoff_ms = 200 * attempt
logging.warning("PortAudio device busy (attempt %d/%d). Retrying in %.1f ms…", attempt, max_attempts, backoff_ms)
# ensure device handle and PortAudio context are closed before retrying
try:
if hasattr(audio_input, "aclose"):
await audio_input.aclose()
elif hasattr(audio_input, "close"):
audio_input.close()
except Exception:
pass
# Fully terminate PortAudio to drop lingering handles (sounddevice quirk)
if hasattr(_sd, "_terminate"):
try:
_sd._terminate()
except Exception:
pass
# Small pause then re-initialize PortAudio
await asyncio.sleep(0.1)
if hasattr(_sd, "_initialize"):
try:
_sd._initialize()
except Exception:
pass
# Back-off before next attempt
await asyncio.sleep(backoff_ms / 1000)
# Recreate audio_input fresh for next attempt
audio_input = await audio_io.create_audio_input(audio_source, input_format)
continue
# Other errors or final attempt re-raise so caller can abort gracefully
raise
else:
# Loop exhausted without break
logging.error("Unable to open audio device after %d attempts giving up", max_attempts)
return
if pcm_format.channels != 1:
logging.info("Input device provides %d channels will down-mix to mono for LC3", pcm_format.channels)
@@ -673,34 +668,6 @@ class Streamer():
bigs = self.bigs
self.is_streaming = True
# frame drop algo parameters
# In demo/precoded modes there may be no audio_input or no _pcm_format yet
ai = big.get('audio_input')
if ai is not None and hasattr(ai, '_pcm_format') and getattr(ai, '_pcm_format') is not None:
sample_rate = ai._pcm_format.sample_rate
else:
sample_rate = global_config.auracast_sampling_rate_hz
samples_discarded_total = 0 # Total samples discarded
discard_events = 0 # Number of times we discarded samples
frames_since_last_discard = 999 # Guard: frames since last discard (start high to allow first drop)
enable_drift_compensation = getattr(global_config, 'enable_adaptive_frame_dropping', False)
# Hardcoded parameters (unit: milliseconds)
drift_threshold_ms = 2.0 if enable_drift_compensation else 0.0
static_drop_ms = 1 if enable_drift_compensation else 0.0
# Guard interval measured in LC3 frames (10 ms each); 50 => 500 ms cooldown
discard_guard_frames = int(2*sample_rate / 1000) if enable_drift_compensation else 0
# Derived sample counts
drop_threshold_samples = int(sample_rate * drift_threshold_ms / 1000.0)
static_drop_samples = int(sample_rate * static_drop_ms / 1000.0)
if enable_drift_compensation:
logging.info(f"Clock drift compensation ENABLED: threshold={drift_threshold_ms}ms, guard={discard_guard_frames} frames")
else:
logging.info("Clock drift compensation DISABLED")
# Periodic monitoring
last_stats_log = time.perf_counter()
stats_interval = 5.0 # Log stats every 5 seconds
frame_count = 0
# One streamer fits all
@@ -715,209 +682,82 @@ class Streamer():
if lc3_frame == b'': # Not all streams may stop at the same time
stream_finished[i] = True
continue
else: # code lc3 on the fly
# Use stored frames generator when available so we can aclose() it on stop
else: # code lc3 on the fly with perf counters
# Ensure frames generator exists (so we can aclose() on stop)
frames_gen = big.get('frames_gen')
if frames_gen is None:
frames_gen = big['audio_input'].frames(big['lc3_frame_samples'])
big['frames_gen'] = frames_gen
# Read the frame we need for encoding
# Initialize perf tracking bucket per BIG
perf = big.setdefault('_perf', {
'n': 0,
'samples_sum': 0.0, 'samples_max': 0.0,
'enc_sum': 0.0, 'enc_max': 0.0,
'write_sum': 0.0, 'write_max': 0.0,
'loop_sum': 0.0, 'loop_max': 0.0,
})
# Total loop duration timer (sample + encode + write)
t_loop0 = time.perf_counter()
# Measure time to get a sample from the buffer
t0 = time.perf_counter()
pcm_frame = await anext(frames_gen, None)
dt_sample = time.perf_counter() - t0
if pcm_frame is None: # Not all streams may stop at the same time
stream_finished[i] = True
continue
# Discard excess samples in buffer if above threshold (clock drift compensation)
if enable_drift_compensation and hasattr(big['audio_input'], '_stream') and big['audio_input']._stream:
sd_buffer_samples = big['audio_input']._stream.read_available
# Guard: only allow discard if enough frames have passed since last discard
if sd_buffer_samples > drop_threshold_samples and frames_since_last_discard >= discard_guard_frames:
# Always drop a static amount (3ms) for predictable behavior
# This matches the crossfade duration better for smoother transitions
samples_to_drop = min(static_drop_samples, max(1, big['lc3_frame_samples'] - 1))
try:
discarded_data = await anext(big['audio_input'].frames(samples_to_drop))
samples_discarded_total += samples_to_drop
discard_events += 1
# Log every discard event with timing information
sample_rate = big['audio_input']._pcm_format.sample_rate
time_since_last_ms = frames_since_last_discard * 10 # Each frame is 10ms
logging.info(
f"DISCARD #{discard_events}: dropped {samples_to_drop} samples ({samples_to_drop / sample_rate * 1000:.1f}ms) | "
f"buffer was {sd_buffer_samples} samples ({sd_buffer_samples / sample_rate * 1000:.1f}ms) | "
f"since_last={frames_since_last_discard} frames ({time_since_last_ms}ms) | "
f"frame={frame_count}"
)
# Reset guard counter
frames_since_last_discard = 0
# Store how much we dropped for potential adaptive crossfade
big['last_drop_samples'] = samples_to_drop
# Set flag to apply crossfade on next frame
big['apply_crossfade'] = True
except Exception as e:
logging.error(f"Failed to discard samples: {e}")
# Down-mix multi-channel PCM to mono for LC3 encoder if needed
if big.get('channels', 1) > 1:
if isinstance(pcm_frame, np.ndarray):
if pcm_frame.ndim > 1:
mono = pcm_frame.mean(axis=1).astype(pcm_frame.dtype)
pcm_frame = mono
else:
# Convert raw bytes to numpy, average channels, convert back
dtype = np.int16 if big['pcm_bit_depth'] == 16 else np.float32
samples = np.frombuffer(pcm_frame, dtype=dtype)
samples = samples.reshape(-1, big['channels']).mean(axis=1)
pcm_frame = samples.astype(dtype).tobytes()
# Apply crossfade if samples were just dropped (drift compensation)
if big.get('apply_crossfade') and big.get('prev_pcm_frame') is not None:
# Crossfade duration: 10ms for smoother transition (was 5ms)
dtype = np.int16 if big['pcm_bit_depth'] == 16 else np.float32
sample_rate = big['audio_input']._pcm_format.sample_rate
crossfade_samples = min(int(sample_rate * 0.010), big['lc3_frame_samples'] // 2)
# Convert frames to numpy arrays (make writable copies)
prev_samples = np.frombuffer(big['prev_pcm_frame'], dtype=dtype).copy()
curr_samples = np.frombuffer(pcm_frame, dtype=dtype).copy()
# Create equal-power crossfade curves (smoother than linear)
# Equal-power maintains perceived loudness during transition
t = np.linspace(0, 1, crossfade_samples)
fade_out = np.cos(t * np.pi / 2) # Cosine fade out
fade_in = np.sin(t * np.pi / 2) # Sine fade in
# Apply crossfade to the beginning of current frame with end of previous frame
if len(prev_samples) >= crossfade_samples and len(curr_samples) >= crossfade_samples:
crossfaded = (
prev_samples[-crossfade_samples:] * fade_out +
curr_samples[:crossfade_samples] * fade_in
).astype(dtype)
# Replace beginning of current frame with crossfaded section
curr_samples[:crossfade_samples] = crossfaded
pcm_frame = curr_samples.tobytes()
big['apply_crossfade'] = False
# Store current frame for potential next crossfade
if enable_drift_compensation:
big['prev_pcm_frame'] = pcm_frame
# Measure LC3 encoding time
t1 = time.perf_counter()
lc3_frame = big['encoder'].encode(
pcm_frame, num_bytes=big['lc3_bytes_per_frame'], bit_depth=big['pcm_bit_depth']
)
dt_enc = time.perf_counter() - t1
# Measure write blocking time
t2 = time.perf_counter()
await big['iso_queue'].write(lc3_frame)
dt_write = time.perf_counter() - t2
# Total loop duration
dt_loop = time.perf_counter() - t_loop0
# Update stats
perf['n'] += 1
perf['samples_sum'] += dt_sample
perf['enc_sum'] += dt_enc
perf['write_sum'] += dt_write
perf['loop_sum'] += dt_loop
perf['samples_max'] = max(perf['samples_max'], dt_sample)
perf['enc_max'] = max(perf['enc_max'], dt_enc)
perf['write_max'] = max(perf['write_max'], dt_write)
perf['loop_max'] = max(perf['loop_max'], dt_loop)
frame_count += 1
# Increment guard counter (tracks frames since last discard)
frames_since_last_discard += 1
# Periodic stats logging (only for device/sounddevice streams, not WAV files)
# WAV file concurrent access causes deadlock in ThreadedAudioInput
now = time.perf_counter()
is_device_stream = hasattr(big['audio_input'], '_stream') and big['audio_input']._stream is not None
if is_device_stream and now - last_stats_log >= stats_interval:
# Get current buffer status from PortAudio
current_sd_buffer = 0
if hasattr(big['audio_input'], '_stream') and big['audio_input']._stream:
try:
current_sd_buffer = big['audio_input']._stream.read_available
except Exception:
pass
# Get stream latency and CPU load from sounddevice
stream_latency_ms = None
cpu_load_pct = None
if hasattr(big['audio_input'], '_stream') and big['audio_input']._stream:
try:
latency = big['audio_input']._stream.latency
if frame_count == 501: # Debug log once
logging.info(f"DEBUG: stream.latency raw value = {latency}, type = {type(latency)}")
# latency can be either a float (for input-only streams) or tuple (input, output)
if latency is not None:
if isinstance(latency, (int, float)):
# Single value for input-only stream
stream_latency_ms = float(latency) * 1000.0
elif isinstance(latency, (tuple, list)) and len(latency) >= 1:
# Tuple (input_latency, output_latency)
stream_latency_ms = latency[0] * 1000.0
except Exception as e:
if frame_count == 501: # Log once at startup
logging.warning(f"Could not get stream.latency: {e}")
try:
cpu_load = big['audio_input']._stream.cpu_load
if frame_count == 501: # Debug log once
logging.info(f"DEBUG: stream.cpu_load raw value = {cpu_load}")
# cpu_load is a fraction (0.0 to 1.0)
if cpu_load is not None and cpu_load >= 0:
cpu_load_pct = cpu_load * 100.0 # Convert to percentage
except Exception as e:
if frame_count == 501: # Log once at startup
logging.warning(f"Could not get stream.cpu_load: {e}")
# Get backend-specific buffer status
backend_delay = None
backend_label = "Backend"
# Determine which backend we're using based on audio_input device
try:
device_info = big['audio_input']._device if hasattr(big['audio_input'], '_device') else None
if device_info is not None and isinstance(device_info, int):
hostapi = sd.query_hostapis(sd.query_devices(device_info)['hostapi'])
backend_name = hostapi['name']
else:
backend_name = "Unknown"
except Exception:
backend_name = "Unknown"
if 'pulse' in backend_name.lower():
# PipeWire/PulseAudio backend - no direct buffer access
# SD_buffer is the only reliable metric
backend_label = "PipeWire"
backend_delay = None # Cannot read PipeWire internal buffers directly
else:
# ALSA backend - can read kernel buffer
backend_label = "ALSA_kernel"
try:
with open('/proc/asound/card0/pcm0c/sub0/status', 'r') as f:
for line in f:
if 'delay' in line and ':' in line:
backend_delay = int(line.split(':')[1].strip())
break
except Exception:
pass
if enable_drift_compensation:
avg_discard_per_event = (samples_discarded_total / discard_events) if discard_events > 0 else 0.0
discard_event_rate = (discard_events / frame_count * 100) if frame_count > 0 else 0.0
latency_str = f"stream_latency={stream_latency_ms:.2f} ms" if stream_latency_ms is not None else "stream_latency=N/A"
cpu_str = f"cpu_load={cpu_load_pct:.1f}%" if cpu_load_pct is not None else "cpu_load=N/A"
# Log every 500 frames for this BIG and reset accumulators
if perf['n'] >= 500:
n = perf['n']
logging.info(
f"STATS: frames={frame_count} | discard_events={discard_events} ({discard_event_rate:.1f}%) | "
f"avg_discard={avg_discard_per_event:.0f} samples/event | "
f"SD_buffer={current_sd_buffer} samples ({current_sd_buffer / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms) | "
f"{latency_str} | {cpu_str} | "
f"threshold={drop_threshold_samples} samples ({drop_threshold_samples / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms)"
"Perf(i=%d, last %d): sample mean=%.6fms max=%.6fms | encode mean=%.6fms max=%.6fms | write mean=%.6fms max=%.6fms | loop mean=%.6fms max=%.6fms",
i,
n,
(perf['samples_sum'] / n) * 1e3, perf['samples_max'] * 1e3,
(perf['enc_sum'] / n) * 1e3, perf['enc_max'] * 1e3,
(perf['write_sum'] / n) * 1e3, perf['write_max'] * 1e3,
(perf['loop_sum'] / n) * 1e3, perf['loop_max'] * 1e3,
)
else:
backend_str = f"{backend_label}={backend_delay} samples ({backend_delay / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms)" if backend_delay is not None else f"{backend_label}=N/A (use pw-top)"
latency_str = f"stream_latency={stream_latency_ms:.2f} ms" if stream_latency_ms is not None else "stream_latency=N/A"
cpu_str = f"cpu_load={cpu_load_pct:.1f}%" if cpu_load_pct is not None else "cpu_load=N/A"
logging.info(
f"STATS: frames={frame_count} | "
f"SD_buffer={current_sd_buffer} samples ({current_sd_buffer / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms) | "
f"{latency_str} | {cpu_str} | "
f"{backend_str} | "
f"drift_compensation=DISABLED"
)
last_stats_log = now
perf.update({
'n': 0,
'samples_sum': 0.0, 'samples_max': 0.0,
'enc_sum': 0.0, 'enc_max': 0.0,
'write_sum': 0.0, 'write_max': 0.0,
'loop_sum': 0.0, 'loop_max': 0.0,
})
if all(stream_finished): # Take into account that multiple files have different lengths
logging.info('All streams finished, stopping streamer')
@@ -971,105 +811,27 @@ if __name__ == "__main__":
)
os.chdir(os.path.dirname(__file__))
# =============================================================================
# AUDIO BACKEND CONFIGURATION - Toggle between ALSA and PipeWire
# =============================================================================
# Uncomment ONE of the following backend configurations:
# Option 1: Direct ALSA (Direct hardware access, bypasses PipeWire)
AUDIO_BACKEND = 'ALSA'
target_latency_ms = 10.0
# Option 2: PipeWire via PulseAudio API (Routes through pipewire-pulse)
#AUDIO_BACKEND = 'PipeWire'
#target_latency_ms = 5.0 # PipeWire typically handles lower latency better
# =============================================================================
import sounddevice as sd
import subprocess
# Detect if PipeWire is running (even if we're using ALSA API)
pipewire_running = False
try:
result = subprocess.run(['systemctl', '--user', 'is-active', 'pipewire'],
capture_output=True, text=True, timeout=1)
pipewire_running = (result.returncode == 0)
except Exception:
pass
if AUDIO_BACKEND == 'ALSA':
os.environ['SDL_AUDIODRIVER'] = 'alsa'
sd.default.latency = target_latency_ms / 1000.0
# Find ALSA host API
try:
alsa_hostapi = next(i for i, ha in enumerate(sd.query_hostapis())
if 'ALSA' in ha['name'])
logging.info(f"ALSA host API available at index: {alsa_hostapi}")
except StopIteration:
logging.error("ALSA backend not found!")
elif AUDIO_BACKEND == 'PipeWire':
os.environ['SDL_AUDIODRIVER'] = 'pulseaudio'
sd.default.latency = target_latency_ms / 1000.0
if not pipewire_running:
logging.error("PipeWire selected but not running!")
raise RuntimeError("PipeWire is not active")
# Find PulseAudio host API (required for PipeWire mode)
try:
pulse_hostapi = next(i for i, ha in enumerate(sd.query_hostapis())
if 'pulse' in ha['name'].lower())
logging.info(f"Using PulseAudio host API at index: {pulse_hostapi} → routes to PipeWire")
except StopIteration:
logging.error("PulseAudio host API not found! Did you rebuild PortAudio with -DPA_USE_PULSEAUDIO=ON?")
raise RuntimeError("PulseAudio API not available in PortAudio")
else:
logging.error(f"Unknown AUDIO_BACKEND: {AUDIO_BACKEND}")
raise ValueError(f"Invalid AUDIO_BACKEND: {AUDIO_BACKEND}")
# Select audio input device based on backend
shure_device_idx = None
if AUDIO_BACKEND == 'ALSA':
search_str='ch1'
# Use ALSA devices
from auracast.utils.sounddevice_utils import get_alsa_usb_inputs
devices = get_alsa_usb_inputs()
logging.info("Searching ALSA devices for Shure MVX2U...")
logging.info(f"Searching ALSA devices for first device with string {search_str}...")
audio_dev = None
for idx, dev in devices:
logging.info(f" ALSA device [{idx}]: {dev['name']} ({dev['max_input_channels']} ch)")
if 'shure' in dev['name'].lower() and 'mvx2u' in dev['name'].lower():
shure_device_idx = idx
if search_str in dev['name'].lower():
audio_dev = idx
logging.info(f"✓ Selected ALSA device {idx}: {dev['name']}")
break
elif AUDIO_BACKEND == 'PipeWire':
# Use PulseAudio devices (routed through PipeWire)
logging.info("Searching PulseAudio devices for Shure MVX2U...")
for idx, dev in enumerate(sd.query_devices()):
# Only consider PulseAudio input devices
if dev['max_input_channels'] > 0:
hostapi = sd.query_hostapis(dev['hostapi'])
if 'pulse' in hostapi['name'].lower():
dev_name_lower = dev['name'].lower()
logging.info(f" PulseAudio device [{idx}]: {dev['name']} ({dev['max_input_channels']} ch)")
# Skip monitor devices (they're output monitors, not real inputs)
if 'monitor' in dev_name_lower:
continue
# Look for Shure MVX2U - prefer "Mono" device for mono input
if 'shure' in dev_name_lower and 'mvx2u' in dev_name_lower:
shure_device_idx = idx
logging.info(f"✓ Selected PulseAudio device {idx}: {dev['name']} → routes to PipeWire")
break
if shure_device_idx is None:
logging.error(f"Shure MVX2U not found in {AUDIO_BACKEND} devices!")
if audio_dev is None:
logging.error(f"Audio device {audio_dev} not found in {AUDIO_BACKEND} devices!")
raise RuntimeError(f"Audio device not found for {AUDIO_BACKEND} backend")
config = auracast_config.AuracastConfigGroup(
@@ -1093,8 +855,6 @@ if __name__ == "__main__":
#config.transport= 'auto'
config.transport='serial:/dev/ttyAMA3,1000000,rtscts' # transport for raspberry pi
# TODO: encrypted streams are not working
for big in config.bigs:
#big.code = 'abcd'
#big.code = '78 e5 dc f1 34 ab 42 bf c1 92 ef dd 3a fd 67 ae'
@@ -1102,11 +862,11 @@ if __name__ == "__main__":
#big.audio_source = big.audio_source.replace('.wav', '_10_16_32.lc3') #lc3 precoded files
#big.audio_source = read_lc3_file(big.audio_source) # load files in advance
# --- Configure Shure MVX2U USB Audio Interface (ALSA backend) ---
if shure_device_idx is not None:
big.audio_source = f'device:{shure_device_idx}' # Shure MVX2U USB mono interface
# --- Configure device (ALSA backend) ---
if audio_dev is not None:
big.audio_source = f'device:{audio_dev}'
big.input_format = 'int16le,48000,1' # int16, 48kHz, mono
logging.info(f"Configured BIG '{big.name}' with Shure MVX2U (device:{shure_device_idx}, 48kHz mono)")
logging.info(f"Configured BIG '{big.name}' with (device:{audio_dev}, 48kHz mono)")
else:
logging.warning(f"Shure device not found, BIG '{big.name}' will use default audio_source: {big.audio_source}")
@@ -1119,15 +879,12 @@ if __name__ == "__main__":
# 24kHz is only working with 2 streams - probably airtime constraint
# TODO: with more than three broadcasters (16kHz) no advertising (no primary channels is present anymore)
# TODO: find the bottleneck - probably airtime
# TODO: test encrypted streams
config.auracast_sampling_rate_hz = 16000
config.octets_per_frame = 40 # 32kbps@16kHz
config.auracast_sampling_rate_hz = 24000
config.octets_per_frame = 60 # 32kbps@16kHz
#config.immediate_rendering = True
#config.debug = True
# Enable clock drift compensation to prevent latency accumulation
# With ~43 samples/sec drift (0.89ms/sec), threshold of 2ms will trigger every ~2.2 seconds
run_async(
broadcast(
config,

View File

@@ -8,6 +8,8 @@ import requests
from dotenv import load_dotenv
import streamlit as st
from auracast.utils.read_temp import read_case_temp, read_cpu_temp
from auracast import auracast_config
from auracast.utils.frontend_auth import (
is_pw_disabled,
@@ -100,6 +102,10 @@ except Exception:
# Define is_streaming early from the fetched status for use throughout the UI
is_streaming = bool(saved_settings.get("is_streaming", False))
# Extract secondary status, if provided by the backend /status endpoint.
secondary_status = saved_settings.get("secondary") or {}
secondary_is_streaming = bool(saved_settings.get("secondary_is_streaming", secondary_status.get("is_streaming", False)))
st.title("Auracast Audio Mode Control")
def render_stream_controls(status_streaming: bool, start_label: str, stop_label: str, mode_label: str):
@@ -120,6 +126,7 @@ def render_stream_controls(status_streaming: bool, start_label: str, stop_label:
# Note: backend persists 'USB' for any device:<name> source (including AES67). We default to 'USB' in that case.
options = [
"Demo",
"Analog",
"USB",
"Network",
]
@@ -153,6 +160,11 @@ if isinstance(backend_mode_raw, str):
elif backend_mode_raw in options:
backend_mode_mapped = backend_mode_raw
# When Analog is selected in the UI we always show it as such, even though the
# backend currently persists USB for all device sources.
if audio_mode == "Analog":
running_mode = "Analog"
else:
running_mode = backend_mode_mapped if (is_streaming and backend_mode_mapped) else audio_mode
is_started = False
@@ -338,7 +350,257 @@ if audio_mode == "Demo":
quality = None # Not used in demo mode
else:
# Stream quality selection (now enabled)
# --- Mode-specific configuration ---
default_name = saved_settings.get('channel_names', ["Broadcast0"])[0]
raw_program_info = saved_settings.get('program_info', default_name)
if isinstance(raw_program_info, list) and raw_program_info:
default_program_info = raw_program_info[0]
else:
default_program_info = raw_program_info
default_lang = saved_settings.get('languages', ["deu"])[0]
# Per-mode configuration and controls
input_device = None
radio2_enabled = False
radio1_cfg = None
radio2_cfg = None
if audio_mode == "Analog":
# --- Radio 1 controls ---
st.subheader("Radio 1")
quality_options = list(QUALITY_MAP.keys())
default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0]
quality1 = st.selectbox(
"Stream Quality (Radio 1)",
quality_options,
index=quality_options.index(default_quality),
help="Select the audio sampling rate for Radio 1."
)
stream_passwort1 = st.text_input(
"Stream Passwort (Radio 1)",
value="",
type="password",
help="Optional: Set a broadcast code for Radio 1."
)
col_r1_flags1, col_r1_flags2, col_r1_pdelay, col_r1_rtn = st.columns([1, 1, 0.7, 0.6], gap="small")
with col_r1_flags1:
assisted_listening1 = st.checkbox(
"Assistive listening (R1)",
value=bool(saved_settings.get('assisted_listening_stream', False)),
help="tells the receiver that this is an assistive listening stream"
)
with col_r1_flags2:
immediate_rendering1 = st.checkbox(
"Immediate rendering (R1)",
value=bool(saved_settings.get('immediate_rendering', False)),
help="tells the receiver to ignore presentation delay and render immediately if possible."
)
default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000)
with col_r1_pdelay:
default_pdelay_ms = max(10, min(200, default_pdelay // 1000))
presentation_delay_ms1 = st.number_input(
"Delay (ms, R1)",
min_value=10, max_value=200, step=5, value=default_pdelay_ms,
help="Delay between capture and presentation for Radio 1."
)
default_rtn = int(saved_settings.get('rtn', 4) or 4)
with col_r1_rtn:
rtn_options = [1,2,3,4]
default_rtn_clamped = min(4, max(1, default_rtn))
rtn1 = st.selectbox(
"RTN (R1)", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
help="Number of ISO retransmissions for Radio 1."
)
col_r1_name, col_r1_lang = st.columns([2, 1])
with col_r1_name:
stream_name1 = st.text_input(
"Channel Name (Radio 1)",
value=default_name,
help="Name for the first analog radio (Radio 1)."
)
with col_r1_lang:
language1 = st.text_input(
"Language (ISO 639-3) (Radio 1)",
value=default_lang,
help="Language code for Radio 1."
)
program_info1 = st.text_input(
"Program Info (Radio 1)",
value=default_program_info,
help="Program information for Radio 1."
)
# Analog mode exposes only ALSA ch1/ch2 inputs.
if not is_streaming:
try:
resp = requests.get(f"{BACKEND_URL}/audio_inputs_pw_usb")
device_list = resp.json().get('inputs', [])
except Exception as e:
st.error(f"Failed to fetch devices: {e}")
device_list = []
analog_devices = [d for d in device_list if d.get('name') in ('ch1', 'ch2')]
if not analog_devices:
st.warning("No Analog (ch1/ch2) ALSA inputs found. Check asound configuration.")
if st.button("Refresh", disabled=is_streaming):
try:
r = requests.post(f"{BACKEND_URL}/refresh_audio_devices", timeout=8)
if not r.ok:
st.error(f"Failed to refresh: {r.text}")
except Exception as e:
st.error(f"Failed to refresh devices: {e}")
st.rerun()
analog_names = [d['name'] for d in analog_devices]
else:
analog_devices = []
analog_names = []
if not is_streaming:
if analog_names:
default_r1_idx = 0
input_device1 = st.selectbox(
"Input Device (Radio 1)",
analog_names,
index=default_r1_idx,
)
else:
input_device1 = None
else:
input_device1 = saved_settings.get('input_device')
st.selectbox(
"Input Device (Radio 1)",
[input_device1 or "No device selected"],
index=0,
disabled=True,
help="Stop the stream to change the input device."
)
# --- Radio 2 controls ---
st.subheader("Radio 2")
# If the backend reports that the secondary radio is currently streaming,
# initialize the checkbox to checked so the UI reflects the active state
# when the frontend is loaded.
radio2_enabled_default = secondary_is_streaming
radio2_enabled = st.checkbox(
"Enable Radio 2",
value=radio2_enabled_default,
help="Activate a second analog radio with its own quality and timing settings."
)
if radio2_enabled:
quality2 = st.selectbox(
"Stream Quality (Radio 2)",
quality_options,
index=quality_options.index(default_quality),
help="Select the audio sampling rate for Radio 2."
)
stream_passwort2 = st.text_input(
"Stream Passwort (Radio 2)",
value="",
type="password",
help="Optional: Set a broadcast code for Radio 2."
)
col_r2_flags1, col_r2_flags2, col_r2_pdelay, col_r2_rtn = st.columns([1, 1, 0.7, 0.6], gap="small")
with col_r2_flags1:
assisted_listening2 = st.checkbox(
"Assistive listening (R2)",
value=bool(saved_settings.get('assisted_listening_stream', False)),
help="tells the receiver that this is an assistive listening stream"
)
with col_r2_flags2:
immediate_rendering2 = st.checkbox(
"Immediate rendering (R2)",
value=bool(saved_settings.get('immediate_rendering', False)),
help="tells the receiver to ignore presentation delay and render immediately if possible."
)
with col_r2_pdelay:
presentation_delay_ms2 = st.number_input(
"Delay (ms, R2)",
min_value=10, max_value=200, step=5, value=default_pdelay_ms,
help="Delay between capture and presentation for Radio 2."
)
with col_r2_rtn:
rtn2 = st.selectbox(
"RTN (R2)", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
help="Number of ISO retransmissions for Radio 2."
)
col_r2_name, col_r2_lang = st.columns([2, 1])
with col_r2_name:
stream_name2 = st.text_input(
"Channel Name (Radio 2)",
value=f"{default_name}_2",
help="Name for the second analog radio (Radio 2)."
)
with col_r2_lang:
language2 = st.text_input(
"Language (ISO 639-3) (Radio 2)",
value=default_lang,
help="Language code for Radio 2."
)
program_info2 = st.text_input(
"Program Info (Radio 2)",
value=default_program_info,
help="Program information for Radio 2."
)
if not is_streaming:
if analog_names:
default_r2_idx = 1 if len(analog_names) > 1 else 0
input_device2 = st.selectbox(
"Input Device (Radio 2)",
analog_names,
index=default_r2_idx,
)
else:
input_device2 = None
else:
input_device2 = saved_settings.get('input_device')
st.selectbox(
"Input Device (Radio 2)",
[input_device2 or "No device selected"],
index=0,
disabled=True,
help="Stop the stream to change the input device."
)
radio2_cfg = {
'id': 1002,
'name': stream_name2,
'program_info': program_info2,
'language': language2,
'input_device': input_device2,
'quality': quality2,
'stream_passwort': stream_passwort2,
'assisted_listening': assisted_listening2,
'immediate_rendering': immediate_rendering2,
'presentation_delay_ms': presentation_delay_ms2,
'rtn': rtn2,
}
radio1_cfg = {
'id': 1001,
'name': stream_name1,
'program_info': program_info1,
'language': language1,
'input_device': input_device1,
'quality': quality1,
'stream_passwort': stream_passwort1,
'assisted_listening': assisted_listening1,
'immediate_rendering': immediate_rendering1,
'presentation_delay_ms': presentation_delay_ms1,
'rtn': rtn1,
}
else:
# USB/Network: single set of controls shared with the single channel
quality_options = list(QUALITY_MAP.keys())
default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0]
quality = st.selectbox(
@@ -347,37 +609,14 @@ else:
index=quality_options.index(default_quality),
help="Select the audio sampling rate for the stream. Lower rates may improve compatibility."
)
default_name = saved_settings.get('channel_names', ["Broadcast0"])[0]
default_lang = saved_settings.get('languages', ["deu"])[0]
default_input = saved_settings.get('input_device') or 'default'
stream_name = st.text_input(
"Channel Name",
value=default_name,
help="The primary name for your broadcast. Like the SSID of a WLAN, it identifies your stream for receivers."
)
raw_program_info = saved_settings.get('program_info', default_name)
if isinstance(raw_program_info, list) and raw_program_info:
default_program_info = raw_program_info[0]
else:
default_program_info = raw_program_info
program_info = st.text_input(
"Program Info",
value=default_program_info,
help="Additional details about the broadcast program, such as its content or purpose. Shown to receivers for more context."
)
language = st.text_input(
"Language (ISO 639-3)",
value=default_lang,
help="Three-letter language code (e.g., 'eng' for English, 'deu' for German). Used by receivers to display the language of the stream. See: https://en.wikipedia.org/wiki/List_of_ISO_639-3_codes"
)
# Optional broadcast code for coded streams
stream_passwort = st.text_input(
"Stream Passwort",
value="",
type="password",
help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast."
)
# Flags and QoS row (compact, four columns)
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small")
with col_flags1:
assisted_listening = st.checkbox(
@@ -391,7 +630,6 @@ else:
value=bool(saved_settings.get('immediate_rendering', False)),
help="tells the receiver to ignore presentation delay and render immediately if possible."
)
# QoS/presentation controls inline with flags
default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000)
with col_pdelay:
default_pdelay_ms = max(10, min(200, default_pdelay // 1000))
@@ -409,11 +647,24 @@ else:
help="Number of ISO retransmissions (higher improves robustness at cost of airtime)."
)
stream_name = st.text_input(
"Channel Name",
value=default_name,
help="The primary name for your broadcast. Like the SSID of a WLAN, it identifies your stream for receivers."
)
program_info = st.text_input(
"Program Info",
value=default_program_info,
help="Additional details about the broadcast program, such as its content or purpose. Shown to receivers for more context."
)
language = st.text_input(
"Language (ISO 639-3)",
value=default_lang,
help="Three-letter language code (e.g., 'eng' for English, 'deu' for German). Used by receivers to display the language of the stream. See: https://en.wikipedia.org/wiki/List_of_ISO_639-3_codes"
)
# Input device selection for USB or AES67 mode
if audio_mode in ("USB", "Network"):
if not is_streaming:
# Only query device lists when NOT streaming to avoid extra backend calls
try:
endpoint = "/audio_inputs_pw_usb" if audio_mode == "USB" else "/audio_inputs_pw_network"
resp = requests.get(f"{BACKEND_URL}{endpoint}")
@@ -422,12 +673,13 @@ else:
st.error(f"Failed to fetch devices: {e}")
device_list = []
# Display "name [id]" but use name as value
if audio_mode == "USB":
device_list = [d for d in device_list if d.get('name') not in ('ch1', 'ch2')]
input_options = [f"{d['name']} [{d['id']}]" for d in device_list]
option_name_map = {f"{d['name']} [{d['id']}]": d['name'] for d in device_list}
device_names = [d['name'] for d in device_list]
# Determine default input by name (from persisted server state)
default_input_name = saved_settings.get('input_device')
if default_input_name not in device_names and device_names:
default_input_name = device_names[0]
@@ -469,10 +721,8 @@ else:
except Exception as e:
st.error(f"Failed to refresh devices: {e}")
st.rerun()
# Send only the device name to backend
input_device = option_name_map.get(selected_option)
else:
# When streaming, keep showing the current selection but lock editing.
input_device = saved_settings.get('input_device')
current_label = input_device or "No device selected"
st.selectbox(
@@ -484,6 +734,7 @@ else:
)
else:
input_device = None
start_stream, stop_stream = render_stream_controls(is_streaming, "Start Auracast", "Stop Auracast", running_mode)
if stop_stream:
@@ -499,12 +750,68 @@ else:
if start_stream:
# Always send stop to ensure backend is in a clean state, regardless of current status
r = requests.post(f"{BACKEND_URL}/stop_audio").json()
#if r['was_running']:
# st.success("Stream Stopped!")
# Small pause lets backend fully release audio devices before re-init
time.sleep(1)
# Prepare config using the model (do NOT send qos_config, only relevant fields)
if audio_mode == "Analog":
# Build separate configs per radio, each with its own quality and QoS parameters.
is_started = False
def _build_group_from_radio(cfg: dict) -> auracast_config.AuracastConfigGroup | None:
    """Translate one radio's UI settings dict into an AuracastConfigGroup.

    Returns None when cfg is empty or no input device was selected, so the
    caller can skip initializing that radio.
    """
    if not cfg or not cfg.get('input_device'):
        return None
    # Resolve the selected quality label to its sampling rate / frame size.
    q = QUALITY_MAP[cfg['quality']]
    return auracast_config.AuracastConfigGroup(
        auracast_sampling_rate_hz=q['rate'],
        octets_per_frame=q['octets'],
        transport='',  # is set in backend
        assisted_listening_stream=bool(cfg['assisted_listening']),
        immediate_rendering=bool(cfg['immediate_rendering']),
        # UI works in milliseconds; backend expects microseconds.
        presentation_delay_us=int(cfg['presentation_delay_ms'] * 1000),
        qos_config=auracast_config.AuracastQoSConfig(
            iso_int_multiple_10ms=1,
            number_of_retransmissions=int(cfg['rtn']),
            # NOTE(review): latency budget derived from RTN count as a
            # heuristic (10 ms per retransmission + 3 ms) — confirm against
            # the ISO QoS requirements used by the backend.
            max_transport_latency_ms=int(cfg['rtn']) * 10 + 3,
        ),
        bigs=[
            auracast_config.AuracastBigConfig(
                id=cfg.get('id', 123456),
                # An empty password means an open (uncoded) broadcast.
                code=(cfg['stream_passwort'].strip() or None),
                name=cfg['name'],
                program_info=cfg['program_info'],
                language=cfg['language'],
                audio_source=f"device:{cfg['input_device']}",
                # Mono 16-bit little-endian capture at the selected rate.
                input_format=f"int16le,{q['rate']},1",
                iso_que_len=1,
                sampling_frequency=q['rate'],
                octets_per_frame=q['octets'],
            )
        ],
    )
# Radio 1 (always active if a device is selected)
config1 = _build_group_from_radio(radio1_cfg)
# Radio 2 (optional)
config2 = _build_group_from_radio(radio2_cfg) if radio2_enabled else None
try:
if config1 is not None:
r1 = requests.post(f"{BACKEND_URL}/init", json=config1.model_dump())
if r1.status_code == 200:
is_started = True
else:
st.error(f"Failed to initialize Radio 1: {r1.text}")
else:
st.error("Radio 1 has no valid input device configured.")
if config2 is not None:
r2 = requests.post(f"{BACKEND_URL}/init2", json=config2.model_dump())
if r2.status_code != 200:
st.error(f"Failed to initialize Radio 2: {r2.text}")
except Exception as e:
st.error(f"Error while starting Analog radios: {e}")
else:
# USB/Network: single config as before, using shared controls
q = QUALITY_MAP[quality]
config = auracast_config.AuracastConfigGroup(
auracast_sampling_rate_hz=q['rate'],
@@ -530,7 +837,7 @@ else:
sampling_frequency=q['rate'],
octets_per_frame=q['octets'],
),
]
],
)
try:
@@ -563,6 +870,20 @@ if is_started or is_stopped:
############################
with st.expander("System control", expanded=False):
st.subheader("System temperatures")
temp_col1, temp_col2, temp_col3 = st.columns([1, 1, 1])
with temp_col1:
refresh_temps = st.button("Refresh")
try:
case_temp = read_case_temp()
cpu_temp = read_cpu_temp()
with temp_col2:
st.write(f"CPU: {cpu_temp} °C")
with temp_col3:
st.write(f"Case: {case_temp} °C")
except Exception as e:
st.warning(f"Could not read temperatures: {e}")
st.subheader("Change password")
if is_pw_disabled():
st.info("Frontend password protection is disabled via DISABLE_FRONTEND_PW.")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,18 @@
from smbus2 import SMBus
def read_case_temp():
addr = 0x48 # change if your scan shows different
with SMBus(1) as bus:
msb, lsb = bus.read_i2c_block_data(addr, 0x00, 2)
raw = ((msb << 8) | lsb) >> 4
if raw & 0x800: # sign bit for 12-bit
raw -= 1 << 12
return round(raw * 0.0625, 2)
def read_cpu_temp():
    """Return the SoC/CPU temperature in °C.

    Reads the kernel thermal zone 0 sysfs file, which reports the value in
    millidegrees Celsius; rounds to two decimal places.
    """
    with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
        return round(int(f.read()) / 1000, 2)
if __name__ == "__main__":
    # Manual smoke test: print both sensor readings once.
    print("Case temperature: ", read_case_temp(), "°C")
    print("CPU temperature: ", read_cpu_temp(), "°C")

View File

@@ -232,13 +232,19 @@ def get_alsa_usb_inputs():
name = dev.get('name', '').lower()
# Filter for USB devices based on common patterns:
# - Contains 'usb' in the name
# - hw:X,Y pattern (ALSA hardware devices)
# - hw:X or hw:X,Y pattern present anywhere in name (ALSA hardware devices)
# - dsnoop/ch1/ch2 convenience entries from asound.conf
# Exclude: default, dmix, pulse, pipewire, sysdefault
if any(exclude in name for exclude in ['default', 'dmix', 'pulse', 'pipewire', 'sysdefault']):
continue
# Include if it has 'usb' in name or matches hw:X pattern
if 'usb' in name or re.match(r'hw:\d+', name):
# Include if it has 'usb' or contains an hw:* token, or matches common dsnoop/mono aliases
if (
'usb' in name or
re.search(r'hw:\d+(?:,\d+)?', name) or
name.startswith('dsnoop') or
name in ('ch1', 'ch2')
):
usb_inputs.append((idx, dev))
return usb_inputs

28
src/misc/asound.conf Normal file
View File

@@ -0,0 +1,28 @@
# Split the stereo PCM1862 I2S capture into two mono inputs (ch1/ch2).
# Both PCMs are dsnoop devices sharing the same ipc_key, so they attach to
# one shared capture stream of the hardware and can be opened concurrently.
# bindings.0 selects which hardware channel each mono PCM exposes.
pcm.ch1 {
    type dsnoop
    ipc_key 234884
    slave {
        pcm "hw:CARD=i2s,DEV=0"
        channels 2
        rate 48000
        format S16_LE
        # Small period/buffer keeps capture latency low for the broadcast path.
        period_size 120
        buffer_size 240
    }
    # Expose hardware channel 0 (left) as this mono device.
    bindings.0 0
}

pcm.ch2 {
    type dsnoop
    ipc_key 234884
    slave {
        pcm "hw:CARD=i2s,DEV=0"
        channels 2
        rate 48000
        format S16_LE
        period_size 120
        buffer_size 240
    }
    # Expose hardware channel 1 (right) as this mono device.
    bindings.0 1
}

19
src/misc/build_pcm1862_dts.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Build the PCM1862 I2S device-tree overlay and install it into the
# Raspberry Pi firmware overlays directory. Run from the repository root.
set -euo pipefail

DTS=./src/misc/pcm1862-i2s.dts
DTBO=pcm1862-i2s.dtbo
OUT=/boot/firmware/overlays

# build (-@ keeps overlay symbols so fragment targets can be resolved)
dtc -@ -I dts -O dtb -o "$DTBO" "$DTS"

# install
sudo install -m 0644 "$DTBO" "$OUT/$DTBO"

# NOTE: also add
#   dtparam=i2c_arm=on
#   dtoverlay=pcm1862-i2s
# to /boot/firmware/config.txt
echo "Built and installed $DTBO to $OUT."
echo "Now either reboot to load the installed overlay, or load it immediately with: sudo dtoverlay pcm1862-i2s"

View File

@@ -0,0 +1 @@
# Install the PCM1862 ALSA configuration (ch1/ch2 dsnoop inputs) system-wide.
sudo cp src/misc/asound.conf /etc/asound.conf

55
src/misc/pcm1862-i2s.dts Normal file
View File

@@ -0,0 +1,55 @@
/dts-v1/;
/plugin/;

/* Device-tree overlay wiring a TI PCM1862 ADC (on I2C1) to the Raspberry Pi
   bcm2835 I2S controller via a simple-audio-card sound card. */
/ {
    compatible = "brcm,bcm2835";

    /* Enable the I²S controller */
    fragment@0 {
        target = <&i2s>;
        __overlay__ {
            status = "okay";
        };
    };

    /* PCM1862 on I2C1 at 0x4a (change if your bus/address differ) */
    fragment@1 {
        target = <&i2c1>;
        __overlay__ {
            #address-cells = <1>;
            #size-cells = <0>;

            pcm1862: adc@4a {
                compatible = "ti,pcm1862";
                reg = <0x4a>;
                #sound-dai-cells = <0>;
                /* Rails are hard-powered on your board, so no regulators here */
            };
        };
    };

    /* Link bcm2835-i2s <-> pcm1862 via simple-audio-card */
    fragment@2 {
        target-path = "/";
        __overlay__ {
            pcm1862_sound: pcm1862-sound {
                compatible = "simple-audio-card";
                simple-audio-card,name = "pcm1862 on i2s";
                simple-audio-card,format = "i2s";

                /* Pi is master for BCLK/LRCLK */
                simple-audio-card,bitclock-master = <&dai_cpu>;
                simple-audio-card,frame-master = <&dai_cpu>;

                dai_cpu: simple-audio-card,cpu {
                    sound-dai = <&i2s>;
                    /* 2 slots x 32 bit: one stereo I2S frame */
                    dai-tdm-slot-num = <2>;
                    dai-tdm-slot-width = <32>;
                };

                simple-audio-card,codec {
                    sound-dai = <&pcm1862>;
                };
            };
        };
    };
};

View File

@@ -1,16 +0,0 @@
import sounddevice as sd, pprint
from auracast.utils.sounddevice_utils import devices_by_backend
print("PortAudio library:", sd._libname)
print("PortAudio version:", sd.get_portaudio_version())
print("\nHost APIs:")
pprint.pprint(sd.query_hostapis())
print("\nDevices:")
pprint.pprint(sd.query_devices())
# Example: only PulseAudio devices on Linux
print("\nOnly PulseAudio devices:")
for i, d in devices_by_backend("PulseAudio"):
print(f"{i}: {d['name']} in={d['max_input_channels']} out={d['max_output_channels']}")

View File

@@ -0,0 +1,47 @@
import sounddevice as sd, pprint
from auracast.utils.sounddevice_utils import (
    devices_by_backend,
    get_alsa_inputs,
    get_alsa_usb_inputs,
    get_network_pw_inputs,
    refresh_pw_cache,
)

# Diagnostic script: dump PortAudio host APIs and devices, then the project's
# filtered ALSA / USB / PipeWire input views, to debug device discovery.

print("PortAudio library:", sd._libname)
print("PortAudio version:", sd.get_portaudio_version())

print("\nHost APIs:")
apis = sd.query_hostapis()
pprint.pprint(apis)

print("\nAll Devices (with host API name):")
devs = sd.query_devices()
for i, d in enumerate(devs):
    # Resolve the host-API name defensively; '?' if the index looks invalid.
    ha_name = apis[d['hostapi']]['name'] if isinstance(d.get('hostapi'), int) and d['hostapi'] < len(apis) else '?'
    if d.get('max_input_channels', 0) > 0:
        print(f"IN {i:>3}: {d['name']} api={ha_name} in={d['max_input_channels']}")
    elif d.get('max_output_channels', 0) > 0:
        print(f"OUT {i:>3}: {d['name']} api={ha_name} out={d['max_output_channels']}")
    else:
        print(f"DEV {i:>3}: {d['name']} api={ha_name} (no I/O)")

print("\nALSA input devices (PortAudio ALSA host):")
for i, d in devices_by_backend('ALSA'):
    if d.get('max_input_channels', 0) > 0:
        print(f"ALSA {i:>3}: {d['name']} in={d['max_input_channels']}")

print("\nALSA USB-filtered inputs:")
for i, d in get_alsa_usb_inputs():
    print(f"USB {i:>3}: {d['name']} in={d['max_input_channels']}")

print("\nRefreshing PipeWire caches...")
# Best-effort: PipeWire may be absent on this host; the dump below then
# simply shows whatever is left in the cache.
try:
    refresh_pw_cache()
except Exception:
    pass
print("PipeWire Network inputs (from cache):")
for i, d in get_network_pw_inputs():
    print(f"NET {i:>3}: {d['name']} in={d.get('max_input_channels', 0)}")

View File

@@ -0,0 +1,36 @@
import csv
import time
from datetime import datetime
from pathlib import Path
from auracast.utils.read_temp import read_case_temp, read_cpu_temp
def main() -> None:
    """Log CPU and case temperatures to a timestamped CSV every 30 seconds.

    The CSV file is created next to this script and named after the start
    time; logging runs until interrupted with Ctrl-C.
    """
    script_path = Path(__file__).resolve()
    log_dir = script_path.parent
    start_time = datetime.now()
    filename = start_time.strftime("temperature_log_%Y%m%d_%H%M%S.csv")
    log_path = log_dir / filename

    with log_path.open("w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["timestamp", "cpu_temp_c", "case_temp_c"])
        try:
            while True:
                now = datetime.now().isoformat(timespec="seconds")
                cpu_temp = read_cpu_temp()
                case_temp = read_case_temp()
                writer.writerow([now, cpu_temp, case_temp])
                # Flush each row so the file is usable while logging runs.
                csvfile.flush()
                time.sleep(30)
        except KeyboardInterrupt:
            # Ctrl-C ends logging cleanly; the 'with' block closes the file.
            pass


if __name__ == "__main__":
    main()

View File

@@ -1,8 +0,0 @@
from smbus2 import SMBus
addr = 0x48 # change if your scan shows different
with SMBus(1) as bus:
msb, lsb = bus.read_i2c_block_data(addr, 0x00, 2)
raw = ((msb << 8) | lsb) >> 4
if raw & 0x800: # sign bit for 12-bit
raw -= 1 << 12
print(f"{raw * 0.0625:.2f} °C")

View File

@@ -9,6 +9,9 @@ ExecStart=/home/caster/.local/bin/poetry run python src/auracast/server/multicas
Restart=on-failure
Environment=PYTHONUNBUFFERED=1
Environment=LOG_LEVEL=INFO
CPUSchedulingPolicy=fifo
CPUSchedulingPriority=99
LimitRTPRIO=99
[Install]
WantedBy=default.target