feat: improve audio streaming UI and configuration (#11)

- Renamed "AES67" mode to "Network" for clearer user understanding
- Added structured stream controls with consistent start/stop buttons and status display
- Changed presentation delay input from microseconds to milliseconds for better usability
- Restricted retransmission (RTN) options to valid range of 1-4
- Added help tooltips for assisted listening and immediate rendering options
- Fixed portaudio configuration to enable ALSA support and remove conflicting system PortAudio packages before building from source

Co-authored-by: pstruebi <struebin.patrick.com>
Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/11
This commit was merged in pull request #11.
This commit is contained in:
2025-10-31 10:46:42 +01:00
parent 00a832a1fd
commit 5a1e1f13ac
5 changed files with 96 additions and 318 deletions

1
.gitignore vendored
View File

@@ -43,3 +43,4 @@ src/auracast/server/stream_settings.json
src/auracast/server/certs/per_device/ src/auracast/server/certs/per_device/
src/auracast/.env src/auracast/.env
src/auracast/server/certs/ca/ca_cert.srl src/auracast/server/certs/ca/ca_cert.srl
src/auracast/server/credentials.json

View File

@@ -170,6 +170,7 @@ option snd_usb_audio nrpacks=1
sudo apt install -y --no-install-recommends \ sudo apt install -y --no-install-recommends \
git build-essential cmake pkg-config \ git build-essential cmake pkg-config \
libasound2-dev libpulse-dev pipewire ethtool linuxptp libasound2-dev libpulse-dev pipewire ethtool linuxptp
sudo apt remove -y libportaudio2 portaudio19-dev libportaudiocpp0
git clone https://github.com/PortAudio/portaudio.git git clone https://github.com/PortAudio/portaudio.git
cd portaudio cd portaudio
@@ -177,7 +178,7 @@ git checkout 9abe5fe7db729280080a0bbc1397a528cd3ce658
rm -rf build rm -rf build
cmake -S . -B build -G"Unix Makefiles" \ cmake -S . -B build -G"Unix Makefiles" \
-DBUILD_SHARED_LIBS=ON \ -DBUILD_SHARED_LIBS=ON \
-DPA_USE_ALSA=OFF \ -DPA_USE_ALSA=ON \
-DPA_USE_PULSEAUDIO=ON \ -DPA_USE_PULSEAUDIO=ON \
-DPA_USE_JACK=OFF -DPA_USE_JACK=OFF
cmake --build build -j$(nproc) cmake --build build -j$(nproc)

View File

@@ -79,8 +79,6 @@ if not is_pw_disabled():
if not st.session_state['frontend_authenticated']: if not st.session_state['frontend_authenticated']:
st.stop() st.stop()
# Global: desired packetization time in ms for Opus (should match backend)
PTIME = 40
BACKEND_URL = "http://localhost:5000" BACKEND_URL = "http://localhost:5000"
QUALITY_MAP = { QUALITY_MAP = {
@@ -104,18 +102,31 @@ is_streaming = bool(saved_settings.get("is_streaming", False))
st.title("Auracast Audio Mode Control") st.title("Auracast Audio Mode Control")
def render_stream_controls(status_streaming: bool, start_label: str, stop_label: str, mode_label: str):
c_start, c_stop, c_spacer, c_status = st.columns([1, 1, 1, 2], gap="small", vertical_alignment="center")
with c_start:
start_clicked = st.button(start_label, disabled=status_streaming)
with c_stop:
stop_clicked = st.button(stop_label, disabled=not status_streaming)
with c_status:
st.write(
(
f"Mode: {mode_label} · " + ("🟢 Streaming" if status_streaming else "🔴 Stopped")
)
)
return start_clicked, stop_clicked
# Audio mode selection with persisted default # Audio mode selection with persisted default
# Note: backend persists 'USB' for any device:<name> source (including AES67). We default to 'USB' in that case. # Note: backend persists 'USB' for any device:<name> source (including AES67). We default to 'USB' in that case.
options = [ options = [
"Demo", "Demo",
"USB", "USB",
"AES67", "Network",
# "Webapp"
] ]
saved_audio_mode = saved_settings.get("audio_mode", "Demo") saved_audio_mode = saved_settings.get("audio_mode", "Demo")
if saved_audio_mode not in options: if saved_audio_mode not in options:
# Map legacy/unknown modes to closest # Map legacy/unknown modes to closest
mapping = {"USB/Network": "USB", "Network": "AES67"} mapping = {"USB/Network": "USB", "AES67": "Network"}
saved_audio_mode = mapping.get(saved_audio_mode, "Demo") saved_audio_mode = mapping.get(saved_audio_mode, "Demo")
audio_mode = st.selectbox( audio_mode = st.selectbox(
@@ -123,15 +134,35 @@ audio_mode = st.selectbox(
options, options,
index=options.index(saved_audio_mode) if saved_audio_mode in options else options.index("Demo"), index=options.index(saved_audio_mode) if saved_audio_mode in options else options.index("Demo"),
help=( help=(
"Select the audio input source. Choose 'Webapp' for browser microphone, " "Select the audio input source. Choose 'USB' for a connected USB audio device (via PipeWire), "
"'USB' for a connected USB audio device (via PipeWire), 'AES67' for network RTP/AES67 sources, " "'Network' (AES67) for network RTP/AES67 sources, "
"or 'Demo' for a simulated stream." "or 'Demo' for a simulated stream."
) )
) )
# Determine the displayed mode label:
# - While streaming, prefer the backend-reported mode
# - When not streaming, show the currently selected mode
backend_mode_raw = saved_settings.get("audio_mode")
backend_mode_mapped = None
if isinstance(backend_mode_raw, str):
if backend_mode_raw == "AES67":
backend_mode_mapped = "Network"
elif backend_mode_raw == "USB/Network":
backend_mode_mapped = "USB"
elif backend_mode_raw in options:
backend_mode_mapped = backend_mode_raw
running_mode = backend_mode_mapped if (is_streaming and backend_mode_mapped) else audio_mode
is_started = False
is_stopped = False
if audio_mode == "Demo": if audio_mode == "Demo":
demo_stream_map = { demo_stream_map = {
"1 × 48kHz": {"quality": "High (48kHz)", "streams": 1}, "1 × 48kHz": {"quality": "High (48kHz)", "streams": 1},
"1 × 24kHz": {"quality": "Medium (24kHz)", "streams": 1},
"1 × 16kHz": {"quality": "Fair (16kHz)", "streams": 1},
"2 × 24kHz": {"quality": "Medium (24kHz)", "streams": 2}, "2 × 24kHz": {"quality": "Medium (24kHz)", "streams": 2},
"3 × 16kHz": {"quality": "Fair (16kHz)", "streams": 3}, "3 × 16kHz": {"quality": "Fair (16kHz)", "streams": 3},
"2 × 48kHz": {"quality": "High (48kHz)", "streams": 2}, "2 × 48kHz": {"quality": "High (48kHz)", "streams": 2},
@@ -154,40 +185,41 @@ if audio_mode == "Demo":
type=("password"), type=("password"),
help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast." help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast."
) )
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 1, 1], gap="small") col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small", vertical_alignment="center")
with col_flags1: with col_flags1:
assisted_listening = st.checkbox( assisted_listening = st.checkbox(
"Assistive listening", "Assistive listening",
value=bool(saved_settings.get('assisted_listening_stream', False)) value=bool(saved_settings.get('assisted_listening_stream', False)),
help="tells the receiver that this is an assistive listening stream"
) )
with col_flags2: with col_flags2:
immediate_rendering = st.checkbox( immediate_rendering = st.checkbox(
"Immediate rendering", "Immediate rendering",
value=bool(saved_settings.get('immediate_rendering', False)) value=bool(saved_settings.get('immediate_rendering', False)),
help="tells the receiver to ignore presentation delay and render immediately if possible."
) )
# QoS/presentation controls inline with flags # QoS/presentation controls inline with flags
default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000)
with col_pdelay: with col_pdelay:
presentation_delay_us = st.number_input( default_pdelay_ms = max(10, min(200, default_pdelay // 1000))
"Presentation delay (µs)", presentation_delay_ms = st.number_input(
min_value=10000, max_value=200000, step=1000, value=default_pdelay, "Delay (ms)",
min_value=10, max_value=200, step=5, value=default_pdelay_ms,
help="Delay between capture and presentation for receivers." help="Delay between capture and presentation for receivers."
) )
default_rtn = int(saved_settings.get('rtn', 4) or 4) default_rtn = int(saved_settings.get('rtn', 4) or 4)
with col_rtn: with col_rtn:
rtn_options = [1,2,3,4]
default_rtn_clamped = min(4, max(1, default_rtn))
rtn = st.selectbox( rtn = st.selectbox(
"Retransmissions (RTN)", options=[0,1,2,3,4], index=[0,1,2,3,4].index(default_rtn), "RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
help="Number of ISO retransmissions (higher improves robustness at cost of airtime)." help="Number of ISO retransmissions (higher improves robustness at cost of airtime)."
) )
#st.info(f"Demo mode selected: {demo_selected} (Streams: {demo_stream_map[demo_selected]['streams']}, Rate: {demo_stream_map[demo_selected]['rate']} Hz)") #st.info(f"Demo mode selected: {demo_selected} (Streams: {demo_stream_map[demo_selected]['streams']}, Rate: {demo_stream_map[demo_selected]['rate']} Hz)")
# Start/Stop buttons for demo mode # Start/Stop buttons for demo mode
if 'demo_stream_started' not in st.session_state: if 'demo_stream_started' not in st.session_state:
st.session_state['demo_stream_started'] = False st.session_state['demo_stream_started'] = False
col1, col2 = st.columns(2) start_demo, stop_demo = render_stream_controls(is_streaming, "Start Demo", "Stop Demo", running_mode)
with col1:
start_demo = st.button("Start Demo Stream", disabled=is_streaming)
with col2:
stop_demo = st.button("Stop Demo Stream", disabled=not is_streaming)
if start_demo: if start_demo:
# Always stop any running stream for clean state # Always stop any running stream for clean state
try: try:
@@ -232,7 +264,7 @@ if audio_mode == "Demo":
transport='', # is set in baccol_qoskend transport='', # is set in baccol_qoskend
assisted_listening_stream=assisted_listening, assisted_listening_stream=assisted_listening,
immediate_rendering=immediate_rendering, immediate_rendering=immediate_rendering,
presentation_delay_us=presentation_delay_us, presentation_delay_us=int(presentation_delay_ms * 1000),
qos_config=auracast_config.AuracastQoSConfig( qos_config=auracast_config.AuracastQoSConfig(
iso_int_multiple_10ms=1, iso_int_multiple_10ms=1,
number_of_retransmissions=int(rtn), number_of_retransmissions=int(rtn),
@@ -248,7 +280,7 @@ if audio_mode == "Demo":
transport='', # is set in backend transport='', # is set in backend
assisted_listening_stream=assisted_listening, assisted_listening_stream=assisted_listening,
immediate_rendering=immediate_rendering, immediate_rendering=immediate_rendering,
presentation_delay_us=presentation_delay_us, presentation_delay_us=int(presentation_delay_ms * 1000),
qos_config=auracast_config.AuracastQoSConfig( qos_config=auracast_config.AuracastQoSConfig(
iso_int_multiple_10ms=1, iso_int_multiple_10ms=1,
number_of_retransmissions=int(rtn), number_of_retransmissions=int(rtn),
@@ -257,43 +289,38 @@ if audio_mode == "Demo":
bigs=bigs2 bigs=bigs2
) )
# Call /init and /init2 # Call /init and /init2
is_started = False
try: try:
r1 = requests.post(f"{BACKEND_URL}/init", json=config1.model_dump()) r1 = requests.post(f"{BACKEND_URL}/init", json=config1.model_dump())
if r1.status_code == 200: if r1.status_code == 200:
msg = f"Demo stream started on multicaster 1 ({len(bigs1)} streams)"
st.session_state['demo_stream_started'] = True st.session_state['demo_stream_started'] = True
st.success(msg) is_started = True
else: else:
st.session_state['demo_stream_started'] = False st.session_state['demo_stream_started'] = False
st.error(f"Failed to initialize multicaster 1: {r1.text}") st.error(f"Failed to initialize multicaster 1: {r1.text}")
if config2: if config2:
r2 = requests.post(f"{BACKEND_URL}/init2", json=config2.model_dump()) r2 = requests.post(f"{BACKEND_URL}/init2", json=config2.model_dump())
if r2.status_code == 200: if r2.status_code == 200:
st.success(f"Demo stream started on multicaster 2 ({len(bigs2)} streams)") is_started = True
else: else:
st.error(f"Failed to initialize multicaster 2: {r2.text}") st.error(f"Failed to initialize multicaster 2: {r2.text}")
except Exception as e: except Exception as e:
st.session_state['demo_stream_started'] = False st.session_state['demo_stream_started'] = False
st.error(f"Error: {e}") st.error(f"Error: {e}")
if is_started:
pass
elif stop_demo: elif stop_demo:
try: try:
r = requests.post(f"{BACKEND_URL}/stop_audio").json() r = requests.post(f"{BACKEND_URL}/stop_audio").json()
st.session_state['demo_stream_started'] = False st.session_state['demo_stream_started'] = False
if r.get('was_running'): if r.get('was_running'):
st.info("Demo stream stopped.") is_stopped = True
st.rerun()
else:
st.info("Demo stream was not running.")
except Exception as e: except Exception as e:
st.error(f"Error: {e}") st.error(f"Error: {e}")
elif st.session_state['demo_stream_started']:
st.success(f"Demo stream running: {demo_selected}")
else:
st.info("Demo stream not running.")
quality = None # Not used in demo mode quality = None # Not used in demo mode
else: else:
# Stream quality selection (now enabled) # Stream quality selection (now enabled)
quality_options = list(QUALITY_MAP.keys()) quality_options = list(QUALITY_MAP.keys())
default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0] default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0]
quality = st.selectbox( quality = st.selectbox(
@@ -333,39 +360,40 @@ else:
help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast." help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast."
) )
# Flags and QoS row (compact, four columns) # Flags and QoS row (compact, four columns)
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 1, 1], gap="small") col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small")
with col_flags1: with col_flags1:
assisted_listening = st.checkbox( assisted_listening = st.checkbox(
"Assistive listening", "Assistive listening",
value=bool(saved_settings.get('assisted_listening_stream', False)) value=bool(saved_settings.get('assisted_listening_stream', False)),
help="tells the receiver that this is an assistive listening stream"
) )
with col_flags2: with col_flags2:
immediate_rendering = st.checkbox( immediate_rendering = st.checkbox(
"Immediate rendering", "Immediate rendering",
value=bool(saved_settings.get('immediate_rendering', False)) value=bool(saved_settings.get('immediate_rendering', False)),
help="tells the receiver to ignore presentation delay and render immediately if possible."
) )
# QoS/presentation controls inline with flags # QoS/presentation controls inline with flags
default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000)
with col_pdelay: with col_pdelay:
presentation_delay_us = st.number_input( default_pdelay_ms = max(10, min(200, default_pdelay // 1000))
"Presentation delay (µs)", presentation_delay_ms = st.number_input(
min_value=10000, max_value=200000, step=1000, value=default_pdelay, "Delay (ms)",
min_value=10, max_value=200, step=5, value=default_pdelay_ms,
help="Delay between capture and presentation for receivers." help="Delay between capture and presentation for receivers."
) )
default_rtn = int(saved_settings.get('rtn', 4) or 4) default_rtn = int(saved_settings.get('rtn', 4) or 4)
with col_rtn: with col_rtn:
rtn_options = [1,2,3,4]
default_rtn_clamped = min(4, max(1, default_rtn))
rtn = st.selectbox( rtn = st.selectbox(
"Retransmissions (RTN)", options=[0,1,2,3,4], index=[0,1,2,3,4].index(default_rtn), "RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
help="Number of ISO retransmissions (higher improves robustness at cost of airtime)." help="Number of ISO retransmissions (higher improves robustness at cost of airtime)."
) )
# Gain slider for Webapp mode
if audio_mode == "Webapp":
mic_gain = st.slider("Microphone Gain", 0.0, 2.0, 1.0, 0.1, help="Adjust microphone volume sent to Auracast")
else:
mic_gain = 1.0
# Input device selection for USB or AES67 mode # Input device selection for USB or AES67 mode
if audio_mode in ("USB", "AES67"): if audio_mode in ("USB", "Network"):
if not is_streaming: if not is_streaming:
# Only query device lists when NOT streaming to avoid extra backend calls # Only query device lists when NOT streaming to avoid extra backend calls
try: try:
@@ -438,55 +466,17 @@ else:
) )
else: else:
input_device = None input_device = None
start_stream, stop_stream = render_stream_controls(is_streaming, "Start Auracast", "Stop Auracast", running_mode)
# Buttons and status on a single row (4 columns: start, stop, spacer, status)
c_start, c_stop, c_spacer, c_status = st.columns([1, 1, 1, 2], gap="small", vertical_alignment="center")
with c_start:
start_stream = st.button("Start Auracast", disabled=is_streaming)
with c_stop:
stop_stream = st.button("Stop Auracast", disabled=not is_streaming)
# c_spacer intentionally left empty to push status to the far right
with c_status:
# Fetch current status from backend and render using Streamlit widgets (no HTML)
# The is_streaming variable is now defined at the top of the script.
# We only need to re-fetch here if we want the absolute latest status for the display,
# but for UI consistency, we can just use the value from the top of the script run.
st.write("🟢 Streaming" if is_streaming else "🔴 Stopped")
# If gain slider moved while streaming, send update to JS without restarting
if audio_mode == "Webapp" and st.session_state.get('stream_started'):
update_js = f"""
<script>
if (window.gainNode) {{ window.gainNode.gain.value = {mic_gain}; }}
</script>
"""
st.components.v1.html(update_js, height=0)
if stop_stream: if stop_stream:
st.session_state['stream_started'] = False st.session_state['stream_started'] = False
try: try:
r = requests.post(f"{BACKEND_URL}/stop_audio").json() r = requests.post(f"{BACKEND_URL}/stop_audio").json()
if r['was_running']: if r['was_running']:
st.success("Stream Stopped!") is_stopped = True
st.rerun()
else:
st.success("Stream was not running.")
except Exception as e: except Exception as e:
st.error(f"Error: {e}") st.error(f"Error: {e}")
# Ensure existing WebRTC connection is fully closed so that a fresh
# connection is created the next time we start the stream.
if audio_mode == "Webapp":
cleanup_js = """
<script>
if (window.webrtc_pc) {
window.webrtc_pc.getSenders().forEach(s => s.track.stop());
window.webrtc_pc.close();
window.webrtc_pc = null;
}
window.webrtc_started = false;
</script>
"""
st.components.v1.html(cleanup_js, height=0)
if start_stream: if start_stream:
# Always send stop to ensure backend is in a clean state, regardless of current status # Always send stop to ensure backend is in a clean state, regardless of current status
@@ -504,7 +494,7 @@ else:
transport='', # is set in backend transport='', # is set in backend
assisted_listening_stream=assisted_listening, assisted_listening_stream=assisted_listening,
immediate_rendering=immediate_rendering, immediate_rendering=immediate_rendering,
presentation_delay_us=presentation_delay_us, presentation_delay_us=int(presentation_delay_ms * 1000),
qos_config=auracast_config.AuracastQoSConfig( qos_config=auracast_config.AuracastQoSConfig(
iso_int_multiple_10ms=1, iso_int_multiple_10ms=1,
number_of_retransmissions=int(rtn), number_of_retransmissions=int(rtn),
@@ -516,12 +506,8 @@ else:
name=stream_name, name=stream_name,
program_info=program_info, program_info=program_info,
language=language, language=language,
audio_source=( audio_source=(f"device:{input_device}"),
f"device:{input_device}" if audio_mode in ("USB", "AES67") else ( input_format=(f"int16le,{q['rate']},1"),
"webrtc" if audio_mode == "Webapp" else "network"
)
),
input_format=(f"int16le,{q['rate']},1" if audio_mode in ("USB", "AES67") else "auto"),
iso_que_len=1, iso_que_len=1,
sampling_frequency=q['rate'], sampling_frequency=q['rate'],
octets_per_frame=q['octets'], octets_per_frame=q['octets'],
@@ -532,71 +518,18 @@ else:
try: try:
r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump()) r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump())
if r.status_code == 200: if r.status_code == 200:
st.success("Stream Started!") is_started = True
st.rerun()
else: else:
st.error(f"Failed to initialize: {r.text}") st.error(f"Failed to initialize: {r.text}")
except Exception as e: except Exception as e:
st.error(f"Error: {e}") st.error(f"Error: {e}")
# Render / maintain WebRTC component # Centralized rerun based on start/stop outcomes
if audio_mode == "Webapp" and (start_stream or st.session_state.get('stream_started')): if is_started or is_stopped:
st.markdown("Starting microphone; allow access if prompted and speak.") st.rerun()
component = f"""
<script>
(async () => {{
// Clean up any previous WebRTC connection before starting a new one
if (window.webrtc_pc) {{
window.webrtc_pc.getSenders().forEach(s => s.track.stop());
window.webrtc_pc.close();
}}
const GAIN_VALUE = {mic_gain};
const pc = new RTCPeerConnection(); // No STUN needed for localhost
window.webrtc_pc = pc;
window.webrtc_started = true;
const micStream = await navigator.mediaDevices.getUserMedia({{audio:true}});
// Create Web Audio gain processing
const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
const source = audioCtx.createMediaStreamSource(micStream);
const gainNode = audioCtx.createGain();
gainNode.gain.value = GAIN_VALUE;
// Expose for later adjustments
window.gainNode = gainNode;
const dest = audioCtx.createMediaStreamDestination();
source.connect(gainNode).connect(dest);
// Add processed tracks to WebRTC
dest.stream.getTracks().forEach(t => pc.addTrack(t, dest.stream));
// --- WebRTC offer/answer exchange ---
const offer = await pc.createOffer();
// Patch SDP offer to include a=ptime using global PTIME
let sdp = offer.sdp;
const ptime_line = 'a=ptime:{PTIME}';
const maxptime_line = 'a=maxptime:{PTIME}';
if (sdp.includes('a=sendrecv')) {{
sdp = sdp.replace('a=sendrecv', 'a=sendrecv\\n' + ptime_line + '\\n' + maxptime_line);
}} else {{
sdp += '\\n' + ptime_line + '\\n' + maxptime_line;
}}
const patched_offer = new RTCSessionDescription({{sdp, type: offer.type}});
await pc.setLocalDescription(patched_offer);
// Send offer to backend
const response = await fetch(
"{BACKEND_URL}/offer",
{{
method: 'POST',
headers: {{'Content-Type':'application/json'}},
body: JSON.stringify({{sdp: pc.localDescription.sdp, type: pc.localDescription.type}})
}}
);
const answer = await response.json();
await pc.setRemoteDescription(new RTCSessionDescription({{sdp: answer.sdp, type: answer.type}}));
}})();
</script>
"""
st.components.v1.html(component, height=0)
st.session_state['stream_started'] = True
#else: #else:
# st.header("Advertised Streams (Cloud Announcements)") #st.header("Advertised Streams (Cloud Announcements)")
#st.info("This feature requires backend support to list advertised streams.")
# st.info("This feature requires backend support to list advertised streams.") # st.info("This feature requires backend support to list advertised streams.")
# Placeholder for future implementation # Placeholder for future implementation
# Example: r = requests.get(f"{BACKEND_URL}/advertised_streams") # Example: r = requests.get(f"{BACKEND_URL}/advertised_streams")

View File

@@ -4,7 +4,6 @@ TODO: in the future the multicaster objects should run in their own threads or e
""" """
import os import os
import logging as log import logging as log
import uuid
import json import json
import sys import sys
import threading import threading
@@ -12,18 +11,12 @@ from concurrent.futures import Future
from datetime import datetime from datetime import datetime
import time import time
import asyncio import asyncio
import numpy as np
from dotenv import load_dotenv from dotenv import load_dotenv
from pydantic import BaseModel
from fastapi import FastAPI, HTTPException from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
from auracast import multicast_control, auracast_config from auracast import multicast_control, auracast_config
from aiortc import RTCPeerConnection, RTCSessionDescription, MediaStreamTrack
import av
import av.audio.layout
import sounddevice as sd # type: ignore import sounddevice as sd # type: ignore
from typing import Set
import traceback import traceback
from auracast.utils.sounddevice_utils import ( from auracast.utils.sounddevice_utils import (
get_usb_pw_inputs, get_usb_pw_inputs,
@@ -39,17 +32,14 @@ STREAM_SETTINGS_FILE = os.path.join(os.path.dirname(__file__), 'stream_settings.
TRANSPORT1 = os.getenv('TRANSPORT1', 'serial:/dev/ttyAMA3,1000000,rtscts') # transport for raspberry pi gpio header TRANSPORT1 = os.getenv('TRANSPORT1', 'serial:/dev/ttyAMA3,1000000,rtscts') # transport for raspberry pi gpio header
TRANSPORT2 = os.getenv('TRANSPORT2', 'serial:/dev/ttyAMA4,1000000,rtscts') # transport for raspberry pi gpio header TRANSPORT2 = os.getenv('TRANSPORT2', 'serial:/dev/ttyAMA4,1000000,rtscts') # transport for raspberry pi gpio header
PTIME = 40 # seems to have no effect at all
pcs: Set[RTCPeerConnection] = set() # keep refs so they dont GC early
os.environ["PULSE_LATENCY_MSEC"] = "3" os.environ["PULSE_LATENCY_MSEC"] = "3"
# In-memory cache to avoid disk I/O on hot paths like /status # In-memory cache to avoid disk I/O on hot paths like /status
SETTINGS_CACHE: dict = {} SETTINGS_CACHE: dict = {}
class Offer(BaseModel):
sdp: str
type: str
def get_device_index_by_name(name: str): def get_device_index_by_name(name: str):
"""Return the device index for a given device name, or None if not found. """Return the device index for a given device name, or None if not found.
@@ -184,7 +174,7 @@ class StreamerWorker:
net_names = {d.get('name') for _, d in get_network_pw_inputs()} net_names = {d.get('name') for _, d in get_network_pw_inputs()}
except Exception: except Exception:
usb_names, net_names = set(), set() usb_names, net_names = set(), set()
audio_mode_persist = 'AES67' if (input_device_name in net_names) else 'USB' audio_mode_persist = 'Network' if (input_device_name in net_names) else 'USB'
# Map device name to index and configure input_format # Map device name to index and configure input_format
device_index = int(input_device_name) if (input_device_name and input_device_name.isdigit()) else get_device_index_by_name(input_device_name or '') device_index = int(input_device_name) if (input_device_name and input_device_name.isdigit()) else get_device_index_by_name(input_device_name or '')
@@ -334,12 +324,6 @@ async def initialize2(conf: auracast_config.AuracastConfigGroup):
async def stop_audio(): async def stop_audio():
"""Stops streaming on both multicaster1 and multicaster2 (worker thread).""" """Stops streaming on both multicaster1 and multicaster2 (worker thread)."""
try: try:
# Close any active PeerConnections
close_tasks = [pc.close() for pc in list(pcs)]
pcs.clear()
if close_tasks:
await asyncio.gather(*close_tasks, return_exceptions=True)
was_running = await streamer.call(streamer._w_stop_all) was_running = await streamer.call(streamer._w_stop_all)
# Persist is_streaming=False # Persist is_streaming=False
@@ -536,138 +520,6 @@ async def refresh_audio_devices():
raise HTTPException(status_code=500, detail=f"Failed to refresh devices: {e}") raise HTTPException(status_code=500, detail=f"Failed to refresh devices: {e}")
# async def offer(offer: Offer):
# @app.post("/offer") #webrtc endpoint
# log.info("/offer endpoint called")
# # If a previous PeerConnection is still alive, close it so we only ever keep one active.
# if pcs:
# log.info("Closing %d existing PeerConnection(s) before creating a new one", len(pcs))
# close_tasks = [p.close() for p in list(pcs)]
# await asyncio.gather(*close_tasks, return_exceptions=True)
# pcs.clear()
# pc = RTCPeerConnection() # No STUN needed for localhost
# pcs.add(pc)
# id_ = uuid.uuid4().hex[:8]
# log.info(f"{id_}: new PeerConnection")
# # create directory for records - only for testing
# os.makedirs("./records", exist_ok=True)
# # Do NOT start the streamer yet we'll start it lazily once we actually
# # receive the first audio frame, ensuring WebRTCAudioInput is ready and
# # avoiding race-conditions on restarts.
# @pc.on("track")
# async def on_track(track: MediaStreamTrack):
# log.info(f"{id_}: track {track.kind} received")
# try:
# first = True
# while True:
# frame: av.audio.frame.AudioFrame = await track.recv() # RTP audio frame (already decrypted)
# if first:
# log.info(f"{id_}: frame layout={frame.layout}")
# log.info(f"{id_}: frame format={frame.format}")
# log.info(
# f"{id_}: frame sample_rate={frame.sample_rate}, samples_per_channel={frame.samples}, planes={frame.planes}"
# )
# # Lazily start the streamer now that we know a track exists.
# if multicaster1.streamer is None:
# await multicaster1.start_streaming()
# # Yield control so the Streamer coroutine has a chance to
# # create the WebRTCAudioInput before we push samples.
# await asyncio.sleep(0)
# # Persist is_streaming=True for Webapp mode
# try:
# settings = load_stream_settings() or {}
# settings['is_streaming'] = True
# settings['timestamp'] = datetime.utcnow().isoformat()
# save_stream_settings(settings)
# except Exception:
# log.warning("Failed to persist is_streaming=True on WebRTC start", exc_info=True)
# first = False
# # in stereo case this is interleaved data format
# frame_array = frame.to_ndarray()
# log.info(f"array.shape{frame_array.shape}")
# log.info(f"array.dtype{frame_array.dtype}")
# log.info(f"frame.to_ndarray(){frame_array}")
# samples = frame_array.reshape(-1)
# log.info(f"samples.shape: {samples.shape}")
# if frame.layout.name == 'stereo':
# # Interleaved stereo: [L0, R0, L1, R1, ...]
# mono_array = samples[::2] # Take left channel
# else:
# mono_array = samples
# log.info(f"mono_array.shape: {mono_array.shape}")
# frame_array = frame.to_ndarray()
# # Flatten in case it's (1, N) or (N,)
# samples = frame_array.reshape(-1)
# if frame.layout.name == 'stereo':
# # Interleaved stereo: [L0, R0, L1, R1, ...]
# mono_array = samples[::2] # Take left channel
# else:
# mono_array = samples
# # Get current WebRTC audio input (streamer may have been restarted)
# big0 = list(multicaster1.bigs.values())[0]
# audio_input = big0.get('audio_input')
# # Wait until the streamer has instantiated the WebRTCAudioInput
# if audio_input is None or getattr(audio_input, 'closed', False):
# continue
# # Feed mono PCM samples to the global WebRTC audio input
# await audio_input.put_samples(mono_array.astype(np.int16))
# # Save to WAV file - only for testing
# # if not hasattr(pc, 'wav_writer'):
# # import wave
# # wav_path = f"./records/auracast_{id_}.wav"
# # pc.wav_writer = wave.open(wav_path, "wb")
# # pc.wav_writer.setnchannels(1) # mono
# # pc.wav_writer.setsampwidth(2) # 16-bit PCM
# # pc.wav_writer.setframerate(frame.sample_rate)
# # pcm_data = mono_array.astype(np.int16).tobytes()
# # pc.wav_writer.writeframes(pcm_data)
# except Exception as e:
# log.error(f"{id_}: Exception in on_track: {e}")
# finally:
# # Always close the wav file when the track ends or on error
# if hasattr(pc, 'wav_writer'):
# try:
# pc.wav_writer.close()
# except Exception:
# pass
# del pc.wav_writer
# # --- SDP negotiation ---
# log.info(f"{id_}: setting remote description")
# await pc.setRemoteDescription(RTCSessionDescription(**offer.model_dump()))
# log.info(f"{id_}: creating answer")
# answer = await pc.createAnswer()
# sdp = answer.sdp
# # Insert a=ptime using the global PTIME variable
# ptime_line = f"a=ptime:{PTIME}"
# if "a=sendrecv" in sdp:
# sdp = sdp.replace("a=sendrecv", f"a=sendrecv\n{ptime_line}")
# else:
# sdp += f"\n{ptime_line}"
# new_answer = RTCSessionDescription(sdp=sdp, type=answer.type)
# await pc.setLocalDescription(new_answer)
# log.info(f"{id_}: sending answer with {ptime_line}")
# return {"sdp": pc.localDescription.sdp,
# "type": pc.localDescription.type}
@app.post("/shutdown") @app.post("/shutdown")
async def shutdown(): async def shutdown():
"""Stops broadcasting and releases all audio/Bluetooth resources.""" """Stops broadcasting and releases all audio/Bluetooth resources."""
@@ -687,13 +539,6 @@ async def system_reboot():
try: try:
# Best-effort: stop any active streaming cleanly WITHOUT persisting state # Best-effort: stop any active streaming cleanly WITHOUT persisting state
try: try:
# Close any WebRTC peer connections
close_tasks = [pc.close() for pc in list(pcs)]
pcs.clear()
if close_tasks:
await asyncio.gather(*close_tasks, return_exceptions=True)
# Stop streaming on worker but DO NOT touch stream_settings.json
try: try:
await streamer.call(streamer._w_stop_all) await streamer.call(streamer._w_stop_all)
except Exception: except Exception:

View File

@@ -25,17 +25,15 @@ def is_pw_disabled() -> bool:
return str(val).strip().lower() in ("1", "true", "yes", "on") return str(val).strip().lower() in ("1", "true", "yes", "on")
# Storage paths and permissions # Storage paths and permissions (store next to multicast_frontend.py)
def state_dir() -> Path: def state_dir() -> Path:
custom = os.getenv("AURACAST_STATE_DIR") # utils/ -> auracast/ -> server/
if custom: return Path(__file__).resolve().parents[1] / "server"
return Path(custom).expanduser()
return Path.home() / ".config" / "auracast"
def pw_file_path() -> Path: def pw_file_path() -> Path:
return state_dir() / "frontend_pw.json" return state_dir() / "credentials.json"
def ensure_state_dir() -> None: def ensure_state_dir() -> None: