From 5a1e1f13ac72c12fc9fd88f422d7db6e647a7aec Mon Sep 17 00:00:00 2001 From: pstruebi Date: Fri, 31 Oct 2025 10:46:42 +0100 Subject: [PATCH] feat: improve audio streaming UI and configuration (#11) - Renamed "AES67" mode to "Network" for clearer user understanding - Added structured stream controls with consistent start/stop buttons and status display - Changed presentation delay input from microseconds to milliseconds for better usability - Restricted retransmission (RTN) options to valid range of 1-4 - Added help tooltips for assisted listening and immediate rendering options - Fixed portaudio configuration to enable ALSA support and remove conflicting system PortAudio packages before building from source Co-authored-by: pstruebi Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/11 --- .gitignore | 1 + README.md | 3 +- src/auracast/server/multicast_frontend.py | 239 ++++++++-------------- src/auracast/server/multicast_server.py | 161 +-------------- src/auracast/utils/frontend_auth.py | 10 +- 5 files changed, 96 insertions(+), 318 deletions(-) diff --git a/.gitignore b/.gitignore index 0cbe8da..60d0bd9 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,4 @@ src/auracast/server/stream_settings.json src/auracast/server/certs/per_device/ src/auracast/.env src/auracast/server/certs/ca/ca_cert.srl +src/auracast/server/credentials.json diff --git a/README.md b/README.md index e4f5deb..24e2973 100644 --- a/README.md +++ b/README.md @@ -170,6 +170,7 @@ option snd_usb_audio nrpacks=1 sudo apt install -y --no-install-recommends \ git build-essential cmake pkg-config \ libasound2-dev libpulse-dev pipewire ethtool linuxptp +sudo apt remove -y libportaudio2 portaudio19-dev libportaudiocpp0 git clone https://github.com/PortAudio/portaudio.git cd portaudio @@ -177,7 +178,7 @@ git checkout 9abe5fe7db729280080a0bbc1397a528cd3ce658 rm -rf build cmake -S . 
-B build -G"Unix Makefiles" \ -DBUILD_SHARED_LIBS=ON \ - -DPA_USE_ALSA=OFF \ + -DPA_USE_ALSA=ON \ -DPA_USE_PULSEAUDIO=ON \ -DPA_USE_JACK=OFF cmake --build build -j$(nproc) diff --git a/src/auracast/server/multicast_frontend.py b/src/auracast/server/multicast_frontend.py index 730b733..7b78b75 100644 --- a/src/auracast/server/multicast_frontend.py +++ b/src/auracast/server/multicast_frontend.py @@ -79,8 +79,6 @@ if not is_pw_disabled(): if not st.session_state['frontend_authenticated']: st.stop() -# Global: desired packetization time in ms for Opus (should match backend) -PTIME = 40 BACKEND_URL = "http://localhost:5000" QUALITY_MAP = { @@ -104,18 +102,31 @@ is_streaming = bool(saved_settings.get("is_streaming", False)) st.title("Auracast Audio Mode Control") +def render_stream_controls(status_streaming: bool, start_label: str, stop_label: str, mode_label: str): + c_start, c_stop, c_spacer, c_status = st.columns([1, 1, 1, 2], gap="small", vertical_alignment="center") + with c_start: + start_clicked = st.button(start_label, disabled=status_streaming) + with c_stop: + stop_clicked = st.button(stop_label, disabled=not status_streaming) + with c_status: + st.write( + ( + f"Mode: {mode_label} · " + ("🟢 Streaming" if status_streaming else "🔴 Stopped") + ) + ) + return start_clicked, stop_clicked + # Audio mode selection with persisted default # Note: backend persists 'USB' for any device: source (including AES67). We default to 'USB' in that case. 
options = [ "Demo", "USB", - "AES67", - # "Webapp" + "Network", ] saved_audio_mode = saved_settings.get("audio_mode", "Demo") if saved_audio_mode not in options: # Map legacy/unknown modes to closest - mapping = {"USB/Network": "USB", "Network": "AES67"} + mapping = {"USB/Network": "USB", "AES67": "Network"} saved_audio_mode = mapping.get(saved_audio_mode, "Demo") audio_mode = st.selectbox( @@ -123,15 +134,35 @@ audio_mode = st.selectbox( options, index=options.index(saved_audio_mode) if saved_audio_mode in options else options.index("Demo"), help=( - "Select the audio input source. Choose 'Webapp' for browser microphone, " - "'USB' for a connected USB audio device (via PipeWire), 'AES67' for network RTP/AES67 sources, " + "Select the audio input source. Choose 'USB' for a connected USB audio device (via PipeWire), " + "'Network' (AES67) for network RTP/AES67 sources, " "or 'Demo' for a simulated stream." ) ) +# Determine the displayed mode label: +# - While streaming, prefer the backend-reported mode +# - When not streaming, show the currently selected mode +backend_mode_raw = saved_settings.get("audio_mode") +backend_mode_mapped = None +if isinstance(backend_mode_raw, str): + if backend_mode_raw == "AES67": + backend_mode_mapped = "Network" + elif backend_mode_raw == "USB/Network": + backend_mode_mapped = "USB" + elif backend_mode_raw in options: + backend_mode_mapped = backend_mode_raw + +running_mode = backend_mode_mapped if (is_streaming and backend_mode_mapped) else audio_mode + +is_started = False +is_stopped = False + if audio_mode == "Demo": demo_stream_map = { "1 × 48kHz": {"quality": "High (48kHz)", "streams": 1}, + "1 × 24kHz": {"quality": "Medium (24kHz)", "streams": 1}, + "1 × 16kHz": {"quality": "Fair (16kHz)", "streams": 1}, "2 × 24kHz": {"quality": "Medium (24kHz)", "streams": 2}, "3 × 16kHz": {"quality": "Fair (16kHz)", "streams": 3}, "2 × 48kHz": {"quality": "High (48kHz)", "streams": 2}, @@ -154,40 +185,41 @@ if audio_mode == "Demo": 
type=("password"), help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast." ) - col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 1, 1], gap="small") + col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small", vertical_alignment="center") with col_flags1: assisted_listening = st.checkbox( "Assistive listening", - value=bool(saved_settings.get('assisted_listening_stream', False)) + value=bool(saved_settings.get('assisted_listening_stream', False)), + help="tells the receiver that this is an assistive listening stream" ) with col_flags2: immediate_rendering = st.checkbox( "Immediate rendering", - value=bool(saved_settings.get('immediate_rendering', False)) + value=bool(saved_settings.get('immediate_rendering', False)), + help="tells the receiver to ignore presentation delay and render immediately if possible." ) # QoS/presentation controls inline with flags default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) with col_pdelay: - presentation_delay_us = st.number_input( - "Presentation delay (µs)", - min_value=10000, max_value=200000, step=1000, value=default_pdelay, + default_pdelay_ms = max(10, min(200, default_pdelay // 1000)) + presentation_delay_ms = st.number_input( + "Delay (ms)", + min_value=10, max_value=200, step=5, value=default_pdelay_ms, help="Delay between capture and presentation for receivers." ) default_rtn = int(saved_settings.get('rtn', 4) or 4) with col_rtn: + rtn_options = [1,2,3,4] + default_rtn_clamped = min(4, max(1, default_rtn)) rtn = st.selectbox( - "Retransmissions (RTN)", options=[0,1,2,3,4], index=[0,1,2,3,4].index(default_rtn), + "RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped), help="Number of ISO retransmissions (higher improves robustness at cost of airtime)." 
) #st.info(f"Demo mode selected: {demo_selected} (Streams: {demo_stream_map[demo_selected]['streams']}, Rate: {demo_stream_map[demo_selected]['rate']} Hz)") # Start/Stop buttons for demo mode if 'demo_stream_started' not in st.session_state: st.session_state['demo_stream_started'] = False - col1, col2 = st.columns(2) - with col1: - start_demo = st.button("Start Demo Stream", disabled=is_streaming) - with col2: - stop_demo = st.button("Stop Demo Stream", disabled=not is_streaming) + start_demo, stop_demo = render_stream_controls(is_streaming, "Start Demo", "Stop Demo", running_mode) if start_demo: # Always stop any running stream for clean state try: @@ -232,7 +264,7 @@ if audio_mode == "Demo": transport='', # is set in baccol_qoskend assisted_listening_stream=assisted_listening, immediate_rendering=immediate_rendering, - presentation_delay_us=presentation_delay_us, + presentation_delay_us=int(presentation_delay_ms * 1000), qos_config=auracast_config.AuracastQoSConfig( iso_int_multiple_10ms=1, number_of_retransmissions=int(rtn), @@ -248,7 +280,7 @@ if audio_mode == "Demo": transport='', # is set in backend assisted_listening_stream=assisted_listening, immediate_rendering=immediate_rendering, - presentation_delay_us=presentation_delay_us, + presentation_delay_us=int(presentation_delay_ms * 1000), qos_config=auracast_config.AuracastQoSConfig( iso_int_multiple_10ms=1, number_of_retransmissions=int(rtn), @@ -257,43 +289,38 @@ if audio_mode == "Demo": bigs=bigs2 ) # Call /init and /init2 + is_started = False try: r1 = requests.post(f"{BACKEND_URL}/init", json=config1.model_dump()) if r1.status_code == 200: - msg = f"Demo stream started on multicaster 1 ({len(bigs1)} streams)" st.session_state['demo_stream_started'] = True - st.success(msg) + is_started = True else: st.session_state['demo_stream_started'] = False st.error(f"Failed to initialize multicaster 1: {r1.text}") if config2: r2 = requests.post(f"{BACKEND_URL}/init2", json=config2.model_dump()) if r2.status_code == 
200: - st.success(f"Demo stream started on multicaster 2 ({len(bigs2)} streams)") + is_started = True else: st.error(f"Failed to initialize multicaster 2: {r2.text}") except Exception as e: st.session_state['demo_stream_started'] = False st.error(f"Error: {e}") + if is_started: + pass elif stop_demo: try: r = requests.post(f"{BACKEND_URL}/stop_audio").json() st.session_state['demo_stream_started'] = False if r.get('was_running'): - st.info("Demo stream stopped.") - st.rerun() - else: - st.info("Demo stream was not running.") + is_stopped = True except Exception as e: st.error(f"Error: {e}") - elif st.session_state['demo_stream_started']: - st.success(f"Demo stream running: {demo_selected}") - else: - st.info("Demo stream not running.") + quality = None # Not used in demo mode else: # Stream quality selection (now enabled) - quality_options = list(QUALITY_MAP.keys()) default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0] quality = st.selectbox( @@ -333,39 +360,40 @@ else: help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast." ) # Flags and QoS row (compact, four columns) - col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 1, 1], gap="small") + col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small") with col_flags1: assisted_listening = st.checkbox( "Assistive listening", - value=bool(saved_settings.get('assisted_listening_stream', False)) + value=bool(saved_settings.get('assisted_listening_stream', False)), + help="tells the receiver that this is an assistive listening stream" ) with col_flags2: immediate_rendering = st.checkbox( "Immediate rendering", - value=bool(saved_settings.get('immediate_rendering', False)) + value=bool(saved_settings.get('immediate_rendering', False)), + help="tells the receiver to ignore presentation delay and render immediately if possible." 
) # QoS/presentation controls inline with flags default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) with col_pdelay: - presentation_delay_us = st.number_input( - "Presentation delay (µs)", - min_value=10000, max_value=200000, step=1000, value=default_pdelay, + default_pdelay_ms = max(10, min(200, default_pdelay // 1000)) + presentation_delay_ms = st.number_input( + "Delay (ms)", + min_value=10, max_value=200, step=5, value=default_pdelay_ms, help="Delay between capture and presentation for receivers." ) default_rtn = int(saved_settings.get('rtn', 4) or 4) with col_rtn: + rtn_options = [1,2,3,4] + default_rtn_clamped = min(4, max(1, default_rtn)) rtn = st.selectbox( - "Retransmissions (RTN)", options=[0,1,2,3,4], index=[0,1,2,3,4].index(default_rtn), + "RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped), help="Number of ISO retransmissions (higher improves robustness at cost of airtime)." ) - # Gain slider for Webapp mode - if audio_mode == "Webapp": - mic_gain = st.slider("Microphone Gain", 0.0, 2.0, 1.0, 0.1, help="Adjust microphone volume sent to Auracast") - else: - mic_gain = 1.0 + # Input device selection for USB or AES67 mode - if audio_mode in ("USB", "AES67"): + if audio_mode in ("USB", "Network"): if not is_streaming: # Only query device lists when NOT streaming to avoid extra backend calls try: @@ -438,55 +466,17 @@ else: ) else: input_device = None - - # Buttons and status on a single row (4 columns: start, stop, spacer, status) - c_start, c_stop, c_spacer, c_status = st.columns([1, 1, 1, 2], gap="small", vertical_alignment="center") - with c_start: - start_stream = st.button("Start Auracast", disabled=is_streaming) - with c_stop: - stop_stream = st.button("Stop Auracast", disabled=not is_streaming) - # c_spacer intentionally left empty to push status to the far right - with c_status: - # Fetch current status from backend and render using Streamlit widgets (no HTML) - # The is_streaming variable is now 
defined at the top of the script. - # We only need to re-fetch here if we want the absolute latest status for the display, - # but for UI consistency, we can just use the value from the top of the script run. - st.write("🟢 Streaming" if is_streaming else "🔴 Stopped") - - # If gain slider moved while streaming, send update to JS without restarting - if audio_mode == "Webapp" and st.session_state.get('stream_started'): - update_js = f""" - - """ - st.components.v1.html(update_js, height=0) + start_stream, stop_stream = render_stream_controls(is_streaming, "Start Auracast", "Stop Auracast", running_mode) if stop_stream: st.session_state['stream_started'] = False try: r = requests.post(f"{BACKEND_URL}/stop_audio").json() if r['was_running']: - st.success("Stream Stopped!") - st.rerun() - else: - st.success("Stream was not running.") + is_stopped = True except Exception as e: st.error(f"Error: {e}") - # Ensure existing WebRTC connection is fully closed so that a fresh - # connection is created the next time we start the stream. 
- if audio_mode == "Webapp": - cleanup_js = """ - - """ - st.components.v1.html(cleanup_js, height=0) + if start_stream: # Always send stop to ensure backend is in a clean state, regardless of current status @@ -504,7 +494,7 @@ else: transport='', # is set in backend assisted_listening_stream=assisted_listening, immediate_rendering=immediate_rendering, - presentation_delay_us=presentation_delay_us, + presentation_delay_us=int(presentation_delay_ms * 1000), qos_config=auracast_config.AuracastQoSConfig( iso_int_multiple_10ms=1, number_of_retransmissions=int(rtn), @@ -516,12 +506,8 @@ else: name=stream_name, program_info=program_info, language=language, - audio_source=( - f"device:{input_device}" if audio_mode in ("USB", "AES67") else ( - "webrtc" if audio_mode == "Webapp" else "network" - ) - ), - input_format=(f"int16le,{q['rate']},1" if audio_mode in ("USB", "AES67") else "auto"), + audio_source=(f"device:{input_device}"), + input_format=(f"int16le,{q['rate']},1"), iso_que_len=1, sampling_frequency=q['rate'], octets_per_frame=q['octets'], @@ -532,71 +518,18 @@ else: try: r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump()) if r.status_code == 200: - st.success("Stream Started!") - st.rerun() + is_started = True else: st.error(f"Failed to initialize: {r.text}") except Exception as e: st.error(f"Error: {e}") - # Render / maintain WebRTC component - if audio_mode == "Webapp" and (start_stream or st.session_state.get('stream_started')): - st.markdown("Starting microphone; allow access if prompted and speak.") - component = f""" - - """ - st.components.v1.html(component, height=0) - st.session_state['stream_started'] = True +# Centralized rerun based on start/stop outcomes +if is_started or is_stopped: + st.rerun() #else: -# st.header("Advertised Streams (Cloud Announcements)") + #st.header("Advertised Streams (Cloud Announcements)") + #st.info("This feature requires backend support to list advertised streams.") # st.info("This feature requires backend 
support to list advertised streams.") # Placeholder for future implementation # Example: r = requests.get(f"{BACKEND_URL}/advertised_streams") diff --git a/src/auracast/server/multicast_server.py b/src/auracast/server/multicast_server.py index a2b1b88..96508a8 100644 --- a/src/auracast/server/multicast_server.py +++ b/src/auracast/server/multicast_server.py @@ -4,7 +4,6 @@ TODO: in the future the multicaster objects should run in their own threads or e """ import os import logging as log -import uuid import json import sys import threading @@ -12,18 +11,12 @@ from concurrent.futures import Future from datetime import datetime import time import asyncio -import numpy as np from dotenv import load_dotenv -from pydantic import BaseModel from fastapi import FastAPI, HTTPException from fastapi.middleware.cors import CORSMiddleware from auracast import multicast_control, auracast_config -from aiortc import RTCPeerConnection, RTCSessionDescription, MediaStreamTrack -import av -import av.audio.layout import sounddevice as sd # type: ignore -from typing import Set import traceback from auracast.utils.sounddevice_utils import ( get_usb_pw_inputs, @@ -39,17 +32,14 @@ STREAM_SETTINGS_FILE = os.path.join(os.path.dirname(__file__), 'stream_settings. TRANSPORT1 = os.getenv('TRANSPORT1', 'serial:/dev/ttyAMA3,1000000,rtscts') # transport for raspberry pi gpio header TRANSPORT2 = os.getenv('TRANSPORT2', 'serial:/dev/ttyAMA4,1000000,rtscts') # transport for raspberry pi gpio header -PTIME = 40 # seems to have no effect at all -pcs: Set[RTCPeerConnection] = set() # keep refs so they don’t GC early + os.environ["PULSE_LATENCY_MSEC"] = "3" # In-memory cache to avoid disk I/O on hot paths like /status SETTINGS_CACHE: dict = {} -class Offer(BaseModel): - sdp: str - type: str + def get_device_index_by_name(name: str): """Return the device index for a given device name, or None if not found. 
@@ -184,7 +174,7 @@ class StreamerWorker: net_names = {d.get('name') for _, d in get_network_pw_inputs()} except Exception: usb_names, net_names = set(), set() - audio_mode_persist = 'AES67' if (input_device_name in net_names) else 'USB' + audio_mode_persist = 'Network' if (input_device_name in net_names) else 'USB' # Map device name to index and configure input_format device_index = int(input_device_name) if (input_device_name and input_device_name.isdigit()) else get_device_index_by_name(input_device_name or '') @@ -334,12 +324,6 @@ async def initialize2(conf: auracast_config.AuracastConfigGroup): async def stop_audio(): """Stops streaming on both multicaster1 and multicaster2 (worker thread).""" try: - # Close any active PeerConnections - close_tasks = [pc.close() for pc in list(pcs)] - pcs.clear() - if close_tasks: - await asyncio.gather(*close_tasks, return_exceptions=True) - was_running = await streamer.call(streamer._w_stop_all) # Persist is_streaming=False @@ -536,138 +520,6 @@ async def refresh_audio_devices(): raise HTTPException(status_code=500, detail=f"Failed to refresh devices: {e}") -# async def offer(offer: Offer): -# @app.post("/offer") #webrtc endpoint -# log.info("/offer endpoint called") - -# # If a previous PeerConnection is still alive, close it so we only ever keep one active. -# if pcs: -# log.info("Closing %d existing PeerConnection(s) before creating a new one", len(pcs)) -# close_tasks = [p.close() for p in list(pcs)] -# await asyncio.gather(*close_tasks, return_exceptions=True) -# pcs.clear() - -# pc = RTCPeerConnection() # No STUN needed for localhost -# pcs.add(pc) -# id_ = uuid.uuid4().hex[:8] -# log.info(f"{id_}: new PeerConnection") - -# # create directory for records - only for testing -# os.makedirs("./records", exist_ok=True) - -# # Do NOT start the streamer yet – we'll start it lazily once we actually -# # receive the first audio frame, ensuring WebRTCAudioInput is ready and -# # avoiding race-conditions on restarts. 
-# @pc.on("track") -# async def on_track(track: MediaStreamTrack): -# log.info(f"{id_}: track {track.kind} received") -# try: -# first = True -# while True: -# frame: av.audio.frame.AudioFrame = await track.recv() # RTP audio frame (already decrypted) -# if first: -# log.info(f"{id_}: frame layout={frame.layout}") -# log.info(f"{id_}: frame format={frame.format}") -# log.info( -# f"{id_}: frame sample_rate={frame.sample_rate}, samples_per_channel={frame.samples}, planes={frame.planes}" -# ) -# # Lazily start the streamer now that we know a track exists. -# if multicaster1.streamer is None: -# await multicaster1.start_streaming() -# # Yield control so the Streamer coroutine has a chance to -# # create the WebRTCAudioInput before we push samples. -# await asyncio.sleep(0) -# # Persist is_streaming=True for Webapp mode -# try: -# settings = load_stream_settings() or {} -# settings['is_streaming'] = True -# settings['timestamp'] = datetime.utcnow().isoformat() -# save_stream_settings(settings) -# except Exception: -# log.warning("Failed to persist is_streaming=True on WebRTC start", exc_info=True) -# first = False -# # in stereo case this is interleaved data format -# frame_array = frame.to_ndarray() -# log.info(f"array.shape{frame_array.shape}") -# log.info(f"array.dtype{frame_array.dtype}") -# log.info(f"frame.to_ndarray(){frame_array}") - -# samples = frame_array.reshape(-1) -# log.info(f"samples.shape: {samples.shape}") - -# if frame.layout.name == 'stereo': -# # Interleaved stereo: [L0, R0, L1, R1, ...] -# mono_array = samples[::2] # Take left channel -# else: -# mono_array = samples - -# log.info(f"mono_array.shape: {mono_array.shape}") - - -# frame_array = frame.to_ndarray() - -# # Flatten in case it's (1, N) or (N,) -# samples = frame_array.reshape(-1) - -# if frame.layout.name == 'stereo': -# # Interleaved stereo: [L0, R0, L1, R1, ...] 
-# mono_array = samples[::2] # Take left channel -# else: -# mono_array = samples - -# # Get current WebRTC audio input (streamer may have been restarted) -# big0 = list(multicaster1.bigs.values())[0] -# audio_input = big0.get('audio_input') -# # Wait until the streamer has instantiated the WebRTCAudioInput -# if audio_input is None or getattr(audio_input, 'closed', False): -# continue -# # Feed mono PCM samples to the global WebRTC audio input -# await audio_input.put_samples(mono_array.astype(np.int16)) - -# # Save to WAV file - only for testing -# # if not hasattr(pc, 'wav_writer'): -# # import wave -# # wav_path = f"./records/auracast_{id_}.wav" -# # pc.wav_writer = wave.open(wav_path, "wb") -# # pc.wav_writer.setnchannels(1) # mono -# # pc.wav_writer.setsampwidth(2) # 16-bit PCM -# # pc.wav_writer.setframerate(frame.sample_rate) - -# # pcm_data = mono_array.astype(np.int16).tobytes() -# # pc.wav_writer.writeframes(pcm_data) - - -# except Exception as e: -# log.error(f"{id_}: Exception in on_track: {e}") -# finally: -# # Always close the wav file when the track ends or on error -# if hasattr(pc, 'wav_writer'): -# try: -# pc.wav_writer.close() -# except Exception: -# pass -# del pc.wav_writer - -# # --- SDP negotiation --- -# log.info(f"{id_}: setting remote description") -# await pc.setRemoteDescription(RTCSessionDescription(**offer.model_dump())) - -# log.info(f"{id_}: creating answer") -# answer = await pc.createAnswer() -# sdp = answer.sdp -# # Insert a=ptime using the global PTIME variable -# ptime_line = f"a=ptime:{PTIME}" -# if "a=sendrecv" in sdp: -# sdp = sdp.replace("a=sendrecv", f"a=sendrecv\n{ptime_line}") -# else: -# sdp += f"\n{ptime_line}" -# new_answer = RTCSessionDescription(sdp=sdp, type=answer.type) -# await pc.setLocalDescription(new_answer) -# log.info(f"{id_}: sending answer with {ptime_line}") -# return {"sdp": pc.localDescription.sdp, -# "type": pc.localDescription.type} - - @app.post("/shutdown") async def shutdown(): """Stops 
broadcasting and releases all audio/Bluetooth resources.""" @@ -687,13 +539,6 @@ async def system_reboot(): try: # Best-effort: stop any active streaming cleanly WITHOUT persisting state try: - # Close any WebRTC peer connections - close_tasks = [pc.close() for pc in list(pcs)] - pcs.clear() - if close_tasks: - await asyncio.gather(*close_tasks, return_exceptions=True) - - # Stop streaming on worker but DO NOT touch stream_settings.json try: await streamer.call(streamer._w_stop_all) except Exception: diff --git a/src/auracast/utils/frontend_auth.py b/src/auracast/utils/frontend_auth.py index e1d4111..5bd989b 100644 --- a/src/auracast/utils/frontend_auth.py +++ b/src/auracast/utils/frontend_auth.py @@ -25,17 +25,15 @@ def is_pw_disabled() -> bool: return str(val).strip().lower() in ("1", "true", "yes", "on") -# Storage paths and permissions +# Storage paths and permissions (store next to multicast_frontend.py) def state_dir() -> Path: - custom = os.getenv("AURACAST_STATE_DIR") - if custom: - return Path(custom).expanduser() - return Path.home() / ".config" / "auracast" + # utils/ -> auracast/ -> server/ + return Path(__file__).resolve().parents[1] / "server" def pw_file_path() -> Path: - return state_dir() / "frontend_pw.json" + return state_dir() / "credentials.json" def ensure_state_dir() -> None: