# frontend/app.py
"""Streamlit frontend for controlling the Auracast audio broadcast backend.

Lets the user pick an audio mode (Webapp microphone via WebRTC, or a USB
hardware input), configure channel name / language / quality, and start or
stop the broadcast by POSTing to the local backend service.
"""
import time
from itertools import filterfalse  # NOTE(review): appears unused — confirm before removing
import streamlit as st
import requests
from auracast import auracast_config
import logging as log

# Configure logging at import time so it applies to everything below.
# (Previously this sat at the very end of the file, after all code had run.)
log.basicConfig(
    level=log.DEBUG,
    format='%(module)s.py:%(lineno)d %(levelname)s: %(message)s'
)

# Track whether WebRTC stream is active across Streamlit reruns
if 'stream_started' not in st.session_state:
    st.session_state['stream_started'] = False

# Global: desired packetization time in ms for Opus (should match backend)
PTIME = 40

BACKEND_URL = "http://localhost:5000"

# Generous ceiling for backend calls so a hung backend can't freeze a rerun forever.
REQUEST_TIMEOUT = 5


def _stop_backend_audio():
    """Ask the backend to stop any running stream.

    Returns the parsed JSON response dict, or None if the backend could not
    be reached (the caller decides how to surface that).
    """
    try:
        return requests.post(
            f"{BACKEND_URL}/stop_audio", timeout=REQUEST_TIMEOUT
        ).json()
    except Exception as e:
        st.error(f"Error: {e}")
        return None


# Try loading persisted settings from backend (best-effort: fall back to defaults)
saved_settings = {}
try:
    resp = requests.get(f"{BACKEND_URL}/status", timeout=1)
    if resp.status_code == 200:
        saved_settings = resp.json()
except Exception:
    saved_settings = {}

st.title("🎙️ Auracast Audio Mode Control")

# Audio mode selection with persisted default
options = ["Webapp", "USB"]
saved_audio_mode = saved_settings.get("audio_mode", "Webapp")
if saved_audio_mode not in options:
    saved_audio_mode = "Webapp"
audio_mode = st.selectbox(
    "Audio Mode", options, index=options.index(saved_audio_mode)
)

if audio_mode in ["Webapp", "USB"]:
    # Stream quality selection (temporarily disabled)
    # quality = st.selectbox("Stream Quality", ["High (48kHz)", "Mid (24kHz)", "Fair (16kHz)"])
    quality_map = {
        "High (48kHz)": {"rate": 48000, "octets": 120},
        "Mid (24kHz)": {"rate": 24000, "octets": 60},
        "Fair (16kHz)": {"rate": 16000, "octets": 40},
    }
    # Default to high quality while UI is hidden
    quality = "High (48kHz)"

    default_name = saved_settings.get('channel_names', ["Broadcast0"])[0]
    default_lang = saved_settings.get('languages', ["deu"])[0]
    default_input = saved_settings.get('input_device') or 'default'

    stream_name = st.text_input("Channel Name", value=default_name)
    language = st.text_input("Language (ISO 639-3)", value=default_lang)

    # Gain slider for Webapp mode
    if audio_mode == "Webapp":
        mic_gain = st.slider(
            "Microphone Gain", 0.0, 4.0, 1.0, 0.1,
            help="Adjust microphone volume sent to Auracast"
        )
    else:
        mic_gain = 1.0

    # Input device selection for USB mode
    if audio_mode == "USB":
        try:
            resp = requests.get(
                f"{BACKEND_URL}/audio_inputs", timeout=REQUEST_TIMEOUT
            )
            if resp.status_code == 200:
                input_options = [
                    f"{d['id']}:{d['name']}"
                    for d in resp.json().get('inputs', [])
                ]
            else:
                input_options = []
        except Exception:
            input_options = []
        if not input_options:
            st.error("No hardware audio input devices found.")
            st.stop()
        if default_input not in input_options:
            default_input = input_options[0]
        col1, col2 = st.columns([3, 1], vertical_alignment="bottom")
        with col1:
            selected_option = st.selectbox(
                "Input Device", input_options,
                index=input_options.index(default_input)
            )
        with col2:
            if st.button("Refresh"):
                try:
                    requests.post(
                        f"{BACKEND_URL}/refresh_audio_inputs", timeout=3
                    )
                except Exception as e:
                    st.error(f"Failed to refresh devices: {e}")
                st.rerun()
        # We send only the numeric/card identifier (before :) or 'default'
        input_device = (
            selected_option.split(":", 1)[0]
            if ":" in selected_option else selected_option
        )
    else:
        input_device = None

    start_stream = st.button("Start Auracast")
    stop_stream = st.button("Stop Auracast")

    # If gain slider moved while streaming, send update to JS without restarting
    if audio_mode == "Webapp" and st.session_state.get('stream_started'):
        update_js = f""" """
        st.components.v1.html(update_js, height=0)

    if stop_stream:
        st.session_state['stream_started'] = False
        r = _stop_backend_audio()
        if r is not None:
            # .get() guards against an unexpected backend payload shape
            if r.get('was_running'):
                st.success("Stream Stopped!")
            else:
                st.success("Stream was not running.")

    if start_stream:
        # Always send stop to ensure backend is in a clean state, regardless
        # of current status. Guarded like the stop path so an unreachable
        # backend shows an error instead of crashing the app.
        r = _stop_backend_audio()
        if r is not None and r.get('was_running'):
            st.success("Stream Stopped!")
        # Small pause lets backend fully release audio devices before re-init
        time.sleep(1)
        # Prepare config using the model (do NOT send qos_config, only relevant fields)
        q = quality_map[quality]
        config = auracast_config.AuracastConfigGroup(
            auracast_sampling_rate_hz=q['rate'],
            octets_per_frame=q['octets'],
            transport="auto",
            bigs=[
                auracast_config.AuracastBigConfig(
                    name=stream_name,
                    program_info=f"{stream_name} {quality}",
                    language=language,
                    audio_source=(
                        f"device:{input_device}"
                        if audio_mode == "USB"
                        else ("webrtc" if audio_mode == "Webapp" else "network")
                    ),
                    input_format=(
                        f"int16le,{q['rate']},1"
                        if audio_mode == "USB" else "auto"
                    ),
                    iso_que_len=1,  # TODO: this should be way less to decrease delay
                    sampling_frequency=q['rate'],
                    octets_per_frame=q['octets'],
                ),
            ]
        )
        try:
            r = requests.post(
                f"{BACKEND_URL}/init",
                json=config.model_dump(),
                timeout=REQUEST_TIMEOUT,
            )
            if r.status_code == 200:
                st.success("Stream Started!")
            else:
                st.error(f"Failed to initialize: {r.text}")
        except Exception as e:
            st.error(f"Error: {e}")

    # Render / maintain WebRTC component
    if audio_mode == "Webapp" and (
        start_stream or st.session_state.get('stream_started')
    ):
        st.markdown("Starting microphone; allow access if prompted and speak.")
        component = f""" """
        st.components.v1.html(component, height=0)
        st.session_state['stream_started'] = True
else:
    st.header("Advertised Streams (Cloud Announcements)")
    st.info("This feature requires backend support to list advertised streams.")
    # Placeholder for future implementation
    # Example: r = requests.get(f"{BACKEND_URL}/advertised_streams")
    # if r.status_code == 200:
    #     streams = r.json()
    #     for s in streams:
    #         st.write(s)
    # else:
    #     st.error("Could not fetch advertised streams.")