# --- Source listing header (paste metadata, kept as a comment) ---
# File: bumble-auracast/src/auracast/server/multicast_frontend.py
# Snapshot: 2025-06-18 12:29:13 +02:00 — 187 lines, 7.4 KiB, Python
# frontend/app.py
import logging as log
import time
from itertools import filterfalse

import requests
import streamlit as st

from auracast import auracast_config
# Global: desired packetization time in ms for Opus (should match backend)
PTIME = 40
# Base URL of the backend control API this frontend talks to.
BACKEND_URL = "http://localhost:5000"
# Load previously persisted settings from the backend's /status endpoint.
# Any failure (backend down, timeout, bad JSON, non-200) leaves an empty
# dict so the UI falls back to its built-in defaults.
saved_settings = {}
try:
    status_resp = requests.get(f"{BACKEND_URL}/status", timeout=1)
    saved_settings = status_resp.json() if status_resp.status_code == 200 else {}
except Exception:
    saved_settings = {}
st.title("🎙️ Auracast Audio Mode Control")

# Audio-mode selector; the default comes from the persisted backend
# settings and falls back to "Webapp" when missing or unrecognised.
mode_options = ["Webapp", "USB"]
persisted_mode = saved_settings.get("audio_mode", "Webapp")
if persisted_mode not in mode_options:
    persisted_mode = "Webapp"
audio_mode = st.selectbox(
    "Audio Mode",
    mode_options,
    index=mode_options.index(persisted_mode),
)
if audio_mode in ["Webapp", "USB"]:
    # Stream quality selection (temporarily disabled in the UI):
    # quality = st.selectbox("Stream Quality", ["High (48kHz)", "Mid (24kHz)", "Fair (16kHz)"])
    quality_map = {
        "High (48kHz)": {"rate": 48000, "octets": 120},
        "Mid (24kHz)": {"rate": 24000, "octets": 60},
        "Fair (16kHz)": {"rate": 16000, "octets": 40},
    }
    # Default to high quality while the selector above is hidden.
    quality = "High (48kHz)"

    # Pre-fill the form from persisted backend settings, with fallbacks.
    default_name = saved_settings.get('channel_names', ["Broadcast0"])[0]
    default_lang = saved_settings.get('languages', ["deu"])[0]
    default_input = saved_settings.get('input_device') or 'default'
    stream_name = st.text_input("Channel Name", value=default_name)
    language = st.text_input("Language (ISO 639-3)", value=default_lang)

    # The input-device picker only applies when capturing from USB hardware.
    if audio_mode == "USB":
        try:
            resp = requests.get(f"{BACKEND_URL}/audio_inputs")
            if resp.status_code == 200:
                input_options = [f"{d['id']}:{d['name']}" for d in resp.json().get('inputs', [])]
            else:
                input_options = []
        except Exception:
            # Best effort: an unreachable backend is reported below as
            # "no devices" rather than crashing the page.
            input_options = []
        if not input_options:
            st.error("No hardware audio input devices found.")
            st.stop()
        if default_input not in input_options:
            default_input = input_options[0]
        col1, col2 = st.columns([3, 1], vertical_alignment="bottom")
        with col1:
            selected_option = st.selectbox("Input Device", input_options, index=input_options.index(default_input))
        with col2:
            if st.button("Refresh"):
                try:
                    requests.post(f"{BACKEND_URL}/refresh_audio_inputs", timeout=3)
                except Exception as e:
                    st.error(f"Failed to refresh devices: {e}")
                st.rerun()
        # We send only the numeric/card identifier (before :) or 'default'
        input_device = selected_option.split(":", 1)[0] if ":" in selected_option else selected_option
    else:
        input_device = None

    start_stream = st.button("Start Auracast")
    stop_stream = st.button("Stop Auracast")

    if stop_stream:
        try:
            r = requests.post(f"{BACKEND_URL}/stop_audio").json()
            # .get() instead of [] so a malformed response is reported
            # via st.error rather than raising KeyError.
            if r.get('was_running'):
                st.success("Stream Stopped!")
            else:
                st.success("Stream was not running.")
        except Exception as e:
            st.error(f"Error: {e}")

    if start_stream:
        try:
            # Always send stop to ensure backend is in a clean state,
            # regardless of current status. (Fix: this call was previously
            # unguarded, so a dead backend crashed the script run instead
            # of showing an error like the stop handler does.)
            r = requests.post(f"{BACKEND_URL}/stop_audio").json()
            if r.get('was_running'):
                st.success("Stream Stopped!")
            # Small pause lets backend fully release audio devices before re-init
            time.sleep(1)
            # Prepare config using the model (do NOT send qos_config, only relevant fields)
            q = quality_map[quality]
            config = auracast_config.AuracastConfigGroup(
                auracast_sampling_rate_hz=q['rate'],
                octets_per_frame=q['octets'],
                transport="auto",
                bigs=[
                    auracast_config.AuracastBigConfig(
                        name=stream_name,
                        program_info=f"{stream_name} {quality}",
                        language=language,
                        audio_source=(
                            f"device:{input_device}" if audio_mode == "USB" else (
                                "webrtc" if audio_mode == "Webapp" else "network"
                            )
                        ),
                        input_format=(f"int16le,{q['rate']},1" if audio_mode == "USB" else "auto"),
                        iso_que_len=1,  # TODO: this should be way less to decrease delay
                        sampling_frequency=q['rate'],
                        octets_per_frame=q['octets'],
                    ),
                ],
            )
            r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump())
            if r.status_code == 200:
                st.success("Stream Started!")
            else:
                st.error(f"Failed to initialize: {r.text}")
        except Exception as e:
            st.error(f"Error: {e}")

    if audio_mode == "Webapp" and start_stream:
        st.markdown("Starting microphone; allow access if prompted and speak.")
        # Inline JS: grab the microphone, patch a=ptime/a=maxptime into the
        # SDP offer (so the browser packetises Opus at PTIME ms) and run the
        # offer/answer exchange against the backend's /offer endpoint.
        component = f"""
<script>
(async () => {{
const pc = new RTCPeerConnection(); // No STUN needed for localhost
const stream = await navigator.mediaDevices.getUserMedia({{audio:true}});
stream.getTracks().forEach(t => pc.addTrack(t, stream));
// --- WebRTC offer/answer exchange ---
const offer = await pc.createOffer();
// Patch SDP offer to include a=ptime using global PTIME
let sdp = offer.sdp;
const ptime_line = 'a=ptime:{PTIME}';
const maxptime_line = 'a=maxptime:{PTIME}';
if (sdp.includes('a=sendrecv')) {{
sdp = sdp.replace('a=sendrecv', 'a=sendrecv\\n' + ptime_line + '\\n' + maxptime_line);
}} else {{
sdp += '\\n' + ptime_line + '\\n' + maxptime_line;
}}
const patched_offer = new RTCSessionDescription({{sdp, type: offer.type}});
await pc.setLocalDescription(patched_offer);
// Send offer to backend
const response = await fetch(
"{BACKEND_URL}/offer",
{{
method: 'POST',
headers: {{'Content-Type':'application/json'}},
body: JSON.stringify({{sdp: pc.localDescription.sdp, type: pc.localDescription.type}})
}}
);
const answer = await response.json();
await pc.setRemoteDescription(new RTCSessionDescription({{sdp: answer.sdp, type: answer.type}}));
}})();
</script>
"""
        st.components.v1.html(component, height=0)
else:
    # NOTE(review): with only "Webapp" and "USB" in the selector above,
    # this branch is currently unreachable — confirm before relying on it.
    st.header("Advertised Streams (Cloud Announcements)")
    st.info("This feature requires backend support to list advertised streams.")
    # Placeholder for future implementation:
    # r = requests.get(f"{BACKEND_URL}/advertised_streams")
    # if r.status_code == 200:
    #     for s in r.json():
    #         st.write(s)
    # else:
    #     st.error("Could not fetch advertised streams.")
# Configure root logging used by the auracast modules.
# NOTE(review): this executes at the *end* of every Streamlit script run;
# basicConfig is a no-op once the root logger has handlers, so it
# effectively applies only the first time through.
log.basicConfig(
    level=log.DEBUG,
    format='%(module)s.py:%(lineno)d %(levelname)s: %(message)s',
)