# Source file: bumble-auracast/src/auracast/server/multicast_frontend.py
# (originally 266 lines, ~11 KiB, Python)
# frontend/app.py
import logging as log
import os
import time

import requests
import streamlit as st

from auracast import auracast_config
# Track whether the WebRTC stream is active across Streamlit reruns.
if 'stream_started' not in st.session_state:
    st.session_state['stream_started'] = False

# Desired Opus packetization time in ms (must stay in sync with the backend).
PTIME = 40
BACKEND_URL = "http://localhost:5000"

# Best-effort load of previously persisted settings from the backend so the
# widgets below can show the user's last configuration. Any failure (backend
# down, timeout, bad payload) simply falls back to empty defaults.
saved_settings = {}
try:
    status_response = requests.get(f"{BACKEND_URL}/status", timeout=1)
    saved_settings = status_response.json() if status_response.status_code == 200 else {}
except Exception:
    saved_settings = {}
st.title("🎙️ Auracast Audio Mode Control")

# Audio-mode selector, defaulting to whatever the backend last persisted.
mode_choices = ["Webapp", "USB"]
persisted_mode = saved_settings.get("audio_mode", "Webapp")
# Guard against stale/unknown persisted values so .index() below cannot fail.
if persisted_mode not in mode_choices:
    persisted_mode = "Webapp"
audio_mode = st.selectbox(
    "Audio Mode",
    mode_choices,
    index=mode_choices.index(persisted_mode),
    help="Select the audio input source. Choose 'Webapp' for browser microphone or 'USB' for a connected hardware device."
)
if audio_mode in ["Webapp", "USB"]:
    # Stream quality selection (temporarily disabled)
    # quality = st.selectbox("Stream Quality", ["High (48kHz)", "Mid (24kHz)", "Fair (16kHz)"])
    quality_map = {
        "High (48kHz)": {"rate": 48000, "octets": 120},
        "Mid (24kHz)": {"rate": 24000, "octets": 60},
        "Fair (16kHz)": {"rate": 16000, "octets": 40},
    }
    # Default to high quality while UI is hidden
    quality = "High (48kHz)"

    # Defaults previously persisted by the backend; sane fallbacks for first run.
    default_name = saved_settings.get('channel_names', ["Broadcast0"])[0]
    default_lang = saved_settings.get('languages', ["deu"])[0]
    default_input = saved_settings.get('input_device') or 'default'

    stream_name = st.text_input(
        "Channel Name",
        value=default_name,
        help="The primary name for your broadcast. Like the SSID of a WLAN, it identifies your stream for receivers."
    )

    # program_info may be persisted either as a plain string or a list of strings.
    raw_program_info = saved_settings.get('program_info', default_name)
    if isinstance(raw_program_info, list) and raw_program_info:
        default_program_info = raw_program_info[0]
    else:
        default_program_info = raw_program_info
    program_info = st.text_input(
        "Program Info",
        value=default_program_info,
        help="Additional details about the broadcast program, such as its content or purpose. Shown to receivers for more context."
    )
    language = st.text_input(
        "Language (ISO 639-3)",
        value=default_lang,
        help="Three-letter language code (e.g., 'eng' for English, 'deu' for German). Used by receivers to display the language of the stream. See: https://en.wikipedia.org/wiki/List_of_ISO_639-3_codes"
    )

    # Gain slider for Webapp mode; USB mode streams at unity gain.
    if audio_mode == "Webapp":
        mic_gain = st.slider("Microphone Gain", 0.0, 2.0, 1.0, 0.1, help="Adjust microphone volume sent to Auracast")
    else:
        mic_gain = 1.0

    def _render_refresh_button():
        """Render a 'Refresh' button that asks the backend to rescan audio inputs.

        On click: best-effort POST to /refresh_audio_inputs (errors are shown,
        not raised), then rerun the script so the device list is re-fetched.
        """
        if st.button("Refresh"):
            try:
                requests.post(f"{BACKEND_URL}/refresh_audio_inputs", timeout=3)
            except Exception as e:
                st.error(f"Failed to refresh devices: {e}")
            st.rerun()

    # Input device selection for USB mode
    if audio_mode == "USB":
        try:
            resp = requests.get(f"{BACKEND_URL}/audio_inputs", timeout=3)
            if resp.status_code == 200:
                input_options = [f"{d['id']}:{d['name']}" for d in resp.json().get('inputs', [])]
            else:
                input_options = []
        except Exception:
            # Backend unreachable: behave exactly as if no devices were found.
            input_options = []
        if not input_options:
            st.warning("No hardware audio input devices found. Plug in a USB input device and click Refresh.")
            _render_refresh_button()
            input_device = None
        else:
            if default_input not in input_options:
                default_input = input_options[0]
            col1, col2 = st.columns([3, 1], vertical_alignment="bottom")
            with col1:
                selected_option = st.selectbox("Input Device", input_options, index=input_options.index(default_input))
            with col2:
                _render_refresh_button()
            # We send only the numeric/card identifier (before :) or 'default'
            input_device = selected_option.split(":", 1)[0] if ":" in selected_option else selected_option
    else:
        input_device = None

    start_stream = st.button("Start Auracast")
    stop_stream = st.button("Stop Auracast")

    # If the gain slider moved while streaming, push the new value into the
    # browser's gain node without restarting the WebRTC connection.
    if audio_mode == "Webapp" and st.session_state.get('stream_started'):
        update_js = f"""
        <script>
        if (window.gainNode) {{ window.gainNode.gain.value = {mic_gain}; }}
        </script>
        """
        st.components.v1.html(update_js, height=0)

    if stop_stream:
        st.session_state['stream_started'] = False
        try:
            # .get() instead of ['was_running']: an unexpected payload must not
            # crash the script with a KeyError.
            r = requests.post(f"{BACKEND_URL}/stop_audio", timeout=5).json()
            if r.get('was_running'):
                st.success("Stream Stopped!")
            else:
                st.success("Stream was not running.")
        except Exception as e:
            st.error(f"Error: {e}")
        # Ensure existing WebRTC connection is fully closed so that a fresh
        # connection is created the next time we start the stream.
        if audio_mode == "Webapp":
            cleanup_js = """
            <script>
            if (window.webrtc_pc) {
                // Guard s.track: a sender without a track would otherwise throw.
                window.webrtc_pc.getSenders().forEach(s => s.track && s.track.stop());
                window.webrtc_pc.close();
                window.webrtc_pc = null;
            }
            window.webrtc_started = false;
            </script>
            """
            st.components.v1.html(cleanup_js, height=0)

    if start_stream:
        # Always send stop first so the backend is in a clean state, regardless
        # of current status. Guarded like the stop handler above: a dead
        # backend surfaces as an error message instead of a stack trace.
        try:
            r = requests.post(f"{BACKEND_URL}/stop_audio", timeout=5).json()
            if r.get('was_running'):
                st.success("Stream Stopped!")
        except Exception as e:
            st.error(f"Error: {e}")
        # Small pause lets backend fully release audio devices before re-init.
        time.sleep(1)
        # Prepare config using the model (do NOT send qos_config, only relevant fields)
        q = quality_map[quality]
        config = auracast_config.AuracastConfigGroup(
            auracast_sampling_rate_hz=q['rate'],
            octets_per_frame=q['octets'],
            transport='serial:/dev/ttyAMA3,1000000,rtscts',  # transport for raspberry pi gpio header
            bigs=[
                auracast_config.AuracastBigConfig(
                    name=stream_name,
                    program_info=program_info,
                    language=language,
                    audio_source=(
                        f"device:{input_device}" if audio_mode == "USB" else (
                            "webrtc" if audio_mode == "Webapp" else "network"
                        )
                    ),
                    input_format=(f"int16le,{q['rate']},1" if audio_mode == "USB" else "auto"),
                    iso_que_len=1,  # TODO: this should be way less to decrease delay
                    sampling_frequency=q['rate'],
                    octets_per_frame=q['octets'],
                ),
            ]
        )
        try:
            r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump(), timeout=10)
            if r.status_code == 200:
                st.success("Stream Started!")
            else:
                st.error(f"Failed to initialize: {r.text}")
        except Exception as e:
            st.error(f"Error: {e}")

    # Render / maintain the WebRTC component: capture the browser microphone,
    # apply the gain, and exchange an SDP offer/answer with the backend.
    if audio_mode == "Webapp" and (start_stream or st.session_state.get('stream_started')):
        st.markdown("Starting microphone; allow access if prompted and speak.")
        component = f"""
        <script>
        (async () => {{
            // Clean up any previous WebRTC connection before starting a new one
            if (window.webrtc_pc) {{
                // Guard s.track: a sender without a track would otherwise throw.
                window.webrtc_pc.getSenders().forEach(s => s.track && s.track.stop());
                window.webrtc_pc.close();
            }}
            const GAIN_VALUE = {mic_gain};
            const pc = new RTCPeerConnection(); // No STUN needed for localhost
            window.webrtc_pc = pc;
            window.webrtc_started = true;
            const micStream = await navigator.mediaDevices.getUserMedia({{audio:true}});
            // Create Web Audio gain processing
            const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
            const source = audioCtx.createMediaStreamSource(micStream);
            const gainNode = audioCtx.createGain();
            gainNode.gain.value = GAIN_VALUE;
            // Expose for later adjustments
            window.gainNode = gainNode;
            const dest = audioCtx.createMediaStreamDestination();
            source.connect(gainNode).connect(dest);
            // Add processed tracks to WebRTC
            dest.stream.getTracks().forEach(t => pc.addTrack(t, dest.stream));
            // --- WebRTC offer/answer exchange ---
            const offer = await pc.createOffer();
            // Patch SDP offer to include a=ptime using global PTIME
            let sdp = offer.sdp;
            const ptime_line = 'a=ptime:{PTIME}';
            const maxptime_line = 'a=maxptime:{PTIME}';
            if (sdp.includes('a=sendrecv')) {{
                sdp = sdp.replace('a=sendrecv', 'a=sendrecv\\n' + ptime_line + '\\n' + maxptime_line);
            }} else {{
                sdp += '\\n' + ptime_line + '\\n' + maxptime_line;
            }}
            const patched_offer = new RTCSessionDescription({{sdp, type: offer.type}});
            await pc.setLocalDescription(patched_offer);
            // Send offer to backend
            const response = await fetch(
                "{BACKEND_URL}/offer",
                {{
                    method: 'POST',
                    headers: {{'Content-Type':'application/json'}},
                    body: JSON.stringify({{sdp: pc.localDescription.sdp, type: pc.localDescription.type}})
                }}
            );
            const answer = await response.json();
            await pc.setRemoteDescription(new RTCSessionDescription({{sdp: answer.sdp, type: answer.type}}));
        }})();
        </script>
        """
        st.components.v1.html(component, height=0)
        st.session_state['stream_started'] = True
else:
    st.header("Advertised Streams (Cloud Announcements)")
    st.info("This feature requires backend support to list advertised streams.")
    # Placeholder for future implementation
    # Example: r = requests.get(f"{BACKEND_URL}/advertised_streams")
    # if r.status_code == 200:
    #     streams = r.json()
    #     for s in streams:
    #         st.write(s)
    # else:
    #     st.error("Could not fetch advertised streams.")
# Configure root logging for the frontend process.
# NOTE(review): this runs at the very *end* of the script, after all of the
# UI logic above has already executed on each rerun — consider moving it up
# next to the imports so records emitted during a rerun are formatted.
_log_level = os.environ.get('LOG_LEVEL', log.DEBUG)
if isinstance(_log_level, str):
    # logging only recognizes upper-case level names ("DEBUG", "INFO", ...);
    # normalize so e.g. LOG_LEVEL=debug does not raise ValueError.
    _log_level = _log_level.upper()
log.basicConfig(
    level=_log_level,
    format='%(module)s.py:%(lineno)d %(levelname)s: %(message)s'
)