feat: streamline UI and remove WebRTC audio input
- Removed WebRTC/browser microphone input option and related code
- Simplified UI labels and column layouts for better usability
- Added more quality options for demo mode (1x24kHz, 1x16kHz)
- Removed redundant success/info messages for stream start/stop
- Deleted unused PTIME constant and WebRTC-related code
- Renamed "AES67" to "Network" in audio mode selection for consistency
- Adjusted column widths for delay and RTN controls to be more compact
This commit is contained in:
@@ -79,8 +79,6 @@ if not is_pw_disabled():
|
||||
if not st.session_state['frontend_authenticated']:
|
||||
st.stop()
|
||||
|
||||
# Global: desired packetization time in ms for Opus (should match backend)
|
||||
PTIME = 40
|
||||
BACKEND_URL = "http://localhost:5000"
|
||||
|
||||
QUALITY_MAP = {
|
||||
@@ -124,7 +122,6 @@ options = [
|
||||
"Demo",
|
||||
"USB",
|
||||
"Network",
|
||||
# "Webapp"
|
||||
]
|
||||
saved_audio_mode = saved_settings.get("audio_mode", "Demo")
|
||||
if saved_audio_mode not in options:
|
||||
@@ -137,8 +134,8 @@ audio_mode = st.selectbox(
|
||||
options,
|
||||
index=options.index(saved_audio_mode) if saved_audio_mode in options else options.index("Demo"),
|
||||
help=(
|
||||
"Select the audio input source. Choose 'Webapp' for browser microphone, "
|
||||
"'USB' for a connected USB audio device (via PipeWire), 'Network' (AES67) for network RTP/AES67 sources, "
|
||||
"Select the audio input source. Choose 'USB' for a connected USB audio device (via PipeWire), "
|
||||
"'Network' (AES67) for network RTP/AES67 sources, "
|
||||
"or 'Demo' for a simulated stream."
|
||||
)
|
||||
)
|
||||
@@ -164,6 +161,8 @@ is_stopped = False
|
||||
if audio_mode == "Demo":
|
||||
demo_stream_map = {
|
||||
"1 × 48kHz": {"quality": "High (48kHz)", "streams": 1},
|
||||
"1 × 24kHz": {"quality": "Medium (24kHz)", "streams": 1},
|
||||
"1 × 16kHz": {"quality": "Fair (16kHz)", "streams": 1},
|
||||
"2 × 24kHz": {"quality": "Medium (24kHz)", "streams": 2},
|
||||
"3 × 16kHz": {"quality": "Fair (16kHz)", "streams": 3},
|
||||
"2 × 48kHz": {"quality": "High (48kHz)", "streams": 2},
|
||||
@@ -186,7 +185,7 @@ if audio_mode == "Demo":
|
||||
type=("password"),
|
||||
help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast."
|
||||
)
|
||||
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 1, 1], gap="small")
|
||||
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small", vertical_alignment="center")
|
||||
with col_flags1:
|
||||
assisted_listening = st.checkbox(
|
||||
"Assistive listening",
|
||||
@@ -204,7 +203,7 @@ if audio_mode == "Demo":
|
||||
with col_pdelay:
|
||||
default_pdelay_ms = max(10, min(200, default_pdelay // 1000))
|
||||
presentation_delay_ms = st.number_input(
|
||||
"Presentation delay (ms)",
|
||||
"Delay (ms)",
|
||||
min_value=10, max_value=200, step=5, value=default_pdelay_ms,
|
||||
help="Delay between capture and presentation for receivers."
|
||||
)
|
||||
@@ -213,14 +212,14 @@ if audio_mode == "Demo":
|
||||
rtn_options = [1,2,3,4]
|
||||
default_rtn_clamped = min(4, max(1, default_rtn))
|
||||
rtn = st.selectbox(
|
||||
"Retransmissions (RTN)", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
|
||||
"RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
|
||||
help="Number of ISO retransmissions (higher improves robustness at cost of airtime)."
|
||||
)
|
||||
#st.info(f"Demo mode selected: {demo_selected} (Streams: {demo_stream_map[demo_selected]['streams']}, Rate: {demo_stream_map[demo_selected]['rate']} Hz)")
|
||||
# Start/Stop buttons for demo mode
|
||||
if 'demo_stream_started' not in st.session_state:
|
||||
st.session_state['demo_stream_started'] = False
|
||||
start_demo, stop_demo = render_stream_controls(is_streaming, "Start Demo Stream", "Stop Demo Stream", running_mode)
|
||||
start_demo, stop_demo = render_stream_controls(is_streaming, "Start Demo", "Stop Demo", running_mode)
|
||||
if start_demo:
|
||||
# Always stop any running stream for clean state
|
||||
try:
|
||||
@@ -294,9 +293,7 @@ if audio_mode == "Demo":
|
||||
try:
|
||||
r1 = requests.post(f"{BACKEND_URL}/init", json=config1.model_dump())
|
||||
if r1.status_code == 200:
|
||||
msg = f"Demo stream started on multicaster 1 ({len(bigs1)} streams)"
|
||||
st.session_state['demo_stream_started'] = True
|
||||
st.success(msg)
|
||||
is_started = True
|
||||
else:
|
||||
st.session_state['demo_stream_started'] = False
|
||||
@@ -304,7 +301,6 @@ if audio_mode == "Demo":
|
||||
if config2:
|
||||
r2 = requests.post(f"{BACKEND_URL}/init2", json=config2.model_dump())
|
||||
if r2.status_code == 200:
|
||||
st.success(f"Demo stream started on multicaster 2 ({len(bigs2)} streams)")
|
||||
is_started = True
|
||||
else:
|
||||
st.error(f"Failed to initialize multicaster 2: {r2.text}")
|
||||
@@ -318,16 +314,10 @@ if audio_mode == "Demo":
|
||||
r = requests.post(f"{BACKEND_URL}/stop_audio").json()
|
||||
st.session_state['demo_stream_started'] = False
|
||||
if r.get('was_running'):
|
||||
st.info("Demo stream stopped.")
|
||||
is_stopped = True
|
||||
else:
|
||||
st.info("Demo stream was not running.")
|
||||
except Exception as e:
|
||||
st.error(f"Error: {e}")
|
||||
elif st.session_state['demo_stream_started']:
|
||||
st.success(f"Demo stream running: {demo_selected}")
|
||||
else:
|
||||
st.info("Demo stream not running.")
|
||||
|
||||
quality = None # Not used in demo mode
|
||||
else:
|
||||
# Stream quality selection (now enabled)
|
||||
@@ -370,7 +360,7 @@ else:
|
||||
help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast."
|
||||
)
|
||||
# Flags and QoS row (compact, four columns)
|
||||
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 1, 1], gap="small")
|
||||
col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small")
|
||||
with col_flags1:
|
||||
assisted_listening = st.checkbox(
|
||||
"Assistive listening",
|
||||
@@ -388,7 +378,7 @@ else:
|
||||
with col_pdelay:
|
||||
default_pdelay_ms = max(10, min(200, default_pdelay // 1000))
|
||||
presentation_delay_ms = st.number_input(
|
||||
"Presentation delay (ms)",
|
||||
"Delay (ms)",
|
||||
min_value=10, max_value=200, step=5, value=default_pdelay_ms,
|
||||
help="Delay between capture and presentation for receivers."
|
||||
)
|
||||
@@ -397,14 +387,10 @@ else:
|
||||
rtn_options = [1,2,3,4]
|
||||
default_rtn_clamped = min(4, max(1, default_rtn))
|
||||
rtn = st.selectbox(
|
||||
"Retransmissions (RTN)", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
|
||||
"RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped),
|
||||
help="Number of ISO retransmissions (higher improves robustness at cost of airtime)."
|
||||
)
|
||||
# Gain slider for Webapp mode
|
||||
if audio_mode == "Webapp":
|
||||
mic_gain = st.slider("Microphone Gain", 0.0, 2.0, 1.0, 0.1, help="Adjust microphone volume sent to Auracast")
|
||||
else:
|
||||
mic_gain = 1.0
|
||||
|
||||
|
||||
# Input device selection for USB or AES67 mode
|
||||
if audio_mode in ("USB", "Network"):
|
||||
@@ -482,40 +468,15 @@ else:
|
||||
input_device = None
|
||||
start_stream, stop_stream = render_stream_controls(is_streaming, "Start Auracast", "Stop Auracast", running_mode)
|
||||
|
||||
# If gain slider moved while streaming, send update to JS without restarting
|
||||
if audio_mode == "Webapp" and st.session_state.get('stream_started'):
|
||||
update_js = f"""
|
||||
<script>
|
||||
if (window.gainNode) {{ window.gainNode.gain.value = {mic_gain}; }}
|
||||
</script>
|
||||
"""
|
||||
st.components.v1.html(update_js, height=0)
|
||||
|
||||
if stop_stream:
|
||||
st.session_state['stream_started'] = False
|
||||
try:
|
||||
r = requests.post(f"{BACKEND_URL}/stop_audio").json()
|
||||
if r['was_running']:
|
||||
st.success("Stream Stopped!")
|
||||
is_stopped = True
|
||||
else:
|
||||
st.success("Stream was not running.")
|
||||
except Exception as e:
|
||||
st.error(f"Error: {e}")
|
||||
# Ensure existing WebRTC connection is fully closed so that a fresh
|
||||
# connection is created the next time we start the stream.
|
||||
if audio_mode == "Webapp":
|
||||
cleanup_js = """
|
||||
<script>
|
||||
if (window.webrtc_pc) {
|
||||
window.webrtc_pc.getSenders().forEach(s => s.track.stop());
|
||||
window.webrtc_pc.close();
|
||||
window.webrtc_pc = null;
|
||||
}
|
||||
window.webrtc_started = false;
|
||||
</script>
|
||||
"""
|
||||
st.components.v1.html(cleanup_js, height=0)
|
||||
|
||||
|
||||
if start_stream:
|
||||
# Always send stop to ensure backend is in a clean state, regardless of current status
|
||||
@@ -545,12 +506,8 @@ else:
|
||||
name=stream_name,
|
||||
program_info=program_info,
|
||||
language=language,
|
||||
audio_source=(
|
||||
f"device:{input_device}" if audio_mode in ("USB", "Network") else (
|
||||
"webrtc" if audio_mode == "Webapp" else "network"
|
||||
)
|
||||
),
|
||||
input_format=(f"int16le,{q['rate']},1" if audio_mode in ("USB", "Network") else "auto"),
|
||||
audio_source=(f"device:{input_device}"),
|
||||
input_format=(f"int16le,{q['rate']},1"),
|
||||
iso_que_len=1,
|
||||
sampling_frequency=q['rate'],
|
||||
octets_per_frame=q['octets'],
|
||||
@@ -561,75 +518,18 @@ else:
|
||||
try:
|
||||
r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump())
|
||||
if r.status_code == 200:
|
||||
st.success("Stream Started!")
|
||||
is_started = True
|
||||
else:
|
||||
st.error(f"Failed to initialize: {r.text}")
|
||||
except Exception as e:
|
||||
st.error(f"Error: {e}")
|
||||
|
||||
# Render / maintain WebRTC component
|
||||
if audio_mode == "Webapp" and (start_stream or st.session_state.get('stream_started')):
|
||||
st.markdown("Starting microphone; allow access if prompted and speak.")
|
||||
component = f"""
|
||||
<script>
|
||||
(async () => {{
|
||||
// Clean up any previous WebRTC connection before starting a new one
|
||||
if (window.webrtc_pc) {{
|
||||
window.webrtc_pc.getSenders().forEach(s => s.track.stop());
|
||||
window.webrtc_pc.close();
|
||||
}}
|
||||
const GAIN_VALUE = {mic_gain};
|
||||
const pc = new RTCPeerConnection(); // No STUN needed for localhost
|
||||
window.webrtc_pc = pc;
|
||||
window.webrtc_started = true;
|
||||
const micStream = await navigator.mediaDevices.getUserMedia({{audio:true}});
|
||||
// Create Web Audio gain processing
|
||||
const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
|
||||
const source = audioCtx.createMediaStreamSource(micStream);
|
||||
const gainNode = audioCtx.createGain();
|
||||
gainNode.gain.value = GAIN_VALUE;
|
||||
// Expose for later adjustments
|
||||
window.gainNode = gainNode;
|
||||
const dest = audioCtx.createMediaStreamDestination();
|
||||
source.connect(gainNode).connect(dest);
|
||||
// Add processed tracks to WebRTC
|
||||
dest.stream.getTracks().forEach(t => pc.addTrack(t, dest.stream));
|
||||
// --- WebRTC offer/answer exchange ---
|
||||
const offer = await pc.createOffer();
|
||||
// Patch SDP offer to include a=ptime using global PTIME
|
||||
let sdp = offer.sdp;
|
||||
const ptime_line = 'a=ptime:{PTIME}';
|
||||
const maxptime_line = 'a=maxptime:{PTIME}';
|
||||
if (sdp.includes('a=sendrecv')) {{
|
||||
sdp = sdp.replace('a=sendrecv', 'a=sendrecv\\n' + ptime_line + '\\n' + maxptime_line);
|
||||
}} else {{
|
||||
sdp += '\\n' + ptime_line + '\\n' + maxptime_line;
|
||||
}}
|
||||
const patched_offer = new RTCSessionDescription({{sdp, type: offer.type}});
|
||||
await pc.setLocalDescription(patched_offer);
|
||||
// Send offer to backend
|
||||
const response = await fetch(
|
||||
"{BACKEND_URL}/offer",
|
||||
{{
|
||||
method: 'POST',
|
||||
headers: {{'Content-Type':'application/json'}},
|
||||
body: JSON.stringify({{sdp: pc.localDescription.sdp, type: pc.localDescription.type}})
|
||||
}}
|
||||
);
|
||||
const answer = await response.json();
|
||||
await pc.setRemoteDescription(new RTCSessionDescription({{sdp: answer.sdp, type: answer.type}}));
|
||||
}})();
|
||||
</script>
|
||||
"""
|
||||
st.components.v1.html(component, height=0)
|
||||
st.session_state['stream_started'] = True
|
||||
|
||||
# Centralized rerun based on start/stop outcomes
|
||||
if is_started or is_stopped:
|
||||
st.rerun()
|
||||
#else:
|
||||
# st.header("Advertised Streams (Cloud Announcements)")
|
||||
#st.header("Advertised Streams (Cloud Announcements)")
|
||||
#st.info("This feature requires backend support to list advertised streams.")
|
||||
# st.info("This feature requires backend support to list advertised streams.")
|
||||
# Placeholder for future implementation
|
||||
# Example: r = requests.get(f"{BACKEND_URL}/advertised_streams")
|
||||
|
||||
@@ -4,7 +4,6 @@ TODO: in the future the multicaster objects should run in their own threads or e
|
||||
"""
|
||||
import os
|
||||
import logging as log
|
||||
import uuid
|
||||
import json
|
||||
import sys
|
||||
import threading
|
||||
@@ -12,18 +11,12 @@ from concurrent.futures import Future
|
||||
from datetime import datetime
|
||||
import time
|
||||
import asyncio
|
||||
import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from pydantic import BaseModel
|
||||
from fastapi import FastAPI, HTTPException
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from auracast import multicast_control, auracast_config
|
||||
from aiortc import RTCPeerConnection, RTCSessionDescription, MediaStreamTrack
|
||||
import av
|
||||
import av.audio.layout
|
||||
import sounddevice as sd # type: ignore
|
||||
from typing import Set
|
||||
import traceback
|
||||
from auracast.utils.sounddevice_utils import (
|
||||
get_usb_pw_inputs,
|
||||
@@ -39,17 +32,14 @@ STREAM_SETTINGS_FILE = os.path.join(os.path.dirname(__file__), 'stream_settings.
|
||||
TRANSPORT1 = os.getenv('TRANSPORT1', 'serial:/dev/ttyAMA3,1000000,rtscts') # transport for raspberry pi gpio header
|
||||
TRANSPORT2 = os.getenv('TRANSPORT2', 'serial:/dev/ttyAMA4,1000000,rtscts') # transport for raspberry pi gpio header
|
||||
|
||||
PTIME = 40 # seems to have no effect at all
|
||||
pcs: Set[RTCPeerConnection] = set() # keep refs so they don’t GC early
|
||||
|
||||
|
||||
os.environ["PULSE_LATENCY_MSEC"] = "3"
|
||||
|
||||
# In-memory cache to avoid disk I/O on hot paths like /status
|
||||
SETTINGS_CACHE: dict = {}
|
||||
|
||||
class Offer(BaseModel):
|
||||
sdp: str
|
||||
type: str
|
||||
|
||||
|
||||
def get_device_index_by_name(name: str):
|
||||
"""Return the device index for a given device name, or None if not found.
|
||||
@@ -184,7 +174,7 @@ class StreamerWorker:
|
||||
net_names = {d.get('name') for _, d in get_network_pw_inputs()}
|
||||
except Exception:
|
||||
usb_names, net_names = set(), set()
|
||||
audio_mode_persist = 'AES67' if (input_device_name in net_names) else 'USB'
|
||||
audio_mode_persist = 'Network' if (input_device_name in net_names) else 'USB'
|
||||
|
||||
# Map device name to index and configure input_format
|
||||
device_index = int(input_device_name) if (input_device_name and input_device_name.isdigit()) else get_device_index_by_name(input_device_name or '')
|
||||
@@ -334,12 +324,6 @@ async def initialize2(conf: auracast_config.AuracastConfigGroup):
|
||||
async def stop_audio():
|
||||
"""Stops streaming on both multicaster1 and multicaster2 (worker thread)."""
|
||||
try:
|
||||
# Close any active PeerConnections
|
||||
close_tasks = [pc.close() for pc in list(pcs)]
|
||||
pcs.clear()
|
||||
if close_tasks:
|
||||
await asyncio.gather(*close_tasks, return_exceptions=True)
|
||||
|
||||
was_running = await streamer.call(streamer._w_stop_all)
|
||||
|
||||
# Persist is_streaming=False
|
||||
@@ -536,138 +520,6 @@ async def refresh_audio_devices():
|
||||
raise HTTPException(status_code=500, detail=f"Failed to refresh devices: {e}")
|
||||
|
||||
|
||||
# async def offer(offer: Offer):
|
||||
# @app.post("/offer") #webrtc endpoint
|
||||
# log.info("/offer endpoint called")
|
||||
|
||||
# # If a previous PeerConnection is still alive, close it so we only ever keep one active.
|
||||
# if pcs:
|
||||
# log.info("Closing %d existing PeerConnection(s) before creating a new one", len(pcs))
|
||||
# close_tasks = [p.close() for p in list(pcs)]
|
||||
# await asyncio.gather(*close_tasks, return_exceptions=True)
|
||||
# pcs.clear()
|
||||
|
||||
# pc = RTCPeerConnection() # No STUN needed for localhost
|
||||
# pcs.add(pc)
|
||||
# id_ = uuid.uuid4().hex[:8]
|
||||
# log.info(f"{id_}: new PeerConnection")
|
||||
|
||||
# # create directory for records - only for testing
|
||||
# os.makedirs("./records", exist_ok=True)
|
||||
|
||||
# # Do NOT start the streamer yet – we'll start it lazily once we actually
|
||||
# # receive the first audio frame, ensuring WebRTCAudioInput is ready and
|
||||
# # avoiding race-conditions on restarts.
|
||||
# @pc.on("track")
|
||||
# async def on_track(track: MediaStreamTrack):
|
||||
# log.info(f"{id_}: track {track.kind} received")
|
||||
# try:
|
||||
# first = True
|
||||
# while True:
|
||||
# frame: av.audio.frame.AudioFrame = await track.recv() # RTP audio frame (already decrypted)
|
||||
# if first:
|
||||
# log.info(f"{id_}: frame layout={frame.layout}")
|
||||
# log.info(f"{id_}: frame format={frame.format}")
|
||||
# log.info(
|
||||
# f"{id_}: frame sample_rate={frame.sample_rate}, samples_per_channel={frame.samples}, planes={frame.planes}"
|
||||
# )
|
||||
# # Lazily start the streamer now that we know a track exists.
|
||||
# if multicaster1.streamer is None:
|
||||
# await multicaster1.start_streaming()
|
||||
# # Yield control so the Streamer coroutine has a chance to
|
||||
# # create the WebRTCAudioInput before we push samples.
|
||||
# await asyncio.sleep(0)
|
||||
# # Persist is_streaming=True for Webapp mode
|
||||
# try:
|
||||
# settings = load_stream_settings() or {}
|
||||
# settings['is_streaming'] = True
|
||||
# settings['timestamp'] = datetime.utcnow().isoformat()
|
||||
# save_stream_settings(settings)
|
||||
# except Exception:
|
||||
# log.warning("Failed to persist is_streaming=True on WebRTC start", exc_info=True)
|
||||
# first = False
|
||||
# # in stereo case this is interleaved data format
|
||||
# frame_array = frame.to_ndarray()
|
||||
# log.info(f"array.shape{frame_array.shape}")
|
||||
# log.info(f"array.dtype{frame_array.dtype}")
|
||||
# log.info(f"frame.to_ndarray(){frame_array}")
|
||||
|
||||
# samples = frame_array.reshape(-1)
|
||||
# log.info(f"samples.shape: {samples.shape}")
|
||||
|
||||
# if frame.layout.name == 'stereo':
|
||||
# # Interleaved stereo: [L0, R0, L1, R1, ...]
|
||||
# mono_array = samples[::2] # Take left channel
|
||||
# else:
|
||||
# mono_array = samples
|
||||
|
||||
# log.info(f"mono_array.shape: {mono_array.shape}")
|
||||
|
||||
|
||||
# frame_array = frame.to_ndarray()
|
||||
|
||||
# # Flatten in case it's (1, N) or (N,)
|
||||
# samples = frame_array.reshape(-1)
|
||||
|
||||
# if frame.layout.name == 'stereo':
|
||||
# # Interleaved stereo: [L0, R0, L1, R1, ...]
|
||||
# mono_array = samples[::2] # Take left channel
|
||||
# else:
|
||||
# mono_array = samples
|
||||
|
||||
# # Get current WebRTC audio input (streamer may have been restarted)
|
||||
# big0 = list(multicaster1.bigs.values())[0]
|
||||
# audio_input = big0.get('audio_input')
|
||||
# # Wait until the streamer has instantiated the WebRTCAudioInput
|
||||
# if audio_input is None or getattr(audio_input, 'closed', False):
|
||||
# continue
|
||||
# # Feed mono PCM samples to the global WebRTC audio input
|
||||
# await audio_input.put_samples(mono_array.astype(np.int16))
|
||||
|
||||
# # Save to WAV file - only for testing
|
||||
# # if not hasattr(pc, 'wav_writer'):
|
||||
# # import wave
|
||||
# # wav_path = f"./records/auracast_{id_}.wav"
|
||||
# # pc.wav_writer = wave.open(wav_path, "wb")
|
||||
# # pc.wav_writer.setnchannels(1) # mono
|
||||
# # pc.wav_writer.setsampwidth(2) # 16-bit PCM
|
||||
# # pc.wav_writer.setframerate(frame.sample_rate)
|
||||
|
||||
# # pcm_data = mono_array.astype(np.int16).tobytes()
|
||||
# # pc.wav_writer.writeframes(pcm_data)
|
||||
|
||||
|
||||
# except Exception as e:
|
||||
# log.error(f"{id_}: Exception in on_track: {e}")
|
||||
# finally:
|
||||
# # Always close the wav file when the track ends or on error
|
||||
# if hasattr(pc, 'wav_writer'):
|
||||
# try:
|
||||
# pc.wav_writer.close()
|
||||
# except Exception:
|
||||
# pass
|
||||
# del pc.wav_writer
|
||||
|
||||
# # --- SDP negotiation ---
|
||||
# log.info(f"{id_}: setting remote description")
|
||||
# await pc.setRemoteDescription(RTCSessionDescription(**offer.model_dump()))
|
||||
|
||||
# log.info(f"{id_}: creating answer")
|
||||
# answer = await pc.createAnswer()
|
||||
# sdp = answer.sdp
|
||||
# # Insert a=ptime using the global PTIME variable
|
||||
# ptime_line = f"a=ptime:{PTIME}"
|
||||
# if "a=sendrecv" in sdp:
|
||||
# sdp = sdp.replace("a=sendrecv", f"a=sendrecv\n{ptime_line}")
|
||||
# else:
|
||||
# sdp += f"\n{ptime_line}"
|
||||
# new_answer = RTCSessionDescription(sdp=sdp, type=answer.type)
|
||||
# await pc.setLocalDescription(new_answer)
|
||||
# log.info(f"{id_}: sending answer with {ptime_line}")
|
||||
# return {"sdp": pc.localDescription.sdp,
|
||||
# "type": pc.localDescription.type}
|
||||
|
||||
|
||||
@app.post("/shutdown")
|
||||
async def shutdown():
|
||||
"""Stops broadcasting and releases all audio/Bluetooth resources."""
|
||||
@@ -687,13 +539,6 @@ async def system_reboot():
|
||||
try:
|
||||
# Best-effort: stop any active streaming cleanly WITHOUT persisting state
|
||||
try:
|
||||
# Close any WebRTC peer connections
|
||||
close_tasks = [pc.close() for pc in list(pcs)]
|
||||
pcs.clear()
|
||||
if close_tasks:
|
||||
await asyncio.gather(*close_tasks, return_exceptions=True)
|
||||
|
||||
# Stop streaming on worker but DO NOT touch stream_settings.json
|
||||
try:
|
||||
await streamer.call(streamer._w_stop_all)
|
||||
except Exception:
|
||||
|
||||
Reference in New Issue
Block a user