diff --git a/.gitignore b/.gitignore index 60d0bd9..2f43ec2 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,9 @@ src/auracast/server/certs/per_device/ src/auracast/.env src/auracast/server/certs/ca/ca_cert.srl src/auracast/server/credentials.json +pcm1862-i2s.dtbo +ch1.wav +ch2.wav +src/auracast/available_samples.txt +src/auracast/server/stream_settings2.json +src/scripts/temperature_log* diff --git a/README.md b/README.md index 24e2973..2cfda88 100644 --- a/README.md +++ b/README.md @@ -218,6 +218,31 @@ sudo ldconfig # refresh linker cache - echo i2c-dev | sudo tee -a /etc/modules - read temp /src/scripts/temp +# configure the pcm1862 i2s interface +bash misc/build_pcm1862_dts.sh +bash misc/install_asoundconf.sh + +- configure differential inputs +sudo modprobe i2c-dev +i2cdetect -y 1 | grep -i 4a || true + +i2cset -f -y 1 0x4a 0x00 0x00 # Page 0 +i2cset -f -y 1 0x4a 0x06 0x10 # Left = VIN1P/M [DIFF] +i2cset -f -y 1 0x4a 0x07 0x10 # Right = VIN2P/M [DIFF] + +# test recording + arecord -f cd -c 1 -D record_left left.wav -r48000 + arecord -f cd -c 1 -D record_right right.wav -r48000 + +# Run with realtime priority +- for the feedback loop to work right realtime priority is absolutely necessary. +chrt -f 99 python src/auracast/multicast.py +- give the user realtime priority: +sudo tee /etc/security/limits.d/99-realtime.conf >/dev/null <<'EOF' +caster - rtprio 99 +caster - memlock unlimited +EOF + # Known issues: - When running on a laptop there might be issues switching between usb and browser audio input since they use the same audio device diff --git a/poetry.lock b/poetry.lock index dba8f46..c609b04 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. 
[[package]] name = "aioconsole" @@ -2443,6 +2443,30 @@ files = [ {file = "rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3"}, ] +[[package]] +name = "samplerate" +version = "0.2.2" +description = "Monolithic python wrapper for libsamplerate based on pybind11 and NumPy" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "samplerate-0.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:99b47c238ef7216b87ccf5e8860b94b527cceef7a8add38f146e75f6efec257f"}, + {file = "samplerate-0.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:0aa6ae933cb85eac5ffdebc38abc198be890c2bcbac263c30301699d651e9513"}, + {file = "samplerate-0.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a41fe7a8c68101bf9900ba415cf2a0a58199bba9cac15e0a3b22b70006705b29"}, + {file = "samplerate-0.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:86fb8eb9a6c75d4c17f8125e203d29bf2d87bf5ce0e671184ba5111f015c9264"}, + {file = "samplerate-0.2.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3f30fea3e42b51e2441cf464e24c4744fa0b9a837b7beefb6a8eb6cc72af1e51"}, + {file = "samplerate-0.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:1170c5e4f68d9c1bbec2fce1549108838a473058f69cca7bc377e053ee43457b"}, + {file = "samplerate-0.2.2-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:567dfe3888634435b8da1ac4bc06ad289ba777876f446760249e923e6b3585c5"}, + {file = "samplerate-0.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:6c819b0360e9632be0391ec3eecc15510e30775632f4022e384e28908f59648c"}, + {file = "samplerate-0.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d072b658e438d55fed1224da9b226be1328ff9aea4268d02dbc7d864a72ce4f4"}, + {file = "samplerate-0.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:bdae4f21890378f3886816800c8ef3395dabaa13fcac07bb0de7ad413703bfef"}, + {file = "samplerate-0.2.2.tar.gz", hash = "sha256:40964bfa28d33bc948389d958c2e742585f21891d8372ebba89260f491a15caa"}, +] + +[package.dependencies] 
+numpy = "*" + [[package]] name = "six" version = "1.17.0" @@ -2952,4 +2976,4 @@ test = ["pytest", "pytest-asyncio"] [metadata] lock-version = "2.1" python-versions = ">=3.11" -content-hash = "6b5300c349ed045e8fd3e617e6262bbd7e5c48c518e4c62cedf7c17da50ce8c0" +content-hash = "3c9f92c7a5af40f98da9c7824d9c2a6f7eb809e91e43cfef4995761b2e887256" diff --git a/pyproject.toml b/pyproject.toml index 799f3ce..e93c66d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,8 @@ dependencies = [ "aiortc (>=1.13.0,<2.0.0)", "sounddevice (>=0.5.2,<0.6.0)", "python-dotenv (>=1.1.1,<2.0.0)", - "smbus2 (>=0.5.0,<0.6.0)" + "smbus2 (>=0.5.0,<0.6.0)", + "samplerate (>=0.2.2,<0.3.0)" ] [project.optional-dependencies] diff --git a/src/auracast/auracast_config.py b/src/auracast/auracast_config.py index a904b11..757e862 100644 --- a/src/auracast/auracast_config.py +++ b/src/auracast/auracast_config.py @@ -7,24 +7,19 @@ class AuracastQoSConfig(BaseModel): number_of_retransmissions: int max_transport_latency_ms: int -class AuracastQosHigh(AuracastQoSConfig): +class AuracastQosRobust(AuracastQoSConfig): iso_int_multiple_10ms: int = 1 number_of_retransmissions:int = 4 #4 max_transport_latency_ms:int = 43 #varies from the default value in bumble (was 65) -class AuracastQosMid(AuracastQoSConfig): - iso_int_multiple_10ms: int = 2 - number_of_retransmissions:int = 3 - max_transport_latency_ms:int = 65 - -class AuracastQosLow(AuracastQoSConfig): - iso_int_multiple_10ms: int = 3 - number_of_retransmissions:int = 2 #4 - max_transport_latency_ms:int = 65 #varies from the default value in bumble (was 65) +class AuracastQosFast(AuracastQoSConfig): + iso_int_multiple_10ms: int = 1 + number_of_retransmissions:int = 2 + max_transport_latency_ms:int = 22 class AuracastGlobalConfig(BaseModel): - qos_config: AuracastQoSConfig = AuracastQosHigh() + qos_config: AuracastQoSConfig = AuracastQosRobust() debug: bool = False device_name: str = 'Auracaster' transport: str = '' @@ -40,8 +35,6 @@ class 
AuracastGlobalConfig(BaseModel): # so receivers may render earlier than the presentation delay for lower latency. immediate_rendering: bool = False assisted_listening_stream: bool = False - # Adaptive frame dropping: discard sub-frame samples when buffer exceeds threshold - enable_adaptive_frame_dropping: bool = False # "Audio input. " # "'device' -> use the host's default sound input device, " @@ -61,6 +54,7 @@ class AuracastBigConfig(BaseModel): loop: bool = True precode_wav: bool = False iso_que_len: int = 64 + num_bis: int = 1 # 1 = mono (FRONT_LEFT), 2 = stereo (FRONT_LEFT + FRONT_RIGHT) class AuracastBigConfigDeu(AuracastBigConfig): id: int = 12 @@ -75,7 +69,7 @@ class AuracastBigConfigEng(AuracastBigConfig): random_address: str = 'F2:F1:F2:F3:F4:F5' name: str = 'Lecture Hall A' language: str ='eng' - program_info: str = 'Lecture EN' + program_info: str = 'Lecture EN' audio_source: str = 'file:./testdata/wave_particle_5min_en.wav' class AuracastBigConfigFra(AuracastBigConfig): @@ -84,7 +78,7 @@ class AuracastBigConfigFra(AuracastBigConfig): # French name: str = 'Auditoire A' language: str ='fra' - program_info: str = 'Auditoire FR' + program_info: str = 'Auditoire FR' audio_source: str = 'file:./testdata/wave_particle_5min_fr.wav' class AuracastBigConfigSpa(AuracastBigConfig): @@ -92,7 +86,7 @@ class AuracastBigConfigSpa(AuracastBigConfig): random_address: str = 'F4:F1:F2:F3:F4:F5' name: str = 'Auditorio A' language: str ='spa' - program_info: str = 'Auditorio ES' + program_info: str = 'Auditorio ES' audio_source: str = 'file:./testdata/wave_particle_5min_es.wav' class AuracastBigConfigIta(AuracastBigConfig): @@ -100,7 +94,7 @@ class AuracastBigConfigIta(AuracastBigConfig): random_address: str = 'F5:F1:F2:F3:F4:F5' name: str = 'Aula A' language: str ='ita' - program_info: str = 'Aula IT' + program_info: str = 'Aula IT' audio_source: str = 'file:./testdata/wave_particle_5min_it.wav' @@ -109,7 +103,7 @@ class AuracastBigConfigPol(AuracastBigConfig): 
random_address: str = 'F6:F1:F2:F3:F4:F5' name: str = 'Sala Wykładowa' language: str ='pol' - program_info: str = 'Sala Wykładowa PL' + program_info: str = 'Sala Wykładowa PL' audio_source: str = 'file:./testdata/wave_particle_5min_pl.wav' diff --git a/src/auracast/multicast.py b/src/auracast/multicast.py index 02203bb..0c451ba 100644 --- a/src/auracast/multicast.py +++ b/src/auracast/multicast.py @@ -27,6 +27,10 @@ from typing import cast, Any, AsyncGenerator, Coroutine, List import itertools import glob import time +import threading + +import numpy as np # for audio down-mix +import os import lc3 # type: ignore # pylint: disable=E0401 @@ -42,7 +46,6 @@ from bumble.profiles import bass import bumble.device import bumble.transport import bumble.utils -import numpy as np # for audio down-mix from bumble.device import Host, AdvertisingChannelMap from bumble.audio import io as audio_io @@ -54,55 +57,98 @@ from auracast.utils.webrtc_audio_input import WebRTCAudioInput # Patch sounddevice.InputStream globally to use low-latency settings import sounddevice as sd +from collections import deque + class ModSoundDeviceAudioInput(audio_io.SoundDeviceAudioInput): - """Patched SoundDeviceAudioInput that creates RawInputStream with low-latency parameters.""" + """Patched SoundDeviceAudioInput with low-latency capture and adaptive resampling.""" def _open(self): - """Patched _open method that creates RawInputStream with low-latency parameters.""" - try: - dev_info = sd.query_devices(self._device) - hostapis = sd.query_hostapis() - api_index = dev_info.get('hostapi') - api_name = hostapis[api_index]['name'] if isinstance(api_index, int) and 0 <= api_index < len(hostapis) else 'unknown' - pa_ver = None - try: - pa_ver = sd.get_portaudio_version() - except Exception: - pass - logging.info( - "SoundDevice backend=%s device='%s' (id=%s) ch=%s default_low_input_latency=%.4f default_high_input_latency=%.4f portaudio=%s", - api_name, - dev_info.get('name'), - self._device, - 
dev_info.get('max_input_channels'), - float(dev_info.get('default_low_input_latency') or 0.0), - float(dev_info.get('default_high_input_latency') or 0.0), - pa_ver[1] if isinstance(pa_ver, tuple) and len(pa_ver) >= 2 else pa_ver, - ) - except Exception as e: - logging.warning("Failed to query sounddevice backend/device info: %s", e) + """Create RawInputStream with low-latency parameters and initialize ring buffer.""" + dev_info = sd.query_devices(self._device) + hostapis = sd.query_hostapis() + api_index = dev_info.get('hostapi') + api_name = hostapis[api_index]['name'] if isinstance(api_index, int) and 0 <= api_index < len(hostapis) else 'unknown' + pa_ver = sd.get_portaudio_version() + logging.info( + "SoundDevice backend=%s device='%s' (id=%s) ch=%s default_low_input_latency=%.4f default_high_input_latency=%.4f portaudio=%s", + api_name, + dev_info.get('name'), + self._device, + dev_info.get('max_input_channels'), + float(dev_info.get('default_low_input_latency') or 0.0), + float(dev_info.get('default_high_input_latency') or 0.0), + pa_ver[1] if isinstance(pa_ver, tuple) and len(pa_ver) >= 2 else pa_ver, + ) # Create RawInputStream with injected low-latency parameters + # Target ~2 ms blocksize (48 kHz -> 96 frames). For other rates, keep ~2 ms. 
+ _sr = int(self._pcm_format.sample_rate) + + self.counter=0 + self.max_avail=0 + self.logfile_name="available_samples.txt" + self.blocksize = 120 + + if os.path.exists(self.logfile_name): + os.remove(self.logfile_name) + self._stream = sd.RawInputStream( samplerate=self._pcm_format.sample_rate, device=self._device, channels=self._pcm_format.channels, dtype='int16', - blocksize=240, # Match frame size - latency=0.010, + blocksize=self.blocksize, + latency=0.004, ) self._stream.start() - - logging.info(f"SoundDeviceAudioInput: Opened with blocksize=240, latency=0.010 (10ms)") - + return audio_io.PcmFormat( audio_io.PcmFormat.Endianness.LITTLE, audio_io.PcmFormat.SampleType.INT16, self._pcm_format.sample_rate, - 2, + 1, ) + def _read(self, frame_size: int) -> bytes: + """Read PCM samples from the stream.""" + + #if self.counter % 50 == 0: + frame_size = frame_size + 1 # consume samples a little faster to avoid latency accumulation + + pcm_buffer, overflowed = self._stream.read(frame_size) + if overflowed: + logging.warning("SoundDeviceAudioInput: overflowed") + + n_available = self._stream.read_available + + # adapt = n_available > 20 + # if adapt: + # pcm_extra, overflowed = self._stream.read(3) + # logging.info('consuming extra samples, available was %d', n_available) + # if overflowed: + # logging.warning("SoundDeviceAudioInput: overflowed") + + # out = bytes(pcm_buffer) + bytes(pcm_extra) + # else: + out = bytes(pcm_buffer) + + self.max_avail = max(self.max_avail, n_available) + + #Diagnostics + #with open(self.logfile_name, "a", encoding="utf-8") as f: + # f.write(f"{n_available}, {adapt}, {round(self._runavg, 2)}, {overflowed}\n") + + if self.counter % 500 == 0: + logging.info( + "read available=%d, max=%d, latency:%d", + n_available, self.max_avail, self._stream.latency + ) + self.max_avail = 0 + + self.counter += 1 + return out + audio_io.SoundDeviceAudioInput = ModSoundDeviceAudioInput # modified from bumble @@ -211,6 +257,20 @@ def run_async(async_command:
Coroutine) -> None: color('!!! An error occurred while executing the command:', 'red'), message ) +def _build_bis_list(num_bis: int) -> list: + """Build BIS list for BasicAudioAnnouncement based on num_bis (1=mono, 2=stereo).""" + locations = [bap.AudioLocation.FRONT_LEFT, bap.AudioLocation.FRONT_RIGHT] + return [ + bap.BasicAudioAnnouncement.BIS( + index=idx + 1, + codec_specific_configuration=bap.CodecSpecificConfiguration( + audio_channel_allocation=locations[idx] + ), + ) + for idx in range(num_bis) + ] + + async def init_broadcast( device, global_config : auracast_config.AuracastGlobalConfig, @@ -226,7 +286,8 @@ async def init_broadcast( tag=le_audio.Metadata.Tag.LANGUAGE, data=conf.language.encode() ), le_audio.Metadata.Entry( - tag=le_audio.Metadata.Tag.PROGRAM_INFO, data=conf.program_info.encode() + tag=le_audio.Metadata.Tag.PROGRAM_INFO, + data=conf.program_info.encode('latin-1') ), le_audio.Metadata.Entry( tag=le_audio.Metadata.Tag.BROADCAST_NAME, data=conf.name.encode() @@ -249,9 +310,10 @@ async def init_broadcast( else [] ) ) - logging.info( - metadata.pretty_print("\n") - ) + try: + logging.info(metadata.pretty_print("\n")) + except UnicodeDecodeError: + logging.info("Metadata: (contains non-UTF-8 bytes)") bigs[f'big{i}'] = {} # Config advertising set bigs[f'big{i}']['basic_audio_announcement'] = bap.BasicAudioAnnouncement( @@ -261,18 +323,11 @@ async def init_broadcast( codec_id=hci.CodingFormat(codec_id=hci.CodecID.LC3), codec_specific_configuration=bap.CodecSpecificConfiguration( sampling_frequency=bap_sampling_freq, - frame_duration=bap.FrameDuration.DURATION_10000_US, + frame_duration=bap.FrameDuration.DURATION_7500_US if global_config.frame_duration_us == 7500 else bap.FrameDuration.DURATION_10000_US, octets_per_codec_frame=global_config.octets_per_frame, ), metadata=metadata, - bis=[ - bap.BasicAudioAnnouncement.BIS( - index=1, - codec_specific_configuration=bap.CodecSpecificConfiguration( - audio_channel_allocation=bap.AudioLocation.FRONT_LEFT 
- ), - ), - ], + bis=_build_bis_list(conf.num_bis), ) ], ) @@ -293,6 +348,36 @@ async def init_broadcast( ) ) bigs[f'big{i}']['broadcast_audio_announcement'] = bap.BroadcastAudioAnnouncement(conf.id) + + # Build advertising data types list + advertising_data_types = [ + (core.AdvertisingData.BROADCAST_NAME, conf.name.encode()), + ] + + # [PBP] Add Public Broadcast Profile Service Data (UUID 0x1856) + # Required for PTS Qualification (PBP/PBS/STR) + # Dynamically calculate PBP features based on stream configuration + pbp_features = 0x00 + + # Bit 0: Encryption (set if broadcast_code is configured) + if conf.code is not None: + pbp_features |= 0x01 + + # Bit 1 vs Bit 2: Quality based on sample rate + if global_config.auracast_sampling_rate_hz in [16000, 24000]: + pbp_features |= 0x02 # Standard Quality + elif global_config.auracast_sampling_rate_hz == 48000: + pbp_features |= 0x04 # High Quality + + # Build PBP service data with Program_Info metadata (LTV format: Length, Type=0x03, Value) + # LTV: Length = 1 (type) + len(value), Type = 0x03 (Program_Info) + program_info_bytes = conf.program_info.encode('latin-1') + pbp_metadata_ltv = bytes([len(program_info_bytes) + 1, 0x03]) + program_info_bytes + pbp_service_data = struct.pack(' 1 and isinstance(err.args[1], int): - code = err.args[1] - if code == -9985 and attempt < max_attempts: - backoff_ms = 200 * attempt - logging.warning("PortAudio device busy (attempt %d/%d). 
Retrying in %.1f ms…", attempt, max_attempts, backoff_ms) - # ensure device handle and PortAudio context are closed before retrying - try: - if hasattr(audio_input, "aclose"): - await audio_input.aclose() - elif hasattr(audio_input, "close"): - audio_input.close() - except Exception: - pass - # Fully terminate PortAudio to drop lingering handles (sounddevice quirk) - if hasattr(_sd, "_terminate"): - try: - _sd._terminate() - except Exception: - pass - # Small pause then re-initialize PortAudio - await asyncio.sleep(0.1) - if hasattr(_sd, "_initialize"): - try: - _sd._initialize() - except Exception: - pass + pcm_format = await audio_input.open() - # Back-off before next attempt - await asyncio.sleep(backoff_ms / 1000) - # Recreate audio_input fresh for next attempt - audio_input = await audio_io.create_audio_input(audio_source, input_format) - continue - # Other errors or final attempt – re-raise so caller can abort gracefully - raise - else: - # Loop exhausted without break - logging.error("Unable to open audio device after %d attempts – giving up", max_attempts) + num_bis = big.get('num_bis', 1) + if num_bis == 2 and pcm_format.channels < 2: + logging.error("Stereo (num_bis=2) requires at least 2 input channels, got %d", pcm_format.channels) return + if pcm_format.channels != num_bis: + if num_bis == 1: + logging.info("Input device provides %d channels – will down-mix to mono for LC3", pcm_format.channels) + else: + logging.info("Input device provides %d channels – using first %d for stereo", pcm_format.channels, num_bis) - if pcm_format.channels != 1: - logging.info("Input device provides %d channels – will down-mix to mono for LC3", pcm_format.channels) if pcm_format.sample_type == audio_io.PcmFormat.SampleType.INT16: pcm_bit_depth = 16 elif pcm_format.sample_type == audio_io.PcmFormat.SampleType.FLOAT32: @@ -653,271 +698,139 @@ class Streamer(): logging.error("Only INT16 and FLOAT32 sample types are supported") return - encoder = lc3.Encoder( - 
frame_duration_us=global_config.frame_duration_us, - sample_rate_hz=global_config.auracast_sampling_rate_hz, - num_channels=1, - input_sample_rate_hz=pcm_format.sample_rate, - ) + # Create one encoder per BIS (mono: 1 encoder, stereo: 2 encoders) + encoders = [ + lc3.Encoder( + frame_duration_us=global_config.frame_duration_us, + sample_rate_hz=global_config.auracast_sampling_rate_hz, + num_channels=1, + input_sample_rate_hz=pcm_format.sample_rate, + ) + for _ in range(num_bis) + ] - lc3_frame_samples = encoder.get_frame_samples() # number of the pcm samples per lc3 frame + lc3_frame_samples = encoders[0].get_frame_samples() # number of the pcm samples per lc3 frame big['pcm_bit_depth'] = pcm_bit_depth big['channels'] = pcm_format.channels big['lc3_frame_samples'] = lc3_frame_samples big['lc3_bytes_per_frame'] = global_config.octets_per_frame big['audio_input'] = audio_input - big['encoder'] = encoder + big['encoders'] = encoders + # Keep backward compat + big['encoder'] = encoders[0] big['precoded'] = False logging.info("Streaming audio...") bigs = self.bigs self.is_streaming = True - # frame drop algo parameters - # In demo/precoded modes there may be no audio_input or no _pcm_format yet - ai = big.get('audio_input') - if ai is not None and hasattr(ai, '_pcm_format') and getattr(ai, '_pcm_format') is not None: - sample_rate = ai._pcm_format.sample_rate - else: - sample_rate = global_config.auracast_sampling_rate_hz - samples_discarded_total = 0 # Total samples discarded - discard_events = 0 # Number of times we discarded samples - frames_since_last_discard = 999 # Guard: frames since last discard (start high to allow first drop) - enable_drift_compensation = getattr(global_config, 'enable_adaptive_frame_dropping', False) - # Hardcoded parameters (unit: milliseconds) - drift_threshold_ms = 2.0 if enable_drift_compensation else 0.0 - static_drop_ms = 1 if enable_drift_compensation else 0.0 - # Guard interval measured in LC3 frames (10 ms each); 50 => 500 ms 
cooldown - discard_guard_frames = int(2*sample_rate / 1000) if enable_drift_compensation else 0 - # Derived sample counts - drop_threshold_samples = int(sample_rate * drift_threshold_ms / 1000.0) - static_drop_samples = int(sample_rate * static_drop_ms / 1000.0) - - if enable_drift_compensation: - logging.info(f"Clock drift compensation ENABLED: threshold={drift_threshold_ms}ms, guard={discard_guard_frames} frames") - else: - logging.info("Clock drift compensation DISABLED") - - # Periodic monitoring - last_stats_log = time.perf_counter() - stats_interval = 5.0 # Log stats every 5 seconds frame_count = 0 - + # One streamer fits all while self.is_streaming: stream_finished = [False for _ in range(len(bigs))] for i, big in enumerate(bigs.values()): - if big['precoded']:# everything was already lc3 coded beforehand + if big['precoded']: # everything was already lc3 coded beforehand lc3_frame = bytes( itertools.islice(big['lc3_frames'], big['lc3_bytes_per_frame']) - ) - - if lc3_frame == b'': # Not all streams may stop at the same time - stream_finished[i] = True - continue - else: # code lc3 on the fly - # Use stored frames generator when available so we can aclose() it on stop - frames_gen = big.get('frames_gen') - if frames_gen is None: - frames_gen = big['audio_input'].frames(big['lc3_frame_samples']) - big['frames_gen'] = frames_gen - - # Read the frame we need for encoding - pcm_frame = await anext(frames_gen, None) - - if pcm_frame is None: # Not all streams may stop at the same time - stream_finished[i] = True - continue - - # Discard excess samples in buffer if above threshold (clock drift compensation) - if enable_drift_compensation and hasattr(big['audio_input'], '_stream') and big['audio_input']._stream: - sd_buffer_samples = big['audio_input']._stream.read_available - - # Guard: only allow discard if enough frames have passed since last discard - if sd_buffer_samples > drop_threshold_samples and frames_since_last_discard >= discard_guard_frames: - # Always 
drop a static amount (3ms) for predictable behavior - # This matches the crossfade duration better for smoother transitions - samples_to_drop = min(static_drop_samples, max(1, big['lc3_frame_samples'] - 1)) - try: - discarded_data = await anext(big['audio_input'].frames(samples_to_drop)) - samples_discarded_total += samples_to_drop - discard_events += 1 - - # Log every discard event with timing information - sample_rate = big['audio_input']._pcm_format.sample_rate - time_since_last_ms = frames_since_last_discard * 10 # Each frame is 10ms - logging.info( - f"DISCARD #{discard_events}: dropped {samples_to_drop} samples ({samples_to_drop / sample_rate * 1000:.1f}ms) | " - f"buffer was {sd_buffer_samples} samples ({sd_buffer_samples / sample_rate * 1000:.1f}ms) | " - f"since_last={frames_since_last_discard} frames ({time_since_last_ms}ms) | " - f"frame={frame_count}" - ) - - # Reset guard counter - frames_since_last_discard = 0 - # Store how much we dropped for potential adaptive crossfade - big['last_drop_samples'] = samples_to_drop - # Set flag to apply crossfade on next frame - big['apply_crossfade'] = True - - except Exception as e: - logging.error(f"Failed to discard samples: {e}") - - # Down-mix multi-channel PCM to mono for LC3 encoder if needed - if big.get('channels', 1) > 1: - if isinstance(pcm_frame, np.ndarray): - if pcm_frame.ndim > 1: - mono = pcm_frame.mean(axis=1).astype(pcm_frame.dtype) - pcm_frame = mono - else: - # Convert raw bytes to numpy, average channels, convert back - dtype = np.int16 if big['pcm_bit_depth'] == 16 else np.float32 - samples = np.frombuffer(pcm_frame, dtype=dtype) - samples = samples.reshape(-1, big['channels']).mean(axis=1) - pcm_frame = samples.astype(dtype).tobytes() - - # Apply crossfade if samples were just dropped (drift compensation) - if big.get('apply_crossfade') and big.get('prev_pcm_frame') is not None: - # Crossfade duration: 10ms for smoother transition (was 5ms) - dtype = np.int16 if big['pcm_bit_depth'] == 16 else 
np.float32 - sample_rate = big['audio_input']._pcm_format.sample_rate - crossfade_samples = min(int(sample_rate * 0.010), big['lc3_frame_samples'] // 2) - - # Convert frames to numpy arrays (make writable copies) - prev_samples = np.frombuffer(big['prev_pcm_frame'], dtype=dtype).copy() - curr_samples = np.frombuffer(pcm_frame, dtype=dtype).copy() - - # Create equal-power crossfade curves (smoother than linear) - # Equal-power maintains perceived loudness during transition - t = np.linspace(0, 1, crossfade_samples) - fade_out = np.cos(t * np.pi / 2) # Cosine fade out - fade_in = np.sin(t * np.pi / 2) # Sine fade in - - # Apply crossfade to the beginning of current frame with end of previous frame - if len(prev_samples) >= crossfade_samples and len(curr_samples) >= crossfade_samples: - crossfaded = ( - prev_samples[-crossfade_samples:] * fade_out + - curr_samples[:crossfade_samples] * fade_in - ).astype(dtype) - # Replace beginning of current frame with crossfaded section - curr_samples[:crossfade_samples] = crossfaded - pcm_frame = curr_samples.tobytes() - - big['apply_crossfade'] = False - - # Store current frame for potential next crossfade - if enable_drift_compensation: - big['prev_pcm_frame'] = pcm_frame - - lc3_frame = big['encoder'].encode( - pcm_frame, num_bytes=big['lc3_bytes_per_frame'], bit_depth=big['pcm_bit_depth'] ) - await big['iso_queue'].write(lc3_frame) - frame_count += 1 - # Increment guard counter (tracks frames since last discard) - frames_since_last_discard += 1 - - # Periodic stats logging (only for device/sounddevice streams, not WAV files) - # WAV file concurrent access causes deadlock in ThreadedAudioInput - now = time.perf_counter() - is_device_stream = hasattr(big['audio_input'], '_stream') and big['audio_input']._stream is not None - if is_device_stream and now - last_stats_log >= stats_interval: - # Get current buffer status from PortAudio - current_sd_buffer = 0 - if hasattr(big['audio_input'], '_stream') and 
big['audio_input']._stream: - try: - current_sd_buffer = big['audio_input']._stream.read_available - except Exception: - pass - - # Get stream latency and CPU load from sounddevice - stream_latency_ms = None - cpu_load_pct = None - if hasattr(big['audio_input'], '_stream') and big['audio_input']._stream: - try: - latency = big['audio_input']._stream.latency - if frame_count == 501: # Debug log once - logging.info(f"DEBUG: stream.latency raw value = {latency}, type = {type(latency)}") - # latency can be either a float (for input-only streams) or tuple (input, output) - if latency is not None: - if isinstance(latency, (int, float)): - # Single value for input-only stream - stream_latency_ms = float(latency) * 1000.0 - elif isinstance(latency, (tuple, list)) and len(latency) >= 1: - # Tuple (input_latency, output_latency) - stream_latency_ms = latency[0] * 1000.0 - except Exception as e: - if frame_count == 501: # Log once at startup - logging.warning(f"Could not get stream.latency: {e}") - - try: - cpu_load = big['audio_input']._stream.cpu_load - if frame_count == 501: # Debug log once - logging.info(f"DEBUG: stream.cpu_load raw value = {cpu_load}") - # cpu_load is a fraction (0.0 to 1.0) - if cpu_load is not None and cpu_load >= 0: - cpu_load_pct = cpu_load * 100.0 # Convert to percentage - except Exception as e: - if frame_count == 501: # Log once at startup - logging.warning(f"Could not get stream.cpu_load: {e}") - - # Get backend-specific buffer status - backend_delay = None - backend_label = "Backend" - - # Determine which backend we're using based on audio_input device - try: - device_info = big['audio_input']._device if hasattr(big['audio_input'], '_device') else None - if device_info is not None and isinstance(device_info, int): - hostapi = sd.query_hostapis(sd.query_devices(device_info)['hostapi']) - backend_name = hostapi['name'] - else: - backend_name = "Unknown" - except Exception: - backend_name = "Unknown" - - if 'pulse' in backend_name.lower(): - # 
PipeWire/PulseAudio backend - no direct buffer access - # SD_buffer is the only reliable metric - backend_label = "PipeWire" - backend_delay = None # Cannot read PipeWire internal buffers directly - else: - # ALSA backend - can read kernel buffer - backend_label = "ALSA_kernel" - try: - with open('/proc/asound/card0/pcm0c/sub0/status', 'r') as f: - for line in f: - if 'delay' in line and ':' in line: - backend_delay = int(line.split(':')[1].strip()) - break - except Exception: - pass - - if enable_drift_compensation: - avg_discard_per_event = (samples_discarded_total / discard_events) if discard_events > 0 else 0.0 - discard_event_rate = (discard_events / frame_count * 100) if frame_count > 0 else 0.0 - latency_str = f"stream_latency={stream_latency_ms:.2f} ms" if stream_latency_ms is not None else "stream_latency=N/A" - cpu_str = f"cpu_load={cpu_load_pct:.1f}%" if cpu_load_pct is not None else "cpu_load=N/A" - logging.info( - f"STATS: frames={frame_count} | discard_events={discard_events} ({discard_event_rate:.1f}%) | " - f"avg_discard={avg_discard_per_event:.0f} samples/event | " - f"SD_buffer={current_sd_buffer} samples ({current_sd_buffer / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms) | " - f"{latency_str} | {cpu_str} | " - f"threshold={drop_threshold_samples} samples ({drop_threshold_samples / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms)" + if lc3_frame == b'': # Not all streams may stop at the same time + stream_finished[i] = True + continue + else: # code lc3 on the fly with perf counters + # Ensure frames generator exists (so we can aclose() on stop) + frames_gen = big.get('frames_gen') + if frames_gen is None: + # For stereo, request frame_samples per channel (interleaved input) + frames_gen = big['audio_input'].frames(big['lc3_frame_samples']) + big['frames_gen'] = frames_gen + + # Initialize perf tracking bucket per BIG + perf = big.setdefault('_perf', { + 'n': 0, + 'samples_sum': 0.0, 'samples_max': 0.0, + 'enc_sum': 0.0, 
'enc_max': 0.0, + 'write_sum': 0.0, 'write_max': 0.0, + 'loop_sum': 0.0, 'loop_max': 0.0, + }) + + # Total loop duration timer (sample + encode + write) + t_loop0 = time.perf_counter() + + # Measure time to get a sample from the buffer + t0 = time.perf_counter() + pcm_frame = await anext(frames_gen, None) + dt_sample = time.perf_counter() - t0 + + if pcm_frame is None: # Not all streams may stop at the same time + stream_finished[i] = True + continue + + # Measure LC3 encoding time + t1 = time.perf_counter() + num_bis = big.get('num_bis', 1) + if num_bis == 1: + # Mono: single encoder, single queue + lc3_frame = big['encoder'].encode( + pcm_frame, num_bytes=big['lc3_bytes_per_frame'], bit_depth=big['pcm_bit_depth'] ) + lc3_frames_out = [lc3_frame] else: - backend_str = f"{backend_label}={backend_delay} samples ({backend_delay / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms)" if backend_delay is not None else f"{backend_label}=N/A (use pw-top)" - latency_str = f"stream_latency={stream_latency_ms:.2f} ms" if stream_latency_ms is not None else "stream_latency=N/A" - cpu_str = f"cpu_load={cpu_load_pct:.1f}%" if cpu_load_pct is not None else "cpu_load=N/A" + # Stereo: split interleaved PCM into L/R, encode separately + pcm_array = np.frombuffer(pcm_frame, dtype=np.int16) + channels_in = big['channels'] + lc3_frames_out = [] + for ch_idx, encoder in enumerate(big['encoders']): + # Extract channel (interleaved: L,R,L,R,... or L,R,C,... 
for >2 ch) + ch_pcm = pcm_array[ch_idx::channels_in].tobytes() + lc3_frame = encoder.encode( + ch_pcm, num_bytes=big['lc3_bytes_per_frame'], bit_depth=big['pcm_bit_depth'] + ) + lc3_frames_out.append(lc3_frame) + dt_enc = time.perf_counter() - t1 + + # Measure write blocking time + t2 = time.perf_counter() + for q_idx, lc3_frame in enumerate(lc3_frames_out): + await big['iso_queues'][q_idx].write(lc3_frame) + dt_write = time.perf_counter() - t2 + + # Total loop duration + dt_loop = time.perf_counter() - t_loop0 + + # Update stats + perf['n'] += 1 + perf['samples_sum'] += dt_sample + perf['enc_sum'] += dt_enc + perf['write_sum'] += dt_write + perf['loop_sum'] += dt_loop + perf['samples_max'] = max(perf['samples_max'], dt_sample) + perf['enc_max'] = max(perf['enc_max'], dt_enc) + perf['write_max'] = max(perf['write_max'], dt_write) + perf['loop_max'] = max(perf['loop_max'], dt_loop) + + frame_count += 1 + + # Log every 500 frames for this BIG and reset accumulators + if perf['n'] >= 500: + n = perf['n'] logging.info( - f"STATS: frames={frame_count} | " - f"SD_buffer={current_sd_buffer} samples ({current_sd_buffer / big['audio_input']._pcm_format.sample_rate * 1000:.1f} ms) | " - f"{latency_str} | {cpu_str} | " - f"{backend_str} | " - f"drift_compensation=DISABLED" + "Perf(i=%d, last %d): sample mean=%.6fms max=%.6fms | encode mean=%.6fms max=%.6fms | write mean=%.6fms max=%.6fms | loop mean=%.6fms max=%.6fms", + i, + n, + (perf['samples_sum'] / n) * 1e3, perf['samples_max'] * 1e3, + (perf['enc_sum'] / n) * 1e3, perf['enc_max'] * 1e3, + (perf['write_sum'] / n) * 1e3, perf['write_max'] * 1e3, + (perf['loop_sum'] / n) * 1e3, perf['loop_max'] * 1e3, ) - last_stats_log = now + perf.update({ + 'n': 0, + 'samples_sum': 0.0, 'samples_max': 0.0, + 'enc_sum': 0.0, 'enc_max': 0.0, + 'write_sum': 0.0, 'write_max': 0.0, + 'loop_sum': 0.0, 'loop_max': 0.0, + }) if all(stream_finished): # Take into account that multiple files have different lengths logging.info('All streams 
finished, stopping streamer') @@ -970,106 +883,28 @@ if __name__ == "__main__": format='%(module)s.py:%(lineno)d %(levelname)s: %(message)s' ) os.chdir(os.path.dirname(__file__)) - - # ============================================================================= - # AUDIO BACKEND CONFIGURATION - Toggle between ALSA and PipeWire - # ============================================================================= - # Uncomment ONE of the following backend configurations: - - # Option 1: Direct ALSA (Direct hardware access, bypasses PipeWire) - AUDIO_BACKEND = 'ALSA' - target_latency_ms = 10.0 - - # Option 2: PipeWire via PulseAudio API (Routes through pipewire-pulse) - #AUDIO_BACKEND = 'PipeWire' - #target_latency_ms = 5.0 # PipeWire typically handles lower latency better - - # ============================================================================= - - import sounddevice as sd - import subprocess - - # Detect if PipeWire is running (even if we're using ALSA API) - pipewire_running = False - try: - result = subprocess.run(['systemctl', '--user', 'is-active', 'pipewire'], - capture_output=True, text=True, timeout=1) - pipewire_running = (result.returncode == 0) - except Exception: - pass - - if AUDIO_BACKEND == 'ALSA': - os.environ['SDL_AUDIODRIVER'] = 'alsa' - sd.default.latency = target_latency_ms / 1000.0 - # Find ALSA host API - try: - alsa_hostapi = next(i for i, ha in enumerate(sd.query_hostapis()) - if 'ALSA' in ha['name']) - logging.info(f"ALSA host API available at index: {alsa_hostapi}") - except StopIteration: - logging.error("ALSA backend not found!") + # Find ALSA host API + alsa_hostapi = next(i for i, ha in enumerate(sd.query_hostapis()) + if 'ALSA' in ha['name']) - elif AUDIO_BACKEND == 'PipeWire': - os.environ['SDL_AUDIODRIVER'] = 'pulseaudio' - sd.default.latency = target_latency_ms / 1000.0 - - if not pipewire_running: - logging.error("PipeWire selected but not running!") - raise RuntimeError("PipeWire is not active") - - # Find PulseAudio host 
API (required for PipeWire mode) - try: - pulse_hostapi = next(i for i, ha in enumerate(sd.query_hostapis()) - if 'pulse' in ha['name'].lower()) - logging.info(f"Using PulseAudio host API at index: {pulse_hostapi} → routes to PipeWire") - except StopIteration: - logging.error("PulseAudio host API not found! Did you rebuild PortAudio with -DPA_USE_PULSEAUDIO=ON?") - raise RuntimeError("PulseAudio API not available in PortAudio") - else: - logging.error(f"Unknown AUDIO_BACKEND: {AUDIO_BACKEND}") - raise ValueError(f"Invalid AUDIO_BACKEND: {AUDIO_BACKEND}") - - # Select audio input device based on backend - shure_device_idx = None + search_str='ch1' + # Use ALSA devices + from auracast.utils.sounddevice_utils import get_alsa_usb_inputs + devices = get_alsa_usb_inputs() + logging.info(f"Searching ALSA devices for first device with string {search_str}...") - if AUDIO_BACKEND == 'ALSA': - # Use ALSA devices - from auracast.utils.sounddevice_utils import get_alsa_usb_inputs - devices = get_alsa_usb_inputs() - logging.info("Searching ALSA devices for Shure MVX2U...") - - for idx, dev in devices: - logging.info(f" ALSA device [{idx}]: {dev['name']} ({dev['max_input_channels']} ch)") - if 'shure' in dev['name'].lower() and 'mvx2u' in dev['name'].lower(): - shure_device_idx = idx - logging.info(f"✓ Selected ALSA device {idx}: {dev['name']}") - break - - elif AUDIO_BACKEND == 'PipeWire': - # Use PulseAudio devices (routed through PipeWire) - logging.info("Searching PulseAudio devices for Shure MVX2U...") - - for idx, dev in enumerate(sd.query_devices()): - # Only consider PulseAudio input devices - if dev['max_input_channels'] > 0: - hostapi = sd.query_hostapis(dev['hostapi']) - if 'pulse' in hostapi['name'].lower(): - dev_name_lower = dev['name'].lower() - logging.info(f" PulseAudio device [{idx}]: {dev['name']} ({dev['max_input_channels']} ch)") - - # Skip monitor devices (they're output monitors, not real inputs) - if 'monitor' in dev_name_lower: - continue - - # Look for 
Shure MVX2U - prefer "Mono" device for mono input - if 'shure' in dev_name_lower and 'mvx2u' in dev_name_lower: - shure_device_idx = idx - logging.info(f"✓ Selected PulseAudio device {idx}: {dev['name']} → routes to PipeWire") - break + audio_dev = None + for idx, dev in devices: + logging.info(f" ALSA device [{idx}]: {dev['name']} ({dev['max_input_channels']} ch)") + if search_str in dev['name'].lower(): + audio_dev = idx + logging.info(f"✓ Selected ALSA device {idx}: {dev['name']}") + break + - if shure_device_idx is None: - logging.error(f"Shure MVX2U not found in {AUDIO_BACKEND} devices!") + if audio_dev is None: + logging.error(f"Audio device {audio_dev} not found in {AUDIO_BACKEND} devices!") raise RuntimeError(f"Audio device not found for {AUDIO_BACKEND} backend") config = auracast_config.AuracastConfigGroup( @@ -1083,7 +918,7 @@ if __name__ == "__main__": ) # TODO: How can we use other iso interval than 10ms ?(medium or low rel) ? - nrf53audio receiver repports I2S tx underrun - config.qos_config=auracast_config.AuracastQosHigh() + config.qos_config=auracast_config.AuracastQosRobust() #config.transport='serial:/dev/serial/by-id/usb-ZEPHYR_Zephyr_HCI_UART_sample_81BD14B8D71B5662-if00,1000000,rtscts' # transport for nrf52 dongle #config.transport='serial:/dev/serial/by-id/usb-SEGGER_J-Link_001050076061-if02,1000000,rtscts' # transport for nrf53dk @@ -1093,8 +928,6 @@ if __name__ == "__main__": #config.transport= 'auto' config.transport='serial:/dev/ttyAMA3,1000000,rtscts' # transport for raspberry pi - # TODO: encrypted streams are not working - for big in config.bigs: #big.code = 'abcd' #big.code = '78 e5 dc f1 34 ab 42 bf c1 92 ef dd 3a fd 67 ae' @@ -1102,11 +935,11 @@ if __name__ == "__main__": #big.audio_source = big.audio_source.replace('.wav', '_10_16_32.lc3') #lc3 precoded files #big.audio_source = read_lc3_file(big.audio_source) # load files in advance - # --- Configure Shure MVX2U USB Audio Interface (ALSA backend) --- - if shure_device_idx is not 
None: - big.audio_source = f'device:{shure_device_idx}' # Shure MVX2U USB mono interface + # --- Configure device (ALSA backend) --- + if audio_dev is not None: + big.audio_source = f'device:{audio_dev}' big.input_format = 'int16le,48000,1' # int16, 48kHz, mono - logging.info(f"Configured BIG '{big.name}' with Shure MVX2U (device:{shure_device_idx}, 48kHz mono)") + logging.info(f"Configured BIG '{big.name}' with (device:{audio_dev}, 48kHz mono)") else: logging.warning(f"Shure device not found, BIG '{big.name}' will use default audio_source: {big.audio_source}") @@ -1119,15 +952,12 @@ if __name__ == "__main__": # 24kHz is only working with 2 streams - probably airtime constraint # TODO: with more than three broadcasters (16kHz) no advertising (no primary channels is present anymore) # TODO: find the bottleneck - probably airtime - # TODO: test encrypted streams - config.auracast_sampling_rate_hz = 16000 - config.octets_per_frame = 40 # 32kbps@16kHz + config.auracast_sampling_rate_hz = 24000 + config.octets_per_frame = 60 # 32kbps@16kHz + #config.immediate_rendering = True #config.debug = True - # Enable clock drift compensation to prevent latency accumulation - # With ~43 samples/sec drift (0.89ms/sec), threshold of 2ms will trigger every ~2.2 seconds - run_async( broadcast( config, diff --git a/src/auracast/multicast_control.py b/src/auracast/multicast_control.py index f651511..226210b 100644 --- a/src/auracast/multicast_control.py +++ b/src/auracast/multicast_control.py @@ -140,7 +140,7 @@ async def main(): os.chdir(os.path.dirname(__file__)) global_conf = auracast_config.AuracastGlobalConfig( - qos_config=auracast_config.AuracastQosHigh() + qos_config=auracast_config.AuracastQosRobust() ) #global_conf.transport='serial:/dev/serial/by-id/usb-SEGGER_J-Link_001057705357-if02,1000000,rtscts' # transport for nrf54l15dk global_conf.transport='serial:/dev/serial/by-id/usb-ZEPHYR_Zephyr_HCI_UART_sample_81BD14B8D71B5662-if00,115200,rtscts' #nrf52dongle hci_uart usb cdc 
diff --git a/src/auracast/multicast_script.py b/src/auracast/multicast_script.py index 01c4fdd..38524b7 100644 --- a/src/auracast/multicast_script.py +++ b/src/auracast/multicast_script.py @@ -159,7 +159,7 @@ if __name__ == "__main__": ], immediate_rendering=False, presentation_delay_us=40000, - qos_config=auracast_config.AuracastQosHigh(), + qos_config=auracast_config.AuracastQosRobust(), auracast_sampling_rate_hz = LC3_SRATE, octets_per_frame = OCTETS_PER_FRAME, transport=TRANSPORT1, diff --git a/src/auracast/server/multicast_frontend.py b/src/auracast/server/multicast_frontend.py index 3a8dae7..c847a24 100644 --- a/src/auracast/server/multicast_frontend.py +++ b/src/auracast/server/multicast_frontend.py @@ -8,6 +8,8 @@ import requests from dotenv import load_dotenv import streamlit as st +from auracast.utils.read_temp import read_case_temp, read_cpu_temp + from auracast import auracast_config from auracast.utils.frontend_auth import ( is_pw_disabled, @@ -88,6 +90,11 @@ QUALITY_MAP = { "Fair (16kHz)": {"rate": 16000, "octets": 40}, } +QOS_PRESET_MAP = { + "Fast": auracast_config.AuracastQosFast(), + "Robust": auracast_config.AuracastQosRobust(), +} + # Try loading persisted settings from backend saved_settings = {} try: @@ -100,6 +107,10 @@ except Exception: # Define is_streaming early from the fetched status for use throughout the UI is_streaming = bool(saved_settings.get("is_streaming", False)) +# Extract secondary status, if provided by the backend /status endpoint. 
+secondary_status = saved_settings.get("secondary") or {} +secondary_is_streaming = bool(saved_settings.get("secondary_is_streaming", secondary_status.get("is_streaming", False))) + st.title("Auracast Audio Mode Control") def render_stream_controls(status_streaming: bool, start_label: str, stop_label: str, mode_label: str): @@ -119,9 +130,10 @@ def render_stream_controls(status_streaming: bool, start_label: str, stop_label: # Audio mode selection with persisted default # Note: backend persists 'USB' for any device: source (including AES67). We default to 'USB' in that case. options = [ - "Demo", - "USB", - "Network", + "Demo", + "Analog", + "USB", + "Network", ] saved_audio_mode = saved_settings.get("audio_mode", "Demo") if saved_audio_mode not in options: @@ -153,7 +165,12 @@ if isinstance(backend_mode_raw, str): elif backend_mode_raw in options: backend_mode_mapped = backend_mode_raw -running_mode = backend_mode_mapped if (is_streaming and backend_mode_mapped) else audio_mode +# When Analog is selected in the UI we always show it as such, even though the +# backend currently persists USB for all device sources. +if audio_mode == "Analog": + running_mode = "Analog" +else: + running_mode = backend_mode_mapped if (is_streaming and backend_mode_mapped) else audio_mode is_started = False is_stopped = False @@ -203,7 +220,7 @@ if audio_mode == "Demo": type=("password"), help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast." ) - col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small", vertical_alignment="center") + col_flags1, col_flags2, col_pdelay, col_qos = st.columns([1, 1, 0.7, 0.6], gap="small", vertical_alignment="center") with col_flags1: assisted_listening = st.checkbox( "Assistive listening", @@ -225,13 +242,13 @@ if audio_mode == "Demo": min_value=10, max_value=200, step=5, value=default_pdelay_ms, help="Delay between capture and presentation for receivers." 
) - default_rtn = int(saved_settings.get('rtn', 4) or 4) - with col_rtn: - rtn_options = [1,2,3,4] - default_rtn_clamped = min(4, max(1, default_rtn)) - rtn = st.selectbox( - "RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped), - help="Number of ISO retransmissions (higher improves robustness at cost of airtime)." + with col_qos: + qos_options = list(QOS_PRESET_MAP.keys()) + saved_qos = saved_settings.get('qos_preset', 'Fast') + default_qos_idx = qos_options.index(saved_qos) if saved_qos in qos_options else 0 + qos_preset = st.selectbox( + "QoS", options=qos_options, index=default_qos_idx, + help="Fast: 2 retransmissions, lower latency. Robust: 4 retransmissions, better reliability." ) #st.info(f"Demo mode selected: {demo_selected} (Streams: {demo_stream_map[demo_selected]['streams']}, Rate: {demo_stream_map[demo_selected]['rate']} Hz)") # Start/Stop buttons for demo mode @@ -283,11 +300,7 @@ if audio_mode == "Demo": assisted_listening_stream=assisted_listening, immediate_rendering=immediate_rendering, presentation_delay_us=int(presentation_delay_ms * 1000), - qos_config=auracast_config.AuracastQoSConfig( - iso_int_multiple_10ms=1, - number_of_retransmissions=int(rtn), - max_transport_latency_ms=int(rtn)*10 + 3, - ), + qos_config=QOS_PRESET_MAP[qos_preset], bigs=bigs1 ) config2 = None @@ -299,11 +312,7 @@ if audio_mode == "Demo": assisted_listening_stream=assisted_listening, immediate_rendering=immediate_rendering, presentation_delay_us=int(presentation_delay_ms * 1000), - qos_config=auracast_config.AuracastQoSConfig( - iso_int_multiple_10ms=1, - number_of_retransmissions=int(rtn), - max_transport_latency_ms=int(rtn)*10 + 3, - ), + qos_config=QOS_PRESET_MAP[qos_preset], bigs=bigs2 ) # Call /init and /init2 @@ -338,111 +347,103 @@ if audio_mode == "Demo": quality = None # Not used in demo mode else: - # Stream quality selection (now enabled) - quality_options = list(QUALITY_MAP.keys()) - default_quality = "Medium (24kHz)" if "Medium (24kHz)" in 
quality_options else quality_options[0] - quality = st.selectbox( - "Stream Quality (Sampling Rate)", - quality_options, - index=quality_options.index(default_quality), - help="Select the audio sampling rate for the stream. Lower rates may improve compatibility." - ) + # --- Mode-specific configuration --- default_name = saved_settings.get('channel_names', ["Broadcast0"])[0] - default_lang = saved_settings.get('languages', ["deu"])[0] - default_input = saved_settings.get('input_device') or 'default' - stream_name = st.text_input( - "Channel Name", - value=default_name, - help="The primary name for your broadcast. Like the SSID of a WLAN, it identifies your stream for receivers." - ) raw_program_info = saved_settings.get('program_info', default_name) if isinstance(raw_program_info, list) and raw_program_info: default_program_info = raw_program_info[0] else: default_program_info = raw_program_info - program_info = st.text_input( - "Program Info", - value=default_program_info, - help="Additional details about the broadcast program, such as its content or purpose. Shown to receivers for more context." - ) - language = st.text_input( - "Language (ISO 639-3)", - value=default_lang, - help="Three-letter language code (e.g., 'eng' for English, 'deu' for German). Used by receivers to display the language of the stream. See: https://en.wikipedia.org/wiki/List_of_ISO_639-3_codes" - ) - # Optional broadcast code for coded streams - stream_passwort = st.text_input( - "Stream Passwort", - value="", - type="password", - help="Optional: Set a broadcast code to protect your stream. Leave empty for an open (uncoded) broadcast." 
- ) - # Flags and QoS row (compact, four columns) - col_flags1, col_flags2, col_pdelay, col_rtn = st.columns([1, 1, 0.7, 0.6], gap="small") - with col_flags1: - assisted_listening = st.checkbox( - "Assistive listening", - value=bool(saved_settings.get('assisted_listening_stream', False)), - help="tells the receiver that this is an assistive listening stream" - ) - with col_flags2: - immediate_rendering = st.checkbox( - "Immediate rendering", - value=bool(saved_settings.get('immediate_rendering', False)), - help="tells the receiver to ignore presentation delay and render immediately if possible." - ) - # QoS/presentation controls inline with flags - default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) - with col_pdelay: - default_pdelay_ms = max(10, min(200, default_pdelay // 1000)) - presentation_delay_ms = st.number_input( - "Delay (ms)", - min_value=10, max_value=200, step=5, value=default_pdelay_ms, - help="Delay between capture and presentation for receivers." - ) - default_rtn = int(saved_settings.get('rtn', 4) or 4) - with col_rtn: - rtn_options = [1,2,3,4] - default_rtn_clamped = min(4, max(1, default_rtn)) - rtn = st.selectbox( - "RTN", options=rtn_options, index=rtn_options.index(default_rtn_clamped), - help="Number of ISO retransmissions (higher improves robustness at cost of airtime)." 
- ) - + default_lang = saved_settings.get('languages', ["deu"])[0] - # Input device selection for USB or AES67 mode - if audio_mode in ("USB", "Network"): + # Per-mode configuration and controls + input_device = None + radio2_enabled = False + radio1_cfg = None + radio2_cfg = None + + if audio_mode == "Analog": + # --- Radio 1 controls --- + st.subheader("Radio 1") + + quality_options = list(QUALITY_MAP.keys()) + default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0] + quality1 = st.selectbox( + "Stream Quality (Radio 1)", + quality_options, + index=quality_options.index(default_quality), + help="Select the audio sampling rate for Radio 1." + ) + + stream_passwort1 = st.text_input( + "Stream Passwort (Radio 1)", + value="", + type="password", + help="Optional: Set a broadcast code for Radio 1." + ) + + col_r1_flags1, col_r1_flags2, col_r1_pdelay, col_r1_qos = st.columns([1, 1, 0.7, 0.6], gap="small") + with col_r1_flags1: + assisted_listening1 = st.checkbox( + "Assistive listening (R1)", + value=bool(saved_settings.get('assisted_listening_stream', False)), + help="tells the receiver that this is an assistive listening stream" + ) + with col_r1_flags2: + immediate_rendering1 = st.checkbox( + "Immediate rendering (R1)", + value=bool(saved_settings.get('immediate_rendering', False)), + help="tells the receiver to ignore presentation delay and render immediately if possible." + ) + default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) + with col_r1_pdelay: + default_pdelay_ms = max(10, min(200, default_pdelay // 1000)) + presentation_delay_ms1 = st.number_input( + "Delay (ms, R1)", + min_value=10, max_value=200, step=5, value=default_pdelay_ms, + help="Delay between capture and presentation for Radio 1." 
+ ) + with col_r1_qos: + qos_options = list(QOS_PRESET_MAP.keys()) + saved_qos = saved_settings.get('qos_preset', 'Fast') + default_qos_idx = qos_options.index(saved_qos) if saved_qos in qos_options else 0 + qos_preset1 = st.selectbox( + "QoS (R1)", options=qos_options, index=default_qos_idx, + help="Fast: 2 retransmissions, lower latency. Robust: 4 retransmissions, better reliability." + ) + + col_r1_name, col_r1_lang = st.columns([2, 1]) + with col_r1_name: + stream_name1 = st.text_input( + "Channel Name (Radio 1)", + value=default_name, + help="Name for the first analog radio (Radio 1)." + ) + with col_r1_lang: + language1 = st.text_input( + "Language (ISO 639-3) (Radio 1)", + value=default_lang, + help="Language code for Radio 1." + ) + program_info1 = st.text_input( + "Program Info (Radio 1)", + value=default_program_info, + help="Program information for Radio 1." + ) + + # Analog mode exposes only ALSA ch1/ch2 inputs. if not is_streaming: - # Only query device lists when NOT streaming to avoid extra backend calls try: - endpoint = "/audio_inputs_pw_usb" if audio_mode == "USB" else "/audio_inputs_pw_network" - resp = requests.get(f"{BACKEND_URL}{endpoint}") + resp = requests.get(f"{BACKEND_URL}/audio_inputs_pw_usb") device_list = resp.json().get('inputs', []) except Exception as e: st.error(f"Failed to fetch devices: {e}") device_list = [] - # Display "name [id]" but use name as value - input_options = [f"{d['name']} [{d['id']}]" for d in device_list] - option_name_map = {f"{d['name']} [{d['id']}]": d['name'] for d in device_list} - device_names = [d['name'] for d in device_list] + analog_devices = [d for d in device_list if d.get('name') in ('ch1', 'ch2')] - # Determine default input by name (from persisted server state) - default_input_name = saved_settings.get('input_device') - if default_input_name not in device_names and device_names: - default_input_name = device_names[0] - default_input_label = None - for label, name in option_name_map.items(): - if 
name == default_input_name: - default_input_label = label - break - if not input_options: - warn_text = ( - "No USB audio input devices found. Connect a USB input and click Refresh." - if audio_mode == "USB" else - "No AES67/Network inputs found." - ) - st.warning(warn_text) + if not analog_devices: + st.warning("No Analog (ch1/ch2) ALSA inputs found. Check asound configuration.") if st.button("Refresh", disabled=is_streaming): try: r = requests.post(f"{BACKEND_URL}/refresh_audio_devices", timeout=8) @@ -451,16 +452,248 @@ else: except Exception as e: st.error(f"Failed to refresh devices: {e}") st.rerun() - input_device = None + analog_names = [d['name'] for d in analog_devices] + else: + analog_devices = [] + analog_names = [] + + if not is_streaming: + if analog_names: + default_r1_idx = 0 + input_device1 = st.selectbox( + "Input Device (Radio 1)", + analog_names, + index=default_r1_idx, + ) else: - col1, col2 = st.columns([3, 1], vertical_alignment="bottom") - with col1: - selected_option = st.selectbox( - "Input Device", - input_options, - index=input_options.index(default_input_label) if default_input_label in input_options else 0 + input_device1 = None + else: + input_device1 = saved_settings.get('input_device') + st.selectbox( + "Input Device (Radio 1)", + [input_device1 or "No device selected"], + index=0, + disabled=True, + help="Stop the stream to change the input device." + ) + + # --- Radio 2 controls --- + st.subheader("Radio 2") + # If the backend reports that the secondary radio is currently streaming, + # initialize the checkbox to checked so the UI reflects the active state + # when the frontend is loaded. + radio2_enabled_default = secondary_is_streaming + radio2_enabled = st.checkbox( + "Enable Radio 2", + value=radio2_enabled_default, + help="Activate a second analog radio with its own quality and timing settings." 
+ ) + + if radio2_enabled: + quality2 = st.selectbox( + "Stream Quality (Radio 2)", + quality_options, + index=quality_options.index(default_quality), + help="Select the audio sampling rate for Radio 2." + ) + + stream_passwort2 = st.text_input( + "Stream Passwort (Radio 2)", + value="", + type="password", + help="Optional: Set a broadcast code for Radio 2." + ) + + col_r2_flags1, col_r2_flags2, col_r2_pdelay, col_r2_qos = st.columns([1, 1, 0.7, 0.6], gap="small") + with col_r2_flags1: + assisted_listening2 = st.checkbox( + "Assistive listening (R2)", + value=bool(saved_settings.get('assisted_listening_stream', False)), + help="tells the receiver that this is an assistive listening stream" + ) + with col_r2_flags2: + immediate_rendering2 = st.checkbox( + "Immediate rendering (R2)", + value=bool(saved_settings.get('immediate_rendering', False)), + help="tells the receiver to ignore presentation delay and render immediately if possible." + ) + with col_r2_pdelay: + presentation_delay_ms2 = st.number_input( + "Delay (ms, R2)", + min_value=10, max_value=200, step=5, value=default_pdelay_ms, + help="Delay between capture and presentation for Radio 2." + ) + with col_r2_qos: + saved_qos2 = saved_settings.get('secondary', {}).get('qos_preset', 'Fast') + default_qos_idx2 = qos_options.index(saved_qos2) if saved_qos2 in qos_options else 0 + qos_preset2 = st.selectbox( + "QoS (R2)", options=qos_options, index=default_qos_idx2, + help="Fast: 2 retransmissions, lower latency. Robust: 4 retransmissions, better reliability." + ) + + col_r2_name, col_r2_lang = st.columns([2, 1]) + with col_r2_name: + stream_name2 = st.text_input( + "Channel Name (Radio 2)", + value=f"{default_name}_2", + help="Name for the second analog radio (Radio 2)." + ) + with col_r2_lang: + language2 = st.text_input( + "Language (ISO 639-3) (Radio 2)", + value=default_lang, + help="Language code for Radio 2." 
+ ) + program_info2 = st.text_input( + "Program Info (Radio 2)", + value=default_program_info, + help="Program information for Radio 2." + ) + + if not is_streaming: + if analog_names: + default_r2_idx = 1 if len(analog_names) > 1 else 0 + input_device2 = st.selectbox( + "Input Device (Radio 2)", + analog_names, + index=default_r2_idx, ) - with col2: + else: + input_device2 = None + else: + input_device2 = saved_settings.get('input_device') + st.selectbox( + "Input Device (Radio 2)", + [input_device2 or "No device selected"], + index=0, + disabled=True, + help="Stop the stream to change the input device." + ) + + radio2_cfg = { + 'id': 1002, + 'name': stream_name2, + 'program_info': program_info2, + 'language': language2, + 'input_device': input_device2, + 'quality': quality2, + 'stream_passwort': stream_passwort2, + 'assisted_listening': assisted_listening2, + 'immediate_rendering': immediate_rendering2, + 'presentation_delay_ms': presentation_delay_ms2, + 'qos_preset': qos_preset2, + } + + radio1_cfg = { + 'id': 1001, + 'name': stream_name1, + 'program_info': program_info1, + 'language': language1, + 'input_device': input_device1, + 'quality': quality1, + 'stream_passwort': stream_passwort1, + 'assisted_listening': assisted_listening1, + 'immediate_rendering': immediate_rendering1, + 'presentation_delay_ms': presentation_delay_ms1, + 'qos_preset': qos_preset1, + } + + else: + # USB/Network: single set of controls shared with the single channel + quality_options = list(QUALITY_MAP.keys()) + default_quality = "Medium (24kHz)" if "Medium (24kHz)" in quality_options else quality_options[0] + quality = st.selectbox( + "Stream Quality (Sampling Rate)", + quality_options, + index=quality_options.index(default_quality), + help="Select the audio sampling rate for the stream. Lower rates may improve compatibility." + ) + + stream_passwort = st.text_input( + "Stream Passwort", + value="", + type="password", + help="Optional: Set a broadcast code to protect your stream. 
Leave empty for an open (uncoded) broadcast." + ) + + col_flags1, col_flags2, col_pdelay, col_qos = st.columns([1, 1, 0.7, 0.6], gap="small") + with col_flags1: + assisted_listening = st.checkbox( + "Assistive listening", + value=bool(saved_settings.get('assisted_listening_stream', False)), + help="tells the receiver that this is an assistive listening stream" + ) + with col_flags2: + immediate_rendering = st.checkbox( + "Immediate rendering", + value=bool(saved_settings.get('immediate_rendering', False)), + help="tells the receiver to ignore presentation delay and render immediately if possible." + ) + default_pdelay = int(saved_settings.get('presentation_delay_us', 40000) or 40000) + with col_pdelay: + default_pdelay_ms = max(10, min(200, default_pdelay // 1000)) + presentation_delay_ms = st.number_input( + "Delay (ms)", + min_value=10, max_value=200, step=5, value=default_pdelay_ms, + help="Delay between capture and presentation for receivers." + ) + with col_qos: + qos_options = list(QOS_PRESET_MAP.keys()) + saved_qos = saved_settings.get('qos_preset', 'Fast') + default_qos_idx = qos_options.index(saved_qos) if saved_qos in qos_options else 0 + qos_preset = st.selectbox( + "QoS", options=qos_options, index=default_qos_idx, + help="Fast: 2 retransmissions, lower latency. Robust: 4 retransmissions, better reliability." + ) + + stream_name = st.text_input( + "Channel Name", + value=default_name, + help="The primary name for your broadcast. Like the SSID of a WLAN, it identifies your stream for receivers." + ) + program_info = st.text_input( + "Program Info", + value=default_program_info, + help="Additional details about the broadcast program, such as its content or purpose. Shown to receivers for more context." + ) + language = st.text_input( + "Language (ISO 639-3)", + value=default_lang, + help="Three-letter language code (e.g., 'eng' for English, 'deu' for German). Used by receivers to display the language of the stream. 
See: https://en.wikipedia.org/wiki/List_of_ISO_639-3_codes" + ) + + if audio_mode in ("USB", "Network"): + if not is_streaming: + try: + endpoint = "/audio_inputs_pw_usb" if audio_mode == "USB" else "/audio_inputs_pw_network" + resp = requests.get(f"{BACKEND_URL}{endpoint}") + device_list = resp.json().get('inputs', []) + except Exception as e: + st.error(f"Failed to fetch devices: {e}") + device_list = [] + + if audio_mode == "USB": + device_list = [d for d in device_list if d.get('name') not in ('ch1', 'ch2')] + + input_options = [f"{d['name']} [{d['id']}]" for d in device_list] + option_name_map = {f"{d['name']} [{d['id']}]": d['name'] for d in device_list} + device_names = [d['name'] for d in device_list] + + default_input_name = saved_settings.get('input_device') + if default_input_name not in device_names and device_names: + default_input_name = device_names[0] + default_input_label = None + for label, name in option_name_map.items(): + if name == default_input_name: + default_input_label = label + break + if not input_options: + warn_text = ( + "No USB audio input devices found. Connect a USB input and click Refresh." + if audio_mode == "USB" else + "No AES67/Network inputs found." 
+ ) + st.warning(warn_text) if st.button("Refresh", disabled=is_streaming): try: r = requests.post(f"{BACKEND_URL}/refresh_audio_devices", timeout=8) @@ -469,21 +702,38 @@ else: except Exception as e: st.error(f"Failed to refresh devices: {e}") st.rerun() - # Send only the device name to backend - input_device = option_name_map.get(selected_option) + input_device = None + else: + col1, col2 = st.columns([3, 1], vertical_alignment="bottom") + with col1: + selected_option = st.selectbox( + "Input Device", + input_options, + index=input_options.index(default_input_label) if default_input_label in input_options else 0 + ) + with col2: + if st.button("Refresh", disabled=is_streaming): + try: + r = requests.post(f"{BACKEND_URL}/refresh_audio_devices", timeout=8) + if not r.ok: + st.error(f"Failed to refresh: {r.text}") + except Exception as e: + st.error(f"Failed to refresh devices: {e}") + st.rerun() + input_device = option_name_map.get(selected_option) + else: + input_device = saved_settings.get('input_device') + current_label = input_device or "No device selected" + st.selectbox( + "Input Device", + [current_label], + index=0, + disabled=True, + help="Stop the stream to change the input device." + ) else: - # When streaming, keep showing the current selection but lock editing. - input_device = saved_settings.get('input_device') - current_label = input_device or "No device selected" - st.selectbox( - "Input Device", - [current_label], - index=0, - disabled=True, - help="Stop the stream to change the input device." 
- ) - else: - input_device = None + input_device = None + start_stream, stop_stream = render_stream_controls(is_streaming, "Start Auracast", "Stop Auracast", running_mode) if stop_stream: @@ -499,48 +749,96 @@ else: if start_stream: # Always send stop to ensure backend is in a clean state, regardless of current status r = requests.post(f"{BACKEND_URL}/stop_audio").json() - #if r['was_running']: - # st.success("Stream Stopped!") - # Small pause lets backend fully release audio devices before re-init time.sleep(1) - # Prepare config using the model (do NOT send qos_config, only relevant fields) - q = QUALITY_MAP[quality] - config = auracast_config.AuracastConfigGroup( - auracast_sampling_rate_hz=q['rate'], - octets_per_frame=q['octets'], - transport='', # is set in backend - assisted_listening_stream=assisted_listening, - immediate_rendering=immediate_rendering, - presentation_delay_us=int(presentation_delay_ms * 1000), - qos_config=auracast_config.AuracastQoSConfig( - iso_int_multiple_10ms=1, - number_of_retransmissions=int(rtn), - max_transport_latency_ms=int(rtn)*10 + 3, - ), - bigs = [ - auracast_config.AuracastBigConfig( - code=(stream_passwort.strip() or None), - name=stream_name, - program_info=program_info, - language=language, - audio_source=(f"device:{input_device}"), - input_format=(f"int16le,{q['rate']},1"), - iso_que_len=1, - sampling_frequency=q['rate'], - octets_per_frame=q['octets'], - ), - ] - ) - try: - r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump()) - if r.status_code == 200: - is_started = True - else: - st.error(f"Failed to initialize: {r.text}") - except Exception as e: - st.error(f"Error: {e}") + if audio_mode == "Analog": + # Build separate configs per radio, each with its own quality and QoS parameters. 
+ is_started = False + + def _build_group_from_radio(cfg: dict) -> auracast_config.AuracastConfigGroup | None: + if not cfg or not cfg.get('input_device'): + return None + q = QUALITY_MAP[cfg['quality']] + return auracast_config.AuracastConfigGroup( + auracast_sampling_rate_hz=q['rate'], + octets_per_frame=q['octets'], + transport='', # is set in backend + assisted_listening_stream=bool(cfg['assisted_listening']), + immediate_rendering=bool(cfg['immediate_rendering']), + presentation_delay_us=int(cfg['presentation_delay_ms'] * 1000), + qos_config=QOS_PRESET_MAP[cfg['qos_preset']], + bigs=[ + auracast_config.AuracastBigConfig( + id=cfg.get('id', 123456), + code=(cfg['stream_passwort'].strip() or None), + name=cfg['name'], + program_info=cfg['program_info'], + language=cfg['language'], + audio_source=f"device:{cfg['input_device']}", + input_format=f"int16le,{q['rate']},1", + iso_que_len=1, + sampling_frequency=q['rate'], + octets_per_frame=q['octets'], + ) + ], + ) + + # Radio 1 (always active if a device is selected) + config1 = _build_group_from_radio(radio1_cfg) + # Radio 2 (optional) + config2 = _build_group_from_radio(radio2_cfg) if radio2_enabled else None + + try: + if config1 is not None: + r1 = requests.post(f"{BACKEND_URL}/init", json=config1.model_dump()) + if r1.status_code == 200: + is_started = True + else: + st.error(f"Failed to initialize Radio 1: {r1.text}") + else: + st.error("Radio 1 has no valid input device configured.") + + if config2 is not None: + r2 = requests.post(f"{BACKEND_URL}/init2", json=config2.model_dump()) + if r2.status_code != 200: + st.error(f"Failed to initialize Radio 2: {r2.text}") + except Exception as e: + st.error(f"Error while starting Analog radios: {e}") + else: + # USB/Network: single config as before, using shared controls + q = QUALITY_MAP[quality] + config = auracast_config.AuracastConfigGroup( + auracast_sampling_rate_hz=q['rate'], + octets_per_frame=q['octets'], + transport='', # is set in backend + 
assisted_listening_stream=assisted_listening, + immediate_rendering=immediate_rendering, + presentation_delay_us=int(presentation_delay_ms * 1000), + qos_config=QOS_PRESET_MAP[qos_preset], + bigs=[ + auracast_config.AuracastBigConfig( + code=(stream_passwort.strip() or None), + name=stream_name, + program_info=program_info, + language=language, + audio_source=(f"device:{input_device}"), + input_format=(f"int16le,{q['rate']},1"), + iso_que_len=1, + sampling_frequency=q['rate'], + octets_per_frame=q['octets'], + ), + ], + ) + + try: + r = requests.post(f"{BACKEND_URL}/init", json=config.model_dump()) + if r.status_code == 200: + is_started = True + else: + st.error(f"Failed to initialize: {r.text}") + except Exception as e: + st.error(f"Error: {e}") # Centralized rerun based on start/stop outcomes if is_started or is_stopped: @@ -563,6 +861,36 @@ if is_started or is_stopped: ############################ with st.expander("System control", expanded=False): + st.subheader("System temperatures") + temp_col1, temp_col2, temp_col3 = st.columns([1, 1, 1]) + with temp_col1: + refresh_temps = st.button("Refresh") + try: + case_temp = read_case_temp() + cpu_temp = read_cpu_temp() + with temp_col2: + st.write(f"CPU: {cpu_temp} °C") + with temp_col3: + st.write(f"Case: {case_temp} °C") + except Exception as e: + st.warning(f"Could not read temperatures: {e}") + + st.subheader("CA Certificate") + st.caption("Download the CA certificate to trust this device's HTTPS connection.") + try: + cert_resp = requests.get(f"{BACKEND_URL}/cert", timeout=2) + if cert_resp.status_code == 200: + st.download_button( + label="Download CA Certificate", + data=cert_resp.content, + file_name="ca_cert.pem", + mime="application/x-pem-file", + ) + else: + st.warning("CA certificate not available.") + except Exception as e: + st.warning(f"Could not fetch CA certificate: {e}") + st.subheader("Change password") if is_pw_disabled(): st.info("Frontend password protection is disabled via 
DISABLE_FRONTEND_PW.") diff --git a/src/auracast/server/multicast_server.py b/src/auracast/server/multicast_server.py index 7692e22..20a3823 100644 --- a/src/auracast/server/multicast_server.py +++ b/src/auracast/server/multicast_server.py @@ -5,15 +5,13 @@ TODO: in the future the multicaster objects should run in their own threads or e import os import logging as log import json -import sys -import threading -from concurrent.futures import Future from datetime import datetime -import time import asyncio +import random from dotenv import load_dotenv from fastapi import FastAPI, HTTPException +from fastapi.responses import FileResponse from fastapi.middleware.cors import CORSMiddleware from auracast import multicast_control, auracast_config import sounddevice as sd # type: ignore @@ -24,21 +22,35 @@ from auracast.utils.sounddevice_utils import ( resolve_input_device_index, refresh_pw_cache, ) -from auracast.utils.reset_utils import reset_nrf54l load_dotenv() # make sure pipewire sets latency -STREAM_SETTINGS_FILE = os.path.join(os.path.dirname(__file__), 'stream_settings.json') +# Primary and secondary persisted settings files +STREAM_SETTINGS_FILE1 = os.path.join(os.path.dirname(__file__), 'stream_settings.json') +STREAM_SETTINGS_FILE2 = os.path.join(os.path.dirname(__file__), 'stream_settings2.json') +CA_CERT_PATH = os.path.join(os.path.dirname(__file__), 'certs', 'ca', 'ca_cert.pem') # Raspberry Pi UART transports TRANSPORT1 = os.getenv('TRANSPORT1', 'serial:/dev/ttyAMA3,1000000,rtscts') # transport for raspberry pi gpio header TRANSPORT2 = os.getenv('TRANSPORT2', 'serial:/dev/ttyAMA4,1000000,rtscts') # transport for raspberry pi gpio header os.environ["PULSE_LATENCY_MSEC"] = "3" -# In-memory cache to avoid disk I/O on hot paths like /status -SETTINGS_CACHE: dict = {} +# Defaults from the AuracastBigConfig model, used to detect whether random_address/id +# were explicitly set or are still at their model default values. 
+_DEFAULT_BIG = auracast_config.AuracastBigConfig() +DEFAULT_BIG_ID = _DEFAULT_BIG.id +DEFAULT_RANDOM_ADDRESS = _DEFAULT_BIG.random_address + +# QoS presets mapping - must match frontend +QOS_PRESET_MAP = { + "Fast": auracast_config.AuracastQosFast(), + "Robust": auracast_config.AuracastQosRobust(), +} + +# In-memory caches to avoid disk I/O on hot paths like /status +SETTINGS_CACHE1: dict = {} +SETTINGS_CACHE2: dict = {} - def get_device_index_by_name(name: str): """Return the device index for a given device name, or None if not found. @@ -55,40 +67,69 @@ def get_device_index_by_name(name: str): return None -def _hydrate_settings_cache_from_disk() -> None: - """Populate SETTINGS_CACHE once from disk at startup. +def _init_settings_cache_from_disk() -> None: + """Populate SETTINGS_CACHE1 and SETTINGS_CACHE2 once from disk at startup. - Safe to call multiple times; errors fall back to empty dict. + If a file doesn't exist, initialize to an empty dict. Any JSON or I/O errors raise. """ - global SETTINGS_CACHE - try: - if os.path.exists(STREAM_SETTINGS_FILE): - with open(STREAM_SETTINGS_FILE, 'r', encoding='utf-8') as f: - SETTINGS_CACHE = json.load(f) - else: - SETTINGS_CACHE = {} - except Exception: - SETTINGS_CACHE = {} + global SETTINGS_CACHE1, SETTINGS_CACHE2 + if os.path.exists(STREAM_SETTINGS_FILE1): + with open(STREAM_SETTINGS_FILE1, 'r', encoding='utf-8') as f: + SETTINGS_CACHE1 = json.load(f) + else: + SETTINGS_CACHE1 = {} + if os.path.exists(STREAM_SETTINGS_FILE2): + with open(STREAM_SETTINGS_FILE2, 'r', encoding='utf-8') as f: + SETTINGS_CACHE2 = json.load(f) + else: + SETTINGS_CACHE2 = {} def load_stream_settings() -> dict: - """Return stream settings from in-memory cache. + """Return PRIMARY stream settings from in-memory cache. - The cache is hydrated once at startup and updated by save_stream_settings(). - No disk I/O occurs here. + Hydrated once at startup and updated by save_stream_settings(). No disk I/O occurs here. 
""" - global SETTINGS_CACHE - return SETTINGS_CACHE + global SETTINGS_CACHE1 + return SETTINGS_CACHE1 + +def load_stream_settings2() -> dict: + """Return SECONDARY stream settings from in-memory cache.""" + global SETTINGS_CACHE2 + return SETTINGS_CACHE2 def save_stream_settings(settings: dict): - """Update in-memory settings cache and persist to disk.""" - global SETTINGS_CACHE - SETTINGS_CACHE = dict(settings) - try: - with open(STREAM_SETTINGS_FILE, 'w', encoding='utf-8') as f: - json.dump(SETTINGS_CACHE, f, indent=2) - except Exception as e: - log.error('Unable to persist stream settings: %s', e) + """Update PRIMARY in-memory settings cache and persist to disk.""" + global SETTINGS_CACHE1 + SETTINGS_CACHE1 = dict(settings) + os.makedirs(os.path.dirname(STREAM_SETTINGS_FILE1), exist_ok=True) + with open(STREAM_SETTINGS_FILE1, 'w', encoding='utf-8') as f: + json.dump(SETTINGS_CACHE1, f, indent=2) + f.flush() + os.fsync(f.fileno()) + log.info("Saved primary settings to %s", STREAM_SETTINGS_FILE1) +def save_stream_settings2(settings: dict): + """Update SECONDARY in-memory settings cache and persist to disk.""" + global SETTINGS_CACHE2 + SETTINGS_CACHE2 = dict(settings) + os.makedirs(os.path.dirname(STREAM_SETTINGS_FILE2), exist_ok=True) + with open(STREAM_SETTINGS_FILE2, 'w', encoding='utf-8') as f: + json.dump(SETTINGS_CACHE2, f, indent=2) + f.flush() + os.fsync(f.fileno()) + log.info("Saved secondary settings to %s", STREAM_SETTINGS_FILE2) + +def save_settings(persisted: dict, secondary: bool = False) -> None: + """Attach timestamp and persist using the appropriate cache/file.""" + persisted = dict(persisted) + persisted['timestamp'] = datetime.utcnow().isoformat() + if secondary: + save_stream_settings2(persisted) + else: + save_stream_settings(persisted) + +def gen_random_add() -> str: + return ':'.join(['%02X' % random.randint(0, 255) for _ in range(6)]) app = FastAPI() @@ -104,90 +145,197 @@ app.add_middleware( # Initialize global configuration 
global_config_group = auracast_config.AuracastConfigGroup() -class StreamerWorker: # TODO: is wraping in this Worker stricly nececcarry ? - """Owns multicaster(s) on a dedicated asyncio loop in a background thread.""" +# Module-level state replacing StreamerWorker +multicaster1: multicast_control.Multicaster | None = None +multicaster2: multicast_control.Multicaster | None = None +_stream_lock = asyncio.Lock() # serialize initialize/stop_audio on API side - def __init__(self) -> None: - self._thread: threading.Thread | None = None - self._loop: asyncio.AbstractEventLoop | None = None - # These live only on the worker loop - self._multicaster1: multicast_control.Multicaster | None = None - self._multicaster2: multicast_control.Multicaster | None = None - self._started = threading.Event() - # ---------- Thread/loop management ---------- - def start(self) -> None: - if self._thread and self._thread.is_alive(): - return - self._thread = threading.Thread(target=self._run, name="StreamerWorker", daemon=True) - self._thread.start() - self._started.wait(timeout=5) - - def _run(self) -> None: - loop = asyncio.new_event_loop() - self._loop = loop - asyncio.set_event_loop(loop) - self._started.set() - try: - loop.run_forever() - finally: - try: - pending = asyncio.all_tasks(loop) - for t in pending: - t.cancel() - loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True)) - except Exception: - pass - loop.close() - - def _ensure_loop(self) -> asyncio.AbstractEventLoop: - if not self._loop: - raise RuntimeError("StreamerWorker loop not started") - return self._loop - - async def call(self, coro_func, *args, **kwargs): - """Schedule a coroutine on the worker loop and await its result from the API loop.""" - loop = self._ensure_loop() - fut: Future = asyncio.run_coroutine_threadsafe(coro_func(*args, **kwargs), loop) - return await asyncio.wrap_future(fut) - - # ---------- Worker-loop coroutines ---------- - async def _w_init_primary(self, conf: 
auracast_config.AuracastConfigGroup) -> dict: - # Clean any previous - if self._multicaster1 is not None: - try: - await self._multicaster1.shutdown() - except Exception: - pass - self._multicaster1 = None - - # overwrite some configurations - conf.transport = TRANSPORT1 - # Enable adaptive frame dropping only for device-based inputs (not file/demo) - try: - conf.enable_adaptive_frame_dropping = any( - isinstance(big.audio_source, str) and big.audio_source.startswith('device:') - for big in conf.bigs +async def _init_i2c_on_startup() -> None: + # Ensure i2c-dev kernel module is loaded (required for /dev/i2c-* access) + try: + proc = await asyncio.create_subprocess_exec( + "sudo", "modprobe", "i2c-dev", + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + log.warning( + "modprobe i2c-dev failed (rc=%s): %s", + proc.returncode, + (stderr or b"").decode(errors="ignore").strip(), ) - except Exception: - conf.enable_adaptive_frame_dropping = False - # Derive device name and input mode + else: + log.info("i2c-dev module loaded successfully") + except Exception as e: + log.warning("Exception running modprobe i2c-dev: %s", e, exc_info=True) + + # Table of (register, expected_value) + dev_add = "0x4a" + reg_table = [ + ("0x00", "0x00"), + ("0x06", "0x10"), + ("0x07", "0x10"), + ] + for reg, expected in reg_table: + write_cmd = ["i2cset", "-f", "-y", "1", dev_add, reg, expected] + try: + proc = await asyncio.create_subprocess_exec( + *write_cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + log.warning( + "i2cset failed (%s): rc=%s stderr=%s", + " ".join(write_cmd), + proc.returncode, + (stderr or b"").decode(errors="ignore").strip(), + ) + # If the write failed, skip verification for this register + continue + except Exception as e: + log.warning("Exception running i2cset (%s): %s", " 
".join(write_cmd), e, exc_info=True) + continue + + # Verify configured register with i2cget + read_cmd = ["i2cget", "-f", "-y", "1", dev_add, reg] + try: + proc = await asyncio.create_subprocess_exec( + *read_cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + log.warning( + "i2cget failed (%s): rc=%s stderr=%s", + " ".join(read_cmd), + proc.returncode, + (stderr or b"").decode(errors="ignore").strip(), + ) + continue + + value = (stdout or b"").decode(errors="ignore").strip() + if value != expected: + log.error( + "I2C register verify failed: addr=0x4a reg=%s expected=%s got=%s", + reg, + expected, + value, + ) + else: + log.info( + "I2C register verified: addr=0x4a reg=%s value=%s", + reg, + value, + ) + except Exception as e: + log.warning("Exception running i2cget (%s): %s", " ".join(read_cmd), e, exc_info=True) + + +async def _set_adc_level_on_startup() -> None: + """Ensure ADC mixer level is set at startup. 
+ + Runs: amixer -c 2 set 'ADC' x% + """ + cmd = ["amixer", "-c", "2", "set", "ADC", "80%"] + try: + proc = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + log.warning( + "amixer ADC level command failed (rc=%s): %s", + proc.returncode, + (stderr or b"" ).decode(errors="ignore").strip(), + ) + else: + log.info("amixer ADC level set successfully: %s", (stdout or b"" ).decode(errors="ignore").strip()) + except Exception as e: + log.warning("Exception running amixer ADC level command: %s", e, exc_info=True) + + +async def _stop_all() -> bool: + global multicaster1, multicaster2 + was_running = False + if multicaster1 is not None: + try: + await multicaster1.stop_streaming() + await multicaster1.shutdown() + was_running = True + finally: + multicaster1 = None + if multicaster2 is not None: + try: + await multicaster2.stop_streaming() + await multicaster2.shutdown() + was_running = True + finally: + multicaster2 = None + return was_running + +async def _status_primary() -> dict: + if multicaster1 is None: + return {'is_initialized': False, 'is_streaming': False} + return multicaster1.get_status() + +async def _status_secondary() -> dict: + """Return runtime status for the SECONDARY multicaster. + + Mirrors _status_primary but for multicaster2 so that /status can expose + both primary and secondary state to the frontend. 
+ """ + if multicaster2 is None: + return {'is_initialized': False, 'is_streaming': False} + return multicaster2.get_status() + +async def _stream_lc3(audio_data: dict[str, str], bigs_template: list) -> None: + if multicaster1 is None: + raise HTTPException(status_code=500, detail='Auracast endpoint was never intialized') + for big in bigs_template: + if big.language not in audio_data: + raise HTTPException(status_code=500, detail='language len missmatch') + big.audio_source = audio_data[big.language].encode('latin-1') + multicaster1.big_conf = bigs_template + await multicaster1.start_streaming() + +def _resolve_qos_preset_name(qos_config) -> str: + """Resolve qos_config to preset name based on retransmission count.""" + if qos_config is None: + return "Fast" + rtn = getattr(qos_config, 'number_of_retransmissions', 2) + # Fast has 2 retransmissions, Robust has 4 + return "Robust" if rtn >= 4 else "Fast" + +async def init_radio(transport: str, conf: auracast_config.AuracastConfigGroup, current_mc: multicast_control.Multicaster | None): + try: + log.info('Initializing multicaster with transport %s and config:\n %s', transport, conf.model_dump_json(indent=2)) + + if current_mc is not None: + await current_mc.shutdown() + current_mc = None + + conf.transport = transport + first_source = conf.bigs[0].audio_source if conf.bigs else '' input_device_name = None audio_mode_persist = 'Demo' - if first_source.startswith('device:'): + if isinstance(first_source, str) and first_source.startswith('device:'): input_device_name = first_source.split(':', 1)[1] if ':' in first_source else None - try: - alsa_usb_names = {d.get('name') for _, d in get_alsa_usb_inputs()} - except Exception: - alsa_usb_names = set() - try: - net_names = {d.get('name') for _, d in get_network_pw_inputs()} - except Exception: - usb_names, net_names = set(), set() - audio_mode_persist = 'Network' if (input_device_name in net_names) else 'USB' + alsa_usb_names = {d.get('name') for _, d in 
get_alsa_usb_inputs()} + net_names = {d.get('name') for _, d in get_network_pw_inputs()} + if input_device_name in ('ch1', 'ch2'): + # Explicitly treat ch1/ch2 as Analog input mode + audio_mode_persist = 'Analog' + else: + audio_mode_persist = 'Network' if (input_device_name in net_names) else 'USB' - # Map device name to index using centralized resolver if input_device_name and input_device_name.isdigit(): device_index = int(input_device_name) else: @@ -195,30 +343,32 @@ class StreamerWorker: # TODO: is wraping in this Worker stricly nececcarry ? if device_index is None: raise HTTPException(status_code=400, detail=f"Audio device '{input_device_name}' not found.") for big in conf.bigs: - if big.audio_source.startswith('device:'): + if isinstance(big.audio_source, str) and big.audio_source.startswith('device:'): big.audio_source = f'device:{device_index}' devinfo = sd.query_devices(device_index) - # Force capture at 48 kHz to avoid resampler latency and 44.1 kHz incompatibilities - capture_rate = 48000 max_in = int(devinfo.get('max_input_channels') or 1) channels = max(1, min(2, max_in)) for big in conf.bigs: - big.input_format = f"int16le,{capture_rate},{channels}" + big.input_format = f"int16le,{48000},{channels}" - # Coerce QoS: compute max_transport_latency from RTN if qos_config present - if getattr(conf, 'qos_config', None) and getattr(conf.qos_config, 'number_of_retransmissions', None) is not None: - conf.qos_config.max_transport_latency_ms = int(conf.qos_config.number_of_retransmissions) * 10 + 3 + conf.qos_config.max_transport_latency_ms = int(conf.qos_config.number_of_retransmissions) * 10 + 3 + + # Only generate a new random_address if the BIG is still at the model default. 
+ for big in conf.bigs: + if not getattr(big, 'random_address', None) or big.random_address == DEFAULT_RANDOM_ADDRESS: + big.random_address = gen_random_add() + + # Log the final, fully-updated configuration just before creating the Multicaster + log.info('Final multicaster config (transport=%s):\n %s', transport, conf.model_dump_json(indent=2)) + + mc = multicast_control.Multicaster(conf, conf.bigs) + await mc.init_broadcast() - # Create and init multicaster1 - self._multicaster1 = multicast_control.Multicaster(conf, conf.bigs) - await reset_nrf54l(1) - await self._multicaster1.init_broadcast() auto_started = False - if any(big.audio_source.startswith("device:") or big.audio_source.startswith("file:") for big in conf.bigs): - await self._multicaster1.start_streaming() + if any(isinstance(big.audio_source, str) and (big.audio_source.startswith("device:") or big.audio_source.startswith("file:")) for big in conf.bigs): + await mc.start_streaming() auto_started = True - # Return proposed settings to persist on API side demo_count = sum(1 for big in conf.bigs if isinstance(big.audio_source, str) and big.audio_source.startswith('file:')) demo_rate = int(conf.auracast_sampling_rate_hz or 0) demo_type = None @@ -227,7 +377,7 @@ class StreamerWorker: # TODO: is wraping in this Worker stricly nececcarry ? demo_type = f"{demo_count} × {demo_rate//1000}kHz" else: demo_type = f"{demo_count} × {demo_rate}Hz" - return { + persisted = { 'channel_names': [big.name for big in conf.bigs], 'languages': [big.language for big in conf.bigs], 'audio_mode': audio_mode_persist, @@ -237,162 +387,62 @@ class StreamerWorker: # TODO: is wraping in this Worker stricly nececcarry ? 
'auracast_sampling_rate_hz': conf.auracast_sampling_rate_hz, 'octets_per_frame': conf.octets_per_frame, 'presentation_delay_us': getattr(conf, 'presentation_delay_us', None), - 'rtn': getattr(getattr(conf, 'qos_config', None), 'number_of_retransmissions', None), + 'qos_preset': _resolve_qos_preset_name(conf.qos_config), 'immediate_rendering': getattr(conf, 'immediate_rendering', False), 'assisted_listening_stream': getattr(conf, 'assisted_listening_stream', False), 'stream_password': (conf.bigs[0].code if conf.bigs and getattr(conf.bigs[0], 'code', None) else None), + 'big_ids': [getattr(big, 'id', DEFAULT_BIG_ID) for big in conf.bigs], + 'big_random_addresses': [getattr(big, 'random_address', DEFAULT_RANDOM_ADDRESS) for big in conf.bigs], 'demo_total_streams': demo_count, 'demo_stream_type': demo_type, 'is_streaming': auto_started, + 'demo_sources': [str(b.audio_source) for b in conf.bigs if isinstance(b.audio_source, str) and b.audio_source.startswith('file:')], } + return mc, persisted + except HTTPException: + raise + except Exception as e: + log.error("Exception in init_radio: %s", traceback.format_exc()) + raise HTTPException(status_code=500, detail=str(e)) - async def _w_init_secondary(self, conf: auracast_config.AuracastConfigGroup) -> None: - if self._multicaster2 is not None: - try: - await self._multicaster2.shutdown() - except Exception: - pass - self._multicaster2 = None - - conf.transport = TRANSPORT2 - # Enable adaptive frame dropping only for device-based inputs (not file/demo) - try: - conf.enable_adaptive_frame_dropping = any( - isinstance(big.audio_source, str) and big.audio_source.startswith('device:') - for big in conf.bigs - ) - except Exception: - conf.enable_adaptive_frame_dropping = False - for big in conf.bigs: - if big.audio_source.startswith('device:'): - device_name = big.audio_source.split(':', 1)[1] - # Resolve backend preference by membership - try: - net_names = {d.get('name') for _, d in get_network_pw_inputs()} - except Exception: 
- net_names = set() - try: - alsa_usb_names = {d.get('name') for _, d in get_alsa_usb_inputs()} - except Exception: - alsa_usb_names = set() - device_index = resolve_input_device_index(device_name) - if device_index is None: - raise HTTPException(status_code=400, detail=f"Audio device '{device_name}' not found.") - big.audio_source = f'device:{device_index}' - # Coerce QoS: compute max_transport_latency from RTN if qos_config present - if getattr(conf, 'qos_config', None) and getattr(conf.qos_config, 'number_of_retransmissions', None) is not None: - conf.qos_config.max_transport_latency_ms = int(conf.qos_config.number_of_retransmissions) * 10 + 3 - - - self._multicaster2 = multicast_control.Multicaster(conf, conf.bigs) - await reset_nrf54l(0) - await self._multicaster2.init_broadcast() - if any(big.audio_source.startswith("device:") or big.audio_source.startswith("file:") for big in conf.bigs): - await self._multicaster2.start_streaming() - - async def _w_stop_all(self) -> bool: - was_running = False - if self._multicaster1 is not None: - try: - await self._multicaster1.stop_streaming() - await self._multicaster1.shutdown() - was_running = True - finally: - self._multicaster1 = None - if self._multicaster2 is not None: - try: - await self._multicaster2.stop_streaming() - await self._multicaster2.shutdown() - was_running = True - finally: - self._multicaster2 = None - return was_running - - async def _w_status_primary(self) -> dict: - if self._multicaster1 is None: - return {'is_initialized': False, 'is_streaming': False} - try: - return self._multicaster1.get_status() - except Exception: - return {'is_initialized': True, 'is_streaming': False} - - async def _w_stream_lc3(self, audio_data: dict[str, str], bigs_template: list) -> None: - if self._multicaster1 is None: - raise HTTPException(status_code=500, detail='Auracast endpoint was never intialized') - # Update bigs audio_source with provided bytes and start - for big in bigs_template: - if big.language not in 
audio_data: - raise HTTPException(status_code=500, detail='language len missmatch') - big.audio_source = audio_data[big.language].encode('latin-1') - self._multicaster1.big_conf = bigs_template - await self._multicaster1.start_streaming() - - -# Create the worker singleton and a route-level lock -streamer = StreamerWorker() -# multicaster1: multicast_control.Multicaster | None = None # kept for legacy references, do not use on API loop -# multicaster2: multicast_control.Multicaster | None = None -_stream_lock = asyncio.Lock() # serialize initialize/stop_audio on API side @app.post("/init") async def initialize(conf: auracast_config.AuracastConfigGroup): """Initializes the primary broadcaster on the streamer thread.""" - global global_config_group async with _stream_lock: - try: - global_config_group = conf - log.info('Initializing multicaster1 with config:\n %s', conf.model_dump_json(indent=2)) - persisted = await streamer.call(streamer._w_init_primary, conf) - # Persist returned settings (avoid touching from worker thread) - persisted['timestamp'] = datetime.utcnow().isoformat() - save_stream_settings(persisted) - except Exception as e: - log.error("Exception in /init: %s", traceback.format_exc()) - raise HTTPException(status_code=500, detail=str(e)) + global multicaster1, global_config_group + mc, persisted = await init_radio(TRANSPORT1, conf, multicaster1) + multicaster1 = mc + global_config_group = conf + save_settings(persisted, secondary=False) @app.post("/init2") async def initialize2(conf: auracast_config.AuracastConfigGroup): """Initializes the secondary broadcaster on the streamer thread.""" - try: - log.info('Initializing multicaster2 with config:\n %s', conf.model_dump_json(indent=2)) - await streamer.call(streamer._w_init_secondary, conf) - try: - is_demo = any(isinstance(big.audio_source, str) and big.audio_source.startswith('file:') for big in conf.bigs) - if is_demo: - settings = load_stream_settings() or {} - primary_count = 
int(settings.get('demo_total_streams') or len(settings.get('channel_names') or [])) - secondary_count = len(conf.bigs or []) - total = primary_count + secondary_count - settings['demo_total_streams'] = total - demo_rate = int(conf.auracast_sampling_rate_hz or 0) - if demo_rate > 0: - if demo_rate in (48000, 24000, 16000): - settings['demo_stream_type'] = f"{total} × {demo_rate//1000}kHz" - else: - settings['demo_stream_type'] = f"{total} × {demo_rate}Hz" - settings['timestamp'] = datetime.utcnow().isoformat() - save_stream_settings(settings) - except Exception: - log.warning("Failed to persist demo_total_streams in /init2", exc_info=True) - except Exception as e: - log.error("Exception in /init2: %s", traceback.format_exc()) - raise HTTPException(status_code=500, detail=str(e)) - - - + async with _stream_lock: + global multicaster2 + mc, persisted = await init_radio(TRANSPORT2, conf, multicaster2) + multicaster2 = mc + save_settings(persisted, secondary=True) @app.post("/stop_audio") async def stop_audio(): """Stops streaming on both multicaster1 and multicaster2 (worker thread).""" try: - was_running = await streamer.call(streamer._w_stop_all) + was_running = await _stop_all() - # Persist is_streaming=False + # Persist is_streaming=False for both primary and secondary try: - settings = load_stream_settings() or {} - if settings.get('is_streaming'): - settings['is_streaming'] = False - settings['timestamp'] = datetime.utcnow().isoformat() - save_stream_settings(settings) + settings1 = load_stream_settings() or {} + if settings1.get('is_streaming'): + settings1['is_streaming'] = False + settings1['timestamp'] = datetime.utcnow().isoformat() + save_stream_settings(settings1) + + settings2 = load_stream_settings2() or {} + if settings2.get('is_streaming'): + settings2['is_streaming'] = False + settings2['timestamp'] = datetime.utcnow().isoformat() + save_stream_settings2(settings2) except Exception: log.warning("Failed to persist is_streaming=False during stop_audio", 
exc_info=True) @@ -402,92 +452,181 @@ async def stop_audio(): log.error("Exception in /stop_audio: %s", traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) - @app.post("/stream_lc3") async def send_audio(audio_data: dict[str, str]): """Sends a block of pre-coded LC3 audio via the worker.""" try: - await streamer.call(streamer._w_stream_lc3, audio_data, list(global_config_group.bigs)) + await _stream_lc3(audio_data, list(global_config_group.bigs)) return {"status": "audio_sent"} except Exception as e: raise HTTPException(status_code=500, detail=str(e)) +@app.get("/cert") +async def download_ca_cert(): + """Download the CA certificate for TLS verification.""" + if not os.path.exists(CA_CERT_PATH): + raise HTTPException(status_code=404, detail="CA certificate not found") + return FileResponse(CA_CERT_PATH, filename="ca_cert.pem", media_type="application/x-pem-file") @app.get("/status") async def get_status(): """Gets current status (worker) merged with persisted settings cache.""" - status = await streamer.call(streamer._w_status_primary) - status.update(load_stream_settings()) + primary_runtime = await _status_primary() + primary_persisted = load_stream_settings() or {} + + # Preserve existing top-level shape for primary for compatibility + status: dict = {} + status.update(primary_runtime) + status.update(primary_persisted) + + # Attach secondary block with its own runtime + persisted settings + secondary_runtime = await _status_secondary() + secondary_persisted = load_stream_settings2() or {} + secondary: dict = {} + secondary.update(secondary_runtime) + secondary.update(secondary_persisted) + status["secondary"] = secondary + status["secondary_is_streaming"] = bool(secondary.get("is_streaming", False)) + return status - async def _autostart_from_settings(): - """Background task: auto-start last selected device-based input at server startup. 
+ settings1 = load_stream_settings() or {} + settings2 = load_stream_settings2() or {} - Skips Webapp (webrtc) and Demo (file) modes. Polls every 2 seconds until the - saved device name appears in either USB or Network lists, then builds a config - and initializes streaming. - """ - try: - settings = load_stream_settings() or {} + log.info("[AUTOSTART] Starting autostart check: primary_ts=%s secondary_ts=%s", settings1.get('timestamp'), settings2.get('timestamp')) + + async def do_primary(): + global multicaster1, global_config_group + settings = settings1 audio_mode = settings.get('audio_mode') input_device_name = settings.get('input_device') rate = settings.get('auracast_sampling_rate_hz') octets = settings.get('octets_per_frame') pres_delay = settings.get('presentation_delay_us') - saved_rtn = settings.get('rtn') + saved_qos_preset = settings.get('qos_preset', 'Fast') immediate_rendering = settings.get('immediate_rendering', False) assisted_listening_stream = settings.get('assisted_listening_stream', False) channel_names = settings.get('channel_names') or ["Broadcast0"] program_info = settings.get('program_info') or channel_names languages = settings.get('languages') or ["deu"] + big_ids = settings.get('big_ids') or [] + big_addrs = settings.get('big_random_addresses') or [] stream_password = settings.get('stream_password') original_ts = settings.get('timestamp') previously_streaming = bool(settings.get('is_streaming')) - # Only auto-start if the previous state was streaming and it's a device-based input. 
+ log.info( + "[AUTOSTART][PRIMARY] loaded settings: previously_streaming=%s audio_mode=%s rate=%s octets=%s pres_delay=%s qos_preset=%s immediate_rendering=%s assisted_listening_stream=%s demo_sources=%s", + previously_streaming, + audio_mode, + rate, + octets, + pres_delay, + saved_qos_preset, + immediate_rendering, + assisted_listening_stream, + (settings.get('demo_sources') or []), + ) + if not previously_streaming: + log.info("[AUTOSTART][PRIMARY] Skipping autostart: is_streaming flag was False in persisted settings") return - if not input_device_name: - return - if rate is None or octets is None: - # Not enough info to reconstruct stream reliably - return - - # Avoid duplicate start if already streaming - current = await streamer.call(streamer._w_status_primary) - if current.get('is_streaming'): - return - - while True: - # Do not interfere if user started a stream manually in the meantime - current = await streamer.call(streamer._w_status_primary) - if current.get('is_streaming'): + if audio_mode == 'Demo': + demo_sources = settings.get('demo_sources') or [] + if not demo_sources or rate is None or octets is None: + log.warning( + "[AUTOSTART][PRIMARY] Demo autostart aborted: demo_sources_present=%s rate=%s octets=%s", + bool(demo_sources), + rate, + octets, + ) + return + bigs = [] + for i, src in enumerate(demo_sources): + name = channel_names[i] if i < len(channel_names) else f"Broadcast{i}" + pinfo = program_info[i] if isinstance(program_info, list) and i < len(program_info) else (program_info[0] if isinstance(program_info, list) and program_info else program_info) + lang = languages[i] if i < len(languages) else (languages[0] if languages else "deu") + bigs.append( + auracast_config.AuracastBigConfig( + id=big_ids[i] if i < len(big_ids) else DEFAULT_BIG_ID, + random_address=big_addrs[i] if i < len(big_addrs) else DEFAULT_RANDOM_ADDRESS, + code=stream_password, + name=name, + program_info=pinfo, + language=lang, + audio_source=src, + iso_que_len=1, + 
sampling_frequency=rate, + octets_per_frame=octets, + ) + ) + log.info( + "[AUTOSTART][PRIMARY] Building demo config for %d streams, rate=%s, octets=%s", + len(bigs), + rate, + octets, + ) + + conf = auracast_config.AuracastConfigGroup( + auracast_sampling_rate_hz=rate, + octets_per_frame=octets, + transport=TRANSPORT1, + immediate_rendering=immediate_rendering, + assisted_listening_stream=assisted_listening_stream, + presentation_delay_us=pres_delay if pres_delay is not None else 40000, + bigs=bigs, + ) + conf.qos_config = QOS_PRESET_MAP.get(saved_qos_preset, QOS_PRESET_MAP["Fast"]) + log.info("[AUTOSTART][PRIMARY] Scheduling demo init_radio in 2s") + await asyncio.sleep(2) + async with _stream_lock: + log.info("[AUTOSTART][PRIMARY] Calling init_radio for demo autostart") + mc, persisted = await init_radio(TRANSPORT1, conf, multicaster1) + multicaster1 = mc + global_config_group = conf + save_settings(persisted, secondary=False) + log.info("[AUTOSTART][PRIMARY] Demo autostart completed; settings persisted with is_streaming=%s", persisted.get('is_streaming')) + return + if not input_device_name or rate is None or octets is None: + log.info( + "[AUTOSTART][PRIMARY] Skipping device-based autostart: input_device=%s rate=%s octets=%s", + input_device_name, + rate, + octets, + ) + return + current = await _status_primary() + if current.get('is_streaming'): + log.info("[AUTOSTART][PRIMARY] Skipping device-based autostart: stream already running") + return + while True: + current = await _status_primary() + if current.get('is_streaming'): + log.info("[AUTOSTART][PRIMARY] Aborting wait loop: stream started externally") return - # Abort if saved settings changed to a different target while we were polling current_settings = load_stream_settings() or {} if current_settings.get('timestamp') != original_ts: - # Settings were updated (likely by user via /init) - # If the target device or mode changed, stop autostart if ( current_settings.get('input_device') != input_device_name 
or current_settings.get('audio_mode') != audio_mode ): + log.info("[AUTOSTART][PRIMARY] Aborting wait loop: settings changed (audio_mode/input_device)") return - # Check against the cached device lists usb = [d for _, d in get_alsa_usb_inputs()] net = [d for _, d in get_network_pw_inputs()] names = {d.get('name') for d in usb} | {d.get('name') for d in net} if input_device_name in names: - # Build a minimal config based on saved fields + log.info("[AUTOSTART][PRIMARY] Device '%s' detected, starting autostart", input_device_name) bigs = [ auracast_config.AuracastBigConfig( + id=big_ids[0] if big_ids else DEFAULT_BIG_ID, + random_address=big_addrs[0] if big_addrs else DEFAULT_RANDOM_ADDRESS, code=stream_password, name=channel_names[0] if channel_names else "Broadcast0", program_info=program_info[0] if isinstance(program_info, list) and program_info else program_info, language=languages[0] if languages else "deu", audio_source=f"device:{input_device_name}", - # input_format is intentionally omitted to use the default iso_que_len=1, sampling_frequency=rate, octets_per_frame=octets, @@ -502,33 +641,184 @@ async def _autostart_from_settings(): presentation_delay_us=pres_delay if pres_delay is not None else 40000, bigs=bigs, ) - # Attach QoS if saved_rtn present - conf.qos_config = auracast_config.AuracastQoSConfig( - iso_int_multiple_10ms=1, - number_of_retransmissions=int(saved_rtn), - max_transport_latency_ms=int(saved_rtn) * 10 + 3, - ) - - # Initialize and start + conf.qos_config = QOS_PRESET_MAP.get(saved_qos_preset, QOS_PRESET_MAP["Fast"]) + log.info("[AUTOSTART][PRIMARY] Scheduling device init_radio in 2s") await asyncio.sleep(2) - await initialize(conf) + async with _stream_lock: + log.info("[AUTOSTART][PRIMARY] Calling init_radio for device autostart") + mc, persisted = await init_radio(TRANSPORT1, conf, multicaster1) + multicaster1 = mc + global_config_group = conf + save_settings(persisted, secondary=False) + log.info("[AUTOSTART][PRIMARY] Device autostart 
completed; settings persisted with is_streaming=%s", persisted.get('is_streaming')) return await asyncio.sleep(2) - except Exception: - log.warning("Autostart task failed", exc_info=True) + + async def do_secondary(): + global multicaster2 + settings = settings2 + audio_mode = settings.get('audio_mode') + input_device_name = settings.get('input_device') + rate = settings.get('auracast_sampling_rate_hz') + octets = settings.get('octets_per_frame') + pres_delay = settings.get('presentation_delay_us') + saved_qos_preset = settings.get('qos_preset', 'Fast') + immediate_rendering = settings.get('immediate_rendering', False) + assisted_listening_stream = settings.get('assisted_listening_stream', False) + channel_names = settings.get('channel_names') or ["Broadcast0"] + program_info = settings.get('program_info') or channel_names + languages = settings.get('languages') or ["deu"] + big_ids = settings.get('big_ids') or [] + big_addrs = settings.get('big_random_addresses') or [] + stream_password = settings.get('stream_password') + original_ts = settings.get('timestamp') + previously_streaming = bool(settings.get('is_streaming')) + log.info( + "[AUTOSTART][SECONDARY] loaded settings: previously_streaming=%s audio_mode=%s rate=%s octets=%s pres_delay=%s qos_preset=%s immediate_rendering=%s assisted_listening_stream=%s demo_sources=%s", + previously_streaming, + audio_mode, + rate, + octets, + pres_delay, + saved_qos_preset, + immediate_rendering, + assisted_listening_stream, + (settings.get('demo_sources') or []), + ) + if not previously_streaming: + log.info("[AUTOSTART][SECONDARY] Skipping autostart: is_streaming flag was False in persisted settings") + return + if audio_mode == 'Demo': + demo_sources = settings.get('demo_sources') or [] + if not demo_sources or rate is None or octets is None: + log.warning( + "[AUTOSTART][SECONDARY] Demo autostart aborted: demo_sources_present=%s rate=%s octets=%s", + bool(demo_sources), + rate, + octets, + ) + return + bigs = [] + for i, 
src in enumerate(demo_sources): + name = channel_names[i] if i < len(channel_names) else f"Broadcast{i}" + pinfo = program_info[i] if isinstance(program_info, list) and i < len(program_info) else (program_info[0] if isinstance(program_info, list) and program_info else program_info) + lang = languages[i] if i < len(languages) else (languages[0] if languages else "deu") + bigs.append( + auracast_config.AuracastBigConfig( + code=stream_password, + name=name, + program_info=pinfo, + language=lang, + audio_source=src, + iso_que_len=1, + sampling_frequency=rate, + octets_per_frame=octets, + ) + ) + conf = auracast_config.AuracastConfigGroup( + auracast_sampling_rate_hz=rate, + octets_per_frame=octets, + transport=TRANSPORT2, + immediate_rendering=immediate_rendering, + assisted_listening_stream=assisted_listening_stream, + presentation_delay_us=pres_delay if pres_delay is not None else 40000, + bigs=bigs, + ) + conf.qos_config = QOS_PRESET_MAP.get(saved_qos_preset, QOS_PRESET_MAP["Fast"]) + log.info("[AUTOSTART][SECONDARY] Scheduling demo init_radio in 2s") + await asyncio.sleep(2) + async with _stream_lock: + log.info("[AUTOSTART][SECONDARY] Calling init_radio for demo autostart") + mc, persisted = await init_radio(TRANSPORT2, conf, multicaster2) + multicaster2 = mc + save_settings(persisted, secondary=True) + log.info("[AUTOSTART][SECONDARY] Demo autostart completed; settings persisted with is_streaming=%s", persisted.get('is_streaming')) + return + if not input_device_name or rate is None or octets is None: + log.info( + "[AUTOSTART][SECONDARY] Skipping device-based autostart: input_device=%s rate=%s octets=%s", + input_device_name, + rate, + octets, + ) + return + if multicaster2 is not None: + try: + if multicaster2.get_status().get('is_streaming'): + log.info("[AUTOSTART][SECONDARY] Skipping device-based autostart: stream already running") + return + except Exception: + pass + while True: + if multicaster2 is not None: + try: + if 
multicaster2.get_status().get('is_streaming'): + log.info("[AUTOSTART][SECONDARY] Aborting wait loop: stream started externally") + return + except Exception: + pass + current_settings = load_stream_settings2() or {} + if current_settings.get('timestamp') != original_ts: + if ( + current_settings.get('input_device') != input_device_name or + current_settings.get('audio_mode') != audio_mode + ): + return + usb = [d for _, d in get_alsa_usb_inputs()] + net = [d for _, d in get_network_pw_inputs()] + names = {d.get('name') for d in usb} | {d.get('name') for d in net} + if input_device_name in names: + bigs = [ + auracast_config.AuracastBigConfig( + id=big_ids[0] if big_ids else DEFAULT_BIG_ID, + random_address=big_addrs[0] if big_addrs else DEFAULT_RANDOM_ADDRESS, + code=stream_password, + name=channel_names[0] if channel_names else "Broadcast0", + program_info=program_info[0] if isinstance(program_info, list) and program_info else program_info, + language=languages[0] if languages else "deu", + audio_source=f"device:{input_device_name}", + iso_que_len=1, + sampling_frequency=rate, + octets_per_frame=octets, + ) + ] + conf = auracast_config.AuracastConfigGroup( + auracast_sampling_rate_hz=rate, + octets_per_frame=octets, + transport=TRANSPORT2, + immediate_rendering=immediate_rendering, + assisted_listening_stream=assisted_listening_stream, + presentation_delay_us=pres_delay if pres_delay is not None else 40000, + bigs=bigs, + ) + conf.qos_config = QOS_PRESET_MAP.get(saved_qos_preset, QOS_PRESET_MAP["Fast"]) + log.info("[AUTOSTART][SECONDARY] Scheduling device init_radio in 2s") + await asyncio.sleep(2) + async with _stream_lock: + log.info("[AUTOSTART][SECONDARY] Calling init_radio for device autostart") + mc, persisted = await init_radio(TRANSPORT2, conf, multicaster2) + multicaster2 = mc + save_settings(persisted, secondary=True) + log.info("[AUTOSTART][SECONDARY] Device autostart completed; settings persisted with is_streaming=%s", persisted.get('is_streaming')) + 
return + await asyncio.sleep(2) + + await do_primary() + await do_secondary() @app.on_event("startup") async def _startup_autostart_event(): # Spawn the autostart task without blocking startup - log.info("Refreshing PipeWire device cache.") + log.info("[STARTUP] Auracast multicast server startup: initializing settings cache, I2C, and PipeWire cache") # Hydrate settings cache once to avoid disk I/O during /status - _hydrate_settings_cache_from_disk() + _init_settings_cache_from_disk() + await _init_i2c_on_startup() + # Ensure ADC mixer level is set at startup + await _set_adc_level_on_startup() refresh_pw_cache() - # Start the streamer worker thread - streamer.start() + log.info("[STARTUP] Scheduling autostart task") asyncio.create_task(_autostart_from_settings()) - @app.get("/audio_inputs_pw_usb") async def audio_inputs_pw_usb(): """List USB input devices using ALSA backend (USB is ALSA in our scheme).""" @@ -542,7 +832,6 @@ async def audio_inputs_pw_usb(): log.error("Exception in /audio_inputs_pw_usb: %s", traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) - @app.get("/audio_inputs_pw_network") async def audio_inputs_pw_network(): """List PipeWire Network/AES67 input nodes from cache.""" @@ -556,40 +845,33 @@ async def audio_inputs_pw_network(): log.error("Exception in /audio_inputs_pw_network: %s", traceback.format_exc()) raise HTTPException(status_code=500, detail=str(e)) - @app.post("/refresh_audio_devices") async def refresh_audio_devices(): """Triggers a re-scan of audio devices, but only if no stream is active.""" - streaming = False try: - status = await streamer.call(streamer._w_status_primary) + status = await _status_primary() streaming = bool(status.get('is_streaming')) - except Exception: - pass # Ignore errors, default to not refreshing + except Exception as e: + log.error("Exception in /refresh_audio_devices: %s", traceback.format_exc()) + raise HTTPException(status_code=500, detail=str(e)) if streaming: log.warning("Ignoring 
refresh request: an audio stream is active.") raise HTTPException(status_code=409, detail="An audio stream is active. Stop the stream before refreshing devices.") - try: - log.info("Refreshing PipeWire device cache.") - refresh_pw_cache() - return {"status": "ok"} - except Exception as e: - log.error("Exception during device refresh: %s", traceback.format_exc()) - raise HTTPException(status_code=500, detail=f"Failed to refresh devices: {e}") - + log.info("Refreshing PipeWire device cache.") + refresh_pw_cache() + return {"status": "ok"} @app.post("/shutdown") async def shutdown(): """Stops broadcasting and releases all audio/Bluetooth resources.""" try: - await streamer.call(streamer._w_stop_all) + await _stop_all() return {"status": "stopped"} except Exception as e: raise HTTPException(status_code=500, detail=str(e)) - @app.post("/system_reboot") async def system_reboot(): """Stop audio and request a system reboot via sudo. @@ -599,13 +881,9 @@ async def system_reboot(): try: # Best-effort: stop any active streaming cleanly WITHOUT persisting state try: - try: - await streamer.call(streamer._w_stop_all) - except Exception: - pass + await _stop_all() except Exception: - log.warning("Non-fatal: failed to stop streams before reboot", exc_info=True) - + pass # Launch reboot without waiting for completion try: await asyncio.create_subprocess_exec("sudo", "reboot") diff --git a/src/auracast/server/start_frontend_https.sh b/src/auracast/server/start_frontend_https.sh index 19fbd61..6e145ff 100755 --- a/src/auracast/server/start_frontend_https.sh +++ b/src/auracast/server/start_frontend_https.sh @@ -34,4 +34,4 @@ echo "Using Avahi domain: $AVAHI_DOMAIN" POETRY_BIN="/home/caster/.local/bin/poetry" # Start Streamlit HTTPS server (port 443) -$POETRY_BIN run streamlit run multicast_frontend.py --server.port 443 --server.enableCORS false --server.enableXsrfProtection false --server.headless true --server.sslCertFile "$CERT" --server.sslKeyFile "$KEY" --browser.gatherUsageStats 
false +$POETRY_BIN run streamlit run multicast_frontend.py --server.port 443 --server.address 0.0.0.0 --server.enableCORS false --server.enableXsrfProtection false --server.headless true --server.sslCertFile "$CERT" --server.sslKeyFile "$KEY" --browser.gatherUsageStats false diff --git a/src/auracast/testdata/announcement_en_stereo.wav b/src/auracast/testdata/announcement_es_stereo.wav similarity index 100% rename from src/auracast/testdata/announcement_en_stereo.wav rename to src/auracast/testdata/announcement_es_stereo.wav diff --git a/src/auracast/utils/read_temp.py b/src/auracast/utils/read_temp.py new file mode 100644 index 0000000..79e6552 --- /dev/null +++ b/src/auracast/utils/read_temp.py @@ -0,0 +1,18 @@ +from smbus2 import SMBus + +def read_case_temp(): + addr = 0x48 # change if your scan shows different + with SMBus(1) as bus: + msb, lsb = bus.read_i2c_block_data(addr, 0x00, 2) + raw = ((msb << 8) | lsb) >> 4 + if raw & 0x800: # sign bit for 12-bit + raw -= 1 << 12 + return round(raw * 0.0625, 2) + +def read_cpu_temp(): + with open("/sys/class/thermal/thermal_zone0/temp", "r") as f: + return round(int(f.read()) / 1000, 2) + +if __name__ == "__main__": + print("Case temperature: ", read_case_temp(), "°C") + print("CPU temperature: ", read_cpu_temp(), "°C") diff --git a/src/auracast/utils/reset_utils.py b/src/auracast/utils/reset_utils.py index 74f549b..43ad776 100644 --- a/src/auracast/utils/reset_utils.py +++ b/src/auracast/utils/reset_utils.py @@ -2,7 +2,7 @@ import os import asyncio import logging as log -async def reset_nrf54l(slot: int = 0, timeout: float = 8.0): +async def reset_nrf54l(interface: int = 0, timeout: float = 8.0): """ Reset the nRF54L target using OpenOCD before starting broadcast. 
@@ -24,7 +24,7 @@ async def reset_nrf54l(slot: int = 0, timeout: float = 8.0): try: # Resolve project directory and filenames proj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'openocd')) - names = ['raspberrypi-swd0.cfg', 'swd0.cfg'] if slot == 0 else ['raspberrypi-swd1.cfg', 'swd1.cfg'] + names = ['raspberrypi-swd0.cfg', 'swd0.cfg'] if interface == 0 else ['raspberrypi-swd1.cfg', 'swd1.cfg'] cfg = None for n in names: p = os.path.join(proj_dir, n) @@ -56,7 +56,7 @@ async def reset_nrf54l(slot: int = 0, timeout: float = 8.0): ok = await _run(cmd) if ok: - log.info("reset_nrf54l: reset succeeded (slot %d) using %s", slot, cfg) + log.info("reset_nrf54l: reset succeeded (interface %d) using %s", interface, cfg) except FileNotFoundError: log.error("reset_nrf54l: openocd not found; skipping reset") @@ -71,7 +71,10 @@ if __name__ == '__main__': format='%(asctime)s.%(msecs)03d %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) + interface_to_reset = 0 + log.info(f"Executing reset for interface {interface_to_reset}") + asyncio.run(reset_nrf54l(interface=interface_to_reset)) - slot_to_reset = 1 - log.info(f"Executing reset for slot {slot_to_reset}") - asyncio.run(reset_nrf54l(slot=slot_to_reset)) + interface_to_reset = 1 + log.info(f"Executing reset for interface {interface_to_reset}") + asyncio.run(reset_nrf54l(interface=interface_to_reset)) diff --git a/src/auracast/utils/sounddevice_utils.py b/src/auracast/utils/sounddevice_utils.py index 9d7c1f8..2c30e89 100644 --- a/src/auracast/utils/sounddevice_utils.py +++ b/src/auracast/utils/sounddevice_utils.py @@ -232,13 +232,19 @@ def get_alsa_usb_inputs(): name = dev.get('name', '').lower() # Filter for USB devices based on common patterns: # - Contains 'usb' in the name - # - hw:X,Y pattern (ALSA hardware devices) + # - hw:X or hw:X,Y pattern present anywhere in name (ALSA hardware devices) + # - dsnoop/ch1/ch2 convenience entries from asound.conf # Exclude: default, dmix, pulse, 
pipewire, sysdefault if any(exclude in name for exclude in ['default', 'dmix', 'pulse', 'pipewire', 'sysdefault']): continue - # Include if it has 'usb' in name or matches hw:X pattern - if 'usb' in name or re.match(r'hw:\d+', name): + # Include if it has 'usb' or contains an hw:* token, or matches common dsnoop/mono aliases + if ( + 'usb' in name or + re.search(r'hw:\d+(?:,\d+)?', name) or + name.startswith('dsnoop') or + name in ('ch1', 'ch2') + ): usb_inputs.append((idx, dev)) return usb_inputs diff --git a/src/misc/asound.conf b/src/misc/asound.conf new file mode 100644 index 0000000..30e6594 --- /dev/null +++ b/src/misc/asound.conf @@ -0,0 +1,28 @@ +pcm.ch1 { + type dsnoop + ipc_key 234884 + slave { + pcm "hw:CARD=i2s,DEV=0" + channels 2 + rate 48000 + format S16_LE + period_size 120 + buffer_size 240 + } + bindings.0 0 +} + + +pcm.ch2 { + type dsnoop + ipc_key 234884 + slave { + pcm "hw:CARD=i2s,DEV=0" + channels 2 + rate 48000 + format S16_LE + period_size 120 + buffer_size 240 + } + bindings.0 1 +} \ No newline at end of file diff --git a/src/misc/build_pcm1862_dts.sh b/src/misc/build_pcm1862_dts.sh new file mode 100755 index 0000000..e0372ad --- /dev/null +++ b/src/misc/build_pcm1862_dts.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -euo pipefail +DTS=./src/misc/pcm1862-i2s.dts +DTBO=pcm1862-i2s.dtbo +OUT=/boot/firmware/overlays + +# build +dtc -@ -I dts -O dtb -o "$DTBO" "$DTS" + +# install +sudo install -m 0644 "$DTBO" "$OUT/$DTBO" + +# NOTE: also add +# dtparam=i2c_arm=on +# dtoverlay=pcm1862-i2s +# to /boot/firmware/config.txt + +echo "Built and installed $DTBO to $OUT." 
+echo "Now either reboot to load the installed overlay" diff --git a/src/misc/install_asoundconf.sh b/src/misc/install_asoundconf.sh new file mode 100644 index 0000000..fce13fc --- /dev/null +++ b/src/misc/install_asoundconf.sh @@ -0,0 +1 @@ +sudo cp src/misc/asound.conf /etc/asound.conf \ No newline at end of file diff --git a/src/misc/pcm1862-i2s.dts b/src/misc/pcm1862-i2s.dts new file mode 100644 index 0000000..7c2f30d --- /dev/null +++ b/src/misc/pcm1862-i2s.dts @@ -0,0 +1,55 @@ +/dts-v1/; +/plugin/; + +/ { + compatible = "brcm,bcm2835"; + + /* Enable the I²S controller */ + fragment@0 { + target = <&i2s>; + __overlay__ { + status = "okay"; + }; + }; + + /* PCM1862 on I2C1 at 0x4a (change if your bus/address differ) */ + fragment@1 { + target = <&i2c1>; + __overlay__ { + #address-cells = <1>; + #size-cells = <0>; + + pcm1862: adc@4a { + compatible = "ti,pcm1862"; + reg = <0x4a>; + #sound-dai-cells = <0>; + /* Rails are hard-powered on your board, so no regulators here */ + }; + }; + }; + + /* Link bcm2835-i2s <-> pcm1862 via simple-audio-card */ + fragment@2 { + target-path = "/"; + __overlay__ { + pcm1862_sound: pcm1862-sound { + compatible = "simple-audio-card"; + simple-audio-card,name = "pcm1862 on i2s"; + simple-audio-card,format = "i2s"; + /* Pi is master for BCLK/LRCLK */ + simple-audio-card,bitclock-master = <&dai_cpu>; + simple-audio-card,frame-master = <&dai_cpu>; + + dai_cpu: simple-audio-card,cpu { + sound-dai = <&i2s>; + dai-tdm-slot-num = <2>; + dai-tdm-slot-width = <32>; + }; + + simple-audio-card,codec { + sound-dai = <&pcm1862>; + }; + }; + }; + }; +}; diff --git a/src/openocd/raspberrypi-swd0.cfg b/src/openocd/raspberrypi-swd0.cfg index 853f575..ae77096 100644 --- a/src/openocd/raspberrypi-swd0.cfg +++ b/src/openocd/raspberrypi-swd0.cfg @@ -1,7 +1,7 @@ adapter driver bcm2835gpio transport select swd adapter gpio swclk 17 -adapter gpio swdio 18 +adapter gpio swdio 26 #adapter gpio trst 26 #reset_config trst_only diff --git 
a/src/openocd/raspberrypi-swd1.cfg b/src/openocd/raspberrypi-swd1.cfg index 103c0f9..b47e592 100644 --- a/src/openocd/raspberrypi-swd1.cfg +++ b/src/openocd/raspberrypi-swd1.cfg @@ -1,7 +1,7 @@ adapter driver bcm2835gpio transport select swd -adapter gpio swclk 24 -adapter gpio swdio 23 +adapter gpio swclk 23 +adapter gpio swdio 24 #adapter gpio trst 27 #reset_config trst_only diff --git a/src/qualification/BAP/test_bap_bsrc_scc_bv20c.py b/src/qualification/BAP/test_bap_bsrc_scc_bv20c.py new file mode 100644 index 0000000..bc6703c --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_bv20c.py @@ -0,0 +1,42 @@ +""" +BAP/BSRC/SCC/BV-20-C: Config Broadcast, LC3 16_2_2 + +Configuration: 16kHz, 40 octets/frame, stereo (2 BISes), QoS _2 variant +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + # _2 variant uses different QoS (RTN=2, higher latency) + config.qos_config = AuracastQosRobust() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 16_2_2: 16kHz, 40 octets/frame + config.auracast_sampling_rate_hz = 16000 + config.octets_per_frame = 40 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_bv22c.py b/src/qualification/BAP/test_bap_bsrc_scc_bv22c.py new file mode 100644 index 0000000..81b0765 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_bv22c.py @@ -0,0 +1,42 @@ +""" +BAP/BSRC/SCC/BV-22-C: Config Broadcast, 
LC3 24_2_2 + +Configuration: 24kHz, 60 octets/frame, stereo (2 BISes), QoS _2 variant +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + # _2 variant uses different QoS (RTN=2, higher latency) + config.qos_config = AuracastQosRobust() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 24_2_2: 24kHz, 60 octets/frame + config.auracast_sampling_rate_hz = 24000 + config.octets_per_frame = 60 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_bv28c.py b/src/qualification/BAP/test_bap_bsrc_scc_bv28c.py new file mode 100644 index 0000000..13bc71c --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_bv28c.py @@ -0,0 +1,42 @@ +""" +BAP/BSRC/SCC/BV-28-C: Config Broadcast, LC3 48_2_2 + +Configuration: 48kHz, 100 octets/frame, stereo (2 BISes), QoS _2 variant +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + # _2 variant uses different QoS (RTN=2, higher latency) + config.qos_config = AuracastQosRobust() + 
config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_2_2: 48kHz, 100 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 100 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_bv30c.py b/src/qualification/BAP/test_bap_bsrc_scc_bv30c.py new file mode 100644 index 0000000..add01f0 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_bv30c.py @@ -0,0 +1,42 @@ +""" +BAP/BSRC/SCC/BV-30-C: Config Broadcast, LC3 48_4_2 + +Configuration: 48kHz, 120 octets/frame, stereo (2 BISes), QoS _2 variant +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + # _2 variant uses different QoS (RTN=2, higher latency) + config.qos_config = AuracastQosRobust() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_4_2: 48kHz, 120 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 120 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_bv32c.py b/src/qualification/BAP/test_bap_bsrc_scc_bv32c.py new file mode 100644 index 0000000..4f5b0b8 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_bv32c.py @@ -0,0 +1,43 @@ +""" +BAP/BSRC/SCC/BV-32-C: Config 
Broadcast, LC3 48_6_2 +also works for BV35,36,37 - just restart + +Configuration: 48kHz, 155 octets/frame, stereo (2 BISes), QoS _2 variant +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + # _2 variant uses different QoS (RTN=2, higher latency) + config.qos_config = AuracastQosRobust() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_6_2: 48kHz, 155 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 155 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_bv38c.py b/src/qualification/BAP/test_bap_bsrc_scc_bv38c.py new file mode 100644 index 0000000..9e02504 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_bv38c.py @@ -0,0 +1,45 @@ +""" +BAP/BSRC/SCC/BV-38-C: Multi BIG Configuration + +Configuration: Two BIGs (id=12 and id=13), stereo (2 BISes each) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = 
"serial:/dev/ttyAMA3,1000000,rtscts" + + # First BIG + big1 = AuracastBigConfig() + big1.random_address = "F1:F1:F2:F3:F4:F5" + big1.audio_source = "file:./testdata/announcement_en.wav" + big1.num_bis = 1 + big1.id = 12 + + # Second BIG + big2 = AuracastBigConfig() + big2.random_address = "F1:F1:F2:F3:F4:F6" + big2.audio_source = "file:./testdata/announcement_en.wav" + big2.num_bis = 1 + big2.id = 13 + + run_async( + broadcast( + config, + [big1, big2], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_str_bv04.py b/src/qualification/BAP/test_bap_bsrc_scc_str_bv04.py new file mode 100644 index 0000000..1c314be --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_str_bv04.py @@ -0,0 +1,42 @@ +""" +For BV36-C and BV 37-C to success just restart the stream while the testcase is running +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + # Ensure relative audio paths like in AuracastBigConfig work (./auracast/...) 
from src/auracast/ + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + # Start from default global config + config = AuracastGlobalConfig() + + # Use same QoS profile as multicast main + config.qos_config = AuracastQosRobust() + + # Transport similar to multicast main; adjust if needed for your setup + # config.transport = "auto" # let multicast auto-detect + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" # Raspberry Pi default + + # Default BIG, only modify the random address as requested + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_str_bv06c.py b/src/qualification/BAP/test_bap_bsrc_scc_str_bv06c.py new file mode 100644 index 0000000..b343a49 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_str_bv06c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/SCC/BV-06-C and BAP/BSRC/STR/BV-06-C: Config Broadcast, LC3 24_2_1 + +Configuration: 24kHz, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 24_2_1: 24kHz + config.auracast_sampling_rate_hz = 24000 + config.octets_per_frame = 60 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git 
a/src/qualification/BAP/test_bap_bsrc_scc_str_bv12c.py b/src/qualification/BAP/test_bap_bsrc_scc_str_bv12c.py new file mode 100644 index 0000000..89806e1 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_str_bv12c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/SCC/BV-12-C and BAP/BSRC/STR/BV-12-C: Config Broadcast, LC3 48_2_1 + +Configuration: 48kHz, 100 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_2_1: 48kHz, 100 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 100 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_str_bv14c.py b/src/qualification/BAP/test_bap_bsrc_scc_str_bv14c.py new file mode 100644 index 0000000..9a539f9 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_str_bv14c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/SCC/BV-14-C and BAP/BSRC/STR/BV-14-C: Config Broadcast, LC3 48_4_1 + +Configuration: 48kHz, 120 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + 
format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_4_1: 48kHz, 120 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 120 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_scc_str_bv16c.py b/src/qualification/BAP/test_bap_bsrc_scc_str_bv16c.py new file mode 100644 index 0000000..969e41b --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_scc_str_bv16c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/SCC/BV-16-C and BAP/BSRC/STR/BV-16-C: Config Broadcast, LC3 48_6_1 + +Configuration: 48kHz, 155 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_6_1: 48kHz, 155 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 155 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_str_bv21c.py 
b/src/qualification/BAP/test_bap_bsrc_str_bv21c.py new file mode 100644 index 0000000..0df5a1c --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_str_bv21c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/STR/BV-21-C: BSRC, Multiple BISes, LC3 16_2 + +Configuration: 16kHz, 40 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 16_2: 16kHz, 40 octets/frame + config.auracast_sampling_rate_hz = 16000 + config.octets_per_frame = 40 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_es_stereo.wav" + big.id = 12 + big.num_bis = 2 # stereo (multiple BISes) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_str_bv23c.py b/src/qualification/BAP/test_bap_bsrc_str_bv23c.py new file mode 100644 index 0000000..9171592 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_str_bv23c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/STR/BV-23-C: BSRC, Multiple BISes, LC3 24_2 + +Configuration: 24kHz, 60 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), 
"../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 24_2: 24kHz, 60 octets/frame + config.auracast_sampling_rate_hz = 24000 + config.octets_per_frame = 60 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_es_stereo.wav" + big.id = 12 + big.num_bis = 2 # stereo (multiple BISes) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_str_bv29c.py b/src/qualification/BAP/test_bap_bsrc_str_bv29c.py new file mode 100644 index 0000000..695b2c1 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_str_bv29c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/STR/BV-29-C: BSRC, Multiple BISes, LC3 48_2 + +Configuration: 48kHz, 100 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_2: 48kHz, 100 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 100 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_es_stereo.wav" + big.id = 12 + big.num_bis = 2 # stereo (multiple BISes) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_str_bv31c.py b/src/qualification/BAP/test_bap_bsrc_str_bv31c.py new file mode 100644 index 0000000..a4a3a07 --- /dev/null +++ 
b/src/qualification/BAP/test_bap_bsrc_str_bv31c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/STR/BV-31-C: BSRC, Multiple BISes, LC3 48_4 + +Configuration: 48kHz, 120 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_4: 48kHz, 120 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 120 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_es_stereo.wav" + big.id = 12 + big.num_bis = 2 # stereo (multiple BISes) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/BAP/test_bap_bsrc_str_bv33c.py b/src/qualification/BAP/test_bap_bsrc_str_bv33c.py new file mode 100644 index 0000000..0824092 --- /dev/null +++ b/src/qualification/BAP/test_bap_bsrc_str_bv33c.py @@ -0,0 +1,41 @@ +""" +BAP/BSRC/STR/BV-33-C: BSRC, Multiple BISes, LC3 48_6 + +Configuration: 48kHz, 155 octets/frame, stereo (2 BISes) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + 
config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 48_6: 48kHz, 155 octets/frame + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 155 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_es_stereo.wav" + big.id = 12 + big.num_bis = 2 # stereo (multiple BISes) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/CAP/test_cap_ini_bst.py b/src/qualification/CAP/test_cap_ini_bst.py new file mode 100644 index 0000000..1286a06 --- /dev/null +++ b/src/qualification/CAP/test_cap_ini_bst.py @@ -0,0 +1,46 @@ +""" +CAP/INI/BST/BV-01-C and CAP/INI/BST/BV-05-C: +- BV-01-C: Broadcast Audio Starting for Single Audio Stream +- BV-05-C: Broadcast Audio Starting for Single Audio Streams - Single CCID + +Make sure to set TSPX_BST_CODEC_CONFIG to 16_2_1 +Restart the stream when asked to terminate. +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + # Start from default global config + config = AuracastGlobalConfig() + + # Use same QoS profile as multicast main + config.qos_config = AuracastQosRobust() + + # Transport similar to multicast main; adjust if needed for your setup + # config.transport = "auto" # let multicast auto-detect + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" # Raspberry Pi default + + # Default BIG, only modify the random address as requested + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.id = 12 + + run_async( + broadcast( + config, + [big], + ) + ) diff 
--git a/src/qualification/GAP/test_gap_bis_bbm.py b/src/qualification/GAP/test_gap_bis_bbm.py new file mode 100644 index 0000000..b0e8262 --- /dev/null +++ b/src/qualification/GAP/test_gap_bis_bbm.py @@ -0,0 +1,42 @@ +""" +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + # Ensure relative audio paths like in AuracastBigConfig work (./auracast/...) from src/auracast/ + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + # Start from default global config + config = AuracastGlobalConfig() + + # Use same QoS profile as multicast main + config.qos_config = AuracastQosRobust() + + # Transport similar to multicast main; adjust if needed for your setup + # config.transport = "auto" # let multicast auto-detect + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" # Raspberry Pi default + + # Stereo BIG with 2 BISes (FRONT_LEFT + FRONT_RIGHT) + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_es_stereo.wav" + big.id = 12 + big.num_bis = 2 # stereo: 2 BISes + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/GAP/test_gap_brob_bcst.py b/src/qualification/GAP/test_gap_brob_bcst.py new file mode 100644 index 0000000..3b4e2aa --- /dev/null +++ b/src/qualification/GAP/test_gap_brob_bcst.py @@ -0,0 +1,100 @@ +""" +GAP/BROB/BCST/BV-01-C: Broadcaster role with non-connectable advertising. 
+ +Advertising with TSPX_advertising_data value (27 bytes): +- Flags: BR/EDR Not Supported +- 16-bit Service UUIDs: 0x1800, 0x1801 +- Local Name: "PTS-GAP-06B8" +- Appearance: 0x0000 +""" + +import asyncio +import logging +import os + +import bumble.device +import bumble.transport +from bumble import hci +from bumble.device import DeviceConfiguration, AdvertisingParameters, AdvertisingEventProperties + + +async def run_broadcaster(): + """Configure and start non-connectable advertising for GAP/BROB/BCST/BV-01-C.""" + + # Transport - adjust as needed for your setup + transport_str = "serial:/dev/ttyAMA3,1000000,rtscts" + + async with await bumble.transport.open_transport(transport_str) as (hci_source, hci_sink): + # Device configuration + device_config = DeviceConfiguration( + name="PTS-GAP-06B8", + address=hci.Address("F1:F1:F2:F3:F4:F5"), + ) + + device = bumble.device.Device.from_config_with_hci( + device_config, + hci_source, + hci_sink, + ) + await device.power_on() + + # Exact advertising data payload (27 bytes) as specified: + # 0x02, 0x01, 0x04 - Flags: BR/EDR Not Supported + # 0x05, 0x03, 0x00, 0x18, 0x01, 0x18 - 16-bit Service UUIDs: 0x1800, 0x1801 + # 0x0D, 0x09, 0x50, 0x54, 0x53, 0x2D, 0x47, - Complete Local Name: "PTS-GAP-06B8" + # 0x41, 0x50, 0x2D, 0x30, 0x36, 0x42, 0x38 + # 0x03, 0x19, 0x00, 0x00 - Appearance: 0x0000 + adv_data = bytes([ + 0x02, 0x01, 0x04, # Flags: BR/EDR Not Supported + 0x05, 0x03, 0x00, 0x18, 0x01, 0x18, # 16-bit Service UUIDs + 0x0D, 0x09, 0x50, 0x54, 0x53, 0x2D, 0x47, 0x41, # Local Name: "PTS-GAP-06B8" + 0x50, 0x2D, 0x30, 0x36, 0x42, 0x38, + 0x03, 0x19, 0x00, 0x00 # Appearance + ]) + + logging.info("Advertising data (%d bytes): %s", len(adv_data), adv_data.hex()) + + # Create advertising set with non-connectable parameters (ADV_NONCONN_IND equivalent) + advertising_set = await device.create_advertising_set( + advertising_parameters=AdvertisingParameters( + advertising_event_properties=AdvertisingEventProperties( + 
is_connectable=False, # Non-connectable (ADV_NONCONN_IND) + is_scannable=False, # Not scannable + is_directed=False, + is_high_duty_cycle_directed_connectable=False, + is_legacy=True, # Use legacy advertising PDUs + is_anonymous=False, + ), + primary_advertising_interval_min=0x0800, # 1.28s + primary_advertising_interval_max=0x0800, # 1.28s + primary_advertising_phy=hci.Phy.LE_1M, + ), + advertising_data=adv_data, + auto_start=True, + ) + + logging.info("Non-connectable advertising started (ADV_NONCONN_IND)") + logging.info("Advertising set handle: %s", advertising_set.advertising_handle) + + # Keep advertising until interrupted + logging.info("Press Ctrl+C to stop...") + try: + while True: + await asyncio.sleep(1) + except asyncio.CancelledError: + pass + finally: + await advertising_set.stop() + logging.info("Advertising stopped") + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + try: + asyncio.run(run_broadcaster()) + except KeyboardInterrupt: + logging.info("Interrupted by user") diff --git a/src/qualification/GAP/test_gap_conn_ncon.py b/src/qualification/GAP/test_gap_conn_ncon.py new file mode 100644 index 0000000..c984dd4 --- /dev/null +++ b/src/qualification/GAP/test_gap_conn_ncon.py @@ -0,0 +1,102 @@ +""" +GAP/CONN/NCON/BV-01-C: Non-Connectable Mode. + +PTS Action: Select YES when asked "Does the IUT have an ability to send +non-connectable advertising report?" 
+ +Configuration (same as GAP/BROB/BCST/BV-01-C): +- Advertising_Type: 0x03 (ADV_NONCONN_IND) +- Flags AD Type (0x01): 0x04 (Not Discoverable, BR/EDR Not Supported) +- Legacy non-connectable advertising packet +""" + +import asyncio +import logging +import os + +import bumble.device +import bumble.transport +from bumble import hci +from bumble.device import DeviceConfiguration, AdvertisingParameters, AdvertisingEventProperties + + +async def run_non_connectable(): + """Configure and start non-connectable advertising for GAP/CONN/NCON/BV-01-C.""" + + # Transport - adjust as needed for your setup + transport_str = "serial:/dev/ttyAMA3,1000000,rtscts" + + async with await bumble.transport.open_transport(transport_str) as (hci_source, hci_sink): + # Device configuration + device_config = DeviceConfiguration( + name="PTS-GAP-06B8", + address=hci.Address("F1:F1:F2:F3:F4:F5"), + ) + + device = bumble.device.Device.from_config_with_hci( + device_config, + hci_source, + hci_sink, + ) + await device.power_on() + + # Exact advertising data payload (27 bytes) as specified: + # 0x02, 0x01, 0x04 - Flags: BR/EDR Not Supported + # 0x05, 0x03, 0x00, 0x18, 0x01, 0x18 - 16-bit Service UUIDs: 0x1800, 0x1801 + # 0x0D, 0x09, 0x50, 0x54, 0x53, 0x2D, 0x47, - Complete Local Name: "PTS-GAP-06B8" + # 0x41, 0x50, 0x2D, 0x30, 0x36, 0x42, 0x38 + # 0x03, 0x19, 0x00, 0x00 - Appearance: 0x0000 + adv_data = bytes([ + 0x02, 0x01, 0x04, # Flags: BR/EDR Not Supported + 0x05, 0x03, 0x00, 0x18, 0x01, 0x18, # 16-bit Service UUIDs + 0x0D, 0x09, 0x50, 0x54, 0x53, 0x2D, 0x47, 0x41, # Local Name: "PTS-GAP-06B8" + 0x50, 0x2D, 0x30, 0x36, 0x42, 0x38, + 0x03, 0x19, 0x00, 0x00 # Appearance + ]) + + logging.info("Advertising data (%d bytes): %s", len(adv_data), adv_data.hex()) + + # Create advertising set with non-connectable parameters (ADV_NONCONN_IND equivalent) + advertising_set = await device.create_advertising_set( + advertising_parameters=AdvertisingParameters( + 
advertising_event_properties=AdvertisingEventProperties( + is_connectable=False, # Non-connectable (ADV_NONCONN_IND) + is_scannable=False, # Not scannable + is_directed=False, + is_high_duty_cycle_directed_connectable=False, + is_legacy=True, # Use legacy advertising PDUs + is_anonymous=False, + ), + primary_advertising_interval_min=0x0800, # 1.28s + primary_advertising_interval_max=0x0800, # 1.28s + primary_advertising_phy=hci.Phy.LE_1M, + ), + advertising_data=adv_data, + auto_start=True, + ) + + logging.info("Non-connectable advertising started (ADV_NONCONN_IND)") + logging.info("Advertising set handle: %s", advertising_set.advertising_handle) + + # Keep advertising until interrupted + logging.info("Press Ctrl+C to stop...") + try: + while True: + await asyncio.sleep(1) + except asyncio.CancelledError: + pass + finally: + await advertising_set.stop() + logging.info("Advertising stopped") + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + try: + asyncio.run(run_non_connectable()) + except KeyboardInterrupt: + logging.info("Interrupted by user") diff --git a/src/qualification/PBP/test_pbp_pbs_pbm_bv01c.py b/src/qualification/PBP/test_pbp_pbs_pbm_bv01c.py new file mode 100644 index 0000000..0853c95 --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_pbm_bv01c.py @@ -0,0 +1,45 @@ +""" +PBP/PBS/PBM/BV-01-C: Transmit Program_Info Metadata + +Configuration: 16kHz, unencrypted, stereo (2 BISes) +Program_Info metadata: 0x00112233445566778899AABBCCDDEEFF +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + 
os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 16_2_1: 16kHz, stereo + config.auracast_sampling_rate_hz = 16000 + config.octets_per_frame = 40 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + big.name = "Broadcast" + # Program_Info metadata: 00112233445566778899AABBCCDDEEFF + big.program_info = bytes.fromhex("00112233445566778899AABBCCDDEEFF").decode('latin-1') + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv01c.py b/src/qualification/PBP/test_pbp_pbs_str_bv01c.py new file mode 100644 index 0000000..07a19e7 --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv01c.py @@ -0,0 +1,45 @@ +""" +PBP/PBS/STR/BV-01-C: Standard Quality Streaming Support, 16_2_1 - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_7_1) OR TSPC_ALL + +Configuration: 16kHz, unencrypted, stereo (2 BISes) +PBP Features: 0x02 (Standard Quality) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 16_2_1: 16kHz, stereo + config.auracast_sampling_rate_hz = 16000 + config.octets_per_frame = 40 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.id = 12 + big.num_bis = 1 + big.name = 
"Broadcaster" + # Unencrypted (no code) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv02c.py b/src/qualification/PBP/test_pbp_pbs_str_bv02c.py new file mode 100644 index 0000000..543af1b --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv02c.py @@ -0,0 +1,46 @@ +""" +PBP/PBS/STR/BV-02-C: High Quality Streaming Support - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_6_5) OR TSPC_ALL + +Configuration: 48kHz, unencrypted, stereo (2 BISes) +PBP Features: 0x04 (High Quality) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # High Quality: 48kHz, 48_1_1 configuration + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 75 # 48_1_1: 48kHz, 75 octets/frame + config.frame_duration_us = 7500 # 7.5ms frame duration for 48_1_1 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + big.name = "Broadcaster" + # Unencrypted (no code) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv03c.py b/src/qualification/PBP/test_pbp_pbs_str_bv03c.py new file mode 100644 index 0000000..569f18d --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv03c.py @@ -0,0 +1,45 @@ +""" +PBP/PBS/STR/BV-03-C: Encrypted Streaming Support, Standard Quality - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_6_6 AND TSPC_PBP_6_4) OR TSPC_ALL + +Configuration: 16kHz, 
encrypted, stereo (2 BISes) +PBP Features: 0x03 (Standard Quality + Encrypted) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # Standard Quality: 16kHz + config.auracast_sampling_rate_hz = 16000 + config.octets_per_frame = 40 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + big.name = "Broadcaster" + big.code = "0x0102680553F1415AA265BBAFC6EA03B8" # Encrypted (hex format) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv04c.py b/src/qualification/PBP/test_pbp_pbs_str_bv04c.py new file mode 100644 index 0000000..caf6e8f --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv04c.py @@ -0,0 +1,46 @@ +""" +PBP/PBS/STR/BV-04-C: Encrypted Streaming Support, High Quality - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_6_6 AND TSPC_PBP_6_5) OR TSPC_ALL + +Configuration: 48kHz, encrypted, stereo (2 BISes) +PBP Features: 0x05 (High Quality + Encrypted) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = 
AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # High Quality: 48kHz, 48_1_1 configuration + config.auracast_sampling_rate_hz = 48000 + config.octets_per_frame = 75 # 48_1_1: 48kHz, 75 octets/frame + config.frame_duration_us = 7500 # 7.5ms frame duration for 48_1_1 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + big.name = "Broadcaster" + big.code = "0x0102680553F1415AA265BBAFC6EA03B8" # Encrypted (hex format) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv05c.py b/src/qualification/PBP/test_pbp_pbs_str_bv05c.py new file mode 100644 index 0000000..ec076c4 --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv05c.py @@ -0,0 +1,46 @@ +""" +PBP/PBS/STR/BV-05-C: Standard Quality Streaming Support, 16_2_2 - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_7_3) OR TSPC_ALL + +Configuration: 16kHz, unencrypted, stereo (2 BISes), QoS 16_2_2 +PBP Features: 0x02 (Standard Quality) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + # 16_2_2 uses different QoS (RTN=2, higher latency) + config.qos_config = AuracastQosRobust() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 16_2_2: 16kHz + config.auracast_sampling_rate_hz = 16000 + config.octets_per_frame = 40 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + 
big.id = 12 + big.name = "Broadcaster" + # Unencrypted (no code) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv06c.py b/src/qualification/PBP/test_pbp_pbs_str_bv06c.py new file mode 100644 index 0000000..52f624b --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv06c.py @@ -0,0 +1,45 @@ +""" +PBP/PBS/STR/BV-06-C: Standard Quality Streaming Support, 24_2_1 - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_7_2) OR TSPC_ALL + +Configuration: 24kHz, unencrypted, stereo (2 BISes) +PBP Features: 0x02 (Standard Quality) +""" + +import logging +import os + +from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosFast +from auracast.multicast import broadcast, run_async + + +if __name__ == "__main__": + logging.basicConfig( + level=os.environ.get("LOG_LEVEL", logging.INFO), + format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s", + ) + + os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast")) + + config = AuracastGlobalConfig() + config.qos_config = AuracastQosFast() + config.transport = "serial:/dev/ttyAMA3,1000000,rtscts" + + # 24_2_1: 24kHz + config.auracast_sampling_rate_hz = 24000 + config.octets_per_frame = 60 + + big = AuracastBigConfig() + big.random_address = "F1:F1:F2:F3:F4:F5" + big.audio_source = "file:./testdata/announcement_en.wav" + big.num_bis = 1 + big.id = 12 + big.name = "Broadcaster" + # Unencrypted (no code) + + run_async( + broadcast( + config, + [big], + ) + ) diff --git a/src/qualification/PBP/test_pbp_pbs_str_bv07c.py b/src/qualification/PBP/test_pbp_pbs_str_bv07c.py new file mode 100644 index 0000000..f547d18 --- /dev/null +++ b/src/qualification/PBP/test_pbp_pbs_str_bv07c.py @@ -0,0 +1,46 @@ +""" +PBP/PBS/STR/BV-07-C: Standard Quality Streaming Support, 24_2_2 - PBS +(TSPC_PBP_1_1 AND TSPC_PBP_7_4) OR TSPC_ALL + +Configuration: 24kHz, unencrypted, stereo (2 BISes), QoS 24_2_2 +PBP Features: 0x02 (Standard Quality) +""" + +import logging 
+import os
+
+from auracast.auracast_config import AuracastGlobalConfig, AuracastBigConfig, AuracastQosRobust
+from auracast.multicast import broadcast, run_async
+
+
+if __name__ == "__main__":
+    logging.basicConfig(
+        level=os.environ.get("LOG_LEVEL", logging.INFO),
+        format="%(module)s.py:%(lineno)d %(levelname)s: %(message)s",
+    )
+
+    os.chdir(os.path.join(os.path.dirname(__file__), "../../auracast"))
+
+    config = AuracastGlobalConfig()
+    # 24_2_2 uses different QoS (RTN=2, higher latency)
+    config.qos_config = AuracastQosRobust()
+    config.transport = "serial:/dev/ttyAMA3,1000000,rtscts"
+
+    # 24_2_2: 24kHz
+    config.auracast_sampling_rate_hz = 24000
+    config.octets_per_frame = 60
+
+    big = AuracastBigConfig()
+    big.random_address = "F1:F1:F2:F3:F4:F5"
+    big.audio_source = "file:./testdata/announcement_en.wav"
+    big.num_bis = 1
+    big.id = 12
+    big.name = "Broadcaster"
+    # Unencrypted (no code)
+
+    run_async(
+        broadcast(
+            config,
+            [big],
+        )
+    )
diff --git a/src/qualification/README.md b/src/qualification/README.md
new file mode 100644
index 0000000..3969f64
--- /dev/null
+++ b/src/qualification/README.md
@@ -0,0 +1,33 @@
+# Qualification procedure
+
+# flash a qualification dongle
+- You can use a normal nRF52840 Nordic dongle
+- See https://bluekitchen-gmbh.com/bluetooth-pts-with-nordic-nrf52840-usb-dongle/ for Nordic nRF52 dev dongle
+- Install PTS Firmware Upgrade Software
+- Plug-in nRF52840 USB Dongle
+- Start PTS Firmware Update Software
+- If you click on 'OK', updating the bootloader will fail (the Nordic bootloader on the nRF52840 USB cannot be updated via DFU)
+- Close the software
+- Open an Explorer window and navigate to C:\Program Files (x86)\Bluetooth SIG\PTS Firmware Upgrade Software\tools and copy the file nrfutil.exe
+- Navigate to AppData\Local\PTSFirmwareUpgradeSoftware within your user folder and paste the nrfutil.exe into this folder
+- Note the file with the UUID128-like file name as you'll need it soon
+- Open a PowerShell via File->Open Windows PowerShell as a regular user
+- Reset the nRF52840 USB Dongle by pressing the smaller button (labeled 'RESET') to enter DFU mode
+- A red LED should start flashing
+- Run the nrfutil.exe with the .bin file (it's actually a ZIP archive) with the UUID128-like name
+- Or just press the TAB key: .\nrfutil dfu usb-serial -pkg be4d3ab8-9c98-408a-8be4-18acf4b32d28.zip -p COM4
+- Et voila, the nRF52840 USB Dongle can be used with PTS
+
+# PTS IXIT prerequisites
+In BAP set
+- Broadcast_ID=12
+- Broadcast_ID_2=13
+
+In CAP set
+- TSPX_BST_CODEC_CONFIG=16_2_1
+
+Everywhere set
+- use STREAMING_DATA_CONFIRMATION_METHOD=By Playing
+
+# Notes
+- some test cases are just passed by restarting the stream.
\ No newline at end of file
diff --git a/src/scripts/list_pw_nodes.py b/src/scripts/list_pw_nodes.py
deleted file mode 100644
index 270eb6a..0000000
--- a/src/scripts/list_pw_nodes.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import sounddevice as sd, pprint
-from auracast.utils.sounddevice_utils import devices_by_backend
-
-print("PortAudio library:", sd._libname)
-print("PortAudio version:", sd.get_portaudio_version())
-print("\nHost APIs:")
-pprint.pprint(sd.query_hostapis())
-print("\nDevices:")
-pprint.pprint(sd.query_devices())
-
-# Example: only PulseAudio devices on Linux
-print("\nOnly PulseAudio devices:")
-for i, d in devices_by_backend("PulseAudio"):
-    print(f"{i}: {d['name']} in={d['max_input_channels']} out={d['max_output_channels']}")
-
-
diff --git a/src/scripts/list_sd_nodes.py b/src/scripts/list_sd_nodes.py
new file mode 100644
index 0000000..6c850f7
--- /dev/null
+++ b/src/scripts/list_sd_nodes.py
@@ -0,0 +1,47 @@
+import sounddevice as sd, pprint
+from auracast.utils.sounddevice_utils import (
+    devices_by_backend,
+    get_alsa_inputs,
+    get_alsa_usb_inputs,
+    get_network_pw_inputs,
+    refresh_pw_cache,
+)
+
+print("PortAudio library:", sd._libname)
+print("PortAudio version:", sd.get_portaudio_version())
+
+print("\nHost APIs:")
+apis = sd.query_hostapis()
+pprint.pprint(apis) + +print("\nAll Devices (with host API name):") +devs = sd.query_devices() +for i, d in enumerate(devs): + ha_name = apis[d['hostapi']]['name'] if isinstance(d.get('hostapi'), int) and d['hostapi'] < len(apis) else '?' + if d.get('max_input_channels', 0) > 0: + print(f"IN {i:>3}: {d['name']} api={ha_name} in={d['max_input_channels']}") + elif d.get('max_output_channels', 0) > 0: + print(f"OUT {i:>3}: {d['name']} api={ha_name} out={d['max_output_channels']}") + else: + print(f"DEV {i:>3}: {d['name']} api={ha_name} (no I/O)") + +print("\nALSA input devices (PortAudio ALSA host):") +for i, d in devices_by_backend('ALSA'): + if d.get('max_input_channels', 0) > 0: + print(f"ALSA {i:>3}: {d['name']} in={d['max_input_channels']}") + +print("\nALSA USB-filtered inputs:") +for i, d in get_alsa_usb_inputs(): + print(f"USB {i:>3}: {d['name']} in={d['max_input_channels']}") + +print("\nRefreshing PipeWire caches...") +try: + refresh_pw_cache() +except Exception: + pass + +print("PipeWire Network inputs (from cache):") +for i, d in get_network_pw_inputs(): + print(f"NET {i:>3}: {d['name']} in={d.get('max_input_channels', 0)}") + + diff --git a/src/scripts/log_temperature.py b/src/scripts/log_temperature.py new file mode 100644 index 0000000..75420af --- /dev/null +++ b/src/scripts/log_temperature.py @@ -0,0 +1,36 @@ +import csv +import time +from datetime import datetime +from pathlib import Path + +from auracast.utils.read_temp import read_case_temp, read_cpu_temp + + +def main() -> None: + script_path = Path(__file__).resolve() + log_dir = script_path.parent + + start_time = datetime.now() + filename = start_time.strftime("temperature_log_%Y%m%d_%H%M%S.csv") + log_path = log_dir / filename + + with log_path.open("w", newline="") as csvfile: + writer = csv.writer(csvfile) + writer.writerow(["timestamp", "cpu_temp_c", "case_temp_c"]) + + try: + while True: + now = datetime.now().isoformat(timespec="seconds") + cpu_temp = read_cpu_temp() + case_temp = 
read_case_temp() + + writer.writerow([now, cpu_temp, case_temp]) + csvfile.flush() + + time.sleep(30) + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + main() diff --git a/src/scripts/read_case_temp.py b/src/scripts/read_case_temp.py deleted file mode 100644 index 2c6f619..0000000 --- a/src/scripts/read_case_temp.py +++ /dev/null @@ -1,8 +0,0 @@ -from smbus2 import SMBus -addr = 0x48 # change if your scan shows different -with SMBus(1) as bus: - msb, lsb = bus.read_i2c_block_data(addr, 0x00, 2) -raw = ((msb << 8) | lsb) >> 4 -if raw & 0x800: # sign bit for 12-bit - raw -= 1 << 12 -print(f"{raw * 0.0625:.2f} °C") diff --git a/src/service/auracast-server.service b/src/service/auracast-server.service index 23f8735..06395bf 100644 --- a/src/service/auracast-server.service +++ b/src/service/auracast-server.service @@ -9,6 +9,9 @@ ExecStart=/home/caster/.local/bin/poetry run python src/auracast/server/multicas Restart=on-failure Environment=PYTHONUNBUFFERED=1 Environment=LOG_LEVEL=INFO +CPUSchedulingPolicy=fifo +CPUSchedulingPriority=99 +LimitRTPRIO=99 [Install] WantedBy=default.target