# (removed non-code page-scrape artifact: "733 lines / 33 KiB / Python")
# Copyright 2024 Google LLC
|
||
#
|
||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||
# you may not use this file except in compliance with the License.
|
||
# You may obtain a copy of the License at
|
||
#
|
||
# https://www.apache.org/licenses/LICENSE-2.0
|
||
#
|
||
# Unless required by applicable law or agreed to in writing, software
|
||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
# See the License for the specific language governing permissions and
|
||
# limitations under the License.
|
||
|
||
# -----------------------------------------------------------------------------
|
||
# Imports
|
||
# -----------------------------------------------------------------------------
|
||
from __future__ import annotations
|
||
import pprint
|
||
import asyncio
|
||
import contextlib
|
||
import logging
|
||
import wave
|
||
import itertools
|
||
import struct
|
||
from typing import cast, Any, AsyncGenerator, Coroutine, List
|
||
import itertools
|
||
import glob
|
||
|
||
try:
|
||
import lc3 # type: ignore # pylint: disable=E0401
|
||
except ImportError as e:
|
||
raise ImportError("Try `python -m pip install \".[lc3]\"`.") from e
|
||
|
||
from bumble.colors import color
|
||
from bumble import company_ids
|
||
from bumble import core
|
||
from bumble import gatt
|
||
from bumble import hci
|
||
from bumble.profiles import bap
|
||
from bumble.profiles import le_audio
|
||
from bumble.profiles import pbp
|
||
from bumble.profiles import bass
|
||
import bumble.device
|
||
import bumble.transport
|
||
import bumble.utils
|
||
import numpy as np # for audio down-mix
|
||
from bumble.device import Host, BIGInfoAdvertisement, AdvertisingChannelMap
|
||
from bumble.audio import io as audio_io
|
||
|
||
from auracast import auracast_config
|
||
from auracast.utils.read_lc3_file import read_lc3_file
|
||
from auracast.utils.network_audio_receiver import NetworkAudioReceiverUncoded
|
||
from auracast.utils.webrtc_audio_input import WebRTCAudioInput
|
||
|
||
|
||
# Instantiate WebRTC audio input for streaming (can be used per-BIG or globally)
|
||
|
||
# modified from bumble
|
||
class ModWaveAudioInput(audio_io.ThreadedAudioInput):
    """Audio input that reads PCM samples from a .wav file.

    Differs from bumble's stock ``WaveAudioInput`` by a ``rewind`` flag:
    when True (the default) the file loops forever; when False the stream
    ends once the file is exhausted.
    """

    def __init__(self, filename: str) -> None:
        super().__init__()
        self._filename = filename
        self._wav: wave.Wave_read | None = None
        # Bytes handed out since the last (re)start of the file; used to
        # distinguish "end of file" from "file never produced data".
        self._bytes_read = 0
        # When True, _read() restarts the file at EOF instead of ending the stream.
        self.rewind=True

    def _open(self) -> audio_io.PcmFormat:
        # Open the wav file and describe its PCM layout to the base class.
        self._wav = wave.open(self._filename, 'rb')
        if self._wav.getsampwidth() != 2:
            # Only 16-bit (2-byte) samples are supported.
            raise ValueError('sample width not supported')
        return audio_io.PcmFormat(
            audio_io.PcmFormat.Endianness.LITTLE,
            audio_io.PcmFormat.SampleType.INT16,
            self._wav.getframerate(),
            self._wav.getnchannels(),
        )

    def _read(self, frame_size: int) -> bytes | None:
        # Return up to frame_size frames of raw PCM, or b'' if never opened.
        if not self._wav:
            return b''

        pcm_samples = self._wav.readframes(frame_size)
        if not pcm_samples and self._bytes_read:
            if not self.rewind:
                # NOTE(review): returns None (not b'') to signal end-of-stream
                # when looping is disabled — confirm the ThreadedAudioInput
                # base class expects this sentinel rather than empty bytes.
                return None
            # Loop around.
            self._wav.rewind()
            self._bytes_read = 0
            pcm_samples = self._wav.readframes(frame_size)

        self._bytes_read += len(pcm_samples)
        return pcm_samples

    def _close(self) -> None:
        # Release the file handle; safe to call when the file was never opened.
        if self._wav:
            self._wav.close()
|
||
|
||
# Monkey-patch bumble's audio_io so that create_audio_input(...) resolves wav
# sources to the rewind-aware ModWaveAudioInput defined above.
audio_io.WaveAudioInput = ModWaveAudioInput
|
||
|
||
|
||
# -----------------------------------------------------------------------------
|
||
# Logging
|
||
# -----------------------------------------------------------------------------
|
||
logger = logging.getLogger(__name__)
|
||
|
||
@contextlib.asynccontextmanager
async def create_device(config: auracast_config.AuracastGlobalConfig) -> AsyncGenerator[bumble.device.Device, Any]:
    """Open the configured HCI transport and yield a powered-on Bumble device.

    The transport is closed automatically when the context exits.
    """
    transport = await bumble.transport.open_transport(config.transport)
    async with transport as (hci_source, hci_sink):
        dev_config = bumble.device.DeviceConfiguration(
            name=config.device_name,
            address=hci.Address(config.auracast_device_address),
            keystore='JsonKeyStore',
        )
        dev = bumble.device.Device.from_config_with_hci(
            dev_config, hci_source, hci_sink
        )
        await dev.power_on()
        yield dev
|
||
|
||
|
||
def run_async(async_command: Coroutine) -> None:
    """Run *async_command* to completion, pretty-printing protocol errors.

    ATT errors whose code maps to a BASS application error are shown by
    their symbolic name; anything else falls back to ``str(error)``.
    """
    try:
        asyncio.run(async_command)
    except core.ProtocolError as error:
        is_bass_error = (
            error.error_namespace == 'att'
            and error.error_code in list(bass.ApplicationError)
        )
        message = (
            bass.ApplicationError(error.error_code).name
            if is_bass_error
            else str(error)
        )
        print(
            color('!!! An error occurred while executing the command:', 'red'), message
        )
|
||
|
||
async def init_broadcast(
    device,
    global_config : auracast_config.AuracastGlobalConfig,
    big_config: List[auracast_config.AuracastBigConfig]
) -> dict:
    """Create one BIG (Broadcast Isochronous Group) per entry of *big_config*.

    For each configured broadcast this sets up: the Basic Audio Announcement,
    an extended + periodic advertising set, the BIG itself, the ISO data path
    and an ``IsoPacketStream`` for writing LC3 frames.

    Returns a dict keyed ``'big0'``, ``'big1'``, ... whose values hold the
    per-BIG objects ('advertising_set', 'big', 'iso_queue', announcements).
    """

    # e.g. FREQ_16000 for a 16000 Hz sampling rate.
    bap_sampling_freq = getattr(bap.SamplingFrequency, f"FREQ_{global_config.auracast_sampling_rate_hz}")
    bigs = {}
    for i, conf in enumerate(big_config):
        bigs[f'big{i}'] = {}
        # Config advertising set
        bigs[f'big{i}']['basic_audio_announcement'] = bap.BasicAudioAnnouncement(
            presentation_delay=global_config.presentation_delay_us,
            subgroups=[
                bap.BasicAudioAnnouncement.Subgroup(
                    codec_id=hci.CodingFormat(codec_id=hci.CodecID.LC3),
                    codec_specific_configuration=bap.CodecSpecificConfiguration(
                        sampling_frequency=bap_sampling_freq,
                        frame_duration=bap.FrameDuration.DURATION_10000_US,
                        octets_per_codec_frame=global_config.octets_per_frame,
                    ),
                    metadata=le_audio.Metadata(
                        [
                            le_audio.Metadata.Entry(
                                tag=le_audio.Metadata.Tag.LANGUAGE, data=conf.language.encode()
                            ),
                            le_audio.Metadata.Entry(
                                tag=le_audio.Metadata.Tag.PROGRAM_INFO, data=conf.program_info.encode()
                            ),
                            le_audio.Metadata.Entry(
                                tag=le_audio.Metadata.Tag.BROADCAST_NAME, data=conf.name.encode()
                            ),
                        ]
                    ),
                    bis=[
                        bap.BasicAudioAnnouncement.BIS(
                            index=1,
                            codec_specific_configuration=bap.CodecSpecificConfiguration(
                                audio_channel_allocation=bap.AudioLocation.FRONT_LEFT
                            ),
                        ),
                    ],
                )
            ],
        )
        logger.info('Setup Advertising')
        # Optional manufacturer-specific AD structure: (company_id, payload).
        advertising_manufacturer_data = (
            b''
            if global_config.manufacturer_data == (None, None)
            else bytes(
                core.AdvertisingData(
                    [
                        (
                            core.AdvertisingData.MANUFACTURER_SPECIFIC_DATA,
                            struct.pack('<H', global_config.manufacturer_data[0])
                            + global_config.manufacturer_data[1],
                        )
                    ]
                )
            )
        )
        bigs[f'big{i}']['broadcast_audio_announcement'] = bap.BroadcastAudioAnnouncement(conf.id)
        advertising_set = await device.create_advertising_set(
            random_address=hci.Address(conf.random_address),
            advertising_parameters=bumble.device.AdvertisingParameters(
                advertising_event_properties=bumble.device.AdvertisingEventProperties(
                    is_connectable=False
                ),
                primary_advertising_interval_min=100,
                primary_advertising_interval_max=200,
                advertising_sid=i,
                primary_advertising_phy=hci.Phy.LE_1M, # 2m phy config throws error - because for primary advertising channels, 1mbit is only supported
                secondary_advertising_phy=hci.Phy.LE_2M, # this is the secondary advertising being sent on non-advertising channels (extended advertising)
                #advertising_tx_power= # tx power in dbm (max 20)
                #secondary_advertising_max_skip=10,
            ),
            advertising_data=(
                bigs[f'big{i}']['broadcast_audio_announcement'].get_advertising_data()
                + bytes(
                    core.AdvertisingData(
                        [(core.AdvertisingData.BROADCAST_NAME, conf.name.encode())]
                    )
                )
                + advertising_manufacturer_data
            ),
            periodic_advertising_parameters=bumble.device.PeriodicAdvertisingParameters(
                periodic_advertising_interval_min=80,
                periodic_advertising_interval_max=160,
            ),
            periodic_advertising_data=bigs[f'big{i}']['basic_audio_announcement'].get_advertising_data(),
            auto_restart=True,
            auto_start=True,
        )
        bigs[f'big{i}']['advertising_set'] = advertising_set

        logging.info('Start Periodic Advertising')
        await advertising_set.start_periodic()

        logging.info('Setup BIG')
        # Unframed PDUs (0) only work when the SDU interval equals one 10 ms
        # frame; otherwise framed PDUs (1) are required.
        if global_config.qos_config.iso_int_multiple_10ms == 1:
            frame_enable = 0
        else:
            frame_enable = 1

        big = await device.create_big(
            bigs[f'big{i}']['advertising_set'],
            parameters=bumble.device.BigParameters(
                num_bis=1,
                sdu_interval=global_config.qos_config.iso_int_multiple_10ms*10000, # Is the same as iso interval
                max_sdu=global_config.octets_per_frame,
                max_transport_latency=global_config.qos_config.max_transport_latency_ms,
                rtn=global_config.qos_config.number_of_retransmissions,
                broadcast_code=(
                    bytes.fromhex(conf.code) if conf.code else None
                ),
                framing=frame_enable # needed if iso interval is not frame interval of codec
            ),
        )
        bigs[f'big{i}']['big'] = big

        # Host-to-controller data path for every BIS in this BIG.
        for bis_link in big.bis_links:
            await bis_link.setup_data_path(
                direction=bis_link.Direction.HOST_TO_CONTROLLER
            )

        iso_queue = bumble.device.IsoPacketStream(big.bis_links[0], conf.iso_que_len)

        logging.info('Setup ISO Data Path')

        bigs[f'big{i}']['iso_queue'] = iso_queue

        logging.debug(f'big{i} parameters are:')
        logging.debug('%s', pprint.pformat(vars(big)))
        logging.debug(f'Finished setup of big{i}.')

        await asyncio.sleep(i+1) # Wait for advertising to set up

        def on_flow():
            # Print ISO packet-queue statistics on every flow event.
            data_packet_queue = iso_queue.data_packet_queue
            print(
                f'\rPACKETS: pending={data_packet_queue.pending}, '
                f'queued={data_packet_queue.queued}, '
                f'completed={data_packet_queue.completed}',
                end='',
            )

        if global_config.debug:
            # NOTE(review): this always attaches to big0's queue (index 0, not i)
            # and on_flow closes over the loop variable `iso_queue` late-bound —
            # with several BIGs the printed stats come from the last iteration's
            # queue. Looks unintentional; confirm before relying on the output.
            bigs[f'big{0}']['iso_queue'].data_packet_queue.on('flow', on_flow)

    return bigs
|
||
|
||
|
||
class Streamer():
    """
    Streamer class that supports multiple input formats. See bumble for streaming from wav or device
    Added functionality on top of bumble:
        - loop parameter
            - if True the audio_source will be looped for ever
        - precode wav files
        - lc3 coded files
            - just use a .lc3 file as audio_source
        - lc3 coded from ram
            - use a bytestring b'' as audio_source
    """

    def __init__(
        self,
        bigs,
        global_config : auracast_config.AuracastGlobalConfig,
        big_config: List[auracast_config.AuracastBigConfig]
    ):
        # asyncio.Task running stream(), or None while not streaming.
        self.task = None
        # Flag polled by the main streaming loop; cleared to stop it.
        self.is_streaming = False
        # Per-BIG state dicts as produced by init_broadcast().
        self.bigs = bigs
        self.global_config = global_config
        self.big_config = big_config

    def start_streaming(self):
        # Launch stream() as a background task; refuses to start twice.
        if not self.is_streaming:
            self.task = asyncio.create_task(self.stream())
        else:
            logging.warning('Streamer is already running')

    async def stop_streaming(self):
        """Gracefully stop streaming and release audio devices."""
        if not self.is_streaming and self.task is None:
            return

        # Ask the streaming loop to finish
        self.is_streaming = False
        if self.task is not None:
            self.task.cancel()

        self.task = None

        # Close audio inputs (await to ensure ALSA devices are released)
        close_tasks = []
        for big in self.bigs.values():
            ai = big.get("audio_input")
            if ai and hasattr(ai, "close"):
                # NOTE(review): assumes close() returns an awaitable suitable
                # for asyncio.gather — confirm for every audio input type used.
                close_tasks.append(ai.close())
            # Remove reference so a fresh one is created next time
            big.pop("audio_input", None)
        if close_tasks:
            await asyncio.gather(*close_tasks, return_exceptions=True)

    async def stream(self):
        """Set up per-BIG inputs/encoders, then pump LC3 frames to the ISO queues.

        Phase 1 (per BIG): pick an input mode from the configured audio_source —
        network receiver, webrtc, precoded LC3 bytes, .lc3 file, precoded .wav,
        or a live/FFmpeg source — and store encoder + frame parameters in the
        BIG's state dict. Phase 2: round-robin over all BIGs, writing one LC3
        frame per BIG per pass until every source is exhausted or stopped.
        """

        bigs = self.bigs
        big_config = self.big_config
        global_config = self.global_config
        for i, big in enumerate(bigs.values()):
            audio_source = big_config[i].audio_source
            input_format = big_config[i].input_format

            # --- New: network_uncoded mode using NetworkAudioReceiver ---
            if isinstance(audio_source, NetworkAudioReceiverUncoded):
                # Start the UDP receiver coroutine so packets are actually received
                asyncio.create_task(audio_source.receive())
                encoder = lc3.Encoder(
                    frame_duration_us=global_config.frame_duration_us,
                    sample_rate_hz=global_config.auracast_sampling_rate_hz,
                    num_channels=1,
                    input_sample_rate_hz=audio_source.samplerate,
                )
                lc3_frame_samples = encoder.get_frame_samples()
                big['pcm_bit_depth'] = 16
                big['lc3_frame_samples'] = lc3_frame_samples
                big['lc3_bytes_per_frame'] = global_config.octets_per_frame
                big['audio_input'] = audio_source
                big['encoder'] = encoder
                big['precoded'] = False

            elif audio_source == 'webrtc':
                big['audio_input'] = WebRTCAudioInput()
                encoder = lc3.Encoder(
                    frame_duration_us=global_config.frame_duration_us,
                    sample_rate_hz=global_config.auracast_sampling_rate_hz,
                    num_channels=1,
                    input_sample_rate_hz=48000, # TODO: get samplerate from webrtc
                )
                lc3_frame_samples = encoder.get_frame_samples()
                big['pcm_bit_depth'] = 16
                big['lc3_frame_samples'] = lc3_frame_samples
                big['lc3_bytes_per_frame'] = global_config.octets_per_frame
                big['encoder'] = encoder
                big['precoded'] = False

            # precoded lc3 from ram
            elif isinstance(big_config[i].audio_source, bytes):
                big['precoded'] = True

                # Byte-wise iterator; frames are re-assembled with islice below.
                lc3_frames = iter(big_config[i].audio_source)

                if big_config[i].loop:
                    lc3_frames = itertools.cycle(lc3_frames)
                big['lc3_frames'] = lc3_frames

            # precoded lc3 file
            elif big_config[i].audio_source.endswith('.lc3'):
                big['precoded'] = True
                filename = big_config[i].audio_source.replace('file:', '')

                lc3_bytes = read_lc3_file(filename)
                lc3_frames = iter(lc3_bytes)

                if big_config[i].loop:
                    lc3_frames = itertools.cycle(lc3_frames)
                big['lc3_frames'] = lc3_frames

            # use wav files and code them entirely before streaming
            elif big_config[i].precode_wav and big_config[i].audio_source.endswith('.wav'):
                big['precoded'] = True

                audio_input = await audio_io.create_audio_input(audio_source, input_format)
                # Disable looping while pre-encoding so the read loop terminates.
                audio_input.rewind = False
                pcm_format = await audio_input.open()

                if pcm_format.channels != 1:
                    logging.error("Only 1 channels PCM configurations are supported")
                    return
                if pcm_format.sample_type == audio_io.PcmFormat.SampleType.INT16:
                    pcm_bit_depth = 16
                elif pcm_format.sample_type == audio_io.PcmFormat.SampleType.FLOAT32:
                    # None tells the LC3 encoder the input is float32.
                    pcm_bit_depth = None
                else:
                    logging.error("Only INT16 and FLOAT32 sample types are supported")
                    return
                encoder = lc3.Encoder(
                    frame_duration_us=global_config.frame_duration_us,
                    sample_rate_hz=global_config.auracast_sampling_rate_hz,
                    num_channels=1,
                    input_sample_rate_hz=pcm_format.sample_rate,
                )
                lc3_frame_samples = encoder.get_frame_samples() # number of the pcm samples per lc3 frame

                # Encode the whole file up front into one LC3 byte string.
                lc3_bytes = b''
                async for pcm_frame in audio_input.frames(lc3_frame_samples):
                    lc3_bytes += encoder.encode(
                        pcm_frame, num_bytes=global_config.octets_per_frame, bit_depth=pcm_bit_depth
                    )
                lc3_frames = iter(lc3_bytes)

                # have a look at itertools.islice
                if big_config[i].loop:
                    lc3_frames = itertools.cycle(lc3_frames)
                big['lc3_frames'] = lc3_frames

            # anything else, e.g. realtime stream from device (bumble) or non-precoded file
            else:
                current_big_config = self.big_config[i]
                audio_source_str = str(current_big_config.audio_source) # Ensure string type
                input_format_str = current_big_config.input_format
                input_gain_val = current_big_config.input_gain

                audio_filter_for_create = None
                effective_audio_source_for_create = audio_source_str

                if audio_source_str.startswith('device:'):
                    # Strip any ",gain" suffix from the device specifier.
                    parts = audio_source_str.split(':', 1)
                    if len(parts) > 1:
                        device_specifier_with_potential_gain = parts[1]
                        pure_device_name = device_specifier_with_potential_gain.split(',', 1)[0]
                        effective_audio_source_for_create = f"device:{pure_device_name}"

                    gain_to_apply = input_gain_val if input_gain_val is not None else 1.0
                    if abs(gain_to_apply - 1.0) > 0.01:
                        audio_filter_for_create = f"volume={gain_to_apply:.2f}"
                        logger.info(f"Applying FFmpeg volume filter for {effective_audio_source_for_create}: {audio_filter_for_create}")
                elif audio_source_str.startswith('file:'):
                    gain_to_apply = input_gain_val if input_gain_val is not None else 1.0
                    if abs(gain_to_apply - 1.0) > 0.01:
                        audio_filter_for_create = f"volume={gain_to_apply:.2f}"
                        logger.info(f"Applying FFmpeg volume filter for {audio_source_str}: {audio_filter_for_create}")

                # Prepare the source string, potentially with an FFmpeg filter
                final_audio_source_spec = effective_audio_source_for_create
                if current_big_config.input_gain is not None and input_format_str == 'ffmpeg': # Apply gain only if ffmpeg is used
                    audio_filter_value = f"volume={current_big_config.input_gain:.2f}"
                    logging.info(f"Applying FFmpeg volume filter for {effective_audio_source_for_create}: {audio_filter_value}")
                    # Append 'af' (audio filter) option to the source spec for FFmpeg
                    if '?' in final_audio_source_spec: # if there are already ffmpeg options (e.g. sample_rate)
                        final_audio_source_spec = f"{final_audio_source_spec}&af={audio_filter_value}"
                    else: # if this is the first ffmpeg option
                        final_audio_source_spec = f"{final_audio_source_spec},af={audio_filter_value}"

                # Initial creation of audio_input
                audio_input = await audio_io.create_audio_input(
                    final_audio_source_spec,
                    input_format=input_format_str
                )
                big['audio_input'] = audio_input # Store early for potential cleanup

                if hasattr(audio_input, "rewind"):
                    audio_input.rewind = current_big_config.loop

                import sounddevice as _sd
                max_attempts = 3
                pcm_format = None # Initialize pcm_format
                # Retry opening the device: PortAudio may report "device busy"
                # (-9985) shortly after a previous stream released it.
                for attempt in range(1, max_attempts + 1):
                    try:
                        logging.info(f"Attempting to open audio input: {effective_audio_source_for_create} (attempt {attempt})")
                        pcm_format = await audio_input.open()
                        logging.info(f"Successfully opened audio input: {effective_audio_source_for_create}, PCM Format: {pcm_format}")
                        break # success
                    except _sd.PortAudioError as err:
                        logging.error('Could not open audio device %s with error %s (attempt %d/%d)', effective_audio_source_for_create, err, attempt, max_attempts)
                        code = getattr(err, 'errno', None) or (err.args[1] if len(err.args) > 1 and isinstance(err.args[1], int) else None)
                        if code == -9985 and attempt < max_attempts: # paDeviceUnavailable
                            backoff_ms = (2 ** (attempt - 1)) * 100 # exponential backoff
                            logging.warning("PortAudio device busy. Retrying in %.1f ms…", backoff_ms)
                            try:
                                if hasattr(audio_input, "aclose"): await audio_input.aclose()
                                elif hasattr(audio_input, "close"): audio_input.close()
                            except Exception as close_err: logging.debug(f"Error closing audio_input during retry: {close_err}")
                            if hasattr(_sd, "_terminate"): # sounddevice specific cleanup
                                try: _sd._terminate()
                                except Exception as term_err: logging.debug(f"Error terminating PortAudio: {term_err}")
                            await asyncio.sleep(0.1)
                            if hasattr(_sd, "_initialize"): # sounddevice specific reinit
                                try: _sd._initialize()
                                except Exception as init_err: logging.debug(f"Error initializing PortAudio: {init_err}")
                            await asyncio.sleep(backoff_ms / 1000)
                            # Recreate audio_input for next attempt, using the potentially modified source spec
                            audio_input = await audio_io.create_audio_input(
                                final_audio_source_spec, # Use the spec that includes the filter if applicable
                                input_format=input_format_str
                            )
                            big['audio_input'] = audio_input # Update stored reference
                            if hasattr(audio_input, "rewind"):
                                audio_input.rewind = current_big_config.loop
                            continue
                        raise # Re-raise if not paDeviceUnavailable or max_attempts reached
                    except Exception as e:
                        logging.error(f"Unexpected error opening audio device {effective_audio_source_for_create}: {e}")
                        raise # Re-raise other unexpected errors
                else: # else for 'for' loop: if loop finished without break
                    logging.error("Unable to open audio device '%s' after %d attempts – giving up.", effective_audio_source_for_create, max_attempts)
                    return # Or handle error more gracefully, e.g. mark BIG as inactive

                # Proceed with encoder setup if pcm_format was obtained
                if not pcm_format:
                    logging.error(f"Failed to obtain PCM format for {effective_audio_source_for_create}. Cannot set up encoder.")
                    return

                if pcm_format.channels != 1:
                    logging.info("Input device '%s' provides %d channels – will down-mix to mono for LC3", effective_audio_source_for_create, pcm_format.channels)
                    # Downmixing is typically handled by FFmpeg if channels > 1 and output is mono
                    # For LC3, we always want mono, so this is informational.

                # Determine pcm_bit_depth for encoder based on pcm_format.sample_type
                if pcm_format.sample_type == audio_io.PcmFormat.SampleType.INT16:
                    pcm_bit_depth = 16
                elif pcm_format.sample_type == audio_io.PcmFormat.SampleType.FLOAT32:
                    pcm_bit_depth = None # LC3 encoder can handle float32 directly
                else:
                    logging.error("Unsupported PCM sample type: %s for %s. Only INT16 and FLOAT32 are supported.", pcm_format.sample_type, effective_audio_source_for_create)
                    return

                encoder = lc3.Encoder(
                    frame_duration_us=self.global_config.frame_duration_us,
                    sample_rate_hz=self.global_config.auracast_sampling_rate_hz,
                    num_channels=1, # LC3 is mono
                    input_sample_rate_hz=pcm_format.sample_rate,
                )
                lc3_frame_samples = encoder.get_frame_samples()
                big['pcm_bit_depth'] = pcm_bit_depth
                big['lc3_frame_samples'] = lc3_frame_samples
                big['lc3_bytes_per_frame'] = self.global_config.octets_per_frame
                big['encoder'] = encoder
                big['precoded'] = False


        logging.info("Streaming audio...")
        bigs = self.bigs
        self.is_streaming = True
        logging.info("Entering main streaming loop...")
        # One streamer fits all
        while self.is_streaming:
            stream_finished = [False for _ in range(len(bigs))]
            for i, big in enumerate(bigs.values()):

                if big['precoded']:# everything was already lc3 coded beforehand
                    # Slice the next LC3 frame out of the byte iterator.
                    lc3_frame = bytes(
                        itertools.islice(big['lc3_frames'], big['lc3_bytes_per_frame'])
                    )

                    if lc3_frame == b'': # Not all streams may stop at the same time
                        stream_finished[i] = True
                        continue
                else: # code lc3 on the fly
                    logging.debug(f"BIG {i} ({big.get('name', 'N/A')}): Attempting to read pcm_frame.")
                    # NOTE(review): a fresh frames() generator is created on every
                    # pass and advanced exactly once — confirm the audio input's
                    # frames() reads from shared stream state so no data is lost.
                    pcm_frame = await anext(big['audio_input'].frames(big['lc3_frame_samples']), None)
                    logging.debug(f"BIG {i} ({big.get('name', 'N/A')}): Read pcm_frame: {'None' if pcm_frame is None else f'type {type(pcm_frame)}, len {len(pcm_frame)} bytes' if isinstance(pcm_frame, bytes) else f'type {type(pcm_frame)}, shape {pcm_frame.shape}' if hasattr(pcm_frame, 'shape') else f'type {type(pcm_frame)}'}")

                    if pcm_frame is None: # Not all streams may stop at the same time
                        stream_finished[i] = True
                        continue

                    # Down-mix multi-channel PCM to mono for LC3 encoder if needed
                    # NOTE(review): big['channels'] is never set in this file, so
                    # this branch only runs if some other code populates it.
                    if big.get('channels', 1) > 1:
                        if isinstance(pcm_frame, np.ndarray):
                            if pcm_frame.ndim > 1:
                                mono = pcm_frame.mean(axis=1).astype(pcm_frame.dtype)
                                pcm_frame = mono
                        else:
                            # Convert raw bytes to numpy, average channels, convert back
                            dtype = np.int16 if big['pcm_bit_depth'] == 16 else np.float32
                            samples = np.frombuffer(pcm_frame, dtype=dtype)
                            samples = samples.reshape(-1, big['channels']).mean(axis=1)
                            pcm_frame = samples.astype(dtype).tobytes()

                    lc3_frame = big['encoder'].encode(
                        pcm_frame, num_bytes=big['lc3_bytes_per_frame'], bit_depth=big['pcm_bit_depth']
                    )

                await big['iso_queue'].write(lc3_frame)

            if all(stream_finished): # Take into account that multiple files have different lengths
                logging.info('All streams finished, stopping streamer')
                self.is_streaming = False
                break
|
||
|
||
|
||
# -----------------------------------------------------------------------------
|
||
# Main
|
||
# -----------------------------------------------------------------------------
|
||
|
||
async def broadcast(global_conf: auracast_config.AuracastGlobalConfig, big_conf: List[auracast_config.AuracastBigConfig]):
    """Start a broadcast."""

    # Auto-detect a Zephyr HCI UART dongle when no transport was configured.
    if global_conf.transport == 'auto':
        serial_devices = glob.glob('/dev/serial/by-id/*')
        logging.info('Found serial devices: %s', serial_devices)
        match = next(
            (d for d in serial_devices if 'usb-ZEPHYR_Zephyr_HCI_UART_sample' in d),
            None,
        )
        if match is not None:
            logging.info('Using: %s', match)
            global_conf.transport = f'serial:{match},115200,rtscts'

    # check again if transport is still auto
    if global_conf.transport == 'auto':
        raise AssertionError('No suitable transport found.')

    async with create_device(global_conf) as device:
        if not device.supports_le_periodic_advertising:
            logger.error(color('Periodic advertising not supported', 'red'))
            return

        # The bigs dictionary contains all the per-BIG state and objects.
        bigs = await init_broadcast(device, global_conf, big_conf)

        streamer = Streamer(bigs, global_conf, big_conf)
        streamer.start_streaming()
        await asyncio.wait([streamer.task])
|
||
|
||
|
||
# -----------------------------------------------------------------------------
|
||
if __name__ == "__main__":
    import os

    # Log level can be overridden from the environment, e.g. `export LOG_LEVEL=INFO`.
    logging.basicConfig( #export LOG_LEVEL=INFO
        level=os.environ.get('LOG_LEVEL', logging.DEBUG),
        format='%(module)s.py:%(lineno)d %(levelname)s: %(message)s'
    )
    # Run relative to this script so relative audio-file paths resolve.
    os.chdir(os.path.dirname(__file__))


    # One BIG per enabled language configuration.
    config = auracast_config.AuracastConfigGroup(
        bigs = [
            auracast_config.AuracastBigConfigDeu(),
            #auracast_config.AuracastBigConfigEng(),
            #auracast_config.AuracastBigConfigFra(),
            #auracast_config.AuracastBigConfigEs(),
            #auracast_config.AuracastBigConfigIt(),
        ]
    )

    # TODO: How can we use other iso interval than 10ms ?(medium or low rel) ? - nrf53audio receiver repports I2S tx underrun
    config.qos_config=auracast_config.AuracastQosHigh()

    #config.transport='serial:/dev/serial/by-id/usb-ZEPHYR_Zephyr_HCI_UART_sample_81BD14B8D71B5662-if00,1000000,rtscts' # transport for nrf52 dongle
    #config.transport='serial:/dev/serial/by-id/usb-SEGGER_J-Link_001050076061-if02,1000000,rtscts' # transport for nrf53dk
    #config.transport='serial:/dev/serial/by-id/usb-SEGGER_J-Link_001057705357-if02,1000000,rtscts' # transport for nrf54l15dk
    #config.transport='serial:/dev/serial/by-id/usb-ZEPHYR_Zephyr_HCI_UART_sample_95A087EADB030B24-if00,115200,rtscts' #nrf52dongle hci_uart usb cdc
    #config.transport='usb:2fe3:000b' #nrf52dongle hci_usb # TODO: iso packet over usb not supported
    config.transport= 'auto'
    #config.transport='serial:/dev/ttyAMA2,1000000,rtscts' # transport for raspberry pi


    for big in config.bigs: # TODO: encrypted streams are not working
        #big.code = 'ff'*16 # returns hci/HCI_ENCRYPTION_MODE_NOT_ACCEPTABLE_ERROR
        #big.code = '78 e5 dc f1 34 ab 42 bf c1 92 ef dd 3a fd 67 ae'
        big.precode_wav = True
        # Swap each wav source for its pre-encoded LC3 counterpart and load it
        # into memory so the Streamer takes the "precoded from ram" path.
        big.audio_source = big.audio_source.replace('.wav', '_10_16_32.lc3') #lc3 precoded files
        big.audio_source = read_lc3_file(big.audio_source) # load files in advance

        # --- Network_uncoded mode using NetworkAudioReceiver ---
        #big.audio_source = NetworkAudioReceiverUncoded(port=50007, samplerate=16000, channels=1, chunk_size=1024)

    # 16kHz works reliably with 3 streams
    # 24kHz is only working with 2 streams - probably airtime constraint
    # TODO: with more than three broadcasters (16kHz) no advertising (no primary channels is present anymore)
    # TODO: find the bottleneck - probably airtime
    # TODO: test encrypted streams

    config.auracast_sampling_rate_hz = 16000
    config.octets_per_frame = 40 # 32kbps@16kHz
    #config.debug = True

    run_async(
        broadcast(
            config,
            config.bigs
        )
    )
|
||
|
||
# TODO: possible inputs:
|
||
# wav file locally
|
||
# precoded lc3 file locally
|
||
# realtime audio locally
|
||
# realtime audio network lc3 coded
|
||
# (realtime audio network uncoded)
|