33 Commits
0.2 ... main

Author SHA1 Message Date
6d54e72f1d Add new reset mechanism with sleep. 2026-04-10 12:22:18 +02:00
df6c85d9ff Add new reset mechanism 2. 2026-04-10 11:58:16 +02:00
8106f61d6a Add new reset mechanism. 2026-04-10 10:57:12 +02:00
0a8dc74d5c Fixes script error in systemupdate. 2026-04-09 15:00:35 +02:00
8475e4d068 New system update logic. 2026-04-09 14:46:17 +02:00
3f01ef5968 Adds openocd with nrf support build to the server update function. Adds 2bad8ad2cd889d8c8d255b8e0dc0e7a187b98c9a hci_uart_beacon commit build hex file to project. (#26)
Co-authored-by: Pbopbo <p.obernesser@freenet.de>
Reviewed-on: #26
2026-04-09 12:04:18 +00:00
pober
67992e65ec Updates poetry lock. 2026-04-09 11:59:30 +02:00
0b12323921 fix/gain-4dbU (#25)
Co-authored-by: Pbopbo <p.obernesser@freenet.de>
Reviewed-on: #25
2026-04-09 09:54:14 +00:00
Pbopbo
6e633d2880 Merge branch 'wip_alsaaudio' TODO poetry lock 2026-04-09 11:51:37 +02:00
7bdf6f8417 feature/blue_led (#23)
Co-authored-by: pstruebi <office@summitwave.eu>
Co-authored-by: pober <paul.obernesser@summitwave.eu>
Co-authored-by: Pbopbo <p.obernesser@freenet.de>
Reviewed-on: #23
Co-authored-by: pstruebi <struebin.patrick@gmail.com>
Co-committed-by: pstruebi <struebin.patrick@gmail.com>
2026-04-07 14:34:11 +00:00
Pbopbo
291d75b137 stereo seems to work, NEEDS RADIO FIRMWARE WITH 2 TX BUFFERS. 2026-04-07 14:36:15 +02:00
Pbopbo
a126613739 First working version of two monos at the same time. 2026-04-02 18:56:17 +02:00
036b5f80dd Updates poetry lock. 2026-04-02 18:10:23 +02:00
Pbopbo
e818765b4f Adds sw_pyalsaaudio repo so our custom function works. 2026-04-02 17:37:38 +02:00
Pbopbo
3d59a6dabf ASRC: Adds NONBLOCK read from ALSA buffer; controls the amount of frames in the ALSA buffer; Adds resampling to get rid of audio glitches; no latency buildup anymore. 2026-04-01 14:00:26 +02:00
Pbopbo
cf69ad2957 134ms constant delay, no build up, seems to be no glitches, bang bang control. 2026-03-30 14:45:25 +02:00
Pbopbo
cdfecaf5eb delay method wip save to test no thread method. 2026-03-24 13:14:56 +01:00
4036fee1f5 Randomize Broadcast ID per stream instead of using static values 2026-03-24 12:09:16 +01:00
Pbopbo
1687a2b790 Latency lowered. 2026-03-18 17:37:34 +01:00
Pbopbo
a605195646 First good audio with alsaaudio. 2026-03-18 16:55:55 +01:00
pober
e1d717ed5c Adds DHCP/static IP toggle for both ports in the UI. 2026-03-03 15:50:19 +01:00
pober
540d8503ac Some corrections for Activates link local for both ports, removes fallback IP. 2026-03-03 15:35:13 +01:00
pober
c82f375539 Activates link local for both ports, removes fallback IP. 2026-03-03 15:02:55 +01:00
70bde5295f Fixes mDNS issue; when DHCP IP is present use this for mDNS and not the static fallback IP. 2026-02-16 16:25:59 +01:00
f5f93b4b8e analog_input_gain (#21)
- add input boost slider
- add level meter

Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/21
2026-02-12 17:09:46 +01:00
3322b9edf4 add 192.168.42.10 as default ip with update script 2026-02-12 17:08:23 +01:00
d6230e7522 add software gain boost parameter for input signal amplification 2026-02-12 13:30:07 +01:00
f2382470d8 add network information display showing hostname and IP address 2026-02-10 16:51:22 +01:00
7c2f0bf0cb add HTTP to HTTPS redirect server on port 80 2026-02-10 16:37:34 +01:00
184e9c84af impelement a gain slider 2026-01-20 18:00:37 +01:00
6852c74cd0 add a delete recordings button 2026-01-20 17:45:23 +01:00
7b77aa9042 update_from_main (#20)
- implement updates from main

Co-authored-by: pstruebi <patrick.struebin@summitwave.eu>
Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/20
2026-01-20 16:52:29 +01:00
pober
59ca5dafd2 stereo-support and dep-integration (#19)
Co-authored-by: pstruebi <struebin.patrick@gmail.com>
Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/19
2026-01-20 12:57:17 +01:00
95 changed files with 24453 additions and 564 deletions

3
.gitignore vendored
View File

@@ -50,3 +50,6 @@ ch2.wav
src/auracast/available_samples.txt
src/auracast/server/stream_settings2.json
src/scripts/temperature_log*
src/auracast/server/recordings/
src/auracast/server/led_settings.json

14
AGENTS.md Normal file
View File

@@ -0,0 +1,14 @@
# AGENTS.md
## Setup commands
- this project uses poetry for package management
- if something should be run in a python env use 'poetry run'
# Environment
- this application normally runs on an embedded linux on a cm4
## Application
- this is a Bluetooth Auracast transmitter application
- if you add a new parameter for a stream make sure it is saved to the settings.json so it is persisted
- it consists of multicast_frontend.py and multicast_server.py mainly which connect to each other via a rest api
- after you implement something, the user will mainly test it; call the update_and_run_server_and_frontend.sh script if the server and frontend were already running.

79
poetry.lock generated
View File

@@ -270,51 +270,6 @@ optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "av-14.4.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:10219620699a65b9829cfa08784da2ed38371f1a223ab8f3523f440a24c8381c"},
{file = "av-14.4.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:8bac981fde1c05e231df9f73a06ed9febce1f03fb0f1320707ac2861bba2567f"},
{file = "av-14.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc634ed5bdeb362f0523b73693b079b540418d35d7f3003654f788ae6c317eef"},
{file = "av-14.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23973ed5c5bec9565094d2b3643f10a6996707ddffa5252e112d578ad34aa9ae"},
{file = "av-14.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0655f7207db6a211d7cedb8ac6a2f7ccc9c4b62290130e393a3fd99425247311"},
{file = "av-14.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1edaab73319bfefe53ee09c4b1cf7b141ea7e6678a0a1c62f7bac1e2c68ec4e7"},
{file = "av-14.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b54838fa17c031ffd780df07b9962fac1be05220f3c28468f7fe49474f1bf8d2"},
{file = "av-14.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f4b59ac6c563b9b6197299944145958a8ec34710799fd851f1a889b0cbcd1059"},
{file = "av-14.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:a0192a584fae9f6cedfac03c06d5bf246517cdf00c8779bc33414404796a526e"},
{file = "av-14.4.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5b21d5586a88b9fce0ab78e26bd1c38f8642f8e2aad5b35e619f4d202217c701"},
{file = "av-14.4.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:cf8762d90b0f94a20c9f6e25a94f1757db5a256707964dfd0b1d4403e7a16835"},
{file = "av-14.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0ac9f08920c7bbe0795319689d901e27cb3d7870b9a0acae3f26fc9daa801a6"},
{file = "av-14.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a56d9ad2afdb638ec0404e962dc570960aae7e08ae331ad7ff70fbe99a6cf40e"},
{file = "av-14.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bed513cbcb3437d0ae47743edc1f5b4a113c0b66cdd4e1aafc533abf5b2fbf2"},
{file = "av-14.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d030c2d3647931e53d51f2f6e0fcf465263e7acf9ec6e4faa8dbfc77975318c3"},
{file = "av-14.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1cc21582a4f606271d8c2036ec7a6247df0831050306c55cf8a905701d0f0474"},
{file = "av-14.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ce7c9cd452153d36f1b1478f904ed5f9ab191d76db873bdd3a597193290805d4"},
{file = "av-14.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd261e31cc6b43ca722f80656c39934199d8f2eb391e0147e704b6226acebc29"},
{file = "av-14.4.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:a53e682b239dd23b4e3bc9568cfb1168fc629ab01925fdb2e7556eb426339e94"},
{file = "av-14.4.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:5aa0b901751a32703fa938d2155d56ce3faf3630e4a48d238b35d2f7e49e5395"},
{file = "av-14.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b316fed3597675fe2aacfed34e25fc9d5bb0196dc8c0b014ae5ed4adda48de"},
{file = "av-14.4.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a587b5c5014c3c0e16143a0f8d99874e46b5d0c50db6111aa0b54206b5687c81"},
{file = "av-14.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d53f75e8ac1ec8877a551c0db32a83c0aaeae719d05285281eaaba211bbc30"},
{file = "av-14.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c8558cfde79dd8fc92d97c70e0f0fa8c94c7a66f68ae73afdf58598f0fe5e10d"},
{file = "av-14.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:455b6410dea0ab2d30234ffb28df7d62ca3cdf10708528e247bec3a4cdcced09"},
{file = "av-14.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1661efbe9d975f927b8512d654704223d936f39016fad2ddab00aee7c40f412c"},
{file = "av-14.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:fbbeef1f421a3461086853d6464ad5526b56ffe8ccb0ab3fd0a1f121dfbf26ad"},
{file = "av-14.4.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:3d2aea7c602b105363903e4017103bc4b60336e7aff80e1c22e8b4ec09fd125f"},
{file = "av-14.4.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:38c18f036aeb6dc9abf5e867d998c867f9ec93a5f722b60721fdffc123bbb2ae"},
{file = "av-14.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c1e18c8be73b6eada2d9ec397852ec74ebe51938451bdf83644a807189d6c8"},
{file = "av-14.4.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4c32ff03a357feb030634f093089a73cb474b04efe7fbfba31f229cb2fab115"},
{file = "av-14.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af31d16ae25964a6a02e09cc132b9decd5ee493c5dcb21bcdf0d71b2d6adbd59"},
{file = "av-14.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e9fb297009e528f4851d25f3bb2781b2db18b59b10aed10240e947b77c582fb7"},
{file = "av-14.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:573314cb9eafec2827dc98c416c965330dc7508193adbccd281700d8673b9f0a"},
{file = "av-14.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f82ab27ee57c3b80eb50a5293222307dfdc02f810ea41119078cfc85ea3cf9a8"},
{file = "av-14.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f682003bbcaac620b52f68ff0e85830fff165dea53949e217483a615993ca20"},
{file = "av-14.4.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8ff683777e0bb3601f7cfb4545dca25db92817585330b773e897e1f6f9d612f7"},
{file = "av-14.4.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:fe372acf7b1814bc2b16d89161609db63f81dad88684da76d26dd32cd1c16f92"},
{file = "av-14.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de869030eb8acfdfe39f39965de3a899dcde9b08df2db41f183c6166ca6f6d09"},
{file = "av-14.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9117ed91fba6299b7d5233dd3e471770bab829f97e5a157f182761e9fb59254c"},
{file = "av-14.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54e8f9209184098b7755e6250be8ffa48a8aa5b554a02555406120583da17373"},
{file = "av-14.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:38ea51e62a014663caec7f621d6601cf269ef450f3c8705f5e3225e5623fd15d"},
{file = "av-14.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5d1d89842efe913448482573a253bd6955ce30a77f8a4cd04a1a3537cc919896"},
{file = "av-14.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c3048e333da1367a2bca47e69593e10bc70f027d876adee9d1582c8cb818f36a"},
{file = "av-14.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:5d6f25570d0782dd05640c7e1f71cb29857d94d915b5521a1e757ecae78a5a50"},
{file = "av-14.4.0.tar.gz", hash = "sha256:3ecbf803a7fdf67229c0edada0830d6bfaea4d10bfb24f0c3f4e607cd1064b42"},
]
@@ -1849,6 +1804,22 @@ files = [
{file = "protobuf-6.30.2.tar.gz", hash = "sha256:35c859ae076d8c56054c25b59e5e59638d86545ed6e2b6efac6be0b6ea3ba048"},
]
[[package]]
name = "pyalsaaudio"
version = "0.11.0"
description = "ALSA bindings"
optional = false
python-versions = "*"
groups = ["main"]
files = []
develop = false
[package.source]
type = "git"
url = "ssh://git@gitea.summitwave.work:222/auracaster/sw_pyalsaaudio.git"
reference = "b3d11582e03df6929b2e7acbaa1306afc7b8a6bc"
resolved_reference = "b3d11582e03df6929b2e7acbaa1306afc7b8a6bc"
[[package]]
name = "pyarrow"
version = "20.0.0"
@@ -2443,6 +2414,22 @@ files = [
{file = "rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3"},
]
[[package]]
name = "rpi-gpio"
version = "0.7.1"
description = "A module to control Raspberry Pi GPIO channels"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "RPi.GPIO-0.7.1-cp27-cp27mu-linux_armv6l.whl", hash = "sha256:b86b66dc02faa5461b443a1e1f0c1d209d64ab5229696f32fb3b0215e0600c8c"},
{file = "RPi.GPIO-0.7.1-cp310-cp310-linux_armv6l.whl", hash = "sha256:57b6c044ef5375a78c8dda27cdfadf329e76aa6943cd6cffbbbd345a9adf9ca5"},
{file = "RPi.GPIO-0.7.1-cp37-cp37m-linux_armv6l.whl", hash = "sha256:77afb817b81331ce3049a4b8f94a85e41b7c404d8e56b61ac0f1eb75c3120868"},
{file = "RPi.GPIO-0.7.1-cp38-cp38-linux_armv6l.whl", hash = "sha256:29226823da8b5ccb9001d795a944f2e00924eeae583490f0bc7317581172c624"},
{file = "RPi.GPIO-0.7.1-cp39-cp39-linux_armv6l.whl", hash = "sha256:15311d3b063b71dee738cd26570effc9985a952454d162937c34e08c0fc99902"},
{file = "RPi.GPIO-0.7.1.tar.gz", hash = "sha256:cd61c4b03c37b62bba4a5acfea9862749c33c618e0295e7e90aa4713fb373b70"},
]
[[package]]
name = "samplerate"
version = "0.2.2"
@@ -2976,4 +2963,4 @@ test = ["pytest", "pytest-asyncio"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.11"
content-hash = "3c9f92c7a5af40f98da9c7824d9c2a6f7eb809e91e43cfef4995761b2e887256"
content-hash = "7bccf2978170ead195e1e8cff151823a5276823195a239622186fcec830154d9"

View File

@@ -17,7 +17,9 @@ dependencies = [
"sounddevice (>=0.5.2,<0.6.0)",
"python-dotenv (>=1.1.1,<2.0.0)",
"smbus2 (>=0.5.0,<0.6.0)",
"samplerate (>=0.2.2,<0.3.0)"
"samplerate (>=0.2.2,<0.3.0)",
"rpi-gpio (>=0.7.1,<0.8.0)",
"pyalsaaudio @ git+ssh://git@gitea.summitwave.work:222/auracaster/sw_pyalsaaudio.git@b3d11582e03df6929b2e7acbaa1306afc7b8a6bc"
]
[project.optional-dependencies]

View File

@@ -111,3 +111,5 @@ class AuracastConfigGroup(AuracastGlobalConfig):
bigs: List[AuracastBigConfig] = [
AuracastBigConfigDeu(),
]
analog_gain_db_left: float = 0.0 # ADC gain level for analog mode left channel (-12 to 18 dB)
analog_gain_db_right: float = 0.0 # ADC gain level for analog mode right channel (-12 to 18 dB)

View File

@@ -30,6 +30,7 @@ import time
import threading
import numpy as np # for audio down-mix
import samplerate
import os
import lc3 # type: ignore # pylint: disable=E0401
@@ -56,100 +57,229 @@ from auracast.utils.webrtc_audio_input import WebRTCAudioInput
# Patch sounddevice.InputStream globally to use low-latency settings
import sounddevice as sd
import alsaaudio
from collections import deque
class ModSoundDeviceAudioInput(audio_io.SoundDeviceAudioInput):
"""Patched SoundDeviceAudioInput with low-latency capture and adaptive resampling."""
class AlsaArecordAudioInput(audio_io.AudioInput):
def __init__(self, device_name: str, pcm_format: audio_io.PcmFormat):
self._device_name = device_name
self._pcm_format = pcm_format
self._proc: asyncio.subprocess.Process | None = None
def _open(self):
"""Create RawInputStream with low-latency parameters and initialize ring buffer."""
dev_info = sd.query_devices(self._device)
hostapis = sd.query_hostapis()
api_index = dev_info.get('hostapi')
api_name = hostapis[api_index]['name'] if isinstance(api_index, int) and 0 <= api_index < len(hostapis) else 'unknown'
pa_ver = sd.get_portaudio_version()
async def open(self) -> audio_io.PcmFormat:
if self._proc is not None:
return self._pcm_format
args = [
'arecord',
'-D', self._device_name,
'-q',
'-t', 'raw',
'-f', 'S16_LE',
'-r', str(int(self._pcm_format.sample_rate)),
'-c', str(int(self._pcm_format.channels)),
]
logging.info(
"SoundDevice backend=%s device='%s' (id=%s) ch=%s default_low_input_latency=%.4f default_high_input_latency=%.4f portaudio=%s",
api_name,
dev_info.get('name'),
self._device,
dev_info.get('max_input_channels'),
float(dev_info.get('default_low_input_latency') or 0.0),
float(dev_info.get('default_high_input_latency') or 0.0),
pa_ver[1] if isinstance(pa_ver, tuple) and len(pa_ver) >= 2 else pa_ver,
"Opening ALSA capture via arecord: device='%s' rate=%s ch=%s",
self._device_name,
self._pcm_format.sample_rate,
self._pcm_format.channels,
)
# Create RawInputStream with injected low-latency parameters
# Target ~2 ms blocksize (48 kHz -> 96 frames). For other rates, keep ~2 ms.
_sr = int(self._pcm_format.sample_rate)
self.counter=0
self.max_avail=0
self.logfile_name="available_samples.txt"
self.blocksize = 120
self._proc = await asyncio.create_subprocess_exec(
*args,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
)
if os.path.exists(self.logfile_name):
os.remove(self.logfile_name)
if self._proc.stdout is None:
raise RuntimeError('arecord stdout pipe was not created')
self._stream = sd.RawInputStream(
samplerate=self._pcm_format.sample_rate,
return self._pcm_format
def frames(self, frame_size: int) -> AsyncGenerator[bytes]:
async def _gen() -> AsyncGenerator[bytes]:
if self._proc is None:
await self.open()
if self._proc is None or self._proc.stdout is None:
return
bytes_per_frame = frame_size * self._pcm_format.channels * self._pcm_format.bytes_per_sample
while True:
try:
data = await self._proc.stdout.readexactly(bytes_per_frame)
except asyncio.IncompleteReadError:
return
except Exception:
return
yield data
return _gen()
async def aclose(self) -> None:
if self._proc is None:
return
try:
if self._proc.returncode is None:
self._proc.terminate()
except ProcessLookupError:
pass
except Exception:
pass
with contextlib.suppress(Exception):
await asyncio.wait_for(self._proc.wait(), timeout=1.0)
if self._proc.returncode is None:
with contextlib.suppress(Exception):
self._proc.kill()
with contextlib.suppress(Exception):
await asyncio.wait_for(self._proc.wait(), timeout=1.0)
self._proc = None
class PyAlsaAudioInput(audio_io.ThreadedAudioInput):
"""PyALSA audio input with non-blocking reads - supports mono/stereo."""
def __init__(self, device, pcm_format: audio_io.PcmFormat):
super().__init__()
logging.info("PyALSA: device = %s", device)
self._device = str(device) if not isinstance(device, str) else device
if self._device.isdigit():
self._device = 'default' if self._device == '0' else f'hw:{self._device}'
self._pcm_format = pcm_format
self._pcm = None
self._actual_channels = None
self._periodsize = None
self._hw_channels = None
self._first_read = True
self._resampler = None
self._resampler_buffer = np.empty(0, dtype=np.float32)
def _open(self) -> audio_io.PcmFormat:
ALSA_PERIODSIZE = 240
ALSA_PERIODS = 4
ALSA_MODE = alsaaudio.PCM_NONBLOCK
requested_rate = int(self._pcm_format.sample_rate)
requested_channels = int(self._pcm_format.channels)
self._periodsize = ALSA_PERIODSIZE
self._pcm = alsaaudio.PCM(
type=alsaaudio.PCM_CAPTURE,
mode=ALSA_MODE,
device=self._device,
channels=self._pcm_format.channels,
dtype='int16',
blocksize=self.blocksize,
latency=0.004,
periods=ALSA_PERIODS,
)
self._stream.start()
self._pcm.setchannels(requested_channels)
self._pcm.setformat(alsaaudio.PCM_FORMAT_S16_LE)
actual_rate = self._pcm.setrate(requested_rate)
self._pcm.setperiodsize(ALSA_PERIODSIZE)
logging.info("PyALSA: device=%s rate=%d ch=%d periodsize=%d (%.1fms) periods=%d mode=%s",
self._device, actual_rate, requested_channels, ALSA_PERIODSIZE,
(ALSA_PERIODSIZE / actual_rate) * 1000, ALSA_PERIODS, ALSA_MODE)
if actual_rate != requested_rate:
logging.warning("PyALSA: Sample rate mismatch! requested=%d actual=%d", requested_rate, actual_rate)
self._actual_channels = requested_channels
self._resampler = samplerate.Resampler('sinc_fastest', channels=requested_channels)
self._resampler_buffer = np.empty(0, dtype=np.float32)
self._bang_bang = 0
return audio_io.PcmFormat(
audio_io.PcmFormat.Endianness.LITTLE,
audio_io.PcmFormat.SampleType.INT16,
self._pcm_format.sample_rate,
1,
actual_rate,
requested_channels,
)
def _read(self, frame_size: int) -> bytes:
"""Read PCM samples from the stream."""
try:
avail = self._pcm.avail()
logging.debug("PyALSA: avail before read: %d", avail)
length, data = self._pcm.read_sw(frame_size + self._bang_bang)
avail = self._pcm.avail()
SETPOINT = 120
TOLERANCE = 40
if avail < SETPOINT - TOLERANCE:
self._bang_bang = -1
elif avail > SETPOINT + TOLERANCE:
self._bang_bang = 1
else:
self._bang_bang = 0
logging.debug("PyALSA: read length=%d, data length=%d, avail=%d, bang_bang=%d", length, len(data), avail, self._bang_bang)
#if self.counter % 50 == 0:
frame_size = frame_size + 1 # consume samples a little faster to avoid latency akkumulation
if length > 0:
if self._first_read:
expected_mono = self._periodsize * 2
expected_stereo = self._periodsize * 2 * 2
# self._hw_channels = 2 if len(data) == expected_stereo else 1
self._hw_channels = self._actual_channels
logging.info("PyALSA first read: bytes=%d detected_hw_channels=%d requested_channels=%d",
len(data), self._hw_channels, self._actual_channels)
self._first_read = False
if self._hw_channels == 2 and self._actual_channels == 1:
pcm_stereo = np.frombuffer(data, dtype=np.int16)
pcm_mono = pcm_stereo[::2]
data = pcm_mono.tobytes()
actual_samples = len(data) // (2 * self._actual_channels)
ratio = frame_size / actual_samples
pcm_f32 = np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0
if self._actual_channels > 1:
pcm_f32 = pcm_f32.reshape(-1, self._actual_channels)
resampled = self._resampler.process(pcm_f32, ratio, end_of_input=False)
if self._actual_channels > 1:
resampled = resampled.reshape(-1)
self._resampler_buffer = np.concatenate([self._resampler_buffer, resampled])
else:
logging.warning("PyALSA: No data read from ALSA")
self._resampler_buffer = np.concatenate([
self._resampler_buffer,
np.zeros(frame_size * self._actual_channels, dtype=np.float32),
])
except alsaaudio.ALSAAudioError as e:
logging.error("PyALSA: ALSA read error: %s", e)
self._resampler_buffer = np.concatenate([
self._resampler_buffer,
np.zeros(frame_size * self._actual_channels, dtype=np.float32),
])
except Exception as e:
logging.error("PyALSA: Unexpected error in _read: %s", e, exc_info=True)
self._resampler_buffer = np.concatenate([
self._resampler_buffer,
np.zeros(frame_size * self._actual_channels, dtype=np.float32),
])
pcm_buffer, overflowed = self._stream.read(frame_size)
if overflowed:
logging.warning("SoundDeviceAudioInput: overflowed")
needed = frame_size * self._actual_channels
if len(self._resampler_buffer) < needed:
pad = np.zeros(needed - len(self._resampler_buffer), dtype=np.float32)
self._resampler_buffer = np.concatenate([self._resampler_buffer, pad])
logging.debug("PyALSA: padded buffer with %d samples", needed - len(self._resampler_buffer))
n_available = self._stream.read_available
output = self._resampler_buffer[:needed]
self._resampler_buffer = self._resampler_buffer[needed:]
# adapt = n_available > 20
# if adapt:
# pcm_extra, overflowed = self._stream.read(3)
# logging.info('consuming extra samples, available was %d', n_available)
# if overflowed:
# logging.warning("SoundDeviceAudioInput: overflowed")
# out = bytes(pcm_buffer) + bytes(pcm_extra)
# else:
out = bytes(pcm_buffer)
logging.debug("PyALSA: resampler_buffer remaining=%d", len(self._resampler_buffer))
return np.clip(output * 32767.0, -32768, 32767).astype(np.int16).tobytes()
self.max_avail = max(self.max_avail, n_available)
#Diagnostics
#with open(self.logfile_name, "a", encoding="utf-8") as f:
# f.write(f"{n_available}, {adapt}, {round(self._runavg, 2)}, {overflowed}\n")
def _close(self) -> None:
if self._pcm:
self._pcm.close()
self._pcm = None
if self.counter % 500 == 0:
logging.info(
"read available=%d, max=%d, latency:%d",
n_available, self.max_avail, self._stream.latency
)
self.max_avail = 0
self.counter += 1
return out
audio_io.SoundDeviceAudioInput = ModSoundDeviceAudioInput
audio_io.SoundDeviceAudioInput = PyAlsaAudioInput
# modified from bumble
class ModWaveAudioInput(audio_io.ThreadedAudioInput):
@@ -459,7 +589,7 @@ async def init_broadcast(
def on_flow():
data_packet_queue = iso_queue.data_packet_queue
print(
logging.info(
f'\rPACKETS: pending={data_packet_queue.pending}, '
f'queued={data_packet_queue.queued}, '
f'completed={data_packet_queue.completed}',
@@ -559,6 +689,12 @@ class Streamer():
except Exception:
pass
def get_audio_levels(self) -> list[float]:
"""Return current RMS audio levels (0.0-1.0) for each BIG."""
if not self.bigs:
return []
return [big.get('_audio_level_rms', 0.0) for big in self.bigs.values()]
async def stream(self):
bigs = self.bigs
@@ -671,7 +807,13 @@ class Streamer():
# anything else, e.g. realtime stream from device (bumble)
else:
audio_input = await audio_io.create_audio_input(audio_source, input_format)
if isinstance(audio_source, str) and audio_source.startswith('alsa:'):
if input_format == 'auto':
raise ValueError('input format details required for alsa input')
pcm = audio_io.PcmFormat.from_str(input_format)
audio_input = AlsaArecordAudioInput(audio_source[5:], pcm)
else:
audio_input = await audio_io.create_audio_input(audio_source, input_format)
# Store early so stop_streaming can close even if open() fails
big['audio_input'] = audio_input
# SoundDeviceAudioInput (used for `mic:<device>` captures) has no `.rewind`.
@@ -767,6 +909,11 @@ class Streamer():
stream_finished[i] = True
continue
# Compute RMS audio level (normalized 0.0-1.0) for level monitoring
pcm_samples = np.frombuffer(pcm_frame, dtype=np.int16).astype(np.float32)
rms = np.sqrt(np.mean(pcm_samples ** 2)) / 32768.0 if len(pcm_samples) > 0 else 0.0
big['_audio_level_rms'] = float(rms)
# Measure LC3 encoding time
t1 = time.perf_counter()
num_bis = big.get('num_bis', 1)

View File

@@ -37,6 +37,12 @@ class Multicaster:
'is_initialized': self.is_auracast_init,
'is_streaming': streaming,
}
def get_audio_levels(self) -> list[float]:
"""Return current RMS audio levels (0.0-1.0) for each BIG."""
if self.streamer is not None and self.streamer.is_streaming:
return self.streamer.get_audio_levels()
return []
async def init_broadcast(self):
self.device_acm = multicast.create_device(self.global_conf)
@@ -137,6 +143,10 @@ async def main():
level=os.environ.get('LOG_LEVEL', logging.DEBUG),
format='%(module)s.py:%(lineno)d %(levelname)s: %(message)s'
)
# Enable debug logging for bumble
# logging.getLogger('bumble').setLevel(logging.DEBUG)
os.chdir(os.path.dirname(__file__))
global_conf = auracast_config.AuracastGlobalConfig(

View File

@@ -0,0 +1,42 @@
"""Minimal HTTP server that redirects all requests to HTTPS (port 443).
Run on port 80 alongside the HTTPS Streamlit frontend so that users who
type a bare IP address into their browser are automatically forwarded.
"""
import http.server
import sys
class RedirectHandler(http.server.BaseHTTPRequestHandler):
    """Answer every request with a 301 redirect to the same host/path over HTTPS."""

    def do_GET(self):
        # Prefer the client-supplied Host header (minus any :port suffix);
        # fall back to the address the server is bound to.
        requested_host = self.headers.get("Host", "").split(":")[0] or self.server.server_address[0]
        self.send_response(301)
        self.send_header("Location", f"https://{requested_host}{self.path}")
        self.end_headers()

    # Handle every method the same way
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET
    do_HEAD = do_GET

    def log_message(self, format, *args):
        # Keep logging minimal
        sys.stderr.write(f"[http-redirect] {self.address_string()} -> https {args[0] if args else ''}\n")
def main():
    """Bind the redirect server (port from argv[1], default 80) and serve until interrupted."""
    listen_port = int(sys.argv[1]) if len(sys.argv) > 1 else 80
    httpd = http.server.HTTPServer(("0.0.0.0", listen_port), RedirectHandler)
    print(f"HTTP->HTTPS redirect server listening on 0.0.0.0:{listen_port}")
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()


if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -33,5 +33,12 @@ echo "Using Avahi domain: $AVAHI_DOMAIN"
# Path to poetry binary
POETRY_BIN="/home/caster/.local/bin/poetry"
# Start HTTP->HTTPS redirect server on port 80 (background)
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
python3 "$SCRIPT_DIR/http_to_https_redirect.py" 80 &
REDIRECT_PID=$!
echo "HTTP->HTTPS redirect server started (PID $REDIRECT_PID)"
trap "kill $REDIRECT_PID 2>/dev/null" EXIT
# Start Streamlit HTTPS server (port 443)
$POETRY_BIN run streamlit run multicast_frontend.py --server.port 443 --server.address 0.0.0.0 --server.enableCORS false --server.enableXsrfProtection false --server.headless true --server.sslCertFile "$CERT" --server.sslKeyFile "$KEY" --browser.gatherUsageStats false

View File

@@ -0,0 +1,90 @@
#!/bin/bash
# system_update.sh - Runs after git checkout in the Python system_update endpoint.
# Called with the current working directory = project root.
# All output is also written to /tmp/system_update.log for debugging.
#
# Steps:
#   1. poetry install for the project
#   2. clone/update + (re)build sw_openocd when its HEAD changed
#   3. flash merged.hex to both SWD interfaces
#   4. restart server/frontend services (kills this process too)
set -uo pipefail
exec > >(tee -a /tmp/system_update.log) 2>&1

# Fail with a tagged error message on stderr-equivalent (merged log) and exit.
die() { echo "[system_update] ERROR: $*"; exit 1; }

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
readonly POETRY="$HOME/.local/bin/poetry"
readonly OPENOCD_SRC="$HOME/sw_openocd"
readonly OPENOCD_REPO="ssh://git@gitea.summitwave.work:222/auracaster/sw_openocd.git"
readonly OPENOCD_BRANCH="change-8818"
readonly OPENOCD_MARKER="$OPENOCD_SRC/.last_built_commit"
readonly OPENOCD_DIR="$PROJECT_ROOT/src/openocd"

echo "[system_update] Starting post-checkout update. project_root=$PROJECT_ROOT"

# 1. poetry install
echo "[system_update] Running poetry install..."
(cd "$PROJECT_ROOT" && "$POETRY" install) || die "poetry install failed"

# 2. Clone/update and build sw_openocd if needed
if [ ! -d "$OPENOCD_SRC" ]; then
  echo "[system_update] Installing sw_openocd build dependencies..."
  # apt-get (not apt) for non-interactive/script use; deps failures are
  # deliberately non-fatal because they may already be installed.
  sudo apt-get install -y git build-essential libtool autoconf texinfo \
    libusb-1.0-0-dev libftdi1-dev libhidapi-dev pkg-config || \
    echo "[system_update] WARNING: apt install deps had errors, continuing"
  sudo apt-get install -y pkg-config libjim-dev || \
    echo "[system_update] WARNING: apt-get install libjim-dev had errors, continuing"
  echo "[system_update] Cloning sw_openocd branch $OPENOCD_BRANCH..."
  git clone --branch "$OPENOCD_BRANCH" --single-branch "$OPENOCD_REPO" "$OPENOCD_SRC" \
    || die "git clone sw_openocd failed"
else
  echo "[system_update] Updating sw_openocd..."
  git -C "$OPENOCD_SRC" fetch origin "$OPENOCD_BRANCH"
  git -C "$OPENOCD_SRC" checkout "$OPENOCD_BRANCH"
  git -C "$OPENOCD_SRC" pull
fi

OPENOCD_COMMIT=$(git -C "$OPENOCD_SRC" rev-parse HEAD) || die "cannot resolve sw_openocd HEAD"
LAST_BUILT=""
# Plain 'if' instead of '[ -f … ] && …' so a missing marker can never be
# mistaken for a command failure (e.g. under a future 'set -e').
if [ -f "$OPENOCD_MARKER" ]; then
  LAST_BUILT=$(cat "$OPENOCD_MARKER")
fi

# Rebuild only when the checked-out commit differs from the last built one.
if [ "$OPENOCD_COMMIT" != "$LAST_BUILT" ]; then
  echo "[system_update] Building sw_openocd (commit $OPENOCD_COMMIT)..."
  (cd "$OPENOCD_SRC" && ./bootstrap) || die "openocd bootstrap failed"
  (cd "$OPENOCD_SRC" && ./configure --enable-bcm2835gpio --enable-sysfsgpio) || die "openocd configure failed"
  (cd "$OPENOCD_SRC" && make) || die "openocd make failed"
  (cd "$OPENOCD_SRC" && sudo make install) || die "openocd make install failed"
  echo "$OPENOCD_COMMIT" > "$OPENOCD_MARKER"
  echo "[system_update] sw_openocd built and installed (commit $OPENOCD_COMMIT)"
else
  echo "[system_update] sw_openocd up to date (commit $OPENOCD_COMMIT), skipping build"
fi

# 3. Flash firmware to both SWD interfaces
readonly FLASH_SCRIPT="$OPENOCD_DIR/flash.sh"
readonly HEX_FILE="$OPENOCD_DIR/merged.hex"
for IFACE in swd0 swd1; do
  echo "[system_update] Flashing $IFACE..."
  (cd "$OPENOCD_DIR" && bash "$FLASH_SCRIPT" -i "$IFACE" -f "$HEX_FILE") || die "flash $IFACE failed"
  echo "[system_update] Flash $IFACE complete"
done

# 4. Restart services (this will kill this process too)
echo "[system_update] Restarting services..."
bash "$PROJECT_ROOT/src/service/update_and_run_server_and_frontend.sh"

View File

@@ -243,7 +243,8 @@ def get_alsa_usb_inputs():
'usb' in name or
re.search(r'hw:\d+(?:,\d+)?', name) or
name.startswith('dsnoop') or
name in ('ch1', 'ch2')
name in ('ch1', 'ch2') or
name.startswith('dante_asrc_ch')
):
usb_inputs.append((idx, dev))

View File

@@ -0,0 +1 @@
../dante_data/capability/config.json

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,26 @@
root:x:0:
daemon:x:1:
bin:x:2:
sys:x:3:
adm:x:4:
tty:x:5:
disk:x:6:
lp:x:7:
mail:x:8:
kmem:x:9:
wheel:x:10:root
cdrom:x:11:
dialout:x:18:
floppy:x:19:
video:x:28:
audio:x:29:
tape:x:32:
www-data:x:33:
operator:x:37:
utmp:x:43:
plugdev:x:46:
staff:x:50:
lock:x:54:
netdev:x:82:
users:x:100:
nobody:x:65534:

View File

@@ -0,0 +1 @@
buildroot

View File

@@ -0,0 +1,2 @@
127.0.0.1 localhost
127.0.1.1 buildroot

View File

@@ -0,0 +1 @@
Welcome to Buildroot

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@
../proc/self/mounts

View File

@@ -0,0 +1,21 @@
#!/bin/sh
# In case we have a slow-to-appear interface (e.g. eth-over-USB),
# and we need to configure it, wait until it appears, but not too
# long either. IF_WAIT_DELAY is in seconds.
#
# Inputs (environment): IF_WAIT_DELAY - max seconds to wait (unset = no wait);
#                       IFACE         - interface name, set by ifup.
# Exit status: 0 if the interface is present (or no wait requested),
#              1 on timeout.
#
# '[ -n ... ] && [ ! -e ... ]' replaces the obsolescent '-a' test operator.
if [ -n "${IF_WAIT_DELAY}" ] && [ ! -e "/sys/class/net/${IFACE}" ]; then
	printf "Waiting for interface %s to appear" "${IFACE}"
	while [ "${IF_WAIT_DELAY}" -gt 0 ]; do
		if [ -e "/sys/class/net/${IFACE}" ]; then
			printf "\n"
			exit 0
		fi
		sleep 1
		printf "."
		# POSIX arithmetic decrement; ':' discards the expansion result.
		: $((IF_WAIT_DELAY -= 1))
	done
	printf " timeout!\n"
	exit 1
fi

View File

@@ -0,0 +1,20 @@
#!/bin/sh
# This allows NFS booting to work while also being able to configure
# the network interface via DHCP when not NFS booting. Otherwise, a
# NFS booted system will likely hang during DHCP configuration.
# Attempting to configure the network interface used for NFS will
# initially bring that network down. Since the root filesystem is
# accessed over this network, the system hangs.
# This script is run by ifup and will attempt to detect if a NFS root
# mount uses the interface to be configured (IFACE), and if so does
# not configure it. This should allow the same build to be disk/flash
# booted or NFS booted.
#
# Exit status: 1 (skip interface) when IFACE carries the NFS root, else 0.
# Extract the NFS server address from the root mount entry, if any.
# $( ) replaces legacy backticks.
nfsip=$(sed -n '/^[^ ]*:.* \/ nfs.*[ ,]addr=\([0-9.]\+\).*/s//\1/p' /proc/mounts)
if [ -n "$nfsip" ] && ip route get to "$nfsip" | grep -q "dev $IFACE"; then
	echo "Skipping $IFACE, used for NFS from $nfsip"
	exit 1
fi

View File

@@ -0,0 +1,13 @@
# /etc/nsswitch.conf
passwd: files
group: files
shadow: files
hosts: files dns
networks: files dns
protocols: files
services: files
ethers: files
rpc: files

View File

@@ -0,0 +1 @@
../usr/lib/os-release

View File

@@ -0,0 +1,9 @@
root:x:0:0:root:/root:/bin/sh
daemon:x:1:1:daemon:/usr/sbin:/bin/false
bin:x:2:2:bin:/bin:/bin/false
sys:x:3:3:sys:/dev:/bin/false
sync:x:4:100:sync:/bin:/bin/sync
mail:x:8:8:mail:/var/spool/mail:/bin/false
www-data:x:33:33:www-data:/var/www:/bin/false
operator:x:37:37:Operator:/var:/bin/false
nobody:x:65534:65534:nobody:/home:/bin/false

View File

@@ -0,0 +1,19 @@
# System-wide shell profile: set PATH, pick a prompt, choose an editor,
# then source every drop-in fragment from /etc/profile.d.
export PATH="/bin:/sbin:/usr/bin:/usr/sbin"
# Only set a prompt for interactive shells (PS1 already set by the shell).
if [ "$PS1" ]; then
	# $( ) replaces legacy backticks; quoted so the test never word-splits.
	if [ "$(id -u)" -eq 0 ]; then
		export PS1='# '
	else
		export PS1='$ '
	fi
fi
export EDITOR='/bin/vi'
# Source configuration files from /etc/profile.d
for i in /etc/profile.d/*.sh ; do
	# With no matches the literal glob remains in $i and fails -r, so the
	# loop is a no-op. Quoting "$i" protects paths containing spaces.
	if [ -r "$i" ]; then
		. "$i"
	fi
done
unset i

View File

@@ -0,0 +1 @@
umask 022

View File

@@ -0,0 +1,61 @@
# Internet (IP) protocols
#
# Updated from http://www.iana.org/assignments/protocol-numbers and other
# sources.
ip 0 IP # internet protocol, pseudo protocol number
hopopt 0 HOPOPT # IPv6 Hop-by-Hop Option [RFC1883]
icmp 1 ICMP # internet control message protocol
igmp 2 IGMP # Internet Group Management
ggp 3 GGP # gateway-gateway protocol
ipencap 4 IP-ENCAP # IP encapsulated in IP (officially ``IP'')
st 5 ST # ST datagram mode
tcp 6 TCP # transmission control protocol
egp 8 EGP # exterior gateway protocol
igp 9 IGP # any private interior gateway (Cisco)
pup 12 PUP # PARC universal packet protocol
udp 17 UDP # user datagram protocol
hmp 20 HMP # host monitoring protocol
xns-idp 22 XNS-IDP # Xerox NS IDP
rdp 27 RDP # "reliable datagram" protocol
iso-tp4 29 ISO-TP4 # ISO Transport Protocol class 4 [RFC905]
dccp 33 DCCP # Datagram Congestion Control Prot. [RFC4340]
xtp 36 XTP # Xpress Transfer Protocol
ddp 37 DDP # Datagram Delivery Protocol
idpr-cmtp 38 IDPR-CMTP # IDPR Control Message Transport
ipv6 41 IPv6 # Internet Protocol, version 6
ipv6-route 43 IPv6-Route # Routing Header for IPv6
ipv6-frag 44 IPv6-Frag # Fragment Header for IPv6
idrp 45 IDRP # Inter-Domain Routing Protocol
rsvp 46 RSVP # Reservation Protocol
gre 47 GRE # General Routing Encapsulation
esp 50 IPSEC-ESP # Encap Security Payload [RFC2406]
ah 51 IPSEC-AH # Authentication Header [RFC2402]
skip 57 SKIP # SKIP
ipv6-icmp 58 IPv6-ICMP # ICMP for IPv6
ipv6-nonxt 59 IPv6-NoNxt # No Next Header for IPv6
ipv6-opts 60 IPv6-Opts # Destination Options for IPv6
rspf 73 RSPF CPHB # Radio Shortest Path First (officially CPHB)
vmtp 81 VMTP # Versatile Message Transport
eigrp 88 EIGRP # Enhanced Interior Routing Protocol (Cisco)
ospf 89 OSPFIGP # Open Shortest Path First IGP
ax.25 93 AX.25 # AX.25 frames
ipip 94 IPIP # IP-within-IP Encapsulation Protocol
etherip 97 ETHERIP # Ethernet-within-IP Encapsulation [RFC3378]
encap 98 ENCAP # Yet Another IP encapsulation [RFC1241]
# 99 # any private encryption scheme
pim 103 PIM # Protocol Independent Multicast
ipcomp 108 IPCOMP # IP Payload Compression Protocol
vrrp 112 VRRP # Virtual Router Redundancy Protocol [RFC5798]
l2tp 115 L2TP # Layer Two Tunneling Protocol [RFC2661]
isis 124 ISIS # IS-IS over IPv4
sctp 132 SCTP # Stream Control Transmission Protocol
fc 133 FC # Fibre Channel
mobility-header 135 Mobility-Header # Mobility Support for IPv6 [RFC3775]
udplite 136 UDPLite # UDP-Lite [RFC3828]
mpls-in-ip 137 MPLS-in-IP # MPLS-in-IP [RFC4023]
manet 138 # MANET Protocols [RFC5498]
hip 139 HIP # Host Identity Protocol
shim6 140 Shim6 # Shim6 Protocol [RFC5533]
wesp 141 WESP # Wrapped Encapsulating Security Payload
rohc 142 ROHC # Robust Header Compression

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,302 @@
# /etc/services:
# $Id: services,v 1.1 2004/10/09 02:49:18 andersen Exp $
#
# Network services, Internet style
#
# Note that it is presently the policy of IANA to assign a single well-known
# port number for both TCP and UDP; hence, most entries here have two entries
# even if the protocol doesn't support UDP operations.
# Updated from RFC 1700, ``Assigned Numbers'' (October 1994). Not all ports
# are included, only the more common ones.
tcpmux 1/tcp # TCP port service multiplexer
echo 7/tcp
echo 7/udp
discard 9/tcp sink null
discard 9/udp sink null
systat 11/tcp users
daytime 13/tcp
daytime 13/udp
netstat 15/tcp
qotd 17/tcp quote
msp 18/tcp # message send protocol
msp 18/udp # message send protocol
chargen 19/tcp ttytst source
chargen 19/udp ttytst source
ftp-data 20/tcp
ftp 21/tcp
fsp 21/udp fspd
ssh 22/tcp # SSH Remote Login Protocol
ssh 22/udp # SSH Remote Login Protocol
telnet 23/tcp
# 24 - private
smtp 25/tcp mail
# 26 - unassigned
time 37/tcp timserver
time 37/udp timserver
rlp 39/udp resource # resource location
nameserver 42/tcp name # IEN 116
whois 43/tcp nicname
re-mail-ck 50/tcp # Remote Mail Checking Protocol
re-mail-ck 50/udp # Remote Mail Checking Protocol
domain 53/tcp nameserver # name-domain server
domain 53/udp nameserver
mtp 57/tcp # deprecated
bootps 67/tcp # BOOTP server
bootps 67/udp
bootpc 68/tcp # BOOTP client
bootpc 68/udp
tftp 69/udp
gopher 70/tcp # Internet Gopher
gopher 70/udp
rje 77/tcp netrjs
finger 79/tcp
www 80/tcp http # WorldWideWeb HTTP
www 80/udp # HyperText Transfer Protocol
link 87/tcp ttylink
kerberos 88/tcp kerberos5 krb5 # Kerberos v5
kerberos 88/udp kerberos5 krb5 # Kerberos v5
supdup 95/tcp
# 100 - reserved
hostnames 101/tcp hostname # usually from sri-nic
iso-tsap 102/tcp tsap # part of ISODE.
csnet-ns 105/tcp cso-ns # also used by CSO name server
csnet-ns 105/udp cso-ns
# unfortunately the poppassd (Eudora) uses a port which has already
# been assigned to a different service. We list the poppassd as an
# alias here. This should work for programs asking for this service.
# (due to a bug in inetd the 3com-tsmux line is disabled)
#3com-tsmux 106/tcp poppassd
#3com-tsmux 106/udp poppassd
rtelnet 107/tcp # Remote Telnet
rtelnet 107/udp
pop-2 109/tcp postoffice # POP version 2
pop-2 109/udp
pop-3 110/tcp # POP version 3
pop-3 110/udp
sunrpc 111/tcp portmapper # RPC 4.0 portmapper TCP
sunrpc 111/udp portmapper # RPC 4.0 portmapper UDP
auth 113/tcp authentication tap ident
sftp 115/tcp
uucp-path 117/tcp
nntp 119/tcp readnews untp # USENET News Transfer Protocol
ntp 123/tcp
ntp 123/udp # Network Time Protocol
netbios-ns 137/tcp # NETBIOS Name Service
netbios-ns 137/udp
netbios-dgm 138/tcp # NETBIOS Datagram Service
netbios-dgm 138/udp
netbios-ssn 139/tcp # NETBIOS session service
netbios-ssn 139/udp
imap2 143/tcp # Interim Mail Access Proto v2
imap2 143/udp
snmp 161/udp # Simple Net Mgmt Proto
snmp-trap 162/udp snmptrap # Traps for SNMP
cmip-man 163/tcp # ISO mgmt over IP (CMOT)
cmip-man 163/udp
cmip-agent 164/tcp
cmip-agent 164/udp
xdmcp 177/tcp # X Display Mgr. Control Proto
xdmcp 177/udp
nextstep 178/tcp NeXTStep NextStep # NeXTStep window
nextstep 178/udp NeXTStep NextStep # server
bgp 179/tcp # Border Gateway Proto.
bgp 179/udp
prospero 191/tcp # Cliff Neuman's Prospero
prospero 191/udp
irc 194/tcp # Internet Relay Chat
irc 194/udp
smux 199/tcp # SNMP Unix Multiplexer
smux 199/udp
at-rtmp 201/tcp # AppleTalk routing
at-rtmp 201/udp
at-nbp 202/tcp # AppleTalk name binding
at-nbp 202/udp
at-echo 204/tcp # AppleTalk echo
at-echo 204/udp
at-zis 206/tcp # AppleTalk zone information
at-zis 206/udp
qmtp 209/tcp # The Quick Mail Transfer Protocol
qmtp 209/udp # The Quick Mail Transfer Protocol
z3950 210/tcp wais # NISO Z39.50 database
z3950 210/udp wais
ipx 213/tcp # IPX
ipx 213/udp
imap3 220/tcp # Interactive Mail Access
imap3 220/udp # Protocol v3
ulistserv 372/tcp # UNIX Listserv
ulistserv 372/udp
https 443/tcp # MCom
https 443/udp # MCom
snpp 444/tcp # Simple Network Paging Protocol
snpp 444/udp # Simple Network Paging Protocol
saft 487/tcp # Simple Asynchronous File Transfer
saft 487/udp # Simple Asynchronous File Transfer
npmp-local 610/tcp dqs313_qmaster # npmp-local / DQS
npmp-local 610/udp dqs313_qmaster # npmp-local / DQS
npmp-gui 611/tcp dqs313_execd # npmp-gui / DQS
npmp-gui 611/udp dqs313_execd # npmp-gui / DQS
hmmp-ind 612/tcp dqs313_intercell# HMMP Indication / DQS
hmmp-ind 612/udp dqs313_intercell# HMMP Indication / DQS
#
# UNIX specific services
#
exec 512/tcp
biff 512/udp comsat
login 513/tcp
who 513/udp whod
shell 514/tcp cmd # no passwords used
syslog 514/udp
printer 515/tcp spooler # line printer spooler
talk 517/udp
ntalk 518/udp
route 520/udp router routed # RIP
timed 525/udp timeserver
tempo 526/tcp newdate
courier 530/tcp rpc
conference 531/tcp chat
netnews 532/tcp readnews
netwall 533/udp # -for emergency broadcasts
uucp 540/tcp uucpd # uucp daemon
afpovertcp 548/tcp # AFP over TCP
afpovertcp 548/udp # AFP over TCP
remotefs 556/tcp rfs_server rfs # Brunhoff remote filesystem
klogin 543/tcp # Kerberized `rlogin' (v5)
kshell 544/tcp krcmd # Kerberized `rsh' (v5)
kerberos-adm 749/tcp # Kerberos `kadmin' (v5)
#
webster 765/tcp # Network dictionary
webster 765/udp
#
# From ``Assigned Numbers'':
#
#> The Registered Ports are not controlled by the IANA and on most systems
#> can be used by ordinary user processes or programs executed by ordinary
#> users.
#
#> Ports are used in the TCP [45,106] to name the ends of logical
#> connections which carry long term conversations. For the purpose of
#> providing services to unknown callers, a service contact port is
#> defined. This list specifies the port used by the server process as its
#> contact port. While the IANA can not control uses of these ports it
#> does register or list uses of these ports as a convienence to the
#> community.
#
nfsdstatus 1110/tcp
nfsd-keepalive 1110/udp
ingreslock 1524/tcp
ingreslock 1524/udp
prospero-np 1525/tcp # Prospero non-privileged
prospero-np 1525/udp
datametrics 1645/tcp old-radius # datametrics / old radius entry
datametrics 1645/udp old-radius # datametrics / old radius entry
sa-msg-port 1646/tcp old-radacct # sa-msg-port / old radacct entry
sa-msg-port 1646/udp old-radacct # sa-msg-port / old radacct entry
radius 1812/tcp # Radius
radius 1812/udp # Radius
radacct 1813/tcp # Radius Accounting
radacct 1813/udp # Radius Accounting
nfsd 2049/tcp nfs
nfsd 2049/udp nfs
cvspserver 2401/tcp # CVS client/server operations
cvspserver 2401/udp # CVS client/server operations
mysql 3306/tcp # MySQL
mysql 3306/udp # MySQL
rfe 5002/tcp # Radio Free Ethernet
rfe 5002/udp # Actually uses UDP only
cfengine 5308/tcp # CFengine
cfengine 5308/udp # CFengine
bbs 7000/tcp # BBS service
#
#
# Kerberos (Project Athena/MIT) services
# Note that these are for Kerberos v4, and are unofficial. Sites running
# v4 should uncomment these and comment out the v5 entries above.
#
kerberos4 750/udp kerberos-iv kdc # Kerberos (server) udp
kerberos4 750/tcp kerberos-iv kdc # Kerberos (server) tcp
kerberos_master 751/udp # Kerberos authentication
kerberos_master 751/tcp # Kerberos authentication
passwd_server 752/udp # Kerberos passwd server
krb_prop 754/tcp # Kerberos slave propagation
krbupdate 760/tcp kreg # Kerberos registration
kpasswd 761/tcp kpwd # Kerberos "passwd"
kpop 1109/tcp # Pop with Kerberos
knetd 2053/tcp # Kerberos de-multiplexor
zephyr-srv 2102/udp # Zephyr server
zephyr-clt 2103/udp # Zephyr serv-hm connection
zephyr-hm 2104/udp # Zephyr hostmanager
eklogin 2105/tcp # Kerberos encrypted rlogin
#
# Unofficial but necessary (for NetBSD) services
#
supfilesrv 871/tcp # SUP server
supfiledbg 1127/tcp # SUP debugging
#
# Datagram Delivery Protocol services
#
rtmp 1/ddp # Routing Table Maintenance Protocol
nbp 2/ddp # Name Binding Protocol
echo 4/ddp # AppleTalk Echo Protocol
zip 6/ddp # Zone Information Protocol
#
# Services added for the Debian GNU/Linux distribution
poppassd 106/tcp # Eudora
poppassd 106/udp # Eudora
mailq 174/tcp # Mailer transport queue for Zmailer
mailq 174/udp # Mailer transport queue for Zmailer
omirr 808/tcp omirrd # online mirror
omirr 808/udp omirrd # online mirror
rmtcfg 1236/tcp # Gracilis Packeten remote config server
xtel 1313/tcp # french minitel
coda_opcons 1355/udp # Coda opcons (Coda fs)
coda_venus 1363/udp # Coda venus (Coda fs)
coda_auth 1357/udp # Coda auth (Coda fs)
coda_udpsrv 1359/udp # Coda udpsrv (Coda fs)
coda_filesrv 1361/udp # Coda filesrv (Coda fs)
codacon 1423/tcp venus.cmu # Coda Console (Coda fs)
coda_aux1 1431/tcp # coda auxiliary service (Coda fs)
coda_aux1 1431/udp # coda auxiliary service (Coda fs)
coda_aux2 1433/tcp # coda auxiliary service (Coda fs)
coda_aux2 1433/udp # coda auxiliary service (Coda fs)
coda_aux3 1435/tcp # coda auxiliary service (Coda fs)
coda_aux3 1435/udp # coda auxiliary service (Coda fs)
cfinger 2003/tcp # GNU Finger
afbackup 2988/tcp # Afbackup system
afbackup 2988/udp # Afbackup system
icp 3130/tcp # Internet Cache Protocol (Squid)
icp 3130/udp # Internet Cache Protocol (Squid)
postgres 5432/tcp # POSTGRES
postgres 5432/udp # POSTGRES
fax 4557/tcp # FAX transmission service (old)
hylafax 4559/tcp # HylaFAX client-server protocol (new)
noclog 5354/tcp # noclogd with TCP (nocol)
noclog 5354/udp # noclogd with UDP (nocol)
hostmon 5355/tcp # hostmon uses TCP (nocol)
hostmon 5355/udp # hostmon uses TCP (nocol)
ircd 6667/tcp # Internet Relay Chat
ircd 6667/udp # Internet Relay Chat
webcache 8080/tcp # WWW caching service
webcache 8080/udp # WWW caching service
tproxy 8081/tcp # Transparent Proxy
tproxy 8081/udp # Transparent Proxy
mandelspawn 9359/udp mandelbrot # network mandelbrot
amanda 10080/udp # amanda backup services
amandaidx 10082/tcp # amanda backup services
amidxtape 10083/tcp # amanda backup services
isdnlog 20011/tcp # isdn logging system
isdnlog 20011/udp # isdn logging system
vboxd 20012/tcp # voice box system
vboxd 20012/udp # voice box system
binkp 24554/tcp # Binkley
binkp 24554/udp # Binkley
asp 27374/tcp # Address Search Protocol
asp 27374/udp # Address Search Protocol
tfido 60177/tcp # Ifmail
tfido 60177/udp # Ifmail
fido 60179/tcp # Ifmail
fido 60179/udp # Ifmail
# Local services

View File

@@ -0,0 +1,9 @@
root::::::::
daemon:*:::::::
bin:*:::::::
sys:*:::::::
sync:*:::::::
mail:*:::::::
www-data:*:::::::
operator:*:::::::
nobody:*:::::::

View File

@@ -0,0 +1,33 @@
-----BEGIN CERTIFICATE-----
MIIFrzCCA5egAwIBAgIUOTWl6IoOr4Lvj0qz0qoA2W8q6SEwDQYJKoZIhvcNAQEL
BQAwUjELMAkGA1UEBhMCQVUxDDAKBgNVBAgMA05TVzERMA8GA1UECgwIQXVkaW5h
dGUxFDASBgNVBAsMC0VuZ2luZWVyaW5nMQwwCgYDVQQDDANERVAwIBcNNzAwMTAx
MDAwMDAwWhgPMjE3MDAxMDExMjAwMDBaMFIxCzAJBgNVBAYTAkFVMQwwCgYDVQQI
DANOU1cxETAPBgNVBAoMCEF1ZGluYXRlMRQwEgYDVQQLDAtFbmdpbmVlcmluZzEM
MAoGA1UEAwwDREVQMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAr2Xf
QTbmntPQQxYSceK8a8OkKCCTkX2HLmpTDDBrySdBTtvu0jJDIISEtzSUQu8UQ+8H
2atTXgc+Cseam+fDOsU3gmDgn+lmcA5o1rDLrXlQZCJrO/JUpyQ5v7kkGU33AqKc
Ik307fPwkX6YvBBy4Zc/T23bU8KXW+8beQrteImie9Pw9tt9GBvREox0/MBv23qs
3IDdPXB5qdFdenwAwIUGvG43Aohhldnp063HLc8GNySmfwuFpCPgMYph+jX/yKv+
EMBmH9KIhYDvzuq28NJajTaJtsMXr1OlW9a6s2zehK7JYnjrqo3J7ebhb6nxz4co
uQ39DHc+Hsbfi/Bg/UpabpW5Wdl3GHOaeUNWpLheifO2OP35S3yPkTCiz8Wfu7l3
CO67360PaBTmq7tjen2H6pFLWsGLsjcjnUtp0sf50LAhpFHvsBVKS7MkkXwNdncg
X+kGTDCpYNeKdCU6s71Z7RKXjArO1RtRxVU1N4l5U3JOYQ7jChLGwoFDfpBRMVh+
gNNi9/lLNU++gRFmF+1i//bI44Z+cviHkfGq+dIpWBO1KSkGtAE9hWmq0qNdWS1V
b60LU9h4IMg23XYmHpJEyQoKTz4EIJgVjor9ErHws9Ig5vG4FGyXnIjPC9N7PgN1
iDIrf8FgaOSRQPcBd3VjguW4Y8qFts1opQCKFbkCAwEAAaN7MHkwCQYDVR0TBAIw
ADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUw
HQYDVR0OBBYEFNG9TtgJ0D6ukZf/M4ZyEAW+i+95MB8GA1UdIwQYMBaAFNG9TtgJ
0D6ukZf/M4ZyEAW+i+95MA0GCSqGSIb3DQEBCwUAA4ICAQBBmzlYv9E7W5E0Lv0X
wszsgvbfavB83vvhYkDiPFIUt+6s6b97TNtKhfZZWbJprL4Gt262Xt1t4pZY8csU
co8qpEop4uA943A2z+3Fwc5OuYH+TMlvYVhRLnCFtgeu89VaXEPAFd+d1652nfta
l1y9Gj8NsPdsM0xwDzChRqyAR9heXil1ZhLVzfJr3ri0jdK9ZEIfsS8GOgSmLm+5
LO9UXu73Sqq4qWpp54T7AGEJz7DqIHQSY1c/beuFn7W8Ox9K/9MPoq3mQnn24z/1
rS3chxriKTe3hcslKs0I5HE7HF9SSG9sW6GI5TUcWfCAjIn74WK5/q6XBhh+Io1p
QnsokMzgvNIk/Eit+9P38uNkU1TNsL+wMgV2/qNcRHX5gSfr3yhCN/tg8zCcBkef
Ek5dmdxfSY8wq273W6rKKJbGB3Eb9R9gpYnHWAgiH/qJ0epnT20ynakRUpj9ZSza
cuEZNbcXBUQPCUkyDUOpW7h4budXIMlIbvMtiL8pna8XQM309K9CNZfV16QGuK1P
KNjdiwvl7UVtEUiTCAwt+rq0N3tRuq5ceK9YrnWiCBCXTMIa9o3sv6IgByXvOCkv
uNa8PJr1O8uPtbQqaMsBC3nnG2wpSsEjaurYy5SOSUVvXxVnb3L93FuADMgn1i5Y
UtfGCCL4MCkNn9APcgzpRSqUFA==
-----END CERTIFICATE-----

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@@ -0,0 +1,4 @@
/* GNU ld script
Use the shared library, but some functions are only in
the static library. */
GROUP ( libgcc_s.so.1 -lgcc )

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1 @@
lib

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1 @@
libasound.so.2.0.0

View File

@@ -0,0 +1 @@
libasound.so.2.0.0

View File

@@ -0,0 +1 @@
libstdc++.so.6.0.28

View File

@@ -0,0 +1 @@
libstdc++.so.6.0.28

View File

@@ -0,0 +1 @@
libz.so.1.2.11

View File

@@ -0,0 +1 @@
libz.so.1.2.11

Binary file not shown.

View File

@@ -0,0 +1,5 @@
NAME=Buildroot
VERSION=2021.11
ID=buildroot
VERSION_ID=2021.11
PRETTY_NAME="Buildroot 2021.11"

View File

@@ -0,0 +1 @@
lib

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@
/tmp

BIN
src/dep/dante_package/crun Executable file

Binary file not shown.

View File

@@ -0,0 +1,320 @@
{
"ociVersion": "1.0.1",
"process": {
"terminal": false,
"user": {
"uid": 0,
"gid": 0
},
"args": [
"./dep_manager",
"/dante_data/capability/dante.json"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/dante",
"TERM=xterm"
],
"cwd": "/dante",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"ambient": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs",
"readonly": false
},
"hostname": "",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/var/run",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"]
},
{
"destination": "/var/run/dante",
"type": "bind",
"source": "/var/run/dante",
"options": ["bind", "rw"]
},
{
"destination": "/var/lib/dbus/machine-id",
"type": "bind",
"source": "/var/lib/dbus/machine-id",
"options": ["ro", "rbind", "rprivate", "nosuid", "noexec", "nodev"]
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": ["nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"]
},
{
"destination": "/dev/shm",
"type": "bind",
"source": "/dev/shm",
"options": ["bind", "rw"]
},
{
"destination": "/dev/snd",
"type": "bind",
"source": "/dev/snd",
"options": ["bind", "rw"]
},
{
"destination": "/tmp",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"]
},
{
"destination": "/var/log",
"type": "bind",
"source": "/var/log",
"options": ["bind", "rw"]
},
{
"destination": "/etc/machine-id",
"type": "bind",
"source": "/etc/machine-id",
"options": ["ro", "rbind", "rprivate", "nosuid", "noexec", "nodev"]
},
{
"destination": "/etc/resolv.conf",
"type": "bind",
"source": "/etc/resolv.conf",
"options": ["ro", "rbind", "rprivate", "nosuid", "noexec", "nodev"]
},
{
"destination": "/dante_data",
"type": "bind",
"source": "/home/caster/bumble-auracast/src/dep/dante_package/dante_data",
"options": ["bind", "rw"]
},
{
"destination": "/dante_data/capability",
"type": "bind",
"source": "/home/caster/bumble-auracast/src/dep/dante_package/dante_data/capability",
"options": ["bind", "ro"]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": ["nosuid", "noexec", "nodev", "ro"]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup2",
"source": "cgroup2",
"options": ["nosuid", "noexec", "nodev", "relatime", "ro"]
},
{
"destination": "/usr/share/alsa/alsa.conf",
"type": "bind",
"source": "/usr/share/alsa/alsa.conf",
"options": ["bind", "ro"]
}
],
"linux": {
"cgroupsPath": "dante",
"namespaces": [
{ "type": "pid" },
{ "type": "ipc" },
{ "type": "mount" },
{ "type": "uts" },
{ "type": "cgroup" }
],
"devices": [
{
"path": "/dev/ptp0",
"type": "c",
"major": 249,
"minor": 0,
"fileMode": 384,
"uid": 0,
"gid": 0
},
{
"path": "/dev/snd/pcmC3D0p",
"type": "c",
"major": 116,
"minor": 8,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/pcmC3D0c",
"type": "c",
"major": 116,
"minor": 9,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/pcmC3D1p",
"type": "c",
"major": 116,
"minor": 10,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/pcmC3D1c",
"type": "c",
"major": 116,
"minor": 11,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/controlC3",
"type": "c",
"major": 116,
"minor": 12,
"fileMode": 432,
"uid": 0,
"gid": 29
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
],
"resources":{
"devices":[
{
"allow": true,
"type": "c",
"major": 116,
"access": "rw"
}
]
}
}
}

View File

@@ -0,0 +1,80 @@
{
"trialMode": true,
"$schema": "./dante.json_schema.json",
"platform":
{
"cgroupVersion": 2,
"logDirectory" : "/var/log"
},
"audio" :
{
"txChannels" : 0,
"rxChannels" : 6,
"sampleRate" : 48000,
"availableSampleRates" :
[
48000
],
"samplesPerPeriod" : 16,
"periodsPerBuffer" : 300,
"networkLatencyMinMs" : 2,
"networkLatencyDefaultMs" : 5,
"supportedEncodings" :
[
"PCM16"
],
"defaultEncoding" : "PCM16",
"numDepCores" : 1
},
"network" :
{
"interfaceMode" : "Direct",
"interfaces" :
[
"eth0"
],
"preferredLinkSpeed" : "LINK_SPEED_100M"
},
"clock" :
{
"enableHwTimestamping" : false
},
"hardwareClock" :
{
"useHwClock" : false
},
"hostcpu" :
{
"enableDdp" : false
},
"alsaAsrc":
{
"enableAlsaAsrc": true,
"deviceConfigurations": [
{
"deviceIdentifier": "hw:0,0",
"direction": "playback",
"bitDepth": 16,
"numOpenChannels": 6,
"alsaChannelRange": "0-5",
"danteChannelRange": "0-5",
"bufferSize": 4800,
"samplesPerPeriod": 16
}
]
},
"product" :
{
"manfId" : "Audinate",
"manfName" : "Audinate Pty Ltd",
"modelId" : "OEMDEP",
"modelName" : "Linux Dante Embedded Platform",
"modelVersion" :
{
"major" : 9,
"minor" : 9,
"bugfix" : 99
},
"devicePrefix" : "DEP"
}
}

View File

@@ -0,0 +1,734 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for Dante Embedded Platform dante.json configuration file.",
"type": "object",
"properties": {
"$schema": {
"type": "string",
"description": "Removes superfluous warning on the schema field introduced by additionalProperties = false."
},
"platform": {
"type": "object",
"properties": {
"cgroupVersion": {
"type": "integer",
"oneOf": [
{
"enum": [1, 2]
}
],
"description": "Tells DEP which version of cgroups the system is configured for. Setting this value allows DEP to optimise the system more appropriately. If this value is not set at all then DEP will still function but possibly in a less optimal way."
},
"logDirectory": {
"type": "string",
"description": "Directory to write DEP log files. The directory must be available inside the container, DEP must have read/write access to the directory and the directory must exist before the DEP container is started."
},
"maxNumLogs": {
"type": "integer",
"default": 6,
"description": "Maximum number of versions of each DEP log file to keep."
},
"maxLogSize": {
"type": "integer",
"default": 102400,
"description": "Maximum number of bytes of each DEP log file."
},
"logLevel": {
"type": "string",
"oneOf": [
{
"enum": [
"Error",
"Warning",
"Notice",
"Info",
"Debug"
]
}
],
"default": "Warning",
"description": "The minimum log level to write to the log files."
}
},
"required": [
"logDirectory"
]
},
"audio": {
"type": "object",
"properties": {
"rxChannels": {
"type": "integer",
"minimum": 0,
"maximum": 512,
"description": "The number of receive channels. Actual number of available channels will be the minimum of this value and the licensed channels."
},
"txChannels": {
"type": "integer",
"minimum": 0,
"maximum": 512,
"description": "The number of transmit channels. Actual number of available channels will be the minimum of this value and the licensed channels."
},
"maxRxFlows": {
"type": "integer",
"description": "Maximum receive flows that DEP will allow. Default is 'MAX(2, (rxChannels + 1) / 2)'."
},
"maxTxFlows": {
"type": "integer",
"description": "Maximum transmit flows that DEP will allow. Default is 'MAX(2, (txChannels + 1) / 2)'."
},
"sampleRate": {
"type": "integer",
"oneOf": [
{
"enum": [
44100,
48000,
88200,
96000
]
}
],
"default": 48000,
"description": "Default sample rate of DEP."
},
"availableSampleRates": {
"type": "array",
"minItems": 1,
"items": {
"type": "integer",
"oneOf": [
{
"enum": [
44100,
48000,
88200,
96000
]
}
]
},
"default": [
44100,
48000,
88200,
96000
],
"contains": {
"const": 48000
},
"uniqueItems": true,
"description": "A list of sample rates that can be selected for the DEP device."
},
"samplesPerPeriod": {
"type": "integer",
"default": 16,
"description": "The number of samples between audio period events (ticks)."
},
"periodsPerBuffer": {
"type": "integer",
"default": 3000,
"description": "The number of periods in the buffer."
},
"networkLatencyMinMs": {
"type": "integer",
"oneOf": [
{
"enum": [
1,
2,
3,
4,
5,
10
]
}
],
"default": 2,
"description": "The minimum latency in milliseconds (ms) that this device can support."
},
"networkLatencyDefaultMs": {
"type": "integer",
"minimum": 1,
"maximum": 40,
"default": 4,
"description": "Default network latency in milliseconds (ms)."
},
"numDepCores": {
"anyOf": [
{
"type": "array",
"items": {
"type": "integer",
"minimum": 0
},
"minItems": 1,
"uniqueItems": true,
"description": "List of CPU core IDs DEP will run on."
},
{
"type": "integer",
"minimum": 0,
"description": "The number of CPU cores DEP will run on. DEP will run on the given number of consecutive cores starting from core 0"
}
],
"description": "The CPU cores DEP will run on. The host system is responsible for ensuring the configured cores are isolated exclusively for DEP use."
},
"percentCpuShare": {
"type": "integer",
"minimum": 1,
"maximum": 100,
"default": 100,
"description": "The share of CPU time DEP will be allocated when CPU time is under contention. NOTE: this setting has no effect when cgroups v2 is in use and will be deprecated soon."
},
"defaultEncoding": {
"type": "string",
"oneOf": [
{
"enum": [
"PCM16",
"PCM24",
"PCM32"
]
}
],
"description": "(DEPRECATED) The default native device encoding value. This field should no longer be used. The default should instead be specified as the first entry in supportedEncodings"
},
"supportedEncodings": {
"type": "array",
"items": {
"type": "string",
"oneOf": [
{
"enum": [
"PCM16",
"PCM24",
"PCM32"
]
}
]
},
"default": [
"PCM24"
],
"uniqueItems": true,
"description": "A list of supported native device encoding values."
},
"aes67Supported": {
"type": "boolean",
"default": false,
"description": "Whether this device supports the AES67 protocol."
},
"channelGroupsFile": {
"type": "string",
"description": "The full path to a separate JSON file that specifies logical channel groupings."
},
"defaultChannelNamesFile": {
"type": "string",
"description": "The full path to a separate JSON file that specifies custom default channel names."
},
"perChannelEncodingsFile": {
"type": "string",
"description": "The full path to a separate JSON file that specifies per-channel encodings."
},
"enableSelfSubscription": {
"type": "boolean",
"default": true,
"description": "Whether the device self-subscription capability should be enabled."
},
"silenceHeadDelayMs" : {
"type": "integer",
"default": 20,
"description": "DEP erases audio in the rx and tx audio buffer shortly after the network time for those frames passes. This controls the delay on this erasure measured in milliseconds."
}
},
"required": [
"txChannels",
"rxChannels",
"availableSampleRates",
"numDepCores"
]
},
"network": {
"type": "object",
"properties": {
"interfaceMode": {
"type": "string",
"oneOf": [
{
"enum": [
"Switched",
"Direct"
]
}
],
"default": "Direct",
"description": "DEP network interface mode. Direct means connected to the network via a PHY; Switched means connected to the network via a switch."
},
"interfaces": {
"type": "array",
"items": {
"anyOf": [
{ "type": "string" },
{ "type": "integer" }
]
},
"minItems": 1,
"maxItems": 2,
"uniqueItems": true,
"description": "List of network interface names or indexes DEP will use to connect to the Dante network."
},
"preferredLinkSpeed": {
"type": "string",
"oneOf": [
{
"enum": [
"LINK_SPEED_10G",
"LINK_SPEED_1G",
"LINK_SPEED_100M"
]
}
],
"default": "LINK_SPEED_1G",
"description": "The preferred link speed of the network interface/s used by DEP."
},
"webSocketPort": {
"type": "integer",
"minimum": 1024,
"maximum": 65535,
"description": "The websocket port used by DEP. If not set an ephemeral port is used."
}
},
"required": [
"interfaces"
]
},
"mdns": {
"type": "object",
"properties": {
"restrictInterfaces": {
"type": "boolean",
"default": true,
"description": "Whether to restrict mDNS advertisements to only the specified network interfaces."
}
}
},
"clock": {
"type": "object",
"properties": {
"enableHwTimestamping": {
"anyOf": [
{
"type": "boolean"
},
{
"const": "v1"
}
],
"default": false,
"description": "Whether to use hardware packet timestamping at the Network Interface Card (NIC) level."
},
"dsaTaggedPackets": {
"type": "boolean",
"default": false,
"description": "Whether packets read from the network interface have a DSA tag attached."
},
"hardwareInterfaces": {
"type": "array",
"items": {
"anyOf": [
{ "type": "string" },
{ "type": "integer" }
]
},
"minItems": 1,
"maxItems": 2,
"description": "List of network interface names or indexes that support hardware packet timestamping."
},
"followerOnly": {
"type": "boolean",
"default": false,
"description": "Whether the device should be in follower only mode. When true, DEP cannot become clock leader."
}
},
"if": {
"properties": {
"enableHwTimestamping": {
"anyOf": [
{ "const": true },
{ "const": "v1" }
]
},
"dsaTaggedPackets": { "const": true }
},
"required": [
"enableHwTimestamping", "dsaTaggedPackets"
]
},
"then": {
"required": [
"hardwareInterfaces"
]
}
},
"hardwareClock": {
"type": "object",
"properties": {
"useHwClock": {
"type": "boolean",
"default": false,
"description": "Enable use of clocking hardware."
},
"circuitName": {
"type": "string",
"description": "Name of the clock generator and adjustment circuitry. This field must be one of the supported strings in a DEP release."
},
"circuitRevision": {
"type": "integer",
"description": "An integer representing the circuit revision to use. This field must correspond to a supported revision and circuit in a DEP release."
},
"i2cBus": {
"type": "string",
"default": "/dev/i2c-0",
"description": "The I2C bus device to use to communicate with the clock circuitry. If not present, the first I2C bus device '/dev/i2c-0' is used.",
"pattern": "^\\/dev"
},
"i2cAddr": {
"type": "string",
"description": "The I2C address configurable for a circuit. If not present, the default addresses for the circuit are used."
},
"extClockInputDev": {
"type": "string",
"default": "/dev/extclkin",
"description": "The device path to the external clock input driver used in the clock feedback algorithm. If not present, this field defaults to '/dev/extclkin'.",
"pattern": "^\\/dev"
},
"bitClocks": {
"type": "array",
"description": "An array of mappings between the sample rate and bit clock configurations.",
"items": {
"type": "object",
"properties": {
"sampleRate": {
"type": "integer",
"description": "Sample rate of the mapping."
},
"tdmChannels": {
"type": "integer",
"description": "Number of TDM channels."
},
"bitDepth": {
"type": "integer",
"description": "Bit depth of mapping."
}
},
"required": [
"tdmChannels",
"bitDepth"
]
}
},
"loadCapacitance": {
"type": "integer",
"default": -1,
"description": "Value for the internal load capacitance in pf to set for the clock circuit. If not set or set to a negative number the circuits default will be used. The default and set of valid values are clock circuit specific. For DEP supported si5351b based clock circuits the default load capacitance is 10pF and the set of valid values for this field are 6, 8 and 10."
}
},
"if": {
"properties": {
"useHwClock": { "const": true }
},
"required": ["useHwClock"]
},
"then": {
"required": ["circuitName"]
}
},
"hostcpu": {
"type": "object",
"properties": {
"enableDdp": {
"type": "boolean",
"default": false,
"description": "Enable the 'Dante Device Protocol'."
}
},
"required": ["enableDdp"]
},
"alsaAsrc": {
"type": "object",
"properties": {
"enableAlsaAsrc": {
"type": "boolean",
"description": "Set to true to enable ALSA ASRC and false to disable."
},
"txLatencySamples": {
"type": "integer",
"default": 48,
"description": "Offset used by ASRC when writing audio to the DEP TX buffer measured in samples."
},
"pollMode": {
"type": "boolean",
"default": false,
"description": "If true, ALSA ASRC will not wait on the DEP shared memory semaphore and will instead poll the memory to determine when more data is available."
},
"schedulingPriority": {
"type": "integer",
"default": 70,
"minimum": 0,
"maximum": 100,
"description": "The real-time scheduling priority to run the ALSA ASRC application at."
},
"cpuAffinity": {
"type": "integer",
"minimum": 0,
"description": "The CPU core ID which should be exclusively assigned to ASRC. NOTE: for optimal performance, ensure that the selected CPU core ID is not already listed in the numDepCores value"
},
"deviceConfigurations": {
"type": "array",
"minItems": 1,
"uniqueItems": true,
"description": "List of devices to open. This key is required if Asrc is enabled.",
"items": {
"allOf": [
{
"type": "object",
"description": "Configuration options for each ALSA device to be opened",
"properties": {
"deviceIdentifier": {
"type": "string",
"description": "The ALSA device identifier for this device, e.g. \"hw:1,0\" or \"hw:CARD=sofhdadsp,DEV=0\"."
},
"direction": {
"enum": [
"playback",
"capture"
],
"description": "The direction to open the ALSA device in. Must be \"capture\" or \"playback\"."
},
"bitDepth": {
"enum": [
16,
24,
32
],
"default": 24,
"description": "The PCM bit depth to open the ALSA device with. The device will be opened with the first format it claims to support which is that depth. Typically this maps 8 to S8, 16 to S16_LE, 24 to S24_LE and 32 to S32_LE."
},
"bitWidthOverride": {
"enum": [
16,
24,
32
],
"description": "The number of bits each sample is packed into. For example, \"bitDepth\": 24, \"bitWidthOverride\": 24 is equivalent to S24_3LE, so the application writes samples aligned to 3 bytes. This overrides the alignment of the selected format. So if, for example, a device only claims to support S24_LE (24 bits aligned to 32 bit words) but actually writes 24 bit samples aligned to 24 bits, this setting can account for this."
},
"alsaFormat": {
"enum": [
"S16_LE",
"S24_LE",
"S32_LE",
"FLOAT_LE",
"S24_3LE",
"S16",
"S24",
"S32",
"FLOAT"
],
"description": "The specific ALSA format name to open the device with. Incompatible with bitDepth."
},
"numOpenChannels": {
"type": "integer",
"minimum": 1,
"default": 2,
"description": "The number of channels to open on the ALSA device."
},
"alsaChannelRange": {
"type": "string",
"pattern": "^[0-9]+-[0-9]+$",
"description": "The block of ALSA channels to use. Can only be provided if numOpenChannels is specified. String of the form \"X-Y\" where X and Y are zero indexed channel numbers, specifying the block [X,Y] inclusive. Defaults to 0-(numOpenChannels - 1)"
},
"danteChannelRange": {
"type": "string",
"pattern": "^[0-9]+-[0-9]+$",
"description": "The block of DEP channels this device will read from or write to. Can only be provided if numOpenChannels is specified. String of the form \"X-Y\" where X and Y are zero indexed channel numbers, specifying the block [X,Y] inclusive. Defaults to 0-(numOpenChannels-1)"
},
"gain": {
"type": "integer",
"default": 0,
"description": "Positive or negative gain in dB to apply to the audio for this device."
},
"bufferSize": {
"type": "integer",
"default": 64,
"description": "Size of the ALSA buffer to request the device to open with. Should never be less than 2 DEP periods or 2 ALSA periods. Note that the exact numbers for bufferSize and samplesPerPeriod are merely a request, and individual ALSA drivers are entitled to find other nearby valid values, if necessary."
},
"samplesPerPeriod": {
"type": "integer",
"default": 8,
"description": "Samples per period to request the ALSA device to open with. Note that the exact numbers for bufferSize and samplesPerPeriod are merely a request, and individual ALSA drivers are entitled to find other nearby valid values, if necessary."
},
"latency": {
"type": "integer",
"description": "By default, ASRC maintains the ALSA buffer at its halfway point - which corresponds to the insertion latency of ASRC. This key overrides this behaviour, specifying the target buffer point in samples."
},
"readWriteiBuffer": {
"type": "integer",
"description": "For drivers that don't support MMAP (memory-mapped) buffer operations, the application can emulate the memory mapping internally by inserting an additional buffer and services that through ALSA R/W calls. If this value is >0, it specifies the size of this additional buffer."
},
"forceArtificialAudioTime": {
"type": "boolean",
"description": "This setting provides an override for drivers which don't provide correct audio timestamps. If this is set to true, ASRC overrides the audio time with an artificial one calculated from sample counts."
}
},
"required": [
"deviceIdentifier",
"direction"
]
},
{
"description": "The alsaFormat option is incompatible with the bitDepth and bitWidthOverride option",
"if": {
"anyOf": [
{"required": ["bitDepth"]},
{"required": ["bitWidthOverride"]}
]
},
"then": {
"not": {"required": ["alsaFormat"]}
}
},
{
"description": "alsaChannelRange and danteChannelRange can each only be defined if numOpenChannels is specified",
"if": {
"anyOf": [
{"required": ["alsaChannelRange"]},
{"required": ["danteChannelRange"]}
]
},
"then": {
"required": ["numOpenChannels"]
}
}
]
}
}
},
"required": [
"enableAlsaAsrc"
]
},
"product": {
"type": "object",
"properties": {
"manfId": {
"type": "string",
"minLength": 1,
"maxLength": 8,
"description": "The ID of the device manufacturer. This value is assigned to the manufacturer by Audinate when signing up as a DEP licensee."
},
"manfName": {
"type": "string",
"minLength": 1,
"maxLength": 31,
"description": "Human-readable manufacturer name that users will see in Dante Controller."
},
"modelId": {
"type": "string",
"minLength": 1,
"maxLength": 8,
"description": "The device model ID, up to 8 characters long and unique for each product type produced by a manufacturer."
},
"modelName": {
"type": "string",
"minLength": 1,
"maxLength": 31,
"description": "Human-readable model name that users will see in Dante Controller."
},
"modelVersion": {
"type": "object",
"description": "3-part version number of the DEP device, this will be shown in Dante Controller.",
"properties": {
"major": {
"type": "integer",
"description": "Product version major number."
},
"minor": {
"type": "integer",
"description": "Product version minor number."
},
"bugfix": {
"type": "integer",
"description": "Product version bugfix number."
}
},
"required": [
"major",
"minor",
"bugfix"
]
},
"modelVersionString": {
"type": "string",
"description": "An arbitrary string that overrides the 'modelVersion'. If not set the 'modelVersion' fields will be used to construct a model version string.",
"minLength": 1,
"maxLength": 31
},
"devicePrefix": {
"type": "string",
"default": "DEP",
"minLength": 1,
"maxLength": 24,
"pattern": "^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,23})?$",
"description": "Dante device name prefix. Up to 24 characters, legal characters are A-Z, a-z, 0-9, and '-' ('-' cannot be the first character)."
}
},
"required": [
"manfId",
"manfName",
"modelId",
"modelName",
"modelVersion"
]
},
"trialMode": {
"type": "boolean",
"default": false,
"description": "Set to true to start the container in 'Trial Mode'. If excluded or false, DEP will require activation."
},
"misc": {
"type": "object",
"properties": {
"enableIdentify": {
"type": "boolean",
"default": false,
"description": "Set to true to enable the device 'Identify' function and false to disable."
}
}
},
"ddhi": {
"type": "object",
"properties": {
"enable": {
"type": "boolean",
"default": true,
"description": "Set to true to enable Dante Device Host Interface (DDHI) and false to disable. All other properties in the ddhi object are only used if this value is true."
},
"clientRpcs": {
"type": "array",
"items": {
"type": "string"
},
"description": "List of DDHI RPCs supported by the platform DDHI client(s)"
}
}
}
},
"required": [
"platform",
"audio",
"network",
"product"
],
"additionalProperties" : false
}

Binary file not shown.

View File

@@ -0,0 +1,3 @@
{
"sampleRate" : 48000
}

View File

@@ -0,0 +1,7 @@
!preferred
subdomain_name _DFLT
mport
socket_loglevel 3
port_v1_m_p
!port_v2_u_p
!enrolled

View File

@@ -0,0 +1 @@
e—|Ć%J<4E>6w7

View File

@@ -0,0 +1 @@
0

280
src/dep/dante_package/dep.sh Executable file
View File

@@ -0,0 +1,280 @@
#!/bin/sh
# To use a different OCI-compliant container runtime,
# update both CONTAINER_RUNTIME and CONTAINER_RUNTIME_PATH:
#
# - CONTAINER_RUNTIME should be set to the name of the runtime binary (e.g., 'crun', 'runc').
# - CONTAINER_RUNTIME_PATH should point to the directory where the binary is installed
# (e.g., '/usr/bin' for a system-installed runtime).
#
# NOTE(review): every path below is derived from $PWD, so this script appears
# to assume it is run from the dante_package installation directory — confirm.
CONTAINER_RUNTIME_PATH=$PWD
CONTAINER_RUNTIME=crun
# Runtime state directory (passed to the runtime via --root), kept on /run.
CONTAINER_STATUS_PATH=/run/$CONTAINER_RUNTIME
# Base command prefix used for every container operation in this script.
CONTAINER_CMD="$CONTAINER_RUNTIME_PATH/$CONTAINER_RUNTIME --root=$CONTAINER_STATUS_PATH"
CONTAINER_CMD_ADDITIONAL_OPTIONS=""
CONTAINER_LOGS="/var/log/dante_container.log"
IMAGES_PATH=$PWD/dante_data/images
ROOTFS_MOUNTPOINT=$PWD/bundle/rootfs
# File containing the id of the image that start() should boot.
ACTIVE_IMAGE_ID_PATH=$IMAGES_PATH/active
DANTE_JSON=$PWD/dante_data/capability/dante.json
# Check we can actually start/stop containers
# NOTE: on some systems 'id' might not be available, hence we check manually
# (the 'Uid:' line of /proc/self/status carries the real uid in field 2).
if [ "$(grep -E '^Uid:' /proc/self/status | awk '{print $2}')" -ne 0 ]; then
echo "This script must be executed with root privileges."
echo ""
exit 1
fi
# Extract the value of a field from a JSON file without relying on jq.
#
# Arguments:
#   $1 - path to the JSON file
#   $2 - field name to look up
#
# Outputs the value on stdout, or the literal string "__NOT_FOUND__" when the
# field is absent or empty. Exits with status 1 (of the calling subshell when
# used inside $(...)) if the file does not exist.
#
# This function assumes that:
# - the JSON file is well-formed
# - key and value appear on the same line
# - strings are double-quoted and don't contain escaped quotes
get_json_field()
{
    json_file=$1
    field_name=$2
    default="__NOT_FOUND__"
    if [ ! -f "$json_file" ]; then
        echo "error: file '$json_file' not found" >&2
        exit 1
    fi
    # explaining the pipeline:
    # - 'head -n 1' keeps only the first matching line if the key occurs more than once
    # - 's/^[^:]*://' removes everything up to and including the first colon
    # - 's/^[[:space:]]*//' strips leading whitespace before the value; it is
    #   anchored so spaces INSIDE the value are preserved (an unanchored 's/ //'
    #   would delete the first space of e.g. "hello world")
    # - 's/,[[:space:]]*$//' removes a trailing comma plus trailing whitespace;
    #   this must run BEFORE quote stripping so that '"value",' loses both the
    #   comma and the closing quote
    # - 's/[[:space:]]*$//' trims any remaining trailing whitespace
    # - 's/^"//' and 's/"$//' remove the surrounding double quotes, if any
    value=$(grep "\"$field_name\"" "$json_file" | head -n 1 | \
        sed -e 's/^[^:]*://' \
            -e 's/^[[:space:]]*//' \
            -e 's/,[[:space:]]*$//' \
            -e 's/[[:space:]]*$//' \
            -e 's/^"//' \
            -e 's/"$//')
    if [ -z "$value" ]; then
        echo "$default"
    else
        echo "$value"
    fi
}
# Validate that the cgroup hierarchy required by the configured cgroup version
# (the "cgroupVersion" field of dante.json) is mounted under /sys/fs/cgroup.
# Prints "mount OK" / "warning" diagnostics per mountpoint; exits 1 only on an
# unsupported cgroupVersion value. Does nothing when the field is absent.
check_cgroup_mounts()
{
cgroup_version=$(get_json_field "$DANTE_JSON" "cgroupVersion")
if [ "$cgroup_version" = "__NOT_FOUND__" ]; then
# no cgroup version configured: nothing to validate
return
fi
# check_mount <path> <expectedType>: scan /proc/mounts for an entry matching
# <path> with filesystem type <expectedType>; prints a warning when no
# suitable entry is found. The in-loop 'return' works because the while loop
# runs in the current shell (input redirection, not a pipeline).
check_mount() {
path=$1
expectedType=$2
while IFS= read -r line; do
# get the second field (mount point) and the third field (type)
mountPoint=$(echo "$line" | awk '{print $2}')
mountType=$(echo "$line" | awk '{print $3}')
# if the mountPoint doesn't start with /sys/fs/cgroup, skip it
case "$mountPoint" in
/sys/fs/cgroup*) ;;
*) continue ;;
esac
# if mount point and type exactly match the expected values, we're good
if [ "$mountPoint" = "$path" ] && [ "$mountType" = "$expectedType" ]; then
echo "mount OK: $path ($expectedType)"
return
fi
# There is a chance multiple controllers are mounted on the same path,
# for instance we might be looking for /sys/fs/cgroup/cpu and have
#
# /sys/fs/cgroup/cpu,cpuacct cgroup etc..
#
# mounted instead.
# because we skip entries that do not start with /sys/fs/cgroup at
# the beginning of the loop, we know getting the substring after
# /sys/fs/cgroup at this point will yield an empty string at worst
cgroupSubstring=${mountPoint#/sys/fs/cgroup/}
# do the same with $path
cgroupPathSubstring=${path#/sys/fs/cgroup/}
# check if cgroupPathSubstring is part of cgroupSubstring
# eg this would successfully match 'cpu' against both 'cpuacct,cpu' and 'cpu,cpuacct'
# (grep -w matches whole comma-separated tokens only)
if echo "$cgroupSubstring" | grep -qw "$cgroupPathSubstring"; then
if [ "$mountType" = "$expectedType" ]; then
echo "mount OK: $path ($expectedType)"
return
fi
fi
done < /proc/mounts
# fell through the whole of /proc/mounts without a match
echo "warning: missing or incorrect mountpoint: $path (expected type: $expectedType)"
}
if [ "$cgroup_version" = "1" ]; then
echo "cgroup version set to v1 in $DANTE_JSON"
echo "checking mounts..."
# cgroup v1: tmpfs root plus one mount per controller
check_mount "/sys/fs/cgroup" "tmpfs"
check_mount "/sys/fs/cgroup/cpuset" "cgroup"
check_mount "/sys/fs/cgroup/cpu" "cgroup"
check_mount "/sys/fs/cgroup/memory" "cgroup"
check_mount "/sys/fs/cgroup/devices" "cgroup"
elif [ "$cgroup_version" = "2" ]; then
echo "cgroup version set to v2 in $DANTE_JSON"
echo "checking mounts..."
# cgroup v2: single unified hierarchy
check_mount "/sys/fs/cgroup" "cgroup2"
else
echo "error: unsupported cgroupVersion value ($cgroup_version) in $DANTE_JSON"
exit 1
fi
}
# Start the DEP container.
#
# Flow:
#   1. bail out if dep_manager is already running
#   2. resolve and sanity-check the active rootfs image
#   3. validate cgroup mounts and prepare runtime directories
#   4. mount the squashfs rootfs (if not already mounted)
#   5. forcefully delete any stale 'dante' container, then run a fresh one
#
# Exits 0 when DEP was already running, 1 on any error.
start()
{
    # A poorly-timed stop() could leave the container mounted while
    # the processes inside the container were successfully shut down.
    # Instead of relying on whether the container is there or not when
    # deciding to start DEP, check whether dep_manager is actually running.
    # shellcheck disable=SC2009
    # (SC2009 recommends using pgrep, but it is not always available)
    if ps -e >/dev/null 2>&1; then
        PS_CMD="ps -e"
    else
        PS_CMD="ps" # assume heavily stripped-down BusyBox
    fi
    # the [d] bracket trick prevents grep from matching its own process entry
    if $PS_CMD | grep -q "[d]ep_manager"; then
        echo "DEP is already running"
        exit 0
    fi
    # Some basic checks before proceeding
    if [ ! -f "$ACTIVE_IMAGE_ID_PATH" ]; then
        echo "error: $ACTIVE_IMAGE_ID_PATH not found, can't select active rootfs"
        exit 1
    fi
    active_image_id=$(cat "$ACTIVE_IMAGE_ID_PATH")
    rootfs="$IMAGES_PATH/$active_image_id/rootfs_squash"
    if [ ! -f "$rootfs" ]; then
        echo "error: $rootfs not found"
        exit 1
    fi
    check_cgroup_mounts
    mkdir -p /var/run/dante
    # quoted to avoid word-splitting/globbing surprises (SC2086)
    mkdir -p "${CONTAINER_STATUS_PATH}"
    # Make sure /etc/resolv.conf is there when later on we
    # try to bind mount it from the container.
    if [ ! -f "/etc/resolv.conf" ]; then
        touch /etc/resolv.conf
    fi
    if ! grep -q " $ROOTFS_MOUNTPOINT " /proc/mounts; then
        if ! mount "$rootfs" "$ROOTFS_MOUNTPOINT" >/dev/null 2>&1; then
            echo "error: could not mount $rootfs"
            exit 1
        fi
    fi
    # At this point, it's safe to always forcefully delete the container.
    #
    # This may be necessary in scenarios where the DEP processes did not actually
    # start after running ./dep.sh start — for example, due to invalid configuration
    # in dante.json. In such cases, a user would typically inspect the logs,
    # fix the underlying issue, and then retry with ./dep.sh start.
    #
    # However, if the dante container remains mounted, the container runtime's 'run'
    # command will fail, forcing the user to manually delete the container - either
    # by using ./dep.sh stop (which is not intuitive) or manually.
    #
    # To avoid these issues and make the recovery easier to execute, unconditionally
    # remove the dante container before attempting to run it again.
    #
    # NOTE: while we could check whether the container exists before removing it,
    # not all systems provide the necessary cgroup status layers to reliably list
    # configured containers.
    ${CONTAINER_CMD} delete --force dante
    # rootfs (only mount with no parent mount) cannot be pivot_root()ed. The check hereafter
    # relies on the fact that rootfs will be either a ramfs or tmpfs. This is a bit more restrictive
    # than necessary, as the container could in practice be started from a ramfs or tmpfs (as long as
    # it is not the rootfs).
    # WARNING: crun falls back to chroot when --no-pivot is enabled, and a process running in the container
    # can in practice access the tree outside of the chroot.
    ROOT_FSTYPE=$(mount | grep 'on / type' | awk '{print $5}')
    if [ "$ROOT_FSTYPE" = "rootfs" ] || [ "$ROOT_FSTYPE" = "ramfs" ] || [ "$ROOT_FSTYPE" = "tmpfs" ]; then
        CONTAINER_CMD_ADDITIONAL_OPTIONS="$CONTAINER_CMD_ADDITIONAL_OPTIONS --no-pivot"
    fi
    # shellcheck disable=SC2086
    # (CONTAINER_CMD and the options string must word-split into command + args)
    if ! ${CONTAINER_CMD} run ${CONTAINER_CMD_ADDITIONAL_OPTIONS} --detach --bundle ./bundle dante > "$CONTAINER_LOGS" 2>&1; then
        echo "error: failed to start dante container, more details available in $CONTAINER_LOGS"
        exit 1
    else
        echo "DEP started"
    fi
}
# Stop the DEP container, escalating from SIGTERM to SIGKILL, then unmount
# the rootfs. Safe to call when the container is absent or already stopped.
stop()
{
    # in some cases we might have the mountpoint but no container running:
    # check if that's the case before proceeding
    if ${CONTAINER_CMD} list | grep dante >/dev/null 2>&1; then
        # stop the init process (dep_manager) by sending a SIGTERM signal
        echo "stopping DEP..."
        ${CONTAINER_CMD} kill dante TERM
        # give the container up to ~10 seconds to shut down gracefully
        for _ in $(seq 1 10); do
            sleep 1
            # count processes still alive inside the container
            # ('grep -v PID -c' drops the header line and counts the rest)
            DEP_PROCS=$(${CONTAINER_CMD} ps dante | grep -v PID -c)
            if [ "$DEP_PROCS" -eq 0 ]; then
                break
            fi
        done
        DEP_PROCS=$(${CONTAINER_CMD} ps dante | grep -v PID -c)
        if [ "$DEP_PROCS" -ne 0 ]; then
            echo "DEP still running, sending SIGKILL"
            ${CONTAINER_CMD} kill -a dante KILL
            sleep 1
        fi
        echo "removing container..."
        ${CONTAINER_CMD} delete --force dante
    fi
    if grep -q " $ROOTFS_MOUNTPOINT " /proc/mounts; then
        echo "umount rootfs..."
        # use the same variable the /proc/mounts check (and start()) use,
        # rather than re-deriving the identical path from $PWD
        umount "$ROOTFS_MOUNTPOINT"
    fi
    echo "done"
}
# Entry point: dispatch on the first command-line argument.
USAGE_MESSAGE="Usage: dep.sh <start|stop>"
# at least one argument is required
if [ "$#" -lt 1 ]; then
    echo "$USAGE_MESSAGE"
    exit 1
fi
case "$1" in
    start)
        start "$2"
        ;;
    stop)
        stop
        ;;
    *)
        echo "$USAGE_MESSAGE"
        exit 1
        ;;
esac

View File

@@ -0,0 +1 @@
development/dep_check.sh

View File

@@ -0,0 +1,556 @@
#!/bin/sh
# This script collects all the necessary information/files for support, then bundles them into a single .tgz file.
# Copyright © 2022-2025 Audinate Pty Ltd ACN 120 828 006 (Audinate). All rights reserved.
#
#
# 1. Subject to the terms and conditions of this Licence, Audinate hereby grants you a worldwide, non-exclusive,
# no-charge, royalty free licence to copy, modify, merge, publish, redistribute, sublicense, and/or sell the
# Software, provided always that the following conditions are met:
# 1.1. the Software must accompany, or be incorporated in a licensed Audinate product, solution or offering
# or be used in a product, solution or offering which requires the use of another licensed Audinate
# product, solution or offering. The Software is not for use as a standalone product without any
# reference to Audinate's products;
# 1.2. the Software is provided as part of example code and as guidance material only without any warranty
# or expectation of performance, compatibility, support, updates or security; and
# 1.3. the above copyright notice and this License must be included in all copies or substantial portions
# of the Software, and all derivative works of the Software, unless the copies or derivative works are
# solely in the form of machine-executable object code generated by the source language processor.
#
# 2. TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT.
#
# 3. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL AUDINATE BE LIABLE ON ANY LEGAL THEORY
# (INCLUDING, WITHOUT LIMITATION, IN AN ACTION FOR BREACH OF CONTRACT, NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM,
# LOSS, DAMAGES OR OTHER LIABILITY HOWSOEVER INCURRED. WITHOUT LIMITING THE SCOPE OF THE PREVIOUS SENTENCE THE
# EXCLUSION OF LIABILITY SHALL INCLUDE: LOSS OF PRODUCTION OR OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF
# DATA OR RECORDS; OR LOSS OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR OTHER ECONOMIC
# LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR
# IN CONNECTION WITH THIS AGREEMENT, ACCESS OF THE SOFTWARE OR ANY OTHER DEALINGS WITH THE SOFTWARE, EVEN IF
# AUDINATE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS, DAMAGES OR OTHER LIABILITY.
#
# 4. APPLICABLE LEGISLATION SUCH AS THE AUSTRALIAN CONSUMER LAW MAY APPLY REPRESENTATIONS, WARRANTIES, OR CONDITIONS,
# OR IMPOSES OBLIGATIONS OR LIABILITY ON AUDINATE THAT CANNOT BE EXCLUDED, RESTRICTED OR MODIFIED TO THE FULL
# EXTENT SET OUT IN THE EXPRESS TERMS OF THIS CLAUSE ABOVE "CONSUMER GUARANTEES". TO THE EXTENT THAT SUCH CONSUMER
# GUARANTEES CONTINUE TO APPLY, THEN TO THE FULL EXTENT PERMITTED BY THE APPLICABLE LEGISLATION, THE LIABILITY OF
# AUDINATE UNDER THE RELEVANT CONSUMER GUARANTEE IS LIMITED (WHERE PERMITTED AT AUDINATE'S OPTION) TO ONE OF
# FOLLOWING REMEDIES OR SUBSTANTIALLY EQUIVALENT REMEDIES:
# 4.1. THE REPLACEMENT OF THE SOFTWARE, THE SUPPLY OF EQUIVALENT SOFTWARE, OR SUPPLYING RELEVANT SERVICES AGAIN;
# 4.2. THE REPAIR OF THE SOFTWARE;
# 4.3. THE PAYMENT OF THE COST OF REPLACING THE SOFTWARE, OF ACQUIRING EQUIVALENT SOFTWARE, HAVING THE RELEVANT
# SERVICES SUPPLIED AGAIN, OR HAVING THE SOFTWARE REPAIRED.
#
# 5. This License does not grant any permissions or rights to use the trade marks (whether registered or unregistered),
# the trade names, or product names of Audinate.
#
# 6. If you choose to redistribute or sell the Software you may elect to offer support, maintenance, warranties,
# indemnities or other liability obligations or rights consistent with this License. However, you may only act on
# your own behalf and must not bind Audinate. You agree to indemnify and hold harmless Audinate, and its affiliates
# from any liability claimed or incurred by reason of your offering or accepting any additional warranty or additional
# liability.
#
# NOTE: this script is intended to be run on production systems where the dante_package/development
# directory might not be available (thus no `jq` to rely on for JSON parsing) and basic tools such as `id`
# could be missing (e.g. BusyBox).
# Any changes to the script should take this into account.
RED_COLOR="\e[01;31m"
GREEN_COLOR="\e[01;32m"
YELLOW_COLOR="\e[01;33m"
BLUE_COLOR="\e[01;34m"
END_COLOR="\e[0m"
red() { printf '%b %s %b' "$RED_COLOR" "$*" "$END_COLOR"; }
green() { printf '%b %s %b' "$GREEN_COLOR" "$*" "$END_COLOR"; }
blue() { printf '%b %s %b' "$BLUE_COLOR" "$*" "$END_COLOR"; }
yellow() { printf '%b %s %b' "$YELLOW_COLOR" "$*" "$END_COLOR"; }
logerr() { echo "[ $(red ERROR)] $1"; }
logwarn() { echo "[$(yellow WARNING)] $1"; }
loginfo() { echo "[ $(blue INFO)] $1"; }
logok() { echo "[ $(green OK)] $1"; }
fail() { exit 1; }
cmd_exists() { command -v -- "$1" >/dev/null 2>&1; }
# Extract the value of a field from a JSON file without relying on jq.
#
# Arguments:
#   $1 - path to the JSON file
#   $2 - field name to look up
#
# Outputs the value on stdout, or the literal string "__NOT_FOUND__" when the
# field is absent or empty. Exits with status 1 (of the calling subshell when
# used inside $(...)) if the file does not exist.
#
# This function assumes that:
# - the JSON file is well-formed
# - key and value appear on the same line
# - strings are double-quoted and don't contain escaped quotes
get_json_field()
{
    json_file=$1
    field_name=$2
    default="__NOT_FOUND__"
    if [ ! -f "$json_file" ]; then
        echo "error: file '$json_file' not found" >&2
        exit 1
    fi
    # explaining the pipeline:
    # - 'head -n 1' keeps only the first matching line if the key occurs more than once
    # - 's/^[^:]*://' removes everything up to and including the first colon
    # - 's/^[[:space:]]*//' strips leading whitespace before the value; it is
    #   anchored so spaces INSIDE the value are preserved (an unanchored 's/ //'
    #   would delete the first space of e.g. "hello world")
    # - 's/,[[:space:]]*$//' removes a trailing comma plus trailing whitespace;
    #   this must run BEFORE quote stripping so that '"value",' loses both the
    #   comma and the closing quote
    # - 's/[[:space:]]*$//' trims any remaining trailing whitespace
    # - 's/^"//' and 's/"$//' remove the surrounding double quotes, if any
    value=$(grep "\"$field_name\"" "$json_file" | head -n 1 | \
        sed -e 's/^[^:]*://' \
            -e 's/^[[:space:]]*//' \
            -e 's/,[[:space:]]*$//' \
            -e 's/[[:space:]]*$//' \
            -e 's/^"//' \
            -e 's/"$//')
    if [ -z "$value" ]; then
        echo "$default"
    else
        echo "$value"
    fi
}
# Default locations, overridable via the -c / -l / -o command-line options.
# where DEP is installed, default value
DEFAULT_DEP_PATH="/opt/dep"
# where DEP logs are stored, default value
DEFAULT_LOGS_PATH="/var/log"
# where temporary files created by this script will be stored, default value
DEFAULT_TEMP_PATH="/tmp"
# where the archive created by this script will be stored, default value
# (resolved once at startup, so later directory changes do not affect it)
DEFAULT_OUTPUT_PATH=$(pwd)
# DEP container logs can only be stored in /var/log at the moment.
CONT_LOGS="/var/log/dante_container.log"
# Print the command-line help text for this script.
# All output goes through loginfo so it carries the standard log prefix.
usage() {
loginfo "Usage: $0 [OPTIONS]"
loginfo ""
loginfo "This tool collects diagnostic data to help debug issues with the DEP software."
loginfo ""
loginfo "Options:"
loginfo " -c <path> Specify the directory where DEP is installed."
loginfo " Default is '${DEFAULT_DEP_PATH}'."
loginfo " -l <path> Specify the directory where DEP stores its log files."
loginfo " Default is '${DEFAULT_LOGS_PATH}'."
loginfo " -o <path> Specify the output directory for the final archive and any temporary"
loginfo " files or directories created in the process. This directory must be"
loginfo " writable by the user executing the script."
loginfo " Default is the current directory, '${DEFAULT_OUTPUT_PATH}'"
loginfo ""
loginfo "Examples:"
loginfo ""
loginfo " $0 -c /apps/dep -l /tmp/logs"
loginfo ""
loginfo " Collects diagnostic data from a DEP installation in /apps/dep, DEP log files in"
loginfo " /tmp/logs, and stores the output in the current directory."
loginfo ""
loginfo " $0 -c /apps/dep -l /tmp/logs -o /tmp/dep_diagnostics"
loginfo ""
loginfo " Collects diagnostic data from a DEP installation in /apps/dep, DEP log files in"
loginfo " /tmp/logs, and stores the output in /tmp/dep_diagnostics."
loginfo ""
loginfo " $0 -o /home/user/dep_diagnostics"
loginfo ""
loginfo " Uses the default DEP installation and log file paths, and stores the output in"
loginfo " /home/user/dep_diagnostics."
}
# Copy a file or directory from a source to a destination.
#
# Arguments:
#   src (str): the source file or directory to be copied (may contain globs).
#   dst (str): the destination where the source will be copied.
#   msg (str): warning prefix logged if the copy operation fails.
#
# Directories are copied recursively. On failure, the captured error output
# of cp is logged together with the supplied message.
#
# NOTE: `eval` is used deliberately so glob patterns inside src
# (e.g. "cp /var/log/dante_*") are expanded at copy time.
copy() {
    src="$1"
    dst="$2"
    msg="$3"
    # pick the recursive form for directories
    if [ -d "${src}" ]; then
        cmd="cp -r ${src} ${dst}"
    else
        cmd="cp ${src} ${dst}"
    fi
    # the assignment's exit status is that of the command substitution,
    # so the warning fires exactly when cp fails
    err=$(eval "${cmd}" 2>&1) || logwarn "$msg: $err"
}
# Checks if a specified directory exists and if it's writable.
#
# Arguments:
#   path (str): Directory to check.
#   check_write (str): '1' to check write permission, '0' otherwise.
#   err_msg (str): Optional. Additional error message to display.
#
# On failure, logs the error(s) — plus err_msg when provided — and exits the
# script with status 1. Returns silently (status 0) when the checks pass.
check_path() {
    path="$1"
    check_write="$2"
    err_msg="$3"
    _ret_val=0
    if [ ! -d "${path}" ]; then
        logerr "${path} is not a valid path"
        _ret_val=1
    elif [ "${check_write}" = "1" ] && [ ! -w "${path}" ]; then
        logerr "you don't have writing permission for the directory: $path"
        _ret_val=1
    fi
    if [ ${_ret_val} -ne 0 ]; then
        # surface the caller-supplied hint, if any, then abort
        [ -n "${err_msg}" ] && logerr "${err_msg}"
        exit ${_ret_val}
    fi
}
# Locate the running kernel's build configuration and copy it into dest_path.
#
# Arguments:
#   $1 - destination directory for the collected config file.
#
# Searches the standard locations in order of preference. /proc/config.gz is
# decompressed to kernel_config.txt when gunzip is available; otherwise the
# found file is copied as-is. Logs an error and returns if nothing is found.
collect_kernel_config() {
dest_path="$1"
config_file=""
is_gzipped=0
if [ -f "/proc/config.gz" ]; then
config_file="/proc/config.gz"
is_gzipped=1
elif [ -f "/boot/config-$(uname -r)" ]; then
config_file="/boot/config-$(uname -r)"
elif [ -f "/boot/config" ]; then
config_file="/boot/config"
elif [ -f "/lib/modules/$(uname -r)/build/.config" ]; then
config_file="/lib/modules/$(uname -r)/build/.config"
fi
if [ -z "$config_file" ]; then
logerr "no kernel config found in standard locations"
return
fi
loginfo "found kernel config at: $config_file"
# for gzipped config, try to decompress and copy
if [ "$is_gzipped" -eq 1 ]; then
if cmd_exists gunzip; then
if gunzip -c "$config_file" > "$dest_path"/kernel_config.txt 2>/dev/null; then
# if gunzip succeeds, early return to avoid copy
return
fi
fi
fi
# fall back to copying the (possibly still compressed) file verbatim
copy "$config_file" "$dest_path" "Failed to copy config from $config_file to $dest_path"
}
# Parse command-line flags. getopts runs in silent mode (leading ':' in
# the optstring), so parse problems are recorded in errmsg and reported
# after the loop through our own logging helpers.
while getopts ":o:c:l:h" opt; do
    case "$opt" in
        o)  # output directory (temporary files go there as well)
            OUTPUT_PATH=$OPTARG
            TEMP_PATH=$OPTARG
            ;;
        c)  # DEP install path
            DEP_PATH=$OPTARG
            ;;
        l)  # log directory
            LOGS_PATH=$OPTARG
            ;;
        h)  # display help and stop right away
            usage
            exit 0
            ;;
        :)  # option present but its argument is missing
            errmsg="option -$OPTARG requires an argument."
            ;;
        \?) # unrecognised option
            errmsg="invalid option: -$OPTARG"
            ;;
    esac
done
# a getopts problem is fatal: log it and bail out
if [ -n "$errmsg" ]; then
    logerr "$errmsg"
    fail
fi
# if we can't create archives, we can't proceed
if ! cmd_exists tar; then
logerr "'tar' not found, unable to create archives"
fail
fi
# check whether we need to use defaults
: "${DEP_PATH:=$DEFAULT_DEP_PATH}"
: "${LOGS_PATH:=$DEFAULT_LOGS_PATH}"
: "${TEMP_PATH:=$DEFAULT_TEMP_PATH}"
: "${OUTPUT_PATH:=$DEFAULT_OUTPUT_PATH}"
# if OUTPUT_PATH can't be written to, we can't proceed
# NOTE: by checking OUTPUT_PATH we also check TEMP_PATH:
# the latter is set to /tmp by default, so it is only necessary
# to make sure we can write to it when the user has specified
# a different directory, in which case OUTPUT_PATH would have
# the same value so it makes sense to only check OUTPUT_PATH
check_path "$OUTPUT_PATH" 1 "please chose a different directory using the -o option. Try $0 -h for more information"
# check that provided paths are valid
check_path "$DEP_PATH" 0 "please chose a different directory using the -c option. Try $0 -h for more information"
check_path "$LOGS_PATH" 0 "please chose a different directory using the -l option. Try $0 -h for more information"
# this script's own log file
LOGFILE="/tmp/collector.txt"
# start logging our own output:
# - create a named pipe inside a private mktemp directory (a fixed name
#   like /tmp/tmpfifo is predictable and races with concurrent runs)
# - start tee reading from it in the background
# - redirect stdout and stderr to the named pipe
# The EXIT trap removes the pipe directory and the log file; `rm -f` with
# `;` (instead of `rm a && rm b`) ensures the second removal still runs
# if the first one fails, and the expansions are quoted.
FIFO_DIR=$(mktemp -d)
FIFO="${FIFO_DIR}/tmpfifo"
mkfifo "${FIFO}"
trap 'rm -rf "${FIFO_DIR}"; rm -f "${LOGFILE}"' EXIT
tee -a "${LOGFILE}" < "${FIFO}" &
exec > "${FIFO}" 2>&1
# in a world where all shells support process substitution
# this is an alternative way
# exec > >(tee -a ${LOGFILE} )
# exec 2> >(tee -a ${LOGFILE} >&2)
# output what we're running with
loginfo "DEP install path: ${DEP_PATH}"
loginfo "DEP logs path: ${LOGS_PATH}"
loginfo "Temporary files will be saved in: ${TEMP_PATH}"
loginfo "Script output archive will be saved in: ${OUTPUT_PATH}"
# we'll use a subdir to store our data
SUPPORT_DIR=${TEMP_PATH}/dep_support
# where to store the ethtool output
ETHTOOL_FILE="${SUPPORT_DIR}/ethtoolinfo.txt"
# where to store the HW clock info
HW_CLKING_FILE="${SUPPORT_DIR}/hwclk.txt"
# in case the script was interrupted midway during a previous run,
# clear any stale data before starting fresh
rm -rf "${SUPPORT_DIR}"
# if we can't create ${SUPPORT_DIR}, we can't proceed
if ! mkdir -p "${SUPPORT_DIR}" 2>/dev/null; then
logerr "cannot create directory ${SUPPORT_DIR}: permission denied"
fail
fi
# locations of the DEP configuration and activation data to collect
DANTE_JSON="$DEP_PATH"/dante_package/dante_data/capability/dante.json
CONFIG_JSON="$DEP_PATH"/dante_package/dante_data/capability/config.json
CONFIG_DEP="$DEP_PATH"/dante_package/dante_data/config
ACTIVATION_DIR="${DEP_PATH}/dante_package/dante_data/activation"
loginfo "Collecting config files..."
# if found, get dante.json (device capability description)
if [ -f "${DANTE_JSON}" ]; then
copy "${DANTE_JSON}" "${SUPPORT_DIR}" "collection of ${DANTE_JSON} failed"
else
logerr "dante.json not found in $(dirname "${DANTE_JSON}")"
fi
# if found, get config.json
if [ -f "${CONFIG_JSON}" ]; then
copy "${CONFIG_JSON}" "${SUPPORT_DIR}" "collection of ${CONFIG_JSON} failed"
else
logerr "config.json not found in $(dirname "${CONFIG_JSON}")"
fi
# if found, get all content from dante_data/config (copied recursively)
if [ -d "${CONFIG_DEP}" ]; then
copy "${CONFIG_DEP}" "${SUPPORT_DIR}" "collection of DEP ${CONFIG_DEP} directory failed"
else
logerr "DEP config directory not found in $(dirname "${CONFIG_DEP}")"
fi
# check and collect activation files
if [ -d "${ACTIVATION_DIR}" ]; then
# copy whatever we have in the activation directory
copy "${ACTIVATION_DIR}" "${SUPPORT_DIR}" "collection of DEP activation files failed"
# warn individually about each expected activation file that is missing
for actFile in device.lic manufacturer.cert; do
if [ ! -f "${ACTIVATION_DIR}/${actFile}" ]; then
logwarn "activation file '${actFile}' not found in ${ACTIVATION_DIR}"
fi
done
else
logerr "DEP activation directory not found in $(dirname "${ACTIVATION_DIR}")"
fi
loginfo "Collecting DEP logs..."
# DEP logs and container logs share the same destination subdirectory,
# so one mkdir is enough (the original created it twice)
mkdir -p "${SUPPORT_DIR}/logs"
# get all DEP logs (the glob pattern is expanded inside `copy`)
copy "${LOGS_PATH}/dante_*" "${SUPPORT_DIR}/logs" "collection of DEP logs failed"
# get the container logs (CONT_LOGS is presumably set earlier in this
# script -- TODO confirm against the full file)
copy "${CONT_LOGS}" "${SUPPORT_DIR}/logs" "collection of DEP container logs failed"
loginfo "Collecting system info..."
# get kernel config (see collect_kernel_config for the search order)
collect_kernel_config "${SUPPORT_DIR}"
# get /proc/cpuinfo
copy "/proc/cpuinfo" "${SUPPORT_DIR}/cpuinfo.txt" "collection of /proc/cpuinfo failed"
# get /proc/interrupts
copy "/proc/interrupts" "${SUPPORT_DIR}/interrupts.txt" "collection of /proc/interrupts failed"
# get mount points
mount > "${SUPPORT_DIR}/mountinfo.txt" || logwarn "collection of mount points failed"
# get info about running processes: try including thread info first,
# in case of failure (e.g. "ps" is actually BusyBox) fall back to processes only
if ! ps -efL > "${SUPPORT_DIR}/processinfo.txt" 2> /dev/null; then
ps > "${SUPPORT_DIR}/processinfo.txt" || logwarn "unable to write process info into ${SUPPORT_DIR}/processinfo.txt"
fi
# get the list of active sockets (with owning PIDs, hence -p)
if cmd_exists netstat; then
netstat -anp 2>/dev/null > "${SUPPORT_DIR}/netstat.txt" || logwarn "unable to collect active socket info"
else
logwarn "netstat command not available"
fi
# get info about network interfaces (addresses, state, MTU)
if cmd_exists ip; then
ip address > "${SUPPORT_DIR}/ipinfo.txt" || logwarn "unable to write ip info to ${SUPPORT_DIR}/ipinfo.txt"
else
logwarn "ip command not available"
fi
# get ALSA version (userspace libs); silently skipped if aplay is absent
if cmd_exists aplay; then
aplay --version > "${SUPPORT_DIR}/alsa.txt" || logwarn "unable to write ALSA version to ${SUPPORT_DIR}/alsa.txt"
fi
# get kernel messages
if cmd_exists dmesg; then
dmesg > "${SUPPORT_DIR}/dmesg.txt" || logwarn "unable to collect kernel messages - dmesg failed"
fi
# get device nodes (permissions and major/minor numbers under /dev)
ls -l /dev > "${SUPPORT_DIR}/device_nodes.txt" || logwarn "unable to collect info about device nodes"
# get timestamping and coalescing info for each network interface; both
# command outputs are appended to the same report file, each preceded by
# the command that produced it and followed by a separator line
if cmd_exists ethtool; then
    for iface_path in /sys/class/net/*; do
        iface=$(basename "$iface_path")
        {
            echo "ethtool -c \"$iface\""
            ethtool -c "$iface" 2>&1
            echo "------------------------"
            echo "ethtool -T \"$iface\""
            ethtool -T "$iface" 2>&1
            echo "------------------------"
        } >> "$ETHTOOL_FILE"
    done
else
    logwarn "ethtool command not available"
fi
# get info for HW clocking, if enabled in dante.json
if [ -f "${DANTE_JSON}" ]; then
MNT_DIR="${SUPPORT_DIR}/mnt"
ROOTFS_FILE="$DEP_PATH/dante_package/dante_data/images/0/rootfs_squash"
# get_json_field is presumably a helper defined earlier in this
# script -- TODO confirm against the full file
useHwClock=$(get_json_field "${DANTE_JSON}" useHwClock)
if [ "$useHwClock" = "true" ]; then
circuitName=$(get_json_field "${DANTE_JSON}" circuitName)
i2cBus=$(get_json_field "${DANTE_JSON}" i2cBus)
i2cAddr=$(get_json_field "${DANTE_JSON}" i2cAddr)
# record the clocking parameters read from dante.json
{
echo "circuitName=$circuitName"
echo "i2cBus=$i2cBus"
echo "i2cAddr=$i2cAddr"
} >> "$HW_CLKING_FILE"
# hwclkcfg binary is in the DEP rootfs so mount rootfs first and then run it
mkdir -p "${MNT_DIR}"
if ! mount "$ROOTFS_FILE" "${MNT_DIR}"; then
logerr "unable to collect HW clocking info: rootfs mount failed"
else
# query the clocking circuit; stdout and stderr both go to the report
"$MNT_DIR"/dante/hwclkcfg -c --i2cbus "$i2cBus" --i2caddr "$i2cAddr" "$circuitName" >> "$HW_CLKING_FILE" 2>&1
# best-effort unmount; the mount point is removed below either way
umount "${MNT_DIR}" 2> /dev/null
fi
rm -rf "${MNT_DIR}"
fi
fi
# if we are UID 0, run dep_check.sh and save its output.
# `id -u` replaces the previous hand-rolled parse of /proc/self/status,
# which is Linux-only; the effective UID is what governs file access here
if [ "$(id -u)" -eq 0 ]; then
    if [ ! -f "./development/dep_check.sh" ]; then
        logwarn "dep_check.sh not found, skipping"
    else
        loginfo "Run dep_check and collect its output..."
        { ./development/dep_check.sh "${DEP_PATH}" > "${SUPPORT_DIR}/depcheck.txt"; } 2>&1
        # strip ANSI escape sequences so the saved output is plain text
        sed -i 's/[^[:print:]]\[[0-9;]*[a-zA-Z]//g' "${SUPPORT_DIR}/depcheck.txt"
    fi
else
    logwarn "could not run dep_check.sh because user was not root"
fi
# add this script's own logs to the bundle
if [ -f "$LOGFILE" ]; then
    # strip ANSI escape sequences from this script's output
    sed -i 's/[^[:print:]]\[[0-9;]*[a-zA-Z]//g' "$LOGFILE"
fi
loginfo "Create final archive..."
# copy our own logs to the support directory, fail silently:
# suppress cp's stderr too, which the old code left visible despite
# the "|| true"
cp "$LOGFILE" "${SUPPORT_DIR}/collector.txt" 2>/dev/null || true
# bundle everything together; -C keeps the archive paths relative to
# the support dir's parent so it unpacks as a single dep_support/ tree
timestamp=$(date "+%Y.%m.%d-%H.%M.%S")
tgz_name="dep_support-${timestamp}.tgz"
if ! tar czf "${OUTPUT_PATH}/${tgz_name}" -C "$(dirname "${SUPPORT_DIR}")" "$(basename "${SUPPORT_DIR}")" > /dev/null 2>&1; then
    logerr "unable to bundle support files in ${OUTPUT_PATH}/${tgz_name}"
    _exit_val=1
else
    logok "DEP log files and system info bundled in ${OUTPUT_PATH}/${tgz_name}"
    _exit_val=0
fi
# remove temporary data
rm -rf "${SUPPORT_DIR}"
exit ${_exit_val}
#
# Copyright © 2022-2025 Audinate Pty Ltd ACN 120 828 006 (Audinate). All rights reserved.
#

BIN
src/dep/dante_package/depconfig Executable file

Binary file not shown.

View File

@@ -0,0 +1,27 @@
[Unit]
Description=Dante Embedded Platform
After=network.target
[Service]
Type=simple
# Optional: restrict CPU affinity for relevant slices before the service starts.
# Uncomment one or more of the following lines to pin system slices to specific CPUs.
#
# Notes:
# - These affect *other* processes in the corresponding slices (init, user, system), not just this service.
# - This may impact unrelated services or user sessions: if possible, CPU isolation should be obtained
# at a system level by tuning the kernel command line
#
#ExecStartPre=/usr/bin/systemctl set-property init.scope AllowedCPUs=0,1
#ExecStartPre=/usr/bin/systemctl set-property user.slice AllowedCPUs=0,1
#ExecStartPre=/usr/bin/systemctl set-property system.slice AllowedCPUs=0,1
ExecStart=/opt/dep/dante_package/dep.sh start
ExecStopPost=/opt/dep/dante_package/dep.sh stop
WorkingDirectory=/opt/dep/dante_package
PIDFile=/run/dante.pid
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,429 @@
PTP TIMESTAMPING TEST TOOL OVERVIEW
===================================
ptp_timestamping_test is a standalone tool that can be used independently of DEP
itself to do a definitive check of a network driver's timestamping capabilities.
After installing DEP, this tool should ALWAYS be run before starting DEP for the
first time. This is to ensure that the driver(s) for the selected network interface(s)
are able to properly timestamp all PTP event packets using the required timestamping
mode (i.e. hardware, PTPv1-only hardware or software).
If problems are found with the driver(s) when timestamping in a given mode, DEP
should NOT be started in that mode, as otherwise PTP will either fail to start
or will experience synchronisation issues.
In such a case, in order to run DEP:
- To continue using the given mode, the driver(s) must be updated and the tool
rerun until problems are no longer reported
- Or, another timestamping mode that shows no problems must be used instead
CONTENTS
========
1. Test types
2. Test requirements
a. Preliminary check
b. Full test
3. Running tests
a. Via dep_check.sh
b. Running manually
4. Understanding test results
a. Preliminary check
b. Full test
c. Full test output examples
5. Using event + error logging
6. Test behaviour and duration
a. Test packet receive and send behaviour
b. Test duration
1. TEST TYPES
=============
There are two types of tests that can be done with the tool:
1) A preliminary check of whether a driver actually supports configuring a
particular timestamping mode. This test tries to create a network socket
configured to use the selected mode. The success or failure of this test
is a much more reliable indicator of configuration support than the output
of 'ethtool -T', as a driver's reported claims are not always 100% accurate.
Nevertheless, as a guide to the user the tool gathers and displays the
equivalent information shown via 'ethtool -T'.
2) A full test. This uses a configured network socket to send and receive
all PTP event packet types, and checks that they all get timestamped.
A full test is the only real way to determine if timestamping is fully operational.
A simple preliminary check is NOT a substitute for this.
However, if a timestamping mode fails a preliminary check it means that mode
CANNOT be used (unless the driver can be updated/rectified).
2. TEST REQUIREMENTS
====================
The tool must always be run as root.
a. Preliminary check
--------------------
Running a preliminary check has no extra requirements. This check can be done
at any time.
b. Full test
------------
The device being tested must be connected to a Dante network. And, in addition
to this device:
- At least two Dante devices (one leader, and at least one follower) must be
present on the network
- If AES67 and/or site/domain unicast clocking are going to be used, at least
two PTPv2-capable devices (again, one leader and at least one follower) must
be present:
* At least one of these must be a third-party (i.e. non-Dante) device
* Two Dante devices running in AES67 mode are NOT a suitable substitute, as
without a third-party device there will be no PTPv2 follower devices
Points of note:
- ALL devices (both Dante and third-party) MUST be in the unmanaged domain (i.e.
in the default subdomain for PTPv1, and in domain number 0 for PTPv2)
- The tool assumes that all PTP traffic is multicast. Therefore, any follower
devices configured to use unicast delay requests will not be visible and thus
cannot be used
- If only one device of each PTPv1/PTPv2 type is present (as a leader), the
tool can still be run but it will report that the test is incomplete
- Running the test will NOT cause clock synchronisation to be disrupted on the
network
3. RUNNING TESTS
================
a. Via dep_check.sh
-------------------
dep_check.sh will use the tool to do preliminary checks of all three timestamping
modes for the interface(s) configured in dante.json. Upon completion, it will
display a complete command line(s) (with parameters based on the settings in
dante.json) that the user can run to do a full test(s) for:
- The mode in dante.json, and/or
- The recommended mode (if different to the above), based on the check results
If the timestamping mode set in dante.json fails a preliminary check, dep_check.sh
will show a complete command line the user can run to see the full output of the
check if desired.
b. Running manually
-------------------
Running the tool with the -h option will show how the tool can be used.
As stated above dep_check.sh provides complete command lines for convenience,
however manual usage is fairly straightforward:
- The value for -i is the interface under "network" in dante.json
- The timestamping mode can be set as required
- If "dsaTaggedPackets" in dante.json is set to true, -dsa must be specified
- If -dsa is used along with either -hw or -hwv1, a value must be supplied
for -hwi. This is the entry under "clock.hardwareInterfaces" in dante.json
that corresponds to the value supplied for -i
- By default, the tool will run a full test. Specifying -c will result in the
tool running only a preliminary check
4. UNDERSTANDING TEST RESULTS
=============================
a. Preliminary check
--------------------
The result will be a simple pass or fail, depending on whether a network socket
could be configured with the chosen settings.
The tool will also display the timestamping capabilities reported by the driver
(the same ones shown via 'ethtool -T') and indicate whether these match what the
user wants. HOWEVER, even if the reported capabilities do not match, the check
still proceeds to attempt the network socket setup, since it MAY succeed in
spite of the mismatch.
b. Full test
------------
For timestamping to be deemed operational, the driver MUST be able to timestamp:
- Both PTP event packet types (SYNC and DELAY_REQ)
- In both directions
Furthermore, in order to be able to run DEP:
- To use Dante audio, PTPv1 timestamping must fully work
- To use AES67 with a third-party device and to enable site/domain unicast
clocking, PTPv2 timestamping must also work
The test output provides a terse summary of whether timestamping for a particular
version of PTP was:
- All OK
- Found to have errors for a particular packet type + timestamping direction
- Not (completely) tested due to lack of devices present
Refer to the next section for some example full test outputs.
The tool will display detailed results for the packet version + type(s) that
encountered any timestamping errors. These results provide specific information
on the number of packets sent/received, the number successfully timestamped and
how many attempts resulted in errors. This can be useful when debugging a network
driver. If however, yet more specific details are required about the types of
errors and/or the relative times at which packet events + timestamping operations
occur, the logging option can be used. This is discussed further below.
c. Full test output examples
----------------------------
On a device with a working driver, assuming both PTPv1 and PTPv2 leaders and
followers are present the output will look like the following:
# ./ptp_timestamping_test -i eno2 -hw
Using interface eno2, with hardware timestamping
Checking PHC resolution...
Checking PHC read speed... 4283ns (approx.)
Testing PTP timestamping...
Testing v1 SYNC packets... OK
Testing v1 DELAY_REQ packets... OK
Testing v2 SYNC packets... OK
Testing v2 DELAY_REQ packets... OK
TEST SUMMARY
============
PTPv1 all OK
PTPv2 all OK
Points of note:
- When using hardware timestamping, the first thing the tool does is perform some
measurements of the PHC (PTP Hardware Clock)
- The approximate read speed of the PHC should be noted. NICs that perform
timestamping at the PHY instead of the MAC layer will exhibit very large values
here (greater than about 100,000ns), and these are typically unsuitable for use
with DEP as the slow read times will adversely affect its PTP accuracy
The outputs below were produced on a board which has problems with hardware
timestamping (but not software timestamping). Also:
- The Dante network used had two devices
- One of the devices had AES67 turned on (and so there was a PTPv2 leader
present, but no PTPv2 followers)
Software timestamping result:
# ./ptp_timestamping_test -i eth1
Using interface eth1, with software timestamping
Testing PTP timestamping...
Testing v1 SYNC packets... OK
Testing v1 DELAY_REQ packets... OK
Testing v2 SYNC packets... OK
Testing v2 DELAY_REQ packets... none detected
TEST SUMMARY
============
PTPv1 all OK
PTPv2 only partially tested - no follower devices detected
Hardware timestamping result:
# ./ptp_timestamping_test -i eth1 -hw
Using interface eth1, with hardware timestamping
Checking PHC resolution...
Checking PHC read speed... 3928ns (approx.)
Testing PTP timestamping...
Testing v1 SYNC packets... OK
Testing v1 DELAY_REQ packets... Tx problems
Testing v2 SYNC packets... OK
Testing v2 DELAY_REQ packets... none detected
TEST SUMMARY
============
PTPv1 SYNCs OK, errors found with DELAY_REQs - details below
PTPv2 only partially tested - no follower devices detected
DETAILED RESULTS
================
v1 DELAY_REQ
------------
Rx packet receive limit: 5
Rx packets received: 5
Rx packets timestamped: 5
Rx packet timestamping errors: 0
Rx packets from additional followers received: 0
Rx packets from additional followers timestamped: 0
Rx packet timestamping errors for additional followers: 0
Tx packets sent: 5
Tx packets timestamped: 0
Tx packets with slow timestamps: 0
Tx packet timestamping errors: 0
The detailed results show that:
- All Rx packets (up to the default receive limit of 5) from the follower the
test chose were successfully timestamped
- No other followers were present (and so DELAY_REQs from those were not received)
- No Tx packets were timestamped, HOWEVER there were no errors. This is an
indication that the driver did not attempt to timestamp any outgoing v1 DELAY_REQs
For some drivers, timestamping operations do indeed take place BUT result in
errors. The following detailed results are from a board whose driver produces
these:
DETAILED RESULTS
================
v1 SYNC
-------
Rx packet receive limit: 20
Rx packets received: 20
Rx packets timestamped: 0
Rx packet timestamping errors: 20
Rx FOLLOW-UPs received: 20
Multiple leaders detected: no
Tx packets sent: 20
Tx packets timestamped: 20
Tx packets with slow timestamps: 0
Tx packet timestamping errors: 0
These details show that:
- All 20 (the default receive limit) SYNC packets were indeed received, however
none were timestamped
- Timestamping was attempted each time, but always resulted in an error
- There were no issues timestamping outgoing packets
The details in a SYNC report also include network information that may be of
interest to the user:
- Most PTP leaders issue FOLLOW-UP packets after each SYNC. These do not need
to be timestamped, however the tool listens for these and checks that they
indeed come from the leader sending the SYNCs. If no FOLLOW-UPs are received,
this may indicate either a problematic leader OR the presence of a leader
using the one-step rather than the more common two-step synchronisation method
- If the tool detects more than one leader on the network, it will be indicated
here
5. USING EVENT + ERROR LOGGING
==============================
While the full test outputs above point to which timestamping operations fail to
work, in some cases (e.g. if trying to debug a network driver) a detailed timeline
of packet and timestamping events, along with the specific errors that occurred,
can be useful.
By using the -l option along with a file to log to, the tool will produce its
normal output but also place all events into that file. The logs will contain:
- The start and end of each test
- Every packet receive and send
- Every successful timestamp read (Rx/Tx), along with the timestamp value
- If an error reading a timestamp occurs, a description of the error (ancillary
data truncated, no timestamp information, insufficient timestamp data)
- If timestamping is not taking place at all (as in the example above, for Tx),
the log will be missing these events
Each line also starts with a timestamp (seconds and nanoseconds), which is the
value of CLOCK_REALTIME at the moment an event or error was logged.
NOTE: When running a hardware timestamping test, you will notice that the first
test in the log is a PTPv1 "scratch" test. This is a throwaway test done at the
start that, on some devices, will exhibit odd Tx timestamping and/or errors.
A scratch test is always done because in some cases a driver will only start
reporting correct timestamps after a few initial socket and timestamp operation
failures. This way, the actual tests of interest are not affected. The scratch
test logs can be safely ignored (although initial socket behaviour after setup
may be of interest to some).
6. TEST BEHAVIOUR AND DURATION
==============================
a. Test packet receive and send behaviour
-----------------------------------------
The test uses packet receive limits for each PTP event packet type (for both
protocol versions). By default, these are:
- 20 SYNCs from the first (and ideally only) leader seen
- 5 DELAY_REQs from the first follower detected
When transmitting packets:
- For a SYNC test, each received SYNC is copied and sent out
- For a DELAY_REQ, packet sends are throttled if required so that not less than
0.25 seconds can elapse before a DELAY_REQ is sent. The sent packet is a copy
of the last received DELAY_REQ. DELAY_REQ tests count packets from the first
follower detected but otherwise receive and timestamp DELAY_REQs from any and
all followers on the network
Each test only ends when:
- The receive limit has been reached, and
- For SYNCs, that same number has been sent out
- For DELAY_REQs, a minimum of that same number has been sent
- Or, the test times out waiting for a packet to arrive
- For SYNCs, the test will wait 2 seconds before timing out
- For DELAY_REQs, this figure is 8 seconds (because it can be up to 7.5 seconds
between packet arrivals)
- Or, a socket error occurs (these should NOT occur unless there is a system
issue or network connectivity is suddenly lost)
NOTE: If the test sees no SYNC packets for a particular PTP version, it will
automatically skip the DELAY_REQ test for that version as, in the absence of a
leader, there will not be followers sending DELAY_REQs. If no SYNCs are detected
despite the device being on a populated Dante network (or one with PTPv2 devices),
it may be the case that multicast PTP traffic is not being sent to this device.
If this happens, the network and/or switch(es) should be checked to ensure that
the device receives multicast PTP.
b. Test duration
----------------
At the default receive limits, a test will last about 25 seconds typically:
- SYNCs from Dante leaders are normally sent every 0.25 seconds
- DELAY_REQs from Dante followers are sent at varying intervals, but on average
are around 4-5 seconds
Note: third-party AES67 devices may have their own packet send intervals
To run a shorter or longer test, the -nsy and -ndr tool options can be used:
- If a driver is known to have working timestamping for a particular packet
type but not another, the limit for the working type can be reduced
- Setting the limit to 0 will skip that packet type entirely (and the full test
output will say so, and also warn of an incomplete test)
- On the other hand, it may be the case that a driver only begins to exhibit
problems after running for a while. In this case, the limits can be increased
using the send intervals above as a rough guide for determining the approximate
test duration

Binary file not shown.

View File

@@ -0,0 +1,3 @@
DEP_VERSION=1.5.0.2
DEP_GIT_HASH=
DEP_BUILD_TIMESTAMP=2025-08-20_05-14-33_UTC

View File

@@ -6,8 +6,8 @@ pcm.ch1 {
channels 2
rate 48000
format S16_LE
period_size 120
buffer_size 240
period_size 240
buffer_size 960
}
bindings.0 0
}
@@ -21,8 +21,326 @@ pcm.ch2 {
channels 2
rate 48000
format S16_LE
period_size 120
buffer_size 240
period_size 240
buffer_size 960
}
bindings.0 1
}
}
# ============================================================
# DEP Dante RX -> ALSA Loopback is now done by DEP ALSA ASRC.
# So: NO alsaloop needed anymore.
#
# Apps read from hw:Loopback,1,0 via dsnoop fanout,
# then we split into 6 mono virtual devices.
# ============================================================
# ---- shared 6ch capture from Loopback with dsnoop fanout ----
pcm.dante_asrc_shared6 {
type dsnoop
ipc_key 1048577
ipc_key_add_uid true
ipc_perm 0666
slave {
pcm "hw:Loopback,1,0" # capture side of ALSA loopback
channels 6
rate 48000
format S16_LE
period_size 240
buffer_size 960
}
hint { show on ; description "DEP RX (via ASRC) shared 6ch (loopback+dsnoop)" }
}
# ---- 6 mono devices (each maps one of the 6 channels) ----
# (Using route explicitly makes the intent very clear.)
pcm.dante_asrc_ch1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
hint { show on ; description "DEP RX CH1" }
}
pcm.dante_asrc_ch2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
hint { show on ; description "DEP RX CH2" }
}
pcm.dante_asrc_ch3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
hint { show on ; description "DEP RX CH3" }
}
pcm.dante_asrc_ch4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
hint { show on ; description "DEP RX CH4" }
}
pcm.dante_asrc_ch5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
hint { show on ; description "DEP RX CH5" }
}
pcm.dante_asrc_ch6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
hint { show on ; description "DEP RX CH6" }
}
# ---- Stereo devices for Dante (combine any two channels as L+R) ----
# These devices route selected source channels to stereo output
# Format: dante_stereo_<left_ch>_<right_ch>
pcm.dante_stereo_1_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1 # Left channel from ch1
ttable.1.1 1 # Right channel from ch2
hint { show on ; description "DEP RX Stereo CH1+CH2" }
}
pcm.dante_stereo_1_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH1+CH3" }
}
pcm.dante_stereo_1_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH1+CH4" }
}
pcm.dante_stereo_1_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH1+CH5" }
}
pcm.dante_stereo_1_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH1+CH6" }
}
pcm.dante_stereo_2_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH2+CH3" }
}
pcm.dante_stereo_2_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH2+CH4" }
}
pcm.dante_stereo_2_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH2+CH5" }
}
pcm.dante_stereo_2_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH2+CH6" }
}
pcm.dante_stereo_3_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH3+CH4" }
}
pcm.dante_stereo_3_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH3+CH5" }
}
pcm.dante_stereo_3_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH3+CH6" }
}
pcm.dante_stereo_4_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH4+CH5" }
}
pcm.dante_stereo_4_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH4+CH6" }
}
pcm.dante_stereo_5_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH5+CH6" }
}
# ---- Reverse stereo devices (for when left channel > right channel) ----
# route plugin: "ttable.<client_ch>.<slave_ch> <gain>" maps client channel
# <client_ch> to slave channel <slave_ch> (0-based; Dante CH = slave_ch + 1).
# Here the HIGHER Dante channel feeds client channel 0 (left) and the
# LOWER one feeds client channel 1 (right).
pcm.dante_stereo_2_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1 # Left from ch2
ttable.1.0 1 # Right from ch1
hint { show on ; description "DEP RX Stereo CH2+CH1" }
}
pcm.dante_stereo_3_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH3+CH1" }
}
pcm.dante_stereo_3_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH3+CH2" }
}
pcm.dante_stereo_4_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH4+CH1" }
}
pcm.dante_stereo_4_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH4+CH2" }
}
pcm.dante_stereo_4_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH4+CH3" }
}
pcm.dante_stereo_5_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH5+CH1" }
}
pcm.dante_stereo_5_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH5+CH2" }
}
pcm.dante_stereo_5_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH5+CH3" }
}
pcm.dante_stereo_5_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH5+CH4" }
}
pcm.dante_stereo_6_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH6+CH1" }
}
pcm.dante_stereo_6_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH6+CH2" }
}
pcm.dante_stereo_6_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH6+CH3" }
}
pcm.dante_stereo_6_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH6+CH4" }
}
pcm.dante_stereo_6_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH6+CH5" }
}

3
src/misc/install_asoundconf.sh Normal file → Executable file
View File

@@ -1 +1,2 @@
sudo cp src/misc/asound.conf /etc/asound.conf
# Resolve the directory this script lives in so the copy works from any CWD.
# Guard the resolution: if the cd failed, SCRIPT_DIR would be empty and the
# cp below would silently pick up ./asound.conf from the caller's directory.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" || exit 1
sudo cp "$SCRIPT_DIR/asound.conf" /etc/asound.conf

56
src/openocd/flash.sh Normal file
View File

@@ -0,0 +1,56 @@
#!/bin/bash
# Flash a hex image onto the nRF54L target through OpenOCD, then pulse the
# target's CTRL-AP registers so the freshly flashed firmware restarts cleanly.
#
# Usage: flash.sh -f <hex_file> [-i swd0|swd1]
set -e

# Resolve the directory this script lives in so the adapter .cfg files are
# found regardless of the caller's working directory. (The previous version
# used CWD-relative "./raspberrypi-*.cfg" paths, which broke when the script
# was invoked from anywhere but its own directory.)
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" || exit 1

INTERFACE="swd0"   # which SWD adapter config to use: swd0 or swd1
HEX_FILE=""

usage() {
  echo "Usage: $0 -f <hex_file> [-i swd0|swd1]"
  exit 1
}

while getopts "f:i:h" opt; do
  case "$opt" in
    f) HEX_FILE="$OPTARG" ;;
    i)
      if [[ "$OPTARG" == "swd0" || "$OPTARG" == "swd1" ]]; then
        INTERFACE="$OPTARG"
      else
        usage
      fi
      ;;
    h) usage ;;
    *) usage ;;
  esac
done

[[ -n "$HEX_FILE" ]] || usage
[[ -f "$HEX_FILE" ]] || { echo "HEX file not found: $HEX_FILE" >&2; exit 1; }

CFG="$SCRIPT_DIR/raspberrypi-${INTERFACE}.cfg"

# Program the image, verify it against the target, then let the core run.
sudo openocd \
  -f "$CFG" \
  -c "init" \
  -c "reset init" \
  -c "flash banks" \
  -c "flash write_image $HEX_FILE" \
  -c "verify_image $HEX_FILE" \
  -c "reset run" \
  -c "shutdown"

# Pulse CTRL-AP register 0x000 on AP 2: write 0x1, wait 100 ms, clear.
# NOTE(review): presumably this asserts/releases the nRF54L CTRL-AP reset —
# confirm the register semantics against the nRF54L CTRL-AP documentation.
sudo openocd \
  -f "$CFG" \
  -c "init" \
  -c "nrf54l.dap apreg 2 0x000 0x1" \
  -c "sleep 100" \
  -c "nrf54l.dap apreg 2 0x000 0x0" \
  -c "shutdown"

# Same pulse with value 0x4 (second reset mechanism added alongside 0x1).
sudo openocd \
  -f "$CFG" \
  -c "init" \
  -c "nrf54l.dap apreg 2 0x000 0x4" \
  -c "sleep 100" \
  -c "nrf54l.dap apreg 2 0x000 0x0" \
  -c "shutdown"

echo "Flashing complete."

13111
src/openocd/merged.hex Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -5,4 +5,9 @@ adapter gpio swdio 26
#adapter gpio trst 26
#reset_config trst_only
# Pull in the generic Nordic nRF54L target definition shipped with OpenOCD.
source [find target/nordic/nrf54l.cfg]
# Internal flash bank at 0x00000000; size/chip_width/bus_width of 0 = auto-probe.
flash bank $_CHIPNAME.flash nrf54 0x00000000 0 0 0 $_TARGETNAME
# Adapter clock in kHz (1 MHz) for the GPIO bit-banged SWD interface.
adapter speed 1000

View File

@@ -5,4 +5,9 @@ adapter gpio swdio 24
#adapter gpio trst 27
#reset_config trst_only
# Pull in the generic Nordic nRF54L target definition shipped with OpenOCD.
source [find target/nordic/nrf54l.cfg]
# Internal flash bank at 0x00000000; size/chip_width/bus_width of 0 = auto-probe.
flash bank $_CHIPNAME.flash nrf54 0x00000000 0 0 0 $_TARGETNAME
# Adapter clock in kHz (1 MHz) for the GPIO bit-banged SWD interface.
adapter speed 1000

29
src/service/update_and_run_server_and_frontend.sh Normal file → Executable file
View File

@@ -4,6 +4,35 @@ set -e
# This script installs, enables, and restarts the auracast-server and auracast-frontend services
# Requires sudo privileges
# Ensure static link local is activated (for direct laptop connection)
# Enable link-local for all wired ethernet connections
# Walk every NetworkManager connection (terse "NAME:TYPE" output) and switch
# on IPv4 link-local addressing for each wired (ethernet) one, bringing the
# connection back up so the setting takes effect. Failures are reported but
# do not abort the loop.
while IFS=: read -r conn_name conn_type; do
  [[ "$conn_type" == *ethernet* ]] || continue
  echo "Enabling IPv4 link-local for connection: $conn_name"
  sudo nmcli connection modify "$conn_name" ipv4.link-local enabled 2>/dev/null || echo "Failed to modify $conn_name"
  sudo nmcli connection up "$conn_name" 2>/dev/null || echo "Failed to bring up $conn_name"
done < <(nmcli -t -f NAME,TYPE connection show)
# Configure Avahi to prefer DHCP address over static fallback for mDNS
# Pick the first IPv4 on eth0 that is neither loopback (127.*) nor IPv4
# link-local (169.254.*); empty when eth0 only holds a link-local address.
# NOTE(review): an earlier comment claimed 192.168.42.10 is excluded here,
# but the grep chain below does NOT filter it — confirm the static fallback
# actually lives in the 169.254/16 range.
DHCP_IP=$(ip -4 addr show eth0 2>/dev/null | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | grep -v '^127\.' | grep -v '^169\.254\.' | head -n1)
HOSTNAME=$(hostname)
if [ -n "$DHCP_IP" ]; then
echo "DHCP address detected: $DHCP_IP, configuring Avahi to prefer it for mDNS."
# Add entry to /etc/avahi/hosts to explicitly map hostname to DHCP IP
sudo mkdir -p /etc/avahi
# Overwrites any previous hosts file with one "<ip> <host> <host>.local" line.
echo "$DHCP_IP $HOSTNAME $HOSTNAME.local" | sudo tee /etc/avahi/hosts > /dev/null
# Restart avahi to apply the hosts file
sudo systemctl restart avahi-daemon
else
echo "No DHCP address detected, mDNS will use link local"
# Remove hosts file to let Avahi advertise all IPs
sudo rm -f /etc/avahi/hosts
sudo systemctl restart avahi-daemon
fi
# Copy system service file for frontend
sudo cp /home/caster/bumble-auracast/src/service/auracast-frontend.service /etc/systemd/system/auracast-frontend.service