stereo-support and dep-integration (#19)

Co-authored-by: pstruebi <struebin.patrick@gmail.com>
Reviewed-on: https://gitea.pstruebi.xyz/auracaster/bumble-auracast/pulls/19
This commit was merged in pull request #19.
This commit is contained in:
pober
2026-01-20 12:57:17 +01:00
parent dd02e0ddc3
commit 59ca5dafd2
82 changed files with 10236 additions and 395 deletions

2
.gitignore vendored
View File

@@ -50,3 +50,5 @@ ch2.wav
src/auracast/available_samples.txt
src/auracast/server/stream_settings2.json
src/scripts/temperature_log*
src/auracast/server/recordings/

View File

@@ -60,6 +60,85 @@ import sounddevice as sd
from collections import deque
class AlsaArecordAudioInput(audio_io.AudioInput):
    """Audio input that captures raw PCM by spawning an ``arecord`` subprocess.

    Used for ALSA devices (e.g. Dante route PCMs) that PortAudio cannot
    enumerate. Frames are read from the subprocess's stdout pipe as raw
    S16_LE samples.
    """

    def __init__(self, device_name: str, pcm_format: audio_io.PcmFormat):
        # ALSA device name as passed to ``arecord -D`` (e.g. 'dante_asrc_ch1').
        self._device_name = device_name
        self._pcm_format = pcm_format
        # Running arecord process, or None when the input is closed.
        self._proc: asyncio.subprocess.Process | None = None

    async def open(self) -> audio_io.PcmFormat:
        """Start the arecord subprocess (idempotent) and return the PCM format."""
        if self._proc is not None:
            # Already open; opening twice must not spawn a second process.
            return self._pcm_format
        args = [
            'arecord',
            '-D', self._device_name,
            '-q',
            '-t', 'raw',
            '-f', 'S16_LE',
            '-r', str(int(self._pcm_format.sample_rate)),
            '-c', str(int(self._pcm_format.channels)),
        ]
        logging.info(
            "Opening ALSA capture via arecord: device='%s' rate=%s ch=%s",
            self._device_name,
            self._pcm_format.sample_rate,
            self._pcm_format.channels,
        )
        self._proc = await asyncio.create_subprocess_exec(
            *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.DEVNULL,
        )
        if self._proc.stdout is None:
            raise RuntimeError('arecord stdout pipe was not created')
        return self._pcm_format

    def frames(self, frame_size: int) -> AsyncGenerator[bytes]:
        """Yield raw PCM chunks of ``frame_size`` samples per channel.

        The generator finishes (without raising) when the capture process
        terminates or the pipe closes. ``open()`` is called lazily if needed.
        """
        async def _gen() -> AsyncGenerator[bytes]:
            if self._proc is None:
                await self.open()
            if self._proc is None or self._proc.stdout is None:
                return
            bytes_per_frame = (
                frame_size
                * self._pcm_format.channels
                * self._pcm_format.bytes_per_sample
            )
            while True:
                try:
                    data = await self._proc.stdout.readexactly(bytes_per_frame)
                except asyncio.IncompleteReadError:
                    # arecord exited; a short tail read means end of stream.
                    return
                except Exception:
                    # Previously a silent swallow; log so capture failures
                    # are visible instead of looking like a clean EOF.
                    logging.exception(
                        'ALSA capture read failed; stopping frame generator'
                    )
                    return
                yield data
        return _gen()

    async def aclose(self) -> None:
        """Terminate the arecord subprocess, escalating to kill after 1s."""
        if self._proc is None:
            return
        try:
            if self._proc.returncode is None:
                self._proc.terminate()
        except ProcessLookupError:
            pass  # process already gone
        except Exception:
            pass  # best-effort shutdown; never raise from close
        with contextlib.suppress(Exception):
            await asyncio.wait_for(self._proc.wait(), timeout=1.0)
        if self._proc.returncode is None:
            # Graceful terminate did not work within the timeout; force it.
            with contextlib.suppress(Exception):
                self._proc.kill()
            with contextlib.suppress(Exception):
                await asyncio.wait_for(self._proc.wait(), timeout=1.0)
        self._proc = None
class ModSoundDeviceAudioInput(audio_io.SoundDeviceAudioInput):
"""Patched SoundDeviceAudioInput with low-latency capture and adaptive resampling."""
@@ -670,6 +749,12 @@ class Streamer():
big['lc3_frames'] = lc3_frames
# anything else, e.g. realtime stream from device (bumble)
else:
if isinstance(audio_source, str) and audio_source.startswith('alsa:'):
if input_format == 'auto':
raise ValueError('input format details required for alsa input')
pcm = audio_io.PcmFormat.from_str(input_format)
audio_input = AlsaArecordAudioInput(audio_source[5:], pcm)
else:
audio_input = await audio_io.create_audio_input(audio_source, input_format)
# Store early so stop_streaming can close even if open() fails

File diff suppressed because it is too large Load Diff

View File

@@ -8,6 +8,7 @@ import json
from datetime import datetime
import asyncio
import random
import subprocess
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
@@ -21,6 +22,7 @@ from auracast.utils.sounddevice_utils import (
get_alsa_usb_inputs,
resolve_input_device_index,
refresh_pw_cache,
devices_by_backend,
)
load_dotenv()
@@ -326,30 +328,122 @@ async def init_radio(transport: str, conf: auracast_config.AuracastConfigGroup,
first_source = conf.bigs[0].audio_source if conf.bigs else ''
input_device_name = None
audio_mode_persist = 'Demo'
if any(isinstance(b.audio_source, str) and b.audio_source.startswith('device:') for b in conf.bigs):
if isinstance(first_source, str) and first_source.startswith('device:'):
input_device_name = first_source.split(':', 1)[1] if ':' in first_source else None
alsa_usb_names = {d.get('name') for _, d in get_alsa_usb_inputs()}
net_names = {d.get('name') for _, d in get_network_pw_inputs()}
dante_channels = {"dante_asrc_ch1", "dante_asrc_ch2", "dante_asrc_ch3", "dante_asrc_ch4", "dante_asrc_ch5", "dante_asrc_ch6"}
if input_device_name in ('ch1', 'ch2'):
# Explicitly treat ch1/ch2 as Analog input mode
audio_mode_persist = 'Analog'
elif input_device_name in dante_channels:
audio_mode_persist = 'Network - Dante'
else:
audio_mode_persist = 'Network' if (input_device_name in net_names) else 'USB'
if input_device_name and input_device_name.isdigit():
device_index = int(input_device_name)
else:
device_index = resolve_input_device_index(input_device_name or '')
if device_index is None:
raise HTTPException(status_code=400, detail=f"Audio device '{input_device_name}' not found.")
# Configure each BIG independently so Dante multi-stream can select different channels.
for big in conf.bigs:
if isinstance(big.audio_source, str) and big.audio_source.startswith('device:'):
if not (isinstance(big.audio_source, str) and big.audio_source.startswith('device:')):
continue
sel = big.audio_source.split(':', 1)[1] if ':' in big.audio_source else None
# IMPORTANT: All hardware capture is at 48kHz; LC3 encoder may downsample.
hardware_capture_rate = 48000
if sel in dante_channels:
# Use ALSA directly (PortAudio doesn't enumerate route PCMs on some systems).
big.audio_source = f'alsa:{sel}'
big.input_format = f"int16le,{hardware_capture_rate},1"
continue
# Dante stereo devices: dante_stereo_X_Y (e.g., dante_stereo_1_2)
if sel and sel.startswith('dante_stereo_'):
is_stereo = getattr(big, 'num_bis', 1) == 2
if is_stereo:
# Stereo mode: use the stereo ALSA device with 2 channels
big.audio_source = f'alsa:{sel}'
big.input_format = f"int16le,{hardware_capture_rate},2"
log.info("Configured Dante stereo input: using ALSA %s with 2 channels", sel)
else:
# Fallback to mono if num_bis != 2 (shouldn't happen)
big.audio_source = f'alsa:{sel}'
big.input_format = f"int16le,{hardware_capture_rate},2"
log.warning("Dante stereo device %s used but num_bis=%d, capturing as stereo anyway", sel, getattr(big, 'num_bis', 1))
continue
if sel in ('ch1', 'ch2'):
# Analog channels: check if this should be stereo based on num_bis
is_stereo = getattr(big, 'num_bis', 1) == 2
if is_stereo and sel == 'ch1':
# Stereo mode: use ALSA directly to capture both channels from hardware
# ch1=left (channel 0), ch2=right (channel 1)
big.audio_source = 'alsa:hw:CARD=i2s,DEV=0'
big.input_format = f"int16le,{hardware_capture_rate},2"
log.info("Configured analog stereo input: using ALSA hw:CARD=i2s,DEV=0 with ch1=left, ch2=right")
elif is_stereo and sel == 'ch2':
# Skip ch2 in stereo mode as it's already captured as part of stereo pair
continue
else:
# Mono mode: individual channel capture
device_index = resolve_input_device_index(sel)
if device_index is None:
raise HTTPException(status_code=400, detail=f"Audio device '{sel}' not found.")
big.audio_source = f'device:{device_index}'
big.input_format = f"int16le,{hardware_capture_rate},1"
continue
if sel and sel.isdigit():
device_index = int(sel)
else:
device_index = resolve_input_device_index(sel or '')
if device_index is None:
raise HTTPException(status_code=400, detail=f"Audio device '{sel}' not found.")
try:
resolved_devinfo = sd.query_devices(device_index)
log.info(
"Resolved input device '%s' -> idx=%s name='%s' hostapi=%s max_in=%s",
sel,
device_index,
resolved_devinfo.get('name'),
resolved_devinfo.get('hostapi'),
resolved_devinfo.get('max_input_channels'),
)
except Exception:
log.info("Resolved input device '%s' -> idx=%s (devinfo unavailable)", sel, device_index)
big.audio_source = f'device:{device_index}'
devinfo = sd.query_devices(device_index)
max_in = int(devinfo.get('max_input_channels') or 1)
channels = max(1, min(2, max_in))
for big in conf.bigs:
big.input_format = f"int16le,{48000},{channels}"
big.input_format = f"int16le,{hardware_capture_rate},{channels}"
# The config group keeps the target sampling rate for LC3 encoder
# The audio input will capture at 48kHz and LC3 encoder will downsample
target_sampling_rate = getattr(conf, 'auracast_sampling_rate_hz', None)
if target_sampling_rate is None and conf.bigs:
target_sampling_rate = getattr(conf.bigs[0], 'sampling_frequency', 48000)
if target_sampling_rate is None:
target_sampling_rate = 48000
# Keep the config group sampling rate as set by frontend
conf.auracast_sampling_rate_hz = target_sampling_rate
# Ensure octets_per_frame matches the target sampling rate
if target_sampling_rate == 48000:
conf.octets_per_frame = 120
elif target_sampling_rate == 32000:
conf.octets_per_frame = 80
elif target_sampling_rate == 24000:
conf.octets_per_frame = 60
elif target_sampling_rate == 16000:
conf.octets_per_frame = 40
else:
conf.octets_per_frame = 120 # default to 48000 setting
conf.qos_config.max_transport_latency_ms = int(conf.qos_config.number_of_retransmissions) * 10 + 3
@@ -365,7 +459,7 @@ async def init_radio(transport: str, conf: auracast_config.AuracastConfigGroup,
await mc.init_broadcast()
auto_started = False
if any(isinstance(big.audio_source, str) and (big.audio_source.startswith("device:") or big.audio_source.startswith("file:")) for big in conf.bigs):
if any(isinstance(big.audio_source, str) and (big.audio_source.startswith("device:") or big.audio_source.startswith("alsa:") or big.audio_source.startswith("file:")) for big in conf.bigs):
await mc.start_streaming()
auto_started = True
@@ -390,6 +484,7 @@ async def init_radio(transport: str, conf: auracast_config.AuracastConfigGroup,
'qos_preset': _resolve_qos_preset_name(conf.qos_config),
'immediate_rendering': getattr(conf, 'immediate_rendering', False),
'assisted_listening_stream': getattr(conf, 'assisted_listening_stream', False),
'analog_stereo_mode': getattr(conf.bigs[0], 'analog_stereo_mode', False) if conf.bigs else False,
'stream_password': (conf.bigs[0].code if conf.bigs and getattr(conf.bigs[0], 'code', None) else None),
'big_ids': [getattr(big, 'id', DEFAULT_BIG_ID) for big in conf.bigs],
'big_random_addresses': [getattr(big, 'random_address', DEFAULT_RANDOM_ADDRESS) for big in conf.bigs],
@@ -577,6 +672,9 @@ async def _autostart_from_settings():
presentation_delay_us=pres_delay if pres_delay is not None else 40000,
bigs=bigs,
)
# Set num_bis for stereo mode if needed
if conf.bigs and settings.get('analog_stereo_mode', False):
conf.bigs[0].num_bis = 2
conf.qos_config = QOS_PRESET_MAP.get(saved_qos_preset, QOS_PRESET_MAP["Fast"])
log.info("[AUTOSTART][PRIMARY] Scheduling demo init_radio in 2s")
await asyncio.sleep(2)
@@ -641,6 +739,9 @@ async def _autostart_from_settings():
presentation_delay_us=pres_delay if pres_delay is not None else 40000,
bigs=bigs,
)
# Set num_bis for stereo mode if needed
if conf.bigs and settings.get('analog_stereo_mode', False):
conf.bigs[0].num_bis = 2
conf.qos_config = QOS_PRESET_MAP.get(saved_qos_preset, QOS_PRESET_MAP["Fast"])
log.info("[AUTOSTART][PRIMARY] Scheduling device init_radio in 2s")
await asyncio.sleep(2)
@@ -810,6 +911,18 @@ async def _autostart_from_settings():
async def _startup_autostart_event():
# Spawn the autostart task without blocking startup
log.info("[STARTUP] Auracast multicast server startup: initializing settings cache, I2C, and PipeWire cache")
# Run install_asoundconf.sh script
script_path = os.path.join(os.path.dirname(__file__), '..', 'misc', 'install_asoundconf.sh')
try:
log.info("[STARTUP] Running install_asoundconf.sh script")
result = subprocess.run(['bash', script_path], capture_output=True, text=True, check=True)
log.info(f"[STARTUP] install_asoundconf.sh completed: {result.stdout.strip()}")
except subprocess.CalledProcessError as e:
log.error(f"[STARTUP] Failed to run install_asoundconf.sh: {e.stderr.strip()}")
except Exception as e:
log.error(f"[STARTUP] Error running install_asoundconf.sh: {str(e)}")
# Hydrate settings cache once to avoid disk I/O during /status
_init_settings_cache_from_disk()
await _init_i2c_on_startup()
@@ -845,6 +958,28 @@ async def audio_inputs_pw_network():
log.error("Exception in /audio_inputs_pw_network: %s", traceback.format_exc())
raise HTTPException(status_code=500, detail=str(e))
@app.get("/audio_inputs_dante")
async def audio_inputs_dante():
    """List Dante ALSA input devices from asound.conf."""
    try:
        # Six fixed mono ASRC channels are exposed; build the list
        # programmatically rather than spelling each entry out.
        inputs = [
            {"id": ch, "name": ch, "max_input_channels": 1}
            for ch in (f"dante_asrc_ch{i}" for i in range(1, 7))
        ]
        return {"inputs": inputs}
    except Exception as e:
        log.error("Exception in /audio_inputs_dante: %s", traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/refresh_audio_devices")
async def refresh_audio_devices():
"""Triggers a re-scan of audio devices, but only if no stream is active."""
@@ -899,6 +1034,56 @@ async def system_reboot():
raise HTTPException(status_code=500, detail=str(e))
@app.post("/restart_dep")
async def restart_dep():
    """Restart DEP by running dep.sh stop then dep.sh start in the dep directory.
    Requires the service user to have passwordless sudo permissions to run dep.sh.
    """
    try:
        # dep.sh lives in the dante_package subdirectory of the dep tree.
        dep_dir = os.path.join(
            os.path.dirname(__file__), '..', '..', 'dep', 'dante_package'
        )

        async def _run_dep(action: str):
            """Run ``sudo bash dep.sh <action>`` and return (returncode, stderr)."""
            proc = await asyncio.create_subprocess_exec(
                "sudo", "bash", "dep.sh", action,
                cwd=dep_dir,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            _, stderr = await proc.communicate()
            return proc.returncode, stderr

        # Stop first; abort with 500 if that fails.
        log.info("Stopping DEP...")
        rc, stderr = await _run_dep("stop")
        if rc != 0:
            error_msg = stderr.decode() if stderr else "Unknown error"
            log.error(f"Failed to stop DEP: {error_msg}")
            raise HTTPException(status_code=500, detail=f"Failed to stop DEP: {error_msg}")

        # Then start again.
        log.info("Starting DEP...")
        rc, stderr = await _run_dep("start")
        if rc != 0:
            error_msg = stderr.decode() if stderr else "Unknown error"
            log.error(f"Failed to start DEP: {error_msg}")
            raise HTTPException(status_code=500, detail=f"Failed to start DEP: {error_msg}")

        log.info("DEP restarted successfully")
        return {"status": "success", "message": "DEP restarted successfully"}
    except HTTPException:
        raise
    except Exception as e:
        log.error("Exception in /restart_dep: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/version")
async def get_version():
"""Get the current software version (git tag or commit)."""
@@ -1069,6 +1254,169 @@ async def system_update():
raise HTTPException(status_code=500, detail=str(e))
# Recording functionality
# Recordings are stored in a 'recordings' directory next to this module; it is
# created eagerly at import time so the endpoints below can assume it exists.
RECORDINGS_DIR = os.path.join(os.path.dirname(__file__), 'recordings')
os.makedirs(RECORDINGS_DIR, exist_ok=True)
def cleanup_old_recordings(keep_latest: str | None = None):
    """Delete all recordings except the latest one (or specified file).

    Args:
        keep_latest: Basename of the .wav file to preserve. If None, or the
            named file does not exist on disk, the most recently modified
            recording is kept instead.

    Errors are logged and never raised; cleanup is strictly best-effort.
    """
    try:
        recordings = []
        for filename in os.listdir(RECORDINGS_DIR):
            if filename.endswith('.wav'):
                filepath = os.path.join(RECORDINGS_DIR, filename)
                if os.path.isfile(filepath):
                    recordings.append((filename, os.path.getmtime(filepath)))
        # Sort by modification time (newest first)
        recordings.sort(key=lambda x: x[1], reverse=True)
        # Keep only the latest recording (or the specified one)
        if keep_latest and os.path.exists(os.path.join(RECORDINGS_DIR, keep_latest)):
            files_to_keep = {keep_latest}
        else:
            files_to_keep = {recordings[0][0]} if recordings else set()
        # Delete old recordings
        for filename, _ in recordings:
            if filename not in files_to_keep:
                filepath = os.path.join(RECORDINGS_DIR, filename)
                try:
                    os.remove(filepath)
                    log.info("Deleted old recording: %s", filename)
                except Exception as e:
                    # Deletion failure is non-fatal; keep going with the rest.
                    log.warning("Failed to delete recording %s: %s", filename, e)
    except Exception as e:
        log.warning("Error during recording cleanup: %s", e)
@app.get("/alsa_devices")
async def get_alsa_devices():
    """Get list of available ALSA input devices."""
    try:
        # Keep only devices that can actually capture audio.
        devices = [
            {
                'id': idx,
                'name': dev['name'],
                'max_input_channels': dev['max_input_channels'],
            }
            for idx, dev in enumerate(sd.query_devices())
            if dev.get('max_input_channels', 0) > 0
        ]
        # Add individual Dante ASRC channels if shared device is found
        dante_shared_device = next(
            (d for d in devices if d['name'] == 'dante_asrc_shared6'), None
        )
        if dante_shared_device is not None:
            # Expose each of the six shared channels as its own virtual
            # mono device so the frontend can select them individually.
            devices.extend(
                {
                    'id': f"dante_asrc_ch{i}",
                    'name': f'dante_asrc_ch{i}',
                    'max_input_channels': 1,
                    'parent_device': dante_shared_device['name'],
                    'parent_id': dante_shared_device['id'],
                }
                for i in range(1, 7)
            )
        return {"devices": devices}
    except Exception as e:
        log.error("Exception in /alsa_devices: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/start_recording")
async def start_recording(request: dict):
    """Start a 5-second recording from the specified ALSA device."""
    try:
        device_name = request.get('device')
        if not device_name:
            raise HTTPException(status_code=400, detail="Device name is required")

        # Generate filename with timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"recording_{timestamp}.wav"
        filepath = os.path.join(RECORDINGS_DIR, filename)

        # Determine channel count for the device; fall back to mono when the
        # device cannot be found via sounddevice.
        channels = 1
        try:
            matched = next(
                (
                    dev for dev in sd.query_devices()
                    if dev['name'] == device_name
                    and dev.get('max_input_channels', 0) > 0
                ),
                None,
            )
            if matched is not None:
                channels = matched.get('max_input_channels', 1)
        except Exception:
            pass

        # Build arecord command
        cmd = [
            "arecord",
            "-D", device_name,   # Use the device name directly
            "-f", "cd",          # CD quality (16-bit little-endian, 44100 Hz)
            "-c", str(channels), # Channel count
            "-d", "5",           # Duration in seconds
            "-t", "wav",         # WAV format
            filepath,
        ]
        log.info("Starting recording with command: %s", " ".join(cmd))
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        _, stderr = await proc.communicate()
        if proc.returncode != 0:
            error_msg = stderr.decode(errors="ignore").strip() if stderr else "Unknown error"
            log.error("Recording failed: %s", error_msg)
            raise HTTPException(status_code=500, detail=f"Recording failed: {error_msg}")

        # Verify file was created and has content
        if not os.path.exists(filepath) or os.path.getsize(filepath) == 0:
            raise HTTPException(status_code=500, detail="Recording file was not created or is empty")

        # Clean up old recordings, keeping only this new one
        cleanup_old_recordings(keep_latest=filename)
        log.info("Recording completed successfully: %s", filename)
        return {"success": True, "filename": filename}
    except HTTPException:
        raise
    except Exception as e:
        log.error("Exception in /start_recording: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/download_recording/{filename}")
async def download_recording(filename: str):
    """Download a recorded WAV file.

    Args:
        filename: Basename of a .wav file inside RECORDINGS_DIR.

    Raises:
        HTTPException: 400 for unsafe or invalid names, 404 when the file
            does not exist, 500 on unexpected errors.
    """
    try:
        # Validate filename to prevent directory traversal. Besides rejecting
        # path separators, require the name to equal its own basename so
        # untrusted inputs can never escape RECORDINGS_DIR.
        if (
            not filename.endswith('.wav')
            or '/' in filename
            or '\\' in filename
            or filename != os.path.basename(filename)
        ):
            raise HTTPException(status_code=400, detail="Invalid filename")
        filepath = os.path.join(RECORDINGS_DIR, filename)
        if not os.path.exists(filepath):
            raise HTTPException(status_code=404, detail="Recording file not found")
        return FileResponse(filepath, filename=filename, media_type="audio/wav")
    except HTTPException:
        raise
    except Exception as e:
        log.error("Exception in /download_recording: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
if __name__ == '__main__':
import os
os.chdir(os.path.dirname(__file__))

View File

@@ -243,7 +243,8 @@ def get_alsa_usb_inputs():
'usb' in name or
re.search(r'hw:\d+(?:,\d+)?', name) or
name.startswith('dsnoop') or
name in ('ch1', 'ch2')
name in ('ch1', 'ch2') or
name.startswith('dante_asrc_ch')
):
usb_inputs.append((idx, dev))

View File

@@ -0,0 +1 @@
../dante_data/capability/config.json

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,26 @@
root:x:0:
daemon:x:1:
bin:x:2:
sys:x:3:
adm:x:4:
tty:x:5:
disk:x:6:
lp:x:7:
mail:x:8:
kmem:x:9:
wheel:x:10:root
cdrom:x:11:
dialout:x:18:
floppy:x:19:
video:x:28:
audio:x:29:
tape:x:32:
www-data:x:33:
operator:x:37:
utmp:x:43:
plugdev:x:46:
staff:x:50:
lock:x:54:
netdev:x:82:
users:x:100:
nobody:x:65534:

View File

@@ -0,0 +1 @@
buildroot

View File

@@ -0,0 +1,2 @@
127.0.0.1 localhost
127.0.1.1 buildroot

View File

@@ -0,0 +1 @@
Welcome to Buildroot

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@
../proc/self/mounts

View File

@@ -0,0 +1,21 @@
#!/bin/sh
# In case we have a slow-to-appear interface (e.g. eth-over-USB),
# and we need to configure it, wait until it appears, but not too
# long either. IF_WAIT_DELAY is in seconds.
if [ "${IF_WAIT_DELAY}" -a ! -e "/sys/class/net/${IFACE}" ]; then
    printf "Waiting for interface %s to appear" "${IFACE}"
    while [ ${IF_WAIT_DELAY} -gt 0 ]; do
        if [ -e "/sys/class/net/${IFACE}" ]; then
            # Interface showed up within the delay: success.
            printf "\n"
            exit 0
        fi
        sleep 1
        printf "."
        # Consume one second of the remaining wait budget.
        : $((IF_WAIT_DELAY -= 1))
    done
    # Delay exhausted without the interface appearing.
    printf " timeout!\n"
    exit 1
fi

View File

@@ -0,0 +1,20 @@
#!/bin/sh
# This allows NFS booting to work while also being able to configure
# the network interface via DHCP when not NFS booting. Otherwise, a
# NFS booted system will likely hang during DHCP configuration.
# Attempting to configure the network interface used for NFS will
# initially bring that network down. Since the root filesystem is
# accessed over this network, the system hangs.
# This script is run by ifup and will attempt to detect if a NFS root
# mount uses the interface to be configured (IFACE), and if so does
# not configure it. This should allow the same build to be disk/flash
# booted or NFS booted.

# Extract the NFS server address (the 'addr=X.X.X.X' mount option) for the
# root ('/') nfs mount from /proc/mounts, if any.
nfsip=`sed -n '/^[^ ]*:.* \/ nfs.*[ ,]addr=\([0-9.]\+\).*/s//\1/p' /proc/mounts`
# If the route to that server goes through IFACE, refuse to reconfigure it.
if [ -n "$nfsip" ] && ip route get to "$nfsip" | grep -q "dev $IFACE"; then
    echo Skipping $IFACE, used for NFS from $nfsip
    exit 1
fi

View File

@@ -0,0 +1,13 @@
# /etc/nsswitch.conf
passwd: files
group: files
shadow: files
hosts: files dns
networks: files dns
protocols: files
services: files
ethers: files
rpc: files

View File

@@ -0,0 +1 @@
../usr/lib/os-release

View File

@@ -0,0 +1,9 @@
root:x:0:0:root:/root:/bin/sh
daemon:x:1:1:daemon:/usr/sbin:/bin/false
bin:x:2:2:bin:/bin:/bin/false
sys:x:3:3:sys:/dev:/bin/false
sync:x:4:100:sync:/bin:/bin/sync
mail:x:8:8:mail:/var/spool/mail:/bin/false
www-data:x:33:33:www-data:/var/www:/bin/false
operator:x:37:37:Operator:/var:/bin/false
nobody:x:65534:65534:nobody:/home:/bin/false

View File

@@ -0,0 +1,19 @@
export PATH="/bin:/sbin:/usr/bin:/usr/sbin"

# Only set a prompt for interactive shells ($PS1 already set by the shell).
if [ "$PS1" ]; then
    if [ "`id -u`" -eq 0 ]; then
        # Root gets the traditional '#' prompt.
        export PS1='# '
    else
        export PS1='$ '
    fi
fi

export EDITOR='/bin/vi'

# Source configuration files from /etc/profile.d
for i in /etc/profile.d/*.sh ; do
    if [ -r "$i" ]; then
        . $i
    fi
done
unset i

View File

@@ -0,0 +1 @@
umask 022

View File

@@ -0,0 +1,61 @@
# Internet (IP) protocols
#
# Updated from http://www.iana.org/assignments/protocol-numbers and other
# sources.
ip 0 IP # internet protocol, pseudo protocol number
hopopt 0 HOPOPT # IPv6 Hop-by-Hop Option [RFC1883]
icmp 1 ICMP # internet control message protocol
igmp 2 IGMP # Internet Group Management
ggp 3 GGP # gateway-gateway protocol
ipencap 4 IP-ENCAP # IP encapsulated in IP (officially ``IP'')
st 5 ST # ST datagram mode
tcp 6 TCP # transmission control protocol
egp 8 EGP # exterior gateway protocol
igp 9 IGP # any private interior gateway (Cisco)
pup 12 PUP # PARC universal packet protocol
udp 17 UDP # user datagram protocol
hmp 20 HMP # host monitoring protocol
xns-idp 22 XNS-IDP # Xerox NS IDP
rdp 27 RDP # "reliable datagram" protocol
iso-tp4 29 ISO-TP4 # ISO Transport Protocol class 4 [RFC905]
dccp 33 DCCP # Datagram Congestion Control Prot. [RFC4340]
xtp 36 XTP # Xpress Transfer Protocol
ddp 37 DDP # Datagram Delivery Protocol
idpr-cmtp 38 IDPR-CMTP # IDPR Control Message Transport
ipv6 41 IPv6 # Internet Protocol, version 6
ipv6-route 43 IPv6-Route # Routing Header for IPv6
ipv6-frag 44 IPv6-Frag # Fragment Header for IPv6
idrp 45 IDRP # Inter-Domain Routing Protocol
rsvp 46 RSVP # Reservation Protocol
gre 47 GRE # General Routing Encapsulation
esp 50 IPSEC-ESP # Encap Security Payload [RFC2406]
ah 51 IPSEC-AH # Authentication Header [RFC2402]
skip 57 SKIP # SKIP
ipv6-icmp 58 IPv6-ICMP # ICMP for IPv6
ipv6-nonxt 59 IPv6-NoNxt # No Next Header for IPv6
ipv6-opts 60 IPv6-Opts # Destination Options for IPv6
rspf 73 RSPF CPHB # Radio Shortest Path First (officially CPHB)
vmtp 81 VMTP # Versatile Message Transport
eigrp 88 EIGRP # Enhanced Interior Routing Protocol (Cisco)
ospf 89 OSPFIGP # Open Shortest Path First IGP
ax.25 93 AX.25 # AX.25 frames
ipip 94 IPIP # IP-within-IP Encapsulation Protocol
etherip 97 ETHERIP # Ethernet-within-IP Encapsulation [RFC3378]
encap 98 ENCAP # Yet Another IP encapsulation [RFC1241]
# 99 # any private encryption scheme
pim 103 PIM # Protocol Independent Multicast
ipcomp 108 IPCOMP # IP Payload Compression Protocol
vrrp 112 VRRP # Virtual Router Redundancy Protocol [RFC5798]
l2tp 115 L2TP # Layer Two Tunneling Protocol [RFC2661]
isis 124 ISIS # IS-IS over IPv4
sctp 132 SCTP # Stream Control Transmission Protocol
fc 133 FC # Fibre Channel
mobility-header 135 Mobility-Header # Mobility Support for IPv6 [RFC3775]
udplite 136 UDPLite # UDP-Lite [RFC3828]
mpls-in-ip 137 MPLS-in-IP # MPLS-in-IP [RFC4023]
manet 138 # MANET Protocols [RFC5498]
hip 139 HIP # Host Identity Protocol
shim6 140 Shim6 # Shim6 Protocol [RFC5533]
wesp 141 WESP # Wrapped Encapsulating Security Payload
rohc 142 ROHC # Robust Header Compression

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,302 @@
# /etc/services:
# $Id: services,v 1.1 2004/10/09 02:49:18 andersen Exp $
#
# Network services, Internet style
#
# Note that it is presently the policy of IANA to assign a single well-known
# port number for both TCP and UDP; hence, most entries here have two entries
# even if the protocol doesn't support UDP operations.
# Updated from RFC 1700, ``Assigned Numbers'' (October 1994). Not all ports
# are included, only the more common ones.
tcpmux 1/tcp # TCP port service multiplexer
echo 7/tcp
echo 7/udp
discard 9/tcp sink null
discard 9/udp sink null
systat 11/tcp users
daytime 13/tcp
daytime 13/udp
netstat 15/tcp
qotd 17/tcp quote
msp 18/tcp # message send protocol
msp 18/udp # message send protocol
chargen 19/tcp ttytst source
chargen 19/udp ttytst source
ftp-data 20/tcp
ftp 21/tcp
fsp 21/udp fspd
ssh 22/tcp # SSH Remote Login Protocol
ssh 22/udp # SSH Remote Login Protocol
telnet 23/tcp
# 24 - private
smtp 25/tcp mail
# 26 - unassigned
time 37/tcp timserver
time 37/udp timserver
rlp 39/udp resource # resource location
nameserver 42/tcp name # IEN 116
whois 43/tcp nicname
re-mail-ck 50/tcp # Remote Mail Checking Protocol
re-mail-ck 50/udp # Remote Mail Checking Protocol
domain 53/tcp nameserver # name-domain server
domain 53/udp nameserver
mtp 57/tcp # deprecated
bootps 67/tcp # BOOTP server
bootps 67/udp
bootpc 68/tcp # BOOTP client
bootpc 68/udp
tftp 69/udp
gopher 70/tcp # Internet Gopher
gopher 70/udp
rje 77/tcp netrjs
finger 79/tcp
www 80/tcp http # WorldWideWeb HTTP
www 80/udp # HyperText Transfer Protocol
link 87/tcp ttylink
kerberos 88/tcp kerberos5 krb5 # Kerberos v5
kerberos 88/udp kerberos5 krb5 # Kerberos v5
supdup 95/tcp
# 100 - reserved
hostnames 101/tcp hostname # usually from sri-nic
iso-tsap 102/tcp tsap # part of ISODE.
csnet-ns 105/tcp cso-ns # also used by CSO name server
csnet-ns 105/udp cso-ns
# unfortunately the poppassd (Eudora) uses a port which has already
# been assigned to a different service. We list the poppassd as an
# alias here. This should work for programs asking for this service.
# (due to a bug in inetd the 3com-tsmux line is disabled)
#3com-tsmux 106/tcp poppassd
#3com-tsmux 106/udp poppassd
rtelnet 107/tcp # Remote Telnet
rtelnet 107/udp
pop-2 109/tcp postoffice # POP version 2
pop-2 109/udp
pop-3 110/tcp # POP version 3
pop-3 110/udp
sunrpc 111/tcp portmapper # RPC 4.0 portmapper TCP
sunrpc 111/udp portmapper # RPC 4.0 portmapper UDP
auth 113/tcp authentication tap ident
sftp 115/tcp
uucp-path 117/tcp
nntp 119/tcp readnews untp # USENET News Transfer Protocol
ntp 123/tcp
ntp 123/udp # Network Time Protocol
netbios-ns 137/tcp # NETBIOS Name Service
netbios-ns 137/udp
netbios-dgm 138/tcp # NETBIOS Datagram Service
netbios-dgm 138/udp
netbios-ssn 139/tcp # NETBIOS session service
netbios-ssn 139/udp
imap2 143/tcp # Interim Mail Access Proto v2
imap2 143/udp
snmp 161/udp # Simple Net Mgmt Proto
snmp-trap 162/udp snmptrap # Traps for SNMP
cmip-man 163/tcp # ISO mgmt over IP (CMOT)
cmip-man 163/udp
cmip-agent 164/tcp
cmip-agent 164/udp
xdmcp 177/tcp # X Display Mgr. Control Proto
xdmcp 177/udp
nextstep 178/tcp NeXTStep NextStep # NeXTStep window
nextstep 178/udp NeXTStep NextStep # server
bgp 179/tcp # Border Gateway Proto.
bgp 179/udp
prospero 191/tcp # Cliff Neuman's Prospero
prospero 191/udp
irc 194/tcp # Internet Relay Chat
irc 194/udp
smux 199/tcp # SNMP Unix Multiplexer
smux 199/udp
at-rtmp 201/tcp # AppleTalk routing
at-rtmp 201/udp
at-nbp 202/tcp # AppleTalk name binding
at-nbp 202/udp
at-echo 204/tcp # AppleTalk echo
at-echo 204/udp
at-zis 206/tcp # AppleTalk zone information
at-zis 206/udp
qmtp 209/tcp # The Quick Mail Transfer Protocol
qmtp 209/udp # The Quick Mail Transfer Protocol
z3950 210/tcp wais # NISO Z39.50 database
z3950 210/udp wais
ipx 213/tcp # IPX
ipx 213/udp
imap3 220/tcp # Interactive Mail Access
imap3 220/udp # Protocol v3
ulistserv 372/tcp # UNIX Listserv
ulistserv 372/udp
https 443/tcp # MCom
https 443/udp # MCom
snpp 444/tcp # Simple Network Paging Protocol
snpp 444/udp # Simple Network Paging Protocol
saft 487/tcp # Simple Asynchronous File Transfer
saft 487/udp # Simple Asynchronous File Transfer
npmp-local 610/tcp dqs313_qmaster # npmp-local / DQS
npmp-local 610/udp dqs313_qmaster # npmp-local / DQS
npmp-gui 611/tcp dqs313_execd # npmp-gui / DQS
npmp-gui 611/udp dqs313_execd # npmp-gui / DQS
hmmp-ind 612/tcp dqs313_intercell# HMMP Indication / DQS
hmmp-ind 612/udp dqs313_intercell# HMMP Indication / DQS
#
# UNIX specific services
#
exec 512/tcp
biff 512/udp comsat
login 513/tcp
who 513/udp whod
shell 514/tcp cmd # no passwords used
syslog 514/udp
printer 515/tcp spooler # line printer spooler
talk 517/udp
ntalk 518/udp
route 520/udp router routed # RIP
timed 525/udp timeserver
tempo 526/tcp newdate
courier 530/tcp rpc
conference 531/tcp chat
netnews 532/tcp readnews
netwall 533/udp # -for emergency broadcasts
uucp 540/tcp uucpd # uucp daemon
afpovertcp 548/tcp # AFP over TCP
afpovertcp 548/udp # AFP over TCP
remotefs 556/tcp rfs_server rfs # Brunhoff remote filesystem
klogin 543/tcp # Kerberized `rlogin' (v5)
kshell 544/tcp krcmd # Kerberized `rsh' (v5)
kerberos-adm 749/tcp # Kerberos `kadmin' (v5)
#
webster 765/tcp # Network dictionary
webster 765/udp
#
# From ``Assigned Numbers'':
#
#> The Registered Ports are not controlled by the IANA and on most systems
#> can be used by ordinary user processes or programs executed by ordinary
#> users.
#
#> Ports are used in the TCP [45,106] to name the ends of logical
#> connections which carry long term conversations. For the purpose of
#> providing services to unknown callers, a service contact port is
#> defined. This list specifies the port used by the server process as its
#> contact port. While the IANA can not control uses of these ports it
#> does register or list uses of these ports as a convenience to the
#> community.
#
nfsdstatus 1110/tcp
nfsd-keepalive 1110/udp
ingreslock 1524/tcp
ingreslock 1524/udp
prospero-np 1525/tcp # Prospero non-privileged
prospero-np 1525/udp
datametrics 1645/tcp old-radius # datametrics / old radius entry
datametrics 1645/udp old-radius # datametrics / old radius entry
sa-msg-port 1646/tcp old-radacct # sa-msg-port / old radacct entry
sa-msg-port 1646/udp old-radacct # sa-msg-port / old radacct entry
radius 1812/tcp # Radius
radius 1812/udp # Radius
radacct 1813/tcp # Radius Accounting
radacct 1813/udp # Radius Accounting
nfsd 2049/tcp nfs
nfsd 2049/udp nfs
cvspserver 2401/tcp # CVS client/server operations
cvspserver 2401/udp # CVS client/server operations
mysql 3306/tcp # MySQL
mysql 3306/udp # MySQL
rfe 5002/tcp # Radio Free Ethernet
rfe 5002/udp # Actually uses UDP only
cfengine 5308/tcp # CFengine
cfengine 5308/udp # CFengine
bbs 7000/tcp # BBS service
#
#
# Kerberos (Project Athena/MIT) services
# Note that these are for Kerberos v4, and are unofficial. Sites running
# v4 should uncomment these and comment out the v5 entries above.
#
kerberos4 750/udp kerberos-iv kdc # Kerberos (server) udp
kerberos4 750/tcp kerberos-iv kdc # Kerberos (server) tcp
kerberos_master 751/udp # Kerberos authentication
kerberos_master 751/tcp # Kerberos authentication
passwd_server 752/udp # Kerberos passwd server
krb_prop 754/tcp # Kerberos slave propagation
krbupdate 760/tcp kreg # Kerberos registration
kpasswd 761/tcp kpwd # Kerberos "passwd"
kpop 1109/tcp # Pop with Kerberos
knetd 2053/tcp # Kerberos de-multiplexor
zephyr-srv 2102/udp # Zephyr server
zephyr-clt 2103/udp # Zephyr serv-hm connection
zephyr-hm 2104/udp # Zephyr hostmanager
eklogin 2105/tcp # Kerberos encrypted rlogin
#
# Unofficial but necessary (for NetBSD) services
#
supfilesrv 871/tcp # SUP server
supfiledbg 1127/tcp # SUP debugging
#
# Datagram Delivery Protocol services
#
rtmp 1/ddp # Routing Table Maintenance Protocol
nbp 2/ddp # Name Binding Protocol
echo 4/ddp # AppleTalk Echo Protocol
zip 6/ddp # Zone Information Protocol
#
# Services added for the Debian GNU/Linux distribution
poppassd 106/tcp # Eudora
poppassd 106/udp # Eudora
mailq 174/tcp # Mailer transport queue for Zmailer
mailq 174/udp # Mailer transport queue for Zmailer
omirr 808/tcp omirrd # online mirror
omirr 808/udp omirrd # online mirror
rmtcfg 1236/tcp # Gracilis Packeten remote config server
xtel 1313/tcp # french minitel
coda_opcons 1355/udp # Coda opcons (Coda fs)
coda_venus 1363/udp # Coda venus (Coda fs)
coda_auth 1357/udp # Coda auth (Coda fs)
coda_udpsrv 1359/udp # Coda udpsrv (Coda fs)
coda_filesrv 1361/udp # Coda filesrv (Coda fs)
codacon 1423/tcp venus.cmu # Coda Console (Coda fs)
coda_aux1 1431/tcp # coda auxiliary service (Coda fs)
coda_aux1 1431/udp # coda auxiliary service (Coda fs)
coda_aux2 1433/tcp # coda auxiliary service (Coda fs)
coda_aux2 1433/udp # coda auxiliary service (Coda fs)
coda_aux3 1435/tcp # coda auxiliary service (Coda fs)
coda_aux3 1435/udp # coda auxiliary service (Coda fs)
cfinger 2003/tcp # GNU Finger
afbackup 2988/tcp # Afbackup system
afbackup 2988/udp # Afbackup system
icp 3130/tcp # Internet Cache Protocol (Squid)
icp 3130/udp # Internet Cache Protocol (Squid)
postgres 5432/tcp # POSTGRES
postgres 5432/udp # POSTGRES
fax 4557/tcp # FAX transmission service (old)
hylafax 4559/tcp # HylaFAX client-server protocol (new)
noclog 5354/tcp # noclogd with TCP (nocol)
noclog 5354/udp # noclogd with UDP (nocol)
hostmon 5355/tcp # hostmon uses TCP (nocol)
hostmon 5355/udp # hostmon uses TCP (nocol)
ircd 6667/tcp # Internet Relay Chat
ircd 6667/udp # Internet Relay Chat
webcache 8080/tcp # WWW caching service
webcache 8080/udp # WWW caching service
tproxy 8081/tcp # Transparent Proxy
tproxy 8081/udp # Transparent Proxy
mandelspawn 9359/udp mandelbrot # network mandelbrot
amanda 10080/udp # amanda backup services
amandaidx 10082/tcp # amanda backup services
amidxtape 10083/tcp # amanda backup services
isdnlog 20011/tcp # isdn logging system
isdnlog 20011/udp # isdn logging system
vboxd 20012/tcp # voice box system
vboxd 20012/udp # voice box system
binkp 24554/tcp # Binkley
binkp 24554/udp # Binkley
asp 27374/tcp # Address Search Protocol
asp 27374/udp # Address Search Protocol
tfido 60177/tcp # Ifmail
tfido 60177/udp # Ifmail
fido 60179/tcp # Ifmail
fido 60179/udp # Ifmail
# Local services

View File

@@ -0,0 +1,9 @@
root::::::::
daemon:*:::::::
bin:*:::::::
sys:*:::::::
sync:*:::::::
mail:*:::::::
www-data:*:::::::
operator:*:::::::
nobody:*:::::::

View File

@@ -0,0 +1,33 @@
-----BEGIN CERTIFICATE-----
MIIFrzCCA5egAwIBAgIUOTWl6IoOr4Lvj0qz0qoA2W8q6SEwDQYJKoZIhvcNAQEL
BQAwUjELMAkGA1UEBhMCQVUxDDAKBgNVBAgMA05TVzERMA8GA1UECgwIQXVkaW5h
dGUxFDASBgNVBAsMC0VuZ2luZWVyaW5nMQwwCgYDVQQDDANERVAwIBcNNzAwMTAx
MDAwMDAwWhgPMjE3MDAxMDExMjAwMDBaMFIxCzAJBgNVBAYTAkFVMQwwCgYDVQQI
DANOU1cxETAPBgNVBAoMCEF1ZGluYXRlMRQwEgYDVQQLDAtFbmdpbmVlcmluZzEM
MAoGA1UEAwwDREVQMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAr2Xf
QTbmntPQQxYSceK8a8OkKCCTkX2HLmpTDDBrySdBTtvu0jJDIISEtzSUQu8UQ+8H
2atTXgc+Cseam+fDOsU3gmDgn+lmcA5o1rDLrXlQZCJrO/JUpyQ5v7kkGU33AqKc
Ik307fPwkX6YvBBy4Zc/T23bU8KXW+8beQrteImie9Pw9tt9GBvREox0/MBv23qs
3IDdPXB5qdFdenwAwIUGvG43Aohhldnp063HLc8GNySmfwuFpCPgMYph+jX/yKv+
EMBmH9KIhYDvzuq28NJajTaJtsMXr1OlW9a6s2zehK7JYnjrqo3J7ebhb6nxz4co
uQ39DHc+Hsbfi/Bg/UpabpW5Wdl3GHOaeUNWpLheifO2OP35S3yPkTCiz8Wfu7l3
CO67360PaBTmq7tjen2H6pFLWsGLsjcjnUtp0sf50LAhpFHvsBVKS7MkkXwNdncg
X+kGTDCpYNeKdCU6s71Z7RKXjArO1RtRxVU1N4l5U3JOYQ7jChLGwoFDfpBRMVh+
gNNi9/lLNU++gRFmF+1i//bI44Z+cviHkfGq+dIpWBO1KSkGtAE9hWmq0qNdWS1V
b60LU9h4IMg23XYmHpJEyQoKTz4EIJgVjor9ErHws9Ig5vG4FGyXnIjPC9N7PgN1
iDIrf8FgaOSRQPcBd3VjguW4Y8qFts1opQCKFbkCAwEAAaN7MHkwCQYDVR0TBAIw
ADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUw
HQYDVR0OBBYEFNG9TtgJ0D6ukZf/M4ZyEAW+i+95MB8GA1UdIwQYMBaAFNG9TtgJ
0D6ukZf/M4ZyEAW+i+95MA0GCSqGSIb3DQEBCwUAA4ICAQBBmzlYv9E7W5E0Lv0X
wszsgvbfavB83vvhYkDiPFIUt+6s6b97TNtKhfZZWbJprL4Gt262Xt1t4pZY8csU
co8qpEop4uA943A2z+3Fwc5OuYH+TMlvYVhRLnCFtgeu89VaXEPAFd+d1652nfta
l1y9Gj8NsPdsM0xwDzChRqyAR9heXil1ZhLVzfJr3ri0jdK9ZEIfsS8GOgSmLm+5
LO9UXu73Sqq4qWpp54T7AGEJz7DqIHQSY1c/beuFn7W8Ox9K/9MPoq3mQnn24z/1
rS3chxriKTe3hcslKs0I5HE7HF9SSG9sW6GI5TUcWfCAjIn74WK5/q6XBhh+Io1p
QnsokMzgvNIk/Eit+9P38uNkU1TNsL+wMgV2/qNcRHX5gSfr3yhCN/tg8zCcBkef
Ek5dmdxfSY8wq273W6rKKJbGB3Eb9R9gpYnHWAgiH/qJ0epnT20ynakRUpj9ZSza
cuEZNbcXBUQPCUkyDUOpW7h4budXIMlIbvMtiL8pna8XQM309K9CNZfV16QGuK1P
KNjdiwvl7UVtEUiTCAwt+rq0N3tRuq5ceK9YrnWiCBCXTMIa9o3sv6IgByXvOCkv
uNa8PJr1O8uPtbQqaMsBC3nnG2wpSsEjaurYy5SOSUVvXxVnb3L93FuADMgn1i5Y
UtfGCCL4MCkNn9APcgzpRSqUFA==
-----END CERTIFICATE-----

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@@ -0,0 +1,4 @@
/* GNU ld script
Use the shared library, but some functions are only in
the static library. */
GROUP ( libgcc_s.so.1 -lgcc )

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1 @@
lib

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1 @@
libasound.so.2.0.0

View File

@@ -0,0 +1 @@
libasound.so.2.0.0

View File

@@ -0,0 +1 @@
libstdc++.so.6.0.28

View File

@@ -0,0 +1 @@
libstdc++.so.6.0.28

View File

@@ -0,0 +1 @@
libz.so.1.2.11

View File

@@ -0,0 +1 @@
libz.so.1.2.11

Binary file not shown.

View File

@@ -0,0 +1,5 @@
NAME=Buildroot
VERSION=2021.11
ID=buildroot
VERSION_ID=2021.11
PRETTY_NAME="Buildroot 2021.11"

View File

@@ -0,0 +1 @@
lib

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1 @@
/tmp

BIN
src/dep/dante_package/crun Executable file

Binary file not shown.

View File

@@ -0,0 +1,320 @@
{
"ociVersion": "1.0.1",
"process": {
"terminal": false,
"user": {
"uid": 0,
"gid": 0
},
"args": [
"./dep_manager",
"/dante_data/capability/dante.json"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/dante",
"TERM=xterm"
],
"cwd": "/dante",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
],
"ambient": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_SYS_NICE",
"CAP_AUDIT_WRITE",
"CAP_NET_ADMIN"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs",
"readonly": false
},
"hostname": "",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/var/run",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"]
},
{
"destination": "/var/run/dante",
"type": "bind",
"source": "/var/run/dante",
"options": ["bind", "rw"]
},
{
"destination": "/var/lib/dbus/machine-id",
"type": "bind",
"source": "/var/lib/dbus/machine-id",
"options": ["ro", "rbind", "rprivate", "nosuid", "noexec", "nodev"]
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": ["nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"]
},
{
"destination": "/dev/shm",
"type": "bind",
"source": "/dev/shm",
"options": ["bind", "rw"]
},
{
"destination": "/dev/snd",
"type": "bind",
"source": "/dev/snd",
"options": ["bind", "rw"]
},
{
"destination": "/tmp",
"type": "tmpfs",
"source": "tmpfs",
"options": ["nosuid", "strictatime", "mode=755", "size=65536k"]
},
{
"destination": "/var/log",
"type": "bind",
"source": "/var/log",
"options": ["bind", "rw"]
},
{
"destination": "/etc/machine-id",
"type": "bind",
"source": "/etc/machine-id",
"options": ["ro", "rbind", "rprivate", "nosuid", "noexec", "nodev"]
},
{
"destination": "/etc/resolv.conf",
"type": "bind",
"source": "/etc/resolv.conf",
"options": ["ro", "rbind", "rprivate", "nosuid", "noexec", "nodev"]
},
{
"destination": "/dante_data",
"type": "bind",
"source": "/home/caster/bumble-auracast/src/dep/dante_package/dante_data",
"options": ["bind", "rw"]
},
{
"destination": "/dante_data/capability",
"type": "bind",
"source": "/home/caster/bumble-auracast/src/dep/dante_package/dante_data/capability",
"options": ["bind", "ro"]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": ["nosuid", "noexec", "nodev", "ro"]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup2",
"source": "cgroup2",
"options": ["nosuid", "noexec", "nodev", "relatime", "ro"]
},
{
"destination": "/usr/share/alsa/alsa.conf",
"type": "bind",
"source": "/usr/share/alsa/alsa.conf",
"options": ["bind", "ro"]
}
],
"linux": {
"cgroupsPath": "dante",
"namespaces": [
{ "type": "pid" },
{ "type": "ipc" },
{ "type": "mount" },
{ "type": "uts" },
{ "type": "cgroup" }
],
"devices": [
{
"path": "/dev/ptp0",
"type": "c",
"major": 249,
"minor": 0,
"fileMode": 384,
"uid": 0,
"gid": 0
},
{
"path": "/dev/snd/pcmC3D0p",
"type": "c",
"major": 116,
"minor": 8,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/pcmC3D0c",
"type": "c",
"major": 116,
"minor": 9,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/pcmC3D1p",
"type": "c",
"major": 116,
"minor": 10,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/pcmC3D1c",
"type": "c",
"major": 116,
"minor": 11,
"fileMode": 432,
"uid": 0,
"gid": 29
},
{
"path": "/dev/snd/controlC3",
"type": "c",
"major": 116,
"minor": 12,
"fileMode": 432,
"uid": 0,
"gid": 29
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
],
"resources":{
"devices":[
{
"allow": true,
"type": "c",
"major": 116,
"access": "rw"
}
]
}
}
}

View File

@@ -0,0 +1,80 @@
{
"trialMode": true,
"$schema": "./dante.json_schema.json",
"platform":
{
"cgroupVersion": 2,
"logDirectory" : "/var/log"
},
"audio" :
{
"txChannels" : 0,
"rxChannels" : 6,
"sampleRate" : 48000,
"availableSampleRates" :
[
48000
],
"samplesPerPeriod" : 16,
"periodsPerBuffer" : 300,
"networkLatencyMinMs" : 2,
"networkLatencyDefaultMs" : 5,
"supportedEncodings" :
[
"PCM16"
],
"defaultEncoding" : "PCM16",
"numDepCores" : 1
},
"network" :
{
"interfaceMode" : "Direct",
"interfaces" :
[
"eth0"
],
"preferredLinkSpeed" : "LINK_SPEED_100M"
},
"clock" :
{
"enableHwTimestamping" : false
},
"hardwareClock" :
{
"useHwClock" : false
},
"hostcpu" :
{
"enableDdp" : false
},
"alsaAsrc":
{
"enableAlsaAsrc": true,
"deviceConfigurations": [
{
"deviceIdentifier": "hw:0,0",
"direction": "playback",
"bitDepth": 16,
"numOpenChannels": 6,
"alsaChannelRange": "0-5",
"danteChannelRange": "0-5",
"bufferSize": 4800,
"samplesPerPeriod": 16
}
]
},
"product" :
{
"manfId" : "Audinate",
"manfName" : "Audinate Pty Ltd",
"modelId" : "OEMDEP",
"modelName" : "Linux Dante Embedded Platform",
"modelVersion" :
{
"major" : 9,
"minor" : 9,
"bugfix" : 99
},
"devicePrefix" : "DEP"
}
}

View File

@@ -0,0 +1,734 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Schema for Dante Embedded Platform dante.json configuration file.",
"type": "object",
"properties": {
"$schema": {
"type": "string",
"description": "Removes superfluous warning on the schema field introduced by additionalProperties = false."
},
"platform": {
"type": "object",
"properties": {
"cgroupVersion": {
"type": "integer",
"oneOf": [
{
"enum": [1, 2]
}
],
"description": "Tells DEP which version of cgroups the system is configured for. Setting this value allows DEP to optimise the system more appropriately. If this value is not set at all then DEP will still function but possibly in a less optimal way."
},
"logDirectory": {
"type": "string",
"description": "Directory to write DEP log files. The directory must be available inside the container, DEP must have read/write access to the directory and the directory must exist before the DEP container is started."
},
"maxNumLogs": {
"type": "integer",
"default": 6,
"description": "Maximum number of versions of each DEP log file to keep."
},
"maxLogSize": {
"type": "integer",
"default": 102400,
"description": "Maximum number of bytes of each DEP log file."
},
"logLevel": {
"type": "string",
"oneOf": [
{
"enum": [
"Error",
"Warning",
"Notice",
"Info",
"Debug"
]
}
],
"default": "Warning",
"description": "The minimum log level to write to the log files."
}
},
"required": [
"logDirectory"
]
},
"audio": {
"type": "object",
"properties": {
"rxChannels": {
"type": "integer",
"minimum": 0,
"maximum": 512,
"description": "The number of receive channels. Actual number of available channels will be the minimum of this value and the licensed channels."
},
"txChannels": {
"type": "integer",
"minimum": 0,
"maximum": 512,
"description": "The number of transmit channels. Actual number of available channels will be the minimum of this value and the licensed channels."
},
"maxRxFlows": {
"type": "integer",
"description": "Maximum receive flows that DEP will allow. Default is 'MAX(2, (rxChannels + 1) / 2)'."
},
"maxTxFlows": {
"type": "integer",
"description": "Maximum transmit flows that DEP will allow. Default is 'MAX(2, (txChannels + 1) / 2)'."
},
"sampleRate": {
"type": "integer",
"oneOf": [
{
"enum": [
44100,
48000,
88200,
96000
]
}
],
"default": 48000,
"description": "Default sample rate of DEP."
},
"availableSampleRates": {
"type": "array",
"minItems": 1,
"items": {
"type": "integer",
"oneOf": [
{
"enum": [
44100,
48000,
88200,
96000
]
}
]
},
"default": [
44100,
48000,
88200,
96000
],
"contains": {
"const": 48000
},
"uniqueItems": true,
"description": "A list of sample rates that can be selected for the DEP device."
},
"samplesPerPeriod": {
"type": "integer",
"default": 16,
"description": "The number of samples between audio period events (ticks)."
},
"periodsPerBuffer": {
"type": "integer",
"default": 3000,
"description": "The number of periods in the buffer."
},
"networkLatencyMinMs": {
"type": "integer",
"oneOf": [
{
"enum": [
1,
2,
3,
4,
5,
10
]
}
],
"default": 2,
"description": "The minimum latency in milliseconds (ms) that this device can support."
},
"networkLatencyDefaultMs": {
"type": "integer",
"minimum": 1,
"maximum": 40,
"default": 4,
"description": "Default network latency in milliseconds (ms)."
},
"numDepCores": {
"anyOf": [
{
"type": "array",
"items": {
"type": "integer",
"minimum": 0
},
"minItems": 1,
"uniqueItems": true,
"description": "List of CPU core IDs DEP will run on."
},
{
"type": "integer",
"minimum": 0,
"description": "The number of CPU cores DEP will run on. DEP will run on the given number of consecutive cores starting from core 0"
}
],
"description": "The CPU cores DEP will run on. The host system is responsible for ensuring the configured cores are isolated exclusively for DEP use."
},
"percentCpuShare": {
"type": "integer",
"minimum": 1,
"maximum": 100,
"default": 100,
"description": "The share of CPU time DEP will be allocated when CPU time is under contention. NOTE: this setting has no effect when cgroups v2 is in use and will be deprecated soon."
},
"defaultEncoding": {
"type": "string",
"oneOf": [
{
"enum": [
"PCM16",
"PCM24",
"PCM32"
]
}
],
"description": "(DEPRECATED) The default native device encoding value. This field should no longer be used. The default should instead be specified as the first entry in supportedEncodings"
},
"supportedEncodings": {
"type": "array",
"items": {
"type": "string",
"oneOf": [
{
"enum": [
"PCM16",
"PCM24",
"PCM32"
]
}
]
},
"default": [
"PCM24"
],
"uniqueItems": true,
"description": "A list of supported native device encoding values."
},
"aes67Supported": {
"type": "boolean",
"default": false,
"description": "Whether this device supports the AES67 protocol."
},
"channelGroupsFile": {
"type": "string",
"description": "The full path to a separate JSON file that specifies logical channel groupings."
},
"defaultChannelNamesFile": {
"type": "string",
"description": "The full path to a separate JSON file that specifies custom default channel names."
},
"perChannelEncodingsFile": {
"type": "string",
"description": "The full path to a separate JSON file that specifies per-channel encodings."
},
"enableSelfSubscription": {
"type": "boolean",
"default": true,
"description": "Whether the device self-subscription capability should be enabled."
},
"silenceHeadDelayMs" : {
"type": "integer",
"default": 20,
"description": "DEP erases audio in the rx and tx audio buffer shortly after the network time for those frames passes. This controls the delay on this erasure measured in milliseconds."
}
},
"required": [
"txChannels",
"rxChannels",
"availableSampleRates",
"numDepCores"
]
},
"network": {
"type": "object",
"properties": {
"interfaceMode": {
"type": "string",
"oneOf": [
{
"enum": [
"Switched",
"Direct"
]
}
],
"default": "Direct",
"description": "DEP network interface mode. Direct means connected to the network via a PHY; Switched means connected to the network via a switch."
},
"interfaces": {
"type": "array",
"items": {
"anyOf": [
{ "type": "string" },
{ "type": "integer" }
]
},
"minItems": 1,
"maxItems": 2,
"uniqueItems": true,
"description": "List of network interface names or indexes DEP will use to connect to the Dante network."
},
"preferredLinkSpeed": {
"type": "string",
"oneOf": [
{
"enum": [
"LINK_SPEED_10G",
"LINK_SPEED_1G",
"LINK_SPEED_100M"
]
}
],
"default": "LINK_SPEED_1G",
"description": "The preferred link speed of the network interface/s used by DEP."
},
"webSocketPort": {
"type": "integer",
"minimum": 1024,
"maximum": 65535,
"description": "The websocket port used by DEP. If not set an ephemeral port is used."
}
},
"required": [
"interfaces"
]
},
"mdns": {
"type": "object",
"properties": {
"restrictInterfaces": {
"type": "boolean",
"default": true,
"description": "Whether to restrict mDNS advertisements to only the specified network interfaces."
}
}
},
"clock": {
"type": "object",
"properties": {
"enableHwTimestamping": {
"anyOf": [
{
"type": "boolean"
},
{
"const": "v1"
}
],
"default": false,
"description": "Whether to use hardware packet timestamping at the Network Interface Card (NIC) level."
},
"dsaTaggedPackets": {
"type": "boolean",
"default": false,
"description": "Whether packets read from the network interface have a DSA tag attached."
},
"hardwareInterfaces": {
"type": "array",
"items": {
"anyOf": [
{ "type": "string" },
{ "type": "integer" }
]
},
"minItems": 1,
"maxItems": 2,
"description": "List of network interface names or indexes that support hardware packet timestamping."
},
"followerOnly": {
"type": "boolean",
"default": false,
"description": "Whether the device should be in follower only mode. When true, DEP cannot become clock leader."
}
},
"if": {
"properties": {
"enableHwTimestamping": {
"anyOf": [
{ "const": true },
{ "const": "v1" }
]
},
"dsaTaggedPackets": { "const": true }
},
"required": [
"enableHwTimestamping", "dsaTaggedPackets"
]
},
"then": {
"required": [
"hardwareInterfaces"
]
}
},
"hardwareClock": {
"type": "object",
"properties": {
"useHwClock": {
"type": "boolean",
"default": false,
"description": "Enable use of clocking hardware."
},
"circuitName": {
"type": "string",
"description": "Name of the clock generator and adjustment circuitry. This field must be one of the supported strings in a DEP release."
},
"circuitRevision": {
"type": "integer",
"description": "An integer representing the circuit revision to use. This field must correspond to a supported revision and circuit in a DEP release."
},
"i2cBus": {
"type": "string",
"default": "/dev/i2c-0",
"description": "The I2C bus device to use to communicate with the clock circuitry. If not present, the first I2C bus device '/dev/i2c-0' is used.",
"pattern": "^\\/dev"
},
"i2cAddr": {
"type": "string",
"description": "The I2C address configurable for a circuit. If not present, the default addresses for the circuit are used."
},
"extClockInputDev": {
"type": "string",
"default": "/dev/extclkin",
"description": "The device path to the external clock input driver used in the clock feedback algorithm. If not present, this field defaults to '/dev/extclkin'.",
"pattern": "^\\/dev"
},
"bitClocks": {
"type": "array",
"description": "An array of mappings between the sample rate and bit clock configurations.",
"items": {
"type": "object",
"properties": {
"sampleRate": {
"type": "integer",
"description": "Sample rate of the mapping."
},
"tdmChannels": {
"type": "integer",
"description": "Number of TDM channels."
},
"bitDepth": {
"type": "integer",
"description": "Bit depth of mapping."
}
},
"required": [
"tdmChannels",
"bitDepth"
]
}
},
"loadCapacitance": {
"type": "integer",
"default": -1,
"description": "Value for the internal load capacitance in pf to set for the clock circuit. If not set or set to a negative number the circuits default will be used. The default and set of valid values are clock circuit specific. For DEP supported si5351b based clock circuits the default load capacitance is 10pF and the set of valid values for this field are 6, 8 and 10."
}
},
"if": {
"properties": {
"useHwClock": { "const": true }
},
"required": ["useHwClock"]
},
"then": {
"required": ["circuitName"]
}
},
"hostcpu": {
"type": "object",
"properties": {
"enableDdp": {
"type": "boolean",
"default": false,
"description": "Enable the 'Dante Device Protocol'."
}
},
"required": ["enableDdp"]
},
"alsaAsrc": {
"type": "object",
"properties": {
"enableAlsaAsrc": {
"type": "boolean",
"description": "Set to true to enable ALSA ASRC and false to disable."
},
"txLatencySamples": {
"type": "integer",
"default": 48,
"description": "Offset used by ASRC when writing audio to the DEP TX buffer measured in samples."
},
"pollMode": {
"type": "boolean",
"default": false,
"description": "If true, ALSA ASRC will not wait on the DEP shared memory semaphore and will instead poll the memory to determine when more data is available."
},
"schedulingPriority": {
"type": "integer",
"default": 70,
"minimum": 0,
"maximum": 100,
"description": "The real-time scheduling priority to run the ALSA ASRC application at."
},
"cpuAffinity": {
"type": "integer",
"minimum": 0,
"description": "The CPU core ID which should be exclusively assigned to ASRC. NOTE: for optimal performance, ensure that the selected CPU core ID is not already listed in the numDepCores value"
},
"deviceConfigurations": {
"type": "array",
"minItems": 1,
"uniqueItems": true,
"description": "List of devices to open. This key is required if Asrc is enabled.",
"items": {
"allOf": [
{
"type": "object",
"description": "Configuration options for each ALSA device to be opened",
"properties": {
"deviceIdentifier": {
"type": "string",
"description": "The ALSA device identifier for this device, e.g. \"hw:1,0\" or \"hw:CARD=sofhdadsp,DEV=0\"."
},
"direction": {
"enum": [
"playback",
"capture"
],
"description": "The direction to open the ALSA device in. Must be \"capture\" or \"playback\"."
},
"bitDepth": {
"enum": [
16,
24,
32
],
"default": 24,
"description": "The PCM bit depth to open the ALSA device with. The device will be opened with the first format it claims to support which is that depth. Typically this maps 8 to S8, 16 to S16_LE, 24 to S24_LE and 32 to S32_LE."
},
"bitWidthOverride": {
"enum": [
16,
24,
32
],
"description": "The number of bits each sample is packed into. For example, \"bitDepth\": 24, \"bitWidthOverride\": 24 is equivalent to S24_3LE, so the application writes samples aligned to 3 bytes. This overrides the alignment of the selected format. So if, for example, a device only claims to support S24_LE (24 bits aligned to 32 bit words) but actually writes 24 bit samples aligned to 24 bits, this setting can account for this."
},
"alsaFormat": {
"enum": [
"S16_LE",
"S24_LE",
"S32_LE",
"FLOAT_LE",
"S24_3LE",
"S16",
"S24",
"S32",
"FLOAT"
],
"description": "The specific ALSA format name to open the device with. Incompatible with bitDepth."
},
"numOpenChannels": {
"type": "integer",
"minimum": 1,
"default": 2,
"description": "The number of channels to open on the ALSA device."
},
"alsaChannelRange": {
"type": "string",
"pattern": "^[0-9]+-[0-9]+$",
"description": "The block of ALSA channels to use. Can only be provided if numOpenChannels is specified. String of the form \"X-Y\" where X and Y are zero indexed channel numbers, specifying the block [X,Y] inclusive. Defaults to 0-(numOpenChannels - 1)"
},
"danteChannelRange": {
"type": "string",
"pattern": "^[0-9]+-[0-9]+$",
"description": "The block of DEP channels this device will read from or write to. Can only be provided if numOpenChannels is specified. String of the form \"X-Y\" where X and Y are zero indexed channel numbers, specifying the block [X,Y] inclusive. Defaults to 0-(numOpenChannels-1)"
},
"gain": {
"type": "integer",
"default": 0,
"description": "Positive or negative gain in dB to apply to the audio for this device."
},
"bufferSize": {
"type": "integer",
"default": 64,
"description": "Size of the ALSA buffer to request the device to open with. Should never be less than 2 DEP periods or 2 ALSA periods. Note that the exact numbers for bufferSize and samplesPerPeriod are merely a request, and individual ALSA drivers are entitled to find other nearby valid values, if necessary."
},
"samplesPerPeriod": {
"type": "integer",
"default": 8,
"description": "Samples per period to request the ALSA device to open with. Note that the exact numbers for bufferSize and samplesPerPeriod are merely a request, and individual ALSA drivers are entitled to find other nearby valid values, if necessary."
},
"latency": {
"type": "integer",
"description": "By default, ASRC maintains the ALSA buffer at its halfway point - which corresponds to the insertion latency of ASRC. This key overrides this behaviour, specifying the target buffer point in samples."
},
"readWriteiBuffer": {
"type": "integer",
"description": "For drivers that don't support MMAP (memory-mapped) buffer operations, the application can emulate the memory mapping internally by inserting an additional buffer and servicing it through ALSA R/W calls. If this value is >0, it specifies the size of this additional buffer."
},
"forceArtificialAudioTime": {
"type": "boolean",
"description": "This setting provides an override for drivers which don't provide correct audio timestamps. If this is set to true, ASRC overrides the audio time with an artificial one calculated from sample counts."
}
},
"required": [
"deviceIdentifier",
"direction"
]
},
{
"description": "The alsaFormat option is incompatible with the bitDepth and bitWidthOverride option",
"if": {
"anyOf": [
{"required": ["bitDepth"]},
{"required": ["bitWidthOverride"]}
]
},
"then": {
"not": {"required": ["alsaFormat"]}
}
},
{
"description": "alsaChannelRange and danteChannelRange can each only be defined if numOpenChannels is specified",
"if": {
"anyOf": [
{"required": ["alsaChannelRange"]},
{"required": ["danteChannelRange"]}
]
},
"then": {
"required": ["numOpenChannels"]
}
}
]
}
}
},
"required": [
"enableAlsaAsrc"
]
},
"product": {
"type": "object",
"properties": {
"manfId": {
"type": "string",
"minLength": 1,
"maxLength": 8,
"description": "The ID of the device manufacturer. This value is assigned to the manufacturer by Audinate when signing up as a DEP licensee."
},
"manfName": {
"type": "string",
"minLength": 1,
"maxLength": 31,
"description": "Human-readable manufacturer name that users will see in Dante Controller."
},
"modelId": {
"type": "string",
"minLength": 1,
"maxLength": 8,
"description": "The device model ID, up to 8 characters long and unique for each product type produced by a manufacturer."
},
"modelName": {
"type": "string",
"minLength": 1,
"maxLength": 31,
"description": "Human-readable model name that users will see in Dante Controller."
},
"modelVersion": {
"type": "object",
"description": "3-part version number of the DEP device, this will be shown in Dante Controller.",
"properties": {
"major": {
"type": "integer",
"description": "Product version major number."
},
"minor": {
"type": "integer",
"description": "Product version minor number."
},
"bugfix": {
"type": "integer",
"description": "Product version bugfix number."
}
},
"required": [
"major",
"minor",
"bugfix"
]
},
"modelVersionString": {
"type": "string",
"description": "An arbitrary string that overrides the 'modelVersion'. If not set the 'modelVersion' fields will be used to construct a model version string.",
"minLength": 1,
"maxLength": 31
},
"devicePrefix": {
"type": "string",
"default": "DEP",
"minLength": 1,
"maxLength": 24,
"pattern": "^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,23})?$",
"description": "Dante device name prefix. Up to 24 characters, legal characters are A-Z, a-z, 0-9, and '-' ('-' cannot be the first character)."
}
},
"required": [
"manfId",
"manfName",
"modelId",
"modelName",
"modelVersion"
]
},
"trialMode": {
"type": "boolean",
"default": false,
"description": "Set to true to start the container in 'Trial Mode'. If excluded or false, DEP will require activation."
},
"misc": {
"type": "object",
"properties": {
"enableIdentify": {
"type": "boolean",
"default": false,
"description": "Set to true to enable the device 'Identify' function and false to disable."
}
}
},
"ddhi": {
"type": "object",
"properties": {
"enable": {
"type": "boolean",
"default": true,
"description": "Set to true to enable Dante Device Host Interface (DDHI) and false to disable. All other properties in the ddhi object are only used if this value is true."
},
"clientRpcs": {
"type": "array",
"items": {
"type": "string"
},
"description": "List of DDHI RPCs supported by the platform DDHI client(s)"
}
}
}
},
"required": [
"platform",
"audio",
"network",
"product"
],
"additionalProperties" : false
}

Binary file not shown.

View File

@@ -0,0 +1,3 @@
{
"sampleRate" : 48000
}

View File

@@ -0,0 +1,7 @@
!preferred
subdomain_name _DFLT
mport
socket_loglevel 3
port_v1_m_p
!port_v2_u_p
!enrolled

View File

@@ -0,0 +1 @@
e—|Ć%J<4E>6w7

View File

@@ -0,0 +1 @@
0

280
src/dep/dante_package/dep.sh Executable file
View File

@@ -0,0 +1,280 @@
#!/bin/sh
# To use a different OCI-compliant container runtime,
# update both CONTAINER_RUNTIME and CONTAINER_RUNTIME_PATH:
#
# - CONTAINER_RUNTIME should be set to the name of the runtime binary (e.g., 'crun', 'runc').
# - CONTAINER_RUNTIME_PATH should point to the directory where the binary is installed
# (e.g., '/usr/bin' for a system-installed runtime).
CONTAINER_RUNTIME_PATH=$PWD
CONTAINER_RUNTIME=crun
# directory the runtime uses for container state (passed as --root)
CONTAINER_STATUS_PATH=/run/$CONTAINER_RUNTIME
CONTAINER_CMD="$CONTAINER_RUNTIME_PATH/$CONTAINER_RUNTIME --root=$CONTAINER_STATUS_PATH"
CONTAINER_CMD_ADDITIONAL_OPTIONS=""
# stdout/stderr of the runtime 'run' command are redirected here
CONTAINER_LOGS="/var/log/dante_container.log"
IMAGES_PATH=$PWD/dante_data/images
# where the active image's squashfs gets mounted before the container runs
ROOTFS_MOUNTPOINT=$PWD/bundle/rootfs
# file containing the id (subdirectory name under IMAGES_PATH) of the image to boot
ACTIVE_IMAGE_ID_PATH=$IMAGES_PATH/active
DANTE_JSON=$PWD/dante_data/capability/dante.json
# Check we can actually start/stop containers
# NOTE: on some systems 'id' might not be available, hence we check manually
if [ "$(grep -E '^Uid:' /proc/self/status | awk '{print $2}')" -ne 0 ]; then
echo "This script must be executed with root privileges."
echo ""
exit 1
fi
# Extract the value of a field from a JSON file (minimal parser for systems
# without `jq`). Prints the value, or "__NOT_FOUND__" when the key is absent
# or its value is empty. Exits the script if the file does not exist.
#
# This function assumes that:
# - the JSON file is well-formed
# - key and value appear on the same line
# - strings are double-quoted and don't contain escaped quotes
# - assumes the key exists exactly once per line
#
# Arguments:
#   $1 - path to the JSON file
#   $2 - field name to look up
get_json_field()
{
    json_file=$1
    field_name=$2
    default="__NOT_FOUND__"
    if [ ! -f "$json_file" ]; then
        echo "error: file '$json_file' not found" >&2
        exit 1
    fi
    # explaining each sed:
    # - 's/^[^:]*://' removes everything up to and including the first colon
    # - 's/^[[:space:]]*//' and 's/[[:space:]]*$//' trim surrounding whitespace
    #   (anchored at the start so a space INSIDE the value is never removed)
    # - 's/,$//' removes a trailing comma, BEFORE the quotes are stripped, so
    #   that a comma-terminated string value does not keep its closing quote
    # - 's/^"//' and 's/"$//' remove leading and trailing double quotes
    value=$(grep "\"$field_name\"" "$json_file" | \
            sed -e 's/^[^:]*://' | \
            sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | \
            sed -e 's/,$//' | \
            sed -e 's/[[:space:]]*$//' | \
            sed -e 's/^"//' -e 's/"$//' | head -n 1)
    if [ -z "$value" ]; then
        echo "$default"
    else
        echo "$value"
    fi
}
# Verify that the cgroup hierarchy required by the "cgroupVersion" key in
# dante.json is actually mounted. Missing or mismatched controller mounts
# only produce warnings; an unsupported cgroupVersion value is fatal.
# If the key is absent from dante.json, no checking is done.
check_cgroup_mounts()
{
cgroup_version=$(get_json_field "$DANTE_JSON" "cgroupVersion")
if [ "$cgroup_version" = "__NOT_FOUND__" ]; then
return
fi
# check_mount <path> <expected fs type>:
# scan /proc/mounts for <path> (or a combined-controller mount that
# contains it) and warn when it is missing or has the wrong type.
check_mount() {
path=$1
expectedType=$2
while IFS= read -r line; do
# get the second field (mount point) and the third field (type)
mountPoint=$(echo "$line" | awk '{print $2}')
mountType=$(echo "$line" | awk '{print $3}')
# if the mountPoint doesn't start with /sys/fs/cgroup, skip it
case "$mountPoint" in
/sys/fs/cgroup*) ;;
*) continue ;;
esac
# if mount point and type exactly match the expected values, we're good
if [ "$mountPoint" = "$path" ] && [ "$mountType" = "$expectedType" ]; then
echo "mount OK: $path ($expectedType)"
return
fi
# There is a chance multiple controllers are mounted on the same path,
# for instance we might be looking for /sys/fs/cgroup/cpu and have
#
# /sys/fs/cgroup/cpu,cpuacct cgroup etc..
#
# mounted instead.
# because we skip entries that do not start with /sys/fs/cgroup at
# the beginning of the loop, we know getting the substring after
# /sys/fs/cgroup at this point will yield an empty string at worst
cgroupSubstring=${mountPoint#/sys/fs/cgroup/}
# do the same with $path
cgroupPathSubstring=${path#/sys/fs/cgroup/}
# check if cgroupPathSubstring is part of cgroupSubstring
# eg this would successfully match 'cpu' against both 'cpuacct,cpu' and 'cpu,cpuacct'
if echo "$cgroupSubstring" | grep -qw "$cgroupPathSubstring"; then
if [ "$mountType" = "$expectedType" ]; then
echo "mount OK: $path ($expectedType)"
return
fi
fi
done < /proc/mounts
echo "warning: missing or incorrect mountpoint: $path (expected type: $expectedType)"
}
if [ "$cgroup_version" = "1" ]; then
echo "cgroup version set to v1 in $DANTE_JSON"
echo "checking mounts..."
check_mount "/sys/fs/cgroup" "tmpfs"
check_mount "/sys/fs/cgroup/cpuset" "cgroup"
check_mount "/sys/fs/cgroup/cpu" "cgroup"
check_mount "/sys/fs/cgroup/memory" "cgroup"
check_mount "/sys/fs/cgroup/devices" "cgroup"
elif [ "$cgroup_version" = "2" ]; then
echo "cgroup version set to v2 in $DANTE_JSON"
echo "checking mounts..."
check_mount "/sys/fs/cgroup" "cgroup2"
else
echo "error: unsupported cgroupVersion value ($cgroup_version) in $DANTE_JSON"
exit 1
fi
}
# Start the DEP container:
#  - refuse to start twice (detected via a running dep_manager process)
#  - mount the active squashfs image on the bundle rootfs mountpoint
#  - force-delete any stale 'dante' container, then 'run' a new detached one
start()
{
# A poorly-timed stop() could leave the container mounted while
# the processes inside the container were successfully shut down.
# Instead of relying on whether the container is there or not when
# deciding to start DEP, check whether dep_manager is actually running.
# shellcheck disable=SC2009
# (SC2009 recommends using pgrep, but it is not always available)
if ps -e >/dev/null 2>&1; then
PS_CMD="ps -e"
else
PS_CMD="ps" # assume heavily stripped-down BusyBox
fi
# "[d]ep_manager" pattern avoids grep matching its own command line
if $PS_CMD | grep -q "[d]ep_manager"; then
echo "DEP is already running"
exit 0
fi
# Some basic checks before proceeding
if [ ! -f "$ACTIVE_IMAGE_ID_PATH" ]; then
echo "error: $ACTIVE_IMAGE_ID_PATH not found, can't select active rootfs"
exit 1
fi
active_image_id=$(cat "$ACTIVE_IMAGE_ID_PATH")
rootfs="$IMAGES_PATH/$active_image_id/rootfs_squash"
if [ ! -f "$rootfs" ]; then
echo "error: $rootfs not found"
exit 1
fi
check_cgroup_mounts
mkdir -p /var/run/dante
mkdir -p ${CONTAINER_STATUS_PATH}
# Make sure /etc/resolv.conf is there when later on we
# try to bind mount it from the container.
if [ ! -f "/etc/resolv.conf" ]; then
touch /etc/resolv.conf
fi
# mount the image unless something is already mounted at the mountpoint
if ! grep -q " $ROOTFS_MOUNTPOINT " /proc/mounts; then
if ! mount "$rootfs" "$ROOTFS_MOUNTPOINT" >/dev/null 2>&1; then
echo "error: could not mount $rootfs"
exit 1
fi
fi
# At this point, it's safe to always forcefully delete the container.
#
# This may be necessary in scenarios where the DEP processes did not actually
# start after running ./dep.sh start — for example, due to invalid configuration
# in dante.json. In such cases, a user would typically inspect the logs,
# fix the underlying issue, and then retry with ./dep.sh start.
#
# However, if the dante container remains mounted, the container runtime's 'run'
# command will fail, forcing the user to manually delete the container - either
# by using ./dep.sh stop (which is not intuitive) or manually.
#
# To avoid these issues and make the recovery easier to execute, unconditionally
# remove the dante container before attempting to run it again.
#
# NOTE: while we could check whether the container exists before removing it,
# not all systems provide the necessary cgroup status layers to reliably list
# configured containers.
${CONTAINER_CMD} delete --force dante
# rootfs (only mount with no parent mount) cannot be pivot_root()ed. The check hereafter
# relies on the fact that rootfs will be either a ramfs or tmpfs. This is a bit more restrictive
# than necessary, as the container could in practice be started from a ramfs or tmpfs (as long as
# it is not the rootfs).
# WARNING: crun falls back to chroot when --no-pivot is enabled, and a process running in the container
# can in practice access the tree outside of the chroot.
ROOT_FSTYPE=$(mount|grep 'on / type'|awk '{print $5}')
if [ "$ROOT_FSTYPE" = "rootfs" ] || [ "$ROOT_FSTYPE" = "ramfs" ] || [ "$ROOT_FSTYPE" = "tmpfs" ]; then
CONTAINER_CMD_ADDITIONAL_OPTIONS="$CONTAINER_CMD_ADDITIONAL_OPTIONS --no-pivot"
fi
if ! ${CONTAINER_CMD} run ${CONTAINER_CMD_ADDITIONAL_OPTIONS} --detach --bundle ./bundle dante > "$CONTAINER_LOGS" 2>&1; then
echo "error: failed to start dante container, more details available in $CONTAINER_LOGS"
exit 1
else
echo "DEP started"
fi
}
# Stop the DEP container:
#  - ask the container init process (dep_manager) to shut down via SIGTERM
#  - poll up to 10 seconds for all processes inside the container to exit
#  - escalate to SIGKILL if any are still alive
#  - delete the container and unmount the bundle rootfs
stop()
{
    # in some cases we might have the mountpoint but no container running:
    # check if that's the case before proceeding
    if ${CONTAINER_CMD} list | grep dante >/dev/null 2>&1; then
        # stop the init process (dep_manager) by sending a SIGTERM signal
        echo "stopping DEP..."
        ${CONTAINER_CMD} kill dante TERM
        for _ in $(seq 1 10); do
            sleep 1
            # count processes still running in the container; keep grep
            # options BEFORE the pattern for stripped-down greps (BusyBox)
            DEP_PROCS=$(${CONTAINER_CMD} ps dante | grep -c -v PID)
            if [ "$DEP_PROCS" -eq 0 ]; then
                break
            fi
        done
        DEP_PROCS=$(${CONTAINER_CMD} ps dante | grep -c -v PID)
        if [ "$DEP_PROCS" -ne 0 ]; then
            echo "DEP still running, sending SIGKILL"
            ${CONTAINER_CMD} kill -a dante KILL
            sleep 1
        fi
        echo "removing container..."
        ${CONTAINER_CMD} delete --force dante
    fi
    if grep -q " $ROOTFS_MOUNTPOINT " /proc/mounts; then
        echo "umount rootfs..."
        # unmount the same path we just checked for in /proc/mounts
        umount "$ROOTFS_MOUNTPOINT"
    fi
    echo "done"
}
# Entry point: dispatch on the first command line argument.
USAGE_MESSAGE="Usage: dep.sh <start|stop>"
if [ "$#" -eq 0 ]; then
    echo "$USAGE_MESSAGE"
    exit 1
fi
if [ "$1" = "start" ]; then
    start "$2"
elif [ "$1" = "stop" ]; then
    stop
else
    echo "$USAGE_MESSAGE"
    exit 1
fi

View File

@@ -0,0 +1 @@
development/dep_check.sh

View File

@@ -0,0 +1,556 @@
#!/bin/sh
# This script collects all the necessary information/files for support, then bundle them into a single .tgz file.
# Copyright © 2022-2025 Audinate Pty Ltd ACN 120 828 006 (Audinate). All rights reserved.
#
#
# 1. Subject to the terms and conditions of this Licence, Audinate hereby grants you a worldwide, non-exclusive,
# no-charge, royalty free licence to copy, modify, merge, publish, redistribute, sublicense, and/or sell the
# Software, provided always that the following conditions are met:
# 1.1. the Software must accompany, or be incorporated in a licensed Audinate product, solution or offering
# or be used in a product, solution or offering which requires the use of another licensed Audinate
# product, solution or offering. The Software is not for use as a standalone product without any
# reference to Audinate's products;
# 1.2. the Software is provided as part of example code and as guidance material only without any warranty
# or expectation of performance, compatibility, support, updates or security; and
# 1.3. the above copyright notice and this License must be included in all copies or substantial portions
# of the Software, and all derivative works of the Software, unless the copies or derivative works are
# solely in the form of machine-executable object code generated by the source language processor.
#
# 2. TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT.
#
# 3. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL AUDINATE BE LIABLE ON ANY LEGAL THEORY
# (INCLUDING, WITHOUT LIMITATION, IN AN ACTION FOR BREACH OF CONTRACT, NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM,
# LOSS, DAMAGES OR OTHER LIABILITY HOWSOEVER INCURRED. WITHOUT LIMITING THE SCOPE OF THE PREVIOUS SENTENCE THE
# EXCLUSION OF LIABILITY SHALL INCLUDE: LOSS OF PRODUCTION OR OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF
# DATA OR RECORDS; OR LOSS OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR OTHER ECONOMIC
# LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR
# IN CONNECTION WITH THIS AGREEMENT, ACCESS OF THE SOFTWARE OR ANY OTHER DEALINGS WITH THE SOFTWARE, EVEN IF
# AUDINATE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS, DAMAGES OR OTHER LIABILITY.
#
# 4. APPLICABLE LEGISLATION SUCH AS THE AUSTRALIAN CONSUMER LAW MAY APPLY REPRESENTATIONS, WARRANTIES, OR CONDITIONS,
# OR IMPOSES OBLIGATIONS OR LIABILITY ON AUDINATE THAT CANNOT BE EXCLUDED, RESTRICTED OR MODIFIED TO THE FULL
# EXTENT SET OUT IN THE EXPRESS TERMS OF THIS CLAUSE ABOVE "CONSUMER GUARANTEES". TO THE EXTENT THAT SUCH CONSUMER
# GUARANTEES CONTINUE TO APPLY, THEN TO THE FULL EXTENT PERMITTED BY THE APPLICABLE LEGISLATION, THE LIABILITY OF
# AUDINATE UNDER THE RELEVANT CONSUMER GUARANTEE IS LIMITED (WHERE PERMITTED AT AUDINATE'S OPTION) TO ONE OF
# FOLLOWING REMEDIES OR SUBSTANTIALLY EQUIVALENT REMEDIES:
# 4.1. THE REPLACEMENT OF THE SOFTWARE, THE SUPPLY OF EQUIVALENT SOFTWARE, OR SUPPLYING RELEVANT SERVICES AGAIN;
# 4.2. THE REPAIR OF THE SOFTWARE;
# 4.3. THE PAYMENT OF THE COST OF REPLACING THE SOFTWARE, OF ACQUIRING EQUIVALENT SOFTWARE, HAVING THE RELEVANT
# SERVICES SUPPLIED AGAIN, OR HAVING THE SOFTWARE REPAIRED.
#
# 5. This License does not grant any permissions or rights to use the trade marks (whether registered or unregistered),
# the trade names, or product names of Audinate.
#
# 6. If you choose to redistribute or sell the Software you may elect to offer support, maintenance, warranties,
# indemnities or other liability obligations or rights consistent with this License. However, you may only act on
# your own behalf and must not bind Audinate. You agree to indemnify and hold harmless Audinate, and its affiliates
# from any liability claimed or incurred by reason of your offering or accepting any additional warranty or additional
# liability.
#
# NOTE: this script is intended to be run on production systems where the dante_package/development
# directory might not be available (thus no `jq` to rely on for JSON parsing) and basic tools such as `id`
# could be missing (e.g. BusyBox).
# Any changes to the script should take this into account.
# ANSI colour escape sequences used by the log helpers below
RED_COLOR="\e[01;31m"
GREEN_COLOR="\e[01;32m"
YELLOW_COLOR="\e[01;33m"
BLUE_COLOR="\e[01;34m"
END_COLOR="\e[0m"
# print all arguments wrapped in the given colour ('%b' expands the escapes)
red() { printf '%b %s %b' "$RED_COLOR" "$*" "$END_COLOR"; }
green() { printf '%b %s %b' "$GREEN_COLOR" "$*" "$END_COLOR"; }
blue() { printf '%b %s %b' "$BLUE_COLOR" "$*" "$END_COLOR"; }
yellow() { printf '%b %s %b' "$YELLOW_COLOR" "$*" "$END_COLOR"; }
# severity-tagged log lines (leading spaces keep the tags visually aligned)
logerr() { echo "[ $(red ERROR)] $1"; }
logwarn() { echo "[$(yellow WARNING)] $1"; }
loginfo() { echo "[ $(blue INFO)] $1"; }
logok() { echo "[ $(green OK)] $1"; }
# abort the script with a failure status
fail() { exit 1; }
# true when the given command is available on PATH
cmd_exists() { command -v -- "$1" >/dev/null 2>&1; }
# Extract the value of a field from a JSON file (minimal parser for production
# systems without `jq`). Prints the value, or "__NOT_FOUND__" when the key is
# absent or its value is empty. Exits the script if the file does not exist.
#
# This function assumes that:
# - the JSON file is well-formed
# - key and value appear on the same line
# - strings are double-quoted and don't contain escaped quotes
# - assumes the key exists exactly once per line
#
# Arguments:
#   $1 - path to the JSON file
#   $2 - field name to look up
get_json_field()
{
    json_file=$1
    field_name=$2
    default="__NOT_FOUND__"
    if [ ! -f "$json_file" ]; then
        echo "error: file '$json_file' not found" >&2
        exit 1
    fi
    # explaining each sed:
    # - 's/^[^:]*://' removes everything up to and including the first colon
    # - 's/^[[:space:]]*//' and 's/[[:space:]]*$//' trim surrounding whitespace
    #   (anchored at the start so a space INSIDE the value is never removed)
    # - 's/,$//' removes a trailing comma, BEFORE the quotes are stripped, so
    #   that a comma-terminated string value does not keep its closing quote
    # - 's/^"//' and 's/"$//' remove leading and trailing double quotes
    value=$(grep "\"$field_name\"" "$json_file" | \
            sed -e 's/^[^:]*://' | \
            sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | \
            sed -e 's/,$//' | \
            sed -e 's/[[:space:]]*$//' | \
            sed -e 's/^"//' -e 's/"$//' | head -n 1)
    if [ -z "$value" ]; then
        echo "$default"
    else
        echo "$value"
    fi
}
# where DEP is installed, default value
DEFAULT_DEP_PATH="/opt/dep"
# where DEP logs are stored, default value
DEFAULT_LOGS_PATH="/var/log"
# where temporary files created by this script will be stored, default value
DEFAULT_TEMP_PATH="/tmp"
# where the archive created by this script will be stored, default value
DEFAULT_OUTPUT_PATH=$(pwd)
# DEP container logs can only be stored in /var/log at the moment.
CONT_LOGS="/var/log/dante_container.log"
# Print the command-line help text for this tool.
usage() {
loginfo "Usage: $0 [OPTIONS]"
loginfo ""
loginfo "This tool collects diagnostic data to help debug issues with the DEP software."
loginfo ""
loginfo "Options:"
loginfo "  -c <path>    Specify the directory where DEP is installed."
loginfo "               Default is '${DEFAULT_DEP_PATH}'."
loginfo "  -l <path>    Specify the directory where DEP stores its log files."
loginfo "               Default is '${DEFAULT_LOGS_PATH}'."
loginfo "  -o <path>    Specify the output directory for the final archive and any temporary"
loginfo "               files or directories created in the process. This directory must be"
loginfo "               writable by the user executing the script."
loginfo "               Default is the current directory, '${DEFAULT_OUTPUT_PATH}'"
loginfo ""
loginfo "Examples:"
loginfo ""
loginfo "  $0 -c /apps/dep -l /tmp/logs"
loginfo ""
loginfo "  Collects diagnostic data from a DEP installation in /apps/dep, DEP log files in"
loginfo "  /tmp/logs, and stores the output in the current directory."
loginfo ""
loginfo "  $0 -c /apps/dep -l /tmp/logs -o /tmp/dep_diagnostics"
loginfo ""
loginfo "  Collects diagnostic data from a DEP installation in /apps/dep, DEP log files in"
loginfo "  /tmp/logs, and stores the output in /tmp/dep_diagnostics."
loginfo ""
loginfo "  $0 -o /home/user/dep_diagnostics"
loginfo ""
loginfo "  Uses the default DEP installation and log file paths, and stores the output in"
loginfo "  /home/user/dep_diagnostics."
}
# Copy a file or directory from a source to a destination.
#
# Arguments:
#   src (str): the source file or directory to be copied.
#   dst (str): the destination where the source will be copied.
#   msg (str): an error message to be logged if the copy operation fails.
#
# Behaviour:
#   If the source is a directory, the function performs a recursive
#   copy. If the copy operation fails for any reason, it logs a warning
#   message using the provided `msg` argument along with the captured
#   error message from the failed copy operation.
#
# NOTE: the function uses `eval` to allow for correct parameter expansion
# (e.g. "cp /var/log/dante_*" wouldn't work otherwise).
copy() {
src="$1"
dst="$2"
msg="$3"
# src/dst are intentionally left unquoted inside the command so globs expand
cmd="cp ${src} ${dst}"
if [ -d "${src}" ]; then
cmd="cp -r ${src} ${dst}"
fi
# failure is non-fatal: only a warning with the captured stderr is logged
err=$(eval "${cmd}" 2>&1)
res=$?
if [ "${res}" -ne 0 ]; then
logwarn "$msg: $err"
fi
}
# Validate that a directory exists and, optionally, that it is writable.
#
# Arguments:
#   path (str): directory to check.
#   check_write (str): '1' to also require write permission, '0' otherwise.
#   err_msg (str): optional additional error message shown on failure.
#
# Behaviour:
#   On any failure the problem is logged, the optional extra message is
#   logged as well, and the script exits with status 1. On success the
#   function simply returns.
check_path() {
    path="$1"
    check_write="$2"
    err_msg="$3"
    failed=0
    if [ ! -d "${path}" ]; then
        logerr "${path} is not a valid path"
        failed=1
    elif [ "${check_write}" = "1" ] && [ ! -w "${path}" ]; then
        logerr "you don't have writing permission for the directory: $path"
        failed=1
    fi
    if [ ${failed} -eq 1 ]; then
        if [ "${err_msg}" ]; then
            logerr "${err_msg}"
        fi
        exit 1
    fi
}
# Locate the running kernel's build configuration and place a copy in the
# support bundle directory ($1). The common locations are checked in order;
# a gzipped /proc/config.gz is decompressed when gunzip is available,
# otherwise the file is copied as-is.
collect_kernel_config() {
dest_path="$1"
config_file=""
is_gzipped=0
if [ -f "/proc/config.gz" ]; then
config_file="/proc/config.gz"
is_gzipped=1
elif [ -f "/boot/config-$(uname -r)" ]; then
config_file="/boot/config-$(uname -r)"
elif [ -f "/boot/config" ]; then
config_file="/boot/config"
elif [ -f "/lib/modules/$(uname -r)/build/.config" ]; then
config_file="/lib/modules/$(uname -r)/build/.config"
fi
if [ -z "$config_file" ]; then
logerr "no kernel config found in standard locations"
return
fi
loginfo "found kernel config at: $config_file"
# for gzipped config, try to decompress and copy
if [ "$is_gzipped" -eq 1 ]; then
if cmd_exists gunzip; then
if gunzip -c "$config_file" > "$dest_path"/kernel_config.txt 2>/dev/null; then
# if gunzip succeeds, early return to avoid copy
return
fi
fi
fi
copy "$config_file" "$dest_path" "Failed to copy config from $config_file to $dest_path"
}
# Parse the command line; note that -o sets BOTH the output and the
# temporary-files directory.
while getopts ":o:c:l:h" option; do
case $option in
o) # output directory
OUTPUT_PATH=$OPTARG
TEMP_PATH=$OPTARG
;;
l) # log directory
LOGS_PATH=$OPTARG
;;
c) # DEP install path
DEP_PATH=$OPTARG
;;
h) # display Help
usage
exit 0
;;
\?) # invalid option
errmsg="invalid option: -$OPTARG"
;;
:) # missing argument
errmsg="option -$OPTARG requires an argument."
;;
esac
done
# if we have an error from getopts, log it and exit
if [ -n "$errmsg" ]; then
logerr "$errmsg"
fail
fi
# if we can't create archives, we can't proceed
if ! cmd_exists tar; then
logerr "'tar' not found, unable to create archives"
fail
fi
# check whether we need to use defaults
# (':=' only assigns when the variable is unset or empty)
: "${DEP_PATH:=$DEFAULT_DEP_PATH}"
: "${LOGS_PATH:=$DEFAULT_LOGS_PATH}"
: "${TEMP_PATH:=$DEFAULT_TEMP_PATH}"
: "${OUTPUT_PATH:=$DEFAULT_OUTPUT_PATH}"
# if OUTPUT_PATH can't be written to, we can't proceed
# NOTE: by checking OUTPUT_PATH we also check TEMP_PATH:
# the latter is set to /tmp by default, so it is only necessary
# to make sure we can write to it when the user has specified
# a different directory, in which case OUTPUT_PATH would have
# the same value so it makes sense to only check OUTPUT_PATH
check_path "$OUTPUT_PATH" 1 "please choose a different directory using the -o option. Try $0 -h for more information"
# check that provided paths are valid
check_path "$DEP_PATH" 0 "please choose a different directory using the -c option. Try $0 -h for more information"
check_path "$LOGS_PATH" 0 "please choose a different directory using the -l option. Try $0 -h for more information"
# this script's own log file
LOGFILE="/tmp/collector.txt"
# start logging our own output:
# - create a named pipe
# - start tee reading from it in the background
# - redirect stdout and stderr to the named pipe
# trap command ensures that the named pipe gets deleted when the script exits.
mkfifo /tmp/tmpfifo
trap 'rm /tmp/tmpfifo && rm ${LOGFILE}' EXIT
tee -a "${LOGFILE}" < /tmp/tmpfifo &
exec > /tmp/tmpfifo 2>&1
# in a world where all shells support process substitution
# this is an alternative way
# exec > >(tee -a ${LOGFILE} )
# exec 2> >(tee -a ${LOGFILE} >&2)
# output what we're running with
loginfo "DEP install path: ${DEP_PATH}"
loginfo "DEP logs path: ${LOGS_PATH}"
loginfo "Temporary files will be saved in: ${TEMP_PATH}"
loginfo "Script output archive will be saved in: ${OUTPUT_PATH}"
# we'll use a subdir to store our data
SUPPORT_DIR=${TEMP_PATH}/dep_support
# where to store the ethtool output
ETHTOOL_FILE="${SUPPORT_DIR}/ethtoolinfo.txt"
# where to store the HW clock info
HW_CLKING_FILE="${SUPPORT_DIR}/hwclk.txt"
# in case the script was interrupted midway during a previous run
rm -rf "${SUPPORT_DIR}"
# if we can't create ${SUPPORT_DIR}, we can't proceed
if ! mkdir -p "${SUPPORT_DIR}" 2>/dev/null; then
logerr "cannot create directory ${SUPPORT_DIR}: permission denied"
fail
fi
# locations of the DEP configuration and activation data to collect
DANTE_JSON="$DEP_PATH"/dante_package/dante_data/capability/dante.json
CONFIG_JSON="$DEP_PATH"/dante_package/dante_data/capability/config.json
CONFIG_DEP="$DEP_PATH"/dante_package/dante_data/config
ACTIVATION_DIR="${DEP_PATH}/dante_package/dante_data/activation"
loginfo "Collecting config files..."
# if found, get dante.json
if [ -f "${DANTE_JSON}" ]; then
copy "${DANTE_JSON}" "${SUPPORT_DIR}" "collection of ${DANTE_JSON} failed"
else
logerr "dante.json not found in $(dirname "${DANTE_JSON}")"
fi
# if found, get config.json
if [ -f "${CONFIG_JSON}" ]; then
copy "${CONFIG_JSON}" "${SUPPORT_DIR}" "collection of ${CONFIG_JSON} failed"
else
logerr "config.json not found in $(dirname "${CONFIG_JSON}")"
fi
# if found, get all content from dante_data/config
if [ -d "${CONFIG_DEP}" ]; then
copy "${CONFIG_DEP}" "${SUPPORT_DIR}" "collection of DEP ${CONFIG_DEP} directory failed"
else
logerr "DEP config directory not found in $(dirname "${CONFIG_DEP}")"
fi
# check and collect activation files
if [ -d "${ACTIVATION_DIR}" ]; then
# copy whatever we have in the activation directory
copy "${ACTIVATION_DIR}" "${SUPPORT_DIR}" "collection of DEP activation files failed"
# warn about each expected activation file that is missing
for actFile in device.lic manufacturer.cert; do
if [ ! -f "${ACTIVATION_DIR}/${actFile}" ]; then
logwarn "activation file '${actFile}' not found in ${ACTIVATION_DIR}"
fi
done
else
logerr "DEP activation directory not found in $(dirname "${ACTIVATION_DIR}")"
fi
loginfo "Collecting DEP logs..."
# get all DEP logs (the glob is expanded inside copy() via eval)
mkdir -p "${SUPPORT_DIR}/logs"
copy "${LOGS_PATH}/dante_*" "${SUPPORT_DIR}/logs" "collection of DEP logs failed"
# get the container logs
mkdir -p "${SUPPORT_DIR}/logs"
copy "${CONT_LOGS}" "${SUPPORT_DIR}/logs" "collection of DEP container logs failed"
loginfo "Collecting system info..."
# get kernel config
collect_kernel_config "${SUPPORT_DIR}"
# get /proc/cpuinfo
copy "/proc/cpuinfo" "${SUPPORT_DIR}/cpuinfo.txt" "collection of /proc/cpuinfo failed"
# get /proc/interrupts
copy "/proc/interrupts" "${SUPPORT_DIR}/interrupts.txt" "collection of /proc/interrupts failed"
# get mount points
mount > "${SUPPORT_DIR}/mountinfo.txt" || logwarn "collection of mount points failed"
# get info about running processes: try including thread info first,
# in case of failure (e.g. "ps" is actually BusyBox) fall back to processes only
if ! ps -efL > "${SUPPORT_DIR}/processinfo.txt" 2> /dev/null; then
ps > "${SUPPORT_DIR}/processinfo.txt" || logwarn "unable to write process info into ${SUPPORT_DIR}/processinfo.txt"
fi
# get the list of active sockets
if cmd_exists netstat; then
netstat -anp 2>/dev/null > "${SUPPORT_DIR}/netstat.txt" || logwarn "unable to collect active socket info"
else
logwarn "netstat command not available"
fi
# get info about network interfaces
if cmd_exists ip; then
ip address > "${SUPPORT_DIR}/ipinfo.txt" || logwarn "unable to write ip info to ${SUPPORT_DIR}/ipinfo.txt"
else
logwarn "ip command not available"
fi
# get ALSA version (userspace libs)
if cmd_exists aplay; then
aplay --version > "${SUPPORT_DIR}/alsa.txt" || logwarn "unable to write ALSA version to ${SUPPORT_DIR}/alsa.txt"
fi
# get kernel messages
if cmd_exists dmesg; then
dmesg > "${SUPPORT_DIR}/dmesg.txt" || logwarn "unable to collect kernel messages - dmesg failed"
fi
# get device nodes
ls -l /dev > "${SUPPORT_DIR}/device_nodes.txt" || logwarn "unable to collect info about device nodes"
# get timestamp and coalesce info about each network interface
if cmd_exists ethtool; then
for NETWORK_INTERFACE in /sys/class/net/*; do
INTERFACE_NAME=$(basename "$NETWORK_INTERFACE")
{
echo "ethtool -c \"$INTERFACE_NAME\""
ethtool -c "$INTERFACE_NAME" 2>&1
echo "------------------------"
} >> "$ETHTOOL_FILE"
{
echo "ethtool -T \"$INTERFACE_NAME\""
ethtool -T "$INTERFACE_NAME" 2>&1
echo "------------------------"
} >> "$ETHTOOL_FILE"
done
else
logwarn "ethtool command not available"
fi
# get info for HW clocking, if enabled in dante.json
if [ -f "${DANTE_JSON}" ]; then
MNT_DIR="${SUPPORT_DIR}/mnt"
# NOTE(review): image id is hard-coded to 0 here — confirm this matches
# the 'active' image on systems with multiple images installed
ROOTFS_FILE="$DEP_PATH/dante_package/dante_data/images/0/rootfs_squash"
useHwClock=$(get_json_field "${DANTE_JSON}" useHwClock)
if [ "$useHwClock" = "true" ]; then
circuitName=$(get_json_field "${DANTE_JSON}" circuitName)
i2cBus=$(get_json_field "${DANTE_JSON}" i2cBus)
i2cAddr=$(get_json_field "${DANTE_JSON}" i2cAddr)
{
echo "circuitName=$circuitName"
echo "i2cBus=$i2cBus"
echo "i2cAddr=$i2cAddr"
} >> "$HW_CLKING_FILE"
# hwclkcfg binary is in the DEP rootfs so mount rootfs first and then run it
mkdir -p "${MNT_DIR}"
if ! mount "$ROOTFS_FILE" "${MNT_DIR}"; then
logerr "unable to collect HW clocking info: rootfs mount failed"
else
"$MNT_DIR"/dante/hwclkcfg -c --i2cbus "$i2cBus" --i2caddr "$i2cAddr" "$circuitName" >> "$HW_CLKING_FILE" 2>&1
umount "${MNT_DIR}" 2> /dev/null
fi
rm -rf "${MNT_DIR}"
fi
fi
# if we are UID 0, run dep_check.sh and save its output
if [ "$(grep -E '^Uid:' /proc/self/status | awk '{print $2}')" -eq "0" ]; then
if [ ! -f "./development/dep_check.sh" ]; then
logwarn "dep_check.sh not found, skipping"
else
loginfo "Run dep_check and collect its output..."
{ ./development/dep_check.sh "${DEP_PATH}" > "${SUPPORT_DIR}/depcheck.txt"; } 2>&1
# remove escape characters from dep_check.sh output
sed -i 's/[^[:print:]]\[[0-9;]*[a-zA-Z]//g' "${SUPPORT_DIR}/depcheck.txt"
fi
else
logwarn "could not run dep_check.sh because user was not root"
fi
# add this script own logs to the bundle
if [ -f "$LOGFILE" ]; then
# remove escape characters from this script output
sed -i 's/[^[:print:]]\[[0-9;]*[a-zA-Z]//g' "$LOGFILE"
fi
loginfo "Create final archive..."
# copy our own logs to the support directory, fail silently
cp "$LOGFILE" "${SUPPORT_DIR}/collector.txt" || true
# bundle everything together
timestamp=$(date "+%Y.%m.%d-%H.%M.%S")
tgz_name="dep_support-${timestamp}.tgz"
if ! tar czf "${OUTPUT_PATH}"/"${tgz_name}" -C "$(dirname "${SUPPORT_DIR}")" "$(basename "${SUPPORT_DIR}")" > /dev/null 2>&1; then
logerr "unable to bundle support files in ${OUTPUT_PATH}/${tgz_name}"
_exit_val=1
else
logok "DEP log files and system info bundled in ${OUTPUT_PATH}/${tgz_name}"
_exit_val=0
fi
# remove temporary data
rm -rf "${SUPPORT_DIR}"
exit ${_exit_val}
#
# Copyright © 2022-2025 Audinate Pty Ltd ACN 120 828 006 (Audinate). All rights reserved.
#

BIN
src/dep/dante_package/depconfig Executable file

Binary file not shown.

View File

@@ -0,0 +1,27 @@
[Unit]
Description=Dante Embedded Platform
# Start only after basic network setup has completed.
After=network.target
[Service]
# NOTE(review): Type=simple combined with RemainAfterExit=yes is unusual;
# RemainAfterExit is normally paired with Type=oneshot. If dep.sh spawns
# daemons and exits, Type=oneshot (or Type=forking together with the PIDFile
# below) may describe the lifecycle more accurately -- confirm against dep.sh.
Type=simple
# Optional: restrict CPU affinity for relevant slices before the service starts.
# Uncomment one or more of the following lines to pin system slices to specific CPUs.
#
# Notes:
# - These affect *other* processes in the corresponding slices (init, user, system), not just this service.
# - This may impact unrelated services or user sessions: if possible, CPU isolation should be obtained
#   at a system level by tuning the kernel command line
#
#ExecStartPre=/usr/bin/systemctl set-property init.scope AllowedCPUs=0,1
#ExecStartPre=/usr/bin/systemctl set-property user.slice AllowedCPUs=0,1
#ExecStartPre=/usr/bin/systemctl set-property system.slice AllowedCPUs=0,1
ExecStart=/opt/dep/dante_package/dep.sh start
# NOTE(review): no ExecStop= is defined, so stopping the unit runs only this
# post-stop hook; confirm that "dep.sh stop" performs all required cleanup.
ExecStopPost=/opt/dep/dante_package/dep.sh stop
WorkingDirectory=/opt/dep/dante_package
PIDFile=/run/dante.pid
RemainAfterExit=yes
[Install]
# Enabled for the standard multi-user boot target.
WantedBy=multi-user.target

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,429 @@
PTP TIMESTAMPING TEST TOOL OVERVIEW
===================================
ptp_timestamping_test is a standalone tool that can be used independently of DEP
itself to do a definitive check of a network driver's timestamping capabilities.
After installing DEP, this tool should ALWAYS be run before starting DEP for the
first time. This is to ensure that the driver(s) for the selected network interface(s)
are able to properly timestamp all PTP event packets using the required timestamping
mode (i.e. hardware, PTPv1-only hardware or software).
If problems are found with the driver(s) when timestamping in a given mode, DEP
should NOT be started in that mode, as otherwise PTP will either fail to start
or will experience synchronisation issues.
In such a case, in order to run DEP:
- To continue using the given mode, the driver(s) must be updated and the tool
rerun until problems are no longer reported
- Or, another timestamping mode that shows no problems must be used instead
CONTENTS
========
1. Test types
2. Test requirements
a. Preliminary check
b. Full test
3. Running tests
a. Via dep_check.sh
b. Running manually
4. Understanding test results
a. Preliminary check
b. Full test
c. Full test output examples
5. Using event + error logging
6. Test behaviour and duration
a. Test packet receive and send behaviour
b. Test duration
1. TEST TYPES
=============
There are two types of tests that can be done with the tool:
1) A preliminary check of whether a driver actually supports configuring a
particular timestamping mode. This test tries to create a network socket
configured to use the selected mode. The success or failure of this test
is a much more reliable indicator of configuration support than the output
of 'ethtool -T', as a driver's reported claims are not always 100% accurate.
Nevertheless, as a guide to the user the tool gathers and displays the
equivalent information shown via 'ethtool -T'.
2) A full test. This uses a configured network socket to send and receive
all PTP event packet types, and checks that they all get timestamped.
A full test is the only real way to determine if timestamping is fully operational.
A simple preliminary check is NOT a substitute for this.
However, if a timestamping mode fails a preliminary check it means that mode
CANNOT be used (unless the driver can be updated/rectified).
2. TEST REQUIREMENTS
====================
The tool must always be run as root.
a. Preliminary check
--------------------
Running a preliminary check has no extra requirements. This check can be done
at any time.
b. Full test
------------
The device being tested must be connected to a Dante network. And, in addition
to this device:
- At least two Dante devices (one leader, and at least one follower) must be
present on the network
- If AES67 and/or site/domain unicast clocking are going to be used, at least
two PTPv2-capable devices (again, one leader and at least one follower) must
be present:
* At least one of these must be a third-party (i.e. non-Dante) device
* Two Dante devices running in AES67 mode are NOT a suitable substitute, as
without a third-party device there will be no PTPv2 follower devices
Points of note:
- ALL devices (both Dante and third-party) MUST be in the unmanaged domain (i.e.
in the default subdomain for PTPv1, and in domain number 0 for PTPv2)
- The tool assumes that all PTP traffic is multicast. Therefore, any follower
devices configured to use unicast delay requests will not be visible and thus
cannot be used
- If only one device of each PTPv1/PTPv2 type is present (as a leader), the
tool can still be run but it will report that the test is incomplete
- Running the test will NOT cause clock synchronisation to be disrupted on the
network
3. RUNNING TESTS
================
a. Via dep_check.sh
-------------------
dep_check.sh will use the tool to do preliminary checks of all three timestamping
modes for the interface(s) configured in dante.json. Upon completion, it will
display a complete command line(s) (with parameters based on the settings in
dante.json) that the user can run to do a full test(s) for:
- The mode in dante.json, and/or
- The recommended mode (if different to the above), based on the check results
If the timestamping mode set in dante.json fails a preliminary check, dep_check.sh
will show a complete command line the user can run to see the full output of the
check if desired.
b. Running manually
-------------------
Running the tool with the -h option will show how the tool can be used.
As stated above dep_check.sh provides complete command lines for convenience,
however manual usage is fairly straightforward:
- The value for -i is the interface under "network" in dante.json
- The timestamping mode can be set as required
- If "dsaTaggedPackets" in dante.json is set to true, -dsa must be specified
- If -dsa is used along with either -hw or -hwv1, a value must be supplied
for -hwi. This is the entry under "clock.hardwareInterfaces" in dante.json
that corresponds to the value supplied for -i
- By default, the tool will run a full test. Specifying -c will result in the
tool running only a preliminary check
4. UNDERSTANDING TEST RESULTS
=============================
a. Preliminary check
--------------------
The result will be a simple pass or fail, depending on whether a network socket
could be configured with the chosen settings.
The tool will also display the timestamping capabilities reported by the driver
(the same ones shown via 'ethtool -T') and indicate whether these match what the
user wants. HOWEVER, even if the reported capabilities do not match, the check
still proceeds to attempt the network socket setup, as it MAY succeed in spite
of the mismatch.
b. Full test
------------
For timestamping to be deemed operational, the driver MUST be able to timestamp:
- Both PTP event packet types (SYNC and DELAY_REQ)
- In both directions
Furthermore, in order to be able to run DEP:
- To use Dante audio, PTPv1 timestamping must fully work
- To use AES67 with a third-party device and to enable site/domain unicast
clocking, PTPv2 timestamping must also work
The test output provides a terse summary of whether timestamping for a particular
version of PTP was:
- All OK
- Found to have errors for a particular packet type + timestamping direction
- Not (completely) tested due to lack of devices present
Refer to the next section for some example full test outputs.
The tool will display detailed results for the packet version + type(s) that
encountered any timestamping errors. These results provide specific information
on the number of packets sent/received, the number successfully timestamped and
how many attempts resulted in errors. This can be useful when debugging a network
driver. If however, yet more specific details are required about the types of
errors and/or the relative times at which packet events + timestamping operations
occur, the logging option can be used. This is discussed further below.
c. Full test output examples
----------------------------
On a device with a working driver, assuming both PTPv1 and PTPv2 leaders and
followers are present the output will look like the following:
# ./ptp_timestamping_test -i eno2 -hw
Using interface eno2, with hardware timestamping
Checking PHC resolution...
Checking PHC read speed... 4283ns (approx.)
Testing PTP timestamping...
Testing v1 SYNC packets... OK
Testing v1 DELAY_REQ packets... OK
Testing v2 SYNC packets... OK
Testing v2 DELAY_REQ packets... OK
TEST SUMMARY
============
PTPv1 all OK
PTPv2 all OK
Points of note:
- When using hardware timestamping, the first thing the tool does is perform some
measurements of the PHC (PTP Hardware Clock)
- The approximate read speed of the PHC should be noted. NICs that perform
timestamping at the PHY instead of the MAC layer will exhibit very large values
here (greater than about 100,000ns), and these are typically unsuitable for use
with DEP as the slow read times will adversely affect its PTP accuracy
The outputs below were produced on a board which has problems with hardware
timestamping (but not software timestamping). Also:
- The Dante network used had two devices
- One of the devices had AES67 turned on (and so there was a PTPv2 leader
present, but no PTPv2 followers)
Software timestamping result:
# ./ptp_timestamping_test -i eth1
Using interface eth1, with software timestamping
Testing PTP timestamping...
Testing v1 SYNC packets... OK
Testing v1 DELAY_REQ packets... OK
Testing v2 SYNC packets... OK
Testing v2 DELAY_REQ packets... none detected
TEST SUMMARY
============
PTPv1 all OK
PTPv2 only partially tested - no follower devices detected
Hardware timestamping result:
# ./ptp_timestamping_test -i eth1 -hw
Using interface eth1, with hardware timestamping
Checking PHC resolution...
Checking PHC read speed... 3928ns (approx.)
Testing PTP timestamping...
Testing v1 SYNC packets... OK
Testing v1 DELAY_REQ packets... Tx problems
Testing v2 SYNC packets... OK
Testing v2 DELAY_REQ packets... none detected
TEST SUMMARY
============
PTPv1 SYNCs OK, errors found with DELAY_REQs - details below
PTPv2 only partially tested - no follower devices detected
DETAILED RESULTS
================
v1 DELAY_REQ
------------
Rx packet receive limit: 5
Rx packets received: 5
Rx packets timestamped: 5
Rx packet timestamping errors: 0
Rx packets from additional followers received: 0
Rx packets from additional followers timestamped: 0
Rx packet timestamping errors for additional followers: 0
Tx packets sent: 5
Tx packets timestamped: 0
Tx packets with slow timestamps: 0
Tx packet timestamping errors: 0
The detailed results show that:
- All Rx packets (up to the default receive limit of 5) from the follower the
test chose were successfully timestamped
- No other followers were present (and so DELAY_REQs from those were not received)
- No Tx packets were timestamped, HOWEVER there were no errors. This is an
indication that the driver did not attempt to timestamp any outgoing v1 DELAY_REQs
For some drivers, timestamping operations do indeed take place BUT result in
errors. The following detailed results are from a board whose driver produces
these:
DETAILED RESULTS
================
v1 SYNC
-------
Rx packet receive limit: 20
Rx packets received: 20
Rx packets timestamped: 0
Rx packet timestamping errors: 20
Rx FOLLOW-UPs received: 20
Multiple leaders detected: no
Tx packets sent: 20
Tx packets timestamped: 20
Tx packets with slow timestamps: 0
Tx packet timestamping errors: 0
These details show that:
- All 20 (the default receive limit) SYNC packets were indeed received, however
none were timestamped
- Timestamping was attempted each time, but always resulted in an error
- There were no issues timestamping outgoing packets
The details in a SYNC report also include network information that may be of
interest to the user:
- Most PTP leaders issue FOLLOW-UP packets after each SYNC. These do not need
to be timestamped, however the tool listens for these and checks that they
indeed come from the leader sending the SYNCs. If no FOLLOW-UPs are received,
this may indicate either a problematic leader OR the presence of a leader
using the one-step rather than the more common two-step synchronisation method
- If the tool detects more than one leader on the network, it will be indicated
here
5. USING EVENT + ERROR LOGGING
==============================
While the full test outputs above point to which timestamping operations fail to
work, in some cases (e.g. if trying to debug a network driver) a detailed timeline
of packet and timestamping events, along with the specific errors that occurred,
can be useful.
By using the -l option along with a file to log to, the tool will produce its
normal output but also place all events into that file. The logs will contain:
- The start and end of each test
- Every packet receive and send
- Every successful timestamp read (Rx/Tx), along with the timestamp value
- If an error reading a timestamp occurs, a description of the error (ancillary
data truncated, no timestamp information, insufficient timestamp data)
- If timestamping is not taking place at all (as in the example above, for Tx),
the log will be missing these events
Each line also starts with a timestamp (seconds and nanoseconds), which is the
value of CLOCK_REALTIME at the moment an event or error was logged.
NOTE: When running a hardware timestamping test, you will notice that the first
test in the log is a PTPv1 "scratch" test. This is a throwaway test done at the
start that, on some devices, will exhibit odd Tx timestamping and/or errors.
A scratch test is always done because in some cases a driver will only start
reporting correct timestamps after a few initial socket and timestamp operation
failures. This way, the actual tests of interest are not affected. The scratch
test logs can be safely ignored (although initial socket behaviour after setup
may be of interest to some).
6. TEST BEHAVIOUR AND DURATION
==============================
a. Test packet receive and send behaviour
-----------------------------------------
The test uses packet receive limits for each PTP event packet type (for both
protocol versions). By default, these are:
- 20 SYNCs from the first (and ideally only) leader seen
- 5 DELAY_REQs from the first follower detected
When transmitting packets:
- For a SYNC test, each received SYNC is copied and sent out
- For a DELAY_REQ, packet sends are throttled if required so that not less than
0.25 seconds can elapse before a DELAY_REQ is sent. The sent packet is a copy
of the last received DELAY_REQ. DELAY_REQ tests count packets from the first
follower detected but otherwise receive and timestamp DELAY_REQs from any and
all followers on the network
Each test only ends when:
- The receive limit has been reached, and
- For SYNCs, that same number has been sent out
- For DELAY_REQs, a minimum of that same number has been sent
- Or, the test times out waiting for a packet to arrive
- For SYNCs, the test will wait 2 seconds before timing out
- For DELAY_REQs, this figure is 8 seconds (because it can be up to 7.5 seconds
between packet arrivals)
- Or, a socket error occurs (these should NOT occur unless there is a system
issue or network connectivity is suddenly lost)
NOTE: If the test sees no SYNC packets for a particular PTP version, it will
automatically skip the DELAY_REQ test for that version as, in the absence of a
leader, there will not be followers sending DELAY_REQs. If no SYNCs are detected
despite the device being on a populated Dante network (or one with PTPv2 devices),
it may be the case that multicast PTP traffic is not being sent to this device.
If this happens, the network and/or switch(es) should be checked to ensure that
the device receives multicast PTP.
b. Test duration
----------------
At the default receive limits, a test will last about 25 seconds typically:
- SYNCs from Dante leaders are normally sent every 0.25 seconds
- DELAY_REQs from Dante followers are sent at varying intervals, but on average
are around 4-5 seconds
Note: third-party AES67 devices may have their own packet send intervals
To run a shorter or longer test, the -nsy and -ndr tool options can be used:
- If a driver is known to have working timestamping for a particular packet
type but not another, the limit for the working type can be reduced
- Setting the limit to 0 will skip that packet type entirely (and the full test
output will say so, and also warn of an incomplete test)
- On the other hand, it may be the case that a driver only begins to exhibit
problems after running for a while. In this case, the limits can be increased
using the send intervals above as a rough guide for determining the approximate
test duration

Binary file not shown.

View File

@@ -0,0 +1,3 @@
DEP_VERSION=1.5.0.2
DEP_GIT_HASH=
DEP_BUILD_TIMESTAMP=2025-08-20_05-14-33_UTC

View File

@@ -26,3 +26,321 @@ pcm.ch2 {
}
bindings.0 1
}
# ============================================================
# DEP Dante RX -> ALSA Loopback is now done by DEP ALSA ASRC.
# So: NO alsaloop needed anymore.
#
# Apps read from hw:Loopback,1,0 via dsnoop fanout,
# then we split into 6 mono virtual devices.
# ============================================================
# ---- shared 6ch capture from Loopback with dsnoop fanout ----
# dsnoop lets multiple applications capture from the same device
# concurrently, each receiving its own copy of the stream.
pcm.dante_asrc_shared6 {
type dsnoop
# system-wide shared-memory key identifying this dsnoop instance
ipc_key 1048577
# mix the UID into the key so each user gets an independent instance
ipc_key_add_uid true
# world read/write so any local application can open the capture
ipc_perm 0666
slave {
pcm "hw:Loopback,1,0" # capture side of ALSA loopback
channels 6
rate 48000
format S16_LE
period_size 240 # 5 ms per period at 48 kHz
buffer_size 960 # 20 ms total (4 periods)
}
hint { show on ; description "DEP RX (via ASRC) shared 6ch (loopback+dsnoop)" }
}
# ---- 6 mono devices (each maps one of the 6 channels) ----
# (Using route explicitly makes the intent very clear.)
# ttable.<out>.<src> <gain>: each device routes exactly one source channel
# of the shared 6ch capture to its single output channel at unity gain.
pcm.dante_asrc_ch1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
hint { show on ; description "DEP RX CH1" }
}
pcm.dante_asrc_ch2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
hint { show on ; description "DEP RX CH2" }
}
pcm.dante_asrc_ch3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
hint { show on ; description "DEP RX CH3" }
}
pcm.dante_asrc_ch4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
hint { show on ; description "DEP RX CH4" }
}
pcm.dante_asrc_ch5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
hint { show on ; description "DEP RX CH5" }
}
pcm.dante_asrc_ch6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
hint { show on ; description "DEP RX CH6" }
}
# ---- Stereo devices for Dante (combine any two channels as L+R) ----
# These devices route selected source channels to stereo output
# Format: dante_stereo_<left_ch>_<right_ch>
# ttable.0.<L-1> feeds the left output, ttable.1.<R-1> feeds the right
# output (source channels are zero-based, device names are one-based).
# This section covers all pairs with left channel < right channel; the
# reversed pairs follow in the next section.
pcm.dante_stereo_1_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1 # Left channel from ch1
ttable.1.1 1 # Right channel from ch2
hint { show on ; description "DEP RX Stereo CH1+CH2" }
}
pcm.dante_stereo_1_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH1+CH3" }
}
pcm.dante_stereo_1_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH1+CH4" }
}
pcm.dante_stereo_1_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH1+CH5" }
}
pcm.dante_stereo_1_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.0 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH1+CH6" }
}
pcm.dante_stereo_2_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH2+CH3" }
}
pcm.dante_stereo_2_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH2+CH4" }
}
pcm.dante_stereo_2_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH2+CH5" }
}
pcm.dante_stereo_2_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH2+CH6" }
}
pcm.dante_stereo_3_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH3+CH4" }
}
pcm.dante_stereo_3_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH3+CH5" }
}
pcm.dante_stereo_3_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH3+CH6" }
}
pcm.dante_stereo_4_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH4+CH5" }
}
pcm.dante_stereo_4_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH4+CH6" }
}
pcm.dante_stereo_5_6 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.5 1
hint { show on ; description "DEP RX Stereo CH5+CH6" }
}
# ---- Reverse stereo devices (for when left channel > right channel) ----
# Same routing scheme as above (ttable.0.<L-1> -> left, ttable.1.<R-1> ->
# right), covering all pairs where the left source channel number is higher
# than the right one, so every ordered pair has a device.
pcm.dante_stereo_2_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.1 1 # Left from ch2
ttable.1.0 1 # Right from ch1
hint { show on ; description "DEP RX Stereo CH2+CH1" }
}
pcm.dante_stereo_3_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH3+CH1" }
}
pcm.dante_stereo_3_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.2 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH3+CH2" }
}
pcm.dante_stereo_4_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH4+CH1" }
}
pcm.dante_stereo_4_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH4+CH2" }
}
pcm.dante_stereo_4_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.3 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH4+CH3" }
}
pcm.dante_stereo_5_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH5+CH1" }
}
pcm.dante_stereo_5_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH5+CH2" }
}
pcm.dante_stereo_5_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH5+CH3" }
}
pcm.dante_stereo_5_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.4 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH5+CH4" }
}
pcm.dante_stereo_6_1 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.0 1
hint { show on ; description "DEP RX Stereo CH6+CH1" }
}
pcm.dante_stereo_6_2 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.1 1
hint { show on ; description "DEP RX Stereo CH6+CH2" }
}
pcm.dante_stereo_6_3 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.2 1
hint { show on ; description "DEP RX Stereo CH6+CH3" }
}
pcm.dante_stereo_6_4 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.3 1
hint { show on ; description "DEP RX Stereo CH6+CH4" }
}
pcm.dante_stereo_6_5 {
type route
slave { pcm "dante_asrc_shared6"; channels 6; }
ttable.0.5 1
ttable.1.4 1
hint { show on ; description "DEP RX Stereo CH6+CH5" }
}

0
src/misc/install_asoundconf.sh Normal file → Executable file
View File