Compare commits

...

4 Commits

Author SHA1 Message Date
Pbopbo
0c7de92ae9 Refactoring to sort results in year/month/day folder. 2026-04-08 11:10:12 +02:00
Pbopbo
dd118ddb23 Adds check if test is valid to latency test. 2026-04-08 10:27:22 +02:00
Pbopbo
31cc2c0e92 more plotting 2026-04-07 16:54:05 +02:00
Pbopbo
2cab55c8cd Plotting for write read time and avail logs. 2026-03-31 11:55:44 +02:00
9 changed files with 502 additions and 13 deletions

View File

@@ -39,6 +39,10 @@ artifact_detection:
enabled: true
threshold_db: 6.0 # Energy change threshold in dB between consecutive windows (detects level changes)
latency:
max_std_dev_ms: 0.5 # Maximum allowed std deviation; test fails if exceeded
min_avg_ms: 1.0 # Minimum expected average latency; near-zero indicates bad loopback
latency_buildup:
measurement_interval: 10 # seconds between latency measurements
max_duration: null # maximum test duration in seconds (null = run until canceled)

92
plot_alsa_status.py Normal file
View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""Parse ALSA status log file and plot avail value over time."""
import sys
import re
import os
from datetime import datetime
import matplotlib.pyplot as plt
TIMESTAMP_RE = re.compile(r"^===== (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+) =====")
AVAIL_RE = re.compile(r"^avail\s*:\s*(\d+)")


def parse_log(log_path):
    """Extract (timestamp, avail) pairs from an ALSA status log.

    A pair is a "===== <timestamp> =====" banner followed (eventually) by an
    "avail : <n>" line. Returns two parallel lists: seconds relative to the
    first pair's timestamp, and the integer avail values. Exits with status 1
    if no complete pair is found.
    """
    pairs = []
    pending_ts = None
    with open(log_path, "r") as fh:
        for raw in fh:
            text = raw.strip()
            banner = TIMESTAMP_RE.match(text)
            if banner:
                # Remember the banner time until we see its avail line.
                pending_ts = datetime.strptime(banner.group(1), "%Y-%m-%d %H:%M:%S.%f")
                continue
            if pending_ts is None:
                continue
            hit = AVAIL_RE.match(text)
            if hit:
                pairs.append((pending_ts, int(hit.group(1))))
                pending_ts = None  # Reset until the next banner
    if not pairs:
        print("No valid timestamp/avail pairs found in the log file.", file=sys.stderr)
        sys.exit(1)
    # Convert to relative seconds from first timestamp
    origin = pairs[0][0]
    seconds = [(ts - origin).total_seconds() for ts, _ in pairs]
    avail_values = [value for _, value in pairs]
    return seconds, avail_values
def plot(seconds, avail_values, out_path):
    """Plot avail vs. time, overlay a centered moving mean, and save a PNG.

    Args:
        seconds: sample times relative to the first log entry, in seconds.
        avail_values: avail sample counts aligned with ``seconds``.
        out_path: destination path for the saved figure.
    """
    plt.figure(figsize=(12, 6))
    plt.plot(seconds, avail_values, label="avail", linewidth=1, alpha=0.7)
    # Add moving average (windowed mean)
    if len(avail_values) >= 10:  # Only if we have enough data points
        window_size = min(50, len(avail_values) // 10)  # Adaptive window size
        import numpy as np
        moving_avg = np.convolve(avail_values, np.ones(window_size) / window_size, mode='valid')
        # Align each mean with the *center* of its window, as intended.
        # (Previously this used seconds[window_size-1:], which aligned each
        # mean with the window's last sample and lagged the raw curve by
        # half a window.)
        offset = (window_size - 1) // 2
        ma_seconds = seconds[offset:offset + len(moving_avg)]
        plt.plot(ma_seconds, moving_avg, label=f"moving mean (window={window_size})", linewidth=2)
    plt.xlabel("Time (s)")
    plt.ylabel("Available samples")
    plt.title("ALSA Available Samples Over Time")
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(out_path, dpi=150)
    print(f"Plot saved to {out_path}")
def main():
    """Command-line entry point: parse one ALSA status log and save the plot
    next to it as "<logname>_avail_plot.png"."""
    if len(sys.argv) != 2:
        print(f"Usage: {sys.argv[0]} <path_to_alsa_status_log>", file=sys.stderr)
        sys.exit(1)
    source = sys.argv[1]
    if not os.path.isfile(source):
        print(f"File not found: {source}", file=sys.stderr)
        sys.exit(1)
    seconds, avail_values = parse_log(source)
    # Output PNG lives in the same directory as the input log.
    target_dir = os.path.dirname(os.path.abspath(source))
    stem = os.path.splitext(os.path.basename(source))[0]
    plot(seconds, avail_values, os.path.join(target_dir, f"{stem}_avail_plot.png"))


if __name__ == "__main__":
    main()

302
plot_combined.py Normal file
View File

@@ -0,0 +1,302 @@
#!/usr/bin/env python3
"""Combine ALSA avail, perf metrics, and latency plots into one figure."""
import sys
import re
import os
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
# Regex patterns
# ALSA status log banner, e.g. "===== 2026-04-08 11:10:12.123456 =====" (full date, includes year).
TIMESTAMP_RE = re.compile(r"^===== (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+) =====")
# "avail : <count>" field that follows a banner in the same log.
AVAIL_RE = re.compile(r"^avail\s*:\s*(\d+)")
# Syslog-style perf summary line carrying three millisecond means.
# NOTE: the captured timestamp ("Apr 7 16:54:05") has no year component.
PERF_RE = re.compile(
    r"^(\w+ \d+ \d+:\d+:\d+) .* Perf\(.*?\):"
    r".*?sample mean=([\d.]+)ms"
    r".*?write mean=([\d.]+)ms"
    r".*?loop mean=([\d.]+)ms"
)
# Log line containing the word "latency" and a trailing "<value>ms"; timestamp also has no year.
LATENCY_RE = re.compile(r"^(\w+ \d+ \d+:\d+:\d+).*latency.*?(\d+(?:\.\d+)?)ms")
# PyALSA avail sampled immediately before a read.
PYALSA_AVAIL_BEFORE_RE = re.compile(r"^(\w+ \d+ \d+:\d+:\d+).*PyALSA: avail before read: (\d+)")
# PyALSA avail reported after a read (matches any PyALSA line containing "avail=<n>").
PYALSA_AVAIL_AFTER_RE = re.compile(r"^(\w+ \d+ \d+:\d+:\d+).*PyALSA: .* avail=(\d+)")
def parse_alsa_status(log_path):
    """Read an ALSA status log and return (relative_seconds, avail_values).

    Pairs each "=====" banner timestamp with the next "avail :" line.
    Returns ([], []) when the file yields no complete pairs.
    """
    samples = []
    pending = None
    with open(log_path, "r") as fh:
        for raw in fh:
            text = raw.strip()
            banner = TIMESTAMP_RE.match(text)
            if banner:
                pending = datetime.strptime(banner.group(1), "%Y-%m-%d %H:%M:%S.%f")
                continue
            if pending is None:
                continue
            hit = AVAIL_RE.match(text)
            if hit:
                samples.append((pending, int(hit.group(1))))
                pending = None
    if not samples:
        return [], []
    origin = samples[0][0]
    return ([(ts - origin).total_seconds() for ts, _ in samples],
            [value for _, value in samples])
def parse_perf_log(log_path):
    """Parse Perf(...) summary lines from a log file.

    Returns four parallel lists: seconds relative to the first Perf line,
    and the sample/write/loop means in milliseconds. Returns four empty
    lists when no Perf line matches.
    """
    timestamps = []
    sample_means = []
    write_means = []
    loop_means = []
    # Syslog-style timestamps carry no year; stamp the current year so that
    # strptime does not default to 1900 (a non-leap year, which makes
    # "Feb 29" lines raise ValueError) and to stay consistent with the
    # year-stamping done in parse_pyalsa_avail.
    year = datetime.now().year
    with open(log_path, "r") as f:
        for line in f:
            m = PERF_RE.search(line)
            if not m:
                continue
            ts_str, sample, write, loop = m.groups()
            ts = datetime.strptime(f"{year} {ts_str}", "%Y %b %d %H:%M:%S")
            timestamps.append(ts)
            sample_means.append(float(sample))
            write_means.append(float(write))
            loop_means.append(float(loop))
    if not timestamps:
        return [], [], [], []
    t0 = timestamps[0]
    seconds = [(t - t0).total_seconds() for t in timestamps]
    return seconds, sample_means, write_means, loop_means
def parse_pyalsa_avail(perf_file):
    """Parse PyALSA avail before/after read from the perf log file.

    Returns four lists: timestamps and values for "avail before read"
    lines, then timestamps and values for "avail=<n>" (after read) lines.
    """
    def _stamp(ts_str):
        # Log timestamps carry no year; prepend the current one before parsing.
        return datetime.strptime(f"{datetime.now().year} {ts_str}", "%Y %b %d %H:%M:%S")

    before_timestamps, before_values = [], []
    after_timestamps, after_values = [], []
    with open(perf_file, "r") as fh:
        for raw in fh:
            text = raw.strip()
            # "avail before read" takes precedence over the generic avail= form.
            hit = PYALSA_AVAIL_BEFORE_RE.match(text)
            if hit:
                before_timestamps.append(_stamp(hit.group(1)))
                before_values.append(int(hit.group(2)))
                continue
            hit = PYALSA_AVAIL_AFTER_RE.match(text)
            if hit:
                after_timestamps.append(_stamp(hit.group(1)))
                after_values.append(int(hit.group(2)))
    return before_timestamps, before_values, after_timestamps, after_values
def parse_latency_yaml(yaml_path):
    """Load latency-buildup measurements from a results YAML file.

    Reads latency_buildup_result.latency_measurements and returns
    (relative_seconds, latencies_ms); ([], []) when there are none.
    """
    import yaml
    with open(yaml_path, 'r') as fh:
        data = yaml.safe_load(fh)
    measurements = data.get('latency_buildup_result', {}).get('latency_measurements', [])
    # Timestamps are stored in ISO format.
    timestamps = [datetime.fromisoformat(m['timestamp']) for m in measurements]
    latencies = [float(m['latency_ms']) for m in measurements]
    if not timestamps:
        return [], []
    origin = timestamps[0]
    return [(t - origin).total_seconds() for t in timestamps], latencies
def plot_combined(alsa_file, perf_file, latency_file, out_path):
    """Build a four-panel figure (ALSA avail, perf means, latency buildup,
    PyALSA avail before/after read) from three result files, save it to
    ``out_path``, and show it interactively.

    All panels share one x axis in seconds measured from the earliest
    absolute timestamp found across all inputs.
    """
    # Parse all logs
    alsa_seconds, avail_values = parse_alsa_status(alsa_file)
    perf_seconds, sample_means, write_means, loop_means = parse_perf_log(perf_file)
    latency_seconds, latencies = parse_latency_yaml(latency_file)
    # Parse PyALSA avail data
    before_timestamps, before_values, after_timestamps, after_values = parse_pyalsa_avail(perf_file)
    # Get absolute timestamps for proper alignment.
    # NOTE(review): the per-file relative seconds parsed above
    # (alsa_seconds, perf_seconds, latency_seconds) are discarded for
    # plotting; the files are re-parsed below only to recover absolute
    # timestamps, duplicating the parse logic.
    alsa_timestamps = []
    perf_timestamps = []
    latency_timestamps = []
    # Re-parse to get absolute timestamps for alignment
    with open(alsa_file, "r") as f:
        current_timestamp = None
        for line in f:
            line = line.strip()
            ts_match = TIMESTAMP_RE.match(line)
            if ts_match:
                current_timestamp = datetime.strptime(ts_match.group(1), "%Y-%m-%d %H:%M:%S.%f")
                continue
            if current_timestamp:
                avail_match = AVAIL_RE.match(line)
                if avail_match:
                    alsa_timestamps.append(current_timestamp)
                    current_timestamp = None
    with open(perf_file, "r") as f:
        for line in f:
            m = PERF_RE.search(line)
            if m:
                ts_str = m.group(1)
                # Add current year to the timestamp since it doesn't include year.
                # NOTE(review): ALSA banner timestamps carry their real year
                # while perf lines are stamped with *today's* year — alignment
                # would be wrong for logs that span a year boundary; confirm.
                current_year = datetime.now().year
                ts_with_year = f"{current_year} {ts_str}"
                ts = datetime.strptime(ts_with_year, "%Y %b %d %H:%M:%S")
                perf_timestamps.append(ts)
    import yaml
    with open(latency_file, 'r') as f:
        data = yaml.safe_load(f)
    latency_measurements = data.get('latency_buildup_result', {}).get('latency_measurements', [])
    for measurement in latency_measurements:
        ts_str = measurement['timestamp']
        # NOTE(review): fromisoformat can yield timezone-aware datetimes;
        # mixing aware and naive values would make min()/subtraction below
        # raise TypeError — assumes the YAML stores naive timestamps, confirm.
        ts = datetime.fromisoformat(ts_str)
        latency_timestamps.append(ts)
    # Find earliest timestamp
    all_abs_timestamps = []
    if alsa_timestamps:
        all_abs_timestamps.extend(alsa_timestamps)
    if perf_timestamps:
        all_abs_timestamps.extend(perf_timestamps)
    if latency_timestamps:
        all_abs_timestamps.extend(latency_timestamps)
    if before_timestamps:
        all_abs_timestamps.extend(before_timestamps)
    if after_timestamps:
        all_abs_timestamps.extend(after_timestamps)
    # NOTE(review): min() raises ValueError if no input produced any timestamps.
    t0_absolute = min(all_abs_timestamps)
    # Convert all times to seconds from earliest timestamp
    alsa_aligned = [(ts - t0_absolute).total_seconds() for ts in alsa_timestamps] if alsa_timestamps else []
    perf_aligned = [(ts - t0_absolute).total_seconds() for ts in perf_timestamps] if perf_timestamps else []
    latency_aligned = [(ts - t0_absolute).total_seconds() for ts in latency_timestamps] if latency_timestamps else []
    before_aligned = [(ts - t0_absolute).total_seconds() for ts in before_timestamps] if before_timestamps else []
    after_aligned = [(ts - t0_absolute).total_seconds() for ts in after_timestamps] if after_timestamps else []
    # Create figure with 4 subplots sharing x-axis
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, figsize=(14, 12), sharex=True)
    fig.suptitle("Combined Audio Performance Metrics", fontsize=16)
    # Plot 1: ALSA avail
    if alsa_aligned and avail_values:
        ax1.plot(alsa_aligned, avail_values, label="avail", linewidth=1, alpha=0.7, color='blue')
        if len(avail_values) >= 10:
            # Adaptive smoothing window: a tenth of the data, capped at 50.
            window_size = min(50, len(avail_values) // 10)
            moving_avg = np.convolve(avail_values, np.ones(window_size)/window_size, mode='valid')
            # NOTE(review): this aligns each mean with its window's *last*
            # sample, lagging the raw curve by half a window — confirm intent.
            ma_seconds = alsa_aligned[window_size-1:]
            ax1.plot(ma_seconds, moving_avg, label=f"moving mean (window={window_size})",
                     linewidth=2, color='darkblue')
    ax1.set_ylabel("Available samples")
    ax1.set_title("ALSA Available Samples")
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    # Plot 2: Perf metrics
    if perf_aligned:
        ax2.plot(perf_aligned, sample_means, label="sample mean", linewidth=1, alpha=0.8, color='green')
        ax2.plot(perf_aligned, write_means, label="write mean", linewidth=1, alpha=0.8, color='orange')
        ax2.plot(perf_aligned, loop_means, label="loop mean", linewidth=1, alpha=0.8, color='red')
        # Add moving average for loop mean
        if len(loop_means) >= 10:
            window_size = min(50, len(loop_means) // 10)
            moving_avg = np.convolve(loop_means, np.ones(window_size)/window_size, mode='valid')
            ma_seconds = perf_aligned[window_size-1:]
            ax2.plot(ma_seconds, moving_avg, label=f"loop mean moving avg (window={window_size})",
                     linewidth=2, color='darkred', alpha=0.9)
    ax2.set_ylabel("Duration (ms)")
    ax2.set_title("Performance Metrics")
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    # Plot 3: Latency
    if latency_aligned:
        ax3.plot(latency_aligned, latencies, label="latency", linewidth=1, color='purple')
    ax3.set_ylabel("Latency (ms)")
    ax3.set_title("Latency Buildup")
    ax3.legend()
    ax3.grid(True, alpha=0.3)
    # Plot 4: PyALSA avail before/after read
    if before_aligned and before_values:
        ax4.plot(before_aligned, before_values, label="avail before read", linewidth=1, alpha=0.7, color='cyan')
    if after_aligned and after_values:
        ax4.plot(after_aligned, after_values, label="avail after read", linewidth=1, alpha=0.7, color='magenta')
    ax4.set_xlabel("Time (s)")
    ax4.set_ylabel("Available samples")
    ax4.set_title("PyALSA Available Samples (Before/After Read)")
    if before_aligned or after_aligned:
        ax4.legend()
    ax4.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(out_path, dpi=150, bbox_inches='tight')
    print(f"Combined plot saved to {out_path}")
    # Show interactive plot
    plt.show()
def main():
    """Command-line entry point: validate the three input paths and write
    the combined figure next to the ALSA status log."""
    if len(sys.argv) != 4:
        print(f"Usage: {sys.argv[0]} <alsa_status.log> <perf_log.log> <latency_results.yaml>", file=sys.stderr)
        sys.exit(1)
    alsa_file, perf_file, latency_file = sys.argv[1], sys.argv[2], sys.argv[3]
    # Bail out on the first missing input.
    for candidate in (alsa_file, perf_file, latency_file):
        if not os.path.isfile(candidate):
            print(f"File not found: {candidate}", file=sys.stderr)
            sys.exit(1)
    # Determine output path (same directory as first file)
    out_path = os.path.join(os.path.dirname(os.path.abspath(alsa_file)), "combined_audio_plot.png")
    plot_combined(alsa_file, perf_file, latency_file, out_path)


if __name__ == "__main__":
    main()

81
plot_perf_log.py Normal file
View File

@@ -0,0 +1,81 @@
#!/usr/bin/env python3
"""Parse Perf lines from a log file and plot sample mean, write mean, and loop mean over time."""
import sys
import re
import os
from datetime import datetime
import matplotlib.pyplot as plt
PERF_RE = re.compile(
    r"^(\w+ \d+ \d+:\d+:\d+) .* Perf\(.*?\):"
    r".*?sample mean=([\d.]+)ms"
    r".*?write mean=([\d.]+)ms"
    r".*?loop mean=([\d.]+)ms"
)


def parse_log(log_path):
    """Parse Perf(...) summary lines from a log file.

    Returns four parallel lists: seconds relative to the first Perf line,
    and the sample/write/loop means in milliseconds. Exits with status 1
    when no Perf line is found.
    """
    timestamps = []
    sample_means = []
    write_means = []
    loop_means = []
    # Syslog-style timestamps carry no year; stamp the current year so that
    # strptime does not default to 1900 (a non-leap year, which makes
    # "Feb 29" lines raise ValueError).
    year = datetime.now().year
    with open(log_path, "r") as f:
        for line in f:
            m = PERF_RE.search(line)
            if not m:
                continue
            ts_str, sample, write, loop = m.groups()
            ts = datetime.strptime(f"{year} {ts_str}", "%Y %b %d %H:%M:%S")
            timestamps.append(ts)
            sample_means.append(float(sample))
            write_means.append(float(write))
            loop_means.append(float(loop))
    if not timestamps:
        print("No Perf lines found in the log file.", file=sys.stderr)
        sys.exit(1)
    t0 = timestamps[0]
    seconds = [(t - t0).total_seconds() for t in timestamps]
    return seconds, sample_means, write_means, loop_means
def plot(seconds, sample_means, write_means, loop_means, out_path):
    """Render the three Perf mean series against time and save a PNG."""
    plt.figure(figsize=(12, 6))
    # Plot the series in the original order so legend order is preserved.
    for series, label in (
        (sample_means, "sample mean (ms)"),
        (write_means, "write mean (ms)"),
        (loop_means, "loop mean (ms)"),
    ):
        plt.plot(seconds, series, label=label)
    plt.xlabel("Time (s)")
    plt.ylabel("Duration (ms)")
    plt.title("Perf Metrics Over Time")
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(out_path, dpi=150)
    print(f"Plot saved to {out_path}")
def main():
    """Command-line entry point: parse one perf log and save the plot next
    to it as "<logname>_perf_plot.png"."""
    if len(sys.argv) != 2:
        print(f"Usage: {sys.argv[0]} <path_to_log_file>", file=sys.stderr)
        sys.exit(1)
    source = sys.argv[1]
    if not os.path.isfile(source):
        print(f"File not found: {source}", file=sys.stderr)
        sys.exit(1)
    seconds, sample_means, write_means, loop_means = parse_log(source)
    # Output PNG lives in the same directory as the input log.
    target_dir = os.path.dirname(os.path.abspath(source))
    stem = os.path.splitext(os.path.basename(source))[0]
    plot(seconds, sample_means, write_means, loop_means,
         os.path.join(target_dir, f"{stem}_perf_plot.png"))


if __name__ == "__main__":
    main()

View File

@@ -235,11 +235,22 @@ def run_latency_test(config: Dict, num_measurements: int = 5, save_plots: bool =
last_correlation = correlation
last_lags = lags
avg = float(np.mean(latencies))
std_dev = float(np.std(latencies))
latency_cfg = config.get('latency', {})
max_std_dev_ms = latency_cfg.get('max_std_dev_ms', None)
min_avg_ms = latency_cfg.get('min_avg_ms', None)
valid = True
if max_std_dev_ms is not None and std_dev > max_std_dev_ms:
valid = False
if min_avg_ms is not None and avg < min_avg_ms:
valid = False
latency_stats = {
'avg': float(np.mean(latencies)),
'avg': avg,
'min': float(np.min(latencies)),
'max': float(np.max(latencies)),
'std': float(np.std(latencies))
'std': std_dev,
'valid': valid
}
if save_plots and output_dir and last_recording is not None:

View File

@@ -36,10 +36,9 @@ def main():
test_id = timestamp.strftime('%Y%m%d_%H%M%S')
results_dir = Path(config['output']['results_dir'])
results_dir.mkdir(exist_ok=True)
test_output_dir = results_dir / f"{test_id}_artifact_detection"
test_output_dir.mkdir(exist_ok=True)
test_output_dir = results_dir / timestamp.strftime('%Y') / timestamp.strftime('%m') / timestamp.strftime('%d') / f"{test_id}_artifact_detection"
test_output_dir.mkdir(parents=True, exist_ok=True)
save_plots = config['output'].get('save_plots', False)

View File

@@ -26,10 +26,9 @@ def main():
test_id = timestamp.strftime('%Y%m%d_%H%M%S')
results_dir = Path(config['output']['results_dir'])
results_dir.mkdir(exist_ok=True)
test_output_dir = results_dir / f"{test_id}_latency"
test_output_dir.mkdir(exist_ok=True)
test_output_dir = results_dir / timestamp.strftime('%Y') / timestamp.strftime('%m') / timestamp.strftime('%d') / f"{test_id}_latency"
test_output_dir.mkdir(parents=True, exist_ok=True)
save_plots = config['output'].get('save_plots', False)
@@ -47,7 +46,9 @@ def main():
try:
latency_stats = run_latency_test(config, num_measurements=args.measurements,
save_plots=save_plots, output_dir=test_output_dir)
print(f"✓ Latency: avg={latency_stats['avg']:.3f}ms, "
valid = latency_stats.get('valid', True)
status = "PASS" if valid else "FAIL"
print(f"{'✓' if valid else '✗'} Latency [{status}]: avg={latency_stats['avg']:.3f}ms, "
f"min={latency_stats['min']:.3f}ms, max={latency_stats['max']:.3f}ms, "
f"std={latency_stats['std']:.3f}ms")
except Exception as e:

View File

@@ -263,10 +263,9 @@ def main():
test_id = timestamp.strftime('%Y%m%d_%H%M%S')
results_dir = Path(config['output']['results_dir'])
results_dir.mkdir(exist_ok=True)
test_output_dir = results_dir / f"{test_id}_latency_buildup"
test_output_dir.mkdir(exist_ok=True)
test_output_dir = results_dir / timestamp.strftime('%Y') / timestamp.strftime('%m') / timestamp.strftime('%d') / f"{test_id}_latency_buildup"
test_output_dir.mkdir(parents=True, exist_ok=True)
save_plots = config['output'].get('save_plots', False)

View File

@@ -88,7 +88,7 @@ def display_results(yaml_file: Path):
def list_all_results(results_dir: Path):
yaml_files = sorted(results_dir.glob("*_results.yaml"))
yaml_files = sorted(results_dir.rglob("*_results.yaml"))
if not yaml_files:
print("No test results found.")