Files
auracast-translator/pyproject.toml
2025-03-25 12:02:59 +01:00

50 lines
1.4 KiB
TOML

[tool.poetry]
name = "auracast_translator"
version = "0.1.0"
authors = ["Patrick S <pstruebi>"]
description = "Announcement System"
readme = "readme.md"
# Multi-package src layout: each package lives under src/<name>.
packages = [
    { include = "translator_models", from = "src" },
    { include = "translator_client", from = "src" },
    { include = "auracast_translator", from = "src" },
]
[tool.poetry.dependencies]
python = "~3.11"
setuptools = ">=77"
# NOTE(review): a bare version string is an exact pin (==0.26) in Poetry —
# confirm this is intended rather than a caret range ("^0.26").
coqui-tts = "0.26"
[tool.poetry.group.general.dependencies]
requests = "2.32.3"
aiohttp = "3.9.3"
fastapi = "0.115.11"
uvicorn = "0.34.0"
ollama = "0.4.7"
piper-tts = "1.2.0"
librosa = "0.10.1"
aioconsole = "0.8.1"
# Private git dependencies served over SSH (port 222).
lc3 = { git = "ssh://git@ssh.pstruebi.xyz:222/auracaster/liblc3.git", rev = "7558637303106c7ea971e7bb8cedf379d3e08bcc" }
# NOTE(review): no `rev` pin here — resolves to the default branch HEAD, so
# builds are not reproducible; consider pinning like `lc3` above.
auracast = { git = "ssh://git@ssh.pstruebi.xyz:222/auracaster/bumble-auracast.git" }
#[tool.poetry.group.gpu.dependencies]
#onnxruntime-gpu = "^1.20.1"
# TODO: for running piper on gpu investigate
# https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements
# put everything in pytorch container according to piper github:
# https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch
# Use a separate container for the voice provider
# `optional = true` inside a group dependency spec is not supported by Poetry
# (dependency-level `optional` is only meaningful for extras in the main
# dependency table). To make the dev group skippable — installed only with
# `poetry install --with dev` — mark the group itself optional.
[tool.poetry.group.dev]
optional = true

[tool.poetry.group.dev.dependencies]
pytest = ">8.2"
[tool.pytest.ini_options]
# NOTE(review): `--count` is provided by the pytest-repeat plugin, which is
# not declared in the dependencies above — confirm it is installed elsewhere.
addopts = [
    "--import-mode=importlib",
    "--count=1",
    "-s",
    "-v",
]
# PEP 517 build backend: standard Poetry-core configuration.
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"