# File: auracast-server-deploy/docker-compose.yaml
# (104 lines, 2.4 KiB, YAML)
services:
  # Local LLM inference server; models are cached in the "ollama" named volume.
  ollama:
    container_name: ollama
    image: "ollama/ollama"
    restart: unless-stopped
    environment:
      # Quoted so the values stay strings when handed to the container env.
      OLLAMA_MAX_QUEUE: "16"
      OLLAMA_KEEP_ALIVE: "30"  # assumes seconds — TODO confirm against Ollama docs
    ports:
      # Quoted: Compose port short syntax should always be a string scalar.
      - "11434:11434"
    volumes:
      - ollama:/root/.ollama
    networks:
      - default
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU for inference.
            - driver: nvidia
              count: 1
              capabilities: [gpu]
wireguard:
image: lscr.io/linuxserver/wireguard:latest
container_name: wireguard
restart: unless-stopped
cap_add:
- NET_ADMIN
- SYS_MODULE #optional
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Vienna
- SERVERURL=vpn.pstruebi.xyz
- SERVERPORT=51821 #optional
- PEERS=11 #optional
- PEERDNS=auto #optional
- PERSISTENTKEEPALIVE_PEERS=all
- ALLOWEDIPS=0.0.0.0/0 #optional
#- INTERNAL_SUBNET=10.13.13.0 #optional
#- LOG_CONFS=true #optional
volumes:
- ./wg_conf:/config
#- wireguard:/config
#- /lib/modules:/lib/modules #optional
ports:
- 51821:51820/udp
- "7999:7999" #auracast-translator
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
networks:
- default
# needs time for first boot since xtts model is downloaded
auracast-translator:
container_name: auracast-translator
build:
context: ../auracast-translator
dockerfile: Dockerfile
ssh:
- default=~/.ssh/id_ed25519 #lappi
restart: unless-stopped
depends_on:
- wireguard
network_mode: service:wireguard # not sure if this is the best way
#ports:
# - "7999:7999" #auracast-translator
#networks:
# - default
environment:
- LOG_LEVEL=INFO
- PYTHONUNBUFFERED=1
deploy: # for tts on gpu
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
webui:
container_name: webui
build:
context: ../auracaster-webui
dockerfile: Dockerfile
ssh:
- default=~/.ssh/id_ed25519 #lappi
restart: unless-stopped
ports:
- "8501:8501"
environment:
- PYTHONUNBUFFERED=1
# Change this URL if the translator service is running on a different host
- TRANSLATOR_API_BASE_URL=http://wireguard:7999 # http://auracast-translator:7999
networks:
- default
# Named volumes managed by Docker (data persists across container recreation).
volumes:
  ollama:     # Ollama model cache (mounted at /root/.ollama)
  wireguard:  # declared but its mount is commented out — wg config is bind-mounted from ./wg_conf