---
# Docker Compose stack: Ollama LLM server (GPU), WireGuard VPN,
# and the Auracaster web UI + translator services.
services:
  ollama:
    container_name: ollama
    image: "ollama/ollama"
    restart: unless-stopped
    environment:
      # Quoted so they stay strings — env var values are consumed as text,
      # and unquoted digits are parsed as YAML integers.
      OLLAMA_MAX_QUEUE: "16"
      OLLAMA_KEEP_ALIVE: "30"
    ports:
      # Always quote "HOST:CONTAINER" mappings to avoid YAML's
      # implicit-typing trap and match the other services in this file.
      - "11434:11434"
    volumes:
      - ollama:/root/.ollama
    networks:
      - default
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  wireguard:
    image: lscr.io/linuxserver/wireguard:latest
    container_name: wireguard
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
      - SYS_MODULE  # optional
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=Etc/UTC
      - SERVERURL=vpn-hinterwaldner.duckdns.org
      - SERVERPORT=51821  # optional
      - PEERS=raspi  # optional
      - PEERDNS=auto  # optional
      # - INTERNAL_SUBNET=10.13.13.0  # optional
      # - ALLOWEDIPS=0.0.0.0/0  # optional
      # - PERSISTENTKEEPALIVE_PEERS=  # optional
      # - LOG_CONFS=true  # optional
    volumes:
      - wireguard:/config
      # - /lib/modules:/lib/modules  # optional
    ports:
      # Host 51821 forwards to WireGuard's default in-container port 51820.
      - "51821:51820/udp"
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    networks:
      - default

  webui:
    container_name: webui
    build:
      context: ../auracaster-webui
      dockerfile: Dockerfile
      ssh:
        - default=~/.ssh/id_ed25519  # lappi
    restart: unless-stopped
    ports:
      - "8501:8501"
    environment:
      - PYTHONUNBUFFERED=1
      # Change this URL if the translator service is running on a different host
      - TRANSLATOR_API_BASE_URL=http://auracast-translator:7999
    networks:
      - default

  auracast-translator:
    build:
      context: ../auracast-translator
      dockerfile: Dockerfile
      ssh:
        - default=~/.ssh/id_ed25519  # lappi
    restart: unless-stopped
    ports:
      - "7999:7999"
    environment:
      - PYTHONUNBUFFERED=1
    networks:
      - default
    deploy:  # for tts on gpu
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

# Named volumes (empty value = default local driver).
volumes:
  ollama:
  wireguard: