use homeserver llm provider as a default

This commit is contained in:
2025-03-13 11:15:15 +01:00
parent 2b6de8797e
commit ce05358f26
3 changed files with 15 additions and 20 deletions

View File

@@ -43,7 +43,7 @@ ENDPOINT_GROUPS: dict[int:EndpointGroup] = { # for now make sure , .id and key a
id=1,
name="Gate2",
languages=["deu", "eng", "fra"],
endpoints=[ENDPOINTS[2]],
endpoints=[ENDPOINTS[1]],
)
}

View File

@@ -53,9 +53,6 @@ def init_endpoint(endpoint: Endpoint, languages: list[str]):
# Configure the bigs
for conf in config.bigs: # TODO: this is now part of the endpoint group config
conf.loop = False
# conf.llm_client = 'openwebui' # comment out for local llm
# conf.llm_host_url = 'https://ollama.pstruebi.xyz'
# conf.llm_host_token = 'sk-17124cb84df14cc6ab2d9e17d0724d13'
# Initialize the endpoint if config changed or if it's not already initialized
if not multicast_client.get_status(base_url=endpoint.url)['is_initialized'] or config != CURRENT_ENDPOINT_CONFIG.get(endpoint.id):
@@ -219,22 +216,15 @@ async def start_announcement(text: str, group_id: int):
@app.get("/endpoints/{endpoint_id}/status") # TODO: think about progress tracking
async def get_endpoint_status(endpoint_id: str):
@app.get("/groups/{endpoint_id}/state") # TODO: think about progress tracking
async def get_group_state(group_id: int):
"""Get the status of a specific endpoint."""
# Check if the endpoint exists
endpoint = endpoints_db.get_endpoint_by_id(endpoint_id)
if not endpoint:
raise HTTPException(status_code=404, detail=f"Endpoint {endpoint_id} not found")
ep_group = endpoints_db.get_group_by_id(group_id)
if not ep_group:
raise HTTPException(status_code=404, detail=f"Endpoint {group_id} not found")
# Return the status if available, otherwise a default status
if endpoint_id in endpoint_status:
return endpoint_status[endpoint_id]
else:
return {
"active": False,
"broadcasts": 0,
}
return ep_group.current_state
@app.get("/endpoints")

View File

@@ -27,9 +27,14 @@ class Endpoint(BaseModel):
class TranslatorLangConfig(BaseModel):
translator_llm: str = 'llama3.2:3b-instruct-q4_0'
llm_client: str = 'ollama'
llm_host_url: str | None = 'http://localhost:11434'
llm_host_token: str | None = None
llm_client: str = 'openwebui' # remote (homeserver)
llm_host_url: str = 'https://ollama.pstruebi.xyz'
llm_host_token: str = 'sk-17124cb84df14cc6ab2d9e17d0724d13' # NOTE(review): hardcoded secret committed to VCS — rotate this token and load it from an env var / secret store instead
# llm_client: str = 'ollama' #local
# llm_host_url: str | None = 'http://localhost:11434'
# llm_host_token: str | None = None
tts_system: str = 'piper'
tts_model: str ='de_DE-kerstin-low'