This commit is contained in:
cangui 2025-08-16 19:35:22 +02:00
parent f7482012c0
commit c23206746e

View File

@ -1,20 +1,23 @@
# docker-compose.yml
# LocalAI service built from the local Dockerfile, exposing the OpenAI-compatible
# API on host port 8085 (container port 8080).
version: "3.9"
services:
  localai:
    # Build from the repo's Dockerfile instead of pulling the prebuilt AIO image.
    build:
      context: .
      dockerfile: Dockerfile
    container_name: localai
    ports:
      - "8085:8080"  # direct access: http://<host>:8085
    environment:
      - MODELS_PATH=/models
      - THREADS=8  # adjust to the number of CPU cores
      - CONTEXT_SIZE=2048  # increase if you have more RAM
      - DEBUG=false
      - DISABLE_DOWNLOAD=false  # let LocalAI fetch the llama-cpp backend on first run
    command: ["--models-path", "/models", "--address", "0.0.0.0:8080"]
    healthcheck:
      # Probe the models endpoint from inside the container (port 8080, not the host port).
      test: ["CMD", "curl", "-fsS", "http://localhost:8080/v1/models"]
      interval: 10s
      timeout: 5s
      retries: 10
    restart: unless-stopped