commit 94aec10172
parent 05c04d828d
docker-compose.yml
@@ -1,22 +1,19 @@
 version: "3.9"
 
 services:
   localai:
-    build:
-      context: .
-      dockerfile: Dockerfile
+    image: localai/localai:latest-aio-cpu
     container_name: localai
-    ports:
-      - "8085:8080"  # direct access: http://<host>:8085
+    ports: ["8085:8080"]
     environment:
       - MODELS_PATH=/models
-      - THREADS=8           # adjust to the number of cores
-      - CONTEXT_SIZE=2048   # increase if you have more RAM
+      - THREADS=8
+      - CONTEXT_SIZE=2048
       - DEBUG=false
-      - DISABLE_DOWNLOAD=false  # let LocalAI fetch the llama-cpp backend on first run
-    command: ["--models-path", "/models", "--address", "0.0.0.0:8080"]
+      - DISABLE_DOWNLOAD=true   # backends already included
+    command: ["--models-path","/models","--address","0.0.0.0:8080"]
     healthcheck:
-      test: ["CMD", "curl", "-fsS", "http://localhost:8080/v1/models"]
+      test: ["CMD","curl","-fsS","http://localhost:8080/v1/models"]
       interval: 10s
       timeout: 5s
       retries: 10
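
The change swaps the locally built image (build: context/Dockerfile) for LocalAI's prebuilt AIO CPU image and flips DISABLE_DOWNLOAD to true, since the AIO image ships with its backends already included. A minimal smoke test of the result, assuming the stack is started with `docker compose up -d`; the endpoint is the same one the service's own healthcheck polls, reached through the published port 8085:

    # start the service in the background
    docker compose up -d
    # once the healthcheck passes, list the models LocalAI has loaded
    curl -fsS http://localhost:8085/v1/models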