Code/pgpt/docker-compose.yaml
Stéphan c24898bf02 fix(pgpt): after a Docker version upgrade,
regenerated docker-compose.yaml so the service can be restarted
2025-06-15 12:02:33 +02:00

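# Usage sketch (an assumption based on the `profiles:` keys declared below,
# using the standard Docker Compose v2 CLI):
#   docker compose up -d                          # default "" profile: proxy + ollama-cpu
#   docker compose --profile ollama-cuda up -d    # NVIDIA GPU Ollama backend
#   docker compose --profile ollama-api up -d     # proxy only; Ollama runs on the host
#   docker compose --profile llamacpp-cpu up -d   # local llama.cpp backend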

services:
  #-----------------------------------
  #---- Private-GPT services ---------
  #-----------------------------------
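  # Private-GPT UI/API backed by Ollama. PGPT_OLLAMA_API_BASE points at the
  # "ollama" service below, which is a Traefik proxy in front of the actual
  # Ollama backend selected by the active profile.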
  private-gpt-ollama:
    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama
    user: root
    build:
      context: .
      dockerfile: Dockerfile.ollama
    volumes:
      - /home/fabnum/fabnum-dev/Fiches:/home/worker/app/local_data/Fiches:Z
    ports:
      - "127.0.0.1:8001:8001"
    environment:
      PORT: 8001
      PGPT_PROFILES: docker
      PGPT_MODE: ollama
      PGPT_EMBED_MODE: ollama
      PGPT_OLLAMA_API_BASE: http://ollama:11434
      HF_TOKEN: ${HF_TOKEN:-}
    profiles:
      - ""
      - ollama-cpu
      - ollama-cuda
      - ollama-api
    depends_on:
      ollama:
        condition: service_started
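  # Private-GPT with a local llama.cpp backend on the CPU; the entrypoint
  # runs scripts/setup (presumably fetching models into ./models) before
  # launching the server.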
  private-gpt-llamacpp-cpu:
    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu
    user: root
    build:
      context: .
      dockerfile: Dockerfile.llamacpp-cpu
    volumes:
      - ./local_data/:/home/worker/app/local_data
      - ./models/:/home/worker/app/models
    entrypoint: sh -c ".venv/bin/python scripts/setup && .venv/bin/python -m private_gpt"
    ports:
      - "127.0.0.1:8001:8001"
    environment:
      PORT: 8001
      PGPT_PROFILES: local
      HF_TOKEN: ${HF_TOKEN:-}
    profiles:
      - llamacpp-cpu
  #-----------------------------------
  #---- Ollama services --------------
  #-----------------------------------
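  # Despite its name, this service is a Traefik proxy: it listens on the
  # Ollama port (11434, the "web" entrypoint) and routes requests to the
  # ollama-cpu/ollama-cuda containers or to an Ollama instance on the host,
  # as configured in .docker/router.yml. With --api.insecure=true the
  # Traefik dashboard is reachable on 127.0.0.1:8080.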
  ollama:
    image: traefik:v2.10
    ports:
      - "127.0.0.1:8080:8080"
    command:
      - "--providers.file.filename=/etc/router.yml"
      - "--log.level=ERROR"
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.web.address=:11434"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./.docker/router.yml:/etc/router.yml:ro
    extra_hosts:
      - "host.docker.internal:host-gateway"
    security_opt:
      - label:disable
    profiles:
      - ""
      - ollama-cpu
      - ollama-cuda
      - ollama-api
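  # Plain Ollama on the CPU; model blobs persist in ./models on the host.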
  ollama-cpu:
    image: ollama/ollama:latest
    ports:
      - "127.0.0.1:11434:11434"
    volumes:
      - ./models:/root/.ollama:Z
    healthcheck:
      disable: true
    profiles:
      - ""
      - ollama-cpu
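  # Ollama with NVIDIA GPU acceleration; the `deploy.resources` reservation
  # requires the NVIDIA Container Toolkit on the host.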
  ollama-cuda:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ./models:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    profiles:
      - ollama-cuda
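
# For reference, a minimal sketch of what .docker/router.yml might contain
# (hypothetical; the real file is not shown here). It uses Traefik's file
# provider to forward everything arriving on the "web" entrypoint (:11434)
# to a single Ollama backend:
#
#   http:
#     routers:
#       ollama-router:
#         rule: "PathPrefix(`/`)"
#         entryPoints:
#           - web
#         service: ollama-backend
#     services:
#       ollama-backend:
#         loadBalancer:
#           servers:
#             - url: "http://ollama-cpu:11434"  # or http://host.docker.internal:11434 for ollama-api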