fix(pgpt): following the Docker version upgrade
regenerate docker-compose.yaml so the service can be restarted
parent 359d17f628
commit c24898bf02
@@ -3,10 +3,8 @@ services:
   #---- Private-GPT services ---------
   #-----------------------------------

-  # Private-GPT service for the Ollama CPU and GPU modes
-  # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama # x-release-please-version
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama
     user: root
     build:
       context: .
@@ -29,12 +27,10 @@ services:
       - ollama-api
     depends_on:
       ollama:
-        condition: service_healthy
+        condition: service_started

-  # Private-GPT service for the local mode
-  # This service builds from a local Dockerfile and runs the application in local mode.
   private-gpt-llamacpp-cpu:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu # x-release-please-version
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu
     user: root
     build:
       context: .
@@ -56,22 +52,8 @@ services:
   #---- Ollama services --------------
   #-----------------------------------

-  # Traefik reverse proxy for the Ollama service
-  # This will route requests to the Ollama service based on the profile.
   ollama:
     image: traefik:v2.10
-    healthcheck:
-      test:
-        [
-          "CMD",
-          "sh",
-          "-c",
-          "wget -q --spider http://ollama:11434 || exit 1",
-        ]
-      interval: 10s
-      retries: 3
-      start_period: 5s
-      timeout: 5s
     ports:
       - "127.0.0.1:8080:8080"
     command:
@@ -86,24 +68,26 @@ services:
       - ./.docker/router.yml:/etc/router.yml:ro
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    security_opt:
+      - label:disable
     profiles:
       - ""
       - ollama-cpu
       - ollama-cuda
       - ollama-api

-  # Ollama service for the CPU mode
   ollama-cpu:
     image: ollama/ollama:latest
     ports:
       - "127.0.0.1:11434:11434"
     volumes:
       - ./models:/root/.ollama:Z
+    healthcheck:
+      disable: true
     profiles:
       - ""
       - ollama-cpu

-  # Ollama service for the CUDA mode
   ollama-cuda:
     image: ollama/ollama:latest
     ports:
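Note on the coupled changes above: the commit removes the `wget`-based healthcheck from the Traefik-fronted `ollama` service, so a dependent can no longer gate on Compose's "healthy" state; the `depends_on` condition in `private-gpt-ollama` is downgraded to `service_started` accordingly. A minimal sketch of that coupling, trimmed for illustration and not a verbatim excerpt of the committed file:

# Sketch: without a healthcheck, Compose never marks the container
# "healthy", so `condition: service_healthy` in a dependent would never
# be satisfied and `docker compose up` would stall or fail.
services:
  ollama:
    image: traefik:v2.10
    # healthcheck removed by this commit

  private-gpt-ollama:
    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama
    depends_on:
      ollama:
        condition: service_started  # was: service_healthy

The remaining additions look SELinux-related: `security_opt: - label:disable` turns off label separation for the Traefik container, consistent with the `:Z` relabel flag already used on the models volume, and `healthcheck: disable: true` on `ollama-cpu` suppresses any healthcheck inherited from the `ollama/ollama` image.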