fix: naming image and ollama-cpu (#2056)

This commit is contained in:
Javier Martinez 2024-08-12 08:23:16 +02:00 committed by GitHub
parent 22904ca8ad
commit 89477ea9d3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 3 additions and 4 deletions

View File

@@ -7,7 +7,7 @@ services:
# Private-GPT service for the Ollama CPU and GPU modes
# This service builds from an external Dockerfile and runs the Ollama mode.
private-gpt-ollama:
image: ${PGPT_IMAGE:-zylonai/private-gpt}${PGPT_TAG:-0.6.2}-ollama # x-release-please-version
image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama # x-release-please-version
build:
context: .
dockerfile: Dockerfile.ollama
@@ -31,7 +31,7 @@ services:
# Private-GPT service for the local mode
# This service builds from a local Dockerfile and runs the application in local mode.
private-gpt-llamacpp-cpu:
image: ${PGPT_IMAGE:-zylonai/private-gpt}${PGPT_TAG:-0.6.2}-llamacpp-cpu # x-release-please-version
image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu # x-release-please-version
build:
context: .
dockerfile: Dockerfile.llamacpp-cpu
@@ -57,7 +57,6 @@ services:
ollama:
image: traefik:v2.10
ports:
- "11435:11434"
- "8081:8080"
command:
- "--providers.file.filename=/etc/router.yml"
@@ -84,7 +83,7 @@ services:
- ./models:/root/.ollama
profiles:
- ""
- ollama
- ollama-cpu
# Ollama service for the CUDA mode
ollama-cuda: