# Source: aiagents4pharma/talk2aiagents4pharma/docker-compose.yml
# (File-viewer page chrome and line-number gutter removed during extraction repair.)
# talk2aiagents4pharma
# Compose stack: one Ollama server (pick exactly ONE hardware profile:
# cpu | nvidia | amd) plus the talk2aiagents4pharma Streamlit app.
# The three Ollama variants share the same host port (11434) and volume,
# which is safe because profiles make them mutually exclusive.
---
services:
  # CPU variant – start with: docker compose --profile cpu up
  ollama:
    image: ollama/ollama:latest
    container_name: ollama-server
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): curl may not be installed in the ollama/ollama image —
      # confirm, or probe with a tool the image ships (e.g. `ollama list`).
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"]
      interval: 30s
      timeout: 10s
      retries: 3
    entrypoint: ["/bin/sh", "-c"]
    # The surrounding double quotes are intentional: Compose shell-lexes a
    # string command, so the quotes keep the whole folded script as ONE
    # argument to `sh -c`. Script: serve in background, wait, pre-pull the
    # embedding model, then block so the container stays up.
    command: >
      "ollama serve &
      sleep 10 &&
      ollama pull nomic-embed-text &&
      tail -f /dev/null"
    networks:
      - app-network
    profiles: ["cpu"]

  # NVIDIA GPU variant – start with: docker compose --profile nvidia up
  ollama-nvidia:
    image: ollama/ollama:latest
    container_name: ollama-server-nvidia
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): see curl caveat on the `ollama` service above.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"]
      interval: 30s
      timeout: 10s
      retries: 3
    entrypoint: ["/bin/sh", "-c"]
    # Quotes intentional — see the `ollama` service for why.
    command: >
      "ollama serve &
      sleep 10 &&
      ollama pull nomic-embed-text &&
      tail -f /dev/null"
    deploy:
      resources:
        reservations:
          devices:
            # Requires the NVIDIA Container Toolkit on the host.
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - app-network
    profiles: ["nvidia"]

  # AMD GPU variant – start with: docker compose --profile amd up
  ollama-amd:
    image: ollama/ollama:rocm
    container_name: ollama-server-amd
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): see curl caveat on the `ollama` service above.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"]
      interval: 30s
      timeout: 10s
      retries: 3
    entrypoint: ["/bin/sh", "-c"]
    # Quotes intentional — see the `ollama` service for why.
    command: >
      "ollama serve &
      sleep 10 &&
      ollama pull nomic-embed-text &&
      tail -f /dev/null"
    # ROCm needs direct access to the kernel fusion driver and DRM devices.
    devices:
      - /dev/kfd
      - /dev/dri
    networks:
      - app-network
    profiles: ["amd"]

  # The application front-end (Streamlit on 8501). Reads secrets/config
  # from a local .env file; runs under every profile.
  talk2aiagents4pharma:
    image: virtualpatientengine/talk2aiagents4pharma:latest
    container_name: talk2aiagents4pharma
    ports:
      - "8501:8501"
    env_file:
      - .env
    restart: unless-stopped
    networks:
      - app-network

volumes:
  # Shared model cache so a pulled model survives container recreation
  # and is reused by whichever Ollama variant is active.
  ollama_data:
    name: ollama_data
    # Uncomment the line below if you want to use an existing volume:
    # external: true

networks:
  app-network:
    name: app-network