# Source: aiagents4pharma/talk2aiagents4pharma/docker-compose.yml
# talk2aiagents4pharma
#
# Exactly one ollama variant is started per run via Compose profiles
# (--profile cpu|nvidia|amd), so the shared host port 11434 and the
# shared ollama_data volume never conflict between variants.
services:
  # CPU variant – start with: docker compose --profile cpu up
  ollama:
    image: ollama/ollama:latest
    container_name: ollama-server
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama/ollama
      # image — TODO confirm, otherwise the healthcheck always fails.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"]
      interval: 30s
      timeout: 10s
      retries: 3
    entrypoint: ["/bin/sh", "-c"]
    # The surrounding double quotes are required: Compose word-splits a
    # string command, and the quotes keep the whole script as a single
    # argument to `sh -c`. The server starts in the background, the embed
    # model is pulled after a grace period, and `tail` keeps PID 1 alive.
    command: >
      "ollama serve &
      sleep 10 &&
      ollama pull nomic-embed-text &&
      tail -f /dev/null"
    networks:
      - app-network
    profiles: ["cpu"]

  # NVIDIA GPU variant – start with: docker compose --profile nvidia up
  ollama-nvidia:
    image: ollama/ollama:latest
    container_name: ollama-server-nvidia
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama/ollama
      # image — TODO confirm, otherwise the healthcheck always fails.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"]
      interval: 30s
      timeout: 10s
      retries: 3
    entrypoint: ["/bin/sh", "-c"]
    # Quotes are load-bearing — see the CPU variant's command for details.
    command: >
      "ollama serve &
      sleep 10 &&
      ollama pull nomic-embed-text &&
      tail -f /dev/null"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - app-network
    profiles: ["nvidia"]

  # AMD GPU variant – start with: docker compose --profile amd up
  ollama-amd:
    image: ollama/ollama:rocm
    container_name: ollama-server-amd
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama rocm
      # image — TODO confirm, otherwise the healthcheck always fails.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/version"]
      interval: 30s
      timeout: 10s
      retries: 3
    entrypoint: ["/bin/sh", "-c"]
    # Quotes are load-bearing — see the CPU variant's command for details.
    command: >
      "ollama serve &
      sleep 10 &&
      ollama pull nomic-embed-text &&
      tail -f /dev/null"
    # ROCm needs direct access to the kernel fusion driver and DRM devices.
    devices:
      - /dev/kfd
      - /dev/dri
    networks:
      - app-network
    profiles: ["amd"]

  # Streamlit application container; reaches ollama at
  # http://ollama-server:11434 (or the active variant) over app-network.
  talk2aiagents4pharma:
    image: virtualpatientengine/talk2aiagents4pharma:latest
    container_name: talk2aiagents4pharma
    ports:
      - "8501:8501"
    env_file:
      - .env
    restart: unless-stopped
    networks:
      - app-network

volumes:
  ollama_data:
    name: ollama_data
    # Uncomment the line below if you want to use an existing volume:
    # external: true

networks:
  app-network:
    name: app-network