-
-
Notifications
You must be signed in to change notification settings - Fork 665
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
166 lines (160 loc) · 5.72 KB
/
docker-compose.yml
File metadata and controls
166 lines (160 loc) · 5.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
# QuantDinger Docker Compose - One-Click Deployment
# Usage:
# 1. Copy env.example to .env and edit your settings
# cp backend_api_python/env.example backend_api_python/.env
# 2. IMPORTANT: Generate and set a secure SECRET_KEY in backend_api_python/.env
# SECRET_KEY=$(python3 -c "import secrets; print(secrets.token_hex(32))")
# Or edit backend_api_python/.env and replace SECRET_KEY value
# 3. docker-compose up -d --build
# 4. Open http://localhost:8888
#
# Frontend image: nginx serving prebuilt files from `frontend/dist` only.
# Vue source is maintained separately; build there and sync dist into this repo.
#
# Note: The container will NOT start if SECRET_KEY is using the default value.
# This is a security measure to prevent insecure deployments.
#
# Docker image sources:
# Default uses official Docker Hub images.
# To switch source globally, set a single `IMAGE_PREFIX` in project-root `.env`.
# Examples: empty (official), `docker.m.daocloud.io/library/`,
# `docker.xuanyuan.me/library/`.
# Backend Dockerfile tries Aliyun apt/PyPI first and falls back to official (no extra env).
# For slow pulls of base images, optionally set IMAGE_PREFIX or Docker Engine registry-mirrors.
services:
  # ========================
  # PostgreSQL Database
  # ========================
  postgres:
    image: ${IMAGE_PREFIX:-}postgres:16-alpine
    container_name: quantdinger-db
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-quantdinger}
      POSTGRES_USER: ${POSTGRES_USER:-quantdinger}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-quantdinger123}
      TZ: ${TZ:-Asia/Shanghai}
    # Raise PostgreSQL's max_connections so the backend pool (default
    # DB_POOL_MAX=50) plus psql/admin connections never exceed PG's own
    # limit. PG default is 100; 150 gives headroom for future scaling.
    command:
      - "postgres"
      - "-c"
      - "max_connections=${PG_MAX_CONNECTIONS:-150}"
      - "-c"
      - "shared_buffers=${PG_SHARED_BUFFERS:-256MB}"
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # :ro — initdb scripts are only read (and only on first boot of an
      # empty data dir); read-only guards the repo copy from modification.
      - ./backend_api_python/migrations/init.sql:/docker-entrypoint-initdb.d/01-init.sql:ro
    ports:
      # Default binds to localhost only; set DB_PORT to publish differently.
      - "${DB_PORT:-127.0.0.1:5432}:5432"
    networks:
      - quantdinger-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-quantdinger} -d ${POSTGRES_DB:-quantdinger}"]
      interval: 10s
      timeout: 5s
      retries: 5
      # Grace period: first boot runs initdb + init.sql; failures during
      # this window don't count against the retry budget.
      start_period: 15s
  # ========================
  # Redis (optional cache layer)
  # ========================
  redis:
    image: ${IMAGE_PREFIX:-}redis:7-alpine
    container_name: quantdinger-redis
    restart: unless-stopped
    # Pure cache: cap memory at 128 MB and evict least-recently-used keys
    # across the whole keyspace when full. No data volume is mounted —
    # cache contents are disposable and rebuilt on demand.
    command: ["redis-server", "--maxmemory", "128mb", "--maxmemory-policy", "allkeys-lru"]
    ports:
      # Default binds to localhost only; override REDIS_PORT to change.
      - "${REDIS_PORT:-127.0.0.1:6379}:6379"
    networks:
      - quantdinger-network
    healthcheck:
      # A PONG reply confirms the server is accepting commands.
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
# ========================
# Backend API (Python/Flask)
# ========================
backend:
build:
context: ./backend_api_python
dockerfile: Dockerfile
args:
BASE_IMAGE: ${IMAGE_PREFIX:-}python:3.12-slim-bookworm
container_name: quantdinger-backend
restart: unless-stopped
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
ports:
- "${BACKEND_PORT:-127.0.0.1:5000}:5000"
volumes:
- backend_logs:/app/logs
- backend_data:/app/data
# Mount .env for runtime config and admin-panel updates
- ./backend_api_python/.env:/app/.env
environment:
- PYTHON_API_HOST=0.0.0.0
- PYTHON_API_PORT=5000
- TZ=${TZ:-Asia/Shanghai}
- DATABASE_URL=postgresql://${POSTGRES_USER:-quantdinger}:${POSTGRES_PASSWORD:-quantdinger123}@postgres:5432/${POSTGRES_DB:-quantdinger}
- DB_TYPE=postgresql
- REDIS_HOST=redis
- REDIS_PORT=6379
- CACHE_ENABLED=true
# Database connection pool tuning (see app/utils/db_postgres.py).
# Defaults are safe for ~50 concurrent DB users; raise if you run many
# bots or see `connection pool exhausted` errors.
- DB_POOL_MIN=${DB_POOL_MIN:-5}
- DB_POOL_MAX=${DB_POOL_MAX:-50}
- DB_POOL_ACQUIRE_TIMEOUT=${DB_POOL_ACQUIRE_TIMEOUT:-10}
- DB_POOL_HEALTH_CHECK=${DB_POOL_HEALTH_CHECK:-true}
# Route-level executor tuning (each worker may use one DB connection).
- MARKET_EXECUTOR_WORKERS=${MARKET_EXECUTOR_WORKERS:-6}
- PORTFOLIO_EXECUTOR_WORKERS=${PORTFOLIO_EXECUTOR_WORKERS:-3}
# Gunicorn concurrency (threads within the single worker process).
- GUNICORN_WORKERS=${GUNICORN_WORKERS:-1}
- GUNICORN_THREADS=${GUNICORN_THREADS:-8}
# IBKR/MT5 need local TWS or MT5 terminal; set false on SaaS cloud (see env.example).
- ALLOW_LOCAL_DESKTOP_BROKERS=${ALLOW_LOCAL_DESKTOP_BROKERS:-true}
networks:
- quantdinger-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5000/api/health"]
interval: 30s
timeout: 10s
retries: 3
# ========================
# Frontend (Nginx serving prebuilt dist)
# ========================
frontend:
build:
context: .
dockerfile: frontend/Dockerfile
args:
RUNTIME_IMAGE: ${IMAGE_PREFIX:-}nginx:1.25-alpine
container_name: quantdinger-frontend
restart: unless-stopped
ports:
- "${FRONTEND_PORT:-8888}:80"
depends_on:
- backend
networks:
- quantdinger-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/health"]
interval: 30s
timeout: 10s
retries: 3
volumes:
  # PostgreSQL data directory — persists across container rebuilds.
  postgres_data:
    driver: local
  # Backend application logs (mounted at /app/logs).
  backend_logs:
    driver: local
  # Backend runtime data (mounted at /app/data; exact contents are
  # app-defined — TODO confirm against backend code).
  backend_data:
    driver: local
networks:
  # Private bridge network shared by all services; containers reach each
  # other by service name (postgres, redis, backend).
  quantdinger-network:
    driver: bridge