feat(architecture): refine the foundational architecture design

2025-07-02 16:17:59 +08:00
parent 03e615a8fd
commit 5b4392894f
89 changed files with 18555 additions and 3521 deletions

docker-compose.prod.yml

@@ -0,0 +1,442 @@
version: "3.8"
services:
# PostgreSQL database (production)
postgres:
image: postgres:16.9
container_name: tyapi-postgres-prod
environment:
POSTGRES_DB: ${DB_NAME:-tyapi_prod}
POSTGRES_USER: ${DB_USER:-tyapi_user}
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
# Performance tuning
POSTGRES_SHARED_PRELOAD_LIBRARIES: pg_stat_statements
volumes:
- postgres_data:/var/lib/postgresql/data
- ./scripts/init.sql:/docker-entrypoint-initdb.d/init.sql
networks:
- tyapi-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-tyapi_user}"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
restart: unless-stopped
deploy:
resources:
limits:
memory: 2G
cpus: "1.0"
reservations:
memory: 512M
cpus: "0.5"
# Do not expose the port to the host in production
# ports:
# - "5432:5432"
# Redis cache (production)
redis:
image: redis:8.0.2
container_name: tyapi-redis-prod
environment:
REDIS_PASSWORD: ${REDIS_PASSWORD}
volumes:
- redis_data:/data
- ./deployments/docker/redis.conf:/usr/local/etc/redis/redis.conf
command: >
sh -c "
if [ ! -z '${REDIS_PASSWORD}' ]; then
redis-server /usr/local/etc/redis/redis.conf --requirepass ${REDIS_PASSWORD}
else
redis-server /usr/local/etc/redis/redis.conf
fi
"
networks:
- tyapi-network
healthcheck:
test: >
sh -c "
if [ ! -z '${REDIS_PASSWORD}' ]; then
redis-cli -a ${REDIS_PASSWORD} ping
else
redis-cli ping
fi
"
interval: 30s
timeout: 10s
retries: 5
restart: unless-stopped
deploy:
resources:
limits:
memory: 1G
cpus: "0.5"
reservations:
memory: 256M
cpus: "0.2"
# Do not expose the port to the host in production
# ports:
# - "6379:6379"
# TYAPI application
tyapi-app:
image: docker-registry.tianyuanapi.com/tyapi-server:${APP_VERSION:-latest}
container_name: tyapi-app-prod
environment:
# Environment
ENV: production
# Server settings
SERVER_PORT: ${SERVER_PORT:-8080}
SERVER_MODE: release
# Database settings
DB_HOST: postgres
DB_PORT: 5432
DB_USER: ${DB_USER:-tyapi_user}
DB_PASSWORD: ${DB_PASSWORD}
DB_NAME: ${DB_NAME:-tyapi_prod}
DB_SSLMODE: ${DB_SSLMODE:-require}
# Redis settings
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_PASSWORD: ${REDIS_PASSWORD}
# JWT settings
JWT_SECRET: ${JWT_SECRET}
# Monitoring settings
TRACING_ENABLED: true
TRACING_ENDPOINT: http://jaeger:4317
METRICS_ENABLED: true
# Logging settings
LOG_LEVEL: ${LOG_LEVEL:-info}
LOG_FORMAT: json
# SMS settings
SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID}
SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
SMS_SIGN_NAME: ${SMS_SIGN_NAME}
SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
ports:
- "${APP_PORT:-8080}:8080"
volumes:
- app_logs:/app/logs
networks:
- tyapi-network
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 60s
restart: unless-stopped
deploy:
resources:
limits:
memory: 1G
cpus: "1.0"
reservations:
memory: 256M
cpus: "0.3"
# Jaeger distributed tracing (production configuration)
jaeger:
image: jaegertracing/all-in-one:1.70.0
container_name: tyapi-jaeger-prod
ports:
- "${JAEGER_UI_PORT:-16686}:16686" # Jaeger UI
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
environment:
# Enable the OTLP receivers
COLLECTOR_OTLP_ENABLED: true
# Storage backend (Elasticsearch/Cassandra recommended for production)
SPAN_STORAGE_TYPE: memory
# Log level
LOG_LEVEL: warn
# Sampling strategy
SAMPLING_STRATEGIES_FILE: /etc/jaeger/sampling_strategies.json
# In-memory storage limit (should be increased for production)
MEMORY_MAX_TRACES: 100000
# Query service
QUERY_MAX_CLOCK_SKEW_ADJUSTMENT: 0
# Collector tuning (production)
COLLECTOR_QUEUE_SIZE: 5000
COLLECTOR_NUM_WORKERS: 100
# gRPC server
COLLECTOR_GRPC_SERVER_MAX_RECEIVE_MESSAGE_LENGTH: 8388608
COLLECTOR_GRPC_SERVER_MAX_CONNECTION_AGE: 120s
COLLECTOR_GRPC_SERVER_MAX_CONNECTION_IDLE: 60s
# HTTP server
COLLECTOR_HTTP_SERVER_HOST_PORT: :14268
COLLECTOR_HTTP_SERVER_READ_TIMEOUT: 30s
COLLECTOR_HTTP_SERVER_WRITE_TIMEOUT: 30s
# UI
QUERY_UI_CONFIG: /etc/jaeger/ui-config.json
# Security
QUERY_BASE_PATH: /
volumes:
- ./deployments/docker/jaeger-sampling-prod.json:/etc/jaeger/sampling_strategies.json
- ./deployments/docker/jaeger-ui-config.json:/etc/jaeger/ui-config.json
networks:
- tyapi-network
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:14269/health",
]
interval: 60s
timeout: 30s
retries: 3
start_period: 60s
restart: unless-stopped
deploy:
resources:
limits:
memory: 1G
cpus: "0.5"
reservations:
memory: 512M
cpus: "0.2"
# Nginx reverse proxy (optional)
nginx:
image: nginx:1.27.3-alpine
container_name: tyapi-nginx-prod
ports:
- "${NGINX_HTTP_PORT:-80}:80"
- "${NGINX_HTTPS_PORT:-443}:443"
volumes:
- ./deployments/docker/nginx.conf:/etc/nginx/nginx.conf
- ./deployments/docker/ssl:/etc/nginx/ssl
- nginx_logs:/var/log/nginx
networks:
- tyapi-network
depends_on:
- tyapi-app
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost/health",
]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
deploy:
resources:
limits:
memory: 256M
cpus: "0.3"
reservations:
memory: 64M
cpus: "0.1"
# Prometheus monitoring (production)
prometheus:
image: prom/prometheus:v2.55.1
container_name: tyapi-prometheus-prod
ports:
- "${PROMETHEUS_PORT:-9090}:9090"
volumes:
- ./deployments/docker/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--web.console.libraries=/etc/prometheus/console_libraries"
- "--web.console.templates=/etc/prometheus/consoles"
- "--web.enable-lifecycle"
- "--storage.tsdb.retention.time=30d"
- "--storage.tsdb.retention.size=10GB"
- "--web.enable-admin-api"
networks:
- tyapi-network
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:9090/-/healthy",
]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
deploy:
resources:
limits:
memory: 2G
cpus: "1.0"
reservations:
memory: 512M
cpus: "0.3"
# Grafana dashboards (production)
grafana:
image: grafana/grafana:11.4.0
container_name: tyapi-grafana-prod
ports:
- "${GRAFANA_PORT:-3000}:3000"
environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-Gf7nB3xM9cV6pQ2w}
GF_SECURITY_ADMIN_USER: ${GRAFANA_ADMIN_USER:-admin}
GF_INSTALL_PLUGINS: "grafana-clock-panel,grafana-simple-json-datasource"
GF_ANALYTICS_REPORTING_ENABLED: "false"
GF_ANALYTICS_CHECK_FOR_UPDATES: "false"
GF_USERS_ALLOW_SIGN_UP: "false"
GF_SERVER_ROOT_URL: "http://localhost:3000"
volumes:
- grafana_data:/var/lib/grafana
- ./deployments/docker/grafana/provisioning:/etc/grafana/provisioning
networks:
- tyapi-network
depends_on:
- prometheus
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:3000/api/health",
]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
deploy:
resources:
limits:
memory: 1G
cpus: "0.5"
reservations:
memory: 256M
cpus: "0.2"
# MinIO object storage (production)
minio:
image: minio/minio:RELEASE.2024-12-18T13-15-44Z
container_name: tyapi-minio-prod
ports:
- "${MINIO_API_PORT:-9000}:9000"
- "${MINIO_CONSOLE_PORT:-9001}:9001"
environment:
MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-Mn5oH8yK3bR7vX1z}
MINIO_BROWSER_REDIRECT_URL: "http://localhost:9001"
volumes:
- minio_data:/data
command: server /data --console-address ":9001"
networks:
- tyapi-network
healthcheck:
test:
["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
restart: unless-stopped
deploy:
resources:
limits:
memory: 1G
cpus: "0.5"
reservations:
memory: 256M
cpus: "0.2"
# pgAdmin database administration (production)
pgadmin:
image: dpage/pgadmin4:8.15
container_name: tyapi-pgadmin-prod
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_EMAIL:-admin@tyapi.com}
PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_PASSWORD:-Pa4dG9wF2sL6tN8u}
PGADMIN_CONFIG_SERVER_MODE: "True"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: "False"
PGADMIN_CONFIG_UPGRADE_CHECK_ENABLED: "False"
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION: "False"
ports:
- "${PGADMIN_PORT:-5050}:80"
volumes:
- pgadmin_data:/var/lib/pgadmin
- ./deployments/docker/pgadmin-servers.json:/pgadmin4/servers.json
- ./deployments/docker/pgadmin-passfile:/var/lib/pgadmin/passfile
networks:
- tyapi-network
depends_on:
postgres:
condition: service_healthy
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost/misc/ping",
]
interval: 30s
timeout: 10s
retries: 3
restart: unless-stopped
deploy:
resources:
limits:
memory: 512M
cpus: "0.3"
reservations:
memory: 128M
cpus: "0.1"
volumes:
postgres_data:
driver: local
redis_data:
driver: local
app_logs:
driver: local
nginx_logs:
driver: local
prometheus_data:
driver: local
grafana_data:
driver: local
minio_data:
driver: local
pgadmin_data:
driver: local
networks:
tyapi-network:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16
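# Usage sketch (the env-file name is an assumption): the stack would typically be started with
#   docker compose -f docker-compose.prod.yml --env-file .env.prod up -d
# and checked with
#   docker compose -f docker-compose.prod.yml ps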