feat: fix provider test endpoints and update deployment scripts

- Finnhub: Add missing /test endpoint
- AlphaVantage: Fix test endpoint deserialization (handle null api_url)
- Mock Provider: Add /test endpoint and fix Zodios validation error by adding Mock enum
- Deployment: Remove Mock Provider from production deployment script
- Infrastructure: Add production Dockerfiles and compose configs
Lv, Qi 2025-12-01 01:28:20 +08:00
parent 6065b1ff48
commit fbb1703b00
17 changed files with 1411 additions and 268 deletions

Tiltfile.prod Normal file

@@ -0,0 +1,23 @@
# Load the production environment configuration
docker_compose('docker-compose.prod.yml')

# Service list
# These services involve slow steps: compiling code in release mode, or building the frontend.
# We set them to manual trigger mode so that accidental file edits during development
# don't kick off a long automatic rebuild.
services = [
'data-persistence-service',
'api-gateway',
'mock-provider-service',
'alphavantage-provider-service',
'tushare-provider-service',
'finnhub-provider-service',
'yfinance-provider-service',
'report-generator-service',
'workflow-orchestrator-service',
'frontend'
]
# Set each service's trigger mode to manual
for name in services:
    dc_resource(name, trigger_mode=TRIGGER_MODE_MANUAL)
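These resources then rebuild only on demand, either from the refresh button in the Tilt UI or from the CLI; a minimal sketch (resource names as defined in the list above):

# Start the production stack with this Tiltfile
tilt up -f Tiltfile.prod
# Manually kick off a rebuild of a single service
tilt trigger api-gateway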

deploy_to_harbor.sh Normal file

@@ -0,0 +1,339 @@
#!/bin/bash
# Exit immediately on any error
set -e

# Configuration
REGISTRY="harbor.3prism.ai"
PROJECT="fundamental_analysis"
VERSION="latest"
NAMESPACE="$REGISTRY/$PROJECT"

# Colored output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== Building and pushing images to $NAMESPACE ===${NC}"

# Service list
# Format: "service-name:dockerfile-path"
# Note: all backend services now use the shared docker/Dockerfile.backend.prod
SERVICES=(
    "data-persistence-service:docker/Dockerfile.backend.prod"
    "api-gateway:docker/Dockerfile.backend.prod"
    "alphavantage-provider-service:docker/Dockerfile.backend.prod"
    "tushare-provider-service:docker/Dockerfile.backend.prod"
    "finnhub-provider-service:docker/Dockerfile.backend.prod"
    "yfinance-provider-service:docker/Dockerfile.backend.prod"
    "report-generator-service:docker/Dockerfile.backend.prod"
    "workflow-orchestrator-service:docker/Dockerfile.backend.prod"
    "mock-provider-service:docker/Dockerfile.backend.prod"
    "frontend:docker/Dockerfile.frontend.prod"
)
# Running total of image sizes
TOTAL_SIZE=0
for entry in "${SERVICES[@]}"; do
    KEY="${entry%%:*}"
    DOCKERFILE="${entry#*:}"
    IMAGE_NAME="$NAMESPACE/$KEY:$VERSION"

    echo -e "\n${YELLOW}>>> Building $KEY ...${NC}"
    echo "Using Dockerfile: $DOCKERFILE"

    # Build the image
    if [ "$KEY" == "frontend" ]; then
        # The frontend does not need the SERVICE_NAME build-arg
        docker build -t "$IMAGE_NAME" -f "$DOCKERFILE" .
    elif [ "$KEY" == "data-persistence-service" ]; then
        # Special case: data-persistence-service's binary name differs from the service name
        docker build -t "$IMAGE_NAME" --build-arg SERVICE_NAME="data-persistence-service-server" -f "$DOCKERFILE" .
    else
        # Backend services need SERVICE_NAME passed through
        docker build -t "$IMAGE_NAME" --build-arg SERVICE_NAME="$KEY" -f "$DOCKERFILE" .
    fi

    # Image size in MB
    SIZE_BYTES=$(docker inspect "$IMAGE_NAME" --format='{{.Size}}')
    SIZE_MB=$(echo "scale=2; $SIZE_BYTES / 1024 / 1024" | bc)
    echo -e "${GREEN}$KEY built. Size: ${SIZE_MB} MB${NC}"

    # Accumulate the total size
    TOTAL_SIZE=$(echo "$TOTAL_SIZE + $SIZE_BYTES" | bc)

    echo -e "${YELLOW}>>> Pushing $KEY to Harbor ...${NC}"
    docker push "$IMAGE_NAME"
done
TOTAL_SIZE_MB=$(echo "scale=2; $TOTAL_SIZE / 1024 / 1024" | bc)
echo -e "\n${GREEN}=== 所有镜像处理完成 ===${NC}"
echo -e "${GREEN}总大小: ${TOTAL_SIZE_MB} MB${NC}"
# 生成服务器使用的 docker-compose.server.yml
echo -e "\n${YELLOW}>>> 正在生成服务器部署文件 docker-compose.server.yml ...${NC}"
# 基于 docker-compose.prod.yml 生成,但是替换 build 为 image
# 这里我们直接手动定义,因为解析 yaml 替换比较复杂,且我们清楚结构
cat > docker-compose.server.yml <<EOF
services:
  postgres-db:
    image: timescale/timescaledb:2.15.2-pg16
    container_name: fundamental-postgres
    command: -c shared_preload_libraries=timescaledb
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: fundamental
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -d fundamental"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - app-network
    restart: always

  nats:
    image: nats:2.9
    container_name: fundamental-nats
    volumes:
      - nats_data:/data
    networks:
      - app-network
    restart: always

  data-persistence-service:
    image: $NAMESPACE/data-persistence-service:$VERSION
    container_name: data-persistence-service
    environment:
      HOST: 0.0.0.0
      PORT: 3000
      DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
      RUST_LOG: info
      RUST_BACKTRACE: "1"
      SKIP_MIGRATIONS_ON_MISMATCH: "1"
    depends_on:
      postgres-db:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: always

  api-gateway:
    image: $NAMESPACE/api-gateway:$VERSION
    container_name: api-gateway
    environment:
      SERVER_PORT: 4000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
      RUST_LOG: info,axum=info
      RUST_BACKTRACE: "1"
    depends_on:
      nats:
        condition: service_started
      data-persistence-service:
        condition: service_healthy
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: always

  mock-provider-service:
    image: $NAMESPACE/mock-provider-service:$VERSION
    container_name: mock-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8006
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: mock-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  alphavantage-provider-service:
    image: $NAMESPACE/alphavantage-provider-service:$VERSION
    container_name: alphavantage-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: alphavantage-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  tushare-provider-service:
    image: $NAMESPACE/tushare-provider-service:$VERSION
    container_name: tushare-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8001
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      TUSHARE_API_URL: http://api.waditu.com
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: tushare-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  finnhub-provider-service:
    image: $NAMESPACE/finnhub-provider-service:$VERSION
    container_name: finnhub-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8002
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      FINNHUB_API_URL: https://finnhub.io/api/v1
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: finnhub-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  yfinance-provider-service:
    image: $NAMESPACE/yfinance-provider-service:$VERSION
    container_name: yfinance-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8003
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: yfinance-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    dns:
      - 8.8.8.8
      - 8.8.4.4
    restart: always

  report-generator-service:
    image: $NAMESPACE/report-generator-service:$VERSION
    container_name: report-generator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8004
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      GOTENBERG_URL: http://gotenberg:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
      - gotenberg
    networks:
      - app-network
    restart: always

  workflow-orchestrator-service:
    image: $NAMESPACE/workflow-orchestrator-service:$VERSION
    container_name: workflow-orchestrator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8005
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  gotenberg:
    image: gotenberg/gotenberg:8
    container_name: gotenberg
    networks:
      - app-network
    restart: always

  frontend:
    image: $NAMESPACE/frontend:$VERSION
    container_name: fundamental-frontend
    ports:
      - "8080:80" # Map host 8080 to container 80 (Nginx)
    depends_on:
      api-gateway:
        condition: service_healthy
    networks:
      - app-network
    restart: always

volumes:
  workflow_data:
  pgdata:
  nats_data:

networks:
  app-network:
EOF
echo -e "${GREEN}生成完成: docker-compose.server.yml${NC}"
echo -e "请将此文件复制到远程服务器,并执行: docker-compose -f docker-compose.server.yml up -d"

docker-compose.prod.yml Normal file

@@ -0,0 +1,292 @@
services:
  postgres-db:
    image: timescale/timescaledb:2.15.2-pg16
    container_name: fundamental-postgres
    command: -c shared_preload_libraries=timescaledb
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: fundamental
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -d fundamental"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - app-network
    restart: always

  nats:
    image: nats:2.9
    container_name: fundamental-nats
    volumes:
      - nats_data:/data
    networks:
      - app-network
    restart: always

  data-persistence-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: data-persistence-service-server
    container_name: data-persistence-service
    # Note: The binary name in Dockerfile is generic 'app' or we can override entrypoint.
    # The Dockerfile entrypoint is /usr/local/bin/app.
    environment:
      HOST: 0.0.0.0
      PORT: 3000
      DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
      RUST_LOG: info
      RUST_BACKTRACE: "1"
      SKIP_MIGRATIONS_ON_MISMATCH: "1"
    depends_on:
      postgres-db:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: always

  api-gateway:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: api-gateway
    container_name: api-gateway
    environment:
      SERVER_PORT: 4000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
      RUST_LOG: info,axum=info
      RUST_BACKTRACE: "1"
    depends_on:
      nats:
        condition: service_started
      data-persistence-service:
        condition: service_healthy
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: always

  mock-provider-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: mock-provider-service
    container_name: mock-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8006
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: mock-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  alphavantage-provider-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: alphavantage-provider-service
    container_name: alphavantage-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: alphavantage-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  tushare-provider-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: tushare-provider-service
    container_name: tushare-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8001
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      TUSHARE_API_URL: http://api.waditu.com
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: tushare-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  finnhub-provider-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: finnhub-provider-service
    container_name: finnhub-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8002
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      FINNHUB_API_URL: https://finnhub.io/api/v1
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: finnhub-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  yfinance-provider-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: yfinance-provider-service
    container_name: yfinance-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8003
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: yfinance-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    dns:
      - 8.8.8.8
      - 8.8.4.4
    restart: always

  report-generator-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: report-generator-service
    container_name: report-generator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8004
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      GOTENBERG_URL: http://gotenberg:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
      - gotenberg
    networks:
      - app-network
    restart: always

  gotenberg:
    image: gotenberg/gotenberg:8
    container_name: gotenberg
    networks:
      - app-network
    restart: always

  workflow-orchestrator-service:
    build:
      context: .
      dockerfile: docker/Dockerfile.backend.prod
      args:
        SERVICE_NAME: workflow-orchestrator-service
    container_name: workflow-orchestrator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8005
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  frontend:
    build:
      context: .
      dockerfile: docker/Dockerfile.frontend.prod
    container_name: fundamental-frontend
    ports:
      - "8080:80" # Map host 8080 to container 80 (Nginx)
    depends_on:
      api-gateway:
        condition: service_healthy
    networks:
      - app-network
    restart: always

volumes:
  workflow_data:
  pgdata:
  nats_data:

networks:
  app-network:
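For a local production-style run, this file can be driven through Tiltfile.prod above or directly with Compose; a minimal sketch:

# Build everything and bring the stack up in the background
docker compose -f docker-compose.prod.yml up -d --build
# Check service state and health
docker compose -f docker-compose.prod.yml ps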

docker-compose.server.yml Normal file

@@ -0,0 +1,230 @@
services:
  postgres-db:
    image: timescale/timescaledb:2.15.2-pg16
    container_name: fundamental-postgres
    command: -c shared_preload_libraries=timescaledb
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: fundamental
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -d fundamental"]
      interval: 5s
      timeout: 5s
      retries: 10
    networks:
      - app-network
    restart: always

  nats:
    image: nats:2.9
    container_name: fundamental-nats
    volumes:
      - nats_data:/data
    networks:
      - app-network
    restart: always

  data-persistence-service:
    image: harbor.3prism.ai/fundamental_analysis/data-persistence-service:latest
    container_name: data-persistence-service
    environment:
      HOST: 0.0.0.0
      PORT: 3000
      DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
      RUST_LOG: info
      RUST_BACKTRACE: "1"
      SKIP_MIGRATIONS_ON_MISMATCH: "1"
    depends_on:
      postgres-db:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - app-network
    restart: always

  api-gateway:
    image: harbor.3prism.ai/fundamental_analysis/api-gateway:latest
    container_name: api-gateway
    environment:
      SERVER_PORT: 4000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
      RUST_LOG: info,axum=info
      RUST_BACKTRACE: "1"
    depends_on:
      nats:
        condition: service_started
      data-persistence-service:
        condition: service_healthy
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: always

  alphavantage-provider-service:
    image: harbor.3prism.ai/fundamental_analysis/alphavantage-provider-service:latest
    container_name: alphavantage-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: alphavantage-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  tushare-provider-service:
    image: harbor.3prism.ai/fundamental_analysis/tushare-provider-service:latest
    container_name: tushare-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8001
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      TUSHARE_API_URL: http://api.waditu.com
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: tushare-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  finnhub-provider-service:
    image: harbor.3prism.ai/fundamental_analysis/finnhub-provider-service:latest
    container_name: finnhub-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8002
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      FINNHUB_API_URL: https://finnhub.io/api/v1
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: finnhub-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  yfinance-provider-service:
    image: harbor.3prism.ai/fundamental_analysis/yfinance-provider-service:latest
    container_name: yfinance-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8003
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: yfinance-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    dns:
      - 8.8.8.8
      - 8.8.4.4
    restart: always

  report-generator-service:
    image: harbor.3prism.ai/fundamental_analysis/report-generator-service:latest
    container_name: report-generator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8004
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      GOTENBERG_URL: http://gotenberg:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
      - gotenberg
    networks:
      - app-network
    restart: always

  workflow-orchestrator-service:
    image: harbor.3prism.ai/fundamental_analysis/workflow-orchestrator-service:latest
    container_name: workflow-orchestrator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8005
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always

  gotenberg:
    image: gotenberg/gotenberg:8
    container_name: gotenberg
    networks:
      - app-network
    restart: always

  frontend:
    image: harbor.3prism.ai/fundamental_analysis/frontend:latest
    container_name: fundamental-frontend
    ports:
      - "28080:80" # Map host 28080 to container 80 (Nginx)
    depends_on:
      api-gateway:
        condition: service_healthy
    networks:
      - app-network
    restart: always

volumes:
  workflow_data:
  pgdata:
  nats_data:

networks:
  app-network:

docker/Dockerfile.backend.prod Normal file

@@ -0,0 +1,67 @@
# 1. Build Stage
FROM rust:1.90-bookworm as builder
ARG SERVICE_NAME
WORKDIR /usr/src/app
# Copy the entire workspace
COPY . .
# Build the specific service in release mode
ENV SQLX_OFFLINE=true
RUN cargo build --release --bin ${SERVICE_NAME}
# Prepare runtime assets directory
RUN mkdir -p /app/assets
# Conditionally copy potential asset folders if they exist for the service
# We use a shell loop or explicit checks. Docker COPY doesn't support conditionals well.
# So we do it in the builder stage using shell.
# 1. Migrations (e.g., data-persistence-service)
RUN if [ -d "services/${SERVICE_NAME}/migrations" ]; then \
mkdir -p /app/assets/migrations && \
cp -r services/${SERVICE_NAME}/migrations/* /app/assets/migrations/; \
fi
# 2. Templates (e.g., report-generator-service)
RUN if [ -d "services/${SERVICE_NAME}/templates" ]; then \
mkdir -p /app/assets/templates && \
cp -r services/${SERVICE_NAME}/templates/* /app/assets/templates/; \
fi
# 2.1 Cookies (e.g., report-generator-service)
RUN if [ -f "services/${SERVICE_NAME}/cookies.txt" ]; then \
cp services/${SERVICE_NAME}/cookies.txt /app/assets/cookies.txt; \
fi
# 3. Config folder (root level, needed by some services like data-persistence)
# We copy it to a specific location.
RUN cp -r config /app/config
# 4. Service Kit Mirror: needed for the build only; the runtime image does not
# compile code, so it is not copied.
# 2. Runtime Stage
FROM debian:bookworm-slim
ARG SERVICE_NAME
ENV TZ=Asia/Shanghai
# Install dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libssl3 \
curl \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copy binary
COPY --from=builder /usr/src/app/target/release/${SERVICE_NAME} /usr/local/bin/app
# Copy prepared assets
COPY --from=builder /app/assets /app/
COPY --from=builder /app/config /app/config
# Set the binary as the entrypoint
ENTRYPOINT ["/usr/local/bin/app"]
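Every backend image comes from this one Dockerfile, varied only by the SERVICE_NAME build-arg (data-persistence-service passes its differing binary name, as in deploy_to_harbor.sh); a sketch:

docker build -t harbor.3prism.ai/fundamental_analysis/finnhub-provider-service:latest \
  --build-arg SERVICE_NAME=finnhub-provider-service \
  -f docker/Dockerfile.backend.prod .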

docker/Dockerfile.builder Normal file

@@ -0,0 +1,13 @@
FROM rust:1.90-bookworm
WORKDIR /usr/src/app
# Copy the entire workspace
COPY . .
# Set SQLX offline mode to avoid needing a running DB during build
ENV SQLX_OFFLINE=true
# Build the entire workspace in release mode
# This compiles all crates in the workspace at once
RUN cargo build --release --workspace
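This image is never deployed; it only serves as a source of compiled binaries, which the updated deploy script extracts via a throwaway container. A condensed sketch of that flow:

docker build -t fundamental-workspace-builder -f docker/Dockerfile.builder .
docker create --name extract fundamental-workspace-builder
mkdir -p dist_bin
docker cp extract:/usr/src/app/target/release/api-gateway dist_bin/api-gateway
docker rm -f extract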

docker/Dockerfile.dist Normal file

@@ -0,0 +1,25 @@
FROM debian:bookworm-slim
ENV TZ=Asia/Shanghai
# Install minimal runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libssl3 \
curl \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# The build context is expected to be prepared by the deployment script
# It should contain:
# - app (the binary)
# - config/ (if needed)
# - assets/ (if needed)
COPY . .
# Ensure the binary is executable
RUN chmod +x /app/app
ENTRYPOINT ["/app/app"]
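A minimal sketch of assembling such a context by hand for one service (the updated deploy script automates exactly this):

mkdir -p ctx/assets
cp dist_bin/api-gateway ctx/app      # binary must be named 'app'
cp -r config ctx/config              # root-level config directory
docker build -t harbor.3prism.ai/fundamental_analysis/api-gateway:latest \
  -f docker/Dockerfile.dist ctx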

docker/Dockerfile.frontend.prod Normal file

@@ -0,0 +1,24 @@
# 1. Build Stage
FROM node:20-slim AS builder
WORKDIR /app
# Environment variables for build time
# ENV NODE_ENV=production <- REMOVED: This causes npm ci to skip devDependencies (tsc, vite)
# These must match the Nginx proxy paths
ENV VITE_API_TARGET=/api
ENV NEXT_PUBLIC_BACKEND_URL=/api/v1
COPY frontend/package.json frontend/package-lock.json ./
RUN npm ci
COPY frontend/ .
RUN npm run build
# 2. Runtime Stage
FROM nginx:alpine
COPY --from=builder /app/dist /usr/share/nginx/html
COPY docker/nginx.prod.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
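A sketch of building and running this image on its own (host port 8080 as in docker-compose.prod.yml):

docker build -t fundamental-frontend -f docker/Dockerfile.frontend.prod .
# Note: nginx proxies /api/ to an 'api-gateway' host on the compose network,
# so a standalone run only serves the static bundle
docker run --rm -p 8080:80 fundamental-frontend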

docker/nginx.prod.conf Normal file

@@ -0,0 +1,36 @@
server {
    listen 80;
    server_name localhost;

    root /usr/share/nginx/html;
    index index.html;

    # Compression
    gzip on;
    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

    location / {
        try_files $uri $uri/ /index.html;
    }

    # Proxy API requests to the backend
    # Matches /api/v1/..., /api/context/..., etc.
    location /api/ {
        proxy_pass http://api-gateway:4000/api/;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Proxy specific endpoints that are at root level in api-gateway
    location /health {
        proxy_pass http://api-gateway:4000/health;
        proxy_set_header Host $host;
    }

    location /tasks/ {
        proxy_pass http://api-gateway:4000/tasks/;
        proxy_set_header Host $host;
    }
}
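Once the stack is up, the proxy paths can be spot-checked from the host; a sketch against the 8080 mapping in docker-compose.prod.yml:

curl -i http://localhost:8080/            # SPA entry point (index.html)
curl -i http://localhost:8080/health      # proxied to api-gateway:4000/health
# Anything under /api/ is forwarded to api-gateway:4000/api/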


@@ -231,6 +231,7 @@ export const DataSourceProvider = z.enum([
   "Finnhub",
   "Alphavantage",
   "Yfinance",
+  "Mock",
 ]);

 export const DataSourceConfig = z.object({
   api_key: z.union([z.string(), z.null()]).optional(),


@@ -1,5 +1,5 @@
 import { useDataSources, useUpdateDataSources, useTestDataSource, useRegisteredProviders } from "@/hooks/useConfig"
-import { DataSourceConfig } from "@/types/config"
+import { DataSourceConfig, DataSourceProvider, DataSourceProviders } from "@/types/config"
 import { useToast } from "@/hooks/use-toast"
 import { DynamicConfigForm } from "@/components/config/DynamicConfigForm"
@@ -98,18 +98,28 @@ export function DataSourceTab() {
   // Find existing config or create default
   const configEntry = dataSources ? (dataSources as Record<string, any>)[meta.id] : undefined;

-  // Default config structure.
-  // Note: We default 'provider' field to the ID from metadata.
-  // Backend expects specific enum values for 'provider', but currently our IDs match (lowercase/uppercase handling needed?)
-  // The backend DataSourceProvider enum is PascalCase (Tushare), but IDs are likely lowercase (tushare).
-  // However, DataSourceConfig.provider is an enum.
-  // We might need to map ID to Enum if strict.
-  // For now, assuming the backend persistence can handle the string or we just store it.
-  // Actually, the 'provider' field in DataSourceConfig is DataSourceProvider enum.
-  // Let's hope the JSON deserialization handles "tushare" -> Tushare.
+  // We know that meta.id must be a valid DataSourceProvider because the backend
+  // only registers providers that are part of the enum system.
+  // However, meta.id comes as lowercase (e.g., "tushare") while the Enum expects PascalCase (e.g., "Tushare").
+  // To maintain strict type safety and follow the Single Source of Truth,
+  // we need to cast or map it correctly.
+  // Since we cannot change the backend serialization easily without breaking other things,
+  // and we must respect the Zod schema, we try to match it case-insensitively to the Enum.
+
+  let providerEnum = Object.values(DataSourceProviders).find(
+    (p) => p.toLowerCase() === meta.id.toLowerCase()
+  );
+  if (!providerEnum) {
+    console.warn(`Provider ID '${meta.id}' from metadata does not match any known DataSourceProvider enum.`);
+    // Fallback or skip? If we skip, the user can't configure it.
+    // If we cast forcefully, Zod might reject it on save.
+    // Let's attempt to use it as is but cast to satisfy TS, acknowledging the risk if it doesn't match.
+    providerEnum = meta.id as DataSourceProvider;
+  }

   const config = (configEntry || {
-    provider: meta.id, // This might need capitalization adjustment
+    provider: providerEnum,
     enabled: false,
     // We init other fields as empty, they will be filled by DynamicConfigForm
   }) as DataSourceConfig;

deploy_to_harbor.sh

@@ -6,7 +6,7 @@ set -e
 # Configuration
 REGISTRY="harbor.3prism.ai"
 PROJECT="fundamental_analysis"
-VERSION="latest" # or use $(date +%Y%m%d%H%M%S) for a timestamped version
+VERSION="latest"
 NAMESPACE="$REGISTRY/$PROJECT"

 # Colored output
@@ -15,70 +15,154 @@ YELLOW='\033[1;33m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color

-echo -e "${GREEN}=== Building and pushing images to $NAMESPACE ===${NC}"
+# Clean up working directories
+function cleanup {
+    echo -e "\n${YELLOW}>>> Cleaning up temporary files...${NC}"
+    rm -rf ./dist_bin
+    rm -rf ./temp_build_context
+    # Try to remove the build container (if it exists)
+    docker rm -f fundamental-builder-extract 2>/dev/null || true
+}
+trap cleanup EXIT

-# Service list and their Dockerfile paths
-# Format: "service-name:dockerfile-path"
-SERVICES=(
-    "data-persistence-service:services/data-persistence-service/Dockerfile"
-    "api-gateway:services/api-gateway/Dockerfile"
-    "alphavantage-provider-service:services/alphavantage-provider-service/Dockerfile"
-    "tushare-provider-service:services/tushare-provider-service/Dockerfile"
-    "finnhub-provider-service:services/finnhub-provider-service/Dockerfile"
-    "yfinance-provider-service:services/yfinance-provider-service/Dockerfile"
-    "report-generator-service:services/report-generator-service/Dockerfile"
-    "frontend:frontend/Dockerfile.prod"
+echo -e "${GREEN}=== Starting the optimized build-and-deploy flow ===${NC}"
+echo -e "Target registry: $NAMESPACE"
+
+# ==========================================
+# Stage 1: Global build (Build Once)
+# ==========================================
+echo -e "\n${YELLOW}>>> [Stage 1/3] Global build: compiling all Rust services...${NC}"
+echo "Using Dockerfile: docker/Dockerfile.builder"
+
+# Check whether a rebuild is needed (this step could be optimized further, but for
+# simplicity we always build and rely on Docker layer caching)
+docker build -t fundamental-workspace-builder -f docker/Dockerfile.builder .
+
+# Extract the binaries
+echo -e "${YELLOW}>>> Extracting binaries...${NC}"
+mkdir -p ./dist_bin
+# Create a temporary container
+docker create --name fundamental-builder-extract fundamental-workspace-builder
+
+# Copy the binaries out of the container's target/release directory.
+# Note: copying the whole release directory would be too large, so we copy only the binaries.
+# docker cp doesn't support wildcards for specific file lists, so we rely on known names.
+
+# Binary name map (service directory -> binary name)
+# If the binary name matches the directory name, only the directory name is needed.
+declare -A SERVICE_BIN_MAP
+SERVICE_BIN_MAP=(
+    ["data-persistence-service"]="data-persistence-service-server"
+    ["api-gateway"]="api-gateway"
+    ["alphavantage-provider-service"]="alphavantage-provider-service"
+    ["tushare-provider-service"]="tushare-provider-service"
+    ["finnhub-provider-service"]="finnhub-provider-service"
+    ["yfinance-provider-service"]="yfinance-provider-service"
+    ["report-generator-service"]="report-generator-service"
+    ["workflow-orchestrator-service"]="workflow-orchestrator-service"
+    # ["mock-provider-service"]="mock-provider-service" # Skipped for Prod
 )

-# Running total of image sizes
+for SERVICE_DIR in "${!SERVICE_BIN_MAP[@]}"; do
+    BINARY_NAME="${SERVICE_BIN_MAP[$SERVICE_DIR]}"
+    echo "Extracting: $BINARY_NAME"
+    docker cp "fundamental-builder-extract:/usr/src/app/target/release/$BINARY_NAME" "./dist_bin/$BINARY_NAME"
+done
+
+# Remove the temporary container
+docker rm -f fundamental-builder-extract
+echo -e "${GREEN}√ Binary extraction complete${NC}"
+
+# ==========================================
+# Stage 2: Frontend build
+# ==========================================
+echo -e "\n${YELLOW}>>> [Stage 2/3] Building the frontend...${NC}"
+FRONTEND_IMAGE="$NAMESPACE/frontend:$VERSION"
+docker build -t "$FRONTEND_IMAGE" -f docker/Dockerfile.frontend.prod .
+echo -e "${YELLOW}>>> Pushing the frontend image...${NC}"
+docker push "$FRONTEND_IMAGE"
+echo -e "${GREEN}√ Frontend done${NC}"
+
+# ==========================================
+# Stage 3: Package and distribute (Package Many)
+# ==========================================
+echo -e "\n${YELLOW}>>> [Stage 3/3] Packaging and pushing backend microservices...${NC}"
 TOTAL_SIZE=0

-for entry in "${SERVICES[@]}"; do
-    KEY="${entry%%:*}"
-    DOCKERFILE="${entry#*:}"
-    IMAGE_NAME="$NAMESPACE/$KEY:$VERSION"
+for SERVICE_DIR in "${!SERVICE_BIN_MAP[@]}"; do
+    BINARY_NAME="${SERVICE_BIN_MAP[$SERVICE_DIR]}"
+    IMAGE_NAME="$NAMESPACE/$SERVICE_DIR:$VERSION"

-    echo -e "\n${YELLOW}>>> Building $KEY ...${NC}"
-    echo "Using Dockerfile: $DOCKERFILE"
+    echo -e "\n------------------------------------------------"
+    echo -e "${YELLOW}Processing service: $SERVICE_DIR${NC}"

-    # Build the image
-    # Note: the build context is always the project root (.)
-    docker build -t "$IMAGE_NAME" -f "$DOCKERFILE" .
+    # Prepare the build context
+    CONTEXT_DIR="./temp_build_context/$SERVICE_DIR"
+    rm -rf "$CONTEXT_DIR"
+    mkdir -p "$CONTEXT_DIR"
+    mkdir -p "$CONTEXT_DIR/assets"

-    # Image size (MB)
-    SIZE_BYTES=$(docker inspect "$IMAGE_NAME" --format='{{.Size}}')
-    SIZE_MB=$(echo "scale=2; $SIZE_BYTES / 1024 / 1024" | bc)
-    echo -e "${GREEN}$KEY built. Size: ${SIZE_MB} MB${NC}"
+    # 1. Copy the binary and rename it to app
+    cp "./dist_bin/$BINARY_NAME" "$CONTEXT_DIR/app"

-    # Accumulate the total size
-    TOTAL_SIZE=$(echo "$TOTAL_SIZE + $SIZE_BYTES" | bc)
+    # 2. Copy the config directory (if needed)
+    # Services such as data-persistence-service need the root-level config
+    cp -r config "$CONTEXT_DIR/config"

-    # Warn on abnormally large single images (over 500MB is usually wrong for a Rust
-    # microservice, unless it bundles large models)
-    if (( $(echo "$SIZE_MB > 500" | bc -l) )); then
-        echo -e "${RED}Warning: the $KEY image exceeds 500MB; check the Dockerfile for unnecessary files!${NC}"
-        # We could pause and ask the user here, or just warn
-    fi
+    # 3. Copy service-specific assets
+    # 3.1 Migrations
+    if [ -d "services/$SERVICE_DIR/migrations" ]; then
+        echo "  - includes migrations"
+        mkdir -p "$CONTEXT_DIR/assets/migrations"
+        cp -r "services/$SERVICE_DIR/migrations/"* "$CONTEXT_DIR/assets/migrations/"
+    fi

-    echo -e "${YELLOW}>>> Pushing $KEY to Harbor ...${NC}"
+    # 3.2 Templates
+    if [ -d "services/$SERVICE_DIR/templates" ]; then
+        echo "  - includes templates"
+        mkdir -p "$CONTEXT_DIR/assets/templates"
+        cp -r "services/$SERVICE_DIR/templates/"* "$CONTEXT_DIR/assets/templates/"
+    fi
+
+    # 3.3 Cookies
+    if [ -f "services/$SERVICE_DIR/cookies.txt" ]; then
+        echo "  - includes cookies.txt"
+        cp "services/$SERVICE_DIR/cookies.txt" "$CONTEXT_DIR/assets/cookies.txt"
+    fi
+
+    # 3.4 Web assets (e.g. data-persistence-service assets folder if it exists)
+    if [ -d "services/$SERVICE_DIR/assets" ]; then
+        echo "  - includes web assets"
+        cp -r "services/$SERVICE_DIR/assets/"* "$CONTEXT_DIR/assets/"
+    fi
+
+    # 4. Build the minimal image
+    # No build args needed: the files are already prepared
+    docker build -t "$IMAGE_NAME" -f docker/Dockerfile.dist "$CONTEXT_DIR"
+
+    # 5. Push
+    echo -e "${YELLOW}  Pushing $SERVICE_DIR to Harbor ...${NC}"
     docker push "$IMAGE_NAME"
+
+    # Track size
+    SIZE_BYTES=$(docker inspect "$IMAGE_NAME" --format='{{.Size}}')
+    TOTAL_SIZE=$(echo "$TOTAL_SIZE + $SIZE_BYTES" | bc)
 done

 TOTAL_SIZE_MB=$(echo "scale=2; $TOTAL_SIZE / 1024 / 1024" | bc)
 echo -e "\n${GREEN}=== All images processed ===${NC}"
-echo -e "${GREEN}Total size: ${TOTAL_SIZE_MB} MB${NC}"
-
-# Check whether the total exceeds 1GB (1024 MB)
-if (( $(echo "$TOTAL_SIZE_MB > 1024" | bc -l) )); then
-    echo -e "${RED}Warning: total image size exceeds 1GB; mind the remote registry's space limits${NC}"
-else
-    echo -e "${GREEN}Total size is within the 1GB limit.${NC}"
-fi
+echo -e "${GREEN}Backend total size: ${TOTAL_SIZE_MB} MB${NC}"

-# Generate the docker-compose.server.yml used on the server
+# ==========================================
+# Stage 4: Generate the deployment file
+# ==========================================
 echo -e "\n${YELLOW}>>> Generating the server deployment file docker-compose.server.yml ...${NC}"
-cat > docker-compose.server.yml <<EOF
+cat > docker-compose.server.yml <<YAML
 services:
   postgres-db:
     image: timescale/timescaledb:2.15.2-pg16
@@ -97,147 +181,141 @@ services:
       retries: 10
     networks:
       - app-network
+    restart: always

   nats:
     image: nats:2.9
+    container_name: fundamental-nats
     volumes:
       - nats_data:/data
     networks:
       - app-network
+    restart: always

   data-persistence-service:
     image: $NAMESPACE/data-persistence-service:$VERSION
     container_name: data-persistence-service
-    restart: unless-stopped
     environment:
       HOST: 0.0.0.0
       PORT: 3000
       DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
       RUST_LOG: info
       RUST_BACKTRACE: "1"
+      SKIP_MIGRATIONS_ON_MISMATCH: "1"
     depends_on:
       postgres-db:
         condition: service_healthy
+    healthcheck:
+      test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
     networks:
       - app-network
+    restart: always

-  frontend:
-    image: $NAMESPACE/frontend:$VERSION
-    container_name: fundamental-frontend
-    restart: unless-stopped
-    environment:
-      NEXT_PUBLIC_BACKEND_URL: http://api-gateway:4000/v1
-      FRONTEND_INTERNAL_URL: http://fundamental-frontend:3000
-      BACKEND_INTERNAL_URL: http://api-gateway:4000/v1
-      NODE_ENV: production
-    ports:
-      - "3001:3000"
-    depends_on:
-      api-gateway:
-        condition: service_healthy
-    networks:
-      - app-network
-
   api-gateway:
     image: $NAMESPACE/api-gateway:$VERSION
     container_name: api-gateway
-    restart: unless-stopped
     environment:
       SERVER_PORT: 4000
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
-      PROVIDER_SERVICES: '["http://alphavantage-provider-service:8000", "http://tushare-provider-service:8001", "http://finnhub-provider-service:8002", "http://yfinance-provider-service:8003"]'
+      REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
       RUST_LOG: info,axum=info
       RUST_BACKTRACE: "1"
     depends_on:
-      - nats
-      - data-persistence-service
-      - alphavantage-provider-service
-      - tushare-provider-service
-      - finnhub-provider-service
-      - yfinance-provider-service
+      nats:
+        condition: service_started
+      data-persistence-service:
+        condition: service_healthy
    networks:
       - app-network
     healthcheck:
       test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
-      interval: 5s
+      interval: 10s
       timeout: 5s
-      retries: 12
+      retries: 5
+    restart: always

   alphavantage-provider-service:
     image: $NAMESPACE/alphavantage-provider-service:$VERSION
     container_name: alphavantage-provider-service
-    restart: unless-stopped
+    volumes:
+      - workflow_data:/mnt/workflow_data
     environment:
       SERVER_PORT: 8000
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
-      RUST_LOG: info,axum=info
+      API_GATEWAY_URL: http://api-gateway:4000
+      WORKFLOW_DATA_PATH: /mnt/workflow_data
+      SERVICE_HOST: alphavantage-provider-service
+      RUST_LOG: info
       RUST_BACKTRACE: "1"
     depends_on:
       - nats
       - data-persistence-service
     networks:
       - app-network
-    healthcheck:
-      test: ["CMD-SHELL", "curl -fsS http://localhost:8000/health >/dev/null || exit 1"]
-      interval: 5s
-      timeout: 5s
-      retries: 12
+    restart: always

   tushare-provider-service:
     image: $NAMESPACE/tushare-provider-service:$VERSION
     container_name: tushare-provider-service
-    restart: unless-stopped
+    volumes:
+      - workflow_data:/mnt/workflow_data
     environment:
       SERVER_PORT: 8001
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
       TUSHARE_API_URL: http://api.waditu.com
-      RUST_LOG: info,axum=info
+      API_GATEWAY_URL: http://api-gateway:4000
+      WORKFLOW_DATA_PATH: /mnt/workflow_data
+      SERVICE_HOST: tushare-provider-service
+      RUST_LOG: info
       RUST_BACKTRACE: "1"
     depends_on:
       - nats
       - data-persistence-service
     networks:
       - app-network
-    healthcheck:
-      test: ["CMD-SHELL", "curl -fsS http://localhost:8001/health >/dev/null || exit 1"]
-      interval: 5s
-      timeout: 5s
-      retries: 12
+    restart: always

   finnhub-provider-service:
     image: $NAMESPACE/finnhub-provider-service:$VERSION
     container_name: finnhub-provider-service
-    restart: unless-stopped
+    volumes:
+      - workflow_data:/mnt/workflow_data
     environment:
       SERVER_PORT: 8002
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
       FINNHUB_API_URL: https://finnhub.io/api/v1
-      RUST_LOG: info,axum=info
+      API_GATEWAY_URL: http://api-gateway:4000
+      WORKFLOW_DATA_PATH: /mnt/workflow_data
+      SERVICE_HOST: finnhub-provider-service
+      RUST_LOG: info
       RUST_BACKTRACE: "1"
     depends_on:
       - nats
       - data-persistence-service
     networks:
       - app-network
-    healthcheck:
-      test: ["CMD-SHELL", "curl -fsS http://localhost:8002/health >/dev/null || exit 1"]
-      interval: 5s
-      timeout: 5s
-      retries: 12
+    restart: always

   yfinance-provider-service:
     image: $NAMESPACE/yfinance-provider-service:$VERSION
     container_name: yfinance-provider-service
-    restart: unless-stopped
+    volumes:
+      - workflow_data:/mnt/workflow_data
     environment:
       SERVER_PORT: 8003
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
-      RUST_LOG: info,axum=info
+      API_GATEWAY_URL: http://api-gateway:4000
+      WORKFLOW_DATA_PATH: /mnt/workflow_data
+      SERVICE_HOST: yfinance-provider-service
+      RUST_LOG: info
       RUST_BACKTRACE: "1"
     depends_on:
       - nats
@@ -247,41 +325,79 @@ services:
     dns:
       - 8.8.8.8
       - 8.8.4.4
-    healthcheck:
-      test: ["CMD-SHELL", "curl -fsS http://localhost:8003/health >/dev/null || exit 1"]
-      interval: 5s
-      timeout: 5s
-      retries: 12
+    restart: always

   report-generator-service:
     image: $NAMESPACE/report-generator-service:$VERSION
     container_name: report-generator-service
-    restart: unless-stopped
+    volumes:
+      - workflow_data:/mnt/workflow_data
     environment:
       SERVER_PORT: 8004
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
-      RUST_LOG: info,axum=info
+      GOTENBERG_URL: http://gotenberg:3000
+      WORKFLOW_DATA_PATH: /mnt/workflow_data
+      RUST_LOG: info
       RUST_BACKTRACE: "1"
     depends_on:
       - nats
       - data-persistence-service
+      - gotenberg
     networks:
       - app-network
-    healthcheck:
-      test: ["CMD-SHELL", "curl -fsS http://localhost:8004/health >/dev/null || exit 1"]
-      interval: 5s
-      timeout: 5s
-      retries: 12
+    restart: always
+
+  workflow-orchestrator-service:
+    image: $NAMESPACE/workflow-orchestrator-service:$VERSION
+    container_name: workflow-orchestrator-service
+    volumes:
+      - workflow_data:/mnt/workflow_data
+    environment:
+      SERVER_PORT: 8005
+      NATS_ADDR: nats://nats:4222
+      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
+      WORKFLOW_DATA_PATH: /mnt/workflow_data
+      RUST_LOG: info
+      RUST_BACKTRACE: "1"
+    depends_on:
+      - nats
+      - data-persistence-service
+    networks:
+      - app-network
+    restart: always
+
+  gotenberg:
+    image: gotenberg/gotenberg:8
+    container_name: gotenberg
+    networks:
+      - app-network
+    restart: always
+
+  frontend:
+    image: $NAMESPACE/frontend:$VERSION
+    container_name: fundamental-frontend
+    ports:
+      - "8080:80" # Map host 8080 to container 80 (Nginx)
+    depends_on:
+      api-gateway:
+        condition: service_healthy
+    networks:
+      - app-network
+    restart: always

 volumes:
+  workflow_data:
   pgdata:
   nats_data:

 networks:
   app-network:
-EOF
+YAML

 echo -e "${GREEN}Generated: docker-compose.server.yml${NC}"
-echo -e "Copy this file to the remote server and run: docker-compose -f docker-compose.server.yml up -d"
+echo -e "Follow these steps to update the remote server:"
+echo -e "1. Copy docker-compose.server.yml to the server"
+echo -e "2. On the server, run: docker-compose -f docker-compose.server.yml pull   (pull the latest images)"
+echo -e "3. On the server, run: docker-compose -f docker-compose.server.yml up -d  (restart the services)"
+echo -e "   Or in one command: docker-compose -f docker-compose.server.yml up -d --pull always"


@@ -6,7 +6,7 @@ use tracing::{info, warn};
 #[derive(Deserialize)]
 pub struct TestConnectionRequest {
     // This is the MCP endpoint URL
-    pub api_url: String,
+    pub api_url: Option<String>,
     // The API key is passed for validation but might not be used directly
     // in the MCP connection itself, depending on auth mechanism.
     pub api_key: Option<String>,
@@ -23,20 +23,15 @@ pub struct TestConnectionResponse {
 pub async fn test_connection(
     Json(payload): Json<TestConnectionRequest>,
 ) -> impl IntoResponse {
-    info!("Testing connection to MCP endpoint: {}", payload.api_url);
+    // Default MCP URL if not provided
+    let base_url = payload.api_url
+        .filter(|s| !s.is_empty())
+        .unwrap_or_else(|| "https://mcp.alphavantage.co/mcp".to_string());

-    if payload.api_url.is_empty() {
-        return (
-            StatusCode::BAD_REQUEST,
-            Json(TestConnectionResponse {
-                success: false,
-                message: "API URL (MCP Endpoint) cannot be empty.".to_string(),
-            }),
-        ).into_response();
-    }
+    info!("Testing connection to MCP endpoint: {}", base_url);

     // Callers must pass the base MCP URL (no query parameters or api_key); we then
     // append ?apikey= as described in the official docs
-    if payload.api_url.contains('?') {
+    if base_url.contains('?') {
         return (
             StatusCode::BAD_REQUEST,
             Json(TestConnectionResponse {
@@ -45,6 +40,7 @@ pub async fn test_connection(
             }),
         ).into_response();
     }
+
     let Some(key) = &payload.api_key else {
         return (
             StatusCode::BAD_REQUEST,
@@ -54,7 +50,8 @@ pub async fn test_connection(
             }),
         ).into_response();
     };
-    let final_url = format!("{}?apikey={}", payload.api_url, key);
+
+    let final_url = format!("{}?apikey={}", base_url, key);
     info!("Testing MCP with final endpoint: {}", final_url);

     let mcp_client = match AvClient::connect(&final_url).await {
         Ok(client) => client,
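With api_url now optional, the handler falls back to the official MCP endpoint; a sketch of exercising both branches, assuming the handler is mounted at /test like the other providers (port 8000 per the compose files; the key is a placeholder):

# Omitting api_url uses https://mcp.alphavantage.co/mcp by default
curl -s -X POST http://localhost:8000/test \
  -H 'Content-Type: application/json' \
  -d '{"api_key": "YOUR_ALPHAVANTAGE_KEY"}'
# A URL carrying a query string is still rejected with 400
curl -s -X POST http://localhost:8000/test \
  -H 'Content-Type: application/json' \
  -d '{"api_url": "https://mcp.alphavantage.co/mcp?apikey=x", "api_key": "YOUR_ALPHAVANTAGE_KEY"}'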


@@ -8,8 +8,7 @@ use axum::{
     routing::{get, post},
 };
 use common_contracts::config_models::{
-    AnalysisTemplateSets, DataSourceProvider,
-    DataSourcesConfig, LlmProvider, LlmProvidersConfig,
+    DataSourcesConfig, LlmProvidersConfig,
     AnalysisTemplateSummary, AnalysisTemplateSet
 };
 use common_contracts::dtos::{SessionDataDto, WorkflowHistoryDto, WorkflowHistorySummaryDto};
@@ -20,9 +19,9 @@ use common_contracts::subjects::{NatsSubject, SubjectMessage};
 use common_contracts::symbol_utils::{CanonicalSymbol, Market};
 use futures_util::future::join_all;
 use futures_util::stream::StreamExt;
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-use tokio::try_join;
+use serde::Deserialize;
+use std::collections::{HashMap, HashSet};
+// use tokio::try_join;
 use tracing::{error, info, warn};
 use uuid::Uuid;
 use utoipa::OpenApi;
@@ -204,7 +203,7 @@ fn create_v1_router() -> Router<AppState> {
         )
         .route("/configs/test", post(test_data_source_config))
         .route("/configs/llm/test", post(test_llm_config))
-        .route("/config", get(get_legacy_system_config))
+        // .route("/config", get(get_legacy_system_config))
         .route("/discover-models/{provider_id}", get(discover_models))
         .route("/discover-models", post(discover_models_preview))
         .route("/registry/register", post(registry::register_service))
@@ -269,128 +268,10 @@ async fn proxy_generate_pdf(
 // ... rest of file (unchanged) ...
 // Including legacy config and other handlers here to complete file write...

-// --- Legacy Config Compatibility ---
-
-#[derive(Serialize, Default)]
-struct LegacyDatabaseConfig {
-    url: Option<String>,
-}
-
-#[derive(Serialize, Default)]
-struct LegacyNewApiConfig {
-    provider_id: Option<String>,
-    provider_name: Option<String>,
-    api_key: Option<String>,
-    base_url: Option<String>,
-    model_count: usize,
-}
-
-#[derive(Serialize, Default)]
-struct LegacyDataSourceConfig {
-    provider: String,
-    api_key: Option<String>,
-    api_url: Option<String>,
-    enabled: bool,
-}
-
-#[derive(Serialize)]
-struct LegacySystemConfigResponse {
-    database: LegacyDatabaseConfig,
-    new_api: LegacyNewApiConfig,
-    data_sources: HashMap<String, LegacyDataSourceConfig>,
-    llm_providers: LlmProvidersConfig,
-    analysis_template_sets: AnalysisTemplateSets,
-}
-
-async fn get_legacy_system_config(State(state): State<AppState>) -> Result<impl IntoResponse> {
-    let persistence = state.persistence_client.clone();
-
-    // let (llm_providers, analysis_template_sets, data_sources) = try_join!(
-    //     persistence.get_llm_providers_config(),
-    //     persistence.get_analysis_template_sets(),
-    //     persistence.get_data_sources_config()
-    // )?;
-    let (llm_providers, data_sources) = try_join!(
-        persistence.get_llm_providers_config(),
-        persistence.get_data_sources_config()
-    )?;
-    let analysis_template_sets = AnalysisTemplateSets::default(); // Empty placeholder
-
-    let new_api = derive_primary_provider(&llm_providers);
-    let ds_map = project_data_sources(data_sources);
-    let database_url = std::env::var("DATABASE_URL").ok();
-
-    let response = LegacySystemConfigResponse {
-        database: LegacyDatabaseConfig { url: database_url },
-        new_api,
-        data_sources: ds_map,
-        llm_providers,
-        analysis_template_sets,
-    };
-
-    Ok(Json(response))
-}
-
-fn derive_primary_provider(providers: &LlmProvidersConfig) -> LegacyNewApiConfig {
-    const PREFERRED_IDS: [&str; 3] = ["new_api", "openrouter", "default"];
-
-    let mut selected_id: Option<String> = None;
-    let mut selected_provider: Option<&LlmProvider> = None;
-
-    for preferred in PREFERRED_IDS {
-        if let Some(provider) = providers.get(preferred) {
-            selected_id = Some(preferred.to_string());
-            selected_provider = Some(provider);
-            break;
-        }
-    }
-
-    if selected_provider.is_none() {
-        if let Some((fallback_id, provider)) = providers.iter().next() {
-            selected_id = Some(fallback_id.clone());
-            selected_provider = Some(provider);
-        }
-    }
-
-    if let Some(provider) = selected_provider {
-        LegacyNewApiConfig {
-            provider_id: selected_id,
-            provider_name: Some(provider.name.clone()),
-            api_key: Some(provider.api_key.clone()),
-            base_url: Some(provider.api_base_url.clone()),
-            model_count: provider.models.len(),
-        }
-    } else {
-        LegacyNewApiConfig::default()
-    }
-}
-
-fn project_data_sources(
-    configs: DataSourcesConfig,
-) -> HashMap<String, LegacyDataSourceConfig> {
-    configs
-        .0
-        .into_iter()
-        .map(|(key, cfg)| {
-            let provider = provider_id(&cfg.provider).to_string();
-            let entry = LegacyDataSourceConfig {
-                provider,
-                api_key: cfg.api_key.clone(),
-                api_url: cfg.api_url.clone(),
-                enabled: cfg.enabled,
-            };
-            (key, entry)
-        })
-        .collect()
-}
-
-fn provider_id(provider: &DataSourceProvider) -> &'static str {
-    match provider {
-        DataSourceProvider::Tushare => "tushare",
-        DataSourceProvider::Finnhub => "finnhub",
-        DataSourceProvider::Alphavantage => "alphavantage",
-        DataSourceProvider::Yfinance => "yfinance",
-    }
-}
+// --- Legacy Config Compatibility - REMOVED ---
+/*
+// Legacy structs and handlers removed to enforce new design.
+*/

 // --- Helper Functions ---
@@ -1181,12 +1062,13 @@ async fn get_registered_providers(State(state): State<AppState>) -> Result<impl
     let entries = state.registry.get_entries();
+    let mut seen_ids = HashSet::new();

     let providers: Vec<ProviderMetadata> = entries
         .into_iter()
         .filter_map(|entry| {
             // Only return DataProvider services that have metadata
             if entry.registration.role == common_contracts::registry::ServiceRole::DataProvider {
-                entry.registration.metadata
+                entry.registration.metadata.filter(|m| seen_ids.insert(m.id.clone()))
             } else {
                 None
             }


@@ -128,6 +128,7 @@ pub enum DataSourceProvider {
     Finnhub,
     Alphavantage,
     Yfinance,
+    Mock,
 }

 #[api_dto]


@@ -1,20 +1,84 @@
 use std::collections::HashMap;

 use axum::{
     extract::State,
-    response::Json,
-    routing::get,
+    response::{Json, IntoResponse},
+    routing::{get, post},
     Router,
+    http::StatusCode,
 };
 use common_contracts::observability::{HealthStatus, ServiceStatus, TaskProgress};

 use crate::state::{AppState, ServiceOperationalStatus};
+use serde::Deserialize;
+use crate::fh_client::FinnhubClient;

 pub fn create_router(app_state: AppState) -> Router {
     Router::new()
         .route("/health", get(health_check))
         .route("/tasks", get(get_current_tasks))
+        .route("/test", post(test_connection))
         .with_state(app_state)
 }

+#[derive(Deserialize)]
+struct TestRequest {
+    api_key: Option<String>,
+    api_url: Option<String>,
+}
+
+async fn test_connection(
+    State(state): State<AppState>,
+    Json(payload): Json<TestRequest>,
+) -> impl IntoResponse {
+    let api_url = payload.api_url
+        .filter(|s| !s.is_empty())
+        .unwrap_or_else(|| state.config.finnhub_api_url.clone());
+
+    let api_key = if let Some(k) = payload.api_key.filter(|s| !s.is_empty()) {
+        k
+    } else if let Some(k) = &state.config.finnhub_api_key {
+        k.clone()
+    } else {
+        return (
+            StatusCode::BAD_REQUEST,
+            Json(serde_json::json!({
+                "success": false,
+                "message": "No API Key provided or configured"
+            }))
+        ).into_response();
+    };
+
+    // Validate API Key with a lightweight request (e.g. AAPL quote)
+    let client_res = FinnhubClient::new(api_url, api_key);
+
+    match client_res {
+        Ok(client) => {
+            match client.get::<serde_json::Value>("quote", vec![("symbol".to_string(), "AAPL".to_string())]).await {
+                Ok(_) => (
+                    StatusCode::OK,
+                    Json(serde_json::json!({
+                        "success": true,
+                        "message": "Connection successful"
+                    }))
+                ).into_response(),
+                Err(e) => (
+                    StatusCode::BAD_REQUEST,
+                    Json(serde_json::json!({
+                        "success": false,
+                        "message": format!("Connection failed: {}", e)
+                    }))
+                ).into_response(),
+            }
+        },
+        Err(e) => (
+            StatusCode::INTERNAL_SERVER_ERROR,
+            Json(serde_json::json!({
+                "success": false,
+                "message": format!("Failed to initialize client: {}", e)
+            }))
+        ).into_response()
+    }
+}
+
 /// [GET /health]
 /// Provides the current health status of the module.
 async fn health_check(State(state): State<AppState>) -> Json<HealthStatus> {
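The new endpoint validates the key with a lightweight AAPL quote request; a sketch of calling it (port 8002 per the compose files; the key is a placeholder):

curl -s -X POST http://localhost:8002/test \
  -H 'Content-Type: application/json' \
  -d '{"api_key": "YOUR_FINNHUB_KEY"}'
# => {"success":true,"message":"Connection successful"} for a valid key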


@@ -3,15 +3,17 @@ use axum::{
     extract::State,
     http::StatusCode,
     response::{IntoResponse, Json},
-    routing::get,
+    routing::{get, post},
     Router,
 };
 use common_contracts::observability::TaskProgress;
+use serde::Deserialize;

 pub fn create_router(state: AppState) -> Router {
     Router::new()
         .route("/health", get(health_check))
         .route("/tasks", get(get_tasks))
+        .route("/test", post(test_connection))
         .with_state(state)
 }
@@ -24,3 +26,24 @@ async fn get_tasks(State(state): State<AppState>) -> impl IntoResponse {
     Json(tasks)
 }
+
+#[derive(Deserialize)]
+struct TestRequest {
+    // Accept arbitrary parameters, but ignore them
+    #[allow(dead_code)]
+    api_key: Option<String>,
+    #[allow(dead_code)]
+    api_url: Option<String>,
+}
+
+async fn test_connection(
+    Json(_payload): Json<TestRequest>,
+) -> impl IntoResponse {
+    // The Mock Provider always succeeds
+    (
+        StatusCode::OK,
+        Json(serde_json::json!({
+            "success": true,
+            "message": "Mock Provider connection successful"
+        }))
+    )
+}
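The Mock Provider's endpoint accepts any payload and always reports success; a sketch (port 8006 per docker-compose.prod.yml):

curl -s -X POST http://localhost:8006/test \
  -H 'Content-Type: application/json' -d '{}'
# => {"success":true,"message":"Mock Provider connection successful"}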