Compare commits

...

6 Commits

Author SHA1 Message Date
Lv, Qi
fbb1703b00 feat: fix provider test endpoints and update deployment scripts
- Finnhub: Add missing /test endpoint
- AlphaVantage: Fix test endpoint deserialization (handle null api_url)
- Mock Provider: Add /test endpoint and fix Zodios validation error by adding Mock enum
- Deployment: Remove Mock Provider from production deployment script
- Infrastructure: Add production Dockerfiles and compose configs
2025-12-01 02:00:21 +08:00
Lv, Qi
6065b1ff48 chore: fix frontend build errors and prepare prod configuration 2025-11-30 23:21:34 +08:00
Lv, Qi
abe47c4bc8 refactor(report): switch to HTML+Gotenberg for high-quality PDF export
- Feat: Add Gotenberg service to docker-compose for headless PDF rendering
- Feat: Implement /generate-pdf endpoint in report-generator-service
- Feat: Add PDF generation proxy route in api-gateway
- Refactor(frontend): Rewrite PDFExportButton to generate HTML with embedded styles and images
- Feat(frontend): Auto-crop React Flow screenshots to remove whitespace
- Style: Optimize report print layout with CSS (margins, image sizing)
- Chore: Remove legacy react-pdf code and font files
2025-11-30 22:43:22 +08:00
Lv, Qi
7933c706d1 refactor: strict typing for workflow events using WorkflowEventType enum
- refactor(frontend): replace string literals with WorkflowEventType enum for event handling
- feat(backend): export WorkflowEventType in common-contracts and openapi
- fix(tests): update end-to-end tests to match new ContextSelectorConfig and LlmConfig types
- chore: regenerate openapi.json and frontend client schemas
2025-11-30 19:28:57 +08:00
Lv, Qi
e9e4d0c1b3 chore: massive update covering recent refactoring and bug fixes
- fix: infinite message loop in workflow orchestrator
- feat: restore realtime LLM streaming from report generator to frontend
- refactor: major update to provider services (generic workers, workflow adapters)
- refactor: common contracts and message definitions updated
- feat: enhanced logging and observability in orchestrator
- docs: update project management tasks and status
- chore: dependency updates and config adjustments
2025-11-30 19:17:02 +08:00
Lv, Qi
5dc13fa735 fix: resolve infinite feedback loop in orchestrator and restore realtime LLM streaming 2025-11-30 19:16:02 +08:00
113 changed files with 15603 additions and 1980 deletions

229
Cargo.lock generated
View File

@@ -40,6 +40,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"async-nats",
"async-trait",
"axum",
"chrono",
"common-contracts",
@@ -319,6 +320,7 @@ dependencies = [
"matchit",
"memchr",
"mime",
"multer",
"percent-encoding",
"pin-project-lite",
"serde_core",
@@ -448,6 +450,50 @@ dependencies = [
"generic-array",
]
[[package]]
name = "bollard"
version = "0.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30"
dependencies = [
"base64 0.22.1",
"bollard-stubs",
"bytes",
"futures-core",
"futures-util",
"hex",
"http",
"http-body-util",
"hyper",
"hyper-named-pipe",
"hyper-util",
"hyperlocal",
"log",
"pin-project-lite",
"serde",
"serde_derive",
"serde_json",
"serde_repr",
"serde_urlencoded",
"thiserror 2.0.17",
"tokio",
"tokio-util",
"tower-service",
"url",
"winapi",
]
[[package]]
name = "bollard-stubs"
version = "1.47.1-rc.27.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da"
dependencies = [
"serde",
"serde_repr",
"serde_with 3.16.1",
]
[[package]]
name = "borsh"
version = "1.6.0"
@@ -681,6 +727,19 @@ dependencies = [
"yaml-rust2",
]
[[package]]
name = "console"
version = "0.15.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8"
dependencies = [
"encode_unicode",
"libc",
"once_cell",
"unicode-width",
"windows-sys 0.59.0",
]
[[package]]
name = "const-oid"
version = "0.9.6"
@@ -1152,6 +1211,12 @@ dependencies = [
"serde",
]
[[package]]
name = "encode_unicode"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
[[package]]
name = "encoding_rs"
version = "0.8.35"
@@ -1161,6 +1226,28 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "end-to-end"
version = "0.1.0"
dependencies = [
"anyhow",
"bollard",
"chrono",
"common-contracts",
"console",
"eventsource-stream",
"futures",
"indicatif",
"rand 0.9.2",
"reqwest",
"serde",
"serde_json",
"tokio",
"tracing",
"tracing-subscriber",
"uuid",
]
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -1245,6 +1332,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"async-nats",
"async-trait",
"axum",
"chrono",
"common-contracts",
@@ -1699,6 +1787,21 @@ dependencies = [
"want",
]
[[package]]
name = "hyper-named-pipe"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278"
dependencies = [
"hex",
"hyper",
"hyper-util",
"pin-project-lite",
"tokio",
"tower-service",
"winapi",
]
[[package]]
name = "hyper-rustls"
version = "0.27.7"
@@ -1759,6 +1862,21 @@ dependencies = [
"windows-registry",
]
[[package]]
name = "hyperlocal"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7"
dependencies = [
"hex",
"http-body-util",
"hyper",
"hyper-util",
"pin-project-lite",
"tokio",
"tower-service",
]
[[package]]
name = "iana-time-zone"
version = "0.1.64"
@@ -1930,6 +2048,19 @@ dependencies = [
"serde_core",
]
[[package]]
name = "indicatif"
version = "0.17.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235"
dependencies = [
"console",
"number_prefix",
"portable-atomic",
"unicode-width",
"web-time",
]
[[package]]
name = "instant"
version = "0.1.13"
@@ -2246,6 +2377,23 @@ dependencies = [
"workflow-context",
]
[[package]]
name = "multer"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b"
dependencies = [
"bytes",
"encoding_rs",
"futures-util",
"http",
"httparse",
"memchr",
"mime",
"spin",
"version_check",
]
[[package]]
name = "native-tls"
version = "0.2.14"
@@ -2358,6 +2506,12 @@ dependencies = [
"libm",
]
[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "oas"
version = "0.2.1"
@@ -2367,7 +2521,7 @@ dependencies = [
"either",
"serde",
"serde_json",
"serde_with",
"serde_with 2.3.3",
]
[[package]]
@@ -3109,7 +3263,7 @@ dependencies = [
"rand 0.9.2",
"reqwest",
"rmcp-macros",
"schemars",
"schemars 1.1.0",
"serde",
"serde_json",
"sse-stream",
@@ -3376,6 +3530,18 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "schemars"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f"
dependencies = [
"dyn-clone",
"ref-cast",
"serde",
"serde_json",
]
[[package]]
name = "schemars"
version = "1.1.0"
@@ -3610,6 +3776,24 @@ dependencies = [
"time",
]
[[package]]
name = "serde_with"
version = "3.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7"
dependencies = [
"base64 0.22.1",
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.12.1",
"schemars 0.9.0",
"schemars 1.1.0",
"serde_core",
"serde_json",
"time",
]
[[package]]
name = "serde_with_macros"
version = "2.3.3"
@@ -3677,7 +3861,7 @@ dependencies = [
"quote",
"reqwest",
"rmcp",
"schemars",
"schemars 1.1.0",
"serde",
"serde_json",
"serde_urlencoded",
@@ -3702,7 +3886,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"schemars",
"schemars 1.1.0",
"serde",
"serde_json",
"serde_urlencoded",
@@ -4672,6 +4856,12 @@ version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "unicode-width"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254"
[[package]]
name = "unsafe-libyaml"
version = "0.2.11"
@@ -4947,6 +5137,22 @@ dependencies = [
"wasite",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.11"
@@ -4956,6 +5162,12 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-core"
version = "0.62.2"
@@ -5044,6 +5256,15 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.60.2"

Cargo.toml
View File

@@ -12,6 +12,7 @@ members = [
"services/workflow-orchestrator-service",
"services/yfinance-provider-service",
"crates/workflow-context",
"tests/end-to-end",
]
[workspace.package]

23
Tiltfile.prod Normal file
View File

@@ -0,0 +1,23 @@
# Load the production environment configuration
docker_compose('docker-compose.prod.yml')
# Define the service list
# These services involve slow compilation (release mode) or builds (frontend),
# so we set them to manual trigger mode: accidental file edits during development
# should not kick off a long rebuild
services = [
'data-persistence-service',
'api-gateway',
'mock-provider-service',
'alphavantage-provider-service',
'tushare-provider-service',
'finnhub-provider-service',
'yfinance-provider-service',
'report-generator-service',
'workflow-orchestrator-service',
'frontend'
]
# Set each service's trigger mode to manual
for name in services:
dc_resource(name, trigger_mode=TRIGGER_MODE_MANUAL)

BIN
assets/flow.png Normal file

Binary file not shown. (New image, 141 KiB)

5553
assets/tushare.json Normal file

File diff suppressed because it is too large

339
deploy_to_harbor.sh Normal file
View File

@@ -0,0 +1,339 @@
#!/bin/bash
# Exit immediately on error
set -e
# Configuration variables
REGISTRY="harbor.3prism.ai"
PROJECT="fundamental_analysis"
VERSION="latest"
NAMESPACE="$REGISTRY/$PROJECT"
# Colored output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
echo -e "${GREEN}=== Building and pushing images to $NAMESPACE ===${NC}"
# Service list
# Format: "service-name:path-to-Dockerfile"
# Note: all backend services now use the shared docker/Dockerfile.backend.prod
SERVICES=(
"data-persistence-service:docker/Dockerfile.backend.prod"
"api-gateway:docker/Dockerfile.backend.prod"
"alphavantage-provider-service:docker/Dockerfile.backend.prod"
"tushare-provider-service:docker/Dockerfile.backend.prod"
"finnhub-provider-service:docker/Dockerfile.backend.prod"
"yfinance-provider-service:docker/Dockerfile.backend.prod"
"report-generator-service:docker/Dockerfile.backend.prod"
"workflow-orchestrator-service:docker/Dockerfile.backend.prod"
"mock-provider-service:docker/Dockerfile.backend.prod"
"frontend:docker/Dockerfile.frontend.prod"
)
# Running total of image sizes
TOTAL_SIZE=0
for entry in "${SERVICES[@]}"; do
KEY="${entry%%:*}"
DOCKERFILE="${entry#*:}"
IMAGE_NAME="$NAMESPACE/$KEY:$VERSION"
echo -e "\n${YELLOW}>>> Building $KEY ...${NC}"
echo "Using Dockerfile: $DOCKERFILE"
# Build the image
if [ "$KEY" == "frontend" ]; then
# The frontend does not need the SERVICE_NAME build-arg
docker build -t "$IMAGE_NAME" -f "$DOCKERFILE" .
elif [ "$KEY" == "data-persistence-service" ]; then
# Special case: data-persistence-service has a different binary name
docker build -t "$IMAGE_NAME" --build-arg SERVICE_NAME="data-persistence-service-server" -f "$DOCKERFILE" .
else
# Backend services need SERVICE_NAME passed in
docker build -t "$IMAGE_NAME" --build-arg SERVICE_NAME="$KEY" -f "$DOCKERFILE" .
fi
# Get the image size (MB)
SIZE_BYTES=$(docker inspect "$IMAGE_NAME" --format='{{.Size}}')
SIZE_MB=$(echo "scale=2; $SIZE_BYTES / 1024 / 1024" | bc)
echo -e "${GREEN}$KEY built. Size: ${SIZE_MB} MB${NC}"
# Accumulate the total size
TOTAL_SIZE=$(echo "$TOTAL_SIZE + $SIZE_BYTES" | bc)
echo -e "${YELLOW}>>> Pushing $KEY to Harbor ...${NC}"
docker push "$IMAGE_NAME"
done
TOTAL_SIZE_MB=$(echo "scale=2; $TOTAL_SIZE / 1024 / 1024" | bc)
echo -e "\n${GREEN}=== All images processed ===${NC}"
echo -e "${GREEN}Total size: ${TOTAL_SIZE_MB} MB${NC}"
# Generate the docker-compose.server.yml used on the server
echo -e "\n${YELLOW}>>> Generating server deployment file docker-compose.server.yml ...${NC}"
# Derived from docker-compose.prod.yml, with build: replaced by image:
# We write it out by hand here: parsing and rewriting the YAML is complex, and we know the structure
cat > docker-compose.server.yml <<EOF
services:
postgres-db:
image: timescale/timescaledb:2.15.2-pg16
container_name: fundamental-postgres
command: -c shared_preload_libraries=timescaledb
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: fundamental
volumes:
- pgdata:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d fundamental"]
interval: 5s
timeout: 5s
retries: 10
networks:
- app-network
restart: always
nats:
image: nats:2.9
container_name: fundamental-nats
volumes:
- nats_data:/data
networks:
- app-network
restart: always
data-persistence-service:
image: $NAMESPACE/data-persistence-service:$VERSION
container_name: data-persistence-service
environment:
HOST: 0.0.0.0
PORT: 3000
DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
RUST_LOG: info
RUST_BACKTRACE: "1"
SKIP_MIGRATIONS_ON_MISMATCH: "1"
depends_on:
postgres-db:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
networks:
- app-network
restart: always
api-gateway:
image: $NAMESPACE/api-gateway:$VERSION
container_name: api-gateway
environment:
SERVER_PORT: 4000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
RUST_LOG: info,axum=info
RUST_BACKTRACE: "1"
depends_on:
nats:
condition: service_started
data-persistence-service:
condition: service_healthy
networks:
- app-network
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
restart: always
mock-provider-service:
image: $NAMESPACE/mock-provider-service:$VERSION
container_name: mock-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8006
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: mock-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
alphavantage-provider-service:
image: $NAMESPACE/alphavantage-provider-service:$VERSION
container_name: alphavantage-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: alphavantage-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
tushare-provider-service:
image: $NAMESPACE/tushare-provider-service:$VERSION
container_name: tushare-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8001
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
TUSHARE_API_URL: http://api.waditu.com
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: tushare-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
finnhub-provider-service:
image: $NAMESPACE/finnhub-provider-service:$VERSION
container_name: finnhub-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8002
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
FINNHUB_API_URL: https://finnhub.io/api/v1
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: finnhub-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
yfinance-provider-service:
image: $NAMESPACE/yfinance-provider-service:$VERSION
container_name: yfinance-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8003
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: yfinance-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
dns:
- 8.8.8.8
- 8.8.4.4
restart: always
report-generator-service:
image: $NAMESPACE/report-generator-service:$VERSION
container_name: report-generator-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8004
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
GOTENBERG_URL: http://gotenberg:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
- gotenberg
networks:
- app-network
restart: always
workflow-orchestrator-service:
image: $NAMESPACE/workflow-orchestrator-service:$VERSION
container_name: workflow-orchestrator-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8005
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
gotenberg:
image: gotenberg/gotenberg:8
container_name: gotenberg
networks:
- app-network
restart: always
frontend:
image: $NAMESPACE/frontend:$VERSION
container_name: fundamental-frontend
ports:
- "8080:80" # Map host 8080 to container 80 (Nginx)
depends_on:
api-gateway:
condition: service_healthy
networks:
- app-network
restart: always
volumes:
workflow_data:
pgdata:
nats_data:
networks:
app-network:
EOF
echo -e "${GREEN}Done: docker-compose.server.yml generated${NC}"
echo -e "Copy this file to the remote server and run: docker-compose -f docker-compose.server.yml up -d"

292
docker-compose.prod.yml Normal file
View File

@@ -0,0 +1,292 @@
services:
postgres-db:
image: timescale/timescaledb:2.15.2-pg16
container_name: fundamental-postgres
command: -c shared_preload_libraries=timescaledb
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: fundamental
volumes:
- pgdata:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d fundamental"]
interval: 5s
timeout: 5s
retries: 10
networks:
- app-network
restart: always
nats:
image: nats:2.9
container_name: fundamental-nats
volumes:
- nats_data:/data
networks:
- app-network
restart: always
data-persistence-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: data-persistence-service-server
container_name: data-persistence-service
# Note: the Dockerfile renames each service binary to the generic 'app';
# the image entrypoint is /usr/local/bin/app.
environment:
HOST: 0.0.0.0
PORT: 3000
DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
RUST_LOG: info
RUST_BACKTRACE: "1"
SKIP_MIGRATIONS_ON_MISMATCH: "1"
depends_on:
postgres-db:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
networks:
- app-network
restart: always
api-gateway:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: api-gateway
container_name: api-gateway
environment:
SERVER_PORT: 4000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
RUST_LOG: info,axum=info
RUST_BACKTRACE: "1"
depends_on:
nats:
condition: service_started
data-persistence-service:
condition: service_healthy
networks:
- app-network
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
restart: always
mock-provider-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: mock-provider-service
container_name: mock-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8006
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: mock-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
alphavantage-provider-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: alphavantage-provider-service
container_name: alphavantage-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: alphavantage-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
tushare-provider-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: tushare-provider-service
container_name: tushare-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8001
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
TUSHARE_API_URL: http://api.waditu.com
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: tushare-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
finnhub-provider-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: finnhub-provider-service
container_name: finnhub-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8002
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
FINNHUB_API_URL: https://finnhub.io/api/v1
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: finnhub-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
yfinance-provider-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: yfinance-provider-service
container_name: yfinance-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8003
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: yfinance-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
dns:
- 8.8.8.8
- 8.8.4.4
restart: always
report-generator-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: report-generator-service
container_name: report-generator-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8004
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
GOTENBERG_URL: http://gotenberg:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
- gotenberg
networks:
- app-network
restart: always
gotenberg:
image: gotenberg/gotenberg:8
container_name: gotenberg
networks:
- app-network
restart: always
workflow-orchestrator-service:
build:
context: .
dockerfile: docker/Dockerfile.backend.prod
args:
SERVICE_NAME: workflow-orchestrator-service
container_name: workflow-orchestrator-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8005
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
frontend:
build:
context: .
dockerfile: docker/Dockerfile.frontend.prod
container_name: fundamental-frontend
ports:
- "8080:80" # Map host 8080 to container 80 (Nginx)
depends_on:
api-gateway:
condition: service_healthy
networks:
- app-network
restart: always
volumes:
workflow_data:
pgdata:
nats_data:
networks:
app-network:

230
docker-compose.server.yml Normal file
View File

@@ -0,0 +1,230 @@
services:
postgres-db:
image: timescale/timescaledb:2.15.2-pg16
container_name: fundamental-postgres
command: -c shared_preload_libraries=timescaledb
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: fundamental
volumes:
- pgdata:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres -d fundamental"]
interval: 5s
timeout: 5s
retries: 10
networks:
- app-network
restart: always
nats:
image: nats:2.9
container_name: fundamental-nats
volumes:
- nats_data:/data
networks:
- app-network
restart: always
data-persistence-service:
image: harbor.3prism.ai/fundamental_analysis/data-persistence-service:latest
container_name: data-persistence-service
environment:
HOST: 0.0.0.0
PORT: 3000
DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
RUST_LOG: info
RUST_BACKTRACE: "1"
SKIP_MIGRATIONS_ON_MISMATCH: "1"
depends_on:
postgres-db:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
networks:
- app-network
restart: always
api-gateway:
image: harbor.3prism.ai/fundamental_analysis/api-gateway:latest
container_name: api-gateway
environment:
SERVER_PORT: 4000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
RUST_LOG: info,axum=info
RUST_BACKTRACE: "1"
depends_on:
nats:
condition: service_started
data-persistence-service:
condition: service_healthy
networks:
- app-network
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
restart: always
alphavantage-provider-service:
image: harbor.3prism.ai/fundamental_analysis/alphavantage-provider-service:latest
container_name: alphavantage-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: alphavantage-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
tushare-provider-service:
image: harbor.3prism.ai/fundamental_analysis/tushare-provider-service:latest
container_name: tushare-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8001
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
TUSHARE_API_URL: http://api.waditu.com
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: tushare-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
finnhub-provider-service:
image: harbor.3prism.ai/fundamental_analysis/finnhub-provider-service:latest
container_name: finnhub-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8002
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
FINNHUB_API_URL: https://finnhub.io/api/v1
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: finnhub-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
yfinance-provider-service:
image: harbor.3prism.ai/fundamental_analysis/yfinance-provider-service:latest
container_name: yfinance-provider-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8003
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: yfinance-provider-service
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
dns:
- 8.8.8.8
- 8.8.4.4
restart: always
report-generator-service:
image: harbor.3prism.ai/fundamental_analysis/report-generator-service:latest
container_name: report-generator-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8004
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
GOTENBERG_URL: http://gotenberg:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
- gotenberg
networks:
- app-network
restart: always
workflow-orchestrator-service:
image: harbor.3prism.ai/fundamental_analysis/workflow-orchestrator-service:latest
container_name: workflow-orchestrator-service
volumes:
- workflow_data:/mnt/workflow_data
environment:
SERVER_PORT: 8005
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
restart: always
gotenberg:
image: gotenberg/gotenberg:8
container_name: gotenberg
networks:
- app-network
restart: always
frontend:
image: harbor.3prism.ai/fundamental_analysis/frontend:latest
container_name: fundamental-frontend
ports:
- "28080:80" # Map host 28080 to container 80 (Nginx)
depends_on:
api-gateway:
condition: service_healthy
networks:
- app-network
restart: always
volumes:
workflow_data:
pgdata:
nats_data:
networks:
app-network:

docker-compose.yml
View File

@@ -98,7 +98,7 @@ services:
environment:
SERVER_PORT: 4000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
RUST_LOG: info,axum=info
RUST_BACKTRACE: "1"
@@ -109,6 +109,8 @@ services:
condition: service_healthy
alphavantage-provider-service:
condition: service_started
mock-provider-service:
condition: service_started
tushare-provider-service:
condition: service_started
finnhub-provider-service:
@@ -129,6 +131,38 @@ services:
- cargo-target:/app/target
- cargo-cache:/usr/local/cargo
mock-provider-service:
build:
context: .
dockerfile: docker/Dockerfile.dev
container_name: mock-provider-service
working_dir: /app/services/mock-provider-service
command: ["cargo", "watch", "-x", "run"]
volumes:
- workflow_data:/mnt/workflow_data
- ./:/app
- cargo-target:/app/target
- cargo-cache:/usr/local/cargo
environment:
SERVER_PORT: 8006
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: mock-provider-service
RUST_LOG: info,axum=info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
networks:
- app-network
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:8006/health >/dev/null || exit 1"]
interval: 5s
timeout: 5s
retries: 12
alphavantage-provider-service:
build:
context: .
@@ -144,7 +178,7 @@ services:
environment:
SERVER_PORT: 8000
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: alphavantage-provider-service
@@ -176,7 +210,7 @@ services:
environment:
SERVER_PORT: 8001
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
TUSHARE_API_URL: http://api.waditu.com
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
@@ -209,7 +243,7 @@ services:
environment:
SERVER_PORT: 8002
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
FINNHUB_API_URL: https://finnhub.io/api/v1
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
@@ -242,7 +276,7 @@ services:
environment:
SERVER_PORT: 8003
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
API_GATEWAY_URL: http://api-gateway:4000
WORKFLOW_DATA_PATH: /mnt/workflow_data
SERVICE_HOST: yfinance-provider-service
@@ -277,13 +311,15 @@ services:
environment:
SERVER_PORT: 8004
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
GOTENBERG_URL: http://gotenberg:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info,axum=info
RUST_BACKTRACE: "1"
depends_on:
- nats
- data-persistence-service
- gotenberg
networks:
- app-network
healthcheck:
@@ -292,6 +328,14 @@ services:
timeout: 5s
retries: 12
gotenberg:
image: gotenberg/gotenberg:8
container_name: gotenberg
ports:
- "3000:3000"
networks:
- app-network
workflow-orchestrator-service:
build:
context: .
@@ -307,7 +351,7 @@ services:
environment:
SERVER_PORT: 8005
NATS_ADDR: nats://nats:4222
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
WORKFLOW_DATA_PATH: /mnt/workflow_data
RUST_LOG: info
RUST_BACKTRACE: "1"

docker/Dockerfile.backend.prod
View File

@@ -0,0 +1,67 @@
# 1. Build Stage
FROM rust:1.90-bookworm as builder
ARG SERVICE_NAME
WORKDIR /usr/src/app
# Copy the entire workspace
COPY . .
# Build the specific service in release mode
ENV SQLX_OFFLINE=true
RUN cargo build --release --bin ${SERVICE_NAME}
# Prepare runtime assets directory
RUN mkdir -p /app/assets
# Conditionally copy asset folders that exist for this service.
# Docker COPY doesn't support conditionals, so we handle this with shell checks
# in the builder stage.
# 1. Migrations (e.g., data-persistence-service)
RUN if [ -d "services/${SERVICE_NAME}/migrations" ]; then \
mkdir -p /app/assets/migrations && \
cp -r services/${SERVICE_NAME}/migrations/* /app/assets/migrations/; \
fi
# 2. Templates (e.g., report-generator-service)
RUN if [ -d "services/${SERVICE_NAME}/templates" ]; then \
mkdir -p /app/assets/templates && \
cp -r services/${SERVICE_NAME}/templates/* /app/assets/templates/; \
fi
# 2.1 Cookies (e.g., report-generator-service)
RUN if [ -f "services/${SERVICE_NAME}/cookies.txt" ]; then \
cp services/${SERVICE_NAME}/cookies.txt /app/assets/cookies.txt; \
fi
# 3. Config folder (root level, needed by some services like data-persistence)
# We copy it to a specific location.
RUN cp -r config /app/config
# 4. Service kit mirror: needed at build time (e.g., by data-persistence-service);
# the runtime image does not need it unless it compiles code at runtime.
# 2. Runtime Stage
FROM debian:bookworm-slim
ARG SERVICE_NAME
ENV TZ=Asia/Shanghai
# Install dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libssl3 \
curl \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Copy binary
COPY --from=builder /usr/src/app/target/release/${SERVICE_NAME} /usr/local/bin/app
# Copy prepared assets
COPY --from=builder /app/assets /app/
COPY --from=builder /app/config /app/config
# Set the binary as the entrypoint
ENTRYPOINT ["/usr/local/bin/app"]

13
docker/Dockerfile.builder Normal file
View File

@@ -0,0 +1,13 @@
FROM rust:1.90-bookworm
WORKDIR /usr/src/app
# Copy the entire workspace
COPY . .
# Set SQLX offline mode to avoid needing a running DB during build
ENV SQLX_OFFLINE=true
# Build the entire workspace in release mode
# This compiles all crates in the workspace at once
RUN cargo build --release --workspace

25
docker/Dockerfile.dist Normal file
View File

@@ -0,0 +1,25 @@
FROM debian:bookworm-slim
ENV TZ=Asia/Shanghai
# Install minimal runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libssl3 \
curl \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# The build context is expected to be prepared by the deployment script
# It should contain:
# - app (the binary)
# - config/ (if needed)
# - assets/ (if needed)
COPY . .
# Ensure the binary is executable
RUN chmod +x /app/app
ENTRYPOINT ["/app/app"]

docker/Dockerfile.frontend.prod
View File

@@ -0,0 +1,24 @@
# 1. Build Stage
FROM node:20-slim AS builder
WORKDIR /app
# Environment variables for build time
# ENV NODE_ENV=production <- REMOVED: This causes npm ci to skip devDependencies (tsc, vite)
# These must match the Nginx proxy paths
ENV VITE_API_TARGET=/api
ENV NEXT_PUBLIC_BACKEND_URL=/api/v1
COPY frontend/package.json frontend/package-lock.json ./
RUN npm ci
COPY frontend/ .
RUN npm run build
# 2. Runtime Stage
FROM nginx:alpine
COPY --from=builder /app/dist /usr/share/nginx/html
COPY docker/nginx.prod.conf /etc/nginx/conf.d/default.conf
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]

36
docker/nginx.prod.conf Normal file
View File

@@ -0,0 +1,36 @@
server {
listen 80;
server_name localhost;
root /usr/share/nginx/html;
index index.html;
# Compression
gzip on;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
location / {
try_files $uri $uri/ /index.html;
}
# Proxy API requests to the backend
# Matches /api/v1/..., /api/context/..., etc.
location /api/ {
proxy_pass http://api-gateway:4000/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Proxy specific endpoints that are at root level in api-gateway
location /health {
proxy_pass http://api-gateway:4000/health;
proxy_set_header Host $host;
}
location /tasks/ {
proxy_pass http://api-gateway:4000/tasks/;
proxy_set_header Host $host;
}
}

View File

@@ -0,0 +1,142 @@
# High-Reliability Task Scheduling Protocol Design
**Date**: 2025-11-29
**Status**: Implemented (Testing Pending)
**Priority**: High
**Background**: The workflow currently uses a fire-and-forget model. When a provider is degraded, network-partitioned, or down, the orchestrator has no way to notice, so tasks hang as "zombies" with no automatic recovery.
## 1. Core Design
Upgrade the orchestrator from a simple dispatcher into a task supervisor that manages the full task lifecycle, introducing **handshake**, **heartbeat**, and **circuit-breaking** mechanisms.
## 2. Protocol Flows
### 2.1 Happy Path
```mermaid
sequenceDiagram
participant O as Orchestrator
participant P as Provider (Worker)
O->>P: Dispatch Task (Request)
activate O
P->>O: Acknowledgement (Accepted)
deactivate O
Note over O: Task Status: Running
loop Execution
P->>P: Processing...
P->>O: Heartbeat (Progress Update)
Note over O: Reset Watchdog Timer
end
P->>O: TaskCompleted (Result)
Note over O: Task Status: Completed
```
### 2.2 Rejected Dispatch (Provider Degraded)
In degraded mode the provider no longer blocks on a sleep; it stays connected and rejects tasks quickly.
```mermaid
sequenceDiagram
participant O as Orchestrator
participant P as Provider (Degraded)
O->>P: Dispatch Task (Request)
activate O
P->>O: Acknowledgement (Rejected: "API Key Missing")
deactivate O
Note over O: Task Status: Failed/Skipped
O->>O: Trigger Failure Handling
```
### 2.3 Dispatch Timeout
The provider is down or the network connection is broken.
```mermaid
sequenceDiagram
participant O as Orchestrator
participant P as Provider (Dead)
O->>P: Dispatch Task (Request)
activate O
Note right of O: Wait 5s...
O->>O: Timeout Error
deactivate O
Note over O: Task Status: Failed (Dispatch Error)
```
### 2.4 Execution Timeout / Lost Heartbeat (Execution Watchdog)
The worker dies unexpectedly after the task has started executing.
```mermaid
sequenceDiagram
participant O as Orchestrator (Monitor Loop)
participant P as Provider
P->>O: Heartbeat (T=0)
P->>P: Crash! 💥
loop Every 1s
O->>O: Check Active Tasks
end
Note over O: Now > LastHeartbeat + 30s
O->>O: Mark Task Failed (Zombie)
```
## 3. Implementation Tasks
### Phase 1: Protocol Definition (Common Contracts)
- [x] **Define `TaskAcknowledgement` DTO** (a sketch follows this list):
  - `Accepted`
  - `Rejected { reason: String }`
- [x] **Define `TaskHeartbeat`**:
  - Reuse `WorkflowEvent::TaskStateChanged`; treat it as a heartbeat when `status=Running`.
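A minimal sketch of the ack contract, assuming a serde-tagged enum in `common-contracts` (the tagging attributes are illustrative):

```rust
use serde::{Deserialize, Serialize};

/// Reply a provider sends immediately after receiving a dispatch request.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", content = "payload")]
pub enum TaskAcknowledgement {
    /// Task accepted; asynchronous execution starts now.
    Accepted,
    /// Task refused, e.g. a degraded provider replying "API Key Missing".
    Rejected { reason: String },
}
```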
### Phase 2: Orchestrator Changes
- [x] **Refactor `dispatch_task`** (a sketch follows this list):
  - Switch from `nats.publish` to `nats.request`.
  - Set a strict timeout (e.g., 5s).
  - On a `Rejected` response, mark the task Failed immediately.
- [x] **Implement `TaskMonitor`**:
  - Add a background Tokio task.
  - Maintain a `RunningTasks` list carrying `last_heartbeat_at` and `started_at`.
  - Logic:
    - `if (now - started_at > max_timeout) -> Fail (Timeout)`
    - `if (now - last_heartbeat_at > heartbeat_timeout) -> Fail (Zombie)`
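A sketch of the request/reply dispatch, assuming `async-nats` and the `TaskAcknowledgement` enum sketched above; the subject name and error handling are illustrative:

```rust
use std::time::Duration;
use anyhow::{bail, Context, Result};

/// Dispatch a task and wait for the provider's handshake (flows 2.1-2.3).
async fn dispatch_task(
    nats: &async_nats::Client,
    subject: String,
    payload: Vec<u8>,
) -> Result<()> {
    // Request/reply instead of fire-and-forget, under a strict 5s deadline.
    let reply = tokio::time::timeout(
        Duration::from_secs(5),
        nats.request(subject, payload.into()),
    )
    .await
    .context("dispatch timeout: provider dead or unreachable")? // flow 2.3
    .context("NATS request failed")?;

    match serde_json::from_slice::<TaskAcknowledgement>(&reply.payload)? {
        // Accepted: the task is now Running; the TaskMonitor watchdog takes over.
        TaskAcknowledgement::Accepted => Ok(()),
        // Rejected: fail immediately instead of waiting for a timeout (flow 2.2).
        TaskAcknowledgement::Rejected { reason } => bail!("provider rejected task: {reason}"),
    }
}
```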
### Phase 3: Provider Changes (Provider Services)
`tushare-provider` is migrated first; the other providers follow. A provider-side sketch follows this list.
- [x] **Remove the blocking sleep**:
  - Connect to NATS even in the `Degraded` state.
- [x] **Implement ack logic**:
  - On receiving a command, check the provider's own state first.
  - If `Degraded`, reply `Rejected`.
  - If healthy, reply `Accepted` and start asynchronous execution.
- [x] **Implement heartbeat**:
  - During long operations (e.g., fetch), emit periodic progress/heartbeat events.
  - Implemented once, at the `WorkflowNodeRunner` level.
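A sketch of the provider-side handshake, reusing the `TaskAcknowledgement` enum above; `is_degraded` and `run_task` are stand-ins for real provider logic:

```rust
/// Ack synchronously on the request's reply subject, then execute off-loop.
async fn handle_dispatch(
    nats: async_nats::Client,
    msg: async_nats::Message,
) -> anyhow::Result<()> {
    let ack = if is_degraded() {
        TaskAcknowledgement::Rejected { reason: "API Key Missing".into() }
    } else {
        TaskAcknowledgement::Accepted
    };
    // Reply on the inbox subject that the orchestrator's `request` is awaiting.
    if let Some(reply) = msg.reply.clone() {
        nats.publish(reply, serde_json::to_vec(&ack)?.into()).await?;
    }
    if matches!(ack, TaskAcknowledgement::Accepted) {
        // Run outside the subscription loop; heartbeats come from the node runner.
        tokio::spawn(run_task(msg));
    }
    Ok(())
}

fn is_degraded() -> bool {
    false // real providers check config / API keys here
}

async fn run_task(_msg: async_nats::Message) {
    // fetch + periodic heartbeats, handled by WorkflowNodeRunner in practice
}
```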
### Phase 4: Validation & Testing
- [ ] **Unit tests (Contracts/Orchestrator)**:
  - Verify ack serialization/deserialization.
  - Verify the TaskMonitor timeout logic (mock time/tasks).
- [ ] **Component test (Mock Provider)**:
  - Build a mock provider that simulates:
    - Normal ACK + completion.
    - Normal ACK + mid-run crash (exercises heartbeat loss).
    - Rejected ACK (Degraded).
    - No ACK at all (timeout).
  - Verify the orchestrator's state transitions in each of these cases.
## 4. Expected Outcomes
1. **Fail fast**: a misconfiguration (e.g., a missing API key) no longer hangs the system for 5 minutes; it errors out within milliseconds.
2. **Self-healing**: when network jitter drops messages, the orchestrator notices and can (in the future) trigger retries.
3. **Observability**: "dispatch failure", "rejected execution", and "execution timeout" become three clearly distinguishable error modes.

View File

@@ -0,0 +1,170 @@
# Tushare Report Presentation Refactor
## 1. Background & Problem
Tushare data is currently stored and displayed as a long table of `metric_name` + `period_date` rows.
Problems:
- **Poor readability**: you cannot directly compare different metrics at the same point in time, nor one metric's trend over time.
- **No semantics**: raw fields like `c_pay_acq_const_fiolta` are utterly opaque to ordinary users.
- **Excess precision**: numbers like `1420023169.52` are hard to read at a glance.
## 2. Design Goal
Generate a structured, human-readable Markdown report that **contains all of the raw data**, lossless and clear.
### 2.1 Core Transformation (Pivot)
- **Lossless**: every `metric_name` in the raw data must be shown; none may be dropped.
- **Rows**: financial metrics, mapped to Chinese display names and grouped logically (income statement, balance sheet, ...). Metrics missing from the dictionary display their raw key.
- **Columns**: reporting periods (`period_date`), sorted newest first.
- **Values**: formatted numbers (e.g., 亿/万 units, percentages).
### 2.2 Sectioning
Use a nested **"group by year" + "group by metric category"** structure, with Markdown heading levels as the index.
- **The data provider only formats data; no LLM summary generation is involved.**
- **Pure data display**: no natural-language commentary or summaries are generated.
**Structure example:**
```markdown
# 600521.SS 财务数据明细
## 2025年度
### 关键指标
| 指标 | 2025-09-30 | ... |
| :--- | :--- | :--- |
| 总市值 | 14.20 亿 | ... |
### 资产负债表
...
## 2024年度
### 关键指标
| 指标 | 2024-12-31 | 2024-09-30 | 2024-06-30 | 2024-03-31 |
| :--- | :--- | :--- | :--- | :--- |
| ... | ... | ... | ... | ... |
### 资产负债表
...
```
*Sectioning strategy: split first by **year**; each year contains all reporting periods disclosed in it. This gives the clearest index and a bounded column count per table (at most 4), which reads well.*
## 3. Implementation Plan
### Step 1: Data Dictionary
- Flesh out the definitions under `docs/5_data_dictionary/`.
- Build the complete `metric_name` -> display name (CN) mapping table.
### Step 2: Transformation
- **Grouping algorithm**:
  - Parse `period_date` to extract the year.
  - Bucket the data into `YearBlock`s by year.
  - Sort years descending (2025 -> 2024 -> ...).
  - Within a year, sort dates descending (12-31 -> 09-30 -> ...).
- **Row classification**: split metrics into `Snapshot`, `Income`, `Balance`, `CashFlow`, `Ratios`, `Misc`.
- **Formatting** (a sketch follows this list):
  - Amounts: divide by 10^8, keep 2 decimals (e.g., "14.20 亿").
  - Ratios: multiply by 100, keep 2 decimals (e.g., "15.30%").
  - Headcounts / holder counts: integers, or 万 units.
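A sketch of the unit policies above; the type and function names are illustrative, not the service's actual API:

```rust
/// Unit policies from the section 4.1 dictionary.
enum UnitPolicy {
    Yi,      // 亿: divide by 10^8, 2 decimals
    Wan,     // 万: divide by 10^4, 2 decimals
    Percent, // ratios: multiply by 100, 2 decimals
    Integer, // headcounts
    Raw,     // unmatched keys keep their original precision
}

fn format_value(raw: f64, policy: &UnitPolicy) -> String {
    match policy {
        UnitPolicy::Yi => format!("{:.2} 亿", raw / 1e8),
        UnitPolicy::Wan => format!("{:.2} 万", raw / 1e4),
        UnitPolicy::Percent => format!("{:.2}%", raw * 100.0),
        UnitPolicy::Integer => format!("{}", raw.round() as i64),
        UnitPolicy::Raw => raw.to_string(),
    }
}

// e.g. format_value(1_420_023_169.52, &UnitPolicy::Yi) == "14.20 亿"
```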
### Step 3: Markdown Rendering
- Render hierarchically:
  1. Level-2 heading: `## 2024年度`
  2. Level-3 heading: `### 资产负债表`
  3. Table: the metrics of that category for that year.
## 4. Detailed Design & Mapping
### 4.1 Field Mapping Dictionary
**A. Snapshot & Market**
| Metric Key | Display Name (CN) | Unit Policy |
| :--- | :--- | :--- |
| `total_mv` | 总市值 | 亿 (2 decimals) |
| `employees` | 员工人数 | Integer |
| `holder_num` | 股东户数 | 万 (2 decimals) |
| `close` | 收盘价 | Raw value |
| `pe` | 市盈率 | Raw value |
| `pb` | 市净率 | Raw value |
**B. Income Statement**
| Metric Key | Display Name (CN) | Unit Policy |
| :--- | :--- | :--- |
| `revenue` | 营业收入 | 亿 |
| `n_income` | 净利润 | 亿 |
| `rd_exp` | 研发费用 | 亿 |
| `sell_exp` | 销售费用 | 亿 |
| `admin_exp` | 管理费用 | 亿 |
| `fin_exp` | 财务费用 | 亿 |
| `total_cogs` | 营业成本 | 亿 |
| `tax_to_ebt` | 实际税率 | % |
| `__tax_rate` | 所得税率(计算) | % |
**C. Balance Sheet**
| Metric Key | Display Name (CN) | Unit Policy |
| :--- | :--- | :--- |
| `total_assets` | 总资产 | 亿 |
| `fix_assets` | 固定资产 | 亿 |
| `inventories` | 存货 | 亿 |
| `accounts_receiv`| 应收账款 | 亿 |
| `accounts_pay` | 应付账款 | 亿 |
| `prepayment` | 预付款项 | 亿 |
| `adv_receipts` | 预收款项 | 亿 |
| `contract_liab` | 合同负债 | 亿 |
| `money_cap` | 货币资金 | 亿 |
| `lt_eqt_invest` | 长期股权投资 | 亿 |
| `goodwill` | 商誉 | 亿 |
| `st_borr` | 短期借款 | 亿 |
| `lt_borr` | 长期借款 | 亿 |
**D. Cash Flow**
| Metric Key | Display Name (CN) | Unit Policy |
| :--- | :--- | :--- |
| `n_cashflow_act` | 经营净现金流 | 亿 |
| `c_paid_to_for_empl` | 支付职工现金 | 亿 |
| `c_pay_acq_const_fiolta` | 购建资产支付 | 亿 |
| `dividend_amount`| 分红总额 | 亿 |
**E. Operations & Ratios**
| Metric Key | Display Name (CN) | Unit Policy |
| :--- | :--- | :--- |
| `arturn_days` | 应收周转天数 | 天 (days) |
| `invturn_days` | 存货周转天数 | 天 (days) |
| `__gross_margin` | 毛利率 | % |
| `__net_margin` | 净利率 | % |
| `__money_cap_ratio` | 现金占比 | % |
| `__fix_assets_ratio` | 固定资产占比 | % |
| `__lt_invest_ratio` | 长投占比 | % |
| `__goodwill_ratio` | 商誉占比 | % |
| `__ar_ratio` | 应收占比 | % |
| `__ap_ratio` | 应付占比 | % |
| `__st_borr_ratio` | 短贷占比 | % |
| `__lt_borr_ratio` | 长贷占比 | % |
| `__rd_rate` | 研发费率 | % |
| `__sell_rate` | 销售费率 | % |
| `__admin_rate` | 管理费率 | % |
**F. Misc**
*Keys that match none of the fields above are displayed with their raw key and original numeric precision.*
### 4.2 Structure Preview
```markdown
# Tushare 财务数据明细
## 2025年度
### 关键指标
| 指标 | 2025-09-30 | ... |
| :--- | :--- | :--- |
| 总市值 | 14.20 亿 | ... |
### 利润表
| 指标 | 2025-09-30 | ... |
| :--- | :--- | ... |
| 营业收入 | 64.09 亿 | ... |
... (balance sheet, cash flow) ...
## 2024年度
### 关键指标
...
```

View File

@@ -0,0 +1,75 @@
# Task: Realtime Logs Data Flow Optimization (Buffering & Replay) [Done]
## Goal
Fix the frontend `Realtime Logs` panel losing entries on page refresh, reconnect, or delayed initial load.
Ensure that first-hand server-side logs flow to the frontend reliably, without depending on NATS's ephemeral delivery.
## Implementation Plan
### 1. Backend: Enhance `SyncStateCommand` (Orchestrator)
Modify the `handle_sync_state` logic so that, alongside the state snapshot, it **also reads the current temporary log file** and sends the historical logs to the frontend as events.
* **Modify `workflow.rs` -> `handle_sync_state`**:
  * Call `log_manager.read_current_logs(req_id)` (a new, non-destructive read method).
  * The log content may be a huge string. To avoid blocking NATS messages, it can be sent in chunks, or carried inside the `WorkflowStateSnapshot` (if size allows).
  * **Option**: send a new event type `WorkflowLogHistory`, or reuse `TaskLog` (batched).
  * Since the frontend's `handleEvent` appends on `TaskLog`, we could loop-send `TaskLog` events.
  * **Preferred**: add a `logs: Vec<String>` field to `WorkflowStateSnapshot`, so the frontend fills the panel in one shot while restoring the snapshot.
### 2. Data Structure Changes
* **`common-contracts/src/messages.rs`**:
  * Extend `WorkflowStateSnapshot` with `logs: Vec<String>`.
### 3. Extend `LogBufferManager`
* **`logging.rs`** (both changes are sketched below):
  * Add `read_current_logs(&self, request_id: &str) -> Result<Vec<String>>`.
  * Read the file, split it into lines, return a `Vec<String>`.
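A sketch of both changes, with the snapshot's existing fields elided and an assumed `dir` field on `LogBufferManager`:

```rust
use std::path::PathBuf;

#[derive(serde::Serialize, serde::Deserialize)]
pub struct WorkflowStateSnapshot {
    // ...existing DAG/state fields elided...
    /// Buffered history, replayed into the Realtime Logs panel on (re)connect.
    pub logs: Vec<String>,
}

pub struct LogBufferManager {
    dir: PathBuf, // e.g. temp_logs/
}

impl LogBufferManager {
    /// Non-destructive read: the workflow may still be running, so keep the file.
    pub fn read_current_logs(&self, request_id: &str) -> anyhow::Result<Vec<String>> {
        let path = self.dir.join(format!("{request_id}.log"));
        if !path.exists() {
            return Ok(Vec::new()); // nothing logged yet for this request
        }
        let content = std::fs::read_to_string(path)?;
        Ok(content.lines().map(str::to_owned).collect())
    }
}
```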
### 4. Frontend Adaptation
* **`useWorkflowStore.ts`**:
  * In the `handleEvent` -> `WorkflowStateSnapshot` branch, handle `event.payload.logs`.
  * Merge these logs into `state.logs` (global logs), or parse and fan them out into `state.tasks` (if the log format carries a task ID).
  * The current format is `[ISO Time] [Level] Message` and does not necessarily include a task ID, so treat them primarily as global logs.
### 5. End-to-End Flow
1. **Frontend start/refresh**:
   * Call `SSE /events/{id}`.
   * The API gateway accepts the connection, subscribes to NATS, and sends a `SyncStateCommand` to the orchestrator.
2. **Orchestrator**:
   * Receives the `SyncStateCommand`.
   * Builds the DAG snapshot.
   * **Reads `temp_logs/{id}.log`**.
   * Builds the `WorkflowStateSnapshot` (logs included).
   * Publishes it to NATS.
3. **Frontend**:
   * Receives the snapshot.
   * Restores the DAG state.
   * Restores the Logs panel content.
4. **Subsequent realtime logs**:
   * The orchestrator keeps running; the tracing layer writes to the file.
   * **Key point**: we deleted `publish_log` earlier. Realtime push must be restored, but not through manual calls.
   * **Question**: besides writing the file, should `FileRequestLogLayer` also push logs to NATS?
   * **Answer**: yes. The earlier refactor removed the push, so the frontend **stopped receiving realtime updates**.
   * **Fix**: `FileRequestLogLayer` should be responsible for both:
     1. Writing the file (persistent buffer).
     2. Pushing to NATS (realtime display).
   * **Difficulty**: the layer is synchronous; NATS is asynchronous.
   * **Solution**: use a `tokio::sync::broadcast` or `mpsc` channel. The layer sends log lines into the channel; a background task receives them and pushes them to NATS (sketched after this list).
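A sketch of the sync-to-async bridge, assuming an unbounded `mpsc` channel (the `broadcast` variant is analogous); the subject naming and `LogLine` type are illustrative:

```rust
use tokio::sync::mpsc;

/// One buffered line, carried from the sync tracing layer to the async pusher.
struct LogLine {
    request_id: String,
    line: String,
}

/// Spawn the NATS pusher and hand the sender to `FileRequestLogLayer`.
fn spawn_log_pusher(nats: async_nats::Client) -> mpsc::UnboundedSender<LogLine> {
    let (tx, mut rx) = mpsc::unbounded_channel::<LogLine>();
    tokio::spawn(async move {
        while let Some(log) = rx.recv().await {
            // In the real service this would be wrapped as WorkflowEvent::TaskLog.
            let subject = format!("workflow.events.{}", log.request_id);
            let _ = nats.publish(subject, log.line.into()).await;
        }
    });
    tx // `tx.send(...)` is non-blocking, callable from synchronous layer code
}
```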
## Revised Backend Task List
1. **Restore the realtime push channel**:
   * Add a `log_broadcast_tx` (sender) to `AppState`.
   * `FileRequestLogLayer` holds this sender.
   * In `main.rs`, spawn a background task that listens on the receiver, wraps each line as a `WorkflowEvent::TaskLog`, and pushes it to NATS.
2. **Implement history replay (snapshot)**:
   * Add the `logs` field to `WorkflowStateSnapshot`.
   * Add the read method to `LogBufferManager`.
   * Fill in `logs` inside `handle_sync_state`.
## Frontend Task List
1. Update the `WorkflowStateSnapshot` type definition.
2. Handle the logs carried by the snapshot in the store.
This plan balances realtime delivery with reliability across disconnects and reconnects.

View File

@@ -0,0 +1,58 @@
# Task: Refactor Workflow Logging (File-Buffered Persistence) [Done]
## Context
`workflow-orchestrator-service` currently sends logs to NATS manually via `publish_log`. This is both unidiomatic and the cause of the "double logging" problem (Rust standard logs vs. the frontend's NATS logs), and nothing gets persisted.
We need a scheme that automatically captures Rust standard logs (`tracing`), buffers them, and archives them into the global context (VGCS) when the workflow finishes. Logs can be very long, so to avoid out-of-memory (OOM) failures and keep the data safe, we use the **file system** as the temporary buffer instead of memory.
## Objectives
1. **Cleanup**: delete all manual `publish_log` / `WorkflowEvent::TaskLog` emission logic from `workflow-orchestrator-service`.
2. **Capture**: implement a custom `tracing` layer that recognizes the current `request_id` and appends log lines to a temporary file on disk in real time.
3. **Persistence**: when the workflow ends (success or failure), read the corresponding temporary log file, store it in the VGCS repository (`workflow.log`), then clean up the temp file.
## Implementation Plan
### 1. Log Manager (`LogBufferManager`)
Create a new `LogBufferManager` struct that manages the temporary log files (a sketch follows this list).
* **Directory**: a `temp_logs/` directory under the service root.
* **Path strategy**: one file per request, e.g., `temp_logs/{request_id}.log`.
* **API**:
  * `append(request_id, message)`: open (or create) the file in append mode and write the line. For performance we could cache handles to active files (`DashMap<String, File>`), or simply open each time; the OS usually caches append writes well. Given the modest concurrency, **open-per-append** is the most robust, stateless, least bug-prone choice.
  * `finalize(request_id)`: read the full file contents, return a `String` or `Vec<u8>`, then **delete** the file.
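A sketch of the open-per-append design; the struct layout is illustrative:

```rust
use std::{fs, io::Write, path::PathBuf};

pub struct LogBufferManager {
    dir: PathBuf, // temp_logs/
}

impl LogBufferManager {
    fn path(&self, request_id: &str) -> PathBuf {
        self.dir.join(format!("{request_id}.log"))
    }

    /// Open-per-append: stateless, concurrency-safe, no handle cache to corrupt.
    pub fn append(&self, request_id: &str, message: &str) -> anyhow::Result<()> {
        let mut file = fs::OpenOptions::new()
            .create(true)
            .append(true)
            .open(self.path(request_id))?;
        writeln!(file, "{message}")?;
        Ok(())
    }

    /// Drain the buffer: return the full log and delete the temp file.
    pub fn finalize(&self, request_id: &str) -> anyhow::Result<String> {
        let path = self.path(request_id);
        let content = fs::read_to_string(&path)?;
        fs::remove_file(&path)?;
        Ok(content)
    }
}
```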
### 2. Custom Tracing Layer (`FileRequestLogLayer`)
Add a new `logging` module in `workflow-orchestrator-service` (a sketch follows this list).
* Implement `tracing_subscriber::Layer`.
* **Logic (`on_event`)**:
  1. Try to get the `request_id` from the span's extensions (or from the event's own fields).
  2. If a `request_id` is found:
     * Format the log line (e.g., `[2025-11-30T10:00:00Z INFO] Message...`).
     * Call `LogBufferManager::append(request_id, line)`.
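A sketch of the layer, reusing the `LogBufferManager` above. For brevity it only reads `request_id` from the event's own fields; walking the span scope (the route the plan prefers) would additionally require `S: for<'a> LookupSpan<'a>`:

```rust
use std::sync::Arc;
use tracing::{Event, Subscriber};
use tracing_subscriber::layer::{Context, Layer};

pub struct FileRequestLogLayer {
    manager: Arc<LogBufferManager>, // from section 1
}

/// Field visitor that pulls out `request_id` and the message text.
#[derive(Default)]
struct FieldVisitor {
    request_id: Option<String>,
    message: String,
}

impl tracing::field::Visit for FieldVisitor {
    fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
        match field.name() {
            "request_id" => {
                self.request_id = Some(format!("{value:?}").trim_matches('"').to_string())
            }
            "message" => self.message = format!("{value:?}"),
            _ => {}
        }
    }
}

impl<S: Subscriber> Layer<S> for FileRequestLogLayer {
    fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) {
        let mut visitor = FieldVisitor::default();
        event.record(&mut visitor);
        if let Some(req_id) = visitor.request_id {
            let line = format!(
                "[{} {}] {}",
                chrono::Utc::now().to_rfc3339(),
                event.metadata().level(),
                visitor.message
            );
            // Never panic the subscriber over a logging failure.
            let _ = self.manager.append(&req_id, &line);
        }
    }
}
```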
### 3. Update `AppState` and `main.rs`
* Add `pub log_manager: Arc<LogBufferManager>` to `AppState`.
* Initialize the `LogBufferManager` in `main.rs` (ensure the `temp_logs` directory exists).
* Register the `FileRequestLogLayer` with `tracing-subscriber`. Note: the layer needs access to the `LogBufferManager` (via a global static, or an `Arc` passed into the layer's constructor).
### 4. Refactor `WorkflowEngine` (`workflow.rs`)
* **Remove old code**: delete the `publish_log` method and all of its call sites.
* **Context propagation**: ensure all handling runs inside a span that carries the `request_id`.
  * E.g.: `let _span = tracing::info_span!("workflow", request_id = %req_id).entered();`
  * Or use `#[tracing::instrument(fields(request_id = %cmd.request_id))]`.
* **Archival logic**:
  * In `try_finish_workflow` (or wherever completion/failure is handled):
  * Call `self.state.log_manager.finalize(req_id)` to obtain the full log.
  * Write the log content to `workflow.log` (or `_execution.log`) via `self.state.vgcs`.
  * Commit to VGCS.
## Expected Results
* **Stability**: even hundreds of MB of logs consume disk space, never service memory.
* **Isolation**: each request writes to its own file; no interference, friendly to high concurrency.
* **Observability**: the History API can later expose complete execution logs, including system-level messages.
## Constraints & Risks
* **Disk space**: zombie files left in `temp_logs` by a service crash need periodic cleanup (at service startup, or via cron). For now, print a warning or do a simple cleanup at startup.
* **Performance**: frequent open/close in append mode is usually fine on SSDs, but extremely high-frequency logging (thousands of lines per second) could add overhead. Our analysis workloads keep the rate manageable.

frontend/package-lock.json
View File

@@ -19,15 +19,19 @@
"@radix-ui/react-slot": "^1.2.4",
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-tooltip": "^1.2.8",
"@react-pdf/renderer": "^4.3.1",
"@tanstack/react-query": "^5.90.10",
"@zodios/core": "^10.9.6",
"axios": "^1.13.2",
"buffer": "^6.0.3",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"cmdk": "^1.1.1",
"date-fns": "^4.1.0",
"elkjs": "^0.11.0",
"html-to-image": "^1.11.13",
"lucide-react": "^0.554.0",
"marked": "^17.0.1",
"react": "^19.2.0",
"react-dom": "^19.2.0",
"react-markdown": "^10.1.0",
@@ -402,6 +406,15 @@
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/runtime": {
"version": "7.28.4",
"resolved": "http://npm.repo.lan/@babel/runtime/-/runtime-7.28.4.tgz",
"integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==",
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/template": {
"version": "7.27.2",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
@@ -2265,6 +2278,180 @@
"integrity": "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==",
"license": "MIT"
},
"node_modules/@react-pdf/fns": {
"version": "3.1.2",
"resolved": "http://npm.repo.lan/@react-pdf/fns/-/fns-3.1.2.tgz",
"integrity": "sha512-qTKGUf0iAMGg2+OsUcp9ffKnKi41RukM/zYIWMDJ4hRVYSr89Q7e3wSDW/Koqx3ea3Uy/z3h2y3wPX6Bdfxk6g==",
"license": "MIT"
},
"node_modules/@react-pdf/font": {
"version": "4.0.3",
"resolved": "http://npm.repo.lan/@react-pdf/font/-/font-4.0.3.tgz",
"integrity": "sha512-N1qQDZr6phXYQOp033Hvm2nkUkx2LkszjGPbmRavs9VOYzi4sp31MaccMKptL24ii6UhBh/z9yPUhnuNe/qHwA==",
"license": "MIT",
"dependencies": {
"@react-pdf/pdfkit": "^4.0.4",
"@react-pdf/types": "^2.9.1",
"fontkit": "^2.0.2",
"is-url": "^1.2.4"
}
},
"node_modules/@react-pdf/image": {
"version": "3.0.3",
"resolved": "http://npm.repo.lan/@react-pdf/image/-/image-3.0.3.tgz",
"integrity": "sha512-lvP5ryzYM3wpbO9bvqLZYwEr5XBDX9jcaRICvtnoRqdJOo7PRrMnmB4MMScyb+Xw10mGeIubZAAomNAG5ONQZQ==",
"license": "MIT",
"dependencies": {
"@react-pdf/png-js": "^3.0.0",
"jay-peg": "^1.1.1"
}
},
"node_modules/@react-pdf/layout": {
"version": "4.4.1",
"resolved": "http://npm.repo.lan/@react-pdf/layout/-/layout-4.4.1.tgz",
"integrity": "sha512-GVzdlWoZWldRDzlWj3SttRXmVDxg7YfraAohwy+o9gb9hrbDJaaAV6jV3pc630Evd3K46OAzk8EFu8EgPDuVuA==",
"license": "MIT",
"dependencies": {
"@react-pdf/fns": "3.1.2",
"@react-pdf/image": "^3.0.3",
"@react-pdf/primitives": "^4.1.1",
"@react-pdf/stylesheet": "^6.1.1",
"@react-pdf/textkit": "^6.0.0",
"@react-pdf/types": "^2.9.1",
"emoji-regex-xs": "^1.0.0",
"queue": "^6.0.1",
"yoga-layout": "^3.2.1"
}
},
"node_modules/@react-pdf/pdfkit": {
"version": "4.0.4",
"resolved": "http://npm.repo.lan/@react-pdf/pdfkit/-/pdfkit-4.0.4.tgz",
"integrity": "sha512-/nITLggsPlB66bVLnm0X7MNdKQxXelLGZG6zB5acF5cCgkFwmXHnLNyxYOUD4GMOMg1HOPShXDKWrwk2ZeHsvw==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.20.13",
"@react-pdf/png-js": "^3.0.0",
"browserify-zlib": "^0.2.0",
"crypto-js": "^4.2.0",
"fontkit": "^2.0.2",
"jay-peg": "^1.1.1",
"linebreak": "^1.1.0",
"vite-compatible-readable-stream": "^3.6.1"
}
},
"node_modules/@react-pdf/png-js": {
"version": "3.0.0",
"resolved": "http://npm.repo.lan/@react-pdf/png-js/-/png-js-3.0.0.tgz",
"integrity": "sha512-eSJnEItZ37WPt6Qv5pncQDxLJRK15eaRwPT+gZoujP548CodenOVp49GST8XJvKMFt9YqIBzGBV/j9AgrOQzVA==",
"license": "MIT",
"dependencies": {
"browserify-zlib": "^0.2.0"
}
},
"node_modules/@react-pdf/primitives": {
"version": "4.1.1",
"resolved": "http://npm.repo.lan/@react-pdf/primitives/-/primitives-4.1.1.tgz",
"integrity": "sha512-IuhxYls1luJb7NUWy6q5avb1XrNaVj9bTNI40U9qGRuS6n7Hje/8H8Qi99Z9UKFV74bBP3DOf3L1wV2qZVgVrQ==",
"license": "MIT"
},
"node_modules/@react-pdf/reconciler": {
"version": "1.1.4",
"resolved": "http://npm.repo.lan/@react-pdf/reconciler/-/reconciler-1.1.4.tgz",
"integrity": "sha512-oTQDiR/t4Z/Guxac88IavpU2UgN7eR0RMI9DRKvKnvPz2DUasGjXfChAdMqDNmJJxxV26mMy9xQOUV2UU5/okg==",
"license": "MIT",
"dependencies": {
"object-assign": "^4.1.1",
"scheduler": "0.25.0-rc-603e6108-20241029"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
"node_modules/@react-pdf/reconciler/node_modules/scheduler": {
"version": "0.25.0-rc-603e6108-20241029",
"resolved": "http://npm.repo.lan/scheduler/-/scheduler-0.25.0-rc-603e6108-20241029.tgz",
"integrity": "sha512-pFwF6H1XrSdYYNLfOcGlM28/j8CGLu8IvdrxqhjWULe2bPcKiKW4CV+OWqR/9fT52mywx65l7ysNkjLKBda7eA==",
"license": "MIT"
},
"node_modules/@react-pdf/render": {
"version": "4.3.1",
"resolved": "http://npm.repo.lan/@react-pdf/render/-/render-4.3.1.tgz",
"integrity": "sha512-v1WAaAhQShQZGcBxfjkEThGCHVH9CSuitrZ1bIOLvB5iBKM14abYK5D6djKhWCwF6FTzYeT2WRjRMVgze/ND2A==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.20.13",
"@react-pdf/fns": "3.1.2",
"@react-pdf/primitives": "^4.1.1",
"@react-pdf/textkit": "^6.0.0",
"@react-pdf/types": "^2.9.1",
"abs-svg-path": "^0.1.1",
"color-string": "^1.9.1",
"normalize-svg-path": "^1.1.0",
"parse-svg-path": "^0.1.2",
"svg-arc-to-cubic-bezier": "^3.2.0"
}
},
"node_modules/@react-pdf/renderer": {
"version": "4.3.1",
"resolved": "http://npm.repo.lan/@react-pdf/renderer/-/renderer-4.3.1.tgz",
"integrity": "sha512-dPKHiwGTaOsKqNWCHPYYrx8CDfAGsUnV4tvRsEu0VPGxuot1AOq/M+YgfN/Pb+MeXCTe2/lv6NvA8haUtj3tsA==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.20.13",
"@react-pdf/fns": "3.1.2",
"@react-pdf/font": "^4.0.3",
"@react-pdf/layout": "^4.4.1",
"@react-pdf/pdfkit": "^4.0.4",
"@react-pdf/primitives": "^4.1.1",
"@react-pdf/reconciler": "^1.1.4",
"@react-pdf/render": "^4.3.1",
"@react-pdf/types": "^2.9.1",
"events": "^3.3.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2",
"queue": "^6.0.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
"node_modules/@react-pdf/stylesheet": {
"version": "6.1.1",
"resolved": "http://npm.repo.lan/@react-pdf/stylesheet/-/stylesheet-6.1.1.tgz",
"integrity": "sha512-Iyw0A3wRIeQLN4EkaKf8yF9MvdMxiZ8JjoyzLzDHSxnKYoOA4UGu84veCb8dT9N8MxY5x7a0BUv/avTe586Plg==",
"license": "MIT",
"dependencies": {
"@react-pdf/fns": "3.1.2",
"@react-pdf/types": "^2.9.1",
"color-string": "^1.9.1",
"hsl-to-hex": "^1.0.0",
"media-engine": "^1.0.3",
"postcss-value-parser": "^4.1.0"
}
},
"node_modules/@react-pdf/textkit": {
"version": "6.0.0",
"resolved": "http://npm.repo.lan/@react-pdf/textkit/-/textkit-6.0.0.tgz",
"integrity": "sha512-fDt19KWaJRK/n2AaFoVm31hgGmpygmTV7LsHGJNGZkgzXcFyLsx+XUl63DTDPH3iqxj3xUX128t104GtOz8tTw==",
"license": "MIT",
"dependencies": {
"@react-pdf/fns": "3.1.2",
"bidi-js": "^1.0.2",
"hyphen": "^1.6.4",
"unicode-properties": "^1.4.1"
}
},
"node_modules/@react-pdf/types": {
"version": "2.9.1",
"resolved": "http://npm.repo.lan/@react-pdf/types/-/types-2.9.1.tgz",
"integrity": "sha512-5GoCgG0G5NMgpPuHbKG2xcVRQt7+E5pg3IyzVIIozKG3nLcnsXW4zy25vG1ZBQA0jmo39q34au/sOnL/0d1A4w==",
"license": "MIT",
"dependencies": {
"@react-pdf/font": "^4.0.3",
"@react-pdf/primitives": "^4.1.1",
"@react-pdf/stylesheet": "^6.1.1"
}
},
"node_modules/@reactflow/background": {
"version": "11.3.14",
"resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.3.14.tgz",
@ -2850,6 +3037,15 @@
"win32"
]
},
"node_modules/@swc/helpers": {
"version": "0.5.17",
"resolved": "http://npm.repo.lan/@swc/helpers/-/helpers-0.5.17.tgz",
"integrity": "sha512-5IKx/Y13RsYd+sauPb2x+U/xZikHjolzfuDgTAl/Tdf3Q8rslRvC19NKDLgAJQ6wsqADk10ntlv08nPFw/gO/A==",
"license": "Apache-2.0",
"dependencies": {
"tslib": "^2.8.0"
}
},
"node_modules/@tailwindcss/node": {
"version": "4.1.17",
"resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.17.tgz",
@ -3932,6 +4128,12 @@
"zod": "^3.x"
}
},
"node_modules/abs-svg-path": {
"version": "0.1.1",
"resolved": "http://npm.repo.lan/abs-svg-path/-/abs-svg-path-0.1.1.tgz",
"integrity": "sha512-d8XPSGjfyzlXC3Xx891DJRyZfqk5JU0BJrDQcsWomFIV1/BIzPW5HDH5iDdWpqWaav0YVIEzT1RHTwWr0FFshA==",
"license": "MIT"
},
"node_modules/acorn": {
"version": "8.15.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
@ -4079,6 +4281,26 @@
"dev": true,
"license": "MIT"
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "http://npm.repo.lan/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/baseline-browser-mapping": {
"version": "2.8.30",
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.30.tgz",
@ -4089,6 +4311,15 @@
"baseline-browser-mapping": "dist/cli.js"
}
},
"node_modules/bidi-js": {
"version": "1.0.3",
"resolved": "http://npm.repo.lan/bidi-js/-/bidi-js-1.0.3.tgz",
"integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==",
"license": "MIT",
"dependencies": {
"require-from-string": "^2.0.2"
}
},
"node_modules/brace-expansion": {
"version": "1.1.12",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
@ -4113,6 +4344,24 @@
"node": ">=8"
}
},
"node_modules/brotli": {
"version": "1.3.3",
"resolved": "http://npm.repo.lan/brotli/-/brotli-1.3.3.tgz",
"integrity": "sha512-oTKjJdShmDuGW94SyyaoQvAjf30dZaHnjJ8uAF+u2/vGJkJbJPJAT1gDiOJP5v1Zb6f9KEyW/1HpuaWIXtGHPg==",
"license": "MIT",
"dependencies": {
"base64-js": "^1.1.2"
}
},
"node_modules/browserify-zlib": {
"version": "0.2.0",
"resolved": "http://npm.repo.lan/browserify-zlib/-/browserify-zlib-0.2.0.tgz",
"integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==",
"license": "MIT",
"dependencies": {
"pako": "~1.0.5"
}
},
"node_modules/browserslist": {
"version": "4.28.0",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz",
@ -4147,6 +4396,30 @@
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
"node_modules/buffer": {
"version": "6.0.3",
"resolved": "http://npm.repo.lan/buffer/-/buffer-6.0.3.tgz",
"integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT",
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.2.1"
}
},
"node_modules/cac": {
"version": "6.7.14",
"resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
@ -4293,6 +4566,15 @@
"integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==",
"license": "MIT"
},
"node_modules/clone": {
"version": "2.1.2",
"resolved": "http://npm.repo.lan/clone/-/clone-2.1.2.tgz",
"integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==",
"license": "MIT",
"engines": {
"node": ">=0.8"
}
},
"node_modules/clsx": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
@ -4335,9 +4617,18 @@
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true,
"license": "MIT"
},
"node_modules/color-string": {
"version": "1.9.1",
"resolved": "http://npm.repo.lan/color-string/-/color-string-1.9.1.tgz",
"integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==",
"license": "MIT",
"dependencies": {
"color-name": "^1.0.0",
"simple-swizzle": "^0.2.2"
}
},
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
@ -4398,6 +4689,12 @@
"node": ">= 8"
}
},
"node_modules/crypto-js": {
"version": "4.2.0",
"resolved": "http://npm.repo.lan/crypto-js/-/crypto-js-4.2.0.tgz",
"integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q==",
"license": "MIT"
},
"node_modules/cssesc": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
@ -4616,6 +4913,12 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/dfa": {
"version": "1.2.0",
"resolved": "http://npm.repo.lan/dfa/-/dfa-1.2.0.tgz",
"integrity": "sha512-ED3jP8saaweFTjeGX8HQPjeC1YYyZs98jGNZx6IiBvxW7JG5v492kamAQB3m2wop07CvU/RQmzcKr6bgcC5D/Q==",
"license": "MIT"
},
"node_modules/dunder-proto": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
@ -4643,6 +4946,12 @@
"integrity": "sha512-u4J8h9mwEDaYMqo0RYJpqNMFDoMK7f+pu4GjcV+N8jIC7TRdORgzkfSjTJemhqONFfH6fBI3wpysgWbhgVWIXw==",
"license": "EPL-2.0"
},
"node_modules/emoji-regex-xs": {
"version": "1.0.0",
"resolved": "http://npm.repo.lan/emoji-regex-xs/-/emoji-regex-xs-1.0.0.tgz",
"integrity": "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==",
"license": "MIT"
},
"node_modules/enhanced-resolve": {
"version": "5.18.3",
"resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.3.tgz",
@ -4968,6 +5277,15 @@
"dev": true,
"license": "MIT"
},
"node_modules/events": {
"version": "3.3.0",
"resolved": "http://npm.repo.lan/events/-/events-3.3.0.tgz",
"integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==",
"license": "MIT",
"engines": {
"node": ">=0.8.x"
}
},
"node_modules/extend": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
@ -4978,7 +5296,6 @@
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
"dev": true,
"license": "MIT"
},
"node_modules/fast-glob": {
@ -5136,6 +5453,23 @@
}
}
},
"node_modules/fontkit": {
"version": "2.0.4",
"resolved": "http://npm.repo.lan/fontkit/-/fontkit-2.0.4.tgz",
"integrity": "sha512-syetQadaUEDNdxdugga9CpEYVaQIxOwk7GlwZWWZ19//qW4zE5bknOKeMBDYAASwnpaSHKJITRLMF9m1fp3s6g==",
"license": "MIT",
"dependencies": {
"@swc/helpers": "^0.5.12",
"brotli": "^1.3.2",
"clone": "^2.1.2",
"dfa": "^1.2.0",
"fast-deep-equal": "^3.1.3",
"restructure": "^3.0.0",
"tiny-inflate": "^1.0.3",
"unicode-properties": "^1.4.0",
"unicode-trie": "^2.0.0"
}
},
"node_modules/form-data": {
"version": "4.0.5",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
@ -5441,6 +5775,27 @@
"hermes-estree": "0.25.1"
}
},
"node_modules/hsl-to-hex": {
"version": "1.0.0",
"resolved": "http://npm.repo.lan/hsl-to-hex/-/hsl-to-hex-1.0.0.tgz",
"integrity": "sha512-K6GVpucS5wFf44X0h2bLVRDsycgJmf9FF2elg+CrqD8GcFU8c6vYhgXn8NjUkFCwj+xDFb70qgLbTUm6sxwPmA==",
"license": "MIT",
"dependencies": {
"hsl-to-rgb-for-reals": "^1.1.0"
}
},
"node_modules/hsl-to-rgb-for-reals": {
"version": "1.1.1",
"resolved": "http://npm.repo.lan/hsl-to-rgb-for-reals/-/hsl-to-rgb-for-reals-1.1.1.tgz",
"integrity": "sha512-LgOWAkrN0rFaQpfdWBQlv/VhkOxb5AsBjk6NQVx4yEzWS923T07X0M1Y0VNko2H52HeSpZrZNNMJ0aFqsdVzQg==",
"license": "ISC"
},
"node_modules/html-to-image": {
"version": "1.11.13",
"resolved": "http://npm.repo.lan/html-to-image/-/html-to-image-1.11.13.tgz",
"integrity": "sha512-cuOPoI7WApyhBElTTb9oqsawRvZ0rHhaHwghRLlTuffoD1B2aDemlCruLeZrUIIdvG7gs9xeELEPm6PhuASqrg==",
"license": "MIT"
},
"node_modules/html-url-attributes": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz",
@ -5451,6 +5806,32 @@
"url": "https://opencollective.com/unified"
}
},
"node_modules/hyphen": {
"version": "1.10.6",
"resolved": "http://npm.repo.lan/hyphen/-/hyphen-1.10.6.tgz",
"integrity": "sha512-fXHXcGFTXOvZTSkPJuGOQf5Lv5T/R2itiiCVPg9LxAje5D00O0pP83yJShFq5V89Ly//Gt6acj7z8pbBr34stw==",
"license": "ISC"
},
"node_modules/ieee754": {
"version": "1.2.1",
"resolved": "http://npm.repo.lan/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "BSD-3-Clause"
},
"node_modules/ignore": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz",
@ -5488,6 +5869,12 @@
"node": ">=0.8.19"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "http://npm.repo.lan/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"license": "ISC"
},
"node_modules/inline-style-parser": {
"version": "0.2.7",
"resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
@ -5518,6 +5905,12 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/is-arrayish": {
"version": "0.3.4",
"resolved": "http://npm.repo.lan/is-arrayish/-/is-arrayish-0.3.4.tgz",
"integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==",
"license": "MIT"
},
"node_modules/is-decimal": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
@ -5583,6 +5976,12 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-url": {
"version": "1.2.4",
"resolved": "http://npm.repo.lan/is-url/-/is-url-1.2.4.tgz",
"integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==",
"license": "MIT"
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
@ -5590,6 +5989,15 @@
"dev": true,
"license": "ISC"
},
"node_modules/jay-peg": {
"version": "1.1.1",
"resolved": "http://npm.repo.lan/jay-peg/-/jay-peg-1.1.1.tgz",
"integrity": "sha512-D62KEuBxz/ip2gQKOEhk/mx14o7eiFRaU+VNNSP4MOiIkwb/D6B3G1Mfas7C/Fit8EsSV2/IWjZElx/Gs6A4ww==",
"license": "MIT",
"dependencies": {
"restructure": "^3.0.0"
}
},
"node_modules/jiti": {
"version": "2.6.1",
"resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz",
@ -5604,7 +6012,6 @@
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true,
"license": "MIT"
},
"node_modules/js-yaml": {
@ -5965,6 +6372,25 @@
"url": "https://opencollective.com/parcel"
}
},
"node_modules/linebreak": {
"version": "1.1.0",
"resolved": "http://npm.repo.lan/linebreak/-/linebreak-1.1.0.tgz",
"integrity": "sha512-MHp03UImeVhB7XZtjd0E4n6+3xr5Dq/9xI/5FptGk5FrbDR3zagPa2DS6U8ks/3HjbKWG9Q1M2ufOzxV2qLYSQ==",
"license": "MIT",
"dependencies": {
"base64-js": "0.0.8",
"unicode-trie": "^2.0.0"
}
},
"node_modules/linebreak/node_modules/base64-js": {
"version": "0.0.8",
"resolved": "http://npm.repo.lan/base64-js/-/base64-js-0.0.8.tgz",
"integrity": "sha512-3XSA2cR/h/73EzlXXdU6YNycmYI7+kicTxks4eJg2g39biHR84slg2+des+p7iHYhbRg/udIS4TD53WabcOUkw==",
"license": "MIT",
"engines": {
"node": ">= 0.4"
}
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
@ -6004,6 +6430,18 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/loose-envify": {
"version": "1.4.0",
"resolved": "http://npm.repo.lan/loose-envify/-/loose-envify-1.4.0.tgz",
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
"license": "MIT",
"dependencies": {
"js-tokens": "^3.0.0 || ^4.0.0"
},
"bin": {
"loose-envify": "cli.js"
}
},
"node_modules/lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
@ -6043,6 +6481,18 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/marked": {
"version": "17.0.1",
"resolved": "http://npm.repo.lan/marked/-/marked-17.0.1.tgz",
"integrity": "sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==",
"license": "MIT",
"bin": {
"marked": "bin/marked.js"
},
"engines": {
"node": ">= 20"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
@ -6334,6 +6784,12 @@
"url": "https://opencollective.com/unified"
}
},
"node_modules/media-engine": {
"version": "1.0.3",
"resolved": "http://npm.repo.lan/media-engine/-/media-engine-1.0.3.tgz",
"integrity": "sha512-aa5tG6sDoK+k70B9iEX1NeyfT8ObCKhNDs6lJVpwF6r8vhUfuKMslIcirq6HIUYuuUYLefcEQOn9bSBOvawtwg==",
"license": "MIT"
},
"node_modules/merge2": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
@ -7021,6 +7477,24 @@
"node": ">=0.10.0"
}
},
"node_modules/normalize-svg-path": {
"version": "1.1.0",
"resolved": "http://npm.repo.lan/normalize-svg-path/-/normalize-svg-path-1.1.0.tgz",
"integrity": "sha512-r9KHKG2UUeB5LoTouwDzBy2VxXlHsiM6fyLQvnJa0S5hrhzqElH/CH7TUGhT1fVvIYBIKf3OpY4YJ4CK+iaqHg==",
"license": "MIT",
"dependencies": {
"svg-arc-to-cubic-bezier": "^3.0.0"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "http://npm.repo.lan/object-assign/-/object-assign-4.1.1.tgz",
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/openapi-types": {
"version": "12.1.3",
"resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz",
@ -7114,6 +7588,12 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/pako": {
"version": "1.0.11",
"resolved": "http://npm.repo.lan/pako/-/pako-1.0.11.tgz",
"integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==",
"license": "(MIT AND Zlib)"
},
"node_modules/parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
@ -7152,6 +7632,12 @@
"integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
"license": "MIT"
},
"node_modules/parse-svg-path": {
"version": "0.1.2",
"resolved": "http://npm.repo.lan/parse-svg-path/-/parse-svg-path-0.1.2.tgz",
"integrity": "sha512-JyPSBnkTJ0AI8GGJLfMXvKq42cj5c006fnLz6fXy6zfoVjJizi8BNTpu8on8ziI1cKy9d9DGNuY17Ce7wuejpQ==",
"license": "MIT"
},
"node_modules/pastable": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/pastable/-/pastable-2.2.1.tgz",
@ -7265,7 +7751,6 @@
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
"integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
"dev": true,
"license": "MIT"
},
"node_modules/prelude-ls": {
@ -7294,6 +7779,17 @@
"url": "https://github.com/prettier/prettier?sponsor=1"
}
},
"node_modules/prop-types": {
"version": "15.8.1",
"resolved": "http://npm.repo.lan/prop-types/-/prop-types-15.8.1.tgz",
"integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
"license": "MIT",
"dependencies": {
"loose-envify": "^1.4.0",
"object-assign": "^4.1.1",
"react-is": "^16.13.1"
}
},
"node_modules/property-information": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
@ -7320,6 +7816,15 @@
"node": ">=6"
}
},
"node_modules/queue": {
"version": "6.0.2",
"resolved": "http://npm.repo.lan/queue/-/queue-6.0.2.tgz",
"integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==",
"license": "MIT",
"dependencies": {
"inherits": "~2.0.3"
}
},
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
@ -7362,6 +7867,12 @@
"react": "^19.2.0"
}
},
"node_modules/react-is": {
"version": "16.13.1",
"resolved": "http://npm.repo.lan/react-is/-/react-is-16.13.1.tgz",
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
"license": "MIT"
},
"node_modules/react-markdown": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz",
@ -7594,7 +8105,6 @@
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
@ -7610,6 +8120,12 @@
"node": ">=4"
}
},
"node_modules/restructure": {
"version": "3.0.2",
"resolved": "http://npm.repo.lan/restructure/-/restructure-3.0.2.tgz",
"integrity": "sha512-gSfoiOEA0VPE6Tukkrr7I0RBdE0s7H1eFCDBk05l1KIQT1UIKNc5JZy6jdyW6eYH3aR3g5b3PuL77rq0hvwtAw==",
"license": "MIT"
},
"node_modules/reusify": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
@ -7687,6 +8203,26 @@
"queue-microtask": "^1.2.2"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "http://npm.repo.lan/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/scheduler": {
"version": "0.27.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
@ -7732,6 +8268,15 @@
"node": ">=8"
}
},
"node_modules/simple-swizzle": {
"version": "0.2.4",
"resolved": "http://npm.repo.lan/simple-swizzle/-/simple-swizzle-0.2.4.tgz",
"integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==",
"license": "MIT",
"dependencies": {
"is-arrayish": "^0.3.1"
}
},
"node_modules/source-map": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
@ -7762,6 +8307,15 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/string_decoder": {
"version": "1.3.0",
"resolved": "http://npm.repo.lan/string_decoder/-/string_decoder-1.3.0.tgz",
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
"license": "MIT",
"dependencies": {
"safe-buffer": "~5.2.0"
}
},
"node_modules/stringify-entities": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
@ -7820,6 +8374,12 @@
"node": ">=8"
}
},
"node_modules/svg-arc-to-cubic-bezier": {
"version": "3.2.0",
"resolved": "http://npm.repo.lan/svg-arc-to-cubic-bezier/-/svg-arc-to-cubic-bezier-3.2.0.tgz",
"integrity": "sha512-djbJ/vZKZO+gPoSDThGNpKDO+o+bAeA4XQKovvkNCqnIS2t+S4qnLAGQhyyrulhCFRl1WWzAp0wUDV8PpTVU3g==",
"license": "ISC"
},
"node_modules/tailwind-merge": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz",
@ -7894,6 +8454,12 @@
"url": "https://opencollective.com/webpack"
}
},
"node_modules/tiny-inflate": {
"version": "1.0.3",
"resolved": "http://npm.repo.lan/tiny-inflate/-/tiny-inflate-1.0.3.tgz",
"integrity": "sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw==",
"license": "MIT"
},
"node_modules/tinyglobby": {
"version": "0.2.15",
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
@ -8093,6 +8659,32 @@
"dev": true,
"license": "MIT"
},
"node_modules/unicode-properties": {
"version": "1.4.1",
"resolved": "http://npm.repo.lan/unicode-properties/-/unicode-properties-1.4.1.tgz",
"integrity": "sha512-CLjCCLQ6UuMxWnbIylkisbRj31qxHPAurvena/0iwSVbQ2G1VY5/HjV0IRabOEbDHlzZlRdCrD4NhB0JtU40Pg==",
"license": "MIT",
"dependencies": {
"base64-js": "^1.3.0",
"unicode-trie": "^2.0.0"
}
},
"node_modules/unicode-trie": {
"version": "2.0.0",
"resolved": "http://npm.repo.lan/unicode-trie/-/unicode-trie-2.0.0.tgz",
"integrity": "sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ==",
"license": "MIT",
"dependencies": {
"pako": "^0.2.5",
"tiny-inflate": "^1.0.0"
}
},
"node_modules/unicode-trie/node_modules/pako": {
"version": "0.2.9",
"resolved": "http://npm.repo.lan/pako/-/pako-0.2.9.tgz",
"integrity": "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==",
"license": "MIT"
},
"node_modules/unified": {
"version": "11.0.5",
"resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz",
@ -8287,7 +8879,6 @@
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
"dev": true,
"license": "MIT"
},
"node_modules/vfile": {
@ -8393,6 +8984,20 @@
}
}
},
"node_modules/vite-compatible-readable-stream": {
"version": "3.6.1",
"resolved": "http://npm.repo.lan/vite-compatible-readable-stream/-/vite-compatible-readable-stream-3.6.1.tgz",
"integrity": "sha512-t20zYkrSf868+j/p31cRIGN28Phrjm3nRSLR2fyc2tiWi4cZGVdv68yNlwnIINTkMTmPoMiSlc0OadaO7DXZaQ==",
"license": "MIT",
"dependencies": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/vite/node_modules/fdir": {
"version": "6.5.0",
"resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
@ -8510,6 +9115,12 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/yoga-layout": {
"version": "3.2.1",
"resolved": "http://npm.repo.lan/yoga-layout/-/yoga-layout-3.2.1.tgz",
"integrity": "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==",
"license": "MIT"
},
"node_modules/zod": {
"version": "3.25.76",
"resolved": "http://npm.repo.lan/zod/-/zod-3.25.76.tgz",


@ -22,15 +22,19 @@
"@radix-ui/react-slot": "^1.2.4",
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-tooltip": "^1.2.8",
"@react-pdf/renderer": "^4.3.1",
"@tanstack/react-query": "^5.90.10",
"@zodios/core": "^10.9.6",
"axios": "^1.13.2",
"buffer": "^6.0.3",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"cmdk": "^1.1.1",
"date-fns": "^4.1.0",
"elkjs": "^0.11.0",
"html-to-image": "^1.11.13",
"lucide-react": "^0.554.0",
"marked": "^17.0.1",
"react": "^19.2.0",
"react-dom": "^19.2.0",
"react-markdown": "^10.1.0",

Binary file not shown.

Binary file not shown.


@ -1,11 +1,11 @@
# Fundamental Analysis Platform User Guide (v2.0 - Vite Refactor)
Date: 2025-11-22
Version: 2.0
# Fundamental Analysis Platform User Guide (v2.1 - Dynamic Refactor)
Date: 2025-11-30
Version: 2.1
## 1. Introduction
Fundamental Analysis Platform is an AI-Agent-based platform for deep fundamental investment research. It aggregates multi-source financial data through automated workflows and uses LLMs (large language models) to generate professional financial analysis reports.
The v2.0 release adopted a new Vite + React SPA architecture for smoother interaction and real-time visualization of analysis status.
The v2.1 release introduces a dynamic configuration architecture, an enhanced real-time log stream, and structured data-report rendering, for a more stable and more visual analysis experience.
## 2. Core Features
@ -16,47 +16,57 @@ The v2.0 release adopted a new Vite + React SPA architecture for smoother...
* **Start Analysis**: Click the "Generate Analysis Report" button to start the analysis workflow.
### 2.2 Report Page (Report View)
The core workspace, split into a status bar on the left and a detail area on the right.
The core workspace now uses a **two-column layout**: real-time status monitoring on the left, multi-tab detail views on the right.
#### Left: Workflow Status
* **Visual DAG**: Shows the dependency graph of the current analysis tasks.
#### Left: Workflow Status
* **Visual DAG (Visualizer)**:
* Shows the dependency graph of the current analysis tasks. Nodes display **human-readable task names** (e.g., "News Analysis", "Financial Data Fetch").
* **Node colors**: gray (pending), blue (running), green (completed), red (failed).
* **Animated edges**: While a task runs, the connecting edges show a flowing-light animation indicating the direction of data flow.
* **Real-time logs**: Scrolls through the execution logs of all background tasks, so data fetching and analysis progress can be watched live.
* **Animated edges**: A flowing-light animation while a task runs, indicating the direction of data flow.
* **Real-time Logs**:
* Located at the bottom left (or in a separate panel); scrolls through the execution logs of all background tasks in real time.
* **History replay**: Even after a page refresh or a reconnect, the system automatically pulls the complete log history so no information is lost.
#### Right: Detail Panel
* **Analysis Report**: Shows the final AI-generated analysis report. Supports Markdown (headings, tables, bold, block quotes) with a typewriter effect during generation.
* **Fundamental Data**: (in development) Shows tables of the raw financial data fetched.
* **Stock Chart**: (in development) Shows the stock price chart.
#### Right: Detail Panel (Detail Tabs)
The right-hand area dynamically generates multiple tabs based on the analysis workflow:
* **Overview**:
* Shows overall analysis progress, task-completion statistics, and total elapsed time.
* If a task fails, a summary of the specific error message is shown here.
* **Task Tabs**:
* Each workflow node (e.g., "Financial Data", "News Analysis") has its own tab.
* **Smart rendering**:
* **Analysis reports**: AI-generated text is rendered as Markdown with rich-text layout.
* **Financial data**: Raw financial data (especially Tushare A-share data) is now automatically converted into **structured Markdown tables**, grouped by year and statement type, with values formatted for readability (e.g., "14.20 亿").
* **Inspector**: Click the "Inspector" button in the upper right to open a side panel showing the task's input/output file diff and context information, which is handy for debugging.
### 2.3 System Configuration (Config)
Centralized management of all the platform's external connections and parameters.
* **AI Provider**:
* Manage LLM providers (OpenAI, Anthropic, local Ollama, etc.).
* Configure the API Key and Base URL.
* Refresh and select available models (GPT-4o, Claude-3.5, etc.).
* **Data Source Configuration**:
* Enable/disable financial data sources (Tushare, Finnhub, AlphaVantage).
* Enter the corresponding API Token.
* Supports connection testing.
* Supports configuring the API Key, Base URL, and model selection.
* **Data Source Configuration (Dynamic Data Sources)**:
* **Dynamic loading**: The list of supported data sources (Tushare, Finnhub, AlphaVantage, etc.) and their configuration fields are delivered dynamically by the backend, so new data sources can be supported without upgrading the frontend.
* **Features**: Supports entering a Token/Key and provides a **"Test Connection"** button to verify that the configuration works.
* **Analysis Templates**:
* View the current analysis workflow templates (e.g., "Quick Scan").
* View the Prompt template and model configuration used by each module.
* View the current analysis workflow templates and the Prompt used by each module.
* **System Status**:
* Monitor the health of the microservice cluster (API Gateway, Orchestrator, etc.).
## 3. Quick Start
1. Go to the **Config** page -> **AI Provider** and add your OpenAI API Key.
2. Go to the **Config** page -> **Data Source Configuration**, enable Tushare, and enter the Token.
2. Go to the **Config** page -> **Data Source Configuration**, select **Tushare** (or another source), enter the Token, click **Test Connection** to confirm connectivity, then save.
3. Back on the **Home** page, enter `600519.SS` and select the `CN` market.
4. Click **Generate Analysis Report** and watch the workflow run and the report being generated.
4. Click **Generate Analysis Report**.
5. On the report page, watch the DAG status on the left; as tasks complete, click the corresponding tabs on the right to view the data and analysis results.
## 4. FAQ
* **Q: What if report generation gets stuck?**
* A: Check the "Real-time Logs" on the left for API connection timeouts or quota-exhaustion errors.
* A: Check the "Real-time Logs" on the left for API connection timeouts or quota-exhaustion errors. You can also check the "Overview" tab to see whether any task is marked as failed.
* **Q: How do I add a local model?**
* A: Add a new Provider on the AI Provider page and set the Base URL to `http://localhost:11434/v1` (Ollama's default address).
* **Q: Why are some data sources not visible?**
* A: The data-source list is registered dynamically by the backend services. Make sure the corresponding provider microservice (e.g., `tushare-provider-service`) is running and registered with the gateway.
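
To make the "dynamic loading" behavior above concrete, here is a minimal sketch of the payload shape the backend delivers for data sources, validated against the `DataSourcesConfig` schema from the regenerated client later in this comparison (the keys and token values are purely illustrative):

import { z } from "zod";
import { DataSourcesConfig } from "@/api/schema.gen";

// Illustrative only -- the real entries are registered by the provider microservices.
const example: z.infer<typeof DataSourcesConfig> = {
  tushare: { provider: "Tushare", enabled: true, api_key: "<your-token>", api_url: null },
  finnhub: { provider: "Finnhub", enabled: false },
};

DataSourcesConfig.parse(example); // throws if a provider name or field does not match the contract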


@ -3,14 +3,13 @@ import { z } from "zod";
export type AnalysisModuleConfig = {
analysis_prompt: string;
context_selector: ContextSelectorConfig;
context_selector: SelectionMode;
dependencies: Array<string>;
id?: (string | null) | undefined;
llm_config?: (null | LlmConfig) | undefined;
name: string;
output_type: string;
};
export type ContextSelectorConfig = SelectionMode;
export type SelectionMode =
| {
Manual: {
@ -29,6 +28,7 @@ export type SelectionMode =
};
};
export type LlmConfig = Partial<{
extra_params: {} | null;
max_tokens: number | null;
model_id: string | null;
temperature: number | null;
@ -126,6 +126,20 @@ export type TaskProgress = {
task_name: string;
};
export type ObservabilityTaskStatus = "Queued" | "InProgress" | "Completed" | "Failed";
export type TaskStateSnapshot = {
content?: (string | null) | undefined;
input_commit?: (string | null) | undefined;
logs: Array<string>;
metadata?: (null | TaskMetadata) | undefined;
output_commit?: (string | null) | undefined;
status: TaskStatus;
task_id: string;
};
export type TaskMetadata = {
execution_log_path?: (string | null) | undefined;
extra: {};
output_path?: (string | null) | undefined;
};
export type WorkflowDag = {
edges: Array<TaskDependency>;
nodes: Array<TaskNode>;
@ -189,7 +203,9 @@ export type WorkflowEvent =
}
| {
payload: {
logs: Array<string>;
task_graph: WorkflowDag;
task_states?: {} | undefined;
tasks_metadata: {};
tasks_output: {};
tasks_status: {};
@ -197,10 +213,6 @@ export type WorkflowEvent =
};
type: "WorkflowStateSnapshot";
};
export type TaskMetadata = Partial<{
execution_log_path: string | null;
output_path: string | null;
}>;
export type WorkflowHistoryDto = {
created_at: string;
end_time?: (string | null) | undefined;
@ -214,8 +226,42 @@ export type WorkflowHistoryDto = {
};
export type Value = unknown;
export const DataSourceProvider = z.enum([
"Tushare",
"Finnhub",
"Alphavantage",
"Yfinance",
"Mock",
]);
export const DataSourceConfig = z.object({
api_key: z.union([z.string(), z.null()]).optional(),
api_url: z.union([z.string(), z.null()]).optional(),
enabled: z.boolean(),
provider: DataSourceProvider,
});
export const DataSourcesConfig =
z.record(DataSourceConfig);
export const TestLlmConfigRequest = z.object({
api_base_url: z.string(),
api_key: z.string(),
model_id: z.string(),
});
export const LlmModel = z.object({
is_active: z.boolean(),
model_id: z.string(),
name: z.union([z.string(), z.null()]).optional(),
});
export const LlmProvider = z.object({
api_base_url: z.string(),
api_key: z.string(),
models: z.array(LlmModel),
name: z.string(),
});
export const LlmProvidersConfig = z.record(LlmProvider);
export const AnalysisTemplateSummary = z.object({ id: z.string(), name: z.string() });
export const LlmConfig = z
.object({
extra_params: z.union([z.object({}).partial().passthrough(), z.null()]),
max_tokens: z.union([z.number(), z.null()]),
model_id: z.union([z.string(), z.null()]),
temperature: z.union([z.number(), z.null()]),
@ -244,10 +290,9 @@ export const SelectionMode = z.union([
})
.passthrough(),
]);
export const ContextSelectorConfig = SelectionMode;
export const AnalysisModuleConfig = z.object({
analysis_prompt: z.string(),
context_selector: ContextSelectorConfig,
context_selector: SelectionMode,
dependencies: z.array(z.string()),
id: z.union([z.string(), z.null()]).optional(),
llm_config: z.union([z.null(), LlmConfig]).optional(),
@ -258,39 +303,6 @@ export const AnalysisTemplateSet = z.object({
modules: z.record(AnalysisModuleConfig),
name: z.string(),
});
export const AnalysisTemplateSets =
z.record(AnalysisTemplateSet);
export const DataSourceProvider = z.enum([
"Tushare",
"Finnhub",
"Alphavantage",
"Yfinance",
]);
export const DataSourceConfig = z.object({
api_key: z.union([z.string(), z.null()]).optional(),
api_url: z.union([z.string(), z.null()]).optional(),
enabled: z.boolean(),
provider: DataSourceProvider,
});
export const DataSourcesConfig =
z.record(DataSourceConfig);
export const TestLlmConfigRequest = z.object({
api_base_url: z.string(),
api_key: z.string(),
model_id: z.string(),
});
export const LlmModel = z.object({
is_active: z.boolean(),
model_id: z.string(),
name: z.union([z.string(), z.null()]).optional(),
});
export const LlmProvider = z.object({
api_base_url: z.string(),
api_key: z.string(),
models: z.array(LlmModel),
name: z.string(),
});
export const LlmProvidersConfig = z.record(LlmProvider);
export const TestConfigRequest = z.object({ data: z.unknown(), type: z.string() });
export const TestConnectionResponse = z.object({
message: z.string(),
@ -384,6 +396,8 @@ export const TaskProgress = z.object({
status: ObservabilityTaskStatus,
task_name: z.string(),
});
export const AnalysisTemplateSets =
z.record(AnalysisTemplateSet);
export const CanonicalSymbol = z.string();
export const ServiceStatus = z.enum(["Ok", "Degraded", "Unhealthy"]);
export const HealthStatus = z.object({
@ -402,12 +416,11 @@ export const TaskDependency = z.object({
from: z.string(),
to: z.string(),
});
export const TaskMetadata = z
.object({
execution_log_path: z.union([z.string(), z.null()]),
output_path: z.union([z.string(), z.null()]),
})
.partial();
export const TaskMetadata = z.object({
execution_log_path: z.union([z.string(), z.null()]).optional(),
extra: z.object({}).partial().passthrough(),
output_path: z.union([z.string(), z.null()]).optional(),
});
export const TaskStatus = z.enum([
"Pending",
"Scheduled",
@ -424,6 +437,15 @@ export const TaskNode = z.object({
name: z.string(),
type: TaskType,
});
export const TaskStateSnapshot = z.object({
content: z.union([z.string(), z.null()]).optional(),
input_commit: z.union([z.string(), z.null()]).optional(),
logs: z.array(z.string()),
metadata: z.union([z.null(), TaskMetadata]).optional(),
output_commit: z.union([z.string(), z.null()]).optional(),
status: TaskStatus,
task_id: z.string(),
});
export const WorkflowDag = z.object({
edges: z.array(TaskDependency),
nodes: z.array(TaskNode),
@ -506,7 +528,9 @@ export const WorkflowEvent = z.union([
.object({
payload: z
.object({
logs: z.array(z.string()),
task_graph: WorkflowDag,
task_states: z.record(TaskStateSnapshot).optional(),
tasks_metadata: z.record(TaskMetadata),
tasks_output: z.record(z.union([z.string(), z.null()])),
tasks_status: z.record(TaskStatus),
@ -517,14 +541,17 @@ export const WorkflowEvent = z.union([
})
.passthrough(),
]);
export const WorkflowEventType = z.enum([
"WorkflowStarted",
"TaskStateChanged",
"TaskStreamUpdate",
"TaskLog",
"WorkflowCompleted",
"WorkflowFailed",
"WorkflowStateSnapshot",
]);
export const schemas = {
LlmConfig,
SelectionMode,
ContextSelectorConfig,
AnalysisModuleConfig,
AnalysisTemplateSet,
AnalysisTemplateSets,
DataSourceProvider,
DataSourceConfig,
DataSourcesConfig,
@ -532,6 +559,11 @@ export const schemas = {
LlmModel,
LlmProvider,
LlmProvidersConfig,
AnalysisTemplateSummary,
LlmConfig,
SelectionMode,
AnalysisModuleConfig,
AnalysisTemplateSet,
TestConfigRequest,
TestConnectionResponse,
DiscoverPreviewRequest,
@ -548,6 +580,7 @@ export const schemas = {
RequestAcceptedResponse,
ObservabilityTaskStatus,
TaskProgress,
AnalysisTemplateSets,
CanonicalSymbol,
ServiceStatus,
HealthStatus,
@ -557,32 +590,13 @@ export const schemas = {
TaskStatus,
TaskType,
TaskNode,
TaskStateSnapshot,
WorkflowDag,
WorkflowEvent,
WorkflowEventType,
};
export const endpoints = makeApi([
{
method: "get",
path: "/api/v1/configs/analysis_template_sets",
alias: "get_analysis_template_sets",
requestFormat: "json",
response: z.record(AnalysisTemplateSet),
},
{
method: "put",
path: "/api/v1/configs/analysis_template_sets",
alias: "update_analysis_template_sets",
requestFormat: "json",
parameters: [
{
name: "body",
type: "Body",
schema: z.record(AnalysisTemplateSet),
},
],
response: z.record(AnalysisTemplateSet),
},
{
method: "get",
path: "/api/v1/configs/data_sources",
@ -639,6 +653,81 @@ export const endpoints = makeApi([
],
response: z.void(),
},
{
method: "get",
path: "/api/v1/configs/templates",
alias: "get_templates",
requestFormat: "json",
response: z.array(AnalysisTemplateSummary),
},
{
method: "get",
path: "/api/v1/configs/templates/:id",
alias: "get_template_by_id",
requestFormat: "json",
parameters: [
{
name: "id",
type: "Path",
schema: z.string(),
},
],
response: AnalysisTemplateSet,
errors: [
{
status: 404,
description: `Template not found`,
schema: z.void(),
},
],
},
{
method: "put",
path: "/api/v1/configs/templates/:id",
alias: "update_template",
requestFormat: "json",
parameters: [
{
name: "body",
type: "Body",
schema: AnalysisTemplateSet,
},
{
name: "id",
type: "Path",
schema: z.string(),
},
],
response: AnalysisTemplateSet,
errors: [
{
status: 404,
description: `Template not found`,
schema: z.void(),
},
],
},
{
method: "delete",
path: "/api/v1/configs/templates/:id",
alias: "delete_template",
requestFormat: "json",
parameters: [
{
name: "id",
type: "Path",
schema: z.string(),
},
],
response: z.void(),
errors: [
{
status: 404,
description: `Template not found`,
schema: z.void(),
},
],
},
{
method: "post",
path: "/api/v1/configs/test",


@ -13,6 +13,7 @@ import { History, Loader2 } from 'lucide-react';
import { WorkflowHistorySummaryDto } from '@/api/schema.gen';
import { z } from 'zod';
import { client } from '@/api/client';
import { useAnalysisTemplates } from "@/hooks/useConfig";
type WorkflowHistorySummary = z.infer<typeof WorkflowHistorySummaryDto>;
@ -20,6 +21,7 @@ export function RecentReportsDropdown() {
const [reports, setReports] = useState<WorkflowHistorySummary[]>([]);
const [loading, setLoading] = useState(false);
const navigate = useNavigate();
const { data: templates } = useAnalysisTemplates();
const loadReports = async () => {
setLoading(true);
@ -64,7 +66,7 @@ export function RecentReportsDropdown() {
<span className="text-xs font-normal text-muted-foreground">{new Date(report.start_time).toLocaleDateString()}</span>
</div>
<div className="flex justify-between w-full text-xs text-muted-foreground">
<span>{report.template_id || 'Default'}</span>
<span>{templates?.find(t => t.id === report.template_id)?.name || report.template_id || 'Default'}</span>
<span className={report.status === 'Completed' ? 'text-green-600' : report.status === 'Failed' ? 'text-destructive' : 'text-amber-600'}>{report.status}</span>
</div>
</DropdownMenuItem>


@ -4,10 +4,11 @@ import { Card, CardContent, CardHeader, CardTitle, CardDescription } from "@/com
import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Loader2, ArrowRight, History, RefreshCw } from "lucide-react";
import { Loader2, ArrowRight, History, RefreshCw, Trash2 } from "lucide-react";
import { WorkflowHistorySummaryDto } from '@/api/schema.gen';
import { z } from 'zod';
import { client } from '@/api/client';
import { useAnalysisTemplates } from "@/hooks/useConfig";
type WorkflowHistorySummary = z.infer<typeof WorkflowHistorySummaryDto>;
@ -15,6 +16,7 @@ export function RecentWorkflowsList() {
const [history, setHistory] = useState<WorkflowHistorySummary[]>([]);
const [loading, setLoading] = useState(false);
const navigate = useNavigate();
const { data: templates } = useAnalysisTemplates();
const fetchHistory = async () => {
setLoading(true);
@ -29,6 +31,23 @@ export function RecentWorkflowsList() {
}
};
const handleClearHistory = async () => {
if (confirm("Are you sure you want to clear ALL history? This cannot be undone.")) {
try {
const res = await fetch('/api/v1/system/history', { method: 'DELETE' });
if (res.ok) {
fetchHistory();
} else {
console.error("Failed to clear history");
alert("Failed to clear history");
}
} catch (e) {
console.error(e);
alert("Error clearing history");
}
}
};
useEffect(() => {
fetchHistory();
}, []);
@ -49,9 +68,14 @@ export function RecentWorkflowsList() {
Your recently generated fundamental analysis reports.
</CardDescription>
</div>
<Button variant="ghost" size="icon" onClick={fetchHistory} disabled={loading}>
<RefreshCw className={`h-4 w-4 ${loading ? 'animate-spin' : ''}`} />
</Button>
<div className="flex gap-2">
<Button variant="ghost" size="icon" onClick={handleClearHistory} title="Clear All History">
<Trash2 className="h-4 w-4 text-muted-foreground hover:text-destructive" />
</Button>
<Button variant="ghost" size="icon" onClick={fetchHistory} disabled={loading}>
<RefreshCw className={`h-4 w-4 ${loading ? 'animate-spin' : ''}`} />
</Button>
</div>
</CardHeader>
<CardContent>
<Table>
@ -77,7 +101,7 @@ export function RecentWorkflowsList() {
<TableRow key={item.request_id} className="group cursor-pointer hover:bg-muted/50" onClick={() => navigate(`/history/${item.request_id}`)}>
<TableCell className="font-medium">{item.symbol}</TableCell>
<TableCell>{item.market}</TableCell>
<TableCell className="text-muted-foreground">{item.template_id || 'Default'}</TableCell>
<TableCell className="text-muted-foreground">{templates?.find(t => t.id === item.template_id)?.name || item.template_id || 'Default'}</TableCell>
<TableCell>
<StatusBadge status={item.status} />
</TableCell>


@ -0,0 +1,326 @@
import { useState } from 'react';
import * as htmlToImage from 'html-to-image';
import { Button } from '@/components/ui/button';
import { Loader2, FileDown } from 'lucide-react';
import { useWorkflowStore } from '@/stores/useWorkflowStore';
import { cn, formatNodeName } from '@/lib/utils';
import { cropImage } from '@/lib/image-processing';
import { schemas } from '@/api/schema.gen';
import { marked } from 'marked';
interface PDFExportButtonProps {
symbol?: string | null;
market?: string | null;
templateName?: string | null;
requestId?: string;
className?: string;
}
export function PDFExportButton({ symbol, market, templateName, requestId, className }: PDFExportButtonProps) {
// State to track PDF generation process
const [isGenerating, setIsGenerating] = useState(false);
const { tasks, dag } = useWorkflowStore();
// const apiBaseUrl = (import.meta as any).env.VITE_API_TARGET || '/v1';
const generateHTML = async (graphImage: string) => {
// CSS Styles (GitHub Markdown + Tailwind-like base styles)
const styles = `
<style>
@page {
margin: 0 2%;
}
body {
font-family: "ChineseFont", -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji";
line-height: 1.6;
color: #24292f;
max-width: 900px;
margin: 0 auto;
padding: 40px;
background-color: #ffffff;
}
h1, h2, h3, h4, h5, h6 {
margin-top: 24px;
margin-bottom: 16px;
font-weight: 600;
line-height: 1.25;
}
h1 { font-size: 2em; border-bottom: 1px solid #eaecef; padding-bottom: .3em; }
h2 { font-size: 1.5em; border-bottom: 1px solid #eaecef; padding-bottom: .3em; }
h3 { font-size: 1.25em; }
p { margin-top: 0; margin-bottom: 16px; text-align: justify; }
code {
padding: .2em .4em;
margin: 0;
font-size: 85%;
background-color: rgba(27,31,35,.05);
border-radius: 3px;
font-family: SFMono-Regular,Consolas,Liberation Mono,Menlo,monospace;
}
pre {
padding: 16px;
overflow: auto;
font-size: 85%;
line-height: 1.45;
background-color: #f6f8fa;
border-radius: 3px;
margin-bottom: 16px;
}
pre code {
display: inline;
padding: 0;
margin: 0;
overflow: visible;
line-height: inherit;
word-wrap: normal;
background-color: transparent;
border: 0;
}
blockquote {
padding: 0 1em;
color: #6a737d;
border-left: .25em solid #dfe2e5;
margin: 0 0 16px 0;
}
ul, ol { padding-left: 2em; margin-bottom: 16px; }
li { margin-bottom: 4px; }
img { max-width: 100%; box-sizing: content-box; background-color: #fff; }
table {
display: block;
width: 100%;
overflow: auto;
margin-bottom: 16px;
border-spacing: 0;
border-collapse: collapse;
}
table tr {
background-color: #fff;
border-top: 1px solid #c6cbd1;
}
table tr:nth-child(2n) {
background-color: #f6f8fa;
}
table th, table td {
padding: 6px 13px;
border: 1px solid #dfe2e5;
}
table th {
font-weight: 600;
}
hr {
height: .25em;
padding: 0;
margin: 24px 0;
background-color: #e1e4e8;
border: 0;
}
/* Report Specific Styles */
.report-header {
margin-bottom: 40px;
border-bottom: 2px solid #eaecef;
padding-bottom: 20px;
display: flex;
justify-content: space-between;
align-items: flex-end;
}
.report-title {
font-size: 2.5em;
font-weight: bold;
margin: 0;
line-height: 1.2;
}
.report-meta {
text-align: right;
color: #586069;
font-size: 0.9em;
}
.section-title {
background-color: #f6f8fa;
padding: 10px 15px;
border-left: 5px solid #0366d6;
margin-top: 30px;
margin-bottom: 20px;
font-size: 1.5em;
font-weight: bold;
}
.workflow-graph {
margin: 30px 0;
text-align: center;
border: 1px solid #eaecef;
padding: 10px;
border-radius: 6px;
}
.workflow-graph img {
max-width: 100%;
max-height: 800px;
object-fit: contain;
margin: 0 auto;
display: block;
}
.footer {
margin-top: 50px;
padding-top: 20px;
border-top: 1px solid #eaecef;
text-align: center;
color: #586069;
font-size: 0.8em;
}
/* Printing Optimization */
@media print {
body { max-width: 100%; padding: 0; }
.section-title { break-after: avoid; }
pre, blockquote, table { break-inside: avoid; }
img { max-width: 100% !important; }
/* Ensure background colors are printed */
* { -webkit-print-color-adjust: exact; print-color-adjust: exact; }
}
</style>
`;
// Generate Tasks HTML
let tasksHtml = '';
if (dag?.nodes) {
for (const node of dag.nodes) {
const task = tasks[node.id];
if (task && task.status === schemas.TaskStatus.enum.Completed && task.content) {
const parsedContent = await marked.parse(task.content);
tasksHtml += `
<div class="section">
<div class="section-title">${node.display_name || formatNodeName(node.name)}</div>
<div class="markdown-body">
${parsedContent}
</div>
</div>
`;
}
}
}
// Assemble Full HTML
return `
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>${symbol} - Analysis Report</title>
${styles}
</head>
<body>
<div class="report-header">
<div>
<div class="report-title">${symbol || 'Unknown Symbol'}</div>
<div style="font-size: 1.2em; color: #586069;">${market || 'Unknown Market'}</div>
</div>
<div class="report-meta">
<div>Template: ${templateName || 'Default'}</div>
<div>ID: ${requestId || 'N/A'}</div>
<div>Date: ${new Date().toLocaleDateString()}</div>
</div>
</div>
${graphImage ? `
<div class="section">
<div class="section-title">Workflow Topology</div>
<div class="workflow-graph">
<img src="${graphImage}" alt="Workflow Graph" />
</div>
</div>
` : ''}
${tasksHtml}
<div class="footer">
Generated by Fundamental Analysis Platform ${new Date().toISOString()}
</div>
</body>
</html>
`;
};
const handleExport = async () => {
setIsGenerating(true);
try {
// 1. Capture the Graph Image
const flowElement = document.querySelector('.react-flow') as HTMLElement;
let graphImage = '';
if (flowElement) {
try {
const filter = (node: HTMLElement) => {
const exclusionClasses = ['react-flow__controls', 'react-flow__minimap', 'react-flow__panel', 'react-flow__background'];
return !exclusionClasses.some(classname => node.classList?.contains(classname));
};
// Wait for animations
await new Promise(resolve => setTimeout(resolve, 100));
graphImage = await htmlToImage.toPng(flowElement, {
filter,
backgroundColor: '#ffffff',
pixelRatio: 2,
cacheBust: true,
});
// Auto-crop whitespace
graphImage = await cropImage(graphImage);
} catch (e) {
console.warn("Graph capture failed:", e);
}
}
// 2. Generate HTML Content
const htmlContent = await generateHTML(graphImage);
// 3. Send to Backend for PDF Conversion
const formData = new FormData();
const blob = new Blob([htmlContent], { type: 'text/html' });
formData.append('index.html', blob, 'index.html');
// Send to /api/v1/generate-pdf which is proxied by Vite to API Gateway
// The API Gateway then forwards it to the Report Service
const response = await fetch(`/api/v1/generate-pdf`, {
method: 'POST',
body: formData,
});
if (!response.ok) {
throw new Error(`PDF generation failed: ${response.statusText}`);
}
// 4. Download PDF
const pdfBlob = await response.blob();
const url = URL.createObjectURL(pdfBlob);
const link = document.createElement('a');
link.href = url;
link.download = `${symbol || 'Report'}_${market || 'Analysis'}.pdf`;
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
URL.revokeObjectURL(url);
} catch (e) {
console.error("Export failed", e);
alert("Failed to generate PDF. Please try again.");
} finally {
setIsGenerating(false);
}
};
return (
<div className="flex gap-2">
<Button
size="sm"
variant="outline"
className={cn("gap-2", className)}
onClick={handleExport}
disabled={isGenerating}
>
{isGenerating ? <Loader2 className="h-4 w-4 animate-spin" /> : <FileDown className="h-4 w-4" />}
{isGenerating ? "Generating PDF..." : "Export PDF"}
</Button>
</div>
);
}
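
For orientation, the hop behind `/api/v1/generate-pdf` amounts to re-posting the uploaded `index.html` to the PDF renderer's HTML conversion route. A minimal TypeScript sketch of that hop, assuming the renderer is Gotenberg reachable at `http://gotenberg:3000` (both are assumptions here; the real proxy lives in the backend services, not in TypeScript):

async function convertHtmlToPdf(html: string): Promise<Blob> {
  const form = new FormData();
  // Gotenberg's Chromium route treats the multipart file named "index.html" as the entry document.
  form.append("files", new Blob([html], { type: "text/html" }), "index.html");

  const res = await fetch("http://gotenberg:3000/forms/chromium/convert/html", {
    method: "POST",
    body: form,
  });
  if (!res.ok) throw new Error(`PDF renderer returned ${res.status}`);
  return res.blob(); // application/pdf
}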


@ -1,9 +1,8 @@
import React, { useState, useEffect } from 'react';
import { ScrollArea } from "@/components/ui/scroll-area";
import { Badge } from "@/components/ui/badge";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Button } from "@/components/ui/button";
import { ChevronRight, ChevronDown, FileText, Folder, FileJson, RefreshCw, GitBranch, GitCommit } from 'lucide-react';
import { Card, CardHeader, CardTitle } from "@/components/ui/card";
import { ChevronRight, ChevronDown, FileText, Folder, FileJson, RefreshCw, GitCommit } from 'lucide-react';
import { cn } from "@/lib/utils";
// Types mirroring the Rust backend
@ -154,11 +153,18 @@ export const ContextExplorer: React.FC<ContextExplorerProps> = ({
const res = await fetch(`/api/context/${reqId}/tree/${commitHash}?path=`);
if (res.ok) {
const data = await res.json();
data.sort((a: DirEntry, b: DirEntry) => {
if (a.kind === b.kind) return a.name.localeCompare(b.name);
return a.kind === 'Dir' ? -1 : 1;
});
setRootEntries(data);
if (Array.isArray(data)) {
data.sort((a: DirEntry, b: DirEntry) => {
if (a.kind === b.kind) return a.name.localeCompare(b.name);
return a.kind === 'Dir' ? -1 : 1;
});
setRootEntries(data);
} else {
console.error("ContextExplorer: Expected array from tree API, got:", data);
setRootEntries([]);
}
} else {
console.error("ContextExplorer: Fetch failed", res.status, res.statusText);
}
} catch (e) {
console.error(e);


@ -0,0 +1,76 @@
import { useState } from 'react';
import { Terminal, ChevronUp, ChevronDown } from 'lucide-react';
import { Card } from "@/components/ui/card";
import { Button } from "@/components/ui/button";
import { useAutoScroll } from '@/hooks/useAutoScroll';
import { cn } from "@/lib/utils";
export interface LogEntry {
log: string;
timestamp?: number; // optional; allows sorting if log sources are merged later
}
interface RealtimeLogsProps {
logs: string[];
className?: string;
}
export function RealtimeLogs({ logs, className }: RealtimeLogsProps) {
// Collapsed by default to avoid clutter; the header previews the latest log line instead.
const [isExpanded, setIsExpanded] = useState(false);
const logsViewportRef = useAutoScroll(logs.length);
const toggleExpand = () => {
setIsExpanded(!isExpanded);
};
return (
<Card className={cn("flex flex-col shadow-sm transition-all duration-300 ease-in-out border-l-4 border-l-primary py-0 gap-0 bg-background overflow-hidden", className, isExpanded ? "h-[300px]" : "h-8")}>
<div
className="flex items-center justify-between px-2 py-1 cursor-pointer hover:bg-muted/50 transition-colors h-8 shrink-0"
onClick={toggleExpand}
>
<div className="flex items-center gap-2 overflow-hidden flex-1">
<Terminal className="h-3 w-3 text-muted-foreground shrink-0" />
<span className="text-[10px] font-medium text-muted-foreground whitespace-nowrap shrink-0 mr-2">Real-time Logs</span>
{/* Preview last log when collapsed */}
{!isExpanded && logs.length > 0 && (
<div className="flex-1 flex items-center gap-2 overflow-hidden text-[10px] font-mono text-muted-foreground/80">
<span className="truncate">{logs[logs.length - 1]}</span>
</div>
)}
{!isExpanded && logs.length === 0 && (
<span className="text-[10px] italic text-muted-foreground/50">Waiting for logs...</span>
)}
</div>
<Button variant="ghost" size="icon" className="h-4 w-4 text-muted-foreground hover:text-foreground shrink-0 ml-2">
{isExpanded ? <ChevronUp className="h-3 w-3" /> : <ChevronDown className="h-3 w-3" />}
</Button>
</div>
{/* Expanded Content */}
<div
className={cn(
"flex-1 bg-muted/10 border-t transition-all duration-300 min-h-0",
isExpanded ? "opacity-100 visible" : "opacity-0 invisible h-0 overflow-hidden"
)}
>
<div ref={logsViewportRef} className="h-full overflow-y-auto p-3 font-mono text-[10px] leading-relaxed scrollbar-thin scrollbar-thumb-border scrollbar-track-transparent">
<div className="space-y-1">
{logs.length === 0 && <span className="text-muted-foreground italic">Waiting for logs...</span>}
{logs.map((entry, i) => (
<div key={i} className="break-all flex gap-2">
<span className="text-foreground/90">{entry}</span>
</div>
))}
</div>
</div>
</div>
</Card>
);
}
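
A usage sketch for the panel above (the import path and the `logs` selector are assumptions about the surrounding wiring):

import { RealtimeLogs } from "@/components/RealtimeLogs";
import { useWorkflowStore } from "@/stores/useWorkflowStore";

export function WorkflowSidebarLogs() {
  const logs = useWorkflowStore((s) => s.logs); // assumed store field holding the flat log array
  return <RealtimeLogs logs={logs} className="mt-2" />;
}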


@ -1,7 +1,6 @@
import { useEffect, useCallback, useState } from 'react';
import ReactFlow, {
Background,
Controls,
Node,
Edge,
useNodesState,
@ -79,7 +78,7 @@ const useGridLayout = () => {
// 1. Topological Sort (Rank-Based Grid)
// Calculate Ranks
const ranks = new Map<string, number>();
const nodeMap = new Map(dagNodes.map(n => [n.id, n]));
// const nodeMap = new Map(dagNodes.map(n => [n.id, n])); // Removed unused nodeMap
const incomingEdges = new Map<string, string[]>();
dagNodes.forEach(n => incomingEdges.set(n.id, []));


@ -1,5 +1,5 @@
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query';
import { LlmProvidersConfig, DataSourcesConfig, AnalysisTemplateSets, TestConfigRequest, TestLlmConfigRequest } from '../types/config';
import { LlmProvidersConfig, DataSourcesConfig, TestConfigRequest, TestLlmConfigRequest } from '../types/config';
import { client } from '../api/client';
// --- Hooks ---
@ -59,16 +59,40 @@ export function useAnalysisTemplates() {
return useQuery({
queryKey: ['analysis-templates'],
queryFn: async () => {
return await client.get_analysis_template_sets();
return await client.get_templates();
}
});
}
export function useUpdateAnalysisTemplates() {
export function useAnalysisTemplate(id: string | null) {
return useQuery({
queryKey: ['analysis-template', id],
queryFn: async () => {
if (!id) return null;
return await client.get_template_by_id({ params: { id } });
},
enabled: !!id
});
}
export function useSaveAnalysisTemplate() {
const queryClient = useQueryClient();
return useMutation({
mutationFn: async (config: AnalysisTemplateSets) => {
return await client.update_analysis_template_sets(config);
mutationFn: async ({ id, template }: { id: string, template: AnalysisTemplateSet }) => {
return await client.update_template(template, { params: { id } });
},
onSuccess: (_data, variables) => {
queryClient.invalidateQueries({ queryKey: ['analysis-templates'] });
queryClient.invalidateQueries({ queryKey: ['analysis-template', variables.id] });
}
});
}
export function useDeleteAnalysisTemplate() {
const queryClient = useQueryClient();
return useMutation({
mutationFn: async (id: string) => {
return await client.delete_template(undefined, { params: { id } });
},
onSuccess: () => {
queryClient.invalidateQueries({ queryKey: ['analysis-templates'] });
@ -76,6 +100,9 @@ export function useUpdateAnalysisTemplates() {
});
}
export type AnalysisTemplateSet = import('../api/schema.gen').AnalysisTemplateSet;
export function useDiscoverModels() {
const queryClient = useQueryClient();
return useMutation({


@ -38,6 +38,43 @@
}
}
/*
Markdown Typography Overrides
Ensure high contrast for all text elements within prose content.
*/
@layer components {
.prose {
/* Force base text color */
@apply text-foreground;
/* Force specific elements to use foreground color to avoid "faint" gray defaults */
& :where(p, ul, ol, li, blockquote, strong, b, i, em, code, h1, h2, h3, h4, h5, h6, th, td, span) {
color: var(--color-foreground) !important;
}
/* Ensure links use primary color */
& a {
@apply text-primary hover:underline decoration-primary/30 underline-offset-4;
color: var(--color-primary) !important;
}
/* Table styling fixes */
& :where(thead, tbody, tr) {
border-color: var(--color-border) !important;
}
& :where(th, td) {
border-color: var(--color-border) !important;
}
/* Code block fixes */
& pre {
@apply bg-muted text-foreground;
border-color: var(--color-border) !important;
border-width: 1px !important;
}
}
}
.writing-vertical-lr {
writing-mode: vertical-lr;
}

View File

@ -0,0 +1,164 @@
/**
* Process image data to crop whitespace
*/
export async function cropImage(
imageSrc: string,
options: {
threshold?: number; // 0-255, higher means more colors are considered "white"
padding?: number; // padding in pixels to keep around content
} = {}
): Promise<string> {
const { threshold = 252, padding = 20 } = options; // 252 covers pure white and very light compression artifacts
return new Promise((resolve) => {
const img = new Image();
// Enable CORS if needed, though usually data URLs don't need it
img.crossOrigin = "anonymous";
img.onload = () => {
try {
const canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
const ctx = canvas.getContext('2d');
if (!ctx) {
resolve(imageSrc);
return;
}
ctx.drawImage(img, 0, 0);
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
const data = imageData.data;
const { width, height } = canvas;
// A pixel counts as "content" when any channel is darker than the threshold.
const isContent = (r: number, g: number, b: number) => {
return r < threshold || g < threshold || b < threshold;
};
let top = -1, bottom = -1, left = -1, right = -1;
// Scan Top-down for top boundary
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
const idx = (y * width + x) * 4;
if (isContent(data[idx], data[idx+1], data[idx+2])) {
top = y;
break;
}
}
if (top !== -1) break;
}
// If top is still -1, the image is empty (all white)
if (top === -1) {
resolve(imageSrc);
return;
}
// Scan Bottom-up for bottom boundary
for (let y = height - 1; y >= top; y--) {
for (let x = 0; x < width; x++) {
const idx = (y * width + x) * 4;
if (isContent(data[idx], data[idx+1], data[idx+2])) {
bottom = y + 1;
break;
}
}
if (bottom !== -1) break;
}
// Scan Left-right for left boundary
// We only need to scan within the vertical bounds we found
for (let x = 0; x < width; x++) {
for (let y = top; y < bottom; y++) {
const idx = (y * width + x) * 4;
if (isContent(data[idx], data[idx+1], data[idx+2])) {
left = x;
break;
}
}
if (left !== -1) break;
}
// Scan Right-left for right boundary
for (let x = width - 1; x >= left; x--) {
for (let y = top; y < bottom; y++) {
const idx = (y * width + x) * 4;
if (isContent(data[idx], data[idx+1], data[idx+2])) {
right = x + 1;
break;
}
}
if (right !== -1) break;
}
// Calculate crop coordinates with padding
const cropX = Math.max(0, left - padding);
const cropY = Math.max(0, top - padding);
// Calculate the crop size without going out of bounds.
// `right` and `bottom` are exclusive (last content index + 1), so content width = right - left.
// Target width = content width + 2 * padding, clamped to the image bounds.
const contentWidth = right - left;
const contentHeight = bottom - top;
// Check if we found valid bounds
if (contentWidth <= 0 || contentHeight <= 0) {
resolve(imageSrc);
return;
}
// Keep the padding inside the original image: `Math.max(0, left - padding)` clamps at the
// edges, so content touching the border simply gets less padding. Cropping only ever
// shrinks the image; we never synthesize extra whitespace.
const rectRight = Math.min(width, right + padding);
const rectBottom = Math.min(height, bottom + padding);
const finalWidth = rectRight - cropX;
const finalHeight = rectBottom - cropY;
const finalCanvas = document.createElement('canvas');
finalCanvas.width = finalWidth;
finalCanvas.height = finalHeight;
const finalCtx = finalCanvas.getContext('2d');
if (!finalCtx) {
resolve(imageSrc);
return;
}
// Fill with white in case of transparency (though we are copying from opaque usually)
finalCtx.fillStyle = '#ffffff';
finalCtx.fillRect(0, 0, finalWidth, finalHeight);
finalCtx.drawImage(
canvas,
cropX, cropY, finalWidth, finalHeight, // Source rect
0, 0, finalWidth, finalHeight // Dest rect
);
resolve(finalCanvas.toDataURL('image/png'));
} catch (e) {
console.error("Image processing failed", e);
resolve(imageSrc);
}
};
img.onerror = (e) => {
console.error("Image loading failed", e);
resolve(imageSrc);
};
img.src = imageSrc;
});
}
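
A typical call site pairs the cropper with html-to-image (the package added in package.json below); the element id and module path are assumptions for the sketch:

```ts
import { toPng } from 'html-to-image';
import { cropImage } from './cropImage'; // hypothetical module path

// Sketch: capture a React Flow canvas and trim the surrounding whitespace.
export async function captureFlowScreenshot(): Promise<string | null> {
  const el = document.getElementById('react-flow-canvas'); // hypothetical id
  if (!el) return null;
  const raw = await toPng(el, { backgroundColor: '#ffffff' });
  // threshold 252 tolerates near-white compression artifacts;
  // padding 20 keeps a small margin around the detected content.
  return cropImage(raw, { threshold: 252, padding: 20 });
}
```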

View File

@ -1,6 +1,14 @@
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
import * as buffer from 'buffer';
// Polyfill Buffer for @react-pdf/renderer
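// Note: ESM import declarations are hoisted, so the modules imported below are
// evaluated before this assignment runs; a dedicated polyfill module imported
// first is the more robust pattern.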
if (typeof window !== 'undefined') {
// @ts-ignore
window.Buffer = window.Buffer || buffer.Buffer;
}
import './index.css'
import App from './App'

View File

@ -7,7 +7,7 @@ import { Input } from "@/components/ui/input"
import { Label } from "@/components/ui/label"
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
import { BarChart3, Search, Sparkles, Loader2, AlertCircle } from "lucide-react"
import { useAnalysisTemplates, useLlmProviders } from "@/hooks/useConfig"
import { useAnalysisTemplates, useAnalysisTemplate, useLlmProviders } from "@/hooks/useConfig"
import { client } from '@/api/client';
import { DataRequest } from '@/api/schema.gen';
import { z } from 'zod';
@ -24,25 +24,25 @@ export function Dashboard() {
const [templateId, setTemplateId] = useState("");
const { data: templates, isLoading: isTemplatesLoading } = useAnalysisTemplates();
const { data: selectedTemplate } = useAnalysisTemplate(templateId || null);
const { data: llmProviders } = useLlmProviders();
const [validationError, setValidationError] = useState<string | null>(null);
// Auto-select first template when loaded
useEffect(() => {
if (templates && Object.keys(templates).length > 0 && !templateId) {
setTemplateId(Object.keys(templates)[0]);
if (templates && templates.length > 0 && !templateId) {
setTemplateId(templates[0].id);
}
}, [templates, templateId]);
// Validate template against providers
useEffect(() => {
if (!templateId || !templates || !templates[templateId] || !llmProviders) {
if (!selectedTemplate || !llmProviders) {
setValidationError(null);
return;
}
const selectedTemplate = templates[templateId];
const missingConfigs: string[] = [];
Object.values(selectedTemplate.modules).forEach(module => {
@ -63,7 +63,7 @@ export function Dashboard() {
setValidationError(null);
}
}, [templateId, templates, llmProviders]);
}, [selectedTemplate, llmProviders]);
const startWorkflowMutation = useMutation({
mutationFn: async (payload: DataRequestDTO) => {
@ -155,9 +155,9 @@ export function Dashboard() {
<SelectValue placeholder={isTemplatesLoading ? "Loading templates..." : "Select a template"} />
</SelectTrigger>
<SelectContent>
{templates && Object.keys(templates).length > 0 ? (
Object.entries(templates).map(([id, t]) => (
<SelectItem key={id} value={id}>
{templates && templates.length > 0 ? (
templates.map((t) => (
<SelectItem key={t.id} value={t.id}>
{t.name}
</SelectItem>
))
@ -203,7 +203,7 @@ export function Dashboard() {
<div className="grid grid-cols-1 md:grid-cols-3 gap-4 max-w-4xl mx-auto text-left">
<FeatureCard title="多源数据聚合" desc="集成 Tushare, Finnhub 等多个专业金融数据源。" />
<FeatureCard title="AI 驱动分析" desc="使用 GPT-4o 等大模型进行深度财务指标解读。" />
<FeatureCard title="AI 驱动分析" desc="配置任意大模型、Prompt和分析流,进行深度财务指标解读。" />
<FeatureCard title="可视化工作流" desc="全流程透明化,实时查看每个分析步骤的状态。" />
</div>
</div>
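
The provider-validation effect above is elided by the hunk; a sketch of what such a check could look like, assuming each module references its LLM provider by an `llm_provider_id` field (hypothetical name):

```ts
// Hypothetical shapes; the real AnalysisModuleConfig/LlmProvidersConfig fields may differ.
type ModuleLike = { name: string; llm_provider_id?: string };

function findMissingConfigs(
  modules: Record<string, ModuleLike>,
  llmProviders: Record<string, unknown>
): string[] {
  const missing: string[] = [];
  Object.values(modules).forEach(m => {
    if (m.llm_provider_id && !(m.llm_provider_id in llmProviders)) {
      missing.push(`${m.name}: provider '${m.llm_provider_id}' is not configured`);
    }
  });
  return missing;
}
```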

View File

@ -12,10 +12,10 @@ import { Button } from '@/components/ui/button';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { useAnalysisTemplates } from "@/hooks/useConfig"
import { RecentReportsDropdown } from '@/components/RecentReportsDropdown';
import { WorkflowStatus, ConnectionStatus, TaskState, TaskNode } from '@/types/workflow';
import { Progress } from "@/components/ui/progress"
import { cn, formatNodeName } from '@/lib/utils';
import { PDFExportButton } from '@/components/report/PDFExportButton';
export function HistoricalReportPage() {
const { id } = useParams();
@ -46,7 +46,7 @@ export function HistoricalReportPage() {
} = useWorkflowStore();
const { data: templates } = useAnalysisTemplates();
const templateName = templates && templateId ? templates[templateId]?.name : templateId;
const templateName = templates?.find(t => t.id === templateId)?.name || templateId;
// Initialization Logic - Historical Mode Only
useEffect(() => {
@ -117,7 +117,7 @@ export function HistoricalReportPage() {
const tabNodes = dag?.nodes || [];
return (
<div className="container py-4 space-y-4 min-h-[calc(100vh-4rem)] flex flex-col">
<div className="w-full px-6 py-4 space-y-4 min-h-[calc(100vh-4rem)] flex flex-col">
{/* Header Area */}
<div className="flex items-center justify-between shrink-0">
<div className="space-y-1">
@ -132,8 +132,12 @@ export function HistoricalReportPage() {
</div>
</div>
<div className="flex gap-2">
<RecentReportsDropdown />
<Button size="sm" variant="outline">Export PDF</Button>
<PDFExportButton
symbol={symbol}
market={market}
templateName={templateName}
requestId={id}
/>
</div>
</div>
@ -362,8 +366,8 @@ function TaskDetailView({ task, requestId, mode: _mode }: { task?: TaskState, re
<div className="relative h-full flex flex-col overflow-hidden">
{/* Main Report View */}
<div className="flex-1 overflow-auto p-8 bg-background">
<div className="max-w-4xl mx-auto">
<div className="prose dark:prose-invert max-w-none prose-p:text-foreground prose-headings:text-foreground prose-li:text-foreground prose-strong:text-foreground prose-span:text-foreground">
<div className="w-full">
<div className="prose dark:prose-invert max-w-none prose-p:text-foreground prose-headings:text-foreground prose-li:text-foreground prose-strong:text-foreground prose-span:text-foreground prose-td:text-foreground prose-th:text-foreground">
{task?.content ? (
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{task.content || ''}
@ -396,9 +400,9 @@ function TaskDetailView({ task, requestId, mode: _mode }: { task?: TaskState, re
</Button>
</div>
{/* Inspector Panel (Right Side Sheet) */}
{/* Inspector Panel (Overlaid on Content) */}
<div className={cn(
"absolute top-0 right-0 h-full w-[600px] bg-background border-l shadow-2xl transition-transform duration-300 transform z-20 flex flex-col",
"absolute top-0 right-0 h-full w-full bg-background border-l shadow-2xl transition-transform duration-300 transform z-20 flex flex-col",
isInspectorOpen ? "translate-x-0" : "translate-x-full"
)}>
<div className="flex items-center justify-between p-4 border-b shrink-0">
@ -411,55 +415,20 @@ function TaskDetailView({ task, requestId, mode: _mode }: { task?: TaskState, re
</Button>
</div>
<Tabs defaultValue="logs" className="flex-1 flex flex-col min-h-0">
<div className="px-4 border-b shrink-0">
<TabsList className="w-full justify-start h-10 p-0 bg-transparent">
<TabsTrigger value="logs" className="rounded-none border-b-2 border-transparent data-[state=active]:border-primary px-4 py-2">Logs</TabsTrigger>
{hasContext && (
<TabsTrigger value="context" className="rounded-none border-b-2 border-transparent data-[state=active]:border-primary px-4 py-2">Context</TabsTrigger>
)}
<TabsTrigger value="raw" className="rounded-none border-b-2 border-transparent data-[state=active]:border-primary px-4 py-2">Metadata</TabsTrigger>
</TabsList>
</div>
<TabsContent value="logs" className="flex-1 m-0 p-0 overflow-auto bg-muted/30">
{task?.logs && task.logs.length > 0 ? (
<div className="p-4 space-y-1 font-mono text-xs">
{task.logs.map((log, i) => (
<div key={i} className="break-words whitespace-pre-wrap">{log}</div>
))}
</div>
) : (
<div className="flex items-center justify-center h-full text-muted-foreground text-sm">
No logs available
</div>
)}
{/* TODO: Add support for loading _execution.md in historical mode */}
</TabsContent>
<TabsContent value="context" className="flex-1 m-0 p-0 min-h-0">
{requestId && (task?.inputCommit || task?.outputCommit) && (
<ContextExplorer
reqId={requestId}
commitHash={task.outputCommit || task.inputCommit!}
diffTargetHash={task.outputCommit ? task.inputCommit : undefined}
className="h-full p-4"
/>
)}
</TabsContent>
<TabsContent value="raw" className="flex-1 m-0 p-4 overflow-auto">
<pre className="text-xs font-mono bg-muted p-4 rounded-lg overflow-auto">
{JSON.stringify({
status: task?.status,
progress: task?.progress,
message: task?.message,
inputCommit: task?.inputCommit,
outputCommit: task?.outputCommit
}, null, 2)}
</pre>
</TabsContent>
</Tabs>
<div className="flex-1 flex flex-col min-h-0">
{hasContext && requestId && (task?.inputCommit || task?.outputCommit) ? (
<ContextExplorer
reqId={requestId}
commitHash={task.outputCommit || task.inputCommit!}
diffTargetHash={task.outputCommit ? task.inputCommit : undefined}
className="h-full p-4"
/>
) : (
<div className="flex items-center justify-center h-full text-muted-foreground text-sm">
No context available
</div>
)}
</div>
</div>
</div>
);

View File

@ -1,4 +1,4 @@
import { useState, useEffect, useMemo } from 'react';
import { useState, useEffect } from 'react';
import { useParams, useSearchParams } from 'react-router-dom';
import { Badge } from '@/components/ui/badge';
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
@ -7,16 +7,18 @@ import { WorkflowVisualizer } from '@/components/workflow/WorkflowVisualizer';
import { ContextExplorer } from '@/components/workflow/ContextExplorer';
import { useWorkflowStore } from '@/stores/useWorkflowStore';
import { TaskStatus, schemas } from '@/api/schema.gen';
import { Loader2, CheckCircle2, AlertCircle, Clock, PanelLeftClose, PanelLeftOpen, FileText, GitBranch, TerminalSquare, X, List, Trash2 } from 'lucide-react';
import { Loader2, CheckCircle2, AlertCircle, Clock, PanelLeftClose, PanelLeftOpen, TerminalSquare, X } from 'lucide-react';
import { Button } from '@/components/ui/button';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { useAnalysisTemplates } from "@/hooks/useConfig"
import { RecentReportsDropdown } from '@/components/RecentReportsDropdown';
import { WorkflowStatus, ConnectionStatus, TaskState } from '@/types/workflow';
import { Progress } from "@/components/ui/progress"
import { cn, formatNodeName } from '@/lib/utils';
import { RealtimeLogs } from '@/components/workflow/RealtimeLogs';
import { PDFExportButton } from '@/components/report/PDFExportButton';
export function ReportPage() {
const { id } = useParams();
const [searchParams] = useSearchParams();
@ -43,11 +45,12 @@ export function ReportPage() {
tasks,
dag,
activeTab,
setActiveTab
setActiveTab,
logs: globalLogs
} = useWorkflowStore();
const { data: templates } = useAnalysisTemplates();
const templateName = templates && templateId ? templates[templateId]?.name : templateId;
const templateName = templates?.find(t => t.id === templateId)?.name || templateId;
// Initialization & Connection Logic
useEffect(() => {
@ -60,9 +63,11 @@ export function ReportPage() {
// If the workflow is already finished, SSE might close immediately or 404.
const loadSnapshot = async () => {
try {
console.log(`[ReportPage] Fetching snapshot for ${id}...`);
const res = await fetch(`/api/v1/workflow/snapshot/${id}`);
if (res.ok) {
const snapshot = await res.json();
console.log(`[ReportPage] Snapshot loaded successfully for ${id}`, snapshot);
// Handle tagged enum wrapper (type/payload) if present
let rawPayload = snapshot.data_payload;
@ -71,9 +76,11 @@ export function ReportPage() {
}
loadFromSnapshot(rawPayload);
} else {
console.warn(`[ReportPage] Snapshot fetch failed: ${res.status} ${res.statusText}`);
}
} catch (e) {
console.warn("Snapshot load failed (normal for new tasks):", e);
console.warn("[ReportPage] Snapshot load exception (normal for new tasks):", e);
}
};
@ -81,25 +88,39 @@ export function ReportPage() {
// 2. Connect to Real-time Stream
try {
console.log(`[ReportPage] Initializing EventSource for ${id}...`);
eventSource = new EventSource(`/api/v1/workflow/events/${id}`);
eventSource.onopen = () => {
console.log(`[ReportPage] SSE Connection Opened for ${id}`);
};
eventSource.onmessage = (event) => {
try {
// console.log(`[ReportPage] SSE Message received:`, event.data);
const parsedEvent = JSON.parse(event.data);
if (parsedEvent.type === schemas.WorkflowEventType.enum.WorkflowStateSnapshot) {
console.log(`[ReportPage] !!! Received WorkflowStateSnapshot !!!`, parsedEvent);
} else if (parsedEvent.type !== schemas.WorkflowEventType.enum.TaskStreamUpdate && parsedEvent.type !== schemas.WorkflowEventType.enum.TaskLog) {
// Suppress high-frequency logs to prevent browser lag
console.log(`[ReportPage] SSE Event: ${parsedEvent.type}`, parsedEvent);
}
handleEvent(parsedEvent);
} catch (e) {
console.error("Failed to parse SSE event:", e);
console.error("[ReportPage] Failed to parse SSE event:", e);
}
};
eventSource.onerror = (err) => {
// Standard behavior: if connection closes, it might be finished or failed.
// We rely on Snapshot for history if SSE fails.
console.warn("SSE Connection Closed/Error", err);
console.warn("[ReportPage] SSE Connection Closed/Error", err);
eventSource?.close();
};
} catch (e) {
console.error("Failed to init SSE:", e);
console.error("[ReportPage] Failed to init SSE:", e);
}
return () => {
@ -110,8 +131,16 @@ export function ReportPage() {
// Include ALL nodes in tabs to allow debugging context for DataFetch tasks
const tabNodes = dag?.nodes || [];
return (
<div className="container py-4 space-y-4 min-h-[calc(100vh-4rem)] flex flex-col">
// Global raw logs come from the useWorkflowStore destructuring above
return (
<div className="w-full px-6 py-4 space-y-4 min-h-[calc(100vh-4rem)] flex flex-col">
{/* Realtime Logs - Only in realtime mode */}
{mode === 'realtime' && (
<RealtimeLogs logs={globalLogs} className="fixed bottom-0 left-0 right-0 z-50 w-full border-l-0 border-t-4 border-t-primary rounded-none shadow-[0_-4px_12px_rgba(0,0,0,0.1)]" />
)}
{/* Header Area */}
<div className="flex items-center justify-between shrink-0">
<div className="space-y-1">
@ -126,32 +155,12 @@ export function ReportPage() {
</div>
</div>
<div className="flex gap-2">
<RecentReportsDropdown />
<Button
size="sm"
variant="destructive"
className="gap-2"
onClick={async () => {
if (confirm("Are you sure you want to clear ALL history? This cannot be undone.")) {
try {
const res = await fetch('/api/v1/system/history', { method: 'DELETE' });
if (res.ok) {
window.location.href = '/';
} else {
console.error("Failed to clear history");
alert("Failed to clear history");
}
} catch (e) {
console.error(e);
alert("Error clearing history");
}
}
}}
>
<Trash2 className="h-4 w-4" />
Clear History
</Button>
<Button size="sm" variant="outline">Export PDF</Button>
<PDFExportButton
symbol={symbol}
market={market}
templateName={templateName}
requestId={id}
/>
</div>
</div>
@ -283,7 +292,15 @@ function OverviewTabContent({ status, tasks, totalTasks, completedTasks }: {
totalTasks: number,
completedTasks: number
}) {
const progress = totalTasks > 0 ? (completedTasks / totalTasks) * 100 : 0;
// Count ALL tasks that have reached a terminal state (Completed, Skipped, Failed)
// This is more accurate for "progress" than just successful completions.
const processedCount = Object.values(tasks).filter(t =>
t.status === schemas.TaskStatus.enum.Completed ||
t.status === schemas.TaskStatus.enum.Skipped ||
t.status === schemas.TaskStatus.enum.Failed
).length;
const progress = totalTasks > 0 ? (processedCount / totalTasks) * 100 : 0;
// Find errors
const failedTasks = Object.entries(tasks).filter(([_, t]) => t.status === schemas.TaskStatus.enum.Failed);
@ -312,7 +329,7 @@ function OverviewTabContent({ status, tasks, totalTasks, completedTasks }: {
<div className="space-y-2">
<div className="flex justify-between text-sm text-muted-foreground">
<span>Overall Progress</span>
<span>{Math.round(progress)}% ({completedTasks}/{totalTasks} tasks)</span>
<span>{Math.round(progress)}% ({processedCount}/{totalTasks} tasks)</span>
</div>
<Progress value={progress} className="h-2" />
</div>
@ -370,7 +387,7 @@ function OverviewTabContent({ status, tasks, totalTasks, completedTasks }: {
)
}
function TaskDetailView({ taskId, task, requestId, mode }: { taskId: string, task?: TaskState, requestId?: string, mode: 'realtime' | 'historical' }) {
function TaskDetailView({ taskId, task, requestId }: { taskId: string, task?: TaskState, requestId?: string, mode: 'realtime' | 'historical' }) {
const [isInspectorOpen, setIsInspectorOpen] = useState(false);
const { setTaskContent } = useWorkflowStore();
@ -442,8 +459,8 @@ function TaskDetailView({ taskId, task, requestId, mode }: { taskId: string, tas
<div className="relative h-full flex flex-col overflow-hidden">
{/* Main Report View */}
<div className="flex-1 overflow-auto p-8 bg-background">
<div className="max-w-4xl mx-auto">
<div className="prose dark:prose-invert max-w-none prose-p:text-foreground prose-headings:text-foreground prose-li:text-foreground prose-strong:text-foreground prose-span:text-foreground">
<div className="w-full">
<div className="prose dark:prose-invert max-w-none">
{task?.content ? (
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{task.content || ''}
@ -476,9 +493,9 @@ function TaskDetailView({ taskId, task, requestId, mode }: { taskId: string, tas
</Button>
</div>
{/* Inspector Panel (Right Side Sheet) */}
{/* Inspector Panel (Overlaid on Content) */}
<div className={cn(
"absolute top-0 right-0 h-full w-[600px] bg-background border-l shadow-2xl transition-transform duration-300 transform z-20 flex flex-col",
"absolute top-0 right-0 h-full w-full bg-background border-l shadow-2xl transition-transform duration-300 transform z-20 flex flex-col",
isInspectorOpen ? "translate-x-0" : "translate-x-full"
)}>
<div className="flex items-center justify-between p-4 border-b shrink-0">
@ -491,55 +508,20 @@ function TaskDetailView({ taskId, task, requestId, mode }: { taskId: string, tas
</Button>
</div>
<Tabs defaultValue="logs" className="flex-1 flex flex-col min-h-0">
<div className="px-4 border-b shrink-0">
<TabsList className="w-full justify-start h-10 p-0 bg-transparent">
<TabsTrigger value="logs" className="rounded-none border-b-2 border-transparent data-[state=active]:border-primary px-4 py-2">Logs</TabsTrigger>
{hasContext && (
<TabsTrigger value="context" className="rounded-none border-b-2 border-transparent data-[state=active]:border-primary px-4 py-2">Context</TabsTrigger>
)}
<TabsTrigger value="raw" className="rounded-none border-b-2 border-transparent data-[state=active]:border-primary px-4 py-2">Metadata</TabsTrigger>
</TabsList>
</div>
<TabsContent value="logs" className="flex-1 m-0 p-0 overflow-auto bg-muted/30">
{task?.logs && task.logs.length > 0 ? (
<div className="p-4 space-y-1 font-mono text-xs">
{task.logs.map((log, i) => (
<div key={i} className="break-words whitespace-pre-wrap">{log}</div>
))}
</div>
) : (
<div className="flex items-center justify-center h-full text-muted-foreground text-sm">
No logs available
</div>
)}
{/* TODO: Add support for loading _execution.md in historical mode */}
</TabsContent>
<TabsContent value="context" className="flex-1 m-0 p-0 min-h-0">
{requestId && (task?.inputCommit || task?.outputCommit) && (
<ContextExplorer
reqId={requestId}
commitHash={task.outputCommit || task.inputCommit!}
diffTargetHash={task.outputCommit ? task.inputCommit : undefined}
className="h-full p-4"
/>
)}
</TabsContent>
<TabsContent value="raw" className="flex-1 m-0 p-4 overflow-auto">
<pre className="text-xs font-mono bg-muted p-4 rounded-lg overflow-auto">
{JSON.stringify({
status: task?.status,
progress: task?.progress,
message: task?.message,
inputCommit: task?.inputCommit,
outputCommit: task?.outputCommit
}, null, 2)}
</pre>
</TabsContent>
</Tabs>
<div className="flex-1 flex flex-col min-h-0">
{hasContext && requestId && (task?.inputCommit || task?.outputCommit) ? (
<ContextExplorer
reqId={requestId}
commitHash={task.outputCommit || task.inputCommit!}
diffTargetHash={task.outputCommit ? task.inputCommit : undefined}
className="h-full p-4"
/>
) : (
<div className="flex items-center justify-center h-full text-muted-foreground text-sm">
No context available
</div>
)}
</div>
</div>
</div>
);
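
The SSE wiring earlier in this file is a deliberate design choice: `onerror` closes the stream and the page falls back to the snapshot endpoint rather than reconnecting. If automatic recovery were wanted instead, a capped-backoff sketch might look like this (not the repo's approach):

```ts
// Sketch: EventSource with capped exponential backoff.
function connectWithRetry(url: string, onMessage: (e: MessageEvent) => void, maxDelayMs = 30_000) {
  let delay = 1_000;
  let es: EventSource | null = null;

  const open = () => {
    es = new EventSource(url);
    es.onopen = () => { delay = 1_000; };      // reset backoff once connected
    es.onmessage = onMessage;
    es.onerror = () => {
      es?.close();
      setTimeout(open, delay);                 // retry after the current delay
      delay = Math.min(delay * 2, maxDelayMs); // exponential backoff, capped
    };
  };

  open();
  return () => es?.close(); // cleanup for useEffect
}
```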

View File

@ -1,5 +1,5 @@
import { useDataSources, useUpdateDataSources, useTestDataSource, useRegisteredProviders } from "@/hooks/useConfig"
import { DataSourceConfig } from "@/types/config"
import { DataSourceConfig, DataSourceProvider, DataSourceProviders } from "@/types/config"
import { useToast } from "@/hooks/use-toast"
import { DynamicConfigForm } from "@/components/config/DynamicConfigForm"
@ -98,18 +98,28 @@ export function DataSourceTab() {
// Find existing config or create default
const configEntry = dataSources ? (dataSources as Record<string, any>)[meta.id] : undefined;
// Default config structure.
// Note: We default 'provider' field to the ID from metadata.
// Backend expects specific enum values for 'provider', but currently our IDs match (lowercase/uppercase handling needed?)
// The backend DataSourceProvider enum is PascalCase (Tushare), but IDs are likely lowercase (tushare).
// However, DataSourceConfig.provider is an enum.
// We might need to map ID to Enum if strict.
// For now, assuming the backend persistence can handle the string or we just store it.
// Actually, the 'provider' field in DataSourceConfig is DataSourceProvider enum.
// Let's hope the JSON deserialization handles "tushare" -> Tushare.
// meta.id must correspond to a DataSourceProvider variant, since the backend only
// registers providers from that enum. However, meta.id arrives lowercase (e.g. "tushare")
// while the enum is PascalCase (e.g. "Tushare"); the backend serialization cannot be
// changed easily, so we match the ID against the enum case-insensitively to satisfy the Zod schema.
let providerEnum = Object.values(DataSourceProviders).find(
(p) => p.toLowerCase() === meta.id.toLowerCase()
);
if (!providerEnum) {
console.warn(`Provider ID '${meta.id}' from metadata does not match any known DataSourceProvider enum.`);
// Skipping would leave the provider unconfigurable, so fall back to the raw ID,
// cast to satisfy TS. Zod may still reject it on save if it truly doesn't match.
providerEnum = meta.id as DataSourceProvider;
}
const config = (configEntry || {
provider: meta.id, // This might need capitalization adjustment
provider: providerEnum,
enabled: false,
// We init other fields as empty, they will be filled by DynamicConfigForm
}) as DataSourceConfig;
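
For illustration, the case mismatch the comments describe is exactly what a Zod enum rejects (stand-in enum; the real one is generated into schema.gen):

```ts
import { z } from 'zod';

// Stand-in for the generated DataSourceProvider enum.
const DataSourceProvider = z.enum(['Tushare', 'Finnhub', 'AlphaVantage', 'YFinance']);

DataSourceProvider.parse('Tushare'); // ok
DataSourceProvider.parse('tushare'); // throws ZodError: invalid enum value
```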

View File

@ -1,5 +1,11 @@
import { useState, useEffect, useMemo } from "react"
import { useAnalysisTemplates, useUpdateAnalysisTemplates, useLlmProviders } from "@/hooks/useConfig"
import {
useAnalysisTemplates,
useAnalysisTemplate,
useSaveAnalysisTemplate,
useDeleteAnalysisTemplate,
useLlmProviders
} from "@/hooks/useConfig"
import { AnalysisTemplateSet, AnalysisModuleConfig } from "@/types/config"
import { schemas } from "@/api/schema.gen"
import { z } from "zod"
@ -16,29 +22,28 @@ import { useToast } from "@/hooks/use-toast"
export function TemplateTab() {
const { data: templates, isLoading } = useAnalysisTemplates();
const updateTemplates = useUpdateAnalysisTemplates();
const saveTemplate = useSaveAnalysisTemplate();
const deleteTemplate = useDeleteAnalysisTemplate();
const { toast } = useToast();
const [selectedId, setSelectedId] = useState<string | null>(null);
// Auto select first if none selected
useEffect(() => {
if (templates && !selectedId && Object.keys(templates).length > 0) {
setSelectedId(Object.keys(templates)[0]);
}
if (templates && !selectedId && templates.length > 0) {
setSelectedId(templates[0].id);
}
}, [templates, selectedId]);
if (isLoading) return <div>Loading templates...</div>;
const handleCreateTemplate = () => {
if (!templates) return;
const newId = crypto.randomUUID();
const newTemplate: AnalysisTemplateSet = {
name: "New Template",
modules: {}
};
const newTemplates = { ...templates, [newId]: newTemplate };
updateTemplates.mutate(newTemplates, {
saveTemplate.mutate({ id: newId, template: newTemplate }, {
onSuccess: () => {
toast({ title: "Success", description: "Template created" });
setSelectedId(newId);
@ -47,88 +52,96 @@ export function TemplateTab() {
});
}
const handleUpdateTemplate = (id: string, updatedTemplate: AnalysisTemplateSet) => {
if (!templates) return;
const newTemplates = { ...templates, [id]: updatedTemplate };
updateTemplates.mutate(newTemplates, {
onSuccess: () => toast({ title: "Success", description: "Template saved" }),
onError: () => toast({ title: "Error", description: "Failed to save template", type: "error" })
const handleDeleteTemplate = (id: string) => {
deleteTemplate.mutate(id, {
onSuccess: () => {
toast({ title: "Success", description: "Template deleted" });
if (selectedId === id) setSelectedId(null);
},
onError: () => toast({ title: "Error", description: "Failed to delete template", type: "error" })
});
}
const handleDeleteTemplate = (id: string) => {
if (!templates) return;
// eslint-disable-next-line @typescript-eslint/no-unused-vars
const { [id]: removed, ...rest } = templates;
updateTemplates.mutate(rest, {
onSuccess: () => {
toast({ title: "Success", description: "Template deleted" });
if (selectedId === id) setSelectedId(null);
},
onError: () => toast({ title: "Error", description: "Failed to delete template", type: "error" })
});
}
return (
<div className="flex h-[600px] border rounded-md overflow-hidden">
{/* Sidebar List */}
<div className="w-[250px] border-r bg-muted/10 flex flex-col">
<div className="p-4 border-b">
<h4 className="font-medium mb-1"></h4>
<p className="text-xs text-muted-foreground"></p>
</div>
<ScrollArea className="flex-1">
<div className="p-2 space-y-1">
{templates && templates.map((t) => (
<div key={t.id} className="group relative flex items-center">
<button
onClick={() => setSelectedId(t.id)}
className={`w-full text-left px-3 py-2 rounded-md text-sm transition-colors flex items-center justify-between ${
selectedId === t.id ? "bg-accent text-accent-foreground font-medium" : "hover:bg-muted"
}`}
>
<span className="truncate pr-6">{t.name}</span>
{selectedId === t.id && <ArrowRight className="h-3 w-3 opacity-50" />}
</button>
{/* Delete button visible on hover */}
<button
onClick={(e) => { e.stopPropagation(); handleDeleteTemplate(t.id); }}
className="absolute right-2 top-2 hidden group-hover:block text-muted-foreground hover:text-destructive"
>
<Trash2 className="h-3 w-3" />
</button>
</div>
))}
</div>
</ScrollArea>
<div className="p-3 border-t bg-background">
<Button size="sm" variant="outline" className="w-full" onClick={handleCreateTemplate}>
<Plus className="mr-2 h-4 w-4" />
</Button>
</div>
</div>
const activeTemplate = (templates && selectedId) ? (templates as Record<string, AnalysisTemplateSet>)[selectedId] : null;
return (
<div className="flex h-[600px] border rounded-md overflow-hidden">
{/* Sidebar List */}
<div className="w-[250px] border-r bg-muted/10 flex flex-col">
<div className="p-4 border-b">
<h4 className="font-medium mb-1"></h4>
<p className="text-xs text-muted-foreground"></p>
</div>
<ScrollArea className="flex-1">
<div className="p-2 space-y-1">
{templates && Object.entries(templates).map(([id, t]) => (
<div key={id} className="group relative flex items-center">
<button
onClick={() => setSelectedId(id)}
className={`w-full text-left px-3 py-2 rounded-md text-sm transition-colors flex items-center justify-between ${
selectedId === id ? "bg-accent text-accent-foreground font-medium" : "hover:bg-muted"
}`}
>
<span className="truncate pr-6">{(t as AnalysisTemplateSet).name}</span>
{selectedId === id && <ArrowRight className="h-3 w-3 opacity-50" />}
</button>
{/* Delete button visible on hover */}
<button
onClick={(e) => { e.stopPropagation(); handleDeleteTemplate(id); }}
className="absolute right-2 top-2 hidden group-hover:block text-muted-foreground hover:text-destructive"
>
<Trash2 className="h-3 w-3" />
</button>
</div>
))}
</div>
</ScrollArea>
<div className="p-3 border-t bg-background">
<Button size="sm" variant="outline" className="w-full" onClick={handleCreateTemplate}>
<Plus className="mr-2 h-4 w-4" />
</Button>
</div>
</div>
{/* Main Content */}
<div className="flex-1 bg-background flex flex-col">
{activeTemplate && selectedId ? (
<TemplateDetailView
key={selectedId} // Force remount on ID change
template={activeTemplate}
onSave={(t) => handleUpdateTemplate(selectedId, t)}
isSaving={updateTemplates.isPending}
/>
) : (
<div className="flex-1 flex items-center justify-center text-muted-foreground">
{templates && Object.keys(templates).length === 0 ? "No templates found. Create one." : "Select a template"}
</div>
)}
</div>
{/* Main Content */}
<div className="flex-1 bg-background flex flex-col">
{selectedId ? (
<TemplateDetailWrapper
key={selectedId}
templateId={selectedId}
/>
) : (
<div className="flex-1 flex items-center justify-center text-muted-foreground">
{templates && templates.length === 0 ? "No templates found. Create one." : "Select a template"}
</div>
)}
</div>
</div>
)
}
function TemplateDetailWrapper({ templateId }: { templateId: string }) {
const { data: template, isLoading, isError } = useAnalysisTemplate(templateId);
const saveTemplate = useSaveAnalysisTemplate();
const { toast } = useToast();
if (isLoading) return <div className="flex items-center justify-center h-full">Loading details...</div>;
if (isError || !template) return <div className="flex items-center justify-center h-full text-destructive">Error loading template</div>;
const handleSave = (updatedTemplate: AnalysisTemplateSet) => {
saveTemplate.mutate({ id: templateId, template: updatedTemplate }, {
onSuccess: () => toast({ title: "Success", description: "Template saved" }),
onError: () => toast({ title: "Error", description: "Failed to save template", type: "error" })
});
};
return (
<TemplateDetailView
template={template}
onSave={handleSave}
isSaving={saveTemplate.isPending}
/>
);
}
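
A design note on `key={selectedId}` in the wrapper usage above: `TemplateDetailView` seeds local state with `useState(template)`, and `useState` reads its argument only on first mount, so without a changing `key` a template switch would show stale draft state. A minimal illustration:

```tsx
import { useState } from 'react';

// Without `key`, this editor keeps the first template's draft when `template` changes,
// because useState ignores its argument after the initial mount.
function Editor({ template }: { template: { name: string } }) {
  const [draft, setDraft] = useState(template.name);
  return <input value={draft} onChange={e => setDraft(e.target.value)} />;
}

// Remount-on-change: a new `key` discards the old component state entirely.
export function EditorHost({ id, template }: { id: string; template: { name: string } }) {
  return <Editor key={id} template={template} />;
}
```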
function TemplateDetailView({ template, onSave, isSaving }: { template: AnalysisTemplateSet, onSave: (t: AnalysisTemplateSet) => void, isSaving: boolean }) {
const [localTemplate, setLocalTemplate] = useState(template);
const [isDirty, setIsDirty] = useState(false);

View File

@ -8,6 +8,7 @@ interface WorkflowStoreState {
mode: 'realtime' | 'historical';
dag: WorkflowDag | null;
tasks: Record<string, TaskState>;
logs: string[]; // Global realtime logs
error: string | null;
activeTab: string; // For UI linking
@ -19,6 +20,7 @@ interface WorkflowStoreState {
updateTaskContent: (taskId: string, delta: string) => void; // Stream content (append)
setTaskContent: (taskId: string, content: string) => void; // Set full content
appendTaskLog: (taskId: string, log: string) => void;
appendGlobalLog: (log: string) => void; // New action for raw global logs
setActiveTab: (tabId: string) => void;
completeWorkflow: (result: unknown) => void;
failWorkflow: (reason: string) => void;
@ -33,6 +35,7 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
mode: 'realtime',
dag: null,
tasks: {},
logs: [],
error: null,
activeTab: 'overview',
@ -42,6 +45,7 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
mode: 'realtime',
error: null,
tasks: {},
logs: [],
activeTab: 'overview'
}),
@ -155,6 +159,12 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
});
},
appendGlobalLog: (log) => {
set(state => ({
logs: [...state.logs, log]
}));
},
setActiveTab: (tabId) => set({ activeTab: tabId }),
completeWorkflow: (_result) => set({ status: schemas.TaskStatus.enum.Completed }),
@ -162,79 +172,129 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
handleEvent: (event: WorkflowEvent) => {
const state = get();
// console.log('Handling Event:', event.type, event);
// Enhanced Logging (Filtered)
if (event.type !== schemas.WorkflowEventType.enum.TaskStreamUpdate && event.type !== schemas.WorkflowEventType.enum.TaskLog) {
console.log(`[Store] Handling Event: ${event.type}`, event);
}
switch (event.type) {
case 'WorkflowStarted':
case schemas.WorkflowEventType.enum.WorkflowStarted:
state.setDag(event.payload.task_graph);
break;
case 'TaskStateChanged': {
case schemas.WorkflowEventType.enum.TaskStateChanged: {
const p = event.payload;
console.log(`[Store] Task Update: ${p.task_id} -> ${p.status}`);
// @ts-ignore
state.updateTaskStatus(
p.task_id,
p.status,
p.message || undefined,
(p.message === null) ? undefined : p.message,
p.progress || undefined,
p.input_commit,
p.output_commit
p.input_commit || undefined, // Explicitly handle null/undefined
p.output_commit || undefined
);
break;
}
case 'TaskStreamUpdate': {
case schemas.WorkflowEventType.enum.TaskStreamUpdate: {
const p = event.payload;
state.updateTaskContent(p.task_id, p.content_delta);
break;
}
// @ts-ignore
case 'TaskLog': {
case schemas.WorkflowEventType.enum.TaskLog: {
const p = event.payload;
const time = new Date(p.timestamp).toLocaleTimeString();
const log = `[${time}] [${p.level}] ${p.message}`;
// Update Task-specific logs
state.appendTaskLog(p.task_id, log);
// Update Global Raw Logs
const globalLog = `[${time}] [${p.task_id}] [${p.level}] ${p.message}`;
state.appendGlobalLog(globalLog);
break;
}
case 'WorkflowCompleted': {
case schemas.WorkflowEventType.enum.WorkflowCompleted: {
console.log("[Store] Workflow Completed");
state.completeWorkflow(event.payload.result_summary);
break;
}
case 'WorkflowFailed': {
case schemas.WorkflowEventType.enum.WorkflowFailed: {
console.log("[Store] Workflow Failed:", event.payload.reason);
state.failWorkflow(event.payload.reason);
break;
}
case 'WorkflowStateSnapshot': {
case schemas.WorkflowEventType.enum.WorkflowStateSnapshot: {
// Used for real-time rehydration (e.g. page refresh)
console.log("[Store] Processing WorkflowStateSnapshot...", event.payload);
// First, restore DAG if present
if (event.payload.task_graph) {
// WARNING: setDag resets all tasks to their initial state. That is acceptable here
// because a snapshot is a full replacement, not an incremental update.
state.setDag(event.payload.task_graph);
}
const currentTasks = get().tasks;
const currentTasks = get().tasks; // These are now reset if setDag was called
const newTasks = { ...currentTasks };
if (event.payload.tasks_status) {
Object.entries(event.payload.tasks_status).forEach(([taskId, status]) => {
if (newTasks[taskId] && status) {
newTasks[taskId] = { ...newTasks[taskId], status: status as TaskStatus };
}
});
const payload = event.payload as any;
// NEW: Handle task_states (Comprehensive Snapshot)
if (payload.task_states) {
Object.entries(payload.task_states).forEach(([taskId, stateSnapshot]: [string, any]) => {
// Merge or Create
const existing = newTasks[taskId] || {
status: schemas.TaskStatus.enum.Pending,
logs: [],
progress: 0,
content: ''
};
newTasks[taskId] = {
...existing,
status: stateSnapshot.status,
// Prefer snapshot logs when available; they are the full history
logs: (stateSnapshot.logs && stateSnapshot.logs.length > 0) ? stateSnapshot.logs : existing.logs,
// Prefer snapshot content if available
content: stateSnapshot.content !== undefined && stateSnapshot.content !== null ? stateSnapshot.content : existing.content,
inputCommit: stateSnapshot.input_commit,
outputCommit: stateSnapshot.output_commit,
metadata: stateSnapshot.metadata
};
});
} else {
// Fallback / Compatibility
if (payload.tasks_status) {
Object.entries(payload.tasks_status).forEach(([taskId, status]) => {
if (newTasks[taskId] && status) {
newTasks[taskId] = { ...newTasks[taskId], status: status as TaskStatus };
}
});
}
if (payload.tasks_output) {
Object.entries(payload.tasks_output).forEach(([taskId, outputCommit]) => {
if (newTasks[taskId] && outputCommit) {
newTasks[taskId] = { ...newTasks[taskId], outputCommit: outputCommit as string };
}
});
}
if (payload.tasks_metadata) {
Object.entries(payload.tasks_metadata).forEach(([taskId, metadata]) => {
if (newTasks[taskId] && metadata) {
// @ts-ignore
newTasks[taskId] = { ...newTasks[taskId], metadata: metadata };
}
});
}
}
if (event.payload.tasks_output) {
Object.entries(event.payload.tasks_output).forEach(([taskId, outputCommit]) => {
if (newTasks[taskId] && outputCommit) {
newTasks[taskId] = { ...newTasks[taskId], outputCommit: outputCommit as string };
}
});
}
if (event.payload.tasks_metadata) {
Object.entries(event.payload.tasks_metadata).forEach(([taskId, metadata]) => {
if (newTasks[taskId] && metadata) {
// Note: The generated client types define metadata as TaskMetadata which includes optional paths.
// We store it directly as it matches our TaskState.metadata shape partially.
newTasks[taskId] = { ...newTasks[taskId], metadata: metadata };
}
});
// Handle global log replay
// @ts-ignore
if (payload.logs && Array.isArray(payload.logs)) {
set({ logs: payload.logs });
}
set({ tasks: newTasks });
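
For reference, the payload this merge handles corresponds to the `WorkflowStateSnapshot` schema later in this diff; a hand-written sample with illustrative values (the `task_graph` node shape is abbreviated):

```ts
const sample = {
  task_graph: { nodes: [{ id: 'fetch_financials' }], edges: [] },
  // Legacy fields, kept for compatibility:
  tasks_status: { fetch_financials: 'Completed' },
  tasks_output: { fetch_financials: 'a1b2c3' },
  tasks_metadata: { fetch_financials: { extra: {} } },
  // New comprehensive form (one TaskStateSnapshot per task):
  task_states: {
    fetch_financials: {
      task_id: 'fetch_financials',
      status: 'Completed',
      logs: ['[10:00:01] [INFO] done'],
      content: '## Financials ...',
      input_commit: 'deadbe',
      output_commit: 'a1b2c3',
      metadata: { extra: {} },
    },
  },
  // Global realtime log replay:
  logs: ['[10:00:01] [fetch_financials] [INFO] done'],
};
```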
@ -244,7 +304,11 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
},
loadFromSnapshot: (payload: any) => {
// Used for loading completed/archived sessions
const dag = payload.task_graph;
// Check if we have the new `task_states` format in the snapshot
const taskStates = payload.task_states;
const tasks_status = payload.tasks_status;
const tasks_output = payload.tasks_output;
const tasks_metadata = payload.tasks_metadata;
@ -253,14 +317,28 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
if (dag) {
dag.nodes.forEach((node: any) => {
newTasks[node.id] = {
status: tasks_status?.[node.id] || node.initial_status,
logs: [],
progress: 100,
content: '', // Content is not in snapshot, needs on-demand loading
outputCommit: tasks_output?.[node.id],
metadata: tasks_metadata?.[node.id]
};
if (taskStates && taskStates[node.id]) {
// Use new format
const s = taskStates[node.id];
newTasks[node.id] = {
status: s.status,
logs: s.logs || [],
progress: s.status === 'Completed' ? 100 : 0,
content: s.content || '',
outputCommit: s.output_commit,
metadata: s.metadata
};
} else {
// Legacy fallback
newTasks[node.id] = {
status: tasks_status?.[node.id] || node.initial_status,
logs: [],
progress: 100,
content: '', // Content is not in legacy snapshot
outputCommit: tasks_output?.[node.id],
metadata: tasks_metadata?.[node.id]
};
}
});
}
@ -271,6 +349,10 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
mode: 'historical',
error: null
});
if (payload.logs) {
set({ logs: payload.logs });
}
},
reset: () => set({
@ -279,6 +361,7 @@ export const useWorkflowStore = create<WorkflowStoreState>((set, get) => ({
mode: 'realtime',
dag: null,
tasks: {},
logs: [],
error: null,
activeTab: 'overview'
})

View File

@ -7,17 +7,26 @@ export default defineConfig(({ mode }) => {
const env = loadEnv(mode, process.cwd(), '')
return {
plugins: [react()],
// Explicitly set the public directory ('public' is already the default).
publicDir: 'public',
optimizeDeps: {
exclude: ['dagre'],
// 'web-worker' needs to be optimized or handled correctly by Vite for elkjs
include: ['elkjs/lib/elk.bundled.js']
include: ['elkjs/lib/elk.bundled.js', 'buffer']
},
resolve: {
alias: {
"@": path.resolve(__dirname, "./src"),
// Force buffer to resolve to the installed package, not node built-in
buffer: "buffer",
},
},
server: {
// Ensure static files are served correctly
fs: {
strict: false,
},
proxy: {
'/api': {
target: env.VITE_API_TARGET || 'http://localhost:4000',

View File

@ -9,56 +9,6 @@
"version": "0.1.0"
},
"paths": {
"/api/v1/configs/analysis_template_sets": {
"get": {
"tags": [
"api"
],
"summary": "[GET /api/v1/configs/analysis_template_sets]",
"operationId": "get_analysis_template_sets",
"responses": {
"200": {
"description": "Analysis template sets configuration",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisTemplateSets"
}
}
}
}
}
},
"put": {
"tags": [
"api"
],
"summary": "[PUT /api/v1/configs/analysis_template_sets]",
"operationId": "update_analysis_template_sets",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisTemplateSets"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Updated analysis template sets configuration",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisTemplateSets"
}
}
}
}
}
}
},
"/api/v1/configs/data_sources": {
"get": {
"tags": [
@ -183,6 +133,134 @@
}
}
},
"/api/v1/configs/templates": {
"get": {
"tags": [
"api"
],
"summary": "[GET /api/v1/configs/templates]",
"operationId": "get_templates",
"responses": {
"200": {
"description": "List of analysis templates",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"$ref": "#/components/schemas/AnalysisTemplateSummary"
}
}
}
}
}
}
}
},
"/api/v1/configs/templates/{id}": {
"get": {
"tags": [
"api"
],
"summary": "[GET /api/v1/configs/templates/{id}]",
"operationId": "get_template_by_id",
"parameters": [
{
"name": "id",
"in": "path",
"description": "Template ID",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"200": {
"description": "Analysis template details",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisTemplateSet"
}
}
}
},
"404": {
"description": "Template not found"
}
}
},
"put": {
"tags": [
"api"
],
"summary": "[PUT /api/v1/configs/templates/{id}]",
"operationId": "update_template",
"parameters": [
{
"name": "id",
"in": "path",
"description": "Template ID",
"required": true,
"schema": {
"type": "string"
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisTemplateSet"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Updated analysis template",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/AnalysisTemplateSet"
}
}
}
},
"404": {
"description": "Template not found"
}
}
},
"delete": {
"tags": [
"api"
],
"summary": "[DELETE /api/v1/configs/templates/{id}]",
"operationId": "delete_template",
"parameters": [
{
"name": "id",
"in": "path",
"description": "Template ID",
"required": true,
"schema": {
"type": "string"
}
}
],
"responses": {
"204": {
"description": "Template deleted"
},
"404": {
"description": "Template not found"
}
}
}
},
"/api/v1/configs/test": {
"post": {
"tags": [
@ -508,7 +586,7 @@
"type": "string"
},
"context_selector": {
"$ref": "#/components/schemas/ContextSelectorConfig"
"$ref": "#/components/schemas/SelectionMode"
},
"dependencies": {
"type": "array",
@ -576,6 +654,23 @@
"type": "string"
}
},
"AnalysisTemplateSummary": {
"type": "object",
"description": "Summary of an analysis template (for listing purposes).",
"required": [
"id",
"name"
],
"properties": {
"id": {
"type": "string"
},
"name": {
"type": "string"
}
},
"additionalProperties": false
},
"CanonicalSymbol": {
"type": "string",
"description": "CanonicalSymbol 是系统内部唯一的股票代码标识符类型\n它封装了一个标准化的字符串遵循 Yahoo Finance 格式)\n使用 newtype 模式防止与普通 String 混淆",
@ -649,13 +744,6 @@
"Region"
]
},
"ContextSelectorConfig": {
"allOf": [
{
"$ref": "#/components/schemas/SelectionMode"
}
]
},
"DataRequest": {
"type": "object",
"required": [
@ -784,6 +872,16 @@
"LlmConfig": {
"type": "object",
"properties": {
"extra_params": {
"type": [
"object",
"null"
],
"additionalProperties": {},
"propertyNames": {
"type": "string"
}
},
"max_tokens": {
"type": [
"integer",
@ -1105,6 +1203,9 @@
"TaskMetadata": {
"type": "object",
"description": "Metadata produced by a task execution.",
"required": [
"extra"
],
"properties": {
"execution_log_path": {
"type": [
@ -1113,6 +1214,14 @@
],
"description": "The execution trace log path"
},
"extra": {
"type": "object",
"description": "Additional arbitrary metadata",
"additionalProperties": {},
"propertyNames": {
"type": "string"
}
},
"output_path": {
"type": [
"string",
@ -1189,6 +1298,58 @@
},
"additionalProperties": false
},
"TaskStateSnapshot": {
"type": "object",
"description": "Comprehensive snapshot state for a single task",
"required": [
"task_id",
"status",
"logs"
],
"properties": {
"content": {
"type": [
"string",
"null"
]
},
"input_commit": {
"type": [
"string",
"null"
]
},
"logs": {
"type": "array",
"items": {
"type": "string"
}
},
"metadata": {
"oneOf": [
{
"type": "null"
},
{
"$ref": "#/components/schemas/TaskMetadata"
}
]
},
"output_commit": {
"type": [
"string",
"null"
]
},
"status": {
"$ref": "#/components/schemas/TaskStatus"
},
"task_id": {
"type": "string"
}
},
"additionalProperties": false
},
"TaskStatus": {
"type": "string",
"enum": [
@ -1531,12 +1692,29 @@
"task_graph",
"tasks_status",
"tasks_output",
"tasks_metadata"
"tasks_metadata",
"logs"
],
"properties": {
"logs": {
"type": "array",
"items": {
"type": "string"
}
},
"task_graph": {
"$ref": "#/components/schemas/WorkflowDag"
},
"task_states": {
"type": "object",
"description": "New: Detailed state for each task including logs and content buffer",
"additionalProperties": {
"$ref": "#/components/schemas/TaskStateSnapshot"
},
"propertyNames": {
"type": "string"
}
},
"tasks_metadata": {
"type": "object",
"additionalProperties": {
@ -1584,6 +1762,18 @@
],
"description": "Unified event stream for frontend consumption."
},
"WorkflowEventType": {
"type": "string",
"enum": [
"WorkflowStarted",
"TaskStateChanged",
"TaskStreamUpdate",
"TaskLog",
"WorkflowCompleted",
"WorkflowFailed",
"WorkflowStateSnapshot"
]
},
"WorkflowHistoryDto": {
"type": "object",
"required": [

package-lock.json (generated, 2126 changed lines)

File diff suppressed because it is too large

View File

@ -3,9 +3,14 @@
"@radix-ui/react-dialog": "^1.1.15",
"@radix-ui/react-popover": "^1.1.15",
"@radix-ui/react-progress": "^1.1.8",
"@react-pdf/renderer": "^4.3.1",
"cmdk": "^1.1.1",
"elkjs": "^0.11.0",
"html-to-image": "^1.11.13",
"immer": "^10.2.0",
"marked": "^17.0.1",
"react-markdown": "^10.1.0",
"remark-gfm": "^4.0.1",
"zustand": "^5.0.8"
}
}

View File

@ -6,7 +6,7 @@ set -e
# Configuration variables
REGISTRY="harbor.3prism.ai"
PROJECT="fundamental_analysis"
VERSION="latest" # or use $(date +%Y%m%d%H%M%S) for a timestamped version
VERSION="latest"
NAMESPACE="$REGISTRY/$PROJECT"
# Colored output
@ -15,70 +15,154 @@ YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
echo -e "${GREEN}=== 开始构建并推送镜像到 $NAMESPACE ===${NC}"
# 清理工作目录函数
function cleanup {
echo -e "\n${YELLOW}>>> 清理临时文件...${NC}"
rm -rf ./dist_bin
rm -rf ./temp_build_context
# 尝试删除构建容器(如果存在)
docker rm -f fundamental-builder-extract 2>/dev/null || true
}
trap cleanup EXIT
# 定义服务列表及其 Dockerfile 路径
# 格式: "服务名:Dockerfile路径"
SERVICES=(
"data-persistence-service:services/data-persistence-service/Dockerfile"
"api-gateway:services/api-gateway/Dockerfile"
"alphavantage-provider-service:services/alphavantage-provider-service/Dockerfile"
"tushare-provider-service:services/tushare-provider-service/Dockerfile"
"finnhub-provider-service:services/finnhub-provider-service/Dockerfile"
"yfinance-provider-service:services/yfinance-provider-service/Dockerfile"
"report-generator-service:services/report-generator-service/Dockerfile"
"frontend:frontend/Dockerfile.prod"
echo -e "${GREEN}=== 开始优化的构建部署流程 ===${NC}"
echo -e "目标仓库: $NAMESPACE"
# ==========================================
# 阶段 1: 全局构建 (Build Once)
# ==========================================
echo -e "\n${YELLOW}>>> [阶段 1/3] 全局构建: 编译所有 Rust 服务...${NC}"
echo "使用 Dockerfile: docker/Dockerfile.builder"
# 检查是否需要重新构建 (这一步可以进一步优化但为了简单起见我们总是构建依赖Docker层缓存)
docker build -t fundamental-workspace-builder -f docker/Dockerfile.builder .
# 提取二进制文件
echo -e "${YELLOW}>>> 正在提取二进制文件...${NC}"
mkdir -p ./dist_bin
# 创建临时容器
docker create --name fundamental-builder-extract fundamental-workspace-builder
# 从容器中复制 target/release 目录下的二进制文件
# 注意: 这里我们复制整个 release 目录可能会太大,我们只复制二进制文件
# 但是 docker cp 不支持通配符复制特定文件列表,所以我们先全部复制出来,或者我们知道名字
# 定义二进制文件映射 (服务目录 -> 二进制名称)
# 如果二进制名称与目录名一致,则只需列出目录名
declare -A SERVICE_BIN_MAP
SERVICE_BIN_MAP=(
["data-persistence-service"]="data-persistence-service-server"
["api-gateway"]="api-gateway"
["alphavantage-provider-service"]="alphavantage-provider-service"
["tushare-provider-service"]="tushare-provider-service"
["finnhub-provider-service"]="finnhub-provider-service"
["yfinance-provider-service"]="yfinance-provider-service"
["report-generator-service"]="report-generator-service"
["workflow-orchestrator-service"]="workflow-orchestrator-service"
# ["mock-provider-service"]="mock-provider-service" # Skipped for Prod
)
# Total size counter
for SERVICE_DIR in "${!SERVICE_BIN_MAP[@]}"; do
BINARY_NAME="${SERVICE_BIN_MAP[$SERVICE_DIR]}"
echo "Extracting: $BINARY_NAME"
docker cp "fundamental-builder-extract:/usr/src/app/target/release/$BINARY_NAME" "./dist_bin/$BINARY_NAME"
done
# Remove the temporary container
docker rm -f fundamental-builder-extract
echo -e "${GREEN}√ Binary extraction complete${NC}"
# ==========================================
# Stage 2: Frontend build
# ==========================================
echo -e "\n${YELLOW}>>> [Stage 2/3] Building the frontend service...${NC}"
FRONTEND_IMAGE="$NAMESPACE/frontend:$VERSION"
docker build -t "$FRONTEND_IMAGE" -f docker/Dockerfile.frontend.prod .
echo -e "${YELLOW}>>> Pushing the frontend image...${NC}"
docker push "$FRONTEND_IMAGE"
echo -e "${GREEN}√ Frontend done${NC}"
# ==========================================
# Stage 3: Package & distribute (Package Many)
# ==========================================
echo -e "\n${YELLOW}>>> [Stage 3/3] Packaging and pushing backend microservices...${NC}"
TOTAL_SIZE=0
for entry in "${SERVICES[@]}"; do
KEY="${entry%%:*}"
DOCKERFILE="${entry#*:}"
IMAGE_NAME="$NAMESPACE/$KEY:$VERSION"
for SERVICE_DIR in "${!SERVICE_BIN_MAP[@]}"; do
BINARY_NAME="${SERVICE_BIN_MAP[$SERVICE_DIR]}"
IMAGE_NAME="$NAMESPACE/$SERVICE_DIR:$VERSION"
echo -e "\n${YELLOW}>>> 正在构建 $KEY ...${NC}"
echo "使用 Dockerfile: $DOCKERFILE"
echo -e "\n------------------------------------------------"
echo -e "${YELLOW}处理服务: $SERVICE_DIR${NC}"
# 构建镜像
# 注意:构建上下文始终为项目根目录 (.)
docker build -t "$IMAGE_NAME" -f "$DOCKERFILE" .
# 准备构建上下文
CONTEXT_DIR="./temp_build_context/$SERVICE_DIR"
rm -rf "$CONTEXT_DIR"
mkdir -p "$CONTEXT_DIR"
mkdir -p "$CONTEXT_DIR/assets"
# 获取镜像大小 (MB)
SIZE_BYTES=$(docker inspect "$IMAGE_NAME" --format='{{.Size}}')
SIZE_MB=$(echo "scale=2; $SIZE_BYTES / 1024 / 1024" | bc)
# 1. 复制二进制文件并重命名为 app
cp "./dist_bin/$BINARY_NAME" "$CONTEXT_DIR/app"
echo -e "${GREEN}$KEY 构建完成. 大小: ${SIZE_MB} MB${NC}"
# 2. 复制配置目录 (如果需要)
# data-persistence-service 等服务需要根目录的 config
cp -r config "$CONTEXT_DIR/config"
# 累加大小
TOTAL_SIZE=$(echo "$TOTAL_SIZE + $SIZE_BYTES" | bc)
# 检查单个镜像大小是否异常 (例如超过 500MB 对于 Rust 微服务来说通常是不正常的,除非包含大模型)
if (( $(echo "$SIZE_MB > 500" | bc -l) )); then
echo -e "${RED}警告: $KEY 镜像大小超过 500MB请检查 Dockerfile 是否包含不必要的文件!${NC}"
# 这里我们可以选择暂停询问用户,或者只是警告
# 3. 复制服务特定的资产 (Assets)
# 3.1 Migrations
if [ -d "services/$SERVICE_DIR/migrations" ]; then
echo " - 包含 migrations"
mkdir -p "$CONTEXT_DIR/assets/migrations"
cp -r "services/$SERVICE_DIR/migrations/"* "$CONTEXT_DIR/assets/migrations/"
fi
echo -e "${YELLOW}>>> 正在推送 $KEY 到 Harbor ...${NC}"
# 3.2 Templates
if [ -d "services/$SERVICE_DIR/templates" ]; then
echo " - 包含 templates"
mkdir -p "$CONTEXT_DIR/assets/templates"
cp -r "services/$SERVICE_DIR/templates/"* "$CONTEXT_DIR/assets/templates/"
fi
# 3.3 Cookies
if [ -f "services/$SERVICE_DIR/cookies.txt" ]; then
echo " - 包含 cookies.txt"
cp "services/$SERVICE_DIR/cookies.txt" "$CONTEXT_DIR/assets/cookies.txt"
fi
# 3.4 Web Assets (e.g. data-persistence-service assets folder if exists)
if [ -d "services/$SERVICE_DIR/assets" ]; then
echo " - 包含 web assets"
cp -r "services/$SERVICE_DIR/assets/"* "$CONTEXT_DIR/assets/"
fi
# 4. 构建极简镜像
# 不需要传递构建参数,因为文件已经准备好了
docker build -t "$IMAGE_NAME" -f docker/Dockerfile.dist "$CONTEXT_DIR"
# 5. 推送
echo -e "${YELLOW} 推送 $SERVICE_DIR 到 Harbor ...${NC}"
docker push "$IMAGE_NAME"
# 统计大小
SIZE_BYTES=$(docker inspect "$IMAGE_NAME" --format='{{.Size}}')
TOTAL_SIZE=$(echo "$TOTAL_SIZE + $SIZE_BYTES" | bc)
done
TOTAL_SIZE_MB=$(echo "scale=2; $TOTAL_SIZE / 1024 / 1024" | bc)
echo -e "\n${GREEN}=== All images processed ===${NC}"
echo -e "${GREEN}Total size: ${TOTAL_SIZE_MB} MB${NC}"
echo -e "${GREEN}Backend total size: ${TOTAL_SIZE_MB} MB${NC}"
# Check whether the total size exceeds 1GB (1024 MB)
if (( $(echo "$TOTAL_SIZE_MB > 1024" | bc -l) )); then
echo -e "${RED}Warning: total image size exceeds 1GB; mind the remote registry's space limits!${NC}"
else
echo -e "${GREEN}Total size is within the 1GB limit.${NC}"
fi
# Generate the docker-compose.server.yml used on the server
# ==========================================
# Stage 4: Generate deployment files
# ==========================================
echo -e "\n${YELLOW}>>> Generating server deployment file docker-compose.server.yml ...${NC}"
cat > docker-compose.server.yml <<EOF
cat > docker-compose.server.yml <<YAML
services:
postgres-db:
image: timescale/timescaledb:2.15.2-pg16
@ -97,147 +181,141 @@ services:
retries: 10
networks:
- app-network
restart: always
nats:
image: nats:2.9
container_name: fundamental-nats
volumes:
- nats_data:/data
networks:
- app-network
restart: always
data-persistence-service:
image: $NAMESPACE/data-persistence-service:$VERSION
container_name: data-persistence-service
restart: unless-stopped
environment:
HOST: 0.0.0.0
PORT: 3000
DATABASE_URL: postgresql://postgres:postgres@postgres-db:5432/fundamental
RUST_LOG: info
RUST_BACKTRACE: "1"
SKIP_MIGRATIONS_ON_MISMATCH: "1"
depends_on:
postgres-db:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:3000/health >/dev/null || exit 1"]
interval: 10s
timeout: 5s
retries: 5
networks:
- app-network
  api-gateway:
    image: $NAMESPACE/api-gateway:$VERSION
    container_name: api-gateway
    environment:
      SERVER_PORT: 4000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      REPORT_GENERATOR_SERVICE_URL: http://report-generator-service:8004
      RUST_LOG: info,axum=info
      RUST_BACKTRACE: "1"
    depends_on:
      nats:
        condition: service_started
      data-persistence-service:
        condition: service_healthy
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: always
  alphavantage-provider-service:
    image: $NAMESPACE/alphavantage-provider-service:$VERSION
    container_name: alphavantage-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8000
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: alphavantage-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:8000/health >/dev/null || exit 1"]
      interval: 5s
      timeout: 5s
      retries: 12
    restart: always
  tushare-provider-service:
    image: $NAMESPACE/tushare-provider-service:$VERSION
    container_name: tushare-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8001
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      TUSHARE_API_URL: http://api.waditu.com
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: tushare-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:8001/health >/dev/null || exit 1"]
      interval: 5s
      timeout: 5s
      retries: 12
    restart: always
  finnhub-provider-service:
    image: $NAMESPACE/finnhub-provider-service:$VERSION
    container_name: finnhub-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8002
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      FINNHUB_API_URL: https://finnhub.io/api/v1
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: finnhub-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:8002/health >/dev/null || exit 1"]
      interval: 5s
      timeout: 5s
      retries: 12
    restart: always
  yfinance-provider-service:
    image: $NAMESPACE/yfinance-provider-service:$VERSION
    container_name: yfinance-provider-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8003
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      API_GATEWAY_URL: http://api-gateway:4000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      SERVICE_HOST: yfinance-provider-service
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
@ -247,41 +325,79 @@ services:
dns:
- 8.8.8.8
- 8.8.4.4
healthcheck:
test: ["CMD-SHELL", "curl -fsS http://localhost:8003/health >/dev/null || exit 1"]
interval: 5s
timeout: 5s
retries: 12
restart: always
  report-generator-service:
    image: $NAMESPACE/report-generator-service:$VERSION
    container_name: report-generator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8004
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      GOTENBERG_URL: http://gotenberg:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
      - gotenberg
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "curl -fsS http://localhost:8004/health >/dev/null || exit 1"]
      interval: 5s
      timeout: 5s
      retries: 12
    restart: always
  workflow-orchestrator-service:
    image: $NAMESPACE/workflow-orchestrator-service:$VERSION
    container_name: workflow-orchestrator-service
    volumes:
      - workflow_data:/mnt/workflow_data
    environment:
      SERVER_PORT: 8005
      NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000
      WORKFLOW_DATA_PATH: /mnt/workflow_data
      RUST_LOG: info
      RUST_BACKTRACE: "1"
    depends_on:
      - nats
      - data-persistence-service
    networks:
      - app-network
    restart: always
gotenberg:
image: gotenberg/gotenberg:8
container_name: gotenberg
networks:
- app-network
restart: always
frontend:
image: $NAMESPACE/frontend:$VERSION
container_name: fundamental-frontend
ports:
- "8080:80" # Map host 8080 to container 80 (Nginx)
depends_on:
api-gateway:
condition: service_healthy
networks:
- app-network
restart: always
volumes:
workflow_data:
pgdata:
nats_data:
networks:
app-network:
YAML
echo -e "${GREEN}生成完成: docker-compose.server.yml${NC}"
echo -e "请将此文件复制到远程服务器,并执行: docker-compose -f docker-compose.server.yml up -d"
echo -e "请执行以下步骤更新远端服务器:"
echo -e "1. 将 docker-compose.server.yml 复制到服务器"
echo -e "2. 在服务器执行: docker-compose -f docker-compose.server.yml pull (拉取最新镜像)"
echo -e "3. 在服务器执行: docker-compose -f docker-compose.server.yml up -d (重启服务)"
echo -e " 或者一键命令: docker-compose -f docker-compose.server.yml up -d --pull always"

View File

@ -4,7 +4,7 @@ set -e
# Configuration
COMPOSE_FILE="docker-compose.test.yml"
export NATS_ADDR="nats://localhost:4223"
export DATA_PERSISTENCE_SERVICE_URL="http://localhost:3001/api/v1"
export DATA_PERSISTENCE_SERVICE_URL="http://localhost:3005"
# For services that might need direct DB access (e.g. persistence tests)
export DATABASE_URL="postgresql://postgres:postgres@localhost:5433/fundamental_test"
@ -47,7 +47,7 @@ function start_env() {
# Simple wait loop for persistence service
local max_retries=30
local count=0
while ! curl -s http://localhost:3005/health > /dev/null; do
sleep 2
count=$((count+1))
if [ $count -ge $max_retries ]; then
@ -102,7 +102,7 @@ function run_tests() {
}
function check_env_ready() {
if curl -s http://localhost:3005/health > /dev/null; then
return 0
else
return 1

View File

@ -44,3 +44,4 @@ anyhow = "1.0"
chrono = { version = "0.4", features = ["serde"] }
sse-stream = "0.2"
futures = "0.3"
async-trait = "0.1.89"

View File

@ -6,7 +6,7 @@ use tracing::{info, warn};
#[derive(Deserialize)]
pub struct TestConnectionRequest {
// This is the MCP endpoint URL
pub api_url: Option<String>,
// The API key is passed for validation but might not be used directly
// in the MCP connection itself, depending on auth mechanism.
pub api_key: Option<String>,
@ -23,20 +23,15 @@ pub struct TestConnectionResponse {
pub async fn test_connection(
Json(payload): Json<TestConnectionRequest>,
) -> impl IntoResponse {
info!("Testing connection to MCP endpoint: {}", payload.api_url);
// Default MCP URL if not provided
let base_url = payload.api_url
.filter(|s| !s.is_empty())
.unwrap_or_else(|| "https://mcp.alphavantage.co/mcp".to_string());
if payload.api_url.is_empty() {
return (
StatusCode::BAD_REQUEST,
Json(TestConnectionResponse {
success: false,
message: "API URL (MCP Endpoint) cannot be empty.".to_string(),
}),
).into_response();
}
info!("Testing connection to MCP endpoint: {}", base_url);
// 要求传入 base MCP URL不包含查询参数与 api_key然后按官方文档拼接 ?apikey=
if payload.api_url.contains('?') {
if base_url.contains('?') {
return (
StatusCode::BAD_REQUEST,
Json(TestConnectionResponse {
@ -45,6 +40,7 @@ pub async fn test_connection(
}),
).into_response();
}
let Some(key) = &payload.api_key else {
return (
StatusCode::BAD_REQUEST,
@ -54,7 +50,8 @@ pub async fn test_connection(
}),
).into_response();
};
let final_url = format!("{}?apikey={}", payload.api_url, key);
let final_url = format!("{}?apikey={}", base_url, key);
info!("Testing MCP with final endpoint: {}", final_url);
let mcp_client = match AvClient::connect(&final_url).await {
Ok(client) => client,
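The fix above hinges on `api_url` deserializing when the field is null or missing. A self-contained check of that behavior (the struct is mirrored locally for illustration; the real type lives in the handler above):

use serde::Deserialize;

#[derive(Deserialize)]
struct TestConnectionRequest {
    api_url: Option<String>,
    api_key: Option<String>,
}

fn main() {
    // Payloads with a null (or absent) api_url used to fail deserialization;
    // they now parse and fall back to the default MCP endpoint in the handler.
    let req: TestConnectionRequest =
        serde_json::from_str(r#"{ "api_key": "demo", "api_url": null }"#).unwrap();
    assert!(req.api_url.is_none());
    assert_eq!(req.api_key.as_deref(), Some("demo"));
}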

View File

@ -25,7 +25,7 @@ async fn poll_and_update_config(state: &AppState) -> Result<()> {
info!("Polling for data source configurations...");
let client = reqwest::Client::new();
let url = format!(
"{}/configs/data_sources",
"{}/api/v1/configs/data_sources",
state.config.data_persistence_service_url
);

View File

@ -0,0 +1,13 @@
use anyhow::Result;
use common_contracts::workflow_types::WorkflowTaskCommand;
use crate::state::AppState;
use crate::workflow_adapter::AlphavantageNode;
use common_contracts::workflow_runner::WorkflowNodeRunner;
use std::sync::Arc;
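/// Bridges the NATS consumer to the shared workflow runtime: wraps this
/// service's provider logic in an AlphavantageNode and hands the task to the
/// generic WorkflowNodeRunner from common-contracts.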
pub async fn handle_workflow_command(state: AppState, nats: async_nats::Client, cmd: WorkflowTaskCommand) -> Result<()> {
let node = Arc::new(AlphavantageNode::new(state));
let runner = WorkflowNodeRunner::new(nats);
runner.run(node, cmd).await
}

View File

@ -6,7 +6,8 @@ mod mapping;
mod message_consumer;
// mod persistence; // Removed
mod state;
mod workflow_adapter;
mod generic_worker;
mod av_client;
mod config_poller;
mod transport;

View File

@ -1,7 +1,6 @@
use crate::error::Result;
use crate::state::{AppState, ServiceOperationalStatus};
use common_contracts::workflow_types::WorkflowTaskCommand;
use futures_util::StreamExt;
use std::time::Duration;
use tracing::{error, info, warn};
@ -24,7 +23,7 @@ pub async fn run(state: AppState) -> Result<()> {
match async_nats::connect(&state.config.nats_addr).await {
Ok(client) => {
info!("Successfully connected to NATS.");
            if let Err(e) = subscribe_workflow(state.clone(), client).await {
error!("NATS subscription error: {}. Reconnecting in 10s...", e);
}
}
@ -36,45 +35,54 @@ pub async fn run(state: AppState) -> Result<()> {
}
}
use common_contracts::ack::TaskAcknowledgement;

async fn subscribe_workflow(state: AppState, client: async_nats::Client) -> Result<()> {
    // Alphavantage routing key: provider.alphavantage
    let subject = "workflow.cmd.provider.alphavantage".to_string();
    let mut subscriber = client.subscribe(subject.clone()).await?;
    info!("Workflow Consumer started on '{}'", subject);
while let Some(message) = subscriber.next().await {
// Check status
let current_status = state.status.read().await.clone();
if matches!(current_status, ServiceOperationalStatus::Degraded {..}) {
warn!("Service became degraded. Disconnecting from NATS and pausing consumption.");
warn!("Service became degraded. Disconnecting from NATS.");
// Reject if degraded
if let Some(reply_to) = message.reply {
let ack = TaskAcknowledgement::Rejected { reason: "Service degraded".to_string() };
if let Ok(payload) = serde_json::to_vec(&ack) {
let _ = client.publish(reply_to, payload.into()).await;
}
}
subscriber.unsubscribe().await?;
return Ok(());
}
info!("Received NATS message.");
// Accept
if let Some(reply_to) = message.reply.clone() {
let ack = TaskAcknowledgement::Accepted;
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = client.publish(reply_to, payload.into()).await {
error!("Failed to send Acceptance Ack: {}", e);
}
}
}
let state = state.clone();
let client = client.clone();
tokio::spawn(async move {
            match serde_json::from_slice::<WorkflowTaskCommand>(&message.payload) {
                Ok(cmd) => {
                    info!("Received workflow command for task: {}", cmd.task_id);
                    if let Err(e) = crate::generic_worker::handle_workflow_command(state, client, cmd).await {
                        error!("Generic worker handler failed: {}", e);
                    }
                }
                Err(e) => error!("Failed to parse WorkflowTaskCommand: {}", e),
}
});
}
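For context, the other half of this handshake: a minimal sketch of how a dispatcher could use NATS request-reply to wait for the acknowledgement published above (the helper name and error mapping are illustrative, not part of this change set):

use common_contracts::ack::TaskAcknowledgement;

// Hypothetical dispatch helper: publish a task command and block on the
// provider's Accepted/Rejected reply. Timeout handling is omitted for brevity.
async fn dispatch_task(client: &async_nats::Client, payload: Vec<u8>) -> anyhow::Result<()> {
    let reply = client
        .request("workflow.cmd.provider.alphavantage".to_string(), payload.into())
        .await?;
    match serde_json::from_slice::<TaskAcknowledgement>(&reply.payload)? {
        TaskAcknowledgement::Accepted => Ok(()),
        TaskAcknowledgement::Rejected { reason } => {
            Err(anyhow::anyhow!("task rejected by provider: {reason}"))
        }
    }
}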

View File

@ -1,432 +0,0 @@
use crate::error::{Result, AppError};
use crate::mapping::{CombinedFinancials, parse_company_profile, parse_financials, parse_realtime_quote};
use common_contracts::persistence_client::PersistenceClient;
use common_contracts::dtos::{ProviderCacheDto, SessionDataDto};
use crate::state::{AppState, TaskStore};
use chrono::{Utc, Datelike, Duration};
use common_contracts::messages::{FetchCompanyDataCommand, FinancialsPersistedEvent, DataFetchFailedEvent};
use common_contracts::observability::{TaskProgress, ObservabilityTaskStatus};
use tracing::{error, info, instrument, warn};
use uuid::Uuid;
use serde_json::Value;
#[instrument(skip(state, command, publisher), fields(request_id = %command.request_id, symbol = %command.symbol))]
pub async fn handle_fetch_command(
state: AppState,
command: FetchCompanyDataCommand,
publisher: async_nats::Client,
) -> Result<()> {
match handle_fetch_command_inner(state.clone(), &command, &publisher).await {
Ok(_) => Ok(()),
Err(e) => {
error!("AlphaVantage workflow failed: {}", e);
// Publish failure event
let event = DataFetchFailedEvent {
request_id: command.request_id,
symbol: command.symbol.clone(),
error: e.to_string(),
provider_id: Some("alphavantage".to_string()),
};
let _ = publisher
.publish(
"events.data.fetch_failed".to_string(),
serde_json::to_vec(&event).unwrap().into(),
)
.await;
// Update task status
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.status = ObservabilityTaskStatus::Failed;
task.details = format!("Failed: {}", e);
} else {
// If task doesn't exist (e.g. failed at insert), create a failed task
let task = TaskProgress {
request_id: command.request_id,
task_name: format!("alphavantage:{}", command.symbol),
status: ObservabilityTaskStatus::Failed,
progress_percent: 0,
details: format!("Failed: {}", e),
started_at: Utc::now(),
};
state.tasks.insert(command.request_id, task);
}
Err(e)
}
}
}
async fn handle_fetch_command_inner(
state: AppState,
command: &FetchCompanyDataCommand,
publisher: &async_nats::Client,
) -> Result<()> {
info!("Handling fetch data command.");
let task = TaskProgress {
request_id: command.request_id,
task_name: format!("alphavantage:{}", command.symbol),
status: ObservabilityTaskStatus::InProgress,
progress_percent: 0,
details: "Initializing...".to_string(),
started_at: Utc::now(),
};
state.tasks.insert(command.request_id, task);
let client = match state.get_provider().await {
Some(p) => p,
None => {
let reason = "Execution failed: Alphavantage provider is not available (misconfigured).".to_string();
return Err(AppError::ProviderNotAvailable(reason));
}
};
let persistence_client =
PersistenceClient::new(state.config.data_persistence_service_url.clone());
let symbol = command.symbol.clone();
// Symbol conversion using shared logic
let av_symbol = symbol.to_alphavantage();
info!("Using symbol for AlphaVantage: {}", av_symbol);
update_task_progress(
&state.tasks,
command.request_id,
10,
"Checking cache...",
None,
)
.await;
// --- 1. Check Cache ---
let cache_key = format!("alphavantage:{}:all", av_symbol);
let (overview_json, income_json, balance_json, cashflow_json, quote_json) = match persistence_client.get_cache(&cache_key).await.map_err(|e| AppError::Internal(e.to_string()))? {
Some(cache_entry) => {
info!("Cache HIT for {}", cache_key);
// Deserialize tuple of JSONs
let data: (Value, Value, Value, Value, Value) = serde_json::from_value(cache_entry.data_payload)
.map_err(|e| AppError::Internal(format!("Failed to deserialize cache: {}", e)))?;
update_task_progress(
&state.tasks,
command.request_id,
50,
"Data retrieved from cache",
None,
).await;
data
},
None => {
info!("Cache MISS for {}", cache_key);
update_task_progress(
&state.tasks,
command.request_id,
20,
"Fetching from AlphaVantage API...",
None,
).await;
let params_overview = vec![("symbol", av_symbol.as_str())];
let params_income = vec![("symbol", av_symbol.as_str())];
let params_balance = vec![("symbol", av_symbol.as_str())];
let params_cashflow = vec![("symbol", av_symbol.as_str())];
// Add datatype=json to force JSON response if supported (or at least Python-dict like)
let params_quote = vec![("symbol", av_symbol.as_str()), ("datatype", "json")];
let overview_json = client.query("COMPANY_OVERVIEW", &params_overview).await?;
check_av_response(&overview_json)?;
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Rate limit protection
let quote_json = client.query("GLOBAL_QUOTE", &params_quote).await?;
check_av_response(&quote_json)?;
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
let income_json = client.query("INCOME_STATEMENT", &params_income).await?;
check_av_response(&income_json)?;
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
let balance_json = client.query("BALANCE_SHEET", &params_balance).await?;
check_av_response(&balance_json)?;
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
let cashflow_json = client.query("CASH_FLOW", &params_cashflow).await?;
check_av_response(&cashflow_json)?;
let data = (
overview_json,
income_json,
balance_json,
cashflow_json,
quote_json
);
// Write to Cache
let payload = serde_json::json!(data);
persistence_client.set_cache(&ProviderCacheDto {
cache_key,
data_payload: payload,
expires_at: Utc::now() + Duration::hours(24),
updated_at: None,
}).await.map_err(|e| AppError::Internal(e.to_string()))?;
data
}
};
update_task_progress(
&state.tasks,
command.request_id,
70,
"Data fetched, processing...",
None,
)
.await;
// --- 2. Transform and Snapshot Data ---
// 2.1 Profile
if let Some(_symbol_val) = overview_json.get("Symbol") {
match parse_company_profile(overview_json) {
Ok(profile_to_persist) => {
// Update Global Profile
// REMOVED: upsert_company_profile is deprecated.
// let _ = persistence_client.upsert_company_profile(profile_to_persist.clone()).await;
// Snapshot Profile
persistence_client.insert_session_data(&SessionDataDto {
request_id: command.request_id,
symbol: command.symbol.to_string(),
provider: "alphavantage".to_string(),
data_type: "company_profile".to_string(),
data_payload: serde_json::to_value(&profile_to_persist).unwrap(),
created_at: None,
}).await.map_err(|e| AppError::Internal(e.to_string()))?;
},
Err(e) => {
warn!("Failed to parse CompanyProfile: {}", e);
}
}
} else {
// If Symbol is missing but check_av_response passed, it might be an empty object {}
warn!("COMPANY_OVERVIEW returned JSON without 'Symbol' field: {:?}", overview_json);
}
// 2.2 Financials
let mut years_updated: Vec<u16> = Vec::new();
if income_json.get("annualReports").is_some() {
let combined_financials = CombinedFinancials {
income: income_json,
balance_sheet: balance_json,
cash_flow: cashflow_json,
};
match parse_financials(combined_financials) {
Ok(financials_to_persist) => {
if !financials_to_persist.is_empty() {
years_updated = financials_to_persist
.iter()
.map(|f| f.period_date.year() as u16)
.collect();
// Snapshot Financials
persistence_client.insert_session_data(&SessionDataDto {
request_id: command.request_id,
symbol: command.symbol.to_string(),
provider: "alphavantage".to_string(),
data_type: "financial_statements".to_string(),
data_payload: serde_json::to_value(&financials_to_persist).unwrap(),
created_at: None,
}).await.map_err(|e| AppError::Internal(e.to_string()))?;
}
},
Err(e) => {
warn!("Failed to parse Financials: {}", e);
}
}
}
// 2.3 Quote
// Fix Python-dict string if necessary
let fixed_quote_json = if let Some(s) = quote_json.as_str() {
if s.trim().starts_with("{'Global Quote'") {
let fixed = s.replace("'", "\"");
match serde_json::from_str::<Value>(&fixed) {
Ok(v) => v,
Err(e) => {
warn!("Failed to fix/parse quoted JSON string: {}. Error: {}", s, e);
quote_json // fallback to original
}
}
} else {
quote_json
}
} else {
quote_json
};
// Realtime quote is global/time-series, so we still use upsert_realtime_quote
let mut summary = format!("Fetched {} years of financial data", years_updated.len());
match parse_realtime_quote(fixed_quote_json, &command.market) {
Ok(mut quote_to_persist) => {
quote_to_persist.symbol = command.symbol.to_string();
// Snapshot Realtime Quote
let _ = persistence_client.insert_session_data(&SessionDataDto {
request_id: command.request_id,
symbol: command.symbol.to_string(),
provider: "alphavantage".to_string(),
data_type: "realtime_quote".to_string(),
data_payload: serde_json::to_value(&quote_to_persist).unwrap(),
created_at: None,
}).await;
summary = format!("Parsed Realtime Quote for {}: Price={}, Volume={:?}",
quote_to_persist.symbol, quote_to_persist.price, quote_to_persist.volume);
},
Err(e) => {
warn!("Failed to parse RealtimeQuote: {}", e);
}
}
update_task_progress(
&state.tasks,
command.request_id,
90,
"Snapshot created, publishing events...",
None,
)
.await;
// --- 3. Publish events ---
let event = FinancialsPersistedEvent {
request_id: command.request_id,
symbol: command.symbol.clone(),
years_updated,
template_id: command.template_id.clone(),
provider_id: Some("alphavantage".to_string()),
data_summary: Some(summary),
};
let subject = "events.data.financials_persisted".to_string();
publisher
.publish(subject, serde_json::to_vec(&event).unwrap().into())
.await?;
// Update Provider Status
// REMOVED: update_provider_status is deprecated or missing in client.
/*
persistence_client.update_provider_status(command.symbol.as_str(), "alphavantage", common_contracts::dtos::ProviderStatusDto {
last_updated: chrono::Utc::now(),
status: TaskStatus::Completed,
data_version: None,
}).await?;
*/
update_task_progress(
&state.tasks,
command.request_id,
100,
"Task completed successfully",
Some(ObservabilityTaskStatus::Completed),
).await;
info!("AlphaVantage task completed successfully.");
Ok(())
}
fn check_av_response(v: &Value) -> Result<()> {
if let Some(note) = v.get("Note").and_then(|s| s.as_str()) {
return Err(AppError::Internal(format!("AlphaVantage Rate Limit: {}", note)));
}
if let Some(info) = v.get("Information").and_then(|s| s.as_str()) {
return Err(AppError::Internal(format!("AlphaVantage Information: {}", info)));
}
Ok(())
}
async fn update_task_progress(tasks: &TaskStore, request_id: Uuid, percent: u8, details: &str, status: Option<ObservabilityTaskStatus>) {
if let Some(mut task) = tasks.get_mut(&request_id) {
task.progress_percent = percent;
task.details = details.to_string();
if let Some(s) = status {
task.status = s;
}
info!("Task update: {}% - {} (Status: {:?})", percent, details, task.status);
}
}
#[cfg(test)]
mod integration_tests {
use super::*;
use crate::config::AppConfig;
use crate::state::AppState;
use std::time::Duration;
use common_contracts::symbol_utils::{CanonicalSymbol, Market};
#[tokio::test]
async fn test_alphavantage_fetch_flow() {
// Check if running in test environment
if std::env::var("NATS_ADDR").is_err() {
// Skip if env vars not set (e.g. running cargo test without script)
// But better to panic to alert developer
// panic!("Must run integration tests with run_component_tests.sh or set env vars");
println!("Skipping integration test (no environment)");
return;
}
// 1. Environment Variables
// Assumed set by external script, but we double check specific overrides for component test
// NATS_ADDR, DATA_PERSISTENCE_SERVICE_URL, ALPHAVANTAGE_API_KEY, ALPHAVANTAGE_MCP_URL
let api_key = std::env::var("ALPHAVANTAGE_API_KEY")
.unwrap_or_else(|_| "PUOO7UPTNXN325NN".to_string());
let mcp_url = std::env::var("ALPHAVANTAGE_MCP_URL")
.expect("ALPHAVANTAGE_MCP_URL must be set");
let config = AppConfig::load().expect("Failed to load config");
let state = AppState::new(config.clone()).expect("Failed to create state");
// 2. Manual Init Provider (Skip Config Poller)
state.update_provider(
Some(api_key),
Some(mcp_url)
).await;
// Wait for connection
let mut connected = false;
for _ in 0..10 {
if state.get_provider().await.is_some() {
connected = true;
break;
}
tokio::time::sleep(Duration::from_millis(500)).await;
}
assert!(connected, "Failed to connect to AlphaVantage MCP Provider");
// 3. Construct Command
let request_id = Uuid::new_v4();
let cmd = FetchCompanyDataCommand {
request_id,
symbol: CanonicalSymbol::new("IBM", &Market::US),
market: "US".to_string(),
template_id: Some("default".to_string()),
output_path: None,
};
// 4. NATS
let nats_client = async_nats::connect(&config.nats_addr).await
.expect("Failed to connect to NATS");
// 5. Run
let result = handle_fetch_command_inner(state.clone(), &cmd, &nats_client).await;
// 6. Assert
assert!(result.is_ok(), "Worker execution failed: {:?}", result.err());
let task = state.tasks.get(&request_id).expect("Task should exist");
assert_eq!(task.status, ObservabilityTaskStatus::Completed);
}
}

View File

@ -0,0 +1,145 @@
use async_trait::async_trait;
use anyhow::{Result, anyhow, Context};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::time::Duration;
use tokio::time::sleep;
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, ArtifactContent, CacheKey};
use common_contracts::data_formatting;
use crate::state::AppState;
use crate::mapping;
pub struct AlphavantageNode {
state: AppState,
}
impl AlphavantageNode {
pub fn new(state: AppState) -> Self {
Self { state }
}
}
#[async_trait]
impl WorkflowNode for AlphavantageNode {
fn node_type(&self) -> &str {
"alphavantage"
}
fn get_cache_config(&self, config: &Value) -> Option<(CacheKey, Duration)> {
let symbol = config.get("symbol").and_then(|s| s.as_str())?;
let key_parts = vec![
"alphavantage",
"company_data",
symbol,
"all"
];
let cache_key = CacheKey(key_parts.join(":"));
let ttl = Duration::from_secs(86400); // 24h
Some((cache_key, ttl))
}
async fn execute(&self, _ctx: &NodeContext, config: &Value) -> Result<NodeExecutionResult> {
let symbol = config.get("symbol").and_then(|s| s.as_str()).unwrap_or("").to_string();
if symbol.is_empty() {
return Err(anyhow!("Missing symbol in config"));
}
// 1. Get Provider (MCP Client)
let provider = self.state.get_provider().await
.ok_or_else(|| anyhow!("Alphavantage Provider not initialized"))?;
// 2. Fetch Data via MCP (Sequential with Rate Limit Protection)
// COMPANY_OVERVIEW
let overview_json = provider.query("COMPANY_OVERVIEW", &[("symbol", &symbol)]).await
.context("Failed to fetch COMPANY_OVERVIEW")?;
check_av_response(&overview_json)?;
sleep(Duration::from_secs(2)).await;
// GLOBAL_QUOTE
let _quote_json = provider.query("GLOBAL_QUOTE", &[("symbol", &symbol), ("datatype", "json")]).await
.context("Failed to fetch GLOBAL_QUOTE")?;
// check_av_response(&quote_json)?; // Quote not strictly required for Profile/Financials report
sleep(Duration::from_secs(2)).await;
// INCOME_STATEMENT
let income_json = provider.query("INCOME_STATEMENT", &[("symbol", &symbol)]).await
.context("Failed to fetch INCOME_STATEMENT")?;
check_av_response(&income_json)?;
sleep(Duration::from_secs(2)).await;
// BALANCE_SHEET
let balance_json = provider.query("BALANCE_SHEET", &[("symbol", &symbol)]).await
.context("Failed to fetch BALANCE_SHEET")?;
check_av_response(&balance_json)?;
sleep(Duration::from_secs(2)).await;
// CASH_FLOW
let cashflow_json = provider.query("CASH_FLOW", &[("symbol", &symbol)]).await
.context("Failed to fetch CASH_FLOW")?;
check_av_response(&cashflow_json)?;
// 3. Parse & Combine
let profile = mapping::parse_company_profile(overview_json)?;
let combined = mapping::CombinedFinancials {
income: income_json,
balance_sheet: balance_json,
cash_flow: cashflow_json,
};
let financials = mapping::parse_financials(combined)?;
// 4. Artifacts
let mut artifacts = HashMap::new();
artifacts.insert("profile.json".to_string(), json!(profile).into());
artifacts.insert("financials.json".to_string(), json!(financials).into());
Ok(NodeExecutionResult {
artifacts,
meta_summary: Some(json!({
"symbol": symbol,
"records": financials.len()
})),
})
}
fn render_report(&self, result: &NodeExecutionResult) -> Result<String> {
let profile_json = match result.artifacts.get("profile.json") {
Some(ArtifactContent::Json(v)) => v,
_ => return Err(anyhow!("Missing profile.json")),
};
let financials_json = match result.artifacts.get("financials.json") {
Some(ArtifactContent::Json(v)) => v,
_ => return Err(anyhow!("Missing financials.json")),
};
let symbol = profile_json["symbol"].as_str().unwrap_or("Unknown");
let mut report_md = String::new();
report_md.push_str(&format!("# Alphavantage Data Report: {}\n\n", symbol));
report_md.push_str("## Company Profile\n\n");
report_md.push_str(&data_formatting::format_data(profile_json));
report_md.push_str("\n\n");
report_md.push_str("## Financial Statements\n\n");
report_md.push_str(&data_formatting::format_data(financials_json));
Ok(report_md)
}
}
fn check_av_response(v: &Value) -> Result<()> {
if let Some(note) = v.get("Note").and_then(|s| s.as_str()) {
return Err(anyhow!("AlphaVantage Rate Limit: {}", note));
}
if let Some(info) = v.get("Information").and_then(|s| s.as_str()) {
return Err(anyhow!("AlphaVantage Information: {}", info));
}
Ok(())
}
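Since AlphavantageNode is the first implementation of this trait in the tree, a stripped-down sketch of what any new provider node must supply may help. The trait surface below is inferred from the adapter above, and the `.into()` conversion from serde_json::Value to an artifact mirrors its usage there:

use async_trait::async_trait;
use anyhow::Result;
use serde_json::{json, Value};
use std::collections::HashMap;
use std::time::Duration;
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, CacheKey};

// Hypothetical no-op node, useful as a template when porting other providers.
pub struct EchoNode;

#[async_trait]
impl WorkflowNode for EchoNode {
    fn node_type(&self) -> &str {
        "echo"
    }

    // Opt out of caching by returning None.
    fn get_cache_config(&self, _config: &Value) -> Option<(CacheKey, Duration)> {
        None
    }

    async fn execute(&self, _ctx: &NodeContext, config: &Value) -> Result<NodeExecutionResult> {
        // Echo the node config back as the sole JSON artifact.
        let mut artifacts = HashMap::new();
        artifacts.insert("echo.json".to_string(), json!({ "config": config }).into());
        Ok(NodeExecutionResult {
            artifacts,
            meta_summary: Some(json!({ "echoed": true })),
        })
    }

    fn render_report(&self, _result: &NodeExecutionResult) -> Result<String> {
        Ok("# Echo Report\n\nNothing to report.".to_string())
    }
}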

View File

@ -5,7 +5,7 @@ edition = "2024"
[dependencies]
# Web Service
axum = "0.8.7"
axum = { version = "0.8.7", features = ["multipart"] }
tokio = { version = "1", features = ["full"] }
tower-http = { version = "0.6.6", features = ["cors", "trace"] }
utoipa = { version = "5.4", features = ["chrono", "uuid"] }
@ -21,7 +21,7 @@ futures-util = "0.3"
async-stream = "0.3"
# HTTP Client
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "stream"] }
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "stream", "multipart"] }
# Concurrency & Async
uuid = { version = "1.8", features = ["v4"] }

View File

@ -2,26 +2,26 @@ use crate::error::Result;
use crate::state::AppState;
use axum::{
Router,
    extract::{Path, Query, State, Multipart},
http::StatusCode,
response::{IntoResponse, Json},
routing::{get, post},
};
use common_contracts::config_models::{
    DataSourcesConfig, LlmProvidersConfig,
    AnalysisTemplateSummary, AnalysisTemplateSet
};
use common_contracts::dtos::{SessionDataDto, WorkflowHistoryDto, WorkflowHistorySummaryDto};
use common_contracts::messages::{GenerateReportCommand, StartWorkflowCommand, SyncStateCommand, WorkflowEvent};
use common_contracts::observability::{TaskProgress, ObservabilityTaskStatus};
use common_contracts::registry::ProviderMetadata;
use common_contracts::subjects::{NatsSubject, SubjectMessage};
use common_contracts::symbol_utils::{CanonicalSymbol, Market};
use futures_util::future::join_all;
use futures_util::stream::StreamExt;
use serde::Deserialize;
use std::collections::{HashMap, HashSet};
// use tokio::try_join;
use tracing::{error, info, warn};
use uuid::Uuid;
use utoipa::OpenApi;
@ -132,8 +132,6 @@ async fn mock_models() -> impl IntoResponse {
(StatusCode::OK, [(header::CONTENT_TYPE, "application/json")], Json(body))
}
/// [DELETE /v1/system/history]
#[utoipa::path(
delete,
@ -187,9 +185,17 @@ fn create_v1_router() -> Router<AppState> {
"/configs/llm_providers",
get(get_llm_providers_config).put(update_llm_providers_config),
)
// .route(
// "/configs/analysis_template_sets",
// get(get_analysis_template_sets).put(update_analysis_template_sets),
// )
        .route(
            "/configs/templates",
            get(get_templates),
        )
.route(
"/configs/templates/{id}",
get(get_template_by_id).put(update_template).delete(delete_template),
)
.route(
"/configs/data_sources",
@ -197,132 +203,75 @@ fn create_v1_router() -> Router<AppState> {
)
.route("/configs/test", post(test_data_source_config))
.route("/configs/llm/test", post(test_llm_config))
.route("/config", get(get_legacy_system_config))
// .route("/config", get(get_legacy_system_config))
.route("/discover-models/{provider_id}", get(discover_models))
.route("/discover-models", post(discover_models_preview))
.route("/registry/register", post(registry::register_service))
.route("/registry/heartbeat", post(registry::heartbeat))
.route("/registry/deregister", post(registry::deregister_service))
.route("/registry/providers", get(get_registered_providers))
// PDF Generation Proxy
.route("/generate-pdf", post(proxy_generate_pdf))
}
async fn proxy_generate_pdf(
    State(state): State<AppState>,
    multipart: Multipart,
) -> Result<impl IntoResponse> {
    let url = format!(
        "{}/generate-pdf",
        state.config.report_generator_service_url.trim_end_matches('/')
    );

    let mut form = reqwest::multipart::Form::new();
    let mut multipart = multipart;

    while let Some(field) = multipart.next_field().await.map_err(|e| crate::error::AppError::BadRequest(e.to_string()))? {
        let name = field.name().unwrap_or("").to_string();
        if name == "index.html" {
            let data = field.bytes().await.map_err(|e| crate::error::AppError::BadRequest(e.to_string()))?;
            let part = reqwest::multipart::Part::bytes(data.to_vec())
                .file_name("index.html")
                .mime_str("text/html")
                .map_err(|e| crate::error::AppError::Internal(anyhow::anyhow!(e)))?;
            form = form.part("index.html", part);
        }
    }

    let client = reqwest::Client::new();
    let response = client.post(&url)
        .multipart(form)
        .send()
        .await
        .map_err(|e| crate::error::AppError::Internal(anyhow::anyhow!(e)))?;

    if !response.status().is_success() {
        let error_text = response.text().await.unwrap_or_default();
        return Err(crate::error::AppError::Internal(anyhow::anyhow!("Report service failed: {}", error_text)));
    }

    let content_type = response.headers().get(reqwest::header::CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
        .unwrap_or("application/pdf")
        .to_string();

    let body_stream = axum::body::Body::from_stream(response.bytes_stream());

    Ok(axum::response::Response::builder()
        .status(StatusCode::OK)
        .header("Content-Type", content_type)
        .header("Content-Disposition", "attachment; filename=\"report.pdf\"")
        .body(body_stream)
        .unwrap())
}

// ... rest of file (unchanged) ...
// Including legacy config and other handlers here to complete file write...

// --- Legacy Config Compatibility - REMOVED ---
/*
// Legacy structs and handlers removed to enforce new design.
*/
// --- Helper Functions ---
@ -441,7 +390,7 @@ async fn get_workflow_snapshot(
) -> Result<impl IntoResponse> {
// Note: The persistence service currently returns ALL session data for a request_id
// and ignores the query params. We must filter manually here until persistence service is updated.
    let snapshots = state.persistence_client.get_session_data(request_id).await?;
info!("get_workflow_snapshot: retrieved {} records for {}", snapshots.len(), request_id);
@ -493,10 +442,39 @@ async fn workflow_events_stream(
// 3. Convert NATS stream to SSE stream
let stream = async_stream::stream! {
while let Some(msg) = subscriber.next().await {
// Try to verify payload size
let payload_len = msg.payload.len();
if payload_len > 100 * 1024 { // 100KB warning
warn!("Received large NATS message: {} bytes", payload_len);
}
match serde_json::from_slice::<WorkflowEvent>(&msg.payload) {
Ok(event) => {
// Extra debug for Snapshot
if let WorkflowEvent::WorkflowStateSnapshot { .. } = &event {
info!("Forwarding WorkflowStateSnapshot to SSE client");
}
match axum::response::sse::Event::default().json_data(event) {
Ok(sse_event) => yield Ok::<_, anyhow::Error>(sse_event),
Err(e) => error!("Failed to serialize SSE event: {}", e),
}
},
Err(e) => {
// Try to parse as generic JSON to debug content
error!("Failed to deserialize WorkflowEvent from NATS payload. Error: {}", e);
if let Ok(json_val) = serde_json::from_slice::<serde_json::Value>(&msg.payload) {
// Print first 500 chars of JSON to avoid flooding logs
let json_str = json_val.to_string();
let preview = if json_str.len() > 500 {
format!("{}...", &json_str[..500])
} else {
json_str
};
error!("Payload preview: {}", preview);
} else {
error!("Payload is not valid JSON. Raw bytes len: {}", msg.payload.len());
}
}
}
}
@ -846,7 +824,7 @@ pub struct TestLlmConfigRequest {
pub model_id: String,
}
/// [POST /api/v1/configs/llm/test]
#[utoipa::path(
post,
path = "/api/v1/configs/llm/test",
@ -922,40 +900,118 @@ async fn update_llm_providers_config(
Ok(Json(updated_config))
}
// /// [GET /api/v1/configs/analysis_template_sets]
// #[utoipa::path(
//     get,
//     path = "/api/v1/configs/analysis_template_sets",
//     responses(
//         (status = 200, description = "Analysis template sets configuration", body = AnalysisTemplateSets)
//     )
// )]
// async fn get_analysis_template_sets(State(state): State<AppState>) -> Result<impl IntoResponse> {
//     let config = state
//         .persistence_client
//         .get_analysis_template_sets()
//         .await?;
//     Ok(Json(config))
// }

// /// [PUT /api/v1/configs/analysis_template_sets]
// #[utoipa::path(
//     put,
//     path = "/api/v1/configs/analysis_template_sets",
//     request_body = AnalysisTemplateSets,
//     responses(
//         (status = 200, description = "Updated analysis template sets configuration", body = AnalysisTemplateSets)
//     )
// )]
// async fn update_analysis_template_sets(
//     State(state): State<AppState>,
//     Json(payload): Json<AnalysisTemplateSets>,
// ) -> Result<impl IntoResponse> {
//     let updated_config = state
//         .persistence_client
//         .update_analysis_template_sets(&payload)
//         .await?;
//     Ok(Json(updated_config))
// }

/// [GET /api/v1/configs/templates]
#[utoipa::path(
    get,
    path = "/api/v1/configs/templates",
    responses(
        (status = 200, description = "List of analysis templates", body = Vec<AnalysisTemplateSummary>)
    )
)]
async fn get_templates(State(state): State<AppState>) -> Result<impl IntoResponse> {
    let templates = state.persistence_client.get_templates().await?;
    Ok(Json(templates))
}

/// [GET /api/v1/configs/templates/{id}]
#[utoipa::path(
    get,
    path = "/api/v1/configs/templates/{id}",
    params(
        ("id" = String, Path, description = "Template ID")
    ),
    responses(
        (status = 200, description = "Analysis template details", body = AnalysisTemplateSet),
        (status = 404, description = "Template not found")
    )
)]
async fn get_template_by_id(
    State(state): State<AppState>,
    Path(id): Path<String>,
) -> Result<impl IntoResponse> {
    let template = state.persistence_client.get_template_by_id(&id).await?;
    Ok(Json(template))
}

/// [PUT /api/v1/configs/templates/{id}]
#[utoipa::path(
    put,
    path = "/api/v1/configs/templates/{id}",
    params(
        ("id" = String, Path, description = "Template ID")
    ),
    request_body = AnalysisTemplateSet,
    responses(
        (status = 200, description = "Updated analysis template", body = AnalysisTemplateSet),
        (status = 404, description = "Template not found")
    )
)]
async fn update_template(
    State(state): State<AppState>,
    Path(id): Path<String>,
    Json(payload): Json<AnalysisTemplateSet>,
) -> Result<impl IntoResponse> {
    let updated_template = state
        .persistence_client
        .update_template(&id, &payload)
        .await?;
    Ok(Json(updated_template))
}

/// [DELETE /api/v1/configs/templates/{id}]
#[utoipa::path(
    delete,
    path = "/api/v1/configs/templates/{id}",
    params(
        ("id" = String, Path, description = "Template ID")
    ),
    responses(
        (status = 204, description = "Template deleted"),
        (status = 404, description = "Template not found")
    )
)]
async fn delete_template(
    State(state): State<AppState>,
    Path(id): Path<String>,
) -> Result<impl IntoResponse> {
    state.persistence_client.delete_template(&id).await?;
    Ok(StatusCode::NO_CONTENT)
}
/// [GET /api/v1/configs/data_sources]
@ -1006,12 +1062,13 @@ async fn get_registered_providers(State(state): State<AppState>) -> Result<impl
let entries = state.registry.get_entries();
let mut seen_ids = HashSet::new();
let providers: Vec<ProviderMetadata> = entries
.into_iter()
.filter_map(|entry| {
// Only return DataProvider services that have metadata
if entry.registration.role == common_contracts::registry::ServiceRole::DataProvider {
                entry.registration.metadata.filter(|m| seen_ids.insert(m.id.clone()))
} else {
None
}

View File

@ -1,7 +1,6 @@
mod api;
mod config;
mod error;
mod state;
mod openapi;
#[cfg(test)]

View File

@ -15,8 +15,12 @@ use crate::api;
api::resolve_symbol,
api::get_llm_providers_config,
api::update_llm_providers_config,
// api::get_analysis_template_sets,
// api::update_analysis_template_sets,
api::get_templates,
api::get_template_by_id,
api::update_template,
api::delete_template,
api::get_data_sources_config,
api::update_data_sources_config,
api::test_data_source_config,
@ -32,6 +36,7 @@ use crate::api;
// Workflow
StartWorkflowCommand,
WorkflowEvent,
WorkflowEventType,
WorkflowDag,
TaskNode,
TaskDependency,

View File

@ -1,203 +0,0 @@
//!
//! Data persistence service client
//!
use crate::error::Result;
use common_contracts::config_models::{
AnalysisTemplateSets, DataSourcesConfig, LlmProvidersConfig,
};
use common_contracts::dtos::{CompanyProfileDto, TimeSeriesFinancialDto, WorkflowHistoryDto, WorkflowHistorySummaryDto};
use uuid::Uuid;
#[derive(Clone)]
pub struct PersistenceClient {
client: reqwest::Client,
base_url: String,
}
impl PersistenceClient {
pub fn new(base_url: String) -> Self {
Self {
client: reqwest::Client::new(),
base_url,
}
}
pub async fn get_company_profile(&self, symbol: &str) -> Result<CompanyProfileDto> {
let url = format!("{}/companies/{}", self.base_url, symbol);
let profile = self
.client
.get(&url)
.send()
.await?
.error_for_status()?
.json::<CompanyProfileDto>()
.await?;
Ok(profile)
}
pub async fn get_financials(&self, symbol: &str) -> Result<Vec<TimeSeriesFinancialDto>> {
let url = format!(
"{}/market-data/financial-statements/{}",
self.base_url, symbol
);
let financials = self
.client
.get(&url)
.send()
.await?
.error_for_status()?
.json::<Vec<TimeSeriesFinancialDto>>()
.await?;
Ok(financials)
}
#[allow(dead_code)]
pub async fn get_session_data(
&self,
request_id: Uuid,
provider: Option<&str>,
data_type: Option<&str>,
) -> Result<Vec<common_contracts::dtos::SessionDataDto>> {
let url = format!("{}/session-data/{}", self.base_url, request_id);
let mut req = self.client.get(&url);
if let Some(p) = provider {
req = req.query(&[("provider", p)]);
}
if let Some(d) = data_type {
req = req.query(&[("data_type", d)]);
}
let data = req
.send()
.await?
.error_for_status()?
.json::<Vec<common_contracts::dtos::SessionDataDto>>()
.await?;
Ok(data)
}
pub async fn get_workflow_histories(&self, symbol: Option<&str>, limit: Option<i64>) -> Result<Vec<WorkflowHistorySummaryDto>> {
let url = format!("{}/history", self.base_url);
let mut req = self.client.get(&url);
if let Some(s) = symbol {
req = req.query(&[("symbol", s)]);
}
if let Some(l) = limit {
req = req.query(&[("limit", l)]);
}
let resp = req.send().await?.error_for_status()?;
let results = resp.json().await?;
Ok(results)
}
pub async fn get_workflow_history_by_id(&self, request_id: Uuid) -> Result<WorkflowHistoryDto> {
let url = format!("{}/history/{}", self.base_url, request_id);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let result = resp.json().await?;
Ok(result)
}
pub async fn clear_history(&self) -> Result<()> {
let url = format!("{}/system/history", self.base_url);
self.client
.delete(&url)
.send()
.await?
.error_for_status()?;
Ok(())
}
// --- Config Methods ---
pub async fn get_llm_providers_config(&self) -> Result<LlmProvidersConfig> {
let url = format!("{}/configs/llm_providers", self.base_url);
let config = self
.client
.get(&url)
.send()
.await?
.error_for_status()?
.json::<LlmProvidersConfig>()
.await?;
Ok(config)
}
pub async fn update_llm_providers_config(
&self,
payload: &LlmProvidersConfig,
) -> Result<LlmProvidersConfig> {
let url = format!("{}/configs/llm_providers", self.base_url);
let updated_config = self
.client
.put(&url)
.json(payload)
.send()
.await?
.error_for_status()?
.json::<LlmProvidersConfig>()
.await?;
Ok(updated_config)
}
pub async fn get_analysis_template_sets(&self) -> Result<AnalysisTemplateSets> {
let url = format!("{}/configs/analysis_template_sets", self.base_url);
let config = self
.client
.get(&url)
.send()
.await?
.error_for_status()?
.json::<AnalysisTemplateSets>()
.await?;
Ok(config)
}
pub async fn update_analysis_template_sets(
&self,
payload: &AnalysisTemplateSets,
) -> Result<AnalysisTemplateSets> {
let url = format!("{}/configs/analysis_template_sets", self.base_url);
let updated_config = self
.client
.put(&url)
.json(payload)
.send()
.await?
.error_for_status()?
.json::<AnalysisTemplateSets>()
.await?;
Ok(updated_config)
}
pub async fn get_data_sources_config(&self) -> Result<DataSourcesConfig> {
let url = format!("{}/configs/data_sources", self.base_url);
let config = self
.client
.get(&url)
.send()
.await?
.error_for_status()?
.json::<DataSourcesConfig>()
.await?;
Ok(config)
}
pub async fn update_data_sources_config(
&self,
payload: &DataSourcesConfig,
) -> Result<DataSourcesConfig> {
let url = format!("{}/configs/data_sources", self.base_url);
let updated_config = self
.client
.put(&url)
.json(payload)
.send()
.await?
.error_for_status()?
.json::<DataSourcesConfig>()
.await?;
Ok(updated_config)
}
}

View File

@ -1,6 +1,6 @@
use crate::config::AppConfig;
use crate::error::Result;
use common_contracts::persistence_client::PersistenceClient;
use async_nats::Client as NatsClient;
use common_contracts::registry::{ServiceRegistration, ServiceRole};
use std::collections::HashMap;

View File

@ -0,0 +1,53 @@
use service_kit::api_dto;
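/// Reply sent on the NATS request-reply channel when a provider receives a
/// workflow task command; dispatchers treat anything other than `Accepted`
/// as a scheduling failure (see the message consumer above).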
#[api_dto]
pub enum TaskAcknowledgement {
Accepted,
Rejected { reason: String },
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
#[test]
fn test_ack_serialization() {
// 1. Test Accepted
let ack = TaskAcknowledgement::Accepted;
let json = serde_json::to_value(&ack).unwrap();
assert_eq!(json, json!("Accepted"));
// 2. Test Rejected
let ack = TaskAcknowledgement::Rejected { reason: "Bad Key".to_string() };
let json = serde_json::to_value(&ack).unwrap();
assert_eq!(json, json!({
"Rejected": {
"reason": "Bad Key"
}
}));
}
#[test]
fn test_ack_deserialization() {
// 1. Test Accepted
let json = json!("Accepted");
let ack: TaskAcknowledgement = serde_json::from_value(json).unwrap();
match ack {
TaskAcknowledgement::Accepted => (),
_ => panic!("Expected Accepted"),
}
// 2. Test Rejected
let json = json!({
"Rejected": {
"reason": "Timeout"
}
});
let ack: TaskAcknowledgement = serde_json::from_value(json).unwrap();
match ack {
TaskAcknowledgement::Rejected { reason } => assert_eq!(reason, "Timeout"),
_ => panic!("Expected Rejected"),
}
}
}

View File

@ -88,6 +88,13 @@ pub struct AnalysisTemplateSet {
pub modules: HashMap<String, AnalysisModuleConfig>,
}
/// Summary of an analysis template (for listing purposes).
#[api_dto]
pub struct AnalysisTemplateSummary {
pub id: String,
pub name: String,
}
/// Configuration for a single analysis module.
pub use crate::configs::AnalysisModuleConfig;
@ -121,6 +128,7 @@ pub enum DataSourceProvider {
Finnhub,
Alphavantage,
Yfinance,
Mock,
}
#[api_dto]

View File

@ -7,8 +7,7 @@ pub struct LlmConfig {
pub model_id: Option<String>,
pub temperature: Option<f32>,
pub max_tokens: Option<u32>,
    pub extra_params: Option<HashMap<String, serde_json::Value>>,
}
#[api_dto]
@ -26,12 +25,7 @@ pub enum SelectionMode {
},
}
pub type ContextSelectorConfig = SelectionMode;
#[api_dto]
#[derive(PartialEq)]

View File

@ -17,3 +17,4 @@ pub mod configs;
pub mod data_formatting;
pub mod workflow_node;
pub mod workflow_runner;
pub mod ack;

View File

@ -95,10 +95,33 @@ pub struct TaskMetadata {
/// The execution trace log path
pub execution_log_path: Option<String>,
/// Additional arbitrary metadata
#[serde(flatten)]
pub extra: HashMap<String, serde_json::Value>,
}
/// Comprehensive snapshot state for a single task
#[api_dto]
pub struct TaskStateSnapshot {
pub task_id: String,
pub status: TaskStatus,
pub logs: Vec<String>, // Historical logs for this task
pub content: Option<String>, // Current streamed content buffer
pub input_commit: Option<String>,
pub output_commit: Option<String>,
pub metadata: Option<TaskMetadata>,
}
#[api_dto]
#[derive(Copy, PartialEq, Eq, Hash)]
pub enum WorkflowEventType {
WorkflowStarted,
TaskStateChanged,
TaskStreamUpdate,
TaskLog,
WorkflowCompleted,
WorkflowFailed,
WorkflowStateSnapshot,
}
// Topic: events.workflow.{request_id}
/// Unified event stream for frontend consumption.
#[api_dto]
@ -158,7 +181,13 @@ pub enum WorkflowEvent {
task_graph: WorkflowDag,
        tasks_status: HashMap<String, TaskStatus>, // Latest status of all tasks
        tasks_output: HashMap<String, Option<String>>, // (Optional) key output digest for completed tasks (commit hash)
        tasks_metadata: HashMap<String, TaskMetadata>, // (New) key metadata for each task
/// New: Detailed state for each task including logs and content buffer
#[serde(default)]
task_states: HashMap<String, TaskStateSnapshot>,
        logs: Vec<String>, // (New) Replay of this session's historical logs (global)
}
}
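The `#[serde(default)]` on `task_states` is what keeps older snapshot producers compatible. A self-contained illustration with simplified stand-in types (the real fields use TaskStatus and TaskStateSnapshot):

use serde::Deserialize;
use std::collections::HashMap;

#[derive(Deserialize)]
struct SnapshotCompat {
    tasks_status: HashMap<String, String>,
    // Missing in payloads from older producers; defaults to an empty map.
    #[serde(default)]
    task_states: HashMap<String, String>,
}

fn main() {
    let old_payload = r#"{ "tasks_status": { "fetch": "Running" } }"#;
    let snap: SnapshotCompat = serde_json::from_str(old_payload).unwrap();
    assert!(snap.task_states.is_empty());
    assert_eq!(snap.tasks_status.len(), 1);
}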

View File

@ -4,7 +4,8 @@ use crate::dtos::{
NewWorkflowHistory, WorkflowHistoryDto, WorkflowHistorySummaryDto
};
use crate::config_models::{
    DataSourcesConfig, LlmProvidersConfig,
    AnalysisTemplateSet, AnalysisTemplateSummary
};
use reqwest::{Client, StatusCode};
use uuid::Uuid;
@ -27,7 +28,7 @@ impl PersistenceClient {
// --- Workflow History (NEW) ---
pub async fn create_workflow_history(&self, dto: &NewWorkflowHistory) -> Result<WorkflowHistoryDto> {
let url = format!("{}/history", self.base_url);
let url = format!("{}/api/v1/history", self.base_url);
let resp = self.client
.post(&url)
.json(dto)
@ -39,7 +40,7 @@ impl PersistenceClient {
}
pub async fn get_workflow_histories(&self, symbol: Option<&str>, limit: Option<i64>) -> Result<Vec<WorkflowHistorySummaryDto>> {
let url = format!("{}/history", self.base_url);
let url = format!("{}/api/v1/history", self.base_url);
let mut req = self.client.get(&url);
if let Some(s) = symbol {
req = req.query(&[("symbol", s)]);
@ -53,7 +54,7 @@ impl PersistenceClient {
}
pub async fn get_workflow_history_by_id(&self, request_id: Uuid) -> Result<WorkflowHistoryDto> {
let url = format!("{}/history/{}", self.base_url, request_id);
let url = format!("{}/api/v1/history/{}", self.base_url, request_id);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let result = resp.json().await?;
Ok(result)
@ -62,7 +63,7 @@ impl PersistenceClient {
// --- Session Data ---
pub async fn insert_session_data(&self, dto: &SessionDataDto) -> Result<()> {
let url = format!("{}/session-data", self.base_url);
let url = format!("{}/api/v1/session-data", self.base_url);
self.client
.post(&url)
.json(dto)
@ -73,7 +74,7 @@ impl PersistenceClient {
}
pub async fn get_session_data(&self, request_id: Uuid) -> Result<Vec<SessionDataDto>> {
let url = format!("{}/session-data/{}", self.base_url, request_id);
let url = format!("{}/api/v1/session-data/{}", self.base_url, request_id);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let data = resp.json().await?;
Ok(data)
@ -82,7 +83,7 @@ impl PersistenceClient {
// --- Provider Cache ---
pub async fn get_cache(&self, key: &str) -> Result<Option<ProviderCacheDto>> {
let url = format!("{}/provider-cache", self.base_url);
let url = format!("{}/api/v1/provider-cache", self.base_url);
let resp = self.client
.get(&url)
.query(&[("key", key)])
@ -99,7 +100,7 @@ impl PersistenceClient {
}
pub async fn set_cache(&self, dto: &ProviderCacheDto) -> Result<()> {
let url = format!("{}/provider-cache", self.base_url);
let url = format!("{}/api/v1/provider-cache", self.base_url);
self.client
.post(&url)
.json(dto)
@ -111,8 +112,21 @@ impl PersistenceClient {
// --- Existing Methods (Ported for completeness) ---
pub async fn get_financials(&self, symbol: &str) -> Result<Vec<TimeSeriesFinancialDto>> {
let url = format!("{}/api/v1/market-data/financial-statements/{}", self.base_url, symbol);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let financials = resp.json().await?;
Ok(financials)
}
pub async fn clear_history(&self) -> Result<()> {
let url = format!("{}/api/v1/system/history", self.base_url);
self.client.delete(&url).send().await?.error_for_status()?;
Ok(())
}
pub async fn get_company_profile(&self, symbol: &str) -> Result<Option<CompanyProfileDto>> {
let url = format!("{}/companies/{}", self.base_url, symbol);
let url = format!("{}/api/v1/companies/{}", self.base_url, symbol);
let resp = self.client.get(&url).send().await?;
if resp.status() == StatusCode::NOT_FOUND {
return Ok(None);
@ -125,7 +139,7 @@ impl PersistenceClient {
if dtos.is_empty() {
return Ok(());
}
let url = format!("{}/market-data/financials/batch", self.base_url);
let url = format!("{}/api/v1/market-data/financials/batch", self.base_url);
let batch = TimeSeriesFinancialBatchDto { records: dtos };
self.client
@ -140,51 +154,80 @@ impl PersistenceClient {
// --- Configs ---
pub async fn get_data_sources_config(&self) -> Result<DataSourcesConfig> {
let url = format!("{}/configs/data_sources", self.base_url);
let url = format!("{}/api/v1/configs/data_sources", self.base_url);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let config = resp.json().await?;
Ok(config)
}
pub async fn update_data_sources_config(&self, config: &DataSourcesConfig) -> Result<DataSourcesConfig> {
let url = format!("{}/configs/data_sources", self.base_url);
let url = format!("{}/api/v1/configs/data_sources", self.base_url);
let resp = self.client.put(&url).json(config).send().await?.error_for_status()?;
let updated = resp.json().await?;
Ok(updated)
}
pub async fn get_llm_providers_config(&self) -> Result<LlmProvidersConfig> {
let url = format!("{}/configs/llm_providers", self.base_url);
let url = format!("{}/api/v1/configs/llm_providers", self.base_url);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let config = resp.json().await?;
Ok(config)
}
pub async fn update_llm_providers_config(&self, config: &LlmProvidersConfig) -> Result<LlmProvidersConfig> {
let url = format!("{}/configs/llm_providers", self.base_url);
let url = format!("{}/api/v1/configs/llm_providers", self.base_url);
let resp = self.client.put(&url).json(config).send().await?.error_for_status()?;
let updated = resp.json().await?;
Ok(updated)
}
pub async fn get_analysis_template_sets(&self) -> Result<AnalysisTemplateSets> {
let url = format!("{}/configs/analysis_template_sets", self.base_url);
// pub async fn get_analysis_template_sets(&self) -> Result<AnalysisTemplateSets> {
// let url = format!("{}/api/v1/configs/analysis_template_sets", self.base_url);
// let resp = self.client.get(&url).send().await?.error_for_status()?;
// let config = resp.json().await?;
// Ok(config)
// }
// pub async fn update_analysis_template_sets(&self, config: &AnalysisTemplateSets) -> Result<AnalysisTemplateSets> {
// let url = format!("{}/api/v1/configs/analysis_template_sets", self.base_url);
// let resp = self.client.put(&url).json(config).send().await?.error_for_status()?;
// let updated = resp.json().await?;
// Ok(updated)
// }
// --- Templates (Granular API) ---
pub async fn get_templates(&self) -> Result<Vec<AnalysisTemplateSummary>> {
let url = format!("{}/api/v1/templates", self.base_url);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let config = resp.json().await?;
Ok(config)
let summaries = resp.json().await?;
Ok(summaries)
}
pub async fn update_analysis_template_sets(&self, config: &AnalysisTemplateSets) -> Result<AnalysisTemplateSets> {
let url = format!("{}/configs/analysis_template_sets", self.base_url);
let resp = self.client.put(&url).json(config).send().await?.error_for_status()?;
pub async fn get_template_by_id(&self, id: &str) -> Result<AnalysisTemplateSet> {
let url = format!("{}/api/v1/templates/{}", self.base_url, id);
let resp = self.client.get(&url).send().await?.error_for_status()?;
let template = resp.json().await?;
Ok(template)
}
pub async fn update_template(&self, id: &str, template: &AnalysisTemplateSet) -> Result<AnalysisTemplateSet> {
let url = format!("{}/api/v1/templates/{}", self.base_url, id);
let resp = self.client.put(&url).json(template).send().await?.error_for_status()?;
let updated = resp.json().await?;
Ok(updated)
}
pub async fn delete_template(&self, id: &str) -> Result<()> {
let url = format!("{}/api/v1/templates/{}", self.base_url, id);
self.client.delete(&url).send().await?.error_for_status()?;
Ok(())
}
// --- Deprecated/Legacy Support ---
pub async fn update_provider_status(&self, symbol: &str, provider_id: &str, status: ProviderStatusDto) -> Result<()> {
let url = format!("{}/companies/{}/providers/{}/status", self.base_url, symbol, provider_id);
let url = format!("{}/api/v1/companies/{}/providers/{}/status", self.base_url, symbol, provider_id);
self.client.put(&url).json(&status).send().await?.error_for_status()?;
Ok(())
}
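A minimal usage sketch of the new granular template methods (method names are from the client above; the base URL is an assumption):

async fn list_and_inspect_templates() -> anyhow::Result<()> {
    let client = PersistenceClient::new("http://data-persistence-service:3000".to_string());
    for summary in client.get_templates().await? {
        // Fetch each template individually instead of pulling the whole config blob.
        let template = client.get_template_by_id(&summary.id).await?;
        println!("{}: {}", summary.id, template.name);
    }
    Ok(())
}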

View File

@ -1,6 +1,7 @@
use async_trait::async_trait;
use anyhow::Result;
use serde_json::Value;
use serde::{Serialize, Deserialize};
use std::collections::HashMap;
/// Context provided to the node execution
@ -21,6 +22,7 @@ impl NodeContext {
}
/// Content of an artifact
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ArtifactContent {
Json(Value),
Text(String),
@ -60,11 +62,29 @@ pub struct NodeExecutionResult {
pub meta_summary: Option<Value>,
}
/// New Type for Cache Key to avoid string confusion
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CacheKey(pub String);
impl std::fmt::Display for CacheKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
#[async_trait]
pub trait WorkflowNode: Send + Sync {
/// Unique identifier/type of the node (e.g., "yfinance", "analysis")
fn node_type(&self) -> &str;
/// Cache Configuration Interface
///
/// Returns `None` (default) to bypass cache.
/// Returns `Some((CacheKey, Duration))` to enable caching.
fn get_cache_config(&self, _config: &Value) -> Option<(CacheKey, std::time::Duration)> {
None
}
/// Core execution logic
///
/// # Arguments

View File

@ -3,11 +3,15 @@ use anyhow::Result;
use tracing::{info, error};
use async_nats::Client;
use crate::workflow_types::{WorkflowTaskCommand, WorkflowTaskEvent, TaskStatus, TaskResult};
use crate::messages::WorkflowEvent as CommonWorkflowEvent;
use crate::workflow_node::{WorkflowNode, NodeContext};
use crate::subjects::SubjectMessage;
use workflow_context::WorkerContext;
use crate::workflow_node::{WorkflowNode, NodeContext, ArtifactContent, NodeExecutionResult};
use crate::dtos::ProviderCacheDto;
use crate::persistence_client::PersistenceClient;
use crate::workflow_types::{WorkflowTaskCommand, WorkflowTaskEvent, TaskResult, TaskStatus};
use chrono::Utc;
pub struct WorkflowNodeRunner {
nats: Client,
@ -25,21 +29,132 @@ impl WorkflowNodeRunner {
let task_id = cmd.task_id.clone();
info!("Starting node execution: type={}, task_id={}", node.node_type(), task_id);
// 1. Prepare Context
// 0. Publish Running Event
let running_event = WorkflowTaskEvent {
request_id: cmd.request_id,
task_id: task_id.clone(),
status: TaskStatus::Running,
result: None,
};
self.publish_event(running_event).await?;
// Set up the persistence client (TODO: inject this properly instead of constructing it ad hoc)
// For now, read the standard internal URL from the environment, with a fallback
let persistence_url = std::env::var("DATA_PERSISTENCE_SERVICE_URL").unwrap_or_else(|_| "http://data-persistence-service:3000".to_string());
let persistence = PersistenceClient::new(persistence_url);
// 1. Cache Check (Pre-check)
let cache_config = node.get_cache_config(&cmd.config);
if let Some((cache_key, _)) = &cache_config {
let key_str = cache_key.to_string();
match persistence.get_cache(&key_str).await {
Ok(Some(cached_entry)) => {
info!("Cache HIT for key: {}", key_str);
// Deserialize artifacts
if let Ok(artifacts) = serde_json::from_value::<std::collections::HashMap<String, ArtifactContent>>(cached_entry.data_payload) {
let result = NodeExecutionResult {
artifacts,
meta_summary: Some(serde_json::json!({"source": "cache", "key": key_str})),
};
// Pre-check: Validate cache content by attempting render_report
// If it fails (e.g. missing financials.md in old cache), treat as Cache MISS
match node.render_report(&result) {
Ok(_) => {
// Skip execution, jump to report rendering & commit
return self.process_result(node, &cmd, result).await;
},
Err(e) => {
tracing::warn!("Cache HIT but validation failed: {}. Treating as MISS.", e);
// Fall through to normal execution...
}
}
} else {
error!("Failed to deserialize cached artifacts for {}", key_str);
}
},
Ok(None) => info!("Cache MISS for key: {}", key_str),
Err(e) => error!("Cache lookup failed: {}", e),
}
}
// 2. Prepare Context
let root_path = cmd.storage.root_path.clone();
let req_id = cmd.request_id.to_string();
let base_commit = cmd.context.base_commit.clone().unwrap_or_default();
let context = NodeContext::new(req_id.clone(), base_commit.clone(), root_path.clone());
// 2. Execute Node Logic (Async)
// 3. Execute Node Logic (Async) with Heartbeat
let hb_task_id = task_id.clone();
let hb_req_id = cmd.request_id;
let hb_nats = self.nats.clone();
let heartbeat_handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(5));
loop {
interval.tick().await;
// Publish Heartbeat
let event = WorkflowTaskEvent {
request_id: hb_req_id,
task_id: hb_task_id.clone(),
status: TaskStatus::Running,
result: None,
};
let subject = event.subject().to_string();
if let Ok(payload) = serde_json::to_vec(&event) {
if let Err(e) = hb_nats.publish(subject, payload.into()).await {
error!("Failed to publish heartbeat: {}", e);
}
}
}
});
let exec_result = match node.execute(&context, &cmd.config).await {
Ok(res) => res,
Ok(res) => {
heartbeat_handle.abort();
res
},
Err(e) => {
heartbeat_handle.abort();
return self.handle_failure(&cmd, &e.to_string()).await;
}
};
// 4. Cache Write (Post-write)
if let Some((cache_key, ttl)) = cache_config {
let key_str = cache_key.to_string();
if let Ok(payload) = serde_json::to_value(&exec_result.artifacts) {
let cache_dto = ProviderCacheDto {
cache_key: key_str.clone(),
data_payload: payload,
expires_at: Utc::now() + chrono::Duration::from_std(ttl).unwrap_or(chrono::Duration::hours(24)),
updated_at: None,
};
// Fire and forget cache write
let p_client = persistence.clone();
tokio::spawn(async move {
if let Err(e) = p_client.set_cache(&cache_dto).await {
error!("Failed to write cache for {}: {}", key_str, e);
}
});
}
}
// 5. Process Result (Render, Commit, Publish)
self.process_result(node, &cmd, exec_result).await
}
// Extracted common logic for processing execution result (whether from cache or fresh execution)
async fn process_result<N>(&self, node: Arc<N>, cmd: &WorkflowTaskCommand, exec_result: NodeExecutionResult) -> Result<()>
where N: WorkflowNode + 'static
{
let task_id = cmd.task_id.clone();
let root_path = cmd.storage.root_path.clone();
let req_id = cmd.request_id.to_string();
let base_commit = cmd.context.base_commit.clone().unwrap_or_default();
// 3. Render Report (Sync)
let report_md = match node.render_report(&exec_result) {
Ok(md) => md,
@ -54,7 +169,11 @@ impl WorkflowNodeRunner {
let base_commit_clone = base_commit.clone();
let root_path_clone = root_path.clone();
let req_id_clone = req_id.clone();
// Check for financials.md BEFORE moving artifacts
let has_financials_md = exec_result.artifacts.contains_key("financials.md");
let exec_result_artifacts = exec_result.artifacts;
let report_md_clone = report_md.clone();
let symbol = cmd.config.get("symbol").and_then(|s| s.as_str()).unwrap_or("unknown").to_string();
let symbol_for_blocking = symbol.clone();
@ -77,9 +196,11 @@ impl WorkflowNodeRunner {
ctx.write_file(&full_path, std::str::from_utf8(&bytes).unwrap_or(""))?;
}
// Write Report
let report_path = format!("{}/report.md", base_dir);
ctx.write_file(&report_path, &report_md_clone)?;
// Write Report (ONLY if not superseded by financials.md)
if !has_financials_md {
let report_path = format!("{}/report.md", base_dir);
ctx.write_file(&report_path, &report_md_clone)?;
}
// Write Execution Log
let log_path = format!("{}/_execution.md", base_dir);
@ -98,20 +219,43 @@ impl WorkflowNodeRunner {
Err(e) => return self.handle_failure(&cmd, &format!("Task join error: {}", e)).await,
};
// 5. Publish Stream Update
// 5. Publish Stream Update (disabled -- it would duplicate streamed content)
// LLM-backed workers already stream tokens (LLM client -> Orchestrator -> Frontend),
// so publishing the full report here as a single delta makes the frontend append
// the same content twice. `TaskCompleted` already signals completion, and the final
// content is recoverable from the commit, which together act as the source of truth.
// Tasks that don't stream (e.g. DataFetch) produce structured data rather than a
// Markdown stream, so they lose nothing. Hence the block below is commented out
// instead of being special-cased per node type.
/*
let stream_event = CommonWorkflowEvent::TaskStreamUpdate {
task_id: task_id.clone(),
content_delta: report_md.clone(),
index: 0,
};
self.publish_common(&cmd.request_id, stream_event).await?;
*/
// 5.1 Update Meta Summary with Paths
let mut summary = exec_result.meta_summary.clone().unwrap_or(serde_json::json!({}));
if let Some(obj) = summary.as_object_mut() {
// Reconstruct paths used in VGCS block (must match)
let base_dir = format!("raw/{}/{}", node.node_type(), symbol);
obj.insert("output_path".to_string(), serde_json::Value::String(format!("{}/report.md", base_dir)));
let output_filename = if has_financials_md { "financials.md" } else { "report.md" };
obj.insert("output_path".to_string(), serde_json::Value::String(format!("{}/{}", base_dir, output_filename)));
obj.insert("execution_log_path".to_string(), serde_json::Value::String(format!("{}/_execution.md", base_dir)));
}

View File

@ -5,6 +5,7 @@ mod configs;
mod market_data;
mod system;
mod session_data;
mod templates;
use crate::AppState;
use axum::{
@ -19,15 +20,15 @@ pub fn create_router(_state: AppState) -> Router<AppState> {
.route("/api/v1/system/history", axum::routing::delete(system::clear_history))
// Configs
.route(
"/configs/llm_providers",
"/api/v1/configs/llm_providers",
get(configs::get_llm_providers_config).put(configs::update_llm_providers_config),
)
// .route(
// "/api/v1/configs/analysis_template_sets",
// get(configs::get_analysis_template_sets).put(configs::update_analysis_template_sets),
// )
.route(
"/configs/analysis_template_sets",
get(configs::get_analysis_template_sets).put(configs::update_analysis_template_sets),
)
.route(
"/configs/data_sources",
"/api/v1/configs/data_sources",
get(configs::get_data_sources_config).put(configs::update_data_sources_config),
)
// Companies
@ -72,6 +73,15 @@ pub fn create_router(_state: AppState) -> Router<AppState> {
.route(
"/history/{request_id}",
get(history::get_workflow_history_by_id),
)
// Templates (NEW)
.route(
"/api/v1/templates",
get(templates::get_templates),
)
.route(
"/api/v1/templates/{id}",
get(templates::get_template_by_id).put(templates::update_template).delete(templates::delete_template),
);
router

View File

@ -0,0 +1,80 @@
use axum::{extract::{Path, State}, Json};
use common_contracts::config_models::{AnalysisTemplateSets, AnalysisTemplateSet, AnalysisTemplateSummary};
use service_kit::api;
use crate::{db::system_config, AppState, ServerError};
#[api(GET, "/api/v1/templates", output(detail = "Vec<AnalysisTemplateSummary>"))]
pub async fn get_templates(
State(state): State<AppState>,
) -> Result<Json<Vec<AnalysisTemplateSummary>>, ServerError> {
let pool = state.pool();
// Note: This fetches the entire config blob. An optimization would be to query the
// JSONB fields directly, but for now we follow the document-store pattern as requested.
let config = system_config::get_config::<AnalysisTemplateSets>(pool, "analysis_template_sets").await?;
let mut summaries: Vec<AnalysisTemplateSummary> = config.iter()
.map(|(id, template)| AnalysisTemplateSummary {
id: id.clone(),
name: template.name.clone(),
})
.collect();
// Sort by name for consistency
summaries.sort_by(|a, b| a.name.cmp(&b.name));
Ok(Json(summaries))
}
#[api(GET, "/api/v1/templates/{id}", output(detail = "AnalysisTemplateSet"))]
pub async fn get_template_by_id(
State(state): State<AppState>,
Path(id): Path<String>,
) -> Result<Json<AnalysisTemplateSet>, ServerError> {
let pool = state.pool();
let mut config = system_config::get_config::<AnalysisTemplateSets>(pool, "analysis_template_sets").await?;
let template = config.remove(&id).ok_or_else(|| ServerError::NotFound(format!("Template {} not found", id)))?;
Ok(Json(template))
}
#[api(PUT, "/api/v1/templates/{id}", output(detail = "AnalysisTemplateSet"))]
pub async fn update_template(
State(state): State<AppState>,
Path(id): Path<String>,
Json(payload): Json<AnalysisTemplateSet>,
) -> Result<Json<AnalysisTemplateSet>, ServerError> {
let pool = state.pool();
// 1. Fetch the whole config blob
let mut config = system_config::get_config::<AnalysisTemplateSets>(pool, "analysis_template_sets").await?;
// 2. Update the specific template in the map
config.insert(id.clone(), payload);
// 3. Save the whole blob back
let _ = system_config::update_config(pool, "analysis_template_sets", &config).await?;
// 4. Return the updated template
Ok(Json(config.remove(&id).unwrap()))
}
#[api(DELETE, "/api/v1/templates/{id}")]
pub async fn delete_template(
State(state): State<AppState>,
Path(id): Path<String>,
) -> Result<axum::http::StatusCode, ServerError> {
let pool = state.pool();
// 1. Fetch the whole config blob
let mut config = system_config::get_config::<AnalysisTemplateSets>(pool, "analysis_template_sets").await?;
// 2. Remove the specific template
if config.remove(&id).is_none() {
return Err(ServerError::NotFound(format!("Template {} not found", id)));
}
// 3. Save the whole blob back
let _ = system_config::update_config(pool, "analysis_template_sets", &config).await?;
Ok(axum::http::StatusCode::NO_CONTENT)
}

View File

@ -43,3 +43,4 @@ config = "0.15.19"
# Error Handling
thiserror = "2.0.17"
anyhow = "1.0"
async-trait = "0.1.89"

View File

@ -1,20 +1,84 @@
use std::collections::HashMap;
use axum::{
extract::State,
response::Json,
routing::get,
response::{Json, IntoResponse},
routing::{get, post},
Router,
http::StatusCode,
};
use common_contracts::observability::{HealthStatus, ServiceStatus, TaskProgress};
use crate::state::{AppState, ServiceOperationalStatus};
use serde::Deserialize;
use crate::fh_client::FinnhubClient;
pub fn create_router(app_state: AppState) -> Router {
Router::new()
.route("/health", get(health_check))
.route("/tasks", get(get_current_tasks))
.route("/test", post(test_connection))
.with_state(app_state)
}
#[derive(Deserialize)]
struct TestRequest {
api_key: Option<String>,
api_url: Option<String>,
}
async fn test_connection(
State(state): State<AppState>,
Json(payload): Json<TestRequest>,
) -> impl IntoResponse {
let api_url = payload.api_url
.filter(|s| !s.is_empty())
.unwrap_or_else(|| state.config.finnhub_api_url.clone());
let api_key = if let Some(k) = payload.api_key.filter(|s| !s.is_empty()) {
k
} else if let Some(k) = &state.config.finnhub_api_key {
k.clone()
} else {
return (
StatusCode::BAD_REQUEST,
Json(serde_json::json!({
"success": false,
"message": "No API Key provided or configured"
}))
).into_response();
};
// Validate API Key with a lightweight request (e.g. AAPL quote)
let client_res = FinnhubClient::new(api_url, api_key);
match client_res {
Ok(client) => {
match client.get::<serde_json::Value>("quote", vec![("symbol".to_string(), "AAPL".to_string())]).await {
Ok(_) => (
StatusCode::OK,
Json(serde_json::json!({
"success": true,
"message": "Connection successful"
}))
).into_response(),
Err(e) => (
StatusCode::BAD_REQUEST,
Json(serde_json::json!({
"success": false,
"message": format!("Connection failed: {}", e)
}))
).into_response(),
}
},
Err(e) => (
StatusCode::INTERNAL_SERVER_ERROR,
Json(serde_json::json!({
"success": false,
"message": format!("Failed to initialize client: {}", e)
}))
).into_response()
}
}
/// [GET /health]
/// Provides the current health status of the module.
async fn health_check(State(state): State<AppState>) -> Json<HealthStatus> {
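For a quick smoke test of the new endpoint, something like the following works against a running instance (host, port, and key below are placeholders; the response shape `{"success": bool, "message": String}` is taken from the handler above):

async fn smoke_test_finnhub() -> anyhow::Result<bool> {
    let resp = reqwest::Client::new()
        .post("http://localhost:8080/test") // hypothetical host/port
        .json(&serde_json::json!({ "api_key": "YOUR_KEY", "api_url": null }))
        .send()
        .await?;
    let body: serde_json::Value = resp.json().await?;
    Ok(body["success"].as_bool().unwrap_or(false))
}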

View File

@ -25,7 +25,7 @@ async fn poll_and_update_config(state: &AppState) -> Result<()> {
info!("Polling for data source configurations...");
let client = reqwest::Client::new();
let url = format!(
"{}/configs/data_sources",
"{}/api/v1/configs/data_sources",
state.config.data_persistence_service_url
);

View File

@ -0,0 +1,13 @@
use anyhow::Result;
use common_contracts::workflow_types::WorkflowTaskCommand;
use crate::state::AppState;
use crate::workflow_adapter::FinnhubNode;
use common_contracts::workflow_runner::WorkflowNodeRunner;
use std::sync::Arc;
pub async fn handle_workflow_command(state: AppState, nats: async_nats::Client, cmd: WorkflowTaskCommand) -> Result<()> {
let node = Arc::new(FinnhubNode::new(state));
let runner = WorkflowNodeRunner::new(nats);
runner.run(node, cmd).await
}

View File

@ -7,7 +7,8 @@ mod mapping;
mod message_consumer;
// mod persistence; // Removed
mod state;
mod worker;
mod workflow_adapter;
mod generic_worker;
mod config_poller;
use crate::config::AppConfig;

View File

@ -1,7 +1,6 @@
use crate::error::Result;
use crate::state::{AppState, ServiceOperationalStatus};
use common_contracts::messages::FetchCompanyDataCommand;
use common_contracts::subjects::NatsSubject;
use common_contracts::workflow_types::WorkflowTaskCommand;
use futures_util::StreamExt;
use std::time::Duration;
use tracing::{error, info, warn};
@ -24,7 +23,7 @@ pub async fn run(state: AppState) -> Result<()> {
match async_nats::connect(&state.config.nats_addr).await {
Ok(client) => {
info!("Successfully connected to NATS.");
if let Err(e) = subscribe_and_process(state.clone(), client).await {
if let Err(e) = subscribe_workflow(state.clone(), client).await {
error!("NATS subscription error: {}. Reconnecting in 10s...", e);
}
}
@ -36,54 +35,56 @@ pub async fn run(state: AppState) -> Result<()> {
}
}
async fn subscribe_and_process(state: AppState, client: async_nats::Client) -> Result<()> {
let subject = NatsSubject::DataFetchCommands.to_string();
use common_contracts::ack::TaskAcknowledgement;
async fn subscribe_workflow(state: AppState, client: async_nats::Client) -> Result<()> {
// Finnhub routing key: provider.finnhub
let subject = "workflow.cmd.provider.finnhub".to_string();
let mut subscriber = client.subscribe(subject.clone()).await?;
info!(
"Consumer started, waiting for messages on subject '{}'",
subject
);
info!("Workflow Consumer started on '{}'", subject);
while let Some(message) = subscriber.next().await {
// Check status
let current_status = state.status.read().await.clone();
if matches!(current_status, ServiceOperationalStatus::Degraded {..}) {
warn!("Service became degraded. Disconnecting from NATS and pausing consumption.");
warn!("Service became degraded. Disconnecting from NATS.");
// Reject if degraded
if let Some(reply_to) = message.reply {
let ack = TaskAcknowledgement::Rejected { reason: "Service degraded".to_string() };
if let Ok(payload) = serde_json::to_vec(&ack) {
let _ = client.publish(reply_to, payload.into()).await;
}
}
subscriber.unsubscribe().await?;
return Ok(());
}
info!("Received NATS message.");
let state_clone = state.clone();
let publisher_clone = client.clone();
// Accept
if let Some(reply_to) = message.reply.clone() {
let ack = TaskAcknowledgement::Accepted;
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = client.publish(reply_to, payload.into()).await {
error!("Failed to send Acceptance Ack: {}", e);
}
}
}
let state = state.clone();
let client = client.clone();
tokio::spawn(async move {
match serde_json::from_slice::<FetchCompanyDataCommand>(&message.payload) {
Ok(command) => {
info!("Deserialized command for symbol: {}", command.symbol);
// Skip processing if market is 'CN'
if command.market.to_uppercase() == "CN" {
info!(
"Skipping command for symbol '{}' as its market ('{}') is 'CN'.",
command.symbol, command.market
);
return;
match serde_json::from_slice::<WorkflowTaskCommand>(&message.payload) {
Ok(cmd) => {
info!("Received workflow command for task: {}", cmd.task_id);
if let Err(e) = crate::generic_worker::handle_workflow_command(state, client, cmd).await {
error!("Generic worker handler failed: {}", e);
}
if let Err(e) =
crate::worker::handle_fetch_command(state_clone, command, publisher_clone)
.await
{
error!("Error handling fetch command: {:?}", e);
}
}
Err(e) => {
error!("Failed to deserialize message: {}", e);
}
},
Err(e) => error!("Failed to parse WorkflowTaskCommand: {}", e),
}
});
}
Ok(())
}

View File

@ -1,265 +0,0 @@
use crate::error::{AppError, Result};
use common_contracts::persistence_client::PersistenceClient;
use crate::state::AppState;
use chrono::{Datelike, Utc, Duration};
use common_contracts::dtos::{CompanyProfileDto, TimeSeriesFinancialDto, SessionDataDto, ProviderCacheDto};
use common_contracts::messages::{CompanyProfilePersistedEvent, FetchCompanyDataCommand, FinancialsPersistedEvent, DataFetchFailedEvent};
use common_contracts::observability::{TaskProgress, ObservabilityTaskStatus};
use tracing::{error, info};
pub async fn handle_fetch_command(
state: AppState,
command: FetchCompanyDataCommand,
publisher: async_nats::Client,
) -> Result<()> {
match handle_fetch_command_inner(state.clone(), &command, &publisher).await {
Ok(_) => Ok(()),
Err(e) => {
error!("Finnhub workflow failed: {}", e);
// Publish failure event
let event = DataFetchFailedEvent {
request_id: command.request_id,
symbol: command.symbol.clone(),
error: e.to_string(),
provider_id: Some("finnhub".to_string()),
};
let _ = publisher
.publish(
"events.data.fetch_failed".to_string(),
serde_json::to_vec(&event).unwrap().into(),
)
.await;
// Update task status
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.status = ObservabilityTaskStatus::Failed;
task.details = format!("Failed: {}", e);
} else {
// If task doesn't exist (e.g. failed at insert), create a failed task
let task = TaskProgress {
request_id: command.request_id,
task_name: format!("finnhub:{}", command.symbol),
status: ObservabilityTaskStatus::Failed,
progress_percent: 0,
details: format!("Failed: {}", e),
started_at: Utc::now(),
};
state.tasks.insert(command.request_id, task);
}
Err(e)
}
}
}
async fn handle_fetch_command_inner(
state: AppState,
command: &FetchCompanyDataCommand,
publisher: &async_nats::Client,
) -> Result<()> {
info!("Handling Finnhub fetch data command.");
state.tasks.insert(
command.request_id,
TaskProgress {
request_id: command.request_id,
task_name: format!("finnhub:{}", command.symbol),
status: ObservabilityTaskStatus::InProgress,
progress_percent: 10,
details: "Fetching data from Finnhub".to_string(),
started_at: chrono::Utc::now(),
},
);
let provider = match state.get_provider().await {
Some(p) => p,
None => {
let reason = "Execution failed: Finnhub provider is not available (misconfigured).".to_string();
// Return error to trigger outer handler
return Err(AppError::ProviderNotAvailable(reason));
}
};
let persistence_client = PersistenceClient::new(state.config.data_persistence_service_url.clone());
let symbol = command.symbol.to_string();
// --- 1. Check Cache ---
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.details = "Checking cache...".to_string();
}
let cache_key = format!("finnhub:{}:all", symbol);
let (profile, financials) = match persistence_client.get_cache(&cache_key).await.map_err(|e| AppError::Internal(e.to_string()))? {
Some(cache_entry) => {
info!("Cache HIT for {}", cache_key);
let data: (CompanyProfileDto, Vec<TimeSeriesFinancialDto>) = serde_json::from_value(cache_entry.data_payload)
.map_err(|e| AppError::Internal(format!("Failed to deserialize cache: {}", e)))?;
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.details = "Data retrieved from cache".to_string();
task.progress_percent = 50;
}
data
},
None => {
info!("Cache MISS for {}", cache_key);
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.details = "Fetching from Finnhub API...".to_string();
task.progress_percent = 20;
}
let (p, f) = provider.fetch_all_data(command.symbol.as_str()).await?;
// Write to Cache
let payload = serde_json::json!((&p, &f));
persistence_client.set_cache(&ProviderCacheDto {
cache_key,
data_payload: payload,
expires_at: Utc::now() + Duration::hours(24),
updated_at: None,
}).await.map_err(|e| AppError::Internal(e.to_string()))?;
(p, f)
}
};
// --- 2. Snapshot Data ---
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.details = "Snapshotting data...".to_string();
task.progress_percent = 80;
}
// Global Profile
// REMOVED: upsert_company_profile is deprecated.
// let _ = persistence_client.upsert_company_profile(profile.clone()).await;
// Snapshot Profile
persistence_client.insert_session_data(&SessionDataDto {
request_id: command.request_id,
symbol: symbol.clone(),
provider: "finnhub".to_string(),
data_type: "company_profile".to_string(),
data_payload: serde_json::to_value(&profile).unwrap(),
created_at: None,
}).await.map_err(|e| AppError::Internal(e.to_string()))?;
// Snapshot Financials
persistence_client.insert_session_data(&SessionDataDto {
request_id: command.request_id,
symbol: symbol.clone(),
provider: "finnhub".to_string(),
data_type: "financial_statements".to_string(),
data_payload: serde_json::to_value(&financials).unwrap(),
created_at: None,
}).await.map_err(|e| AppError::Internal(e.to_string()))?;
// Update Provider Status
// REMOVED: update_provider_status is deprecated or missing in client.
/*
persistence_client.update_provider_status(command.symbol.as_str(), "finnhub", common_contracts::dtos::ProviderStatusDto {
last_updated: chrono::Utc::now(),
status: TaskStatus::Completed,
data_version: None,
}).await?;
*/
// --- 3. Publish events ---
let profile_event = CompanyProfilePersistedEvent {
request_id: command.request_id,
symbol: command.symbol.clone(),
};
publisher
.publish(
"events.data.company_profile_persisted".to_string(),
serde_json::to_vec(&profile_event).unwrap().into(),
)
.await?;
let years_set: std::collections::BTreeSet<u16> =
financials.iter().map(|f| f.period_date.year() as u16).collect();
let summary = format!("Fetched {} years of data from Finnhub", years_set.len());
let financials_event = FinancialsPersistedEvent {
request_id: command.request_id,
symbol: command.symbol.clone(),
years_updated: years_set.into_iter().collect(),
template_id: command.template_id.clone(),
provider_id: Some("finnhub".to_string()),
data_summary: Some(summary),
};
publisher
.publish(
"events.data.financials_persisted".to_string(),
serde_json::to_vec(&financials_event).unwrap().into(),
)
.await?;
// 4. Finalize
if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
task.status = ObservabilityTaskStatus::Completed;
task.progress_percent = 100;
task.details = "Workflow finished successfully".to_string();
}
info!("Task {} completed successfully.", command.request_id);
Ok(())
}
#[cfg(test)]
mod integration_tests {
use super::*;
use crate::config::AppConfig;
use crate::state::AppState;
use common_contracts::symbol_utils::{CanonicalSymbol, Market};
use uuid::Uuid;
#[tokio::test]
async fn test_finnhub_fetch_flow() {
if std::env::var("NATS_ADDR").is_err() {
println!("Skipping integration test (no environment)");
return;
}
// 1. Environment
let api_key = std::env::var("FINNHUB_API_KEY")
.unwrap_or_else(|_| "d3fjs5pr01qolkndil0gd3fjs5pr01qolkndil10".to_string());
let api_url = std::env::var("FINNHUB_API_URL")
.unwrap_or_else(|_| "https://finnhub.io/api/v1".to_string());
let config = AppConfig::load().expect("Failed to load config");
let state = AppState::new(config.clone());
// 2. Manual Init Provider
state.update_provider(
Some(api_key),
Some(api_url)
).await;
assert!(state.get_provider().await.is_some());
// 3. Construct Command (AAPL)
let request_id = Uuid::new_v4();
let cmd = FetchCompanyDataCommand {
request_id,
symbol: CanonicalSymbol::new("AAPL", &Market::US),
market: "US".to_string(),
template_id: Some("default".to_string()),
output_path: None,
};
// 4. NATS
let nats_client = async_nats::connect(&config.nats_addr).await
.expect("Failed to connect to NATS");
// 5. Run
let result = handle_fetch_command_inner(state.clone(), &cmd, &nats_client).await;
// 6. Assert
assert!(result.is_ok(), "Worker execution failed: {:?}", result.err());
let task = state.tasks.get(&request_id).expect("Task should exist");
assert_eq!(task.status, ObservabilityTaskStatus::Completed);
}
}

View File

@ -0,0 +1,97 @@
use async_trait::async_trait;
use anyhow::{Result, anyhow, Context};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::time::Duration;
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, ArtifactContent, CacheKey};
use common_contracts::data_formatting;
use crate::state::AppState;
pub struct FinnhubNode {
state: AppState,
}
impl FinnhubNode {
pub fn new(state: AppState) -> Self {
Self { state }
}
}
#[async_trait]
impl WorkflowNode for FinnhubNode {
fn node_type(&self) -> &str {
"finnhub"
}
fn get_cache_config(&self, config: &Value) -> Option<(CacheKey, Duration)> {
let symbol = config.get("symbol").and_then(|s| s.as_str())?;
let key_parts = vec![
"finnhub",
"company_data",
symbol,
"all"
];
let cache_key = CacheKey(key_parts.join(":"));
// Finnhub data - 24h TTL
let ttl = Duration::from_secs(86400);
Some((cache_key, ttl))
}
async fn execute(&self, _ctx: &NodeContext, config: &Value) -> Result<NodeExecutionResult> {
let symbol = config.get("symbol").and_then(|s| s.as_str()).unwrap_or("").to_string();
if symbol.is_empty() {
return Err(anyhow!("Missing symbol in config"));
}
// 1. Fetch Data
let provider = self.state.get_provider().await
.ok_or_else(|| anyhow!("Finnhub Provider not initialized"))?;
let (profile, financials) = provider.fetch_all_data(&symbol).await
.context("Failed to fetch data from Finnhub")?;
// 2. Artifacts
let mut artifacts = HashMap::new();
artifacts.insert("profile.json".to_string(), json!(profile).into());
artifacts.insert("financials.json".to_string(), json!(financials).into());
Ok(NodeExecutionResult {
artifacts,
meta_summary: Some(json!({
"symbol": symbol,
"records": financials.len()
})),
})
}
fn render_report(&self, result: &NodeExecutionResult) -> Result<String> {
let profile_json = match result.artifacts.get("profile.json") {
Some(ArtifactContent::Json(v)) => v,
_ => return Err(anyhow!("Missing profile.json")),
};
let financials_json = match result.artifacts.get("financials.json") {
Some(ArtifactContent::Json(v)) => v,
_ => return Err(anyhow!("Missing financials.json")),
};
let symbol = profile_json["symbol"].as_str().unwrap_or("Unknown");
let mut report_md = String::new();
report_md.push_str(&format!("# Finnhub Data Report: {}\n\n", symbol));
report_md.push_str("## Company Profile\n\n");
report_md.push_str(&data_formatting::format_data(profile_json));
report_md.push_str("\n\n");
report_md.push_str("## Financial Statements\n\n");
report_md.push_str(&data_formatting::format_data(financials_json));
Ok(report_md)
}
}

View File

@ -3,15 +3,17 @@ use axum::{
extract::State,
http::StatusCode,
response::{IntoResponse, Json},
routing::get,
routing::{get, post},
Router,
};
use common_contracts::observability::TaskProgress;
use serde::Deserialize;
pub fn create_router(state: AppState) -> Router {
Router::new()
.route("/health", get(health_check))
.route("/tasks", get(get_tasks))
.route("/test", post(test_connection))
.with_state(state)
}
@ -24,3 +26,24 @@ async fn get_tasks(State(state): State<AppState>) -> impl IntoResponse {
Json(tasks)
}
#[derive(Deserialize)]
struct TestRequest {
// Accept arbitrary parameters but do not process them
#[allow(dead_code)]
api_key: Option<String>,
#[allow(dead_code)]
api_url: Option<String>,
}
async fn test_connection(
Json(_payload): Json<TestRequest>,
) -> impl IntoResponse {
// The Mock Provider always succeeds
(
StatusCode::OK,
Json(serde_json::json!({
"success": true,
"message": "Mock Provider connection successful"
}))
)
}

View File

@ -2,6 +2,7 @@ use anyhow::Result;
use tracing::{info, error};
use common_contracts::workflow_types::WorkflowTaskCommand;
use common_contracts::subjects::NatsSubject;
use common_contracts::ack::TaskAcknowledgement;
use crate::state::AppState;
use futures_util::StreamExt;
use std::sync::Arc;
@ -20,19 +21,55 @@ pub async fn run_consumer(state: AppState) -> Result<()> {
while let Some(message) = subscriber.next().await {
info!("Received Workflow NATS message.");
// 1. Parse Command eagerly to check config
let cmd = match serde_json::from_slice::<WorkflowTaskCommand>(&message.payload) {
Ok(c) => c,
Err(e) => {
error!("Failed to parse WorkflowTaskCommand: {}", e);
continue;
}
};
// 2. Check Simulation Mode
let mode_raw = cmd.config.get("simulation_mode").and_then(|v| v.as_str()).unwrap_or("normal");
let mode = mode_raw.to_lowercase();
info!("Processing task {} with mode: {}", cmd.task_id, mode);
if mode == "timeout_ack" {
info!("Simulating Timeout (No ACK) for task {}", cmd.task_id);
continue; // Skip processing
}
if mode == "reject" {
info!("Simulating Rejection for task {}", cmd.task_id);
if let Some(reply_to) = message.reply {
let ack = TaskAcknowledgement::Rejected { reason: "Simulated Rejection".into() };
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = client.publish(reply_to, payload.into()).await {
error!("Failed to send Rejection ACK: {}", e);
}
}
}
continue;
}
// 3. Normal / Crash / Hang Mode -> Send Accepted
if let Some(reply_to) = message.reply {
let ack = TaskAcknowledgement::Accepted;
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = client.publish(reply_to, payload.into()).await {
error!("Failed to send Acceptance ACK: {}", e);
}
}
}
let state_clone = state.clone();
let client_clone = client.clone();
tokio::spawn(async move {
match serde_json::from_slice::<WorkflowTaskCommand>(&message.payload) {
Ok(cmd) => {
if let Err(e) = handle_workflow_command(state_clone, client_clone, cmd).await {
error!("Error handling workflow command: {:?}", e);
}
}
Err(e) => {
error!("Failed to deserialize workflow message: {}", e);
}
if let Err(e) = handle_workflow_command(state_clone, client_clone, cmd).await {
error!("Error handling workflow command: {:?}", e);
}
});
}
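The simulation modes above line up with an orchestrator-side request/reply dispatch roughly like the following (a sketch; the actual orchestrator code is not part of this diff, and the 5s window is an assumption). Note the timeout path matches the `Rejected { reason: "Timeout" }` shape asserted in the ack tests at the top of this diff:

use std::time::Duration;
use common_contracts::ack::TaskAcknowledgement;

async fn dispatch_with_ack(
    nats: &async_nats::Client,
    payload: Vec<u8>,
) -> anyhow::Result<TaskAcknowledgement> {
    let request = nats.request("workflow.cmd.provider.mock".to_string(), payload.into());
    match tokio::time::timeout(Duration::from_secs(5), request).await {
        Ok(Ok(msg)) => Ok(serde_json::from_slice(&msg.payload)?),
        Ok(Err(e)) => Err(e.into()),
        // No ACK arrived in time -- the path the `timeout_ack` mode exercises.
        Err(_) => Ok(TaskAcknowledgement::Rejected { reason: "Timeout".to_string() }),
    }
}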

View File

@ -5,10 +5,11 @@ use serde_json::{json, Value};
use std::collections::HashMap;
use chrono::NaiveDate;
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, ArtifactContent};
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, ArtifactContent, CacheKey};
use common_contracts::data_formatting;
use common_contracts::dtos::{CompanyProfileDto, TimeSeriesFinancialDto};
use crate::state::AppState;
use std::time::Duration;
pub struct MockNode {
#[allow(dead_code)]
@ -27,7 +28,37 @@ impl WorkflowNode for MockNode {
"mock"
}
fn get_cache_config(&self, config: &Value) -> Option<(CacheKey, Duration)> {
let symbol = config.get("symbol").and_then(|s| s.as_str())?;
let key_parts = vec![
"mock",
"company_data",
symbol,
"all"
];
let cache_key = CacheKey(key_parts.join(":"));
// Mock data is static, but we can cache it for 1 hour
let ttl = Duration::from_secs(3600);
Some((cache_key, ttl))
}
async fn execute(&self, _ctx: &NodeContext, config: &Value) -> Result<NodeExecutionResult> {
let mode = config.get("simulation_mode").and_then(|v| v.as_str()).unwrap_or("normal");
if mode == "hang" {
tracing::info!("Simulating Hang (Sleep 600s)...");
tokio::time::sleep(Duration::from_secs(600)).await;
}
if mode == "crash" {
tracing::info!("Simulating Crash (Process Exit)...");
tokio::time::sleep(Duration::from_secs(1)).await;
std::process::exit(1);
}
let symbol = config.get("symbol").and_then(|s| s.as_str()).unwrap_or("MOCK").to_string();
// Generate Dummy Data

View File

@ -5,7 +5,7 @@ edition = "2024"
[dependencies]
# Web Service
axum = "0.8.7"
axum = { version = "0.8.7", features = ["multipart"] }
tokio = { version = "1.0", features = ["full"] }
tower-http = { version = "0.6.6", features = ["cors"] }
@ -18,7 +18,7 @@ async-nats = "0.45.0"
futures = "0.3"
# Data Persistence Client
reqwest = { version = "0.12.4", default-features = false, features = ["json", "rustls-tls"] }
reqwest = { version = "0.12.4", default-features = false, features = ["json", "rustls-tls", "multipart"] }
# Concurrency & Async
async-trait = "0.1.80"

View File

@ -1,17 +1,19 @@
use std::collections::HashMap;
use axum::{
extract::{State, Query},
extract::{State, Query, Multipart},
http::StatusCode,
response::{Json, sse::{Event, Sse}},
response::{Json, sse::{Event, Sse}, IntoResponse, Response},
routing::{get, post},
Router,
body::Body,
};
use common_contracts::observability::{HealthStatus, ServiceStatus, TaskProgress};
use serde::Deserialize;
use uuid::Uuid;
use crate::state::AppState;
use crate::llm_client::LlmClient;
use futures::Stream; // Ensure futures is available
use futures::Stream;
use reqwest::multipart;
pub fn create_router(app_state: AppState) -> Router {
Router::new()
@ -19,6 +21,7 @@ pub fn create_router(app_state: AppState) -> Router {
.route("/tasks", get(get_current_tasks))
.route("/test-llm", post(test_llm_connection))
.route("/analysis-results/stream", get(stream_analysis_results))
.route("/generate-pdf", post(generate_pdf))
.with_state(app_state)
}
@ -26,7 +29,6 @@ pub fn create_router(app_state: AppState) -> Router {
/// Provides the current health status of the module.
async fn health_check(State(_state): State<AppState>) -> Json<HealthStatus> {
let mut details = HashMap::new();
// In a real scenario, we would check connections to the message bus, etc.
details.insert("message_bus_connection".to_string(), "ok".to_string());
let status = HealthStatus {
@ -79,7 +81,6 @@ pub struct StreamQuery {
}
/// [GET /analysis-results/stream]
/// SSE endpoint for streaming analysis results.
async fn stream_analysis_results(
State(state): State<AppState>,
Query(query): Query<StreamQuery>,
@ -97,7 +98,6 @@ async fn stream_analysis_results(
break;
},
Err(tokio::sync::broadcast::error::RecvError::Lagged(cnt)) => {
// Lagged, maybe log it.
tracing::warn!("Stream lagged by {} messages", cnt);
}
}
@ -106,3 +106,69 @@ async fn stream_analysis_results(
Sse::new(stream).keep_alive(axum::response::sse::KeepAlive::default())
}
/// [POST /generate-pdf]
/// Receives HTML content as multipart form data, sends to Gotenberg, and returns PDF.
async fn generate_pdf(
State(state): State<AppState>,
mut multipart: Multipart,
) -> Result<impl IntoResponse, (StatusCode, String)> {
// 1. Extract HTML content from multipart
let mut html_content = String::new();
while let Some(field) = multipart.next_field().await.map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))? {
let name = field.name().unwrap_or("").to_string();
if name == "index.html" {
html_content = field.text().await.map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))?;
}
}
if html_content.is_empty() {
return Err((StatusCode::BAD_REQUEST, "Missing index.html field".to_string()));
}
// 2. Prepare Gotenberg Request
// Gotenberg expects multipart/form-data with 'files' (index.html)
let form = multipart::Form::new()
.part("files", multipart::Part::text(html_content).file_name("index.html").mime_str("text/html").unwrap())
// Optional: Customize PDF options
.text("marginTop", "0")
.text("marginBottom", "0")
.text("marginLeft", "0")
.text("marginRight", "0")
.text("printBackground", "true") // Print background graphics
.text("preferCssPageSize", "true"); // Use CSS @page size
let gotenberg_url = format!("{}/forms/chromium/convert/html", state.config.gotenberg_url);
tracing::info!("Sending request to Gotenberg: {}", gotenberg_url);
// 3. Call Gotenberg
let client = reqwest::Client::new();
let response = client.post(&gotenberg_url)
.multipart(form)
.send()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, format!("Failed to contact Gotenberg: {}", e)))?;
if !response.status().is_success() {
let error_text = response.text().await.unwrap_or_default();
tracing::error!("Gotenberg error: {}", error_text);
return Err((StatusCode::INTERNAL_SERVER_ERROR, format!("Gotenberg conversion failed: {}", error_text)));
}
// 4. Stream PDF back to client
let content_type = response.headers().get(reqwest::header::CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.unwrap_or("application/pdf")
.to_string();
let body_stream = Body::from_stream(response.bytes_stream());
Ok(Response::builder()
.status(StatusCode::OK)
.header("Content-Type", content_type)
.header("Content-Disposition", "attachment; filename=\"report.pdf\"")
.body(body_stream)
.unwrap())
}
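On the calling side, the endpoint can be exercised with a multipart POST whose field name matches what the handler extracts (`index.html`); host and port below are placeholders:

async fn fetch_pdf(html: String) -> anyhow::Result<Vec<u8>> {
    let form = reqwest::multipart::Form::new().text("index.html", html);
    let resp = reqwest::Client::new()
        .post("http://localhost:8080/generate-pdf") // hypothetical host/port
        .multipart(form)
        .send()
        .await?
        .error_for_status()?;
    Ok(resp.bytes().await?.to_vec())
}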

View File

@ -6,6 +6,12 @@ pub struct AppConfig {
pub nats_addr: String,
pub data_persistence_service_url: String,
pub workflow_data_path: String,
#[serde(default = "default_gotenberg_url")]
pub gotenberg_url: String,
}
fn default_gotenberg_url() -> String {
"http://gotenberg:3000".to_string()
}
#[allow(dead_code)]

View File

@ -63,6 +63,22 @@ pub async fn subscribe_to_commands(
Ok(task_cmd) => {
info!("Received WorkflowTaskCommand for task_id: {}", task_cmd.task_id);
// --- 0. Immediate Acknowledgement ---
if let Some(reply_subject) = message.reply.clone() {
let ack = common_contracts::ack::TaskAcknowledgement::Accepted;
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = nats.publish(reply_subject, payload.into()).await {
error!("Failed to send ACK for task {}: {}", task_cmd.task_id, e);
} else {
info!("ACK sent for task {}", task_cmd.task_id);
}
}
} else {
// This should only happen for fire-and-forget dispatch, which the
// orchestrator doesn't use, but it is worth logging.
tracing::warn!("No reply subject for task {}, cannot send ACK.", task_cmd.task_id);
}
// 1. Extract params from config
let symbol_str = task_cmd.config.get("symbol").and_then(|v| v.as_str());
let market_str = task_cmd.config.get("market").and_then(|v| v.as_str());

View File

@ -25,7 +25,7 @@ impl PersistenceClient {
// --- Config Fetching & Updating Methods ---
pub async fn get_llm_providers_config(&self) -> Result<LlmProvidersConfig> {
let url = format!("{}/configs/llm_providers", self.base_url);
let url = format!("{}/api/v1/configs/llm_providers", self.base_url);
info!("Fetching LLM providers config from {}", url);
let config = self
.client

View File

@ -2,6 +2,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use common_contracts::messages::{GenerateReportCommand, WorkflowEvent};
use common_contracts::workflow_types::{WorkflowTaskEvent, TaskStatus};
use futures_util::StreamExt;
use tracing::{info, instrument, error};
use workflow_context::WorkerContext;
@ -43,6 +44,19 @@ async fn run_vgcs_based_generation(
) -> Result<String> {
info!("Running VGCS based generation for task {:?}", command.task_id);
if let Some(task_id) = &command.task_id {
let running_evt = WorkflowTaskEvent {
request_id: command.request_id,
task_id: task_id.clone(),
status: TaskStatus::Running,
result: None,
};
let subject = common_contracts::subjects::NatsSubject::WorkflowEventTaskCompleted.to_string();
if let Ok(payload) = serde_json::to_vec(&running_evt) {
let _ = state.nats.publish(subject, payload.into()).await;
}
}
let persistence_client = PersistenceClient::new(state.config.data_persistence_service_url.clone());
let llm_providers = persistence_client.get_llm_providers_config().await.map_err(|e| ProviderError::Configuration(e.to_string()))?;
@ -188,21 +202,25 @@ async fn run_vgcs_based_generation(
let mut stream = llm_client.stream_text(final_prompt_to_send).await.map_err(|e| ProviderError::LlmApi(e.to_string()))?;
let mut full_content = String::new();
let mut stream_index = 0;
while let Some(chunk_res) = stream.next().await {
if let Ok(chunk) = chunk_res {
if !chunk.is_empty() {
full_content.push_str(&chunk);
// Publish Stream Update
// Send Stream Update to NATS
if let Some(task_id) = &command.task_id {
let stream_evt = WorkflowEvent::TaskStreamUpdate {
let event = WorkflowEvent::TaskStreamUpdate {
task_id: task_id.clone(),
content_delta: chunk,
index: 0,
content_delta: chunk.clone(),
index: stream_index,
};
if let Ok(payload) = serde_json::to_vec(&stream_evt) {
let subject = common_contracts::subjects::NatsSubject::WorkflowProgress(command.request_id).to_string();
stream_index += 1;
let subject = common_contracts::subjects::NatsSubject::WorkflowProgress(command.request_id).to_string();
// We ignore errors here to prevent blocking the main generation flow
if let Ok(payload) = serde_json::to_vec(&event) {
let _ = state.nats.publish(subject, payload.into()).await;
}
}
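With a monotonically increasing `index` on each delta, a consumer can deduplicate and reassemble chunks even if delivery interleaves. An illustrative buffer (not code from this repo; the index width is an assumption):

use std::collections::BTreeMap;

#[derive(Default)]
struct StreamBuffer {
    chunks: BTreeMap<u64, String>, // keyed by delta index
}

impl StreamBuffer {
    fn push(&mut self, index: u64, delta: String) {
        self.chunks.entry(index).or_insert(delta); // drops duplicate indices
    }
    fn assembled(&self) -> String {
        self.chunks.values().cloned().collect()
    }
}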

View File

@ -25,7 +25,7 @@ async fn poll_and_update_config(state: &AppState) -> Result<()> {
info!("Polling for data source configurations...");
let client = reqwest::Client::new();
let url = format!(
"{}/configs/data_sources",
"{}/api/v1/configs/data_sources",
state.config.data_persistence_service_url
);

View File

@ -0,0 +1,556 @@
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
/// Raw Tushare data entry
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TushareMetric {
pub metric_name: String,
pub period_date: String, // YYYY-MM-DD
pub value: Option<f64>,
// source and symbol are unused for now, since this is a single-stock report
}
/// Formatted report structure
pub struct FormattedReport {
pub title: String,
pub blocks: Vec<YearBlock>,
}
/// Annual/period data block
pub struct YearBlock {
pub title: String, // e.g. "2024年度" or "2020 - 2024"
pub periods: Vec<String>, // Column headers: ["2024-12-31", ...] or ["2024", "2023", ...]
pub sections: Vec<ReportSection>,
}
/// Report type
#[derive(Debug, Clone, Copy)]
pub enum ReportType {
Quarterly, // Quarterly mode (default)
Yearly5Year, // 5-year aggregation mode
}
/// Report section (e.g. balance sheet)
pub struct ReportSection {
pub title: String,
pub rows: Vec<FormatRow>,
}
/// A formatted row
pub struct FormatRow {
pub label: String,
pub values: Vec<String>, // Pre-formatted strings (e.g. "14.20 亿")
}
/// Unit strategy
#[derive(Debug, Clone, Copy)]
pub enum UnitStrategy {
CurrencyYi, // 亿 / hundred millions (divide by 1e8, 2 decimals)
CurrencyWan, // 万 / ten thousands (divide by 1e4, 2 decimals)
Percent, // Percentage (2 decimals; "(%)" is appended via the unit suffix)
Integer, // Integer
Raw, // Raw value
Days, // Days (1 decimal)
}
/// Field metadata
struct MetricMeta {
display_name: &'static str,
category: SectionCategory,
strategy: UnitStrategy,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum SectionCategory {
Snapshot,
Income,
Balance,
CashFlow,
Ratios,
Misc, // Catch-all
}
impl SectionCategory {
fn title(&self) -> &'static str {
match self {
SectionCategory::Snapshot => "关键指标",
SectionCategory::Income => "利润表",
SectionCategory::Balance => "资产负债表",
SectionCategory::CashFlow => "现金流量表",
SectionCategory::Ratios => "运营与比率",
SectionCategory::Misc => "其他指标",
}
}
}
pub struct TushareFormatter {
meta_map: HashMap<String, MetricMeta>,
}
impl TushareFormatter {
pub fn new() -> Self {
let mut meta_map = HashMap::new();
Self::init_dictionary(&mut meta_map);
Self { meta_map }
}
/// Initialize the data dictionary
fn init_dictionary(map: &mut HashMap<String, MetricMeta>) {
// Helper macro
macro_rules! m {
($key:expr, $name:expr, $cat:ident, $strat:ident) => {
map.insert(
$key.to_string(),
MetricMeta {
display_name: $name,
category: SectionCategory::$cat,
strategy: UnitStrategy::$strat,
},
);
};
}
// --- Snapshot & Market ---
m!("total_mv", "总市值", Snapshot, CurrencyYi);
m!("employees", "员工人数", Snapshot, Integer);
m!("holder_num", "股东户数", Snapshot, CurrencyWan);
m!("close", "收盘价", Snapshot, Raw);
m!("pe", "市盈率(PE)", Snapshot, Raw);
m!("pb", "市净率(PB)", Snapshot, Raw);
// --- Income Statement ---
m!("revenue", "营业收入", Income, CurrencyYi);
m!("n_income", "净利润", Income, CurrencyYi);
m!("rd_exp", "研发费用", Income, CurrencyYi);
m!("sell_exp", "销售费用", Income, CurrencyYi);
m!("admin_exp", "管理费用", Income, CurrencyYi);
m!("fin_exp", "财务费用", Income, CurrencyYi);
m!("total_cogs", "营业成本", Income, CurrencyYi);
m!("tax_to_ebt", "实际税率", Income, Percent);
m!("__tax_rate", "所得税率(Est)", Income, Percent);
m!("income_tax_exp", "所得税费用", Income, CurrencyYi);
m!("total_profit", "利润总额", Income, CurrencyYi);
// --- Balance Sheet ---
m!("total_assets", "总资产", Balance, CurrencyYi);
m!("fix_assets", "固定资产", Balance, CurrencyYi);
m!("inventories", "存货", Balance, CurrencyYi);
m!("accounts_receiv", "应收账款", Balance, CurrencyYi);
m!("accounts_pay", "应付账款", Balance, CurrencyYi);
m!("prepayment", "预付款项", Balance, CurrencyYi);
m!("adv_receipts", "预收款项", Balance, CurrencyYi);
m!("contract_liab", "合同负债", Balance, CurrencyYi);
m!("money_cap", "货币资金", Balance, CurrencyYi);
m!("lt_eqt_invest", "长期股权投资", Balance, CurrencyYi);
m!("goodwill", "商誉", Balance, CurrencyYi);
m!("st_borr", "短期借款", Balance, CurrencyYi);
m!("lt_borr", "长期借款", Balance, CurrencyYi);
m!("total_liab", "总负债", Balance, CurrencyYi);
m!("total_hldr_eqy_exc_min_int", "股东权益", Balance, CurrencyYi); // 归母权益
// --- Cash Flow ---
m!("n_cashflow_act", "经营净现金流", CashFlow, CurrencyYi);
m!("c_paid_to_for_empl", "支付职工现金", CashFlow, CurrencyYi);
m!("c_pay_acq_const_fiolta", "购建资产支付", CashFlow, CurrencyYi);
m!("dividend_amount", "分红总额", CashFlow, CurrencyYi);
m!("n_cashflow_inv", "投资净现金流", CashFlow, CurrencyYi);
m!("n_cashflow_fina", "筹资净现金流", CashFlow, CurrencyYi);
// --- Ratios ---
m!("arturn_days", "应收周转天数", Ratios, Days);
m!("invturn_days", "存货周转天数", Ratios, Days);
m!("__gross_margin", "毛利率", Ratios, Percent);
m!("__net_margin", "净利率", Ratios, Percent);
m!("__money_cap_ratio", "现金占比", Ratios, Percent);
m!("__fix_assets_ratio", "固定资产占比", Ratios, Percent);
m!("__lt_invest_ratio", "长投占比", Ratios, Percent);
m!("__goodwill_ratio", "商誉占比", Ratios, Percent);
m!("__ar_ratio", "应收占比", Ratios, Percent);
m!("__ap_ratio", "应付占比", Ratios, Percent);
m!("__st_borr_ratio", "短贷占比", Ratios, Percent);
m!("__lt_borr_ratio", "长贷占比", Ratios, Percent);
m!("__rd_rate", "研发费率", Ratios, Percent);
m!("__sell_rate", "销售费率", Ratios, Percent);
m!("__admin_rate", "管理费率", Ratios, Percent);
m!("roe", "ROE", Ratios, Percent);
m!("roa", "ROA", Ratios, Percent);
m!("grossprofit_margin", "毛利率(原始)", Ratios, Percent);
m!("netprofit_margin", "净利率(原始)", Ratios, Percent);
// --- Derived/Misc (Previously Misc) ---
m!("__depr_ratio", "折旧营收比", Ratios, Percent);
m!("__inventories_ratio", "存货资产比", Ratios, Percent);
m!("__prepay_ratio", "预付资产比", Ratios, Percent);
m!("depr_fa_coga_dpba", "资产折旧摊销", CashFlow, CurrencyYi);
}
/// Format a numeric value (returns the number string only; the unit goes into the label suffix).
fn format_value(&self, val: f64, strategy: UnitStrategy) -> String {
match strategy {
UnitStrategy::CurrencyYi => format!("{:.2}", val / 1e8),
UnitStrategy::CurrencyWan => format!("{:.2}", val / 1e4),
UnitStrategy::Percent => format!("{:.2}", val),
UnitStrategy::Integer => format!("{:.0}", val),
UnitStrategy::Raw => format!("{:.2}", val),
UnitStrategy::Days => format!("{:.1}", val),
}
}
/// Unit suffix appended to the row label.
/// Note: full-width parentheses, matching the test assertion "货币资金(亿)".
fn get_unit_suffix(&self, strategy: UnitStrategy) -> &'static str {
match strategy {
UnitStrategy::CurrencyYi => "(亿)",
UnitStrategy::CurrencyWan => "(万)",
UnitStrategy::Percent => "(%)",
UnitStrategy::Integer => "",
UnitStrategy::Raw => "",
UnitStrategy::Days => "(天)",
}
}
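// A minimal sketch (not in the original source) of how the two helpers compose,
// using the money_cap sample from the test below:
//   self.format_value(1_420_023_169.52, UnitStrategy::CurrencyYi) -> "14.20"
//   self.get_unit_suffix(UnitStrategy::CurrencyYi)                -> "(亿)"
// The suffix is attached to the row label ("货币资金(亿)"), never to the value cell.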
/// Main entry point: convert flat Tushare metrics into a Markdown string.
pub fn format_to_markdown(&self, symbol: &str, metrics: Vec<TushareMetric>) -> Result<String> {
let report_type = self.detect_report_type(&metrics);
let report = match report_type {
ReportType::Yearly5Year => self.pivot_data_yearly(symbol, metrics)?,
ReportType::Quarterly => self.pivot_data_quarterly(symbol, metrics)?,
};
self.render_markdown(&report)
}
/// Decide which report layout to use.
fn detect_report_type(&self, metrics: &[TushareMetric]) -> ReportType {
// Strategy: inspect the period-date distribution of a key metric (net income, n_income).
// If more than 80% of its periods end on 12-31, prefer the yearly layout.
let target_metric = "n_income";
let mut total_count = 0;
let mut year_end_count = 0;
for m in metrics {
if m.metric_name == target_metric {
total_count += 1;
if m.period_date.ends_with("-12-31") {
year_end_count += 1;
}
}
}
// If there is no net-income data, fall back to scanning all metrics.
if total_count == 0 {
for m in metrics {
total_count += 1;
if m.period_date.ends_with("-12-31") {
year_end_count += 1;
}
}
}
if total_count > 0 {
let ratio = year_end_count as f64 / total_count as f64;
// Year-end periods dominate (>80%); small datasets that are entirely annual reports also land here.
if ratio > 0.8 {
return ReportType::Yearly5Year;
}
}
// Default: quarterly view (the original logic).
ReportType::Quarterly
}
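// Worked example of the heuristic (hypothetical periods): with n_income dates
// [2020-12-31, 2021-12-31, 2022-12-31, 2023-12-31, 2024-12-31, 2025-09-30],
// 5 of 6 end on 12-31, so ratio = 0.833 > 0.8 and Yearly5Year is chosen.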
/// Mode A: 5-year aggregated annual view (Yearly 5-Year Aggregation).
/// Structure: Block = 5 years (e.g. 2020-2024), Columns = [2024, 2023, 2022, 2021, 2020].
/// For each year, the latest reporting period in that year represents it
/// (usually the 12-31 annual report; for the current year, the latest quarterly report).
fn pivot_data_yearly(&self, symbol: &str, metrics: Vec<TushareMetric>) -> Result<FormattedReport> {
// 1. Group by year and find the latest period_date within each year.
// Map<Year, MaxDate>
let mut year_max_date: HashMap<String, String> = HashMap::new();
for m in &metrics {
let year = m.period_date.split('-').next().unwrap_or("").to_string();
if year.is_empty() { continue; }
year_max_date.entry(year)
.and_modify(|curr| {
if m.period_date > *curr {
*curr = m.period_date.clone();
}
})
.or_insert(m.period_date.clone());
}
// 2. Collect data, keeping only rows dated at the year's latest period.
// Map<Year, Map<Metric, Value>>
let mut data_map: HashMap<String, HashMap<String, f64>> = HashMap::new();
let mut all_years: Vec<String> = Vec::new();
for m in metrics {
if let Some(val) = m.value {
let year = m.period_date.split('-').next().unwrap_or("").to_string();
if let Some(max_date) = year_max_date.get(&year) {
// Adopt a row only if its date equals the year's "primary reporting period",
// defined as the maximum period_date across all metrics for that year.
// Individual metrics could in principle have different max dates (missing data),
// but financial statements are normally aligned, so this simplification is safe.
if m.period_date == *max_date {
data_map
.entry(year.clone())
.or_default()
.insert(m.metric_name, val);
if !all_years.contains(&year) {
all_years.push(year);
}
}
}
}
}
// Sort years descending (2024, 2023, ...).
all_years.sort_by(|a, b| b.cmp(a));
// 3. Chunk into blocks of 5 years.
let chunks = all_years.chunks(5);
let mut blocks = Vec::new();
for chunk in chunks {
if chunk.is_empty() { continue; }
let start_year = chunk.last().unwrap();
let end_year = chunk.first().unwrap();
// The block title shows the year range.
let block_title = format!("{} - {}", start_year, end_year);
let periods = chunk.to_vec(); // ["2024", "2023", ...]
// Build the sections.
let sections = self.build_sections(&periods, &|year| data_map.get(year));
blocks.push(YearBlock {
title: block_title,
periods,
sections,
});
}
Ok(FormattedReport {
title: format!("{} 财务年报 (5年聚合)", symbol),
blocks,
})
}
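// Example of the resulting layout (matches the test data below): years
// 2016..=2025 chunk into two blocks titled "2021 - 2025" and "2016 - 2020",
// each with up to five year columns in descending order.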
/// Mode B: quarterly view (the original logic).
/// Structure: Block = 1 year (e.g. 2024), Columns = [2024-12-31, 2024-09-30, ...].
fn pivot_data_quarterly(&self, symbol: &str, metrics: Vec<TushareMetric>) -> Result<FormattedReport> {
// Map<Year, Map<Date, Map<Metric, Value>>>
let mut year_map: BTreeMap<String, BTreeMap<String, HashMap<String, f64>>> = BTreeMap::new();
for m in metrics {
if let Some(val) = m.value {
let year = m.period_date.split('-').next().unwrap_or("Unknown").to_string();
year_map
.entry(year)
.or_default()
.entry(m.period_date.clone())
.or_default()
.insert(m.metric_name, val);
}
}
let mut blocks = Vec::new();
// Iterate years in descending order.
for (year, date_map) in year_map.iter().rev() {
let mut periods: Vec<String> = date_map.keys().cloned().collect();
periods.sort_by(|a, b| b.cmp(a)); // dates descending
// Build the sections.
// Adapter closure: given a period (date), return that column's metric map.
let sections = self.build_sections(&periods, &|period| date_map.get(period));
blocks.push(YearBlock {
title: format!("{}年度", year),
periods,
sections,
});
}
Ok(FormattedReport {
title: format!("{} 财务数据明细 (季报视图)", symbol),
blocks,
})
}
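// Example block shape: a year block titled "2024年度" with columns such as
// [2024-12-31, 2024-09-30, 2024-06-30, 2024-03-31], newest first.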
/// Generic section builder.
/// periods: column headers (years such as "2024" or dates such as "2024-12-31").
/// data_provider: closure that returns a column's Map<Metric, Value> for a given period.
fn build_sections<'a, F>(
&self,
periods: &[String],
data_provider: &F
) -> Vec<ReportSection>
where F: Fn(&str) -> Option<&'a HashMap<String, f64>>
{
// 1. Collect every metric key that appears in this block.
let mut all_metric_keys = std::collections::HashSet::new();
for p in periods {
if let Some(map) = data_provider(p) {
for k in map.keys() {
all_metric_keys.insert(k.clone());
}
}
}
// 2. Categorize.
let mut cat_metrics: BTreeMap<SectionCategory, Vec<String>> = BTreeMap::new();
for key in all_metric_keys {
if let Some(meta) = self.meta_map.get(&key) {
cat_metrics.entry(meta.category).or_default().push(key);
} else {
cat_metrics.entry(SectionCategory::Misc).or_default().push(key);
}
}
// 3. Generate sections.
let mut sections = Vec::new();
let categories = vec![
SectionCategory::Snapshot,
SectionCategory::Income,
SectionCategory::Balance,
SectionCategory::CashFlow,
SectionCategory::Ratios,
SectionCategory::Misc,
];
for cat in categories {
if let Some(keys) = cat_metrics.get(&cat) {
let mut sorted_keys = keys.clone();
// Alphabetical order for now; ideally each metric would carry an explicit sort weight.
sorted_keys.sort();
let mut rows = Vec::new();
for key in sorted_keys {
let (label, strategy) = if let Some(meta) = self.meta_map.get(&key) {
(meta.display_name.to_string(), meta.strategy)
} else {
(key.clone(), UnitStrategy::Raw)
};
// Append unit suffix to label
let label_with_unit = format!("{}{}", label, self.get_unit_suffix(strategy));
let mut row_vals = Vec::new();
for p in periods {
let val_opt = data_provider(p).and_then(|m| m.get(&key));
if let Some(val) = val_opt {
row_vals.push(self.format_value(*val, strategy));
} else {
row_vals.push("-".to_string());
}
}
rows.push(FormatRow { label: label_with_unit, values: row_vals });
}
sections.push(ReportSection {
title: cat.title().to_string(),
rows,
});
}
}
sections
}
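// Both pivot modes reuse this builder through a closure adapter, e.g.
//   self.build_sections(&periods, &|year| data_map.get(year))
// so the builder never needs to know whether columns are years or dates.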
fn render_markdown(&self, report: &FormattedReport) -> Result<String> {
let mut md = String::new();
md.push_str(&format!("# {}\n\n", report.title));
for block in &report.blocks {
md.push_str(&format!("## {}\n\n", block.title));
for section in &block.sections {
if section.rows.is_empty() { continue; }
md.push_str(&format!("### {}\n", section.title));
// Table Header
md.push_str("| 指标 |");
for p in &block.periods {
md.push_str(&format!(" {} |", p));
}
md.push('\n');
// Separator
md.push_str("| :--- |");
for _ in &block.periods {
md.push_str(" :--- |");
}
md.push('\n');
// Rows
for row in &section.rows {
md.push_str(&format!("| **{}** |", row.label));
for v in &row.values {
md.push_str(&format!(" {} |", v));
}
md.push('\n');
}
md.push('\n');
}
}
Ok(md)
}
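// Illustrative output shape for one section (values are hypothetical):
//   ### 利润表
//   | 指标 | 2024 | 2023 |
//   | :--- | :--- | :--- |
//   | **营业收入(亿)** | 123.45 | 110.80 |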
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::path::PathBuf;
#[test]
fn test_format_tushare_real_data() {
// Try to locate the assets file relative to the crate root
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("../../assets/tushare.json");
if !path.exists() {
println!("SKIPPED: Test data not found at {:?}", path);
return;
}
println!("Loading test data from: {:?}", path);
let json_content = fs::read_to_string(path).expect("Failed to read tushare.json");
let metrics: Vec<TushareMetric> = serde_json::from_str(&json_content).expect("Failed to parse JSON");
println!("Loaded {} metrics", metrics.len());
let formatter = TushareFormatter::new();
let md = formatter.format_to_markdown("600521.SS", metrics).expect("Format markdown");
println!("\n=== GENERATED MARKDOWN REPORT START ===\n");
println!("{}", md);
println!("\n=== GENERATED MARKDOWN REPORT END ===\n");
// Assertions
// Title adapts to report type (e.g. "财务年报 (5年聚合)")
assert!(md.contains("# 600521.SS 财务"));
// Verify structure: Should have year ranges in 5-year mode
// "2021 - 2025"
assert!(md.contains("## 2021 - 2025"));
assert!(md.contains("## 2016 - 2020"));
// Verify content density
// Ensure we have specific sections populated
assert!(md.contains("### 利润表"));
assert!(md.contains("### 资产负债表"));
// Check for specific data point formatting (based on sample)
// "money_cap": 1420023169.52 (2025-09-30) -> 14.20 亿
// Updated assertion: Value should be "14.20", unit is in label "货币资金(亿)"
assert!(md.contains("14.20"));
assert!(md.contains("货币资金(亿)"));
}
}

View File

@ -1,6 +1,7 @@
mod api;
mod config;
mod error;
mod formatter;
mod mapping;
mod message_consumer;
// mod persistence; // Removed in favor of common_contracts::persistence_client

View File

@ -4,6 +4,7 @@ use common_contracts::messages::FetchCompanyDataCommand;
use common_contracts::workflow_types::WorkflowTaskCommand; // Import
use common_contracts::observability::ObservabilityTaskStatus;
use common_contracts::subjects::NatsSubject;
use common_contracts::ack::TaskAcknowledgement;
use futures_util::StreamExt;
use tracing::{error, info, warn};
@ -16,17 +17,8 @@ pub async fn run(state: AppState) -> Result<()> {
info!("Starting NATS message consumer...");
loop {
let status = state.status.read().await.clone();
if let ServiceOperationalStatus::Degraded { reason } = status {
warn!(
"Service is in degraded state (reason: {}). Pausing message consumption for 5s.",
reason
);
tokio::time::sleep(Duration::from_secs(5)).await;
continue;
}
info!("Service is Active. Connecting to NATS...");
// Always connect, regardless of Degraded status
info!("Connecting to NATS...");
match async_nats::connect(&state.config.nats_addr).await {
Ok(client) => {
info!("Successfully connected to NATS.");
@ -55,7 +47,32 @@ async fn subscribe_workflow(state: AppState, client: async_nats::Client) -> Resu
info!("Workflow Consumer started on '{}'", subject);
while let Some(message) = subscriber.next().await {
// Check status (omitted for brevity, assuming handled)
// Check Status (Handshake)
let current_status = state.status.read().await.clone();
// If Degraded, Reject immediately
if let ServiceOperationalStatus::Degraded { reason } = current_status {
warn!("Rejecting task due to degraded state: {}", reason);
if let Some(reply_to) = message.reply {
let ack = TaskAcknowledgement::Rejected { reason };
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = client.publish(reply_to, payload.into()).await {
error!("Failed to send Rejection Ack: {}", e);
}
}
}
continue;
}
// If Active, Accept
if let Some(reply_to) = message.reply.clone() {
let ack = TaskAcknowledgement::Accepted;
if let Ok(payload) = serde_json::to_vec(&ack) {
if let Err(e) = client.publish(reply_to, payload.into()).await {
error!("Failed to send Acceptance Ack: {}", e);
}
}
}
let state = state.clone();
let client = client.clone();
@ -63,6 +80,7 @@ async fn subscribe_workflow(state: AppState, client: async_nats::Client) -> Resu
tokio::spawn(async move {
match serde_json::from_slice::<WorkflowTaskCommand>(&message.payload) {
Ok(cmd) => {
// TODO: Implement Heartbeat inside handle_workflow_command or wrapper
if let Err(e) = crate::generic_worker::handle_workflow_command(state, client, cmd).await {
error!("Generic worker handler failed: {}", e);
}
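// Illustrative counterpart (not part of this diff): the dispatching side of the
// Accept/Reject handshake. A sketch assuming async_nats, a `subject`/`cmd` in
// scope, and a 2-second acknowledgement deadline:
//
//   let resp = tokio::time::timeout(
//       Duration::from_secs(2),
//       client.request(subject, serde_json::to_vec(&cmd)?.into()),
//   ).await??;
//   match serde_json::from_slice::<TaskAcknowledgement>(&resp.payload)? {
//       TaskAcknowledgement::Accepted => { /* task is in flight */ }
//       TaskAcknowledgement::Rejected { reason } => { /* reschedule on another worker */ }
//   }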

View File

@ -3,11 +3,12 @@ use anyhow::{Result, anyhow, Context};
use serde_json::{json, Value};
use std::collections::HashMap;
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, ArtifactContent};
use common_contracts::data_formatting;
use common_contracts::workflow_node::{WorkflowNode, NodeContext, NodeExecutionResult, ArtifactContent, CacheKey};
use common_contracts::persistence_client::PersistenceClient;
use common_contracts::workflow_harness::TaskState;
use crate::state::AppState;
use crate::formatter::{TushareFormatter, TushareMetric};
use std::time::Duration;
pub struct TushareNode {
state: AppState,
@ -25,6 +26,24 @@ impl WorkflowNode for TushareNode {
"tushare"
}
fn get_cache_config(&self, config: &Value) -> Option<(CacheKey, Duration)> {
let symbol = config.get("symbol").and_then(|s| s.as_str())?;
// Construct a tuple key: provider:interface:arg:scope
let key_parts = vec![
"tushare",
"company_data", // Conceptual interface name
symbol,
"all" // Scope
];
let cache_key = CacheKey(key_parts.join(":"));
// Tushare serves financial-report data, which changes slowly; cache for 7 days.
let ttl = Duration::from_secs(7 * 24 * 60 * 60);
Some((cache_key, ttl))
}
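// For the symbol used in the formatter test this yields the cache key
// "tushare:company_data:600521.SS:all" with a 7-day (604800s) TTL.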
async fn execute(&self, _ctx: &NodeContext, config: &Value) -> Result<NodeExecutionResult> {
let symbol = config.get("symbol").and_then(|s| s.as_str()).unwrap_or("").to_string();
let _market = config.get("market").and_then(|s| s.as_str()).unwrap_or("CN").to_string();
@ -50,7 +69,22 @@ impl WorkflowNode for TushareNode {
// 3. Artifacts
let mut artifacts = HashMap::new();
artifacts.insert("profile.json".to_string(), json!(profile).into());
artifacts.insert("financials.json".to_string(), json!(financials).into());
// Format Report directly to markdown
let metrics: Vec<TushareMetric> = financials.iter().map(|d| TushareMetric {
metric_name: d.metric_name.clone(),
period_date: d.period_date.to_string(),
value: Some(d.value),
}).collect();
let formatter = TushareFormatter::new();
let report_md = formatter.format_to_markdown(&symbol, metrics.clone())
.context("Failed to format markdown report")?;
artifacts.insert("financials.md".to_string(), ArtifactContent::Text(report_md));
// 4. Also dump the raw metrics so render_report can recover if financials.md is missing.
artifacts.insert("_metrics_dump.json".to_string(), json!(metrics).into());
Ok(NodeExecutionResult {
artifacts,
@ -62,28 +96,27 @@ impl WorkflowNode for TushareNode {
}
fn render_report(&self, result: &NodeExecutionResult) -> Result<String> {
let profile_json = match result.artifacts.get("profile.json") {
Some(ArtifactContent::Json(v)) => v,
_ => return Err(anyhow!("Missing profile.json")),
};
let financials_json = match result.artifacts.get("financials.json") {
Some(ArtifactContent::Json(v)) => v,
_ => return Err(anyhow!("Missing financials.json")),
};
match result.artifacts.get("financials.md") {
Some(ArtifactContent::Text(s)) => Ok(s.clone()),
_ => {
// Robustness: Try to regenerate if financials.md is missing (e.g. cache hit but old version or partial cache)
if let Some(ArtifactContent::Json(json_val)) = result.artifacts.get("_metrics_dump.json") {
// Clone value to deserialize
if let Ok(metrics) = serde_json::from_value::<Vec<TushareMetric>>(json_val.clone()) {
let formatter = TushareFormatter::new();
let symbol = result.meta_summary.as_ref()
.and_then(|v| v.get("symbol"))
.and_then(|v| v.as_str())
.unwrap_or("Unknown");
let symbol = profile_json["symbol"].as_str().unwrap_or("Unknown");
let mut report_md = String::new();
report_md.push_str(&format!("# Tushare Data Report: {}\n\n", symbol));
report_md.push_str("## Company Profile\n\n");
report_md.push_str(&data_formatting::format_data(profile_json));
report_md.push_str("\n\n");
report_md.push_str("## Financial Statements\n\n");
report_md.push_str(&data_formatting::format_data(financials_json));
Ok(report_md)
tracing::info!("Regenerating financials.md from cached metrics dump for {}", symbol);
return formatter.format_to_markdown(symbol, metrics)
.context("Failed to regenerate markdown report from metrics");
}
}
Err(anyhow!("Missing financials.md"))
}
}
}
}

View File

@ -17,8 +17,31 @@ impl AppConfig {
.unwrap_or_else(|_| "8005".to_string())
.parse()
.context("SERVER_PORT must be a number")?;
// Note: the previous default ended with "/api/v1", but PersistenceClient already
// appends that prefix itself, e.g.:
//   let url = format!("{}/api/v1/templates", self.base_url);
// while data-persistence-service routes are registered as "/api/v1/templates".
// With a base URL of "http://svc:3000/api/v1" the composed URL would become
// "http://svc:3000/api/v1/api/v1/templates" (doubled prefix), so the base URL
// must NOT end with /api/v1.
let data_persistence_service_url = env::var("DATA_PERSISTENCE_SERVICE_URL")
.unwrap_or_else(|_| "http://data-persistence-service:3000/api/v1".to_string());
.unwrap_or_else(|_| "http://data-persistence-service:3000".to_string());
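// Client-composed URLs now resolve to e.g.
// "http://data-persistence-service:3000/api/v1/templates", with a single prefix.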
let workflow_data_path = env::var("WORKFLOW_DATA_PATH")
.unwrap_or_else(|_| "/mnt/workflow_data".to_string());

Some files were not shown because too many files have changed in this diff.