Compare commits a1e4b265ba...427776b863 (2 commits: 427776b863, 60e6c8f61b)
```diff
@@ -67,7 +67,8 @@ services:
     ports:
       - "13001:3001"
     depends_on:
-      - api-gateway
+      api-gateway:
+        condition: service_healthy
     networks:
       - app-network
 
@@ -95,6 +96,11 @@ services:
       - yfinance-provider-service
     networks:
       - app-network
+    healthcheck:
+      test: ["CMD-SHELL", "curl -fsS http://localhost:4000/health >/dev/null || exit 1"]
+      interval: 5s
+      timeout: 5s
+      retries: 12
 
   alphavantage-provider-service:
     build:
@@ -105,7 +111,6 @@ services:
       SERVER_PORT: 8000
       NATS_ADDR: nats://nats:4222
      DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
-      ALPHAVANTAGE_API_KEY: ${ALPHAVANTAGE_API_KEY}
       RUST_LOG: info,axum=info
       RUST_BACKTRACE: "1"
     depends_on:
@@ -129,8 +134,6 @@ services:
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
       TUSHARE_API_URL: http://api.waditu.com
-      # Please provide your Tushare token via .env
-      TUSHARE_API_TOKEN: ${TUSHARE_API_TOKEN}
       RUST_LOG: info,axum=info
       RUST_BACKTRACE: "1"
     depends_on:
@@ -154,8 +157,6 @@ services:
       NATS_ADDR: nats://nats:4222
       DATA_PERSISTENCE_SERVICE_URL: http://data-persistence-service:3000/api/v1
      FINNHUB_API_URL: https://finnhub.io/api/v1
-      # Please provide your Finnhub token in .env file
-      FINNHUB_API_KEY: ${FINNHUB_API_KEY}
       RUST_LOG: info,axum=info
       RUST_BACKTRACE: "1"
     depends_on:
```
````diff
@@ -29,8 +29,8 @@ owner: '@lv'
 ```
 +-------------+      +------------------+      +---------------------------+
 |             | HTTP |                  |      |                           |
-|  Frontend   |----->|   API Gateway    |----->|        Message Bus        |
-| (Next.js)   |      |      (Rust)      |      |   (e.g. RabbitMQ, NATS)   |
+|  Frontend   |----->|   API Gateway    |----->|     Message Bus (NATS)    |
+| (Next.js)   |      |      (Rust)      |      |                           |
 |             |      |                  |      |                           |
 +-------------+      +-------+----------+      +-------------+-------------+
                              |                               |
@@ -69,7 +69,7 @@ owner: '@lv'
 - Provides a stable, unified HTTP interface for database reads and writes to all other internal microservices.
 
 - **Message Bus**:
-  - The nerve center of the entire system, responsible for all asynchronous communication between services.
+  - The nerve center of the entire system, responsible for all asynchronous communication between services. **NATS** is currently the chosen implementation.
 
 ## 3. `SystemModule` Core Specification
 
````
@@ -0,0 +1,245 @@ (new file)

---
status: "Active"
date: "2025-11-17"
author: "AI Assistant"
---

# Design Document: Configurable Analysis Templates and Orchestrator

## 1. Overview and Goals

### 1.1. Problem Statement

Our current Rust-based backend lacks the core business logic required to run intelligent, multi-step financial analyses. The `report-generator-service`, which is meant to carry this logic, is still internally incomplete. More importantly, the current system design has no clear, extensible way to manage and reuse whole suites of analysis workflows, and its configuration bootstrapping depends on local files, which violates our principles for robust system design.

### 1.2. Goals

This task designs and implements, within our Rust microservice architecture, a robust, configurable **analysis module orchestrator** centered on **Analysis Template Sets**. The system will let us create, manage, and execute multiple independent analysis workflows with complex inter-module dependencies.

To achieve this, the following work is required:
1. **Introduce Analysis Template Sets**: add the concept of an "analysis template set" to the top-level system design; each set contains an independent suite of analysis modules and their configuration.
2. **Implement template management in the frontend**: provide full CRUD management of analysis template sets in the frontend configuration center, plus CRUD management of the analysis modules inside each set.
3. **Build a robust backend orchestrator**: implement, in `report-generator-service`, an orchestrator that executes a given analysis template set, resolving inter-module dependencies via topological sorting.
4. **Implement file-free data initialization**: seed the system on first startup by embedding the default configuration in the service binary, removing the dependency on local configuration files entirely.

## 2. New Data Models (`common-contracts`)

To support the template-set concept, we need new data structures.

```rust
// common-contracts/src/config_models.rs

use serde::{Deserialize, Serialize};
use std::collections::HashMap;

// The analysis template configuration for the whole system,
// stored in the database as a single top-level object.
// Key: template ID (e.g., "standard_fundamentals")
pub type AnalysisTemplateSets = HashMap<String, AnalysisTemplateSet>;

// A single analysis template set, representing one complete analysis workflow
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AnalysisTemplateSet {
    pub name: String, // Human-readable template name, e.g., "标准基本面分析"
    // All analysis modules contained in this template set
    // Key: module ID (e.g., "fundamental_analysis")
    pub modules: HashMap<String, AnalysisModuleConfig>,
}

// Configuration of a single analysis module (unchanged from the earlier definition)
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AnalysisModuleConfig {
    pub name: String,
    pub provider_id: String,
    pub model_id: String,
    pub prompt_template: String,
    // Dependency list; each string must be the ID of another module in the same template set
    pub dependencies: Vec<String>,
}
```
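To make the stored shape concrete, here is a minimal sketch that builds one template set and prints its serialized form; the provider, model, and prompt values are invented for illustration and are not part of the contract:

```rust
use std::collections::HashMap;

fn example() -> serde_json::Result<()> {
    let mut modules = HashMap::new();
    modules.insert(
        "fundamental_analysis".to_string(),
        AnalysisModuleConfig {
            name: "Fundamental Analysis".to_string(),
            provider_id: "openai".to_string(), // invented for illustration
            model_id: "gpt-4o".to_string(),    // invented for illustration
            prompt_template: "Analyze the fundamentals of {symbol}...".to_string(),
            dependencies: vec![],
        },
    );
    modules.insert(
        "bull_case".to_string(),
        AnalysisModuleConfig {
            name: "Bull Case".to_string(),
            provider_id: "openai".to_string(),
            model_id: "gpt-4o".to_string(),
            prompt_template: "Given {fundamental_analysis}, argue the bull case.".to_string(),
            // Must reference module IDs within the same template set.
            dependencies: vec!["fundamental_analysis".to_string()],
        },
    );

    let mut sets: AnalysisTemplateSets = HashMap::new();
    sets.insert(
        "standard_fundamentals".to_string(),
        AnalysisTemplateSet { name: "标准基本面分析".to_string(), modules },
    );

    // This JSON is exactly what gets stored in system_config.config_value.
    println!("{}", serde_json::to_string_pretty(&sets)?);
    Ok(())
}
```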

## 3. System Architecture and Data Flow

### 3.1. High-Level Data Flow

1. **Configuration flow**:
   * The **user** interacts with the configuration page in the **frontend** to create or modify an analysis template set.
   * The **frontend** sends a `PUT /api/v1/configs/analysis_template_sets` request to the **API gateway**.
   * The **API gateway** proxies the request to the **data persistence service**, which saves the serialized `AnalysisTemplateSets` object to the database in full.

2. **Execution flow** (a sketch of the command's shape follows this list):
   * The **user** picks an **analysis template set** in the **frontend**, then triggers an analysis for a specific stock symbol.
   * The **frontend** sends a `POST /api/v1/analysis-requests/{symbol}` request to the **API gateway**, with the chosen `template_id` in the request body.
   * The **API gateway** validates the request and publishes a `GenerateReportCommand` message containing `symbol`, `template_id`, and `request_id` to the **NATS message bus**.
   * The **report generator service** subscribes to this message and starts the orchestration workflow specified by `template_id`.
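The document names the command's three fields but not its definition; a minimal sketch of what `GenerateReportCommand` might look like in `common-contracts` (the field types are assumptions):

```rust
// common-contracts/src/messages.rs (sketch; the actual definition may differ)
use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GenerateReportCommand {
    /// Correlates one end-to-end analysis request (assumed to be a UUID).
    pub request_id: Uuid,
    /// Stock symbol to analyze, e.g. "600519".
    pub symbol: String,
    /// Which analysis template set to execute, e.g. "standard_fundamentals".
    pub template_id: String,
}
```

Publishing this as JSON over NATS keeps the gateway and the report generator decoupled.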

## 4. Frontend Implementation Plan (`/config` page)

The frontend configuration page is restructured into two levels:

1. **Level one: template set management**
   * Show a list of all analysis template sets.
   * Provide "create new template set", "rename", and "delete template set" actions.
   * Selecting a template set takes the user to the level-two management view.

2. **Level two: analysis module management (inside the selected template set)**
   * **Main view**: after entering a template set, the main view lists all of its analysis modules, each rendered as its own card.
   * **Create**:
     * A "new analysis module" button sits at the top or bottom of the module list.
     * Clicking it expands a form asking for the new module's **module ID** (a unique, machine-readable English identifier) and **module name** (a human-readable display name).
   * **Read**:
     * Each module card shows its **module name** and **module ID** by default.
     * A card can be expanded to show its full configuration.
   * **Update**:
     * Inside an expanded card, every configuration field is editable:
       * **LLM Provider**: a dropdown listing all LLM providers configured in the system.
       * **Model**: a cascading dropdown that loads the available models for the selected provider.
       * **Prompt template**: a multi-line text input for editing the module's core prompt.
       * **Dependencies**: a checkbox list showing **only the other modules in the current template set**, used to tick dependency modules.
   * **Delete**:
     * Each module card has a "delete" button in its top-right corner.
     * Clicking it opens a confirmation dialog to guard against accidental deletion.

## 5. Database and Data Structure Design

To support the features above, we need two core storage models in `data-persistence-service`: one for **configuration** and one for **results**.

### 5.1. Configuration Storage: the `system_config` Table

We reuse the existing `system_config` table to store the full analysis template set configuration.

- **Purpose**: the single source of truth for all analysis template sets.
- **Storage**:
  - A single record in the table.
  - `config_key` (primary key): `analysis_template_sets`
  - `config_value` (type: `JSONB`): the serialized `AnalysisTemplateSets` (i.e., `HashMap<String, AnalysisTemplateSet>`) object.
- **Corresponding data structure (`common-contracts`)**: the `AnalysisTemplateSets` type defined in Section 2 maps directly onto this record.

### 5.2. Result Storage: the `analysis_results` Table (new)

To store the content each module produces during every analysis run, we need a new table.

- **Table name**: `analysis_results`
- **Purpose**: durably store the output of every analysis run, for history and later queries.
- **SQL schema** (note: PostgreSQL does not accept `INDEX` clauses inside `CREATE TABLE`, so the indexes are created separately):

```sql
CREATE TABLE analysis_results (
    id BIGSERIAL PRIMARY KEY,
    request_id UUID NOT NULL,         -- ID correlating one full analysis request
    symbol VARCHAR(32) NOT NULL,      -- associated stock symbol
    template_id VARCHAR(64) NOT NULL, -- ID of the analysis template set used
    module_id VARCHAR(64) NOT NULL,   -- ID of the module that produced this result
    content TEXT NOT NULL,            -- analysis content generated by the LLM
    meta_data JSONB,                  -- extra metadata (e.g., model_name, tokens, elapsed_ms)
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Indexes to optimize common queries
CREATE INDEX idx_analysis_results_request_id ON analysis_results (request_id);
CREATE INDEX idx_analysis_results_symbol_template ON analysis_results (symbol, template_id);
```

- **Corresponding data structure (`common-contracts`)**:

```rust
// common-contracts/src/dtos.rs

use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewAnalysisResult {
    pub request_id: Uuid,
    pub symbol: String,
    pub template_id: String,
    pub module_id: String,
    pub content: String,
    pub meta_data: serde_json::Value,
}
```
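A minimal sketch of the corresponding insert, assuming `sqlx` with the `postgres`, `uuid`, and `json` features; the function name and pool wiring are illustrative rather than the service's actual code:

```rust
use sqlx::PgPool;

// Persists one module's output into analysis_results (sketch).
pub async fn insert_analysis_result(
    pool: &PgPool,
    r: &NewAnalysisResult,
) -> Result<(), sqlx::Error> {
    sqlx::query(
        "INSERT INTO analysis_results \
         (request_id, symbol, template_id, module_id, content, meta_data) \
         VALUES ($1, $2, $3, $4, $5, $6)",
    )
    .bind(r.request_id)
    .bind(&r.symbol)
    .bind(&r.template_id)
    .bind(&r.module_id)
    .bind(&r.content)
    .bind(&r.meta_data)
    .execute(pool)
    .await?;
    Ok(())
}
```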

## 6. Backend Implementation Plan

### 6.1. `data-persistence-service`

- **Data initialization (no file dependency)**: implement one-time, hard-coded startup logic (see the sketch after this list).
  1. Hard-code the contents of `config/analysis-config.json` as a Rust string constant in `data-persistence-service`.
  2. On service startup, check whether `system_config` contains a record with key `analysis_template_sets`.
  3. If it does **not** exist:
     a. Parse the hard-coded string and build a default `AnalysisTemplateSet` (e.g., ID `default`, name "默认分析模板").
     b. Wrap this default template set in an `AnalysisTemplateSets` HashMap.
     c. Write the serialized `AnalysisTemplateSets` object to the database.
  4. This mechanism guarantees that a fresh deployment has a fully functional default analysis template without any external files.
- **New responsibility**: implement the CRUD API for the `analysis_results` table.
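A minimal sketch of that seeding flow. `ConfigStore` and its `get`/`put` methods are hypothetical stand-ins for the service's real repository layer, and the embedded JSON is assumed to deserialize into the default set's module map:

```rust
use std::collections::HashMap;

// Embed the default configuration into the binary at compile time.
const DEFAULT_ANALYSIS_CONFIG: &str = include_str!("../config/analysis-config.json");

const CONFIG_KEY: &str = "analysis_template_sets";

pub async fn seed_default_templates(store: &ConfigStore) -> anyhow::Result<()> {
    // 1. Skip seeding if the key already exists.
    if store.get(CONFIG_KEY).await?.is_some() {
        return Ok(());
    }

    // 2. Parse the embedded JSON into the default set's module map.
    let modules: HashMap<String, AnalysisModuleConfig> =
        serde_json::from_str(DEFAULT_ANALYSIS_CONFIG)?;

    // 3. Wrap it as template set "default" inside an AnalysisTemplateSets map.
    let mut sets: AnalysisTemplateSets = HashMap::new();
    sets.insert(
        "default".to_string(),
        AnalysisTemplateSet { name: "默认分析模板".to_string(), modules },
    );

    // 4. Persist the serialized object under the well-known key.
    store.put(CONFIG_KEY, serde_json::to_value(&sets)?).await
}
```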

### 6.2. `api-gateway`

- **Endpoint update**: `POST /api/v1/analysis-requests/{symbol}`.
- **Logic changes**:
  * The endpoint now parses `template_id` out of the request body.
  * The `GenerateReportCommand` message it builds must include the `template_id` field.

### 6.3. `report-generator-service` (core task)

The orchestration logic in `worker.rs` is adjusted and implemented as follows:

1. **Message consumer**: the subscribed `GenerateReportCommand` messages now carry a `template_id`.

2. **Orchestration logic (`run_report_generation_workflow`)** (see the topological-sort sketch after this list):
   * **Fetch configuration**: retrieve the full `AnalysisTemplateSets` object from `data-persistence-service`.
   * **Select the template**: pick the `AnalysisTemplateSet` to execute using the incoming `template_id`; if it cannot be found, log an error and abort.
   * **Build the dependency graph**: build a directed graph from the selected set's `modules`. The `petgraph` crate is strongly recommended.
   * **Topological sort**: topologically sort the graph; **cycle detection is mandatory**.
   * **Sequential execution**: walk the sorted module list; context injection, LLM calls, and result persistence stay as previously designed, but scoped to the modules of the current template set.

3. **Fill in the missing logic**:
   * **Implement result persistence**: call the `data-persistence-service` API to store each module's `NewAnalysisResult` in the `analysis_results` table.
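A minimal sketch of the graph-building and sorting step with `petgraph`, assuming edges point from a dependency to its dependent so that dependencies sort first; error handling is simplified:

```rust
use petgraph::algo::toposort;
use petgraph::graph::{DiGraph, NodeIndex};
use std::collections::HashMap;

/// Returns module IDs in a valid execution order, or an error on a cycle.
fn execution_order(set: &AnalysisTemplateSet) -> anyhow::Result<Vec<String>> {
    let mut graph: DiGraph<&str, ()> = DiGraph::new();
    let mut nodes: HashMap<&str, NodeIndex> = HashMap::new();

    // One node per module in this template set.
    for module_id in set.modules.keys() {
        let ix = graph.add_node(module_id.as_str());
        nodes.insert(module_id.as_str(), ix);
    }

    // Edge: dependency -> dependent, so dependencies come first in the order.
    for (module_id, cfg) in &set.modules {
        for dep in &cfg.dependencies {
            let from = *nodes
                .get(dep.as_str())
                .ok_or_else(|| anyhow::anyhow!("unknown dependency '{dep}' in '{module_id}'"))?;
            graph.add_edge(from, nodes[module_id.as_str()], ());
        }
    }

    // toposort reports the offending node when the graph contains a cycle.
    let sorted = toposort(&graph, None).map_err(|cycle| {
        anyhow::anyhow!("dependency cycle involving module '{}'", graph[cycle.node_id()])
    })?;

    Ok(sorted.into_iter().map(|ix| graph[ix].to_string()).collect())
}
```

Walking the returned order and injecting each module's dependency outputs into its prompt then matches the sequential-execution step above.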

## 7. Future Work

### 7.1. Evolving Toward a "Deep Research" Module

This design lays a solid foundation for the future "Deep Research" module. Once that module is ready, we can create a new analysis template set in which certain modules (such as `news_analysis`) no longer call an LLM directly but instead call the Deep Research service. That service will perform complex data mining and return highly distilled results to the orchestrator, which injects them into subsequent LLM calls, enabling a "data-driven" analysis paradigm.

### 7.2. Introducing a Tool Calling Framework

To inject diverse context data into prompt templates in a more general and extensible way, we plan to introduce a "tool calling" framework (a sketch follows below).

- **Concept**: a "tool" is an independent piece of code that fetches one specific kind of data (e.g., financial data, real-time quotes, latest news).
- **Configuration**: in the frontend module configuration UI, each module gets an "available tools" checkbox list alongside its dependencies; users tick the tool(s) the module should call.
- **Execution**:
  1. Before the `report-generator-service` orchestrator executes a module, it checks which tools are enabled in that module's configuration.
  2. The orchestrator runs those tools in order.
  3. Each tool's output (for example, a financial data table formatted as Markdown) is injected into a unified context field.
- **First tool**: the first tool we envision is the **financial data injection tool**. It fetches and formats financial statements, with the same logic as the "core logic refinement" section of an older revision of this document.

This framework decouples data injection from the orchestrator's core logic, making both easier to maintain and extend. **This is long-term planning and out of scope for the current round of work.**
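Purely to make the concept concrete (this framework is out of scope, and every name below is hypothetical), a tool could be modeled as an async trait that the orchestrator runs before a module's LLM call, assuming the `async-trait` crate:

```rust
use async_trait::async_trait;

// Hypothetical sketch; not part of the current scope.
#[async_trait]
pub trait ContextTool: Send + Sync {
    /// Stable identifier ticked in the module's "available tools" list.
    fn id(&self) -> &'static str;
    /// Fetches data for `symbol` and returns it as Markdown for prompt injection.
    async fn run(&self, symbol: &str) -> anyhow::Result<String>;
}

pub struct FinancialDataTool; // the envisioned first tool

#[async_trait]
impl ContextTool for FinancialDataTool {
    fn id(&self) -> &'static str {
        "financial_data"
    }

    async fn run(&self, symbol: &str) -> anyhow::Result<String> {
        // Real logic would fetch statements from data-persistence-service
        // and format them as a Markdown table; stubbed here.
        Ok(format!("| metric | value |\n|---|---|\n| symbol | {symbol} |"))
    }
}
```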

## 8. Implementation Checklist (Step-by-Step To-do List)

The ordered development tasks required to complete this project.

### Phase 1: Data Models and Persistence Layer

- [x] **Task 1.1**: In the `common-contracts` crate, create or update `src/config_models.rs` to define the new `AnalysisTemplateSets`, `AnalysisTemplateSet`, and `AnalysisModuleConfig` data structures.
- [x] **Task 1.2**: In the `common-contracts` crate, create or update `src/dtos.rs` to define the `NewAnalysisResult` data transfer object (DTO) used to write analysis results.
- [x] **Task 1.3**: In `data-persistence-service`, create a new database migration (`migrations/`) adding the `analysis_results` table with the schema defined in Section 5.2 of this document.
- [x] **Task 1.4**: In `data-persistence-service`, implement the CRUD API for the `analysis_results` table (at minimum a `create` method).
- [x] **Task 1.5**: In `data-persistence-service`, implement the seeding logic: on startup, write the hard-coded default analysis template set to the database if it does not already exist.

### Phase 2: Backend Core Logic (`report-generator-service`)

- [x] **Task 2.1**: Add the `petgraph` crate as a dependency of `report-generator-service` for building and processing the dependency graph.
- [x] **Task 2.2**: Refactor `run_report_generation_workflow` in `worker.rs` so it accepts messages carrying a `template_id`.
- [x] **Task 2.3**: In `worker.rs`, **implement a full topological sort** to replace the current naive loop. The algorithm must detect dependency cycles.
- [x] **Task 2.4**: Update the orchestrator so it selects the correct workflow from the fetched `AnalysisTemplateSets` based on `template_id`.
- [x] **Task 2.5**: Implement the call into `data-persistence-service` that persists each module's successfully generated `NewAnalysisResult` into the `analysis_results` table.

### Phase 3: Service Integration and End-to-End Wiring

- [x] **Task 3.1**: Add the `POST /api/v1/analysis-requests/{symbol}` endpoint to `api-gateway`.
- [x] **Task 3.2**: In the new `api-gateway` endpoint, accept the frontend request (including `template_id`) and publish the `GenerateReportCommand` message to NATS.
- [x] **Task 3.3**: In `report-generator-service`, update the NATS consumer so it correctly subscribes to and parses the new `GenerateReportCommand` message.
- [x] **Task 3.4**: Run end-to-end integration tests verifying that a request triggered from the frontend correctly starts `report-generator-service` and executes the full analysis flow (the frontend UI can be ignored at this point).

### Phase 4: Frontend UI

- [x] **Task 4.1**: Restructure the `frontend/src/app/config/page.tsx` page into the two-level management structure, starting with analysis template set management.
- [x] **Task 4.2**: Implement create, rename, and delete for analysis template sets, wired to the corresponding backend APIs.
- [x] **Task 4.3**: Implement the analysis module management view inside a template set, including module create, update (all fields), and delete.
- [x] **Task 4.4**: Make sure the page that launches analyses (e.g., the main query page) lets the user pick which analysis template set to run.
- [x] **Task 4.5**: Update the frontend call into `api-gateway` so the analysis request body carries the user's selected `template_id`.
@@ -0,0 +1,98 @@ (new file)

# Task Document: Configuration Management Refactor: Unified API Credential Management

- **Status**: Active
- **Created**: 2025-11-17
- **Owner**: @AI-Assistant
- **Reviewer**: @lv

---

## 1. Background and Goals

### 1.1. Current Problems

The way the system currently manages API tokens for external services (such as Tushare and Finnhub) has two main problems:

1. **Split configuration mechanisms**:
   - **Sensitive credentials (API tokens)**: injected via **environment variables** at startup. This is secure but inflexible; every change requires a redeploy or restart.
   - **Business logic configuration (AI model selection, etc.)**: managed centrally in the **database** and adjustable from the UI.
   - This split management model adds operational complexity and conflicts with our "single source of truth" architecture principle.

2. **Insufficient service resilience**:
   - Services that depend on environment variables use a fail-fast strategy: if no valid API token is provided at startup, the service crashes immediately.
   - Fail-fast surfaces problems early but is too rigid for a dynamic, long-running system. The desired behavior: when a non-critical configuration value is missing, the service should enter a "degraded" state and automatically resume work once the configuration is available, instead of stopping outright.

### 1.2. Refactoring Goals

This refactor migrates all external-service API token configuration from environment variables into the database, unifying it with business logic configuration. Concretely:

- **Unified configuration source**: make the `system_config` database table the single source of truth for all mutable configuration, including API tokens.
- **Better usability**: let users manage and update all data source API tokens centrally through the frontend UI.
- **Stronger service resilience**: rework the data provider services so that a missing API token no longer crashes them; instead they enter "degraded mode" and recover automatically once a token is supplied.
- **Simpler deployment**: remove the dependency on multiple environment variables, streamlining deployment and operations.

---

## 2. Implementation Plan

The work proceeds in layers, "backend -> services -> frontend", so each step rests on a solid base.

### 2.1. Data Model and Persistence Layer

We extend configuration management by reusing the existing `(config_key, config_value)` storage pattern of the `system_config` table, so it can safely store and retrieve data source configuration.

1. **Define the data structure**: in the `common-contracts` shared library, define a clear `DataSourceConfig` struct describing a data source's configuration, with fields such as `provider_id`, `api_token`, and `api_url` (a sketch follows this list).
2. **Reuse the existing table**: insert one new record into `system_config` with `config_key` fixed to `"data_sources"`; the `config_value` field holds the serialized collection of all data source configurations (a `HashMap<String, DataSourceConfig>`).
3. **Extend the API**: add new HTTP endpoints to `data-persistence-service` for CRUD operations on data source configuration, e.g.:
   - `GET /api/v1/configs/data-sources`: fetch the configuration of all data sources.
   - `PUT /api/v1/configs/data-sources`: create or update the configuration of all data sources.
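A sketch of those shared structures. The field names follow the frontend type definitions and the poller code later in this diff (`provider`, `api_key`, `api_url`, `enabled`) rather than the placeholder names above:

```rust
// common-contracts/src/config_models.rs (sketch)
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum DataSourceProvider {
    Tushare,
    Finnhub,
    Alphavantage,
    Yfinance,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DataSourceConfig {
    pub provider: DataSourceProvider,
    pub api_key: Option<String>,
    pub api_url: Option<String>,
    pub enabled: bool,
}

/// Stored under config_key = "data_sources" in system_config.
pub type DataSourcesConfig = HashMap<String, DataSourceConfig>;
```

Serialized as JSON, this map is what the `PUT /api/v1/configs/data-sources` endpoint accepts and what the poller later consumes.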

### 2.2. Microservice Rework: the "Degrade and Recover" Pattern

This is the heart of the refactor. Every data provider service that depends on an external API token (`finnhub`, `tushare`, `alphavantage`) is reworked as follows (a condensed sketch follows this list; the full implementation appears in the diffs below):

1. **Remove the startup check**: delete the logic in `config.rs` that inspects the environment variable and crashes the process.
2. **Introduce an internal state machine**: each service maintains a state (e.g., `State<ServiceOperationalStatus>`) with two variants, `Active` and `Degraded(reason: String)`.
3. **Dynamic configuration loading**: instead of reading the token from an environment variable, the service spawns an internal **background task** (a poller) that:
   - At startup, and then at a fixed interval (e.g., every 60 seconds), calls the new `data-persistence-service` API to fetch the service's own configuration.
   - If a valid token is obtained, updates the service's internal API client and sets the state to `Active`. In this state the service subscribes to and processes NATS messages normally.
   - If no token is obtained (or the token is empty), sets the state to `Degraded` with a reason (such as "API Token not configured"). In this state the service does **not** subscribe to the NATS queue, so it never accepts tasks it cannot process.
4. **Update the health check**: the service's `/health` endpoint reflects its internal state. In `Degraded` state it returns a corresponding status code and message so monitoring can tell whether the service is currently usable.
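A condensed sketch of the state machine and its health-check mapping; it mirrors the `alphavantage-provider-service` implementation shown later in this diff, with the Axum wiring omitted:

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Clone, Debug)]
pub enum ServiceOperationalStatus {
    Active,
    Degraded { reason: String },
}

pub type SharedStatus = Arc<RwLock<ServiceOperationalStatus>>;

/// Maps the internal state to what /health should report.
pub async fn health_view(status: &SharedStatus) -> (&'static str, String) {
    match &*status.read().await {
        ServiceOperationalStatus::Active => ("ok", "ok".to_string()),
        ServiceOperationalStatus::Degraded { reason } => ("degraded", reason.clone()),
    }
}
```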

### 2.3. Frontend UI

To let users manage these settings conveniently, the frontend changes as follows:

1. **New UI component**: add a "Data Source Configuration" management panel to the `/config` page.
2. **Functionality**: the panel provides a form or list that lets users:
   - View the configuration status of every data source (Tushare, Finnhub, etc.).
   - Enter or update the API token for each data source.
   - Save changes. On save, the frontend calls the new `data-persistence-service` API to persist the updated configuration to the database.

---

## 3. Detailed Task List

### Phase 1: Backend Foundation

- [x] ~~**Task BE-1**: Define the shared `DataSourceConfig` and `DataSourceProvider` data structures in `common-contracts`.~~
- [x] ~~**Task BE-3**: Implement the CRUD business logic for data source configuration in `data-persistence-service`.~~
- [x] ~~**Task BE-4**: Expose the `GET /api/v1/configs/data-sources` and `PUT /api/v1/configs/data-sources` API endpoints in `data-persistence-service`.~~

### Phase 2: Microservice Rework

- [x] ~~**Task SVC-1**: **(Finnhub)** Rework `finnhub-provider-service`:~~
  - [x] ~~Remove the `FINNHUB_API_KEY` environment variable loading logic from `config.rs`.~~
  - [x] ~~Implement the internal state machine (`Active`/`Degraded`) and the dynamic configuration poller.~~
  - [x] ~~Change the `/health` endpoint to reflect the internal state.~~
  - [x] ~~Adjust the NATS subscription logic so the service only subscribes while `Active`.~~
- [x] ~~**Task SVC-2**: **(Tushare)** Apply the same rework to `tushare-provider-service`, using `finnhub-provider-service` as the template.~~
- [x] ~~**Task SVC-3**: **(Alphavantage)** Apply the same rework to `alphavantage-provider-service`, using `finnhub-provider-service` as the template.~~
- [x] ~~**Task SVC-4**: **(Review)** Review the LLM configuration loading in `report-generator-service` to make sure it is conceptually consistent with the new dynamic configuration pattern.~~

### Phase 3: Frontend

- [x] **Task FE-1**: Design and implement the "Data Source Configuration" UI component on the `/config` page.
- [x] **Task FE-2**: Implement the hooks in `useApi.ts` for fetching and updating data source configuration.
- [x] **Task FE-3**: Wire the UI component to the API hooks to complete the frontend functionality.
- [x] **Task FE-4**: Adjust the `/llm-config` page so its UI/UX style is consistent with the new "Data Source Configuration" panel.
frontend/src/app/api/configs/analysis_template_sets/route.ts (new file, +45)

```ts
const BACKEND_BASE = process.env.BACKEND_INTERNAL_URL || process.env.NEXT_PUBLIC_BACKEND_URL;

export async function GET() {
  if (!BACKEND_BASE) {
    return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
  }
  try {
    const resp = await fetch(`${BACKEND_BASE}/configs/analysis_template_sets`, {
      headers: { 'Content-Type': 'application/json' },
      cache: 'no-store',
    });
    const text = await resp.text();
    return new Response(text, {
      status: resp.status,
      headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
    });
  } catch (e: any) {
    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
  }
}

export async function PUT(req: Request) {
  if (!BACKEND_BASE) {
    return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
  }
  const body = await req.text();
  try {
    const resp = await fetch(`${BACKEND_BASE}/configs/analysis_template_sets`, {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body,
    });
    const text = await resp.text();
    return new Response(text, {
      status: resp.status,
      headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
    });
  } catch (e: any) {
    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
  }
}
```
frontend/src/app/api/configs/data_sources/route.ts (new file, +45)

```ts
const BACKEND_BASE = process.env.BACKEND_INTERNAL_URL || process.env.NEXT_PUBLIC_BACKEND_URL;

export async function GET() {
  if (!BACKEND_BASE) {
    return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
  }
  try {
    const resp = await fetch(`${BACKEND_BASE}/configs/data_sources`, {
      headers: { 'Content-Type': 'application/json' },
      cache: 'no-store',
    });
    const text = await resp.text();
    return new Response(text, {
      status: resp.status,
      headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
    });
  } catch (e: any) {
    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
  }
}

export async function PUT(req: Request) {
  if (!BACKEND_BASE) {
    return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
  }
  const body = await req.text();
  try {
    const resp = await fetch(`${BACKEND_BASE}/configs/data_sources`, {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body,
    });
    const text = await resp.text();
    return new Response(text, {
      status: resp.status,
      headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
    });
  } catch (e: any) {
    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
  }
}
```
```diff
@@ -4,6 +4,7 @@ export async function GET() {
   if (!BACKEND_BASE) {
     return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
   }
+  try {
   const resp = await fetch(`${BACKEND_BASE}/configs/llm_providers`, {
     headers: { 'Content-Type': 'application/json' },
     cache: 'no-store',
@@ -13,6 +14,10 @@ export async function GET() {
     status: resp.status,
     headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
   });
+  } catch (e: any) {
+    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
+    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
+  }
 }
 
 export async function PUT(req: Request) {
@@ -20,6 +25,7 @@ export async function PUT(req: Request) {
     return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
   }
   const body = await req.text();
+  try {
   const resp = await fetch(`${BACKEND_BASE}/configs/llm_providers`, {
     method: 'PUT',
     headers: { 'Content-Type': 'application/json' },
@@ -30,5 +36,9 @@ export async function PUT(req: Request) {
     status: resp.status,
     headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
   });
+  } catch (e: any) {
+    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
+    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
+  }
 }
```
```diff
@@ -1,13 +1,18 @@
-const BACKEND_BASE = process.env.NEXT_PUBLIC_BACKEND_URL;
+const BACKEND_BASE = process.env.BACKEND_INTERNAL_URL || process.env.NEXT_PUBLIC_BACKEND_URL;
 
 export async function GET(
   _req: Request,
-  context: { params: Promise<{ provider_id: string }> }
+  context: any
 ) {
   if (!BACKEND_BASE) {
-    return new Response('NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
+    return new Response('BACKEND_INTERNAL_URL/NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
+  }
+  const raw = context?.params;
+  const params = raw && typeof raw.then === 'function' ? await raw : raw;
+  const provider_id = params?.provider_id as string | undefined;
+  if (!provider_id) {
+    return new Response('provider_id 缺失', { status: 400 });
   }
-  const { provider_id } = await context.params;
   const target = `${BACKEND_BASE}/discover-models/${encodeURIComponent(provider_id)}`;
   const resp = await fetch(target, {
     headers: { 'Content-Type': 'application/json' },
```
frontend/src/app/api/discover-models/route.ts (new file, +26)

```ts
const BACKEND_BASE = process.env.BACKEND_INTERNAL_URL || process.env.NEXT_PUBLIC_BACKEND_URL;

export async function POST(req: Request) {
  if (!BACKEND_BASE) {
    return new Response('BACKEND_INTERNAL_URL/NEXT_PUBLIC_BACKEND_URL 未配置', { status: 500 });
  }
  const body = await req.text();
  try {
    const resp = await fetch(`${BACKEND_BASE}/discover-models`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body,
      cache: 'no-store',
    });
    const text = await resp.text();
    return new Response(text, {
      status: resp.status,
      headers: { 'Content-Type': resp.headers.get('Content-Type') || 'application/json' },
    });
  } catch (e: any) {
    const errorBody = JSON.stringify({ message: e?.message || '连接后端失败' });
    return new Response(errorBody, { status: 502, headers: { 'Content-Type': 'application/json' } });
  }
}
```
File diff suppressed because it is too large.
```diff
@@ -5,127 +5,173 @@ import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/com
 import { Input } from "@/components/ui/input";
 import { Button } from "@/components/ui/button";
 import { Badge } from "@/components/ui/badge";
+import { Label } from "@/components/ui/label";
+import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select";
+import { useAnalysisTemplateSets } from "@/hooks/useApi"; // Import the new hook
+import type { AnalysisTemplateSets } from "@/types";
 
 type ReportItem = {
   report_id: string;
   created_at?: number;
   score?: number;
   status?: string;
 };
 
 export default function QueryPage() {
   const [market, setMarket] = useState<"cn" | "us" | "jp">("cn");
-  const [orgId, setOrgId] = useState("AAPL");
+  const [orgId, setOrgId] = useState("600519");
   const [loading, setLoading] = useState(false);
   const [reports, setReports] = useState<ReportItem[]>([]);
   const [msg, setMsg] = useState<string | null>(null);
 
+  // --- New State for Template Selection ---
+  const { data: templateSets } = useAnalysisTemplateSets();
+  const [selectedTemplateId, setSelectedTemplateId] = useState<string>('');
+
+  // Auto-select first template when available
+  useEffect(() => {
+    if (templateSets && Object.keys(templateSets).length > 0 && !selectedTemplateId) {
+      setSelectedTemplateId(Object.keys(templateSets)[0]);
+    }
+  }, [templateSets, selectedTemplateId]);
+
   async function loadReports() {
     if (!market || !orgId) return;
     setLoading(true);
     try {
+      // This API seems deprecated, but we keep the logic for now.
+      // In a real scenario, this would query the new `analysis_results` table.
       const res = await fetch(`/api/orgs/${market}/${orgId}/reports`);
       const data = await res.json();
       setReports(data.reports ?? []);
     } catch (e) {
-      setMsg("加载失败");
+      setMsg("加载历史报告失败");
     } finally {
       setLoading(false);
     }
   }
 
   async function triggerGenerate() {
-    if (!market || !orgId) return;
-    setMsg("已触发生成任务…");
-    try {
-      const res = await fetch(`/api/orgs/${market}/${orgId}/reports/generate`, {
-        method: "POST",
-      });
-      const data = await res.json();
-      if (data.queued) {
-        setMsg("生成任务已入队,稍后自动出现在列表中");
-        // 简单轮询刷新
-        setTimeout(loadReports, 1500);
-      } else {
-        setMsg("触发失败");
-      }
-    } catch {
-      setMsg("触发失败");
-    }
-  }
+    if (!orgId || !selectedTemplateId) {
+      setMsg("企业ID和分析模板不能为空");
+      return;
+    }
+    setMsg("已触发生成任务…");
+    try {
+      const res = await fetch(`/api/analysis-requests/${orgId}`, {
+        method: "POST",
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({ template_id: selectedTemplateId }),
+      });
+      const data = await res.json();
+      if (res.status === 202 && data.request_id) {
+        setMsg(`生成任务已入队 (Request ID: ${data.request_id}),请稍后查询结果。`);
+        // Simple polling to refresh history
+        setTimeout(loadReports, 3000);
+      } else {
+        const errorMsg = data.error || "触发失败,未知错误。";
+        setMsg(`触发失败: ${errorMsg}`);
+      }
+    } catch (e: any) {
+      setMsg(`触发失败: ${e.message || "网络请求错误"}`);
+    }
+  }
 
   useEffect(() => {
     loadReports();
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
 
   return (
     <div className="space-y-6">
       <header className="space-y-2">
-        <h1 className="text-2xl font-semibold">统一查询</h1>
-        <p className="text-sm text-muted-foreground">输入企业ID与市场,查询历史报告并触发新报告生成。</p>
+        <h1 className="text-2xl font-semibold">统一查询与分析</h1>
+        <p className="text-sm text-muted-foreground">输入企业ID,选择分析模板,触发新报告生成或查询历史报告。</p>
       </header>
 
       <Card>
         <CardHeader>
-          <CardTitle>查询条件</CardTitle>
-          <CardDescription>选择市场并输入企业ID(如 us:AAPL / cn:600519)</CardDescription>
+          <CardTitle>查询与分析</CardTitle>
+          <CardDescription>选择市场、企业ID和分析模板来启动一个新的分析任务。</CardDescription>
         </CardHeader>
-        <CardContent className="space-y-3">
-          <div className="flex gap-2">
-            <select
-              className="border rounded px-2 py-1 bg-background"
-              value={market}
-              onChange={(e) => setMarket(e.target.value as "cn" | "us" | "jp")}
-            >
-              <option value="cn">中国(cn)</option>
-              <option value="us">美国(us)</option>
-              <option value="jp">日本(jp)</option>
-            </select>
-            <Input
-              value={orgId}
-              onChange={(e) => setOrgId(e.target.value)}
-              placeholder="输入企业ID,如 AAPL / 600519"
-            />
-            <Button onClick={loadReports} disabled={loading}>查询</Button>
-            <Button onClick={triggerGenerate} variant="secondary">触发生成</Button>
-          </div>
-          {msg && <p className="text-xs text-muted-foreground">{msg}</p>}
-        </CardContent>
-      </Card>
+        <CardContent className="space-y-4">
+          <div className="grid grid-cols-1 md:grid-cols-3 gap-4">
+            <div className="space-y-2">
+              <Label>市场</Label>
+              <Select value={market} onValueChange={(v) => setMarket(v as any)}>
+                <SelectTrigger>
+                  <SelectValue />
+                </SelectTrigger>
+                <SelectContent>
+                  <SelectItem value="cn">中国 (cn)</SelectItem>
+                  <SelectItem value="us">美国 (us)</SelectItem>
+                  <SelectItem value="jp">日本 (jp)</SelectItem>
+                </SelectContent>
+              </Select>
+            </div>
+            <div className="space-y-2">
+              <Label>企业ID</Label>
+              <Input
+                value={orgId}
+                onChange={(e) => setOrgId(e.target.value)}
+                placeholder="输入企业ID,如 600519"
+              />
+            </div>
+            <div className="space-y-2">
+              <Label>分析模板</Label>
+              <Select value={selectedTemplateId} onValueChange={setSelectedTemplateId}>
+                <SelectTrigger>
+                  <SelectValue placeholder="选择一个分析模板..." />
+                </SelectTrigger>
+                <SelectContent>
+                  {templateSets && Object.entries(templateSets).map(([id, set]) => (
+                    <SelectItem key={id} value={id}>{set.name}</SelectItem>
+                  ))}
+                </SelectContent>
+              </Select>
+            </div>
+          </div>
+          <div className="flex gap-2">
+            <Button onClick={loadReports} disabled={loading}>查询历史</Button>
+            <Button onClick={triggerGenerate} variant="secondary" disabled={!selectedTemplateId}>触发生成</Button>
+          </div>
+          {msg && <p className="text-xs text-muted-foreground pt-2">{msg}</p>}
+        </CardContent>
+      </Card>
 
       <Card>
         <CardHeader>
           <CardTitle>历史报告</CardTitle>
           <CardDescription>最新在前</CardDescription>
         </CardHeader>
         <CardContent className="grid gap-3 sm:grid-cols-2 lg:grid-cols-3">
           {loading && <p>加载中…</p>}
           {!loading && reports.length === 0 && <p>暂无报告</p>}
           {reports.map((r) => (
             <div key={r.report_id} className="border rounded p-3 space-y-2">
               <div className="flex items-center justify-between">
                 <span className="font-mono text-sm">#{r.report_id}</span>
                 <Badge variant={r.status === "done" ? "secondary" : "outline"}>{r.status ?? "unknown"}</Badge>
               </div>
               <div className="text-sm text-muted-foreground">
                 {r.created_at ? new Date(r.created_at * 1000).toLocaleString() : "-"}
               </div>
               <div className="text-sm">评分:{r.score ?? "-"}</div>
               <div className="flex gap-2">
                 <a
                   className="text-xs underline"
                   href={`/api/reports/${r.report_id}?market=${market}&org_id=${orgId}`}
                   target="_blank"
                   rel="noreferrer"
                 >
                   查看JSON
                 </a>
               </div>
             </div>
           ))}
         </CardContent>
       </Card>
     </div>
   );
 }
```
```diff
@@ -6,7 +6,9 @@ import {
   AnalysisConfigResponse,
   LlmProvidersConfig,
   AnalysisModulesConfig,
+  AnalysisTemplateSets, // New type
   FinancialConfigResponse,
+  DataSourcesConfig,
 } from "@/types";
 import { useEffect, useState } from "react";
 // Execution-step types not used currently; keep API minimal and explicit
@@ -335,7 +337,36 @@ export async function discoverProviderModels(providerId: string) {
   return res.json();
 }
 
-// --- Analysis Modules Config Hooks ---
+export async function discoverProviderModelsPreview(apiBaseUrl: string, apiKey: string) {
+  const res = await fetch(`/api/discover-models`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ api_base_url: apiBaseUrl, api_key: apiKey }),
+  });
+  return res.json();
+}
+
+// --- Analysis Template Sets Config Hooks (NEW) ---
+
+export function useAnalysisTemplateSets() {
+  return useSWR<AnalysisTemplateSets>('/api/configs/analysis_template_sets', fetcher);
+}
+
+export async function updateAnalysisTemplateSets(payload: AnalysisTemplateSets) {
+  const res = await fetch('/api/configs/analysis_template_sets', {
+    method: 'PUT',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify(payload),
+  });
+  if (!res.ok) {
+    const text = await res.text().catch(() => '');
+    throw new Error(text || `HTTP ${res.status}`);
+  }
+  return res.json() as Promise<AnalysisTemplateSets>;
+}
+
+// --- Analysis Modules Config Hooks (OLD - DEPRECATED) ---
 
 export function useAnalysisModules() {
   return useSWR<AnalysisModulesConfig>('/api/configs/analysis_modules', fetcher);
@@ -353,3 +384,22 @@ export async function updateAnalysisModules(payload: AnalysisModulesConfig) {
   }
   return res.json() as Promise<AnalysisModulesConfig>;
 }
+
+// --- Data Sources Config Hooks ---
+
+export function useDataSourcesConfig() {
+  return useSWR<DataSourcesConfig>('/api/configs/data_sources', fetcher);
+}
+
+export async function updateDataSourcesConfig(payload: DataSourcesConfig) {
+  const res = await fetch('/api/configs/data_sources', {
+    method: 'PUT',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify(payload),
+  });
+  if (!res.ok) {
+    const text = await res.text().catch(() => '');
+    throw new Error(text || `HTTP ${res.status}`);
+  }
+  return res.json() as Promise<DataSourcesConfig>;
+}
```
```diff
@@ -498,3 +498,44 @@ export interface AnalysisModuleConfig {
 
 /** 分析模块配置集合:键为 module_id(如 bull_case) */
 export type AnalysisModulesConfig = Record<string, AnalysisModuleConfig>;
+
+// ============================================================================
+// 分析模板集配置类型 (NEW)
+// ============================================================================
+
+/**
+ * 单个分析模板集,代表一套完整的分析流程
+ * e.g., "Standard Fundamental Analysis"
+ */
+export interface AnalysisTemplateSet {
+  /** 人类可读的模板名称, e.g., "标准基本面分析" */
+  name: string;
+  /**
+   * 该模板集包含的所有分析模块
+   * Key: 模块ID (e.g., "fundamental_analysis")
+   */
+  modules: Record<string, AnalysisModuleConfig>;
+}
+
+/**
+ * 整个系统的分析模板配置,作为顶级对象
+ * Key: 模板ID (e.g., "standard_fundamentals")
+ */
+export type AnalysisTemplateSets = Record<string, AnalysisTemplateSet>;
+
+// ============================================================================
+// 数据源配置类型(与后端 common-contracts 配置保持结构一致)
+// ============================================================================
+
+export type DataSourceProvider = 'tushare' | 'finnhub' | 'alphavantage' | 'yfinance';
+
+export interface DataSourceConfig {
+  provider: DataSourceProvider;
+  api_key?: string | null;
+  api_url?: string | null;
+  enabled: boolean;
+}
+
+export type DataSourcesConfig = Record<string, DataSourceConfig>;
```
```diff
@@ -6,7 +6,7 @@ use axum::{
     Router,
 };
 use common_contracts::observability::{HealthStatus, ServiceStatus, TaskProgress};
-use crate::state::AppState;
+use crate::state::{AppState, ServiceOperationalStatus};
 
 pub fn create_router(app_state: AppState) -> Router {
     Router::new()
@@ -17,14 +17,22 @@ pub fn create_router(app_state: AppState) -> Router {
 
 /// [GET /health]
 /// Provides the current health status of the module.
-async fn health_check(State(_state): State<AppState>) -> Json<HealthStatus> {
+async fn health_check(State(state): State<AppState>) -> Json<HealthStatus> {
     let mut details = HashMap::new();
-    // In a real scenario, we would check connections to the message bus, etc.
-    details.insert("message_bus_connection".to_string(), "ok".to_string());
+    let operational_status = state.status.read().await;
+
+    let (service_status, reason) = match &*operational_status {
+        ServiceOperationalStatus::Active => (ServiceStatus::Ok, "ok".to_string()),
+        ServiceOperationalStatus::Degraded { reason } => {
+            (ServiceStatus::Degraded, reason.clone())
+        }
+    };
+
+    details.insert("operational_status".to_string(), reason);
+
     let status = HealthStatus {
         module_id: "alphavantage-provider-service".to_string(),
-        status: ServiceStatus::Ok,
+        status: service_status,
         version: env!("CARGO_PKG_VERSION").to_string(),
         details,
     };
```
```diff
@@ -1,12 +1,12 @@
 use secrecy::SecretString;
 use serde::Deserialize;
 
-#[derive(Deserialize, Debug)]
+#[derive(Debug, Deserialize, Clone)]
 pub struct AppConfig {
     pub server_port: u16,
     pub nats_addr: String,
-    pub alphavantage_api_key: SecretString,
     pub data_persistence_service_url: String,
+    pub alphavantage_api_key: Option<SecretString>,
 }
 
 impl AppConfig {
@@ -15,6 +15,14 @@ impl AppConfig {
             .add_source(config::Environment::default().separator("__"))
             .build()?;
 
-        config.try_deserialize()
+        let cfg: AppConfig = config.try_deserialize()?;
+
+        if cfg.data_persistence_service_url.trim().is_empty() {
+            return Err(config::ConfigError::Message(
+                "DATA_PERSISTENCE_SERVICE_URL must not be empty".to_string(),
+            ));
+        }
+
+        Ok(cfg)
     }
 }
```
services/alphavantage-provider-service/src/config_poller.rs (new file, +56)

```rust
use crate::error::Result;
use crate::state::AppState;
use common_contracts::config_models::{DataSourceConfig, DataSourceProvider};
use secrecy::SecretString;
use std::collections::HashMap;
use std::time::Duration;
use tracing::{error, info, instrument};

const POLLING_INTERVAL_SECONDS: u64 = 60;

#[instrument(skip(state))]
pub async fn run_config_poller(state: AppState) {
    info!("Starting configuration poller...");
    let mut interval = tokio::time::interval(Duration::from_secs(POLLING_INTERVAL_SECONDS));
    interval.tick().await; // Initial tick happens immediately

    loop {
        if let Err(e) = poll_and_update_config(&state).await {
            error!("Failed to poll and update config: {:?}", e);
        }
        interval.tick().await;
    }
}

async fn poll_and_update_config(state: &AppState) -> Result<()> {
    info!("Polling for data source configurations...");
    let client = reqwest::Client::new();
    let url = format!(
        "{}/configs/data_sources",
        state.config.data_persistence_service_url
    );

    let response = client.get(&url).send().await?;
    response.error_for_status_ref()?;

    let configs: HashMap<String, DataSourceConfig> = response.json().await?;

    let alphavantage_config = configs.values().find(|cfg| {
        matches!(cfg.provider, DataSourceProvider::Alphavantage) && cfg.enabled
    });

    if let Some(config) = alphavantage_config {
        if let Some(api_key) = &config.api_key {
            state.update_provider(Some(SecretString::from(api_key.clone()))).await;
            info!("Successfully updated Alphavantage provider with new configuration.");
        } else {
            state.update_provider(None).await;
            info!("Alphavantage provider is enabled but API key is missing. Service is degraded.");
        }
    } else {
        state.update_provider(None).await;
        info!("No enabled Alphavantage configuration found. Service is degraded.");
    }

    Ok(())
}
```
```diff
@@ -1,40 +1,41 @@
+use anyhow::anyhow;
+use reqwest::Error as ReqwestError;
 use thiserror::Error;
 
-pub type Result<T> = std::result::Result<T, AppError>;
-
 #[derive(Error, Debug)]
 pub enum AppError {
     #[error("Configuration error: {0}")]
     Configuration(String),
 
-    #[error("Message bus error: {0}")]
-    MessageBus(#[from] async_nats::Error),
-
-    #[error("Message bus publish error: {0}")]
-    MessageBusPublish(#[from] async_nats::PublishError),
-
-    #[error("Message bus subscribe error: {0}")]
-    MessageBusSubscribe(String),
-
-    #[error("Message bus connect error: {0}")]
-    MessageBusConnect(String),
-
-    #[error("HTTP request to another service failed: {0}")]
-    ServiceRequest(#[from] reqwest::Error),
-
     #[error("Data parsing error: {0}")]
     DataParsing(#[from] anyhow::Error),
+
+    #[error("Internal error: {0}")]
+    Internal(String),
+
+    #[error("Provider not available: {0}")]
+    ProviderNotAvailable(String),
+
+    #[error(transparent)]
+    Reqwest(#[from] ReqwestError),
+
+    #[error(transparent)]
+    Nats(#[from] async_nats::Error),
+
+    #[error(transparent)]
+    NatsSubscribe(#[from] async_nats::client::SubscribeError),
+
+    #[error(transparent)]
+    NatsUnsubscribe(#[from] async_nats::UnsubscribeError),
+
+    #[error(transparent)]
+    NatsPublish(#[from] async_nats::error::Error<async_nats::client::PublishErrorKind>),
 }
 
-// 手动实现针对 async-nats 泛型错误类型的 From 转换
-impl From<async_nats::error::Error<async_nats::ConnectErrorKind>> for AppError {
-    fn from(err: async_nats::error::Error<async_nats::ConnectErrorKind>) -> Self {
-        AppError::MessageBusConnect(err.to_string())
-    }
-}
-
-impl From<async_nats::SubscribeError> for AppError {
-    fn from(err: async_nats::SubscribeError) -> Self {
-        AppError::MessageBusSubscribe(err.to_string())
-    }
-}
+impl From<config::ConfigError> for AppError {
+    fn from(e: config::ConfigError) -> Self {
+        AppError::Configuration(e.to_string())
+    }
+}
+
+pub type Result<T, E = AppError> = std::result::Result<T, E>;
```
```diff
@@ -7,6 +7,7 @@ mod persistence;
 mod state;
 mod worker;
 mod av_client;
+mod config_poller;
 
 use crate::config::AppConfig;
 use crate::error::Result;
@@ -29,6 +30,9 @@ async fn main() -> Result<()> {
     // Initialize application state
     let app_state = AppState::new(config)?;
 
+    // --- Start the config poller ---
+    tokio::spawn(config_poller::run_config_poller(app_state.clone()));
+
     // Create the Axum router
     let app = api::create_router(app_state.clone());
 
```
@ -1,26 +1,57 @@
|
|||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
use crate::state::AppState;
|
use crate::state::{AppState, ServiceOperationalStatus};
|
||||||
use common_contracts::messages::FetchCompanyDataCommand;
|
use common_contracts::messages::FetchCompanyDataCommand;
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
use tracing::{error, info};
|
use std::time::Duration;
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
|
||||||
const SUBJECT_NAME: &str = "data_fetch_commands";
|
const SUBJECT_NAME: &str = "data_fetch_commands";
|
||||||
|
|
||||||
pub async fn run(state: AppState) -> Result<()> {
|
pub async fn run(state: AppState) -> Result<()> {
|
||||||
info!("Starting NATS message consumer...");
|
info!("Starting NATS message consumer...");
|
||||||
|
|
||||||
let client = async_nats::connect(&state.config.nats_addr).await?;
|
loop {
|
||||||
info!("Connected to NATS.");
|
let status = state.status.read().await.clone();
|
||||||
|
if let ServiceOperationalStatus::Degraded { reason } = status {
|
||||||
|
warn!(
|
||||||
|
"Service is in degraded state (reason: {}). Pausing message consumption for 30s.",
|
||||||
|
reason
|
||||||
|
);
|
||||||
|
tokio::time::sleep(Duration::from_secs(30)).await;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
// This is a simple subscriber. For production, consider JetStream for durability.
|
info!("Service is Active. Connecting to NATS...");
|
||||||
|
match async_nats::connect(&state.config.nats_addr).await {
|
||||||
|
Ok(client) => {
|
||||||
|
info!("Successfully connected to NATS.");
|
||||||
|
if let Err(e) = subscribe_and_process(state.clone(), client).await {
|
||||||
|
error!("NATS subscription error: {}. Reconnecting in 10s...", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to connect to NATS: {}. Retrying in 10s...", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tokio::time::sleep(Duration::from_secs(10)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn subscribe_and_process(state: AppState, client: async_nats::Client) -> Result<()> {
|
||||||
let mut subscriber = client.subscribe(SUBJECT_NAME.to_string()).await?;
|
let mut subscriber = client.subscribe(SUBJECT_NAME.to_string()).await?;
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
"Consumer started, waiting for messages on subject '{}'",
|
"Consumer started, waiting for messages on subject '{}'",
|
||||||
SUBJECT_NAME
|
SUBJECT_NAME
|
||||||
);
|
);
|
||||||
|
|
||||||
while let Some(message) = subscriber.next().await {
|
while let Some(message) = subscriber.next().await {
|
||||||
|
let current_status = state.status.read().await.clone();
|
||||||
|
if matches!(current_status, ServiceOperationalStatus::Degraded {..}) {
|
||||||
|
warn!("Service became degraded. Disconnecting from NATS and pausing consumption.");
|
||||||
|
subscriber.unsubscribe().await?;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
info!("Received NATS message.");
|
info!("Received NATS message.");
|
||||||
let state_clone = state.clone();
|
let state_clone = state.clone();
|
||||||
let publisher_clone = client.clone();
|
let publisher_clone = client.clone();
|
||||||
@ -42,6 +73,5 @@ pub async fn run(state: AppState) -> Result<()> {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@@ -1,23 +1,74 @@
-use std::sync::Arc;
+use crate::av_client::AvClient;
+use crate::config::AppConfig;
 use common_contracts::observability::TaskProgress;
 use dashmap::DashMap;
+use secrecy::{ExposeSecret, SecretString};
+use std::sync::Arc;
+use tokio::sync::RwLock;
 use uuid::Uuid;
-use crate::config::AppConfig;
-use crate::error::Result;
 
-pub type TaskStore = Arc<DashMap<Uuid, TaskProgress>>;
+#[derive(Clone, Debug)]
+pub enum ServiceOperationalStatus {
+    Active,
+    Degraded { reason: String },
+}
 
 #[derive(Clone)]
 pub struct AppState {
+    pub tasks: Arc<DashMap<Uuid, TaskProgress>>,
     pub config: Arc<AppConfig>,
-    pub tasks: TaskStore,
+    pub status: Arc<RwLock<ServiceOperationalStatus>>,
+    av_provider: Arc<RwLock<Option<Arc<AvClient>>>>,
 }
 
 impl AppState {
-    pub fn new(config: AppConfig) -> Result<Self> {
+    pub fn new(config: AppConfig) -> Result<Self, anyhow::Error> {
+        let initial_status = if config.alphavantage_api_key.is_some() {
+            ServiceOperationalStatus::Degraded { reason: "Initializing provider, waiting for config poller to connect.".to_string() }
+        } else {
+            ServiceOperationalStatus::Degraded { reason: "Alphavantage API Key is not configured.".to_string() }
+        };
+
         Ok(Self {
-            config: Arc::new(config),
             tasks: Arc::new(DashMap::new()),
+            config: Arc::new(config),
+            status: Arc::new(RwLock::new(initial_status)),
+            av_provider: Arc::new(RwLock::new(None)),
         })
     }
 
+    pub async fn get_provider(&self) -> Option<Arc<AvClient>> {
+        self.av_provider.read().await.clone()
+    }
+
+    pub async fn update_provider(&self, api_key: Option<SecretString>) {
+        let mut provider_guard = self.av_provider.write().await;
+        let mut status_guard = self.status.write().await;
+
+        if let Some(key) = api_key {
+            let mcp_endpoint = format!(
+                "https://mcp.alphavantage.co/mcp?apikey={}",
+                key.expose_secret()
+            );
+            match AvClient::connect(&mcp_endpoint).await {
+                Ok(new_provider) => {
+                    *provider_guard = Some(Arc::new(new_provider));
+                    *status_guard = ServiceOperationalStatus::Active;
+                }
+                Err(e) => {
+                    *provider_guard = None;
+                    *status_guard = ServiceOperationalStatus::Degraded {
+                        reason: format!("Failed to connect to Alphavantage: {}", e),
+                    };
+                }
+            }
+        } else {
+            *provider_guard = None;
+            *status_guard = ServiceOperationalStatus::Degraded {
+                reason: "Alphavantage API Key is not configured.".to_string(),
+            };
+        }
+    }
 }
+
+pub type TaskStore = DashMap<Uuid, TaskProgress>;
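A note on the locking in `update_provider`: it takes the `av_provider` and `status` write locks together before mutating either, so a reader can never observe an `Active` status while the provider slot is still empty. `get_provider` hands out a cheap `Arc` clone, so tasks already in flight keep a working client even if the poller swaps or drops the provider mid-task.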
@@ -1,4 +1,4 @@
-use crate::error::Result;
+use crate::error::{Result, AppError};
 use crate::mapping::{CombinedFinancials, parse_company_profile, parse_financials, parse_realtime_quote};
 use crate::persistence::PersistenceClient;
 use crate::state::{AppState, TaskStore};
@@ -30,9 +30,21 @@ pub async fn handle_fetch_command(
     };
     state.tasks.insert(command.request_id, task);
 
-    let api_key = state.config.alphavantage_api_key.expose_secret();
-    let mcp_endpoint = format!("https://mcp.alphavantage.co/mcp?apikey={}", api_key);
-    let client = Arc::new(AvClient::connect(&mcp_endpoint).await?);
+    let client = match state.get_provider().await {
+        Some(p) => p,
+        None => {
+            let reason = "Execution failed: Alphavantage provider is not available (misconfigured).".to_string();
+            error!("{}", reason);
+            update_task_progress(
+                &state.tasks,
+                command.request_id,
+                100,
+                &reason,
+            ).await;
+            return Err(AppError::ProviderNotAvailable(reason));
+        }
+    };
 
     let persistence_client =
         PersistenceClient::new(state.config.data_persistence_service_url.clone());
     let symbol = command.symbol.clone();
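Worth noting: the handler drives the task to 100% with the failure reason before returning `ProviderNotAvailable`, so a poll of the gateway's `/tasks/{request_id}` endpoint reports a terminal, explained state rather than a request stuck at 0% when the key was never configured.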
services/api-gateway/Cargo.lock (generated, 1 line changed)
@@ -2997,6 +2997,7 @@ dependencies = [
  "tower",
  "tower-layer",
  "tower-service",
+ "tracing",
 ]
 
 [[package]]
@@ -7,7 +7,7 @@ edition = "2024"
 # Web Service
 axum = "0.8.7"
 tokio = { version = "1", features = ["full"] }
-tower-http = { version = "0.6.6", features = ["cors"] }
+tower-http = { version = "0.6.6", features = ["cors", "trace"] }
 
 # Shared Contracts
 common-contracts = { path = "../common-contracts" }
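The added `trace` feature is what makes `tower_http::trace::TraceLayer` available; it pairs with the `.layer(TraceLayer::new_for_http())` call added to the gateway's main.rs later in this diff, and the `"tracing"` entry in the regenerated Cargo.lock above is its knock-on effect.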
@@ -3,20 +3,12 @@ FROM rust:1.90 as builder
 
 WORKDIR /usr/src/app
 
-# Pre-build dependencies to leverage Docker layer caching
+# Deterministic dependency caching without shipping a stub binary
 COPY ./services/common-contracts /usr/src/app/services/common-contracts
 COPY ./services/api-gateway/Cargo.toml ./services/api-gateway/Cargo.lock* ./services/api-gateway/
 
 WORKDIR /usr/src/app/services/api-gateway
-RUN mkdir -p src && \
-    echo "fn main() {}" > src/main.rs && \
-    cargo build --release --bin api-gateway
+# Copy the full source code and build the final binary
 
-# Copy the full source code
 COPY ./services/api-gateway /usr/src/app/services/api-gateway
 
-# Build the application
-WORKDIR /usr/src/app/services/api-gateway
 RUN cargo build --release --bin api-gateway
 
 # 2. Runtime Stage
@@ -25,7 +17,10 @@ FROM debian:bookworm-slim
 # Set timezone
 ENV TZ=Asia/Shanghai
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates libssl3 && rm -rf /var/lib/apt/lists/*
+# Install minimal runtime deps:
+# - ca-certificates/libssl3: TLS support for outbound HTTPS
+# - curl: required for container healthcheck defined in docker-compose.yml
+RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates libssl3 curl && rm -rf /var/lib/apt/lists/*
 
 # Copy the built binary from the builder stage
 COPY --from=builder /usr/src/app/services/api-gateway/target/release/api-gateway /usr/local/bin/
@@ -7,7 +7,7 @@ use axum::{
     routing::{get, post},
     Router,
 };
-use common_contracts::messages::FetchCompanyDataCommand;
+use common_contracts::messages::{FetchCompanyDataCommand, GenerateReportCommand};
 use common_contracts::observability::{HealthStatus, ServiceStatus, TaskProgress};
 use futures_util::future::join_all;
 use serde::{Deserialize, Serialize};
@@ -16,6 +16,7 @@ use tracing::{info, warn};
 use uuid::Uuid;
 
 const DATA_FETCH_QUEUE: &str = "data_fetch_commands";
+const ANALYSIS_COMMANDS_QUEUE: &str = "analysis.commands.generate_report";
 
 // --- Request/Response Structs ---
 #[derive(Deserialize)]
@@ -29,6 +30,11 @@ pub struct RequestAcceptedResponse {
     pub request_id: Uuid,
 }
 
+#[derive(Deserialize)]
+pub struct AnalysisRequest {
+    pub template_id: String,
+}
+
 // --- Router Definition ---
 pub fn create_router(app_state: AppState) -> Router {
     Router::new()
@@ -41,13 +47,28 @@ pub fn create_router(app_state: AppState) -> Router {
 fn create_v1_router() -> Router<AppState> {
     Router::new()
         .route("/data-requests", post(trigger_data_fetch))
+        .route(
+            "/analysis-requests/{symbol}",
+            post(trigger_analysis_generation),
+        )
         .route("/companies/{symbol}/profile", get(get_company_profile))
         .route("/tasks/{request_id}", get(get_task_progress))
         // --- New Config Routes ---
-        .route("/configs/llm_providers", get(get_llm_providers_config).put(update_llm_providers_config))
-        .route("/configs/analysis_modules", get(get_analysis_modules_config).put(update_analysis_modules_config))
-        // --- New Discover Route ---
+        .route(
+            "/configs/llm_providers",
+            get(get_llm_providers_config).put(update_llm_providers_config),
+        )
+        .route(
+            "/configs/analysis_template_sets",
+            get(get_analysis_template_sets).put(update_analysis_template_sets),
+        )
+        .route(
+            "/configs/data_sources",
+            get(get_data_sources_config).put(update_data_sources_config),
+        )
+        // --- New Discover Routes ---
         .route("/discover-models/{provider_id}", get(discover_models))
+        .route("/discover-models", post(discover_models_preview))
 }
 
 // --- Health & Stateless Tasks ---
@@ -100,6 +121,36 @@ async fn trigger_data_fetch(
     ))
 }
 
+/// [POST /v1/analysis-requests/:symbol]
+/// Triggers the analysis report generation workflow by publishing a command.
+async fn trigger_analysis_generation(
+    State(state): State<AppState>,
+    Path(symbol): Path<String>,
+    Json(payload): Json<AnalysisRequest>,
+) -> Result<impl IntoResponse> {
+    let request_id = Uuid::new_v4();
+    let command = GenerateReportCommand {
+        request_id,
+        symbol,
+        template_id: payload.template_id,
+    };
+
+    info!(request_id = %request_id, "Publishing analysis generation command");
+
+    state
+        .nats_client
+        .publish(
+            ANALYSIS_COMMANDS_QUEUE.to_string(),
+            serde_json::to_vec(&command).unwrap().into(),
+        )
+        .await?;
+
+    Ok((
+        StatusCode::ACCEPTED,
+        Json(RequestAcceptedResponse { request_id }),
+    ))
+}
+
 /// [GET /v1/companies/:symbol/profile]
 /// Queries the persisted company profile from the data-persistence-service.
 async fn get_company_profile(
@@ -159,7 +210,9 @@ async fn get_task_progress(
 
 // --- Config API Handlers (Proxy to data-persistence-service) ---
 
-use common_contracts::config_models::{LlmProvidersConfig, AnalysisModulesConfig};
+use common_contracts::config_models::{
+    AnalysisTemplateSets, DataSourcesConfig, LlmProvidersConfig,
+};
 
 /// [GET /v1/configs/llm_providers]
 async fn get_llm_providers_config(
@@ -178,20 +231,43 @@ async fn update_llm_providers_config(
     Ok(Json(updated_config))
 }
 
-/// [GET /v1/configs/analysis_modules]
-async fn get_analysis_modules_config(
+/// [GET /v1/configs/analysis_template_sets]
+async fn get_analysis_template_sets(
     State(state): State<AppState>,
 ) -> Result<impl IntoResponse> {
-    let config = state.persistence_client.get_analysis_modules_config().await?;
+    let config = state
+        .persistence_client
+        .get_analysis_template_sets()
+        .await?;
     Ok(Json(config))
 }
 
-/// [PUT /v1/configs/analysis_modules]
-async fn update_analysis_modules_config(
+/// [PUT /v1/configs/analysis_template_sets]
+async fn update_analysis_template_sets(
     State(state): State<AppState>,
-    Json(payload): Json<AnalysisModulesConfig>,
+    Json(payload): Json<AnalysisTemplateSets>,
 ) -> Result<impl IntoResponse> {
-    let updated_config = state.persistence_client.update_analysis_modules_config(&payload).await?;
+    let updated_config = state
+        .persistence_client
+        .update_analysis_template_sets(&payload)
+        .await?;
+    Ok(Json(updated_config))
+}
+
+/// [GET /v1/configs/data_sources]
+async fn get_data_sources_config(
+    State(state): State<AppState>,
+) -> Result<impl IntoResponse> {
+    let config = state.persistence_client.get_data_sources_config().await?;
+    Ok(Json(config))
+}
+
+/// [PUT /v1/configs/data_sources]
+async fn update_data_sources_config(
+    State(state): State<AppState>,
+    Json(payload): Json<DataSourcesConfig>,
+) -> Result<impl IntoResponse> {
+    let updated_config = state.persistence_client.update_data_sources_config(&payload).await?;
     Ok(Json(updated_config))
 }
 
@@ -200,11 +276,13 @@ async fn discover_models(
     State(state): State<AppState>,
     Path(provider_id): Path<String>,
 ) -> Result<impl IntoResponse> {
+    info!("discover_models: provider_id={}", provider_id);
     let providers = state.persistence_client.get_llm_providers_config().await?;
 
     if let Some(provider) = providers.get(&provider_id) {
         let client = reqwest::Client::new();
         let url = format!("{}/models", provider.api_base_url.trim_end_matches('/'));
+        info!("discover_models: target_url={} (provider_id={})", url, provider_id);
 
         let response = client
             .get(&url)
@@ -215,10 +293,7 @@ async fn discover_models(
         if !response.status().is_success() {
             let status = response.status();
             let error_text = response.text().await?;
-            warn!(
-                "Failed to discover models for provider '{}'. Status: {}, Body: {}",
-                provider_id, status, error_text
-            );
+            warn!("discover_models failed: provider_id={} status={} body={}", provider_id, status, error_text);
             // Return a structured error to the frontend
             return Ok((
                 StatusCode::BAD_GATEWAY,
@@ -232,9 +307,52 @@ async fn discover_models(
         let models_json: serde_json::Value = response.json().await?;
         Ok((StatusCode::OK, Json(models_json)).into_response())
     } else {
+        warn!("discover_models: provider not found: {}", provider_id);
        Ok((
             StatusCode::NOT_FOUND,
             Json(serde_json::json!({ "error": "Provider not found" })),
         ).into_response())
     }
 }
+
+#[derive(Deserialize)]
+struct DiscoverPreviewRequest {
+    api_base_url: String,
+    api_key: String,
+}
+
+/// [POST /v1/discover-models]
+/// Preview discovery without persisting provider configuration.
+async fn discover_models_preview(
+    Json(payload): Json<DiscoverPreviewRequest>,
+) -> Result<impl IntoResponse> {
+    let redacted_key = if payload.api_key.is_empty() { "<empty>" } else { "<redacted>" };
+    info!(
+        "discover_models_preview: target_url={}/models api_key={}",
+        payload.api_base_url.trim_end_matches('/'),
+        redacted_key
+    );
+    let client = reqwest::Client::new();
+    let url = format!("{}/models", payload.api_base_url.trim_end_matches('/'));
+    let response = client
+        .get(&url)
+        .bearer_auth(&payload.api_key)
+        .send()
+        .await?;
+
+    if !response.status().is_success() {
+        let status = response.status();
+        let error_text = response.text().await?;
+        warn!("discover_models_preview failed: status={} body={}", status, error_text);
+        return Ok((
+            StatusCode::BAD_GATEWAY,
+            Json(serde_json::json!({
+                "error": "Failed to fetch models from provider",
+                "provider_error": error_text,
+            })),
+        ).into_response());
+    }
+
+    let models_json: serde_json::Value = response.json().await?;
+    Ok((StatusCode::OK, Json(models_json)).into_response())
+}
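A client-side sketch of the two new endpoints (assumptions: the gateway is reachable at http://localhost:4000, and a template with id `standard_fundamentals` exists; neither value is fixed by this diff):

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let http = reqwest::Client::new();

    // 1) Kick off report generation; the gateway replies 202 Accepted with a
    //    request_id that can be polled via GET /v1/tasks/{request_id}.
    let accepted: serde_json::Value = http
        .post("http://localhost:4000/v1/analysis-requests/AAPL")
        .json(&json!({ "template_id": "standard_fundamentals" }))
        .send()
        .await?
        .json()
        .await?;
    println!("request_id = {}", accepted["request_id"]);

    // 2) Preview model discovery against an arbitrary OpenAI-compatible base
    //    URL without persisting any provider configuration.
    let models: serde_json::Value = http
        .post("http://localhost:4000/v1/discover-models")
        .json(&json!({
            "api_base_url": "https://api.openai.com/v1",
            "api_key": "sk-...",
        }))
        .send()
        .await?
        .json()
        .await?;
    println!("models = {models}");
    Ok(())
}
```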
@@ -7,11 +7,36 @@ mod persistence;
 use crate::config::AppConfig;
 use crate::error::Result;
 use crate::state::AppState;
-use tracing::info;
+use tracing::{error, info};
+use tracing_subscriber::EnvFilter;
 use std::process;
+use std::io::{self, Write};
+use tower_http::trace::TraceLayer;
 
 #[tokio::main]
 async fn main() {
+    // Ensure panics are clearly printed with backtraces (independent of env var)
+    std::panic::set_hook(Box::new(|panic_info| {
+        eprintln!("panic in api-gateway: {}", panic_info);
+        eprintln!("backtrace:\n{:?}", std::backtrace::Backtrace::force_capture());
+    }));
+
+    // Emit earliest visibility logs to stderr for containers that restart quickly
+    let ts = std::time::SystemTime::now()
+        .duration_since(std::time::UNIX_EPOCH)
+        .map(|d| d.as_secs())
+        .unwrap_or(0);
+    eprintln!("api-gateway launching: pid={}, ts_unix={}", process::id(), ts);
+    // Print critical environment variables relevant to configuration (no secrets)
+    eprintln!(
+        "env: SERVER_PORT={:?}, NATS_ADDR={:?}, DATA_PERSISTENCE_SERVICE_URL={:?}, PROVIDER_SERVICES.len={}",
+        std::env::var("SERVER_PORT").ok(),
+        std::env::var("NATS_ADDR").ok(),
+        std::env::var("DATA_PERSISTENCE_SERVICE_URL").ok(),
+        std::env::var("PROVIDER_SERVICES").ok().map(|s| s.len()).unwrap_or(0),
+    );
+    let _ = io::stderr().flush();
+
     if let Err(e) = run().await {
         eprintln!("api-gateway failed to start: {}", e);
         process::exit(1);
@@ -19,9 +44,16 @@ async fn main() {
 }
 
 async fn run() -> Result<()> {
-    // Initialize logging
+    // Initialize deterministic logging, default to info if not provided
+    let env_filter = EnvFilter::try_from_default_env()
+        .unwrap_or_else(|_| EnvFilter::new("info,axum=info,hyper=info"));
     tracing_subscriber::fmt()
-        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
+        .with_env_filter(env_filter)
+        .with_target(true)
+        .with_thread_ids(true)
+        .with_thread_names(true)
+        .with_ansi(false)
+        .compact()
         .init();
 
     info!("Starting api-gateway service...");
@@ -29,20 +61,36 @@ async fn run() -> Result<()> {
     // Load configuration
     let config = AppConfig::load().map_err(|e| error::AppError::Configuration(e.to_string()))?;
     let port = config.server_port;
+    info!(
+        server_port = port,
+        nats_addr = %config.nats_addr,
+        persistence_url = %config.data_persistence_service_url,
+        "Loaded configuration"
+    );
     info!("Configured provider services: {:?}", config.provider_services);
 
     // Initialize application state
     let app_state = AppState::new(config).await?;
 
     // Create the Axum router
-    let app = api::create_router(app_state);
+    let app = api::create_router(app_state)
+        // Request-level tracing for better observability in Tilt/Compose logs
+        .layer(TraceLayer::new_for_http());
 
     // Start the HTTP server
-    let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port))
-        .await
-        .unwrap();
+    let addr = format!("0.0.0.0:{}", port);
+    let listener = match tokio::net::TcpListener::bind(&addr).await {
+        Ok(l) => l,
+        Err(e) => {
+            error!(%addr, err = %e, "Failed to bind TCP listener");
+            return Err(error::AppError::Anyhow(anyhow::anyhow!(e)));
+        }
+    };
     info!("HTTP server listening on port {}", port);
-    axum::serve(listener, app).await.unwrap();
+    if let Err(e) = axum::serve(listener, app).await {
+        error!(err = %e, "HTTP server terminated with error");
+        return Err(error::AppError::Anyhow(anyhow::anyhow!(e)));
+    }
 
     Ok(())
 }
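Two behaviors worth calling out in this main.rs rework: `EnvFilter::try_from_default_env()` fails when `RUST_LOG` is unset, so the `unwrap_or_else` fallback gives containers a stable info-level default instead of silence; and replacing the two `.unwrap()` calls with logged `Err` returns means a port conflict or serve failure now exits through the normal error path, while the panic hook at the top of `main` catches whatever still panics.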
@@ -4,7 +4,7 @@
 
 use crate::error::Result;
 use common_contracts::dtos::CompanyProfileDto;
-use common_contracts::config_models::{LlmProvidersConfig, AnalysisModulesConfig};
+use common_contracts::config_models::{LlmProvidersConfig, DataSourcesConfig, AnalysisTemplateSets};
 
 #[derive(Clone)]
 pub struct PersistenceClient {
@@ -48,7 +48,10 @@ impl PersistenceClient {
         Ok(config)
     }
 
-    pub async fn update_llm_providers_config(&self, payload: &LlmProvidersConfig) -> Result<LlmProvidersConfig> {
+    pub async fn update_llm_providers_config(
+        &self,
+        payload: &LlmProvidersConfig,
+    ) -> Result<LlmProvidersConfig> {
         let url = format!("{}/configs/llm_providers", self.base_url);
         let updated_config = self
             .client
@@ -62,21 +65,24 @@ impl PersistenceClient {
         Ok(updated_config)
     }
 
-    pub async fn get_analysis_modules_config(&self) -> Result<AnalysisModulesConfig> {
-        let url = format!("{}/configs/analysis_modules", self.base_url);
+    pub async fn get_analysis_template_sets(&self) -> Result<AnalysisTemplateSets> {
+        let url = format!("{}/configs/analysis_template_sets", self.base_url);
         let config = self
             .client
             .get(&url)
             .send()
             .await?
             .error_for_status()?
-            .json::<AnalysisModulesConfig>()
+            .json::<AnalysisTemplateSets>()
             .await?;
         Ok(config)
     }
 
-    pub async fn update_analysis_modules_config(&self, payload: &AnalysisModulesConfig) -> Result<AnalysisModulesConfig> {
-        let url = format!("{}/configs/analysis_modules", self.base_url);
+    pub async fn update_analysis_template_sets(
+        &self,
+        payload: &AnalysisTemplateSets,
+    ) -> Result<AnalysisTemplateSets> {
+        let url = format!("{}/configs/analysis_template_sets", self.base_url);
         let updated_config = self
             .client
             .put(&url)
@@ -84,7 +90,34 @@ impl PersistenceClient {
             .send()
             .await?
             .error_for_status()?
-            .json::<AnalysisModulesConfig>()
+            .json::<AnalysisTemplateSets>()
+            .await?;
+        Ok(updated_config)
+    }
+
+    pub async fn get_data_sources_config(&self) -> Result<DataSourcesConfig> {
+        let url = format!("{}/configs/data_sources", self.base_url);
+        let config = self
+            .client
+            .get(&url)
+            .send()
+            .await?
+            .error_for_status()?
+            .json::<DataSourcesConfig>()
+            .await?;
+        Ok(config)
+    }
+
+    pub async fn update_data_sources_config(&self, payload: &DataSourcesConfig) -> Result<DataSourcesConfig> {
+        let url = format!("{}/configs/data_sources", self.base_url);
+        let updated_config = self
+            .client
+            .put(&url)
+            .json(payload)
+            .send()
+            .await?
+            .error_for_status()?
+            .json::<DataSourcesConfig>()
             .await?;
         Ok(updated_config)
     }
@@ -22,15 +22,73 @@ pub struct LlmProvider {
 // Data structure for the entire LLM provider registry
 pub type LlmProvidersConfig = HashMap<String, LlmProvider>; // Key: provider_id, e.g., "openai_official"
 
-// Configuration for a single analysis module
-#[derive(Serialize, Deserialize, Debug, Clone, ToSchema)]
+// --- Analysis Module Config (NEW TEMPLATE-BASED STRUCTURE) ---
+
+/// Top-level configuration object for all analysis templates.
+/// Key: Template ID (e.g., "standard_fundamentals")
+pub type AnalysisTemplateSets = HashMap<String, AnalysisTemplateSet>;
+
+/// A single, self-contained set of analysis modules representing a complete workflow.
+/// e.g., "Standard Fundamental Analysis"
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
+pub struct AnalysisTemplateSet {
+    /// Human-readable name for the template set.
+    pub name: String,
+    /// All analysis modules contained within this template set.
+    /// Key: Module ID (e.g., "fundamental_analysis")
+    pub modules: HashMap<String, AnalysisModuleConfig>,
+}
+
+/// Configuration for a single analysis module.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
 pub struct AnalysisModuleConfig {
-    pub name: String, // e.g., "Bull-case analysis"
-    pub provider_id: String, // references a key in LlmProvidersConfig
-    pub model_id: String, // references a model_id from LlmModel
+    pub name: String,
+    pub provider_id: String,
+    pub model_id: String,
+    pub prompt_template: String,
+    /// List of dependencies. Each string must be a key in the parent `modules` HashMap.
+    pub dependencies: Vec<String>,
+}
+
+// --- Analysis Module Config (OLD DEPRECATED STRUCTURE) ---
+
+// This is the old, flat structure for analysis modules.
+// It is DEPRECATED and will be removed once all services are migrated
+// to the new AnalysisTemplateSets model.
+pub type AnalysisModulesConfig = HashMap<String, OldAnalysisModuleConfig>;
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+pub struct OldAnalysisModuleConfig {
+    pub name: String,
+    pub provider_id: String,
+    pub model_id: String,
     pub prompt_template: String,
     pub dependencies: Vec<String>,
 }
 
-// Data structure for the whole collection of analysis module configs
-pub type AnalysisModulesConfig = HashMap<String, AnalysisModuleConfig>; // Key: module_id, e.g., "bull_case"
+#[derive(Serialize, Deserialize, Debug, Clone, Default)]
+pub struct SystemConfig {
+    pub llm_providers: LlmProvidersConfig,
+    pub analysis_modules: AnalysisModulesConfig,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
+#[serde(rename_all = "snake_case")]
+pub enum DataSourceProvider {
+    Tushare,
+    Finnhub,
+    Alphavantage,
+    Yfinance,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
+pub struct DataSourceConfig {
+    pub provider: DataSourceProvider,
+    pub api_key: Option<String>,
+    pub api_url: Option<String>,
+    pub enabled: bool,
+}
+
+// Collection of data source configs (centralized, strongly typed, single source of truth)
+pub type DataSourcesConfig = HashMap<String, DataSourceConfig>;
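To make the new nesting concrete, a sketch of one populated template set (the module, template, and model ids are illustrative; `standard_fundamentals` and `openai_official` are just the example keys the doc comments use):

```rust
use std::collections::HashMap;

use common_contracts::config_models::{
    AnalysisModuleConfig, AnalysisTemplateSet, AnalysisTemplateSets,
};

// Illustrative only: one template set with a single root module.
fn example_template_sets() -> AnalysisTemplateSets {
    let mut modules = HashMap::new();
    modules.insert(
        "fundamental_analysis".to_string(),
        AnalysisModuleConfig {
            name: "Fundamental Analysis".to_string(),
            provider_id: "openai_official".to_string(), // key into LlmProvidersConfig
            model_id: "gpt-4o".to_string(),             // assumed model id
            prompt_template: "Analyze the fundamentals of {symbol}.".to_string(),
            dependencies: vec![], // no upstream modules
        },
    );

    let mut sets = HashMap::new();
    sets.insert(
        "standard_fundamentals".to_string(),
        AnalysisTemplateSet {
            name: "Standard Fundamental Analysis".to_string(),
            modules,
        },
    );
    sets
}
```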
@@ -48,27 +48,31 @@ pub struct DailyMarketDataBatchDto {
     pub records: Vec<DailyMarketDataDto>,
 }
 
-// Analysis Results API DTOs
+// Analysis Results API DTOs (NEW)
 #[api_dto]
-pub struct NewAnalysisResultDto {
+pub struct NewAnalysisResult {
+    pub request_id: Uuid,
     pub symbol: String,
+    pub template_id: String,
     pub module_id: String,
-    pub model_name: Option<String>,
     pub content: String,
-    pub meta_data: Option<JsonValue>,
+    pub meta_data: JsonValue,
 }
 
+/// Represents a persisted analysis result read from the database.
 #[api_dto]
 pub struct AnalysisResultDto {
-    pub id: Uuid,
+    pub id: i64,
+    pub request_id: Uuid,
     pub symbol: String,
+    pub template_id: String,
     pub module_id: String,
-    pub generated_at: chrono::DateTime<chrono::Utc>,
-    pub model_name: Option<String>,
     pub content: String,
-    pub meta_data: Option<JsonValue>,
+    pub meta_data: JsonValue,
+    pub created_at: chrono::DateTime<chrono::Utc>,
 }
 
 
 // Realtime Quotes DTOs
 #[api_dto]
 pub struct RealtimeQuoteDto {
@@ -2,13 +2,27 @@ use serde::{Serialize, Deserialize};
 use uuid::Uuid;
 
 // --- Commands ---
-#[derive(Serialize, Deserialize, Debug, Clone)]
+///
+/// Published by: `api-gateway`
+/// Consumed by: `*-provider-services`
+#[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct FetchCompanyDataCommand {
     pub request_id: Uuid,
     pub symbol: String,
     pub market: String,
 }
 
+/// Command to start a full report generation workflow.
+///
+/// Published by: `api-gateway`
+/// Consumed by: `report-generator-service`
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct GenerateReportCommand {
+    pub request_id: Uuid,
+    pub symbol: String,
+    pub template_id: String,
+}
+
 // --- Events ---
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct CompanyProfilePersistedEvent {
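Because the contract is plain serde, any service or test harness can publish the command directly; a minimal sketch, assuming an already-connected `async_nats::Client` and the subject string the gateway's `ANALYSIS_COMMANDS_QUEUE` constant uses:

```rust
use common_contracts::messages::GenerateReportCommand;
use uuid::Uuid;

// Publish a report-generation command straight onto the bus.
async fn publish_generate_report(client: &async_nats::Client) -> anyhow::Result<()> {
    let cmd = GenerateReportCommand {
        request_id: Uuid::new_v4(),
        symbol: "AAPL".to_string(),                       // example symbol
        template_id: "standard_fundamentals".to_string(), // assumed template id
    };
    client
        .publish(
            "analysis.commands.generate_report".to_string(),
            serde_json::to_vec(&cmd)?.into(),
        )
        .await?;
    Ok(())
}
```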
@@ -74,7 +74,7 @@ wasm-cli = []
 mcp = ["service_kit/mcp"]
 # Optional: pass api-cli through to service_kit
 # api-cli = ["service_kit/api-cli"]
-full-data = []
+# full-data = []
 
 # --- For Local Development ---
 # If you are developing `service_kit` locally, uncomment the following lines
@@ -19,6 +19,8 @@ RUN cargo chef cook --release --recipe-path /app/services/data-persistence-servi
 # Copy service source for the actual build
 COPY services/common-contracts /app/services/common-contracts
 COPY services/data-persistence-service /app/services/data-persistence-service
+## Copy /config into /app/config so root-level config files can be embedded at compile time via include_str!
+COPY config /app/config
 RUN cargo build --release --bin data-persistence-service-server
 
 FROM debian:bookworm-slim AS runtime
@@ -30,5 +32,7 @@ COPY --from=builder /app/services/data-persistence-service/target/release/data-p
 COPY services/data-persistence-service/migrations ./migrations
 ENV HOST=0.0.0.0
 ENV PORT=3000
+## Allow startup to continue when migration versions diverge (container default only; can be overridden locally)
+ENV SKIP_MIGRATIONS_ON_MISMATCH=1
 EXPOSE 3000
 ENTRYPOINT ["/usr/local/bin/data-persistence-service-server"]
@@ -1,94 +1,121 @@
-use crate::{
-    db,
-    dtos::{AnalysisResultDto, NewAnalysisResultDto},
-    AppState, ServerError,
-};
+use crate::models::AnalysisResult;
+use crate::{AppState, ServerError};
 use axum::{
     extract::{Path, Query, State},
+    http::StatusCode,
+    response::IntoResponse,
     Json,
 };
+use common_contracts::dtos::{AnalysisResultDto, NewAnalysisResult};
 use serde::Deserialize;
-use service_kit::api;
-use uuid::Uuid;
-use tracing::info;
+use tracing::instrument;
+use anyhow::Error as AnyhowError;
 
-#[derive(Deserialize, utoipa::ToSchema)]
+#[derive(Debug, Deserialize)]
 pub struct AnalysisQuery {
     pub symbol: String,
     pub module_id: Option<String>,
 }
 
-#[api(POST, "/api/v1/analysis-results", output(detail = "AnalysisResultDto"))]
+/// Creates a new analysis result and returns the created record.
+#[instrument(skip(state, payload), fields(request_id = %payload.request_id, symbol = %payload.symbol, module_id = %payload.module_id))]
 pub async fn create_analysis_result(
     State(state): State<AppState>,
-    Json(payload): Json<NewAnalysisResultDto>,
-) -> Result<Json<AnalysisResultDto>, ServerError> {
-    info!(target: "api", symbol = %payload.symbol, module_id = %payload.module_id, "POST /analysis-results → create_analysis_result called");
-    let new_result = db::create_analysis_result(&state.pool, &payload).await?;
+    Json(payload): Json<NewAnalysisResult>,
+) -> Result<impl IntoResponse, ServerError> {
+    let result = sqlx::query_as::<_, AnalysisResult>(
+        r#"
+        INSERT INTO analysis_results (request_id, symbol, template_id, module_id, content, meta_data)
+        VALUES ($1, $2, $3, $4, $5, $6)
+        RETURNING *
+        "#
+    )
+    .bind(&payload.request_id)
+    .bind(&payload.symbol)
+    .bind(&payload.template_id)
+    .bind(&payload.module_id)
+    .bind(&payload.content)
+    .bind(&payload.meta_data)
+    .fetch_one(state.pool())
+    .await
+    .map_err(AnyhowError::from)?;
 
-    // Convert model to DTO
     let dto = AnalysisResultDto {
-        id: new_result.id,
-        symbol: new_result.symbol,
-        module_id: new_result.module_id,
-        generated_at: new_result.generated_at,
-        model_name: new_result.model_name,
-        content: new_result.content,
-        meta_data: new_result.meta_data,
+        id: result.id,
+        request_id: result.request_id,
+        symbol: result.symbol,
+        template_id: result.template_id,
+        module_id: result.module_id,
+        content: result.content,
+        meta_data: result.meta_data,
+        created_at: result.created_at,
     };
 
-    info!(target: "api", id = %dto.id, symbol = %dto.symbol, module_id = %dto.module_id, "create_analysis_result completed");
-    Ok(Json(dto))
+    Ok((StatusCode::CREATED, Json(dto)))
 }
 
-#[api(GET, "/api/v1/analysis-results", output(list = "AnalysisResultDto"))]
+/// Retrieves all analysis results for a given symbol.
+#[instrument(skip(state))]
 pub async fn get_analysis_results(
     State(state): State<AppState>,
     Query(query): Query<AnalysisQuery>,
 ) -> Result<Json<Vec<AnalysisResultDto>>, ServerError> {
-    info!(target: "api", symbol = %query.symbol, module_id = ?query.module_id, "GET /analysis-results → get_analysis_results called");
-    let results = db::get_analysis_results(&state.pool, &query.symbol, query.module_id.as_deref()).await?;
+    let results = sqlx::query_as::<_, AnalysisResult>(
+        r#"
+        SELECT * FROM analysis_results
+        WHERE symbol = $1
+        ORDER BY created_at DESC
+        "#
+    )
+    .bind(&query.symbol)
+    .fetch_all(state.pool())
+    .await
+    .map_err(AnyhowError::from)?;
 
-    // Convert Vec<Model> to Vec<Dto>
-    let dtos: Vec<AnalysisResultDto> = results
+    let dtos = results
         .into_iter()
         .map(|r| AnalysisResultDto {
             id: r.id,
+            request_id: r.request_id,
             symbol: r.symbol,
+            template_id: r.template_id,
             module_id: r.module_id,
-            generated_at: r.generated_at,
-            model_name: r.model_name,
             content: r.content,
             meta_data: r.meta_data,
+            created_at: r.created_at,
         })
         .collect();
 
-    info!(target: "api", count = dtos.len(), symbol = %query.symbol, "get_analysis_results completed");
     Ok(Json(dtos))
 }
 
-#[api(GET, "/api/v1/analysis-results/{id}", output(detail = "AnalysisResultDto"))]
+/// Retrieves a single analysis result by its primary ID.
+#[instrument(skip(state))]
 pub async fn get_analysis_result_by_id(
     State(state): State<AppState>,
-    Path(id): Path<String>,
+    Path(id): Path<i64>,
 ) -> Result<Json<AnalysisResultDto>, ServerError> {
-    let parsed = Uuid::parse_str(&id).map_err(|e| ServerError::Anyhow(e.into()))?;
-    info!(target: "api", id = %id, "GET /analysis-results/{{id}} → get_analysis_result_by_id called");
-    let result = db::get_analysis_result_by_id(&state.pool, parsed)
-        .await?
-        .ok_or_else(|| ServerError::NotFound(format!("Analysis result with id '{}' not found", id)))?;
+    let result = sqlx::query_as::<_, AnalysisResult>(
+        r#"
+        SELECT * FROM analysis_results
+        WHERE id = $1
+        "#
+    )
+    .bind(&id)
+    .fetch_one(state.pool())
+    .await
+    .map_err(AnyhowError::from)?;
 
-    // Convert model to DTO
     let dto = AnalysisResultDto {
         id: result.id,
+        request_id: result.request_id,
         symbol: result.symbol,
+        template_id: result.template_id,
        module_id: result.module_id,
-        generated_at: result.generated_at,
-        model_name: result.model_name,
        content: result.content,
        meta_data: result.meta_data,
+        created_at: result.created_at,
     };
 
-    info!(target: "api", id = %dto.id, symbol = %dto.symbol, module_id = %dto.module_id, "get_analysis_result_by_id completed");
     Ok(Json(dto))
 }
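For `sqlx::query_as` with `SELECT *` / `RETURNING *` to compile and run here, `crate::models::AnalysisResult` must be a `FromRow` type whose fields line up with the `analysis_results` columns. The model file is not included in this diff; an assumed shape, inferred from the DTO mapping above (requires sqlx's `chrono`, `json`, and `uuid` features):

```rust
use chrono::{DateTime, Utc};
use serde_json::Value as JsonValue;
use uuid::Uuid;

// Assumed model (not in this diff): mirrors the analysis_results table.
#[derive(Debug, sqlx::FromRow)]
pub struct AnalysisResult {
    pub id: i64,
    pub request_id: Uuid,
    pub symbol: String,
    pub template_id: String,
    pub module_id: String,
    pub content: String,
    pub meta_data: JsonValue,
    pub created_at: DateTime<Utc>,
}
```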
@@ -9,6 +9,7 @@ use axum::{
 };
 use service_kit::api;
 use tracing::info;
+use anyhow::Error as AnyhowError;
 
 #[api(PUT, "/api/v1/companies")]
 pub async fn upsert_company(
@@ -16,7 +17,7 @@ pub async fn upsert_company(
     Json(payload): Json<CompanyProfileDto>,
 ) -> Result<(), ServerError> {
     info!(target: "api", symbol = %payload.symbol, "PUT /companies → upsert_company called");
-    db::upsert_company(&state.pool, &payload).await?;
+    db::upsert_company(&state.pool, &payload).await.map_err(AnyhowError::from)?;
     info!(target: "api", symbol = %payload.symbol, "upsert_company completed");
     Ok(())
 }
@@ -28,7 +29,8 @@ pub async fn get_company_by_symbol(
 ) -> Result<Json<CompanyProfileDto>, ServerError> {
     info!(target: "api", symbol = %symbol, "GET /companies/{{symbol}} → get_company_by_symbol called");
     let company = db::get_company_by_symbol(&state.pool, &symbol)
-        .await?
+        .await
+        .map_err(AnyhowError::from)?
         .ok_or_else(|| ServerError::NotFound(format!("Company with symbol '{}' not found", symbol)))?;
 
     // Convert from model to DTO
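The recurring `.map_err(AnyhowError::from)?` here and in the other handlers suggests the db layer now returns `sqlx::Error` directly rather than an anyhow-based result; wrapping at the call site keeps `ServerError`'s existing `From<anyhow::Error>` conversion working without touching the error enum.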
@@ -1,7 +1,7 @@
 use axum::{extract::State, Json};
-use common_contracts::config_models::{LlmProvidersConfig, AnalysisModulesConfig};
+use common_contracts::config_models::{AnalysisTemplateSets, DataSourcesConfig, LlmProvidersConfig};
 use service_kit::api;
+use tracing::instrument;
 use crate::{db::system_config, AppState, ServerError};
 
 #[api(GET, "/api/v1/configs/llm_providers", output(detail = "LlmProvidersConfig"))]
@@ -23,21 +23,48 @@ pub async fn update_llm_providers_config(
     Ok(Json(updated_config))
 }
 
-#[api(GET, "/api/v1/configs/analysis_modules", output(detail = "AnalysisModulesConfig"))]
-pub async fn get_analysis_modules_config(
+#[api(GET, "/api/v1/configs/analysis_template_sets", output(detail = "AnalysisTemplateSets"))]
+pub async fn get_analysis_template_sets(
     State(state): State<AppState>,
-) -> Result<Json<AnalysisModulesConfig>, ServerError> {
+) -> Result<Json<AnalysisTemplateSets>, ServerError> {
     let pool = state.pool();
-    let config = system_config::get_config::<AnalysisModulesConfig>(pool, "analysis_modules").await?;
+    let config = system_config::get_config::<AnalysisTemplateSets>(pool, "analysis_template_sets").await?;
     Ok(Json(config))
 }
 
-#[api(PUT, "/api/v1/configs/analysis_modules", output(detail = "AnalysisModulesConfig"))]
-pub async fn update_analysis_modules_config(
+#[api(PUT, "/api/v1/configs/analysis_template_sets", output(detail = "AnalysisTemplateSets"))]
+pub async fn update_analysis_template_sets(
     State(state): State<AppState>,
-    Json(payload): Json<AnalysisModulesConfig>,
-) -> Result<Json<AnalysisModulesConfig>, ServerError> {
+    Json(payload): Json<AnalysisTemplateSets>,
+) -> Result<Json<AnalysisTemplateSets>, ServerError> {
     let pool = state.pool();
-    let updated_config = system_config::update_config(pool, "analysis_modules", &payload).await?;
+    let updated = system_config::update_config(pool, "analysis_template_sets", &payload).await?;
+    Ok(Json(updated))
+}
+
+#[api(
+    GET,
+    "/api/v1/configs/data_sources",
+    output(detail = "DataSourcesConfig")
+)]
+pub async fn get_data_sources_config(
+    State(state): State<AppState>,
+) -> Result<Json<DataSourcesConfig>, ServerError> {
+    let pool = state.pool();
+    let config = system_config::get_config::<DataSourcesConfig>(pool, "data_sources").await?;
+    Ok(Json(config))
+}
+
+#[api(
+    PUT,
+    "/api/v1/configs/data_sources",
+    output(detail = "DataSourcesConfig")
+)]
+pub async fn update_data_sources_config(
+    State(state): State<AppState>,
+    Json(payload): Json<DataSourcesConfig>,
+) -> Result<Json<DataSourcesConfig>, ServerError> {
+    let pool = state.pool();
+    let updated_config = system_config::update_config(pool, "data_sources", &payload).await?;
     Ok(Json(updated_config))
 }
@ -11,6 +11,7 @@ use chrono::NaiveDate;
|
|||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use service_kit::api;
|
use service_kit::api;
|
||||||
use tracing::info;
|
use tracing::info;
|
||||||
|
use anyhow::Error as AnyhowError;
|
||||||
|
|
||||||
#[derive(Deserialize, utoipa::ToSchema)]
|
#[derive(Deserialize, utoipa::ToSchema)]
|
||||||
pub struct FinancialsQuery {
|
pub struct FinancialsQuery {
|
||||||
@ -23,7 +24,7 @@ pub async fn batch_insert_financials(
|
|||||||
Json(payload): Json<crate::dtos::TimeSeriesFinancialBatchDto>,
|
Json(payload): Json<crate::dtos::TimeSeriesFinancialBatchDto>,
|
||||||
) -> Result<axum::http::StatusCode, ServerError> {
|
) -> Result<axum::http::StatusCode, ServerError> {
|
||||||
info!(target: "api", count = payload.records.len(), "POST /market-data/financials/batch → batch_insert_financials called");
|
info!(target: "api", count = payload.records.len(), "POST /market-data/financials/batch → batch_insert_financials called");
|
||||||
db::batch_insert_financials(&state.pool, &payload.records).await?;
|
db::batch_insert_financials(&state.pool, &payload.records).await.map_err(AnyhowError::from)?;
|
||||||
info!(target: "api", count = payload.records.len(), "batch_insert_financials completed");
|
info!(target: "api", count = payload.records.len(), "batch_insert_financials completed");
|
||||||
Ok(axum::http::StatusCode::CREATED)
|
Ok(axum::http::StatusCode::CREATED)
|
||||||
}
|
}
|
||||||
@ -36,7 +37,7 @@ pub async fn get_financials_by_symbol(
|
|||||||
) -> Result<Json<Vec<TimeSeriesFinancialDto>>, ServerError> {
|
) -> Result<Json<Vec<TimeSeriesFinancialDto>>, ServerError> {
|
||||||
info!(target: "api", symbol = %symbol, metrics = ?query.metrics, "GET /market-data/financials/{{symbol}} → get_financials_by_symbol called");
|
info!(target: "api", symbol = %symbol, metrics = ?query.metrics, "GET /market-data/financials/{{symbol}} → get_financials_by_symbol called");
|
||||||
let metrics = query.metrics.map(|s| s.split(',').map(String::from).collect());
|
     let metrics = query.metrics.map(|s| s.split(',').map(String::from).collect());
-    let financials = db::get_financials_by_symbol(&state.pool, &symbol, metrics).await?;
+    let financials = db::get_financials_by_symbol(&state.pool, &symbol, metrics).await.map_err(AnyhowError::from)?;

     // Convert Vec<Model> to Vec<Dto>
     let dtos: Vec<TimeSeriesFinancialDto> = financials
@@ -70,7 +71,7 @@ pub async fn upsert_realtime_quote(
     Json(quote): Json<RealtimeQuoteDto>,
 ) -> Result<axum::http::StatusCode, ServerError> {
     info!(target: "api", symbol = %quote.symbol, market = %quote.market, "POST /market-data/quotes → upsert_realtime_quote called");
-    db::insert_realtime_quote(&state.pool, &quote).await?;
+    db::insert_realtime_quote(&state.pool, &quote).await.map_err(AnyhowError::from)?;
     Ok(axum::http::StatusCode::CREATED)
 }

@@ -82,7 +83,7 @@ pub async fn get_latest_realtime_quote(
 ) -> Result<Json<RealtimeQuoteDto>, ServerError> {
     let market = q.market.clone();
     info!(target: "api", symbol = %symbol, market = %market, "GET /market-data/quotes/{{market}}/{{symbol}} → get_latest_realtime_quote called");
-    if let Some(rec) = db::get_latest_realtime_quote(&state.pool, &market, &symbol).await? {
+    if let Some(rec) = db::get_latest_realtime_quote(&state.pool, &market, &symbol).await.map_err(AnyhowError::from)? {
         if let Some(max_age) = q.max_age_seconds {
             let cutoff = chrono::Utc::now() - chrono::Duration::seconds(max_age);
             if rec.ts < cutoff {
@@ -121,7 +122,7 @@ pub async fn batch_insert_daily_data(
     Json(payload): Json<crate::dtos::DailyMarketDataBatchDto>,
 ) -> Result<axum::http::StatusCode, ServerError> {
     info!(target: "api", count = payload.records.len(), "POST /market-data/daily/batch → batch_insert_daily_data called");
-    db::batch_insert_daily_data(&state.pool, &payload.records).await?;
+    db::batch_insert_daily_data(&state.pool, &payload.records).await.map_err(AnyhowError::from)?;
     info!(target: "api", count = payload.records.len(), "batch_insert_daily_data completed");
     Ok(axum::http::StatusCode::CREATED)
 }

@@ -135,7 +136,8 @@ pub async fn get_daily_data_by_symbol(
     info!(target: "api", symbol = %symbol, start = ?query.start_date, end = ?query.end_date, "GET /market-data/daily/{{symbol}} → get_daily_data_by_symbol called");
     let daily_data =
         db::get_daily_data_by_symbol(&state.pool, &symbol, query.start_date, query.end_date)
-            .await?;
+            .await
+            .map_err(AnyhowError::from)?;

     // Convert Vec<Model> to Vec<Dto>
     let dtos: Vec<DailyMarketDataDto> = daily_data
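The `.map_err(AnyhowError::from)` chains above bridge `sqlx::Error` into the handlers' `ServerError` without a direct `From<sqlx::Error> for ServerError` impl. A minimal sketch of the error plumbing this pattern assumes (the names `AnyhowError` and `ServerError` come from the diff; the bodies below are illustrative, not the repository's actual definitions):

```rust
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};

// Assumed newtype over anyhow::Error; the blanket From lets `AnyhowError::from`
// absorb any error type that converts into anyhow::Error (sqlx::Error included).
pub struct AnyhowError(pub anyhow::Error);

impl<E: Into<anyhow::Error>> From<E> for AnyhowError {
    fn from(e: E) -> Self {
        AnyhowError(e.into())
    }
}

// Assumed handler error type: the trailing `?` finishes the conversion via
// From<AnyhowError>.
pub enum ServerError {
    Internal(AnyhowError),
}

impl From<AnyhowError> for ServerError {
    fn from(e: AnyhowError) -> Self {
        ServerError::Internal(e)
    }
}

impl IntoResponse for ServerError {
    fn into_response(self) -> Response {
        let ServerError::Internal(AnyhowError(e)) = self;
        (StatusCode::INTERNAL_SERVER_ERROR, format!("internal error: {e}")).into_response()
    }
}
```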
@@ -1,10 +1,47 @@
-// This module will contain all the API handler definitions
-// which are then collected by the `inventory` crate.
-#[cfg(feature = "full-data")]
-pub mod companies;
-#[cfg(feature = "full-data")]
-pub mod market_data;
-#[cfg(feature = "full-data")]
-pub mod analysis;
-pub mod system;
-pub mod configs;
+mod analysis;
+mod companies;
+mod configs;
+mod market_data;
+mod system;
+use crate::AppState;
+use axum::{
+    routing::{get, post},
+    Router,
+};
+
+pub fn create_router(_state: AppState) -> Router<AppState> {
+    let router: Router<AppState> = Router::new()
+        // System
+        .route("/health", get(system::get_health))
+        // Configs
+        .route(
+            "/configs/llm_providers",
+            get(configs::get_llm_providers_config).put(configs::update_llm_providers_config),
+        )
+        .route(
+            "/configs/analysis_template_sets",
+            get(configs::get_analysis_template_sets).put(configs::update_analysis_template_sets),
+        )
+        .route(
+            "/configs/data_sources",
+            get(configs::get_data_sources_config).put(configs::update_data_sources_config),
+        )
+        // Companies
+        .route("/companies/{symbol}", get(companies::get_company_by_symbol))
+        // Market Data
+        .route(
+            "/market-data/financial-statements/{symbol}",
+            get(market_data::get_financials_by_symbol),
+        )
+        // Analysis Results
+        .route(
+            "/analysis-results",
+            post(analysis::create_analysis_result).get(analysis::get_analysis_results),
+        )
+        .route(
+            "/analysis-results/{id}",
+            get(analysis::get_analysis_result_by_id),
+        );
+
+    router
+}
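A note on route syntax: the new router registers `/companies/{symbol}` and `/market-data/financial-statements/{symbol}` with brace captures, the axum 0.8 form; the axum 0.7 colon form (`:id`) panics at router construction in 0.8, so `/analysis-results/{id}` is written the same way above. A minimal sketch, assuming the workspace is on axum 0.8 (which the brace captures elsewhere in the router suggest):

```rust
use axum::{extract::Path, routing::get, Router};

// Brace captures are extracted with Path, exactly as in the router above.
async fn get_by_id(Path(id): Path<i64>) -> String {
    format!("analysis result {id}")
}

fn demo_router() -> Router {
    Router::new().route("/analysis-results/{id}", get(get_by_id))
}
```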
services/data-persistence-service/src/db/companies.rs (new file, +42)
@@ -0,0 +1,42 @@
+use common_contracts::dtos::CompanyProfileDto;
+use common_contracts::models::CompanyProfile;
+use sqlx::PgPool;
+
+pub async fn upsert_company(pool: &PgPool, payload: &CompanyProfileDto) -> Result<(), sqlx::Error> {
+    sqlx::query(
+        r#"
+        INSERT INTO company_profiles (symbol, name, industry, list_date, additional_info, updated_at)
+        VALUES ($1, $2, $3, $4, $5, NOW())
+        ON CONFLICT (symbol) DO UPDATE SET
+            name = EXCLUDED.name,
+            industry = EXCLUDED.industry,
+            list_date = EXCLUDED.list_date,
+            additional_info = EXCLUDED.additional_info,
+            updated_at = NOW()
+        "#,
+    )
+    .bind(&payload.symbol)
+    .bind(&payload.name)
+    .bind(&payload.industry)
+    .bind(&payload.list_date)
+    .bind(&payload.additional_info)
+    .execute(pool)
+    .await?;
+    Ok(())
+}
+
+pub async fn get_company_by_symbol(pool: &PgPool, symbol: &str) -> Result<Option<CompanyProfile>, sqlx::Error> {
+    let rec = sqlx::query_as::<_, CompanyProfile>(
+        r#"
+        SELECT symbol, name, industry, list_date, additional_info, updated_at
+        FROM company_profiles
+        WHERE symbol = $1
+        "#,
+    )
+    .bind(symbol)
+    .fetch_optional(pool)
+    .await?;
+    Ok(rec)
+}
+
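Because `upsert_company` uses `INSERT ... ON CONFLICT (symbol) DO UPDATE`, repeated calls for the same symbol are idempotent: a second call only refreshes the mutable columns and `updated_at`, and exactly one row per symbol remains. A usage sketch (the call site below is hypothetical):

```rust
use common_contracts::dtos::CompanyProfileDto;
use sqlx::PgPool;

// Hypothetical call site: calling the upsert twice with the same payload is
// safe; the second call is a pure refresh.
async fn refresh_profile(pool: &PgPool, dto: &CompanyProfileDto) -> Result<(), sqlx::Error> {
    crate::db::upsert_company(pool, dto).await?;
    crate::db::upsert_company(pool, dto).await // idempotent
}
```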
services/data-persistence-service/src/db/market_data.rs (new file, +237)
@@ -0,0 +1,237 @@
+use chrono::NaiveDate;
+use common_contracts::dtos::{DailyMarketDataDto, RealtimeQuoteDto, TimeSeriesFinancialDto};
+use common_contracts::models::{DailyMarketData, RealtimeQuote, TimeSeriesFinancial};
+use sqlx::PgPool;
+
+pub async fn batch_insert_financials(pool: &PgPool, records: &[TimeSeriesFinancialDto]) -> Result<(), sqlx::Error> {
+    let mut tx = pool.begin().await?;
+    for r in records {
+        sqlx::query(
+            r#"
+            INSERT INTO time_series_financials (symbol, metric_name, period_date, value, source)
+            VALUES ($1, $2, $3, $4, $5)
+            ON CONFLICT (symbol, metric_name, period_date) DO UPDATE SET
+                value = EXCLUDED.value,
+                source = EXCLUDED.source
+            "#,
+        )
+        .bind(&r.symbol)
+        .bind(&r.metric_name)
+        .bind(&r.period_date)
+        .bind(&r.value)
+        .bind(&r.source)
+        .execute(&mut *tx)
+        .await?;
+    }
+    tx.commit().await?;
+    Ok(())
+}
+
+pub async fn get_financials_by_symbol(
+    pool: &PgPool,
+    symbol: &str,
+    metrics: Option<Vec<String>>,
+) -> Result<Vec<TimeSeriesFinancial>, sqlx::Error> {
+    if let Some(metrics) = metrics {
+        let recs = sqlx::query_as::<_, TimeSeriesFinancial>(
+            r#"
+            SELECT symbol, metric_name, period_date, value, source
+            FROM time_series_financials
+            WHERE symbol = $1 AND metric_name = ANY($2)
+            ORDER BY period_date DESC
+            "#,
+        )
+        .bind(symbol)
+        .bind(&metrics)
+        .fetch_all(pool)
+        .await?;
+        Ok(recs)
+    } else {
+        let recs = sqlx::query_as::<_, TimeSeriesFinancial>(
+            r#"
+            SELECT symbol, metric_name, period_date, value, source
+            FROM time_series_financials
+            WHERE symbol = $1
+            ORDER BY period_date DESC
+            "#,
+        )
+        .bind(symbol)
+        .fetch_all(pool)
+        .await?;
+        Ok(recs)
+    }
+}
+
+pub async fn insert_realtime_quote(pool: &PgPool, q: &RealtimeQuoteDto) -> Result<(), sqlx::Error> {
+    sqlx::query(
+        r#"
+        INSERT INTO realtime_quotes (
+            symbol, market, ts, price, open_price, high_price, low_price,
+            prev_close, change, change_percent, volume, source, updated_at
+        )
+        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, NOW())
+        ON CONFLICT (symbol, market, ts) DO UPDATE SET
+            price = EXCLUDED.price,
+            open_price = EXCLUDED.open_price,
+            high_price = EXCLUDED.high_price,
+            low_price = EXCLUDED.low_price,
+            prev_close = EXCLUDED.prev_close,
+            change = EXCLUDED.change,
+            change_percent = EXCLUDED.change_percent,
+            volume = EXCLUDED.volume,
+            source = EXCLUDED.source,
+            updated_at = NOW()
+        "#,
+    )
+    .bind(&q.symbol)
+    .bind(&q.market)
+    .bind(&q.ts)
+    .bind(&q.price)
+    .bind(&q.open_price)
+    .bind(&q.high_price)
+    .bind(&q.low_price)
+    .bind(&q.prev_close)
+    .bind(&q.change)
+    .bind(&q.change_percent)
+    .bind(&q.volume)
+    .bind(&q.source)
+    .execute(pool)
+    .await?;
+    Ok(())
+}
+
+pub async fn get_latest_realtime_quote(
+    pool: &PgPool,
+    market: &str,
+    symbol: &str,
+) -> Result<Option<RealtimeQuote>, sqlx::Error> {
+    let rec = sqlx::query_as::<_, RealtimeQuote>(
+        r#"
+        SELECT symbol, market, ts, price, open_price, high_price, low_price,
+               prev_close, change, change_percent, volume, source, updated_at
+        FROM realtime_quotes
+        WHERE market = $1 AND symbol = $2
+        ORDER BY ts DESC
+        LIMIT 1
+        "#,
+    )
+    .bind(market)
+    .bind(symbol)
+    .fetch_optional(pool)
+    .await?;
+    Ok(rec)
+}
+
+pub async fn batch_insert_daily_data(pool: &PgPool, records: &[DailyMarketDataDto]) -> Result<(), sqlx::Error> {
+    let mut tx = pool.begin().await?;
+    for r in records {
+        sqlx::query(
+            r#"
+            INSERT INTO daily_market_data (
+                symbol, trade_date, open_price, high_price, low_price, close_price,
+                volume, pe, pb, total_mv
+            )
+            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+            ON CONFLICT (symbol, trade_date) DO UPDATE SET
+                open_price = EXCLUDED.open_price,
+                high_price = EXCLUDED.high_price,
+                low_price = EXCLUDED.low_price,
+                close_price = EXCLUDED.close_price,
+                volume = EXCLUDED.volume,
+                pe = EXCLUDED.pe,
+                pb = EXCLUDED.pb,
+                total_mv = EXCLUDED.total_mv
+            "#,
+        )
+        .bind(&r.symbol)
+        .bind(&r.trade_date)
+        .bind(&r.open_price)
+        .bind(&r.high_price)
+        .bind(&r.low_price)
+        .bind(&r.close_price)
+        .bind(&r.volume)
+        .bind(&r.pe)
+        .bind(&r.pb)
+        .bind(&r.total_mv)
+        .execute(&mut *tx)
+        .await?;
+    }
+    tx.commit().await?;
+    Ok(())
+}
+
+pub async fn get_daily_data_by_symbol(
+    pool: &PgPool,
+    symbol: &str,
+    start_date: Option<NaiveDate>,
+    end_date: Option<NaiveDate>,
+) -> Result<Vec<DailyMarketData>, sqlx::Error> {
+    match (start_date, end_date) {
+        (Some(start), Some(end)) => {
+            let recs = sqlx::query_as::<_, DailyMarketData>(
+                r#"
+                SELECT symbol, trade_date, open_price, high_price, low_price, close_price,
+                       volume, pe, pb, total_mv
+                FROM daily_market_data
+                WHERE symbol = $1 AND trade_date BETWEEN $2 AND $3
+                ORDER BY trade_date DESC
+                "#,
+            )
+            .bind(symbol)
+            .bind(start)
+            .bind(end)
+            .fetch_all(pool)
+            .await?;
+            Ok(recs)
+        }
+        (Some(start), None) => {
+            let recs = sqlx::query_as::<_, DailyMarketData>(
+                r#"
+                SELECT symbol, trade_date, open_price, high_price, low_price, close_price,
+                       volume, pe, pb, total_mv
+                FROM daily_market_data
+                WHERE symbol = $1 AND trade_date >= $2
+                ORDER BY trade_date DESC
+                "#,
+            )
+            .bind(symbol)
+            .bind(start)
+            .fetch_all(pool)
+            .await?;
+            Ok(recs)
+        }
+        (None, Some(end)) => {
+            let recs = sqlx::query_as::<_, DailyMarketData>(
+                r#"
+                SELECT symbol, trade_date, open_price, high_price, low_price, close_price,
+                       volume, pe, pb, total_mv
+                FROM daily_market_data
+                WHERE symbol = $1 AND trade_date <= $2
+                ORDER BY trade_date DESC
+                "#,
+            )
+            .bind(symbol)
+            .bind(end)
+            .fetch_all(pool)
+            .await?;
+            Ok(recs)
+        }
+        (None, None) => {
+            let recs = sqlx::query_as::<_, DailyMarketData>(
+                r#"
+                SELECT symbol, trade_date, open_price, high_price, low_price, close_price,
+                       volume, pe, pb, total_mv
+                FROM daily_market_data
+                WHERE symbol = $1
+                ORDER BY trade_date DESC
+                "#,
+            )
+            .bind(symbol)
+            .fetch_all(pool)
+            .await?;
+            Ok(recs)
+        }
+    }
+}
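Both batch functions open one transaction around the per-record loop, so a failing record rolls the whole batch back; the trade-off is one round-trip per row. A single-statement alternative (a sketch only, not what this service does; the array column types are assumptions) would bind parallel arrays and `UNNEST` them:

```rust
use chrono::NaiveDate;
use sqlx::PgPool;

// Sketch: one INSERT for the whole batch via parallel arrays. Assumes the
// value column is float8; adjust the casts if the real schema uses NUMERIC.
async fn bulk_insert_financials(
    pool: &PgPool,
    symbols: &[String],
    metrics: &[String],
    dates: &[NaiveDate],
    values: &[f64],
    sources: &[String],
) -> Result<(), sqlx::Error> {
    sqlx::query(
        r#"
        INSERT INTO time_series_financials (symbol, metric_name, period_date, value, source)
        SELECT * FROM UNNEST($1::text[], $2::text[], $3::date[], $4::float8[], $5::text[])
        ON CONFLICT (symbol, metric_name, period_date) DO UPDATE SET
            value = EXCLUDED.value,
            source = EXCLUDED.source
        "#,
    )
    .bind(symbols)
    .bind(metrics)
    .bind(dates)
    .bind(values)
    .bind(sources)
    .execute(pool)
    .await?;
    Ok(())
}
```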
@@ -5,3 +5,11 @@
 // to fetch or store data.

 pub mod system_config;
+pub mod companies;
+pub mod market_data;
+
+pub use companies::{get_company_by_symbol, upsert_company};
+pub use market_data::{
+    batch_insert_daily_data, batch_insert_financials, get_daily_data_by_symbol,
+    get_financials_by_symbol, get_latest_realtime_quote, insert_realtime_quote,
+};
@@ -1,7 +1,8 @@
+mod seeding;
 use data_persistence_service as app;
 use axum::Router;
 use sqlx::PgPool;
-use thiserror::Error;
+use sqlx::migrate::MigrateError;
 use tracing_subscriber::{EnvFilter, fmt::SubscriberBuilder};
 use tower_http::trace::TraceLayer;

@@ -19,6 +20,49 @@ pub async fn main() {
     let db_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
     let pool = PgPool::connect(&db_url).await.expect("Failed to connect to database");

+    // Run database migrations (strict by default; can be skipped via env for dev)
+    let skip_migrations = std::env::var("SKIP_MIGRATIONS")
+        .map(|v| {
+            let v = v.to_ascii_lowercase();
+            v == "1" || v == "true" || v == "yes" || v == "on"
+        })
+        .unwrap_or(false);
+    if skip_migrations {
+        println!("⚠️ SKIP_MIGRATIONS=1 → skipping database migrations (recommended for local development/debugging only).");
+    } else {
+        let res = sqlx::migrate!("./migrations").run(&pool).await;
+        if let Err(e) = res {
+            let allow_on_mismatch = std::env::var("SKIP_MIGRATIONS_ON_MISMATCH")
+                .map(|v| {
+                    let v = v.to_ascii_lowercase();
+                    v == "1" || v == "true" || v == "yes" || v == "on"
+                })
+                .unwrap_or(false);
+            match &e {
+                MigrateError::VersionMismatch(ver) if allow_on_mismatch => {
+                    eprintln!(
+                        "❗ Migration version mismatch detected: VersionMismatch({}).\n\
+                         Skipping the migration run because SKIP_MIGRATIONS_ON_MISMATCH=1; continuing startup.\n\
+                         Suggested fixes:\n\
+                         1) Never edit an already-applied migration file; create a new migration for changes;\n\
+                         2) Or run `sqlx migrate repair` to fix the checksums;\n\
+                         3) Or drop/recreate the database and re-apply the migrations.",
+                        ver
+                    );
+                }
+                _ => {
+                    panic!("Failed to run database migrations: {}", e);
+                }
+            }
+        }
+    }
+
+    // Seed the database with default data if necessary
+    if let Err(e) = seeding::seed_data(&pool).await {
+        tracing::error!("Failed to seed database: {}", e);
+        // We don't exit here, as the app might still be functional.
+    }
+
     let state = app::AppState::new(pool);

     let openapi = app::build_openapi_spec();
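The truthy-string parsing above is duplicated for `SKIP_MIGRATIONS` and `SKIP_MIGRATIONS_ON_MISMATCH`; a small helper (hypothetical, not part of this commit) would collapse it:

```rust
/// Hypothetical helper: "1", "true", "yes", "on" (case-insensitive) enable the
/// flag; anything else, or an unset variable, disables it.
fn env_flag(name: &str) -> bool {
    std::env::var(name)
        .map(|v| matches!(v.to_ascii_lowercase().as_str(), "1" | "true" | "yes" | "on"))
        .unwrap_or(false)
}

// let skip_migrations = env_flag("SKIP_MIGRATIONS");
// let allow_on_mismatch = env_flag("SKIP_MIGRATIONS_ON_MISMATCH");
```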
@@ -1 +1,25 @@
+use common_contracts::config_models::{AnalysisTemplateSets, DataSourceConfig, LlmProvidersConfig};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use uuid::Uuid;
+
 pub use common_contracts::models::*;
+
+#[derive(Serialize, Deserialize, Debug, Clone, Default)]
+pub struct SystemConfig {
+    pub llm_providers: LlmProvidersConfig,
+    pub analysis_template_sets: AnalysisTemplateSets,
+    pub data_sources: HashMap<String, DataSourceConfig>,
+}
+
+#[derive(Debug, Clone, Serialize, sqlx::FromRow)]
+pub struct AnalysisResult {
+    pub id: i64,
+    pub request_id: Uuid,
+    pub symbol: String,
+    pub template_id: String,
+    pub module_id: String,
+    pub content: String,
+    pub meta_data: serde_json::Value,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+}
services/data-persistence-service/src/seeding.rs (new file, +91)
@@ -0,0 +1,91 @@
+//! One-time data seeding logic for initializing the database.
+
+use data_persistence_service::models::SystemConfig;
+use common_contracts::config_models::{AnalysisModuleConfig, AnalysisTemplateSet, AnalysisTemplateSets};
+use sqlx::PgPool;
+use std::collections::HashMap;
+use tracing::info;
+
+const DEFAULT_ANALYSIS_CONFIG_JSON: &str = include_str!("../../../config/analysis-config.json");
+const CONFIG_KEY: &str = "analysis_template_sets";
+
+#[derive(serde::Deserialize)]
+struct RawAnalysisConfig {
+    analysis_modules: HashMap<String, RawModule>,
+}
+
+#[derive(serde::Deserialize)]
+struct RawModule {
+    name: String,
+    #[serde(default)]
+    dependencies: Vec<String>,
+    #[serde(rename = "model")]
+    model_id: String,
+    prompt_template: String,
+}
+
+/// Seeds the database with default configurations if they don't already exist.
+pub async fn seed_data(pool: &PgPool) -> Result<(), sqlx::Error> {
+    info!("Checking if default data seeding is required...");
+
+    let mut tx = pool.begin().await?;
+
+    // Check if the 'analysis_template_sets' config already exists.
+    let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM system_config WHERE config_key = $1")
+        .bind(CONFIG_KEY)
+        .fetch_one(&mut *tx)
+        .await?;
+
+    if count == 0 {
+        info!("No 'analysis_template_sets' config found. Seeding default analysis templates...");
+
+        // Parse the config file structure currently in the repo:
+        // { "analysis_modules": { "<module_id>": { name, model, prompt_template, dependencies? } } }
+        let raw: RawAnalysisConfig = serde_json::from_str(DEFAULT_ANALYSIS_CONFIG_JSON)
+            .expect("Failed to parse embedded default analysis config JSON");
+
+        let modules = raw
+            .analysis_modules
+            .into_iter()
+            .map(|(k, v)| {
+                (
+                    k,
+                    AnalysisModuleConfig {
+                        name: v.name,
+                        provider_id: "".to_string(), // to be configured by the user later
+                        model_id: v.model_id,
+                        prompt_template: v.prompt_template,
+                        dependencies: v.dependencies,
+                    },
+                )
+            })
+            .collect();
+
+        let default_template_set = AnalysisTemplateSet {
+            name: "默认分析模板".to_string(),
+            modules,
+        };
+
+        let mut template_sets = AnalysisTemplateSets::new();
+        template_sets.insert("default".to_string(), default_template_set);
+
+        // Store only the analysis_template_sets value itself under this key
+        let config_value = serde_json::to_value(&template_sets)
+            .expect("Failed to serialize default analysis template sets");
+
+        // Insert the new default config.
+        sqlx::query("INSERT INTO system_config (config_key, config_value) VALUES ($1, $2)")
+            .bind(CONFIG_KEY)
+            .bind(config_value)
+            .execute(&mut *tx)
+            .await?;
+
+        info!("Successfully seeded default analysis templates.");
+    } else {
+        info!("Database already seeded. Skipping.");
+    }
+
+    tx.commit().await?;
+
+    Ok(())
+}
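`include_str!` embeds `config/analysis-config.json` into the binary at compile time (the path resolves relative to seeding.rs), so seeding never reads the filesystem at runtime; the trade-off is that changing the default template requires a rebuild. A self-contained test sketch of the raw shape the deserializer accepts (the structs mirror the private `RawAnalysisConfig`/`RawModule`; the JSON values are invented for illustration):

```rust
#[cfg(test)]
mod shape_tests {
    use std::collections::HashMap;

    // Local mirror of the private seeding structs, for illustration only.
    #[derive(serde::Deserialize)]
    struct Raw {
        analysis_modules: HashMap<String, Module>,
    }

    #[derive(serde::Deserialize)]
    struct Module {
        name: String,
        #[serde(default)]
        dependencies: Vec<String>,
        #[serde(rename = "model")]
        model_id: String,
        prompt_template: String,
    }

    #[test]
    fn default_config_shape_parses() {
        let doc = r#"{
            "analysis_modules": {
                "fundamental": {
                    "name": "Fundamental Analysis",
                    "model": "some-model-id",
                    "prompt_template": "Analyze {{ company_name }} ({{ ts_code }})"
                }
            }
        }"#;
        let parsed: Raw = serde_json::from_str(doc).expect("shape should parse");
        assert_eq!(parsed.analysis_modules["fundamental"].model_id, "some-model-id");
        assert!(parsed.analysis_modules["fundamental"].dependencies.is_empty());
    }
}
```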
@@ -6,7 +6,7 @@ use axum::{
     Router,
 };
 use common_contracts::observability::{HealthStatus, ServiceStatus, TaskProgress};
-use crate::state::AppState;
+use crate::state::{AppState, ServiceOperationalStatus};

 pub fn create_router(app_state: AppState) -> Router {
     Router::new()
@@ -17,14 +17,22 @@ pub fn create_router(app_state: AppState) -> Router {

 /// [GET /health]
 /// Provides the current health status of the module.
-async fn health_check(State(_state): State<AppState>) -> Json<HealthStatus> {
+async fn health_check(State(state): State<AppState>) -> Json<HealthStatus> {
     let mut details = HashMap::new();
-    // In a real scenario, we would check connections to the message bus, etc.
-    details.insert("message_bus_connection".to_string(), "ok".to_string());
+    let operational_status = state.status.read().await;
+
+    let (service_status, reason) = match &*operational_status {
+        ServiceOperationalStatus::Active => (ServiceStatus::Ok, "ok".to_string()),
+        ServiceOperationalStatus::Degraded { reason } => {
+            (ServiceStatus::Degraded, reason.clone())
+        }
+    };
+
+    details.insert("operational_status".to_string(), reason);
+
     let status = HealthStatus {
         module_id: "finnhub-provider-service".to_string(),
-        status: ServiceStatus::Ok,
+        status: service_status,
         version: env!("CARGO_PKG_VERSION").to_string(),
         details,
     };
@@ -7,7 +7,7 @@ pub struct AppConfig {
     pub nats_addr: String,
     pub data_persistence_service_url: String,
     pub finnhub_api_url: String,
-    pub finnhub_api_key: SecretString,
+    pub finnhub_api_key: Option<SecretString>,
 }

 impl AppConfig {
@@ -39,11 +39,6 @@ impl AppConfig {
                 "FINNHUB_API_URL must not be empty".to_string(),
             ));
         }
-        if cfg.finnhub_api_key.expose_secret().trim().is_empty() {
-            return Err(config::ConfigError::Message(
-                "FINNHUB_API_KEY must not be empty".to_string(),
-            ));
-        }
         Ok(cfg)
     }
 }
services/finnhub-provider-service/src/config_poller.rs (new file, +56)
@@ -0,0 +1,56 @@
+use crate::error::Result;
+use crate::state::AppState;
+use common_contracts::config_models::{DataSourceConfig, DataSourceProvider};
+use secrecy::SecretString;
+use std::collections::HashMap;
+use std::time::Duration;
+use tracing::{error, info, instrument};
+
+const POLLING_INTERVAL_SECONDS: u64 = 60;
+
+#[instrument(skip(state))]
+pub async fn run_config_poller(state: AppState) {
+    info!("Starting configuration poller...");
+    let mut interval = tokio::time::interval(Duration::from_secs(POLLING_INTERVAL_SECONDS));
+    interval.tick().await; // Initial tick happens immediately
+
+    loop {
+        if let Err(e) = poll_and_update_config(&state).await {
+            error!("Failed to poll and update config: {:?}", e);
+        }
+        interval.tick().await;
+    }
+}
+
+async fn poll_and_update_config(state: &AppState) -> Result<()> {
+    info!("Polling for data source configurations...");
+    let client = reqwest::Client::new();
+    let url = format!(
+        "{}/configs/data_sources",
+        state.config.data_persistence_service_url
+    );
+
+    let response = client.get(&url).send().await?;
+    response.error_for_status_ref()?;
+
+    let configs: HashMap<String, DataSourceConfig> = response.json().await?;
+
+    let finnhub_config = configs.values().find(|cfg| {
+        matches!(cfg.provider, DataSourceProvider::Finnhub) && cfg.enabled
+    });
+
+    if let Some(config) = finnhub_config {
+        if let Some(api_key) = &config.api_key {
+            state.update_provider(Some(SecretString::from(api_key.clone()))).await;
+            info!("Successfully updated Finnhub provider with new configuration.");
+        } else {
+            state.update_provider(None).await;
+            info!("Finnhub provider is enabled but API key is missing. Service is degraded.");
+        }
+    } else {
+        state.update_provider(None).await;
+        info!("No enabled Finnhub configuration found. Service is degraded.");
+    }
+
+    Ok(())
+}
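The poller deserializes the `/configs/data_sources` payload into `HashMap<String, DataSourceConfig>` and only reads three fields: `provider`, `enabled`, and `api_key`. The real types live in `common_contracts::config_models`; the stand-in below shows only the minimal shape the poller relies on (the serde attributes are guesses, not the actual definitions):

```rust
use serde::Deserialize;

// Minimal stand-in inferred from the fields the poller touches; the actual
// common_contracts definitions may carry more fields and different renames.
#[derive(Deserialize)]
enum DataSourceProvider {
    Finnhub,
    Tushare,
    AlphaVantage,
    YFinance,
}

#[derive(Deserialize)]
struct DataSourceConfig {
    provider: DataSourceProvider,
    enabled: bool,
    api_key: Option<String>,
}
```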
@@ -1,40 +1,38 @@
+use anyhow::anyhow;
+use reqwest::Error as ReqwestError;
 use thiserror::Error;

-pub type Result<T> = std::result::Result<T, AppError>;
-
 #[derive(Error, Debug)]
 pub enum AppError {
     #[error("Configuration error: {0}")]
     Configuration(String),

-    #[error("Message bus error: {0}")]
-    MessageBus(#[from] async_nats::Error),
-
-    #[error("Message bus publish error: {0}")]
-    MessageBusPublish(#[from] async_nats::PublishError),
-
-    #[error("Message bus subscribe error: {0}")]
-    MessageBusSubscribe(String),
-
-    #[error("Message bus connect error: {0}")]
-    MessageBusConnect(String),
-
-    #[error("HTTP request to another service failed: {0}")]
-    ServiceRequest(#[from] reqwest::Error),
-
     #[error("Data parsing error: {0}")]
     DataParsing(#[from] anyhow::Error),
+
+    #[error("Provider not available: {0}")]
+    ProviderNotAvailable(String),
+
+    #[error(transparent)]
+    Reqwest(#[from] ReqwestError),
+
+    #[error(transparent)]
+    Nats(#[from] async_nats::Error),
+
+    #[error(transparent)]
+    NatsSubscribe(#[from] async_nats::client::SubscribeError),
+
+    #[error(transparent)]
+    NatsUnsubscribe(#[from] async_nats::UnsubscribeError),
+
+    #[error(transparent)]
+    NatsPublish(#[from] async_nats::error::Error<async_nats::client::PublishErrorKind>),
 }

-// Manual From conversions for the generic async-nats error types
-impl From<async_nats::error::Error<async_nats::ConnectErrorKind>> for AppError {
-    fn from(err: async_nats::error::Error<async_nats::ConnectErrorKind>) -> Self {
-        AppError::MessageBusConnect(err.to_string())
-    }
-}
-
-impl From<async_nats::SubscribeError> for AppError {
-    fn from(err: async_nats::SubscribeError) -> Self {
-        AppError::MessageBusSubscribe(err.to_string())
-    }
-}
+impl From<config::ConfigError> for AppError {
+    fn from(e: config::ConfigError) -> Self {
+        AppError::Configuration(e.to_string())
+    }
+}
+
+pub type Result<T, E = AppError> = std::result::Result<T, E>;
@@ -55,6 +55,7 @@ pub struct ReportItem {
     pub label: String,
 }

+#[derive(Clone)]
 pub struct FinnhubDataProvider {
     client: FinnhubClient,
 }
@@ -8,6 +8,7 @@ mod message_consumer;
 mod persistence;
 mod state;
 mod worker;
+mod config_poller;

 use crate::config::AppConfig;
 use crate::error::Result;
@@ -30,6 +31,9 @@ async fn main() -> Result<()> {
     // Initialize application state
     let app_state = AppState::new(config);

+    // --- Start the config poller ---
+    tokio::spawn(config_poller::run_config_poller(app_state.clone()));
+
     // Create the Axum router
     let app = api::create_router(app_state.clone());
@@ -1,27 +1,58 @@
 use crate::error::Result;
-use crate::state::AppState;
+use crate::state::{AppState, ServiceOperationalStatus};
 use common_contracts::messages::FetchCompanyDataCommand;
 use futures_util::StreamExt;
 use std::sync::Arc;
-use tracing::{error, info};
+use std::time::Duration;
+use tracing::{error, info, warn};

 const SUBJECT_NAME: &str = "data_fetch_commands";

 pub async fn run(state: AppState) -> Result<()> {
     info!("Starting NATS message consumer...");

-    let client = async_nats::connect(&state.config.nats_addr).await?;
-    info!("Connected to NATS.");
+    loop {
+        let status = state.status.read().await.clone();
+        if let ServiceOperationalStatus::Degraded { reason } = status {
+            warn!(
+                "Service is in degraded state (reason: {}). Pausing message consumption for 30s.",
+                reason
+            );
+            tokio::time::sleep(Duration::from_secs(30)).await;
+            continue;
+        }

-    // This is a simple subscriber. For production, consider JetStream for durability.
+        info!("Service is Active. Connecting to NATS...");
+        match async_nats::connect(&state.config.nats_addr).await {
+            Ok(client) => {
+                info!("Successfully connected to NATS.");
+                if let Err(e) = subscribe_and_process(state.clone(), client).await {
+                    error!("NATS subscription error: {}. Reconnecting in 10s...", e);
+                }
+            }
+            Err(e) => {
+                error!("Failed to connect to NATS: {}. Retrying in 10s...", e);
+            }
+        }
+        tokio::time::sleep(Duration::from_secs(10)).await;
+    }
+}
+
+async fn subscribe_and_process(state: AppState, client: async_nats::Client) -> Result<()> {
     let mut subscriber = client.subscribe(SUBJECT_NAME.to_string()).await?;

     info!(
         "Consumer started, waiting for messages on subject '{}'",
         SUBJECT_NAME
     );

     while let Some(message) = subscriber.next().await {
+        let current_status = state.status.read().await.clone();
+        if matches!(current_status, ServiceOperationalStatus::Degraded {..}) {
+            warn!("Service became degraded. Disconnecting from NATS and pausing consumption.");
+            subscriber.unsubscribe().await?;
+            return Ok(());
+        }
+
         info!("Received NATS message.");
         let state_clone = state.clone();
         let publisher_clone = client.clone();
@@ -30,6 +61,16 @@
         match serde_json::from_slice::<FetchCompanyDataCommand>(&message.payload) {
             Ok(command) => {
                 info!("Deserialized command for symbol: {}", command.symbol);
+
+                // Skip processing if market is 'CN'
+                if command.market.to_uppercase() == "CN" {
+                    info!(
+                        "Skipping command for symbol '{}' as its market ('{}') is 'CN'.",
+                        command.symbol, command.market
+                    );
+                    return;
+                }
+
                 if let Err(e) =
                     crate::worker::handle_fetch_command(state_clone, command, publisher_clone)
                         .await
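One consequence of this design: once `subscribe_and_process` is parked in `subscriber.next().await`, a degraded transition is only observed when the next message arrives, because the status is re-read per message. An event-driven alternative (a sketch under the assumption that `AppState` exposed a `tokio::sync::watch` channel rather than only the `RwLock`) would wake the loop the moment the poller flips the status:

```rust
use tokio::sync::watch;

// Sketch: the config poller would hold the watch::Sender<bool> ("is_active")
// and the consumer this Receiver. `changed()` resolves as soon as the value
// is replaced, so no fixed 30s back-off is needed.
async fn wait_until_active(mut rx: watch::Receiver<bool>) {
    while !*rx.borrow() {
        // Err means the sender was dropped; treat that as shutdown.
        if rx.changed().await.is_err() {
            return;
        }
    }
}
```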
@@ -1,32 +1,73 @@
-use std::sync::Arc;
-
-use dashmap::DashMap;
-use uuid::Uuid;
-
 use common_contracts::observability::TaskProgress;
-use secrecy::ExposeSecret;
+use secrecy::{ExposeSecret, SecretString};
+use std::sync::Arc;
+use tokio::sync::RwLock;

 use crate::config::AppConfig;
 use crate::finnhub::FinnhubDataProvider;
+use dashmap::DashMap;
+use uuid::Uuid;
+
+#[derive(Clone, Debug)]
+pub enum ServiceOperationalStatus {
+    Active,
+    Degraded { reason: String },
+}

 #[derive(Clone)]
 pub struct AppState {
     pub tasks: Arc<DashMap<Uuid, TaskProgress>>,
     pub config: Arc<AppConfig>,
-    pub finnhub_provider: Arc<FinnhubDataProvider>,
+    pub status: Arc<RwLock<ServiceOperationalStatus>>,
+    finnhub_provider: Arc<RwLock<Option<FinnhubDataProvider>>>,
 }

 impl AppState {
     pub fn new(config: AppConfig) -> Self {
-        let provider = Arc::new(FinnhubDataProvider::new(
-            config.finnhub_api_url.clone(),
-            config.finnhub_api_key.expose_secret().to_string(),
-        ));
+        let (initial_provider, initial_status) =
+            if let Some(api_key) = config.finnhub_api_key.as_ref() {
+                let provider = FinnhubDataProvider::new(
+                    config.finnhub_api_url.clone(),
+                    api_key.expose_secret().to_string(),
+                );
+                (Some(provider), ServiceOperationalStatus::Active)
+            } else {
+                (
+                    None,
+                    ServiceOperationalStatus::Degraded {
+                        reason: "Finnhub API Key is not configured.".to_string(),
+                    },
+                )
+            };
+
         Self {
             tasks: Arc::new(DashMap::new()),
             config: Arc::new(config),
-            finnhub_provider: provider,
+            status: Arc::new(RwLock::new(initial_status)),
+            finnhub_provider: Arc::new(RwLock::new(initial_provider)),
+        }
+    }
+
+    pub async fn get_provider(&self) -> Option<FinnhubDataProvider> {
+        self.finnhub_provider.read().await.clone()
+    }
+
+    pub async fn update_provider(&self, api_key: Option<SecretString>) {
+        let mut provider_guard = self.finnhub_provider.write().await;
+        let mut status_guard = self.status.write().await;
+
+        if let Some(key) = api_key {
+            let new_provider = FinnhubDataProvider::new(
+                self.config.finnhub_api_url.clone(),
+                key.expose_secret().to_string(),
+            );
+            *provider_guard = Some(new_provider);
+            *status_guard = ServiceOperationalStatus::Active;
+        } else {
+            *provider_guard = None;
+            *status_guard = ServiceOperationalStatus::Degraded {
+                reason: "Finnhub API Key is not configured.".to_string(),
+            };
+        }
     }
 }
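`update_provider` acquires the provider lock and then the status lock; any other code path that ever takes both must keep that order to stay deadlock-free (currently the poller is the only writer). A usage sketch of the two sides (the key value is a placeholder):

```rust
use secrecy::SecretString;

// Sketch of AppState's provider handling from both directions.
async fn example(state: &AppState) {
    // Poller side: install a provider and flip the service to Active.
    state
        .update_provider(Some(SecretString::from("demo-key".to_string())))
        .await;

    // Worker side: clone the provider out so no lock is held across the
    // subsequent network awaits.
    if let Some(provider) = state.get_provider().await {
        let _ = provider; // e.g. provider.fetch_all_data("AAPL").await
    }
}
```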
@@ -1,11 +1,11 @@
-use crate::error::Result;
+use crate::error::{AppError, Result};
 use crate::persistence::PersistenceClient;
 use crate::state::AppState;
 use chrono::Datelike;
 use common_contracts::dtos::{CompanyProfileDto, TimeSeriesFinancialDto};
 use common_contracts::messages::{CompanyProfilePersistedEvent, FetchCompanyDataCommand, FinancialsPersistedEvent};
 use common_contracts::observability::TaskProgress;
-use tracing::info;
+use tracing::{error, info};

 pub async fn handle_fetch_command(
     state: AppState,
@@ -26,11 +26,22 @@ pub async fn handle_fetch_command(
         },
     );

+    let provider = match state.get_provider().await {
+        Some(p) => p,
+        None => {
+            let reason = "Execution failed: Finnhub provider is not available (misconfigured).".to_string();
+            error!("{}", reason);
+            if let Some(mut task) = state.tasks.get_mut(&command.request_id) {
+                task.status = "Failed".to_string();
+                task.details = reason.clone();
+            }
+            return Err(AppError::ProviderNotAvailable(reason));
+        }
+    };
+
     // 1. Fetch data via provider
-    let (profile, financials): (CompanyProfileDto, Vec<TimeSeriesFinancialDto>) = state
-        .finnhub_provider
-        .fetch_all_data(&command.symbol)
-        .await?;
+    let (profile, financials): (CompanyProfileDto, Vec<TimeSeriesFinancialDto>) =
+        provider.fetch_all_data(&command.symbol).await?;

     // 2. Persist
     {
services/report-generator-service/Cargo.lock (generated, +17)
@@ -728,6 +728,12 @@ version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844"

+[[package]]
+name = "fixedbitset"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+
 [[package]]
 name = "flume"
 version = "0.11.1"
@@ -1657,6 +1663,16 @@ dependencies = [
  "sha2",
 ]

+[[package]]
+name = "petgraph"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
+dependencies = [
+ "fixedbitset",
+ "indexmap",
+]
+
 [[package]]
 name = "phf"
 version = "0.11.3"
@@ -2037,6 +2053,7 @@ dependencies = [
  "config",
  "dashmap",
  "futures",
+ "petgraph",
  "reqwest",
  "secrecy",
  "serde",
@@ -41,3 +41,4 @@ thiserror = "2.0.17"
 anyhow = "1.0"
 chrono = "0.4.38"
 tera = "1.19"
+petgraph = "0.6.5"
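`petgraph` 0.6.5 enters the dependency set for the report generator's new topologically ordered module execution (see the worker.rs diff below). A minimal, self-contained example of the `toposort` call used there, with invented module names:

```rust
use petgraph::algo::toposort;
use petgraph::graph::DiGraph;

fn main() {
    let mut graph = DiGraph::<&str, ()>::new();
    let fundamentals = graph.add_node("fundamentals");
    let valuation = graph.add_node("valuation");
    let summary = graph.add_node("summary");

    // Edge direction: dependency -> dependent, matching the worker's convention.
    graph.add_edge(fundamentals, valuation, ());
    graph.add_edge(valuation, summary, ());
    graph.add_edge(fundamentals, summary, ());

    let order: Vec<&str> = toposort(&graph, None)
        .expect("no cycles in this example")
        .into_iter()
        .map(|ix| graph[ix])
        .collect();
    // The edges force a unique valid ordering here.
    assert_eq!(order, ["fundamentals", "valuation", "summary"]);
}
```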
@@ -38,7 +38,7 @@ async fn main() -> Result<()> {
         .map_err(|e| ProviderError::Internal(anyhow::anyhow!(e.to_string())))?;
     let state_clone = app_state.clone();
     tokio::spawn(async move {
-        if let Err(e) = message_consumer::subscribe_to_events(state_clone, nats_client).await {
+        if let Err(e) = message_consumer::subscribe_to_commands(state_clone, nats_client).await {
             tracing::error!("message consumer exited with error: {:?}", e);
         }
     });
@@ -1,36 +1,40 @@
 use std::sync::Arc;

-use common_contracts::messages::FinancialsPersistedEvent;
+use common_contracts::messages::GenerateReportCommand;
 use futures::StreamExt;
 use tracing::{error, info};

 use crate::{state::AppState, worker::run_report_generation_workflow};

-const SUBJECT_NAME: &str = "events.data.financials_persisted";
+const SUBJECT_NAME: &str = "analysis.commands.generate_report";

-pub async fn subscribe_to_events(
+pub async fn subscribe_to_commands(
     app_state: AppState,
     nats_client: async_nats::Client,
 ) -> Result<(), anyhow::Error> {
     let mut subscriber = nats_client.subscribe(SUBJECT_NAME.to_string()).await?;
     info!(
-        "Consumer started, waiting for messages on subject '{}'",
+        "Consumer started, waiting for commands on subject '{}'",
         SUBJECT_NAME
     );

     while let Some(message) = subscriber.next().await {
-        info!("Received NATS message for financials persisted event.");
+        info!("Received NATS command to generate report.");
         let state_clone = app_state.clone();
         tokio::spawn(async move {
-            match serde_json::from_slice::<FinancialsPersistedEvent>(&message.payload) {
-                Ok(event) => {
-                    info!("Deserialized event for symbol: {}", event.symbol);
-                    if let Err(e) = run_report_generation_workflow(Arc::new(state_clone), event).await {
+            match serde_json::from_slice::<GenerateReportCommand>(&message.payload) {
+                Ok(command) => {
+                    info!(
+                        "Deserialized command for symbol: {}, template: {}",
+                        command.symbol, command.template_id
+                    );
+                    if let Err(e) = run_report_generation_workflow(Arc::new(state_clone), command).await
+                    {
                         error!("Error running report generation workflow: {:?}", e);
                     }
                 }
                 Err(e) => {
-                    error!("Failed to deserialize message: {}", e);
+                    error!("Failed to deserialize GenerateReportCommand: {}", e);
                 }
             }
         });
@@ -6,8 +6,11 @@

 use crate::error::Result;
 use common_contracts::{
-    config_models::{AnalysisModulesConfig, LlmProvidersConfig},
-    dtos::{CompanyProfileDto, RealtimeQuoteDto, TimeSeriesFinancialBatchDto, TimeSeriesFinancialDto},
+    config_models::{AnalysisTemplateSets, LlmProvidersConfig},
+    dtos::{
+        CompanyProfileDto, NewAnalysisResult, RealtimeQuoteDto, TimeSeriesFinancialBatchDto,
+        TimeSeriesFinancialDto,
+    },
 };
 use tracing::info;

@@ -43,7 +46,7 @@ impl PersistenceClient {
         &self,
         symbol: &str,
     ) -> Result<Vec<TimeSeriesFinancialDto>> {
-        let url = format!("{}/market-data/financials/{}", self.base_url, symbol);
+        let url = format!("{}/market-data/financial-statements/{}", self.base_url, symbol);
         info!("Fetching financials for {} from {}", symbol, url);
         let dtos = self
             .client
@@ -72,20 +75,37 @@ impl PersistenceClient {
         Ok(config)
     }

-    pub async fn get_analysis_modules_config(&self) -> Result<AnalysisModulesConfig> {
-        let url = format!("{}/configs/analysis_modules", self.base_url);
-        info!("Fetching analysis modules config from {}", url);
+    pub async fn get_analysis_template_sets(&self) -> Result<AnalysisTemplateSets> {
+        let url = format!("{}/configs/analysis_template_sets", self.base_url);
+        info!("Fetching analysis template sets from {}", url);
         let config = self
             .client
             .get(&url)
             .send()
             .await?
             .error_for_status()?
-            .json::<AnalysisModulesConfig>()
+            .json::<AnalysisTemplateSets>()
             .await?;
         Ok(config)
     }

+    // --- Data Writing Methods ---
+
+    pub async fn create_analysis_result(&self, result: NewAnalysisResult) -> Result<()> {
+        let url = format!("{}/analysis-results", self.base_url);
+        info!(
+            "Persisting analysis result for symbol '{}', module '{}' to {}",
+            result.symbol, result.module_id, url
+        );
+        self.client
+            .post(&url)
+            .json(&result)
+            .send()
+            .await?
+            .error_for_status()?;
+        Ok(())
+    }
+
     pub async fn upsert_company_profile(&self, profile: CompanyProfileDto) -> Result<()> {
         let url = format!("{}/companies", self.base_url);
         info!("Upserting company profile for {} to {}", profile.symbol, url);
@@ -100,7 +120,7 @@ impl PersistenceClient {

     pub async fn upsert_realtime_quote(&self, quote: RealtimeQuoteDto) -> Result<()> {
         let url = format!("{}/market-data/quotes", self.base_url);
         info!("Upserting realtime quote for {} to {}", quote.symbol, url);
         self.client
             .post(&url)
             .json(&quote)
@@ -116,7 +136,12 @@ impl PersistenceClient {
     }
         let url = format!("{}/market-data/financials/batch", self.base_url);
         let symbol = dtos[0].symbol.clone();
-        info!("Batch inserting {} financial statements for {} to {}", dtos.len(), symbol, url);
+        info!(
+            "Batch inserting {} financial statements for {} to {}",
+            dtos.len(),
+            symbol,
+            url
+        );

         let batch = TimeSeriesFinancialBatchDto { records: dtos };
@ -1,21 +1,26 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use common_contracts::config_models::{AnalysisModuleConfig, AnalysisModulesConfig, LlmProvider, LlmProvidersConfig};
|
|
||||||
use common_contracts::dtos::{CompanyProfileDto, TimeSeriesFinancialDto};
|
use common_contracts::config_models::{
|
||||||
use common_contracts::messages::FinancialsPersistedEvent;
|
AnalysisModuleConfig, AnalysisTemplateSets, LlmProvidersConfig,
|
||||||
|
};
|
||||||
|
use common_contracts::dtos::{CompanyProfileDto, NewAnalysisResult, TimeSeriesFinancialDto};
|
||||||
|
use common_contracts::messages::GenerateReportCommand; // Assuming this command is defined
|
||||||
|
use petgraph::algo::toposort;
|
||||||
|
use petgraph::graph::DiGraph;
|
||||||
use tera::{Context, Tera};
|
use tera::{Context, Tera};
|
||||||
use tracing::{info, warn, instrument};
|
use tracing::{info, warn, instrument, error};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
use crate::error::{ProviderError, Result};
|
use crate::error::{ProviderError, Result};
|
||||||
use crate::llm_client::LlmClient;
|
use crate::llm_client::LlmClient;
|
||||||
use crate::persistence::PersistenceClient;
|
use crate::persistence::PersistenceClient;
|
||||||
use crate::state::AppState;
|
use crate::state::AppState;
|
||||||
use crate::templates::render_prompt;
|
|
||||||
|
|
||||||
#[instrument(skip_all, fields(symbol = %event.symbol))]
|
#[instrument(skip_all, fields(request_id = %command.request_id, symbol = %command.symbol, template_id = %command.template_id))]
|
||||||
pub async fn run_report_generation_workflow(
|
pub async fn run_report_generation_workflow(
|
||||||
state: Arc<AppState>,
|
state: Arc<AppState>,
|
||||||
event: FinancialsPersistedEvent,
|
command: GenerateReportCommand,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
info!("Starting report generation workflow.");
|
info!("Starting report generation workflow.");
|
||||||
|
|
||||||
@ -23,88 +28,167 @@ pub async fn run_report_generation_workflow(
|
|||||||
PersistenceClient::new(state.config.data_persistence_service_url.clone());
|
PersistenceClient::new(state.config.data_persistence_service_url.clone());
|
||||||
|
|
||||||
// 1. Fetch all necessary data AND configurations in parallel
|
// 1. Fetch all necessary data AND configurations in parallel
|
||||||
let (profile, financials, llm_providers, analysis_modules) =
|
let (profile, financials, llm_providers, template_sets) =
|
||||||
fetch_data_and_configs(&persistence_client, &event.symbol).await?;
|
match fetch_data_and_configs(&persistence_client, &command.symbol).await {
|
||||||
|
Ok(data) => data,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to fetch initial data and configs: {}", e);
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
if financials.is_empty() {
|
// 2. Select the correct template set
|
||||||
warn!("No financial data found. Aborting report generation.");
|
let template_set = match template_sets.get(&command.template_id) {
|
||||||
return Ok(());
|
Some(ts) => ts,
|
||||||
}
|
None => {
|
||||||
|
let err_msg = format!("Analysis template set with ID '{}' not found.", command.template_id);
|
||||||
|
error!("{}", err_msg);
|
||||||
|
return Err(ProviderError::Configuration(err_msg));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// --- New: Dynamic, Multi-Module Workflow ---
|
// 3. Topologically sort modules to get execution order
|
||||||
|
let sorted_modules = match sort_modules_by_dependency(&template_set.modules) {
|
||||||
|
Ok(order) => order,
|
||||||
|
Err(e) => {
|
||||||
|
error!("Failed to sort analysis modules: {}", e);
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
info!(execution_order = ?sorted_modules, "Successfully determined module execution order.");
|
||||||
|
|
||||||
|
// 4. Execute modules in order
|
||||||
let mut generated_results: HashMap<String, String> = HashMap::new();
|
     let mut generated_results: HashMap<String, String> = HashMap::new();

-    // Naive sequential execution based on dependencies. A proper topological sort would be better.
-    // For now, we just iterate multiple times to resolve dependencies.
-    for _ in 0..analysis_modules.len() {
-        for (module_id, module_config) in &analysis_modules {
-            if generated_results.contains_key(module_id.as_str()) {
-                continue; // Already generated
-            }
-
-            // Check if all dependencies are met
-            let deps_met = module_config.dependencies.iter().all(|dep| generated_results.contains_key(dep));
-            if !deps_met {
-                continue; // Will try again in the next iteration
-            }
-
-            info!(module_id = %module_id, "All dependencies met. Generating report for module.");
-
-            // 2. Dynamically create LLM client for this module
-            let llm_client = create_llm_client_for_module(&state, &llm_providers, module_config)?;
-
-            // 3. Create context and render the prompt
-            let mut context = Context::new();
-            context.insert("company_name", &profile.name);
-            context.insert("ts_code", &event.symbol);
-            // Inject dependencies into context
-            for dep in &module_config.dependencies {
-                if let Some(content) = generated_results.get(dep) {
-                    context.insert(dep, content);
-                }
-            }
-            // A placeholder for financial data, can be expanded
-            context.insert("financial_data", "...");
-
-            let prompt = Tera::one_off(&module_config.prompt_template, &context, true)
-                .map_err(|e| ProviderError::Internal(anyhow::anyhow!("Prompt rendering failed for module '{}': {}", module_id, e)))?;
-
-            // 4. Call the LLM to generate the content for this module
-            let content = llm_client.generate_text(prompt).await?;
-            info!(module_id = %module_id, "Successfully generated content.");
-
-            // TODO: Persist the generated result via persistence_client
-            generated_results.insert(module_id.clone(), content);
-        }
-    }
-
-    if generated_results.len() != analysis_modules.len() {
-        warn!("Could not generate all modules due to missing dependencies or circular dependency.");
-    }
+    for module_id in sorted_modules {
+        let module_config = template_set.modules.get(&module_id).unwrap(); // Should not fail due to sorting logic
+
+        info!(module_id = %module_id, "All dependencies met. Generating report for module.");
+
+        let llm_client = match create_llm_client_for_module(&llm_providers, module_config) {
+            Ok(client) => client,
+            Err(e) => {
+                error!(module_id = %module_id, "Failed to create LLM client: {}. Skipping module.", e);
+                generated_results.insert(module_id.clone(), format!("Error: Failed to create LLM client: {}", e));
+                continue;
+            }
+        };
+
+        let mut context = Context::new();
+        context.insert("company_name", &profile.name);
+        context.insert("ts_code", &command.symbol);
+        for dep in &module_config.dependencies {
+            if let Some(content) = generated_results.get(dep) {
+                context.insert(dep, content);
+            }
+        }
+        // TODO: This is a placeholder. Implement proper financial data formatting and injection.
+        context.insert("financial_data", "...");
+
+        let prompt = match Tera::one_off(&module_config.prompt_template, &context, true) {
+            Ok(p) => p,
+            Err(e) => {
+                let err_msg = format!("Prompt rendering failed: {}", e);
+                error!(module_id = %module_id, "{}", err_msg);
+                generated_results.insert(module_id.clone(), format!("Error: {}", err_msg));
+                continue;
+            }
+        };
+
+        let content = match llm_client.generate_text(prompt).await {
+            Ok(c) => c,
+            Err(e) => {
+                let err_msg = format!("LLM generation failed: {}", e);
+                error!(module_id = %module_id, "{}", err_msg);
+                generated_results.insert(module_id.clone(), format!("Error: {}", err_msg));
+                continue;
+            }
+        };
+
+        info!(module_id = %module_id, "Successfully generated content.");
+
+        let result_to_persist = NewAnalysisResult {
+            request_id: command.request_id,
+            symbol: command.symbol.clone(),
+            template_id: command.template_id.clone(),
+            module_id: module_id.clone(),
+            content: content.clone(),
+            meta_data: serde_json::json!({ "model_id": module_config.model_id }),
+        };
+
+        if let Err(e) = persistence_client.create_analysis_result(result_to_persist).await {
+            error!(module_id = %module_id, "Failed to persist analysis result: {}", e);
+            // Decide if we should continue or fail the whole workflow
+        }
+
+        generated_results.insert(module_id.clone(), content);
+    }

     info!("Report generation workflow finished.");
     Ok(())
 }
+fn sort_modules_by_dependency(
+    modules: &HashMap<String, AnalysisModuleConfig>,
+) -> Result<Vec<String>> {
+    let mut graph = DiGraph::<String, ()>::new();
+    let mut node_map = HashMap::new();
+
+    for module_id in modules.keys() {
+        let index = graph.add_node(module_id.clone());
+        node_map.insert(module_id.clone(), index);
+    }
+
+    for (module_id, module_config) in modules {
+        if let Some(&module_index) = node_map.get(module_id) {
+            for dep in &module_config.dependencies {
+                if let Some(&dep_index) = node_map.get(dep) {
+                    graph.add_edge(dep_index, module_index, ());
+                } else {
+                    return Err(ProviderError::Configuration(format!(
+                        "Module '{}' has a missing dependency: '{}'",
+                        module_id, dep
+                    )));
+                }
+            }
+        }
+    }
+
+    match toposort(&graph, None) {
+        Ok(sorted_nodes) => {
+            let sorted_ids = sorted_nodes
+                .into_iter()
+                .map(|node_index| graph[node_index].clone())
+                .collect();
+            Ok(sorted_ids)
+        }
+        Err(cycle) => {
+            let cycle_id = graph[cycle.node_id()].clone();
+            Err(ProviderError::Configuration(format!(
+                "Circular dependency detected in analysis modules. Cycle involves: '{}'",
+                cycle_id
+            )))
+        }
+    }
+}
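For reference, a self-contained sketch of the `petgraph` pattern the new function relies on: each dependency becomes an edge from prerequisite to dependent, and `toposort` yields an execution order, or the offending node when there is a cycle. The module names below are invented for illustration:

```rust
use petgraph::algo::toposort;
use petgraph::graph::DiGraph;

fn main() {
    // Edges point from prerequisite to dependent, as in sort_modules_by_dependency.
    let mut graph = DiGraph::<&str, ()>::new();
    let financials = graph.add_node("financial_analysis");
    let risks = graph.add_node("risk_assessment");
    let summary = graph.add_node("executive_summary");
    graph.add_edge(financials, summary, ()); // summary depends on financials
    graph.add_edge(risks, summary, ());      // summary depends on risks

    match toposort(&graph, None) {
        // Prerequisites are guaranteed to come before their dependents.
        Ok(order) => {
            let names: Vec<_> = order.iter().map(|&n| graph[n]).collect();
            println!("{:?}", names);
        }
        Err(cycle) => println!("cycle at {:?}", graph[cycle.node_id()]),
    }
}
```

This is also why the generation loop above can `unwrap()` the module lookup: every id in `sorted_modules` came out of the same map that was fed into the sort.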
 fn create_llm_client_for_module(
-    state: &Arc<AppState>,
     llm_providers: &LlmProvidersConfig,
     module_config: &AnalysisModuleConfig,
 ) -> Result<LlmClient> {
     let provider = llm_providers.get(&module_config.provider_id).ok_or_else(|| {
         ProviderError::Configuration(format!(
-            "Provider '{}' not found in llm_providers config",
-            module_config.provider_id
+            "Provider '{}' not found for module '{}'",
+            module_config.provider_id, module_config.name
         ))
     })?;

-    // In the old design, the api key name was stored. In the new design, it's stored directly.
-    let api_key = provider.api_key.clone();
-
     Ok(LlmClient::new(
         provider.api_base_url.clone(),
-        api_key.into(), // Convert String to SecretString
+        provider.api_key.clone().into(),
         module_config.model_id.clone(),
     ))
 }
@@ -116,13 +200,13 @@ async fn fetch_data_and_configs(
     CompanyProfileDto,
     Vec<TimeSeriesFinancialDto>,
     LlmProvidersConfig,
-    AnalysisModulesConfig,
+    AnalysisTemplateSets,
 )> {
-    let (profile, financials, llm_providers, analysis_modules) = tokio::try_join!(
+    let (profile, financials, llm_providers, template_sets) = tokio::try_join!(
         client.get_company_profile(symbol),
         client.get_financial_statements(symbol),
         client.get_llm_providers_config(),
-        client.get_analysis_modules_config(),
+        client.get_analysis_template_sets(), // Changed from get_analysis_modules_config
     )?;
-    Ok((profile, financials, llm_providers, analysis_modules))
+    Ok((profile, financials, llm_providers, template_sets))
 }
services/tushare-provider-service/Cargo.lock (generated, 11 lines changed)

@@ -2296,6 +2296,16 @@ version = "4.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"

+[[package]]
+name = "secrecy"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e"
+dependencies = [
+ "serde",
+ "zeroize",
+]
+
 [[package]]
 name = "security-framework"
 version = "2.11.1"

@@ -3312,6 +3322,7 @@ dependencies = [
 "rmcp",
 "rust_decimal",
 "rust_decimal_macros",
+ "secrecy",
 "serde",
 "serde_json",
 "thiserror 2.0.17",
@@ -31,3 +31,4 @@ chrono = "0.4.38"
 rust_decimal = "1.35.0"
 rust_decimal_macros = "1.35.0"
 itertools = "0.14.0"
+secrecy = { version = "0.8", features = ["serde"] }
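The `secrecy` crate wraps the token so that it is redacted in `Debug` and log output and zeroized on drop, and the raw value has to be requested explicitly. A minimal standalone sketch of the pattern this PR adopts (not code from the PR itself):

```rust
use secrecy::{ExposeSecret, SecretString};

fn main() {
    // SecretString redacts its contents in Debug output and zeroizes on drop.
    let token: SecretString = SecretString::from("tushare-token-123".to_string());
    println!("{:?}", token); // prints: Secret([REDACTED alloc::string::String])

    // Access to the raw value is an explicit, greppable call site.
    let header_value = format!("token={}", token.expose_secret());
    assert!(header_value.ends_with("123"));
}
```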
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use axum::{routing::get, Router, extract::State, response::Json};

-use crate::state::AppState;
+use crate::state::{AppState, ServiceOperationalStatus};
 use common_contracts::observability::{HealthStatus, ServiceStatus};

 pub fn create_router(app_state: AppState) -> Router {

@@ -11,12 +11,22 @@ pub fn create_router(app_state: AppState) -> Router {
     .with_state(app_state)
 }

-async fn health_check(State(_state): State<AppState>) -> Json<HealthStatus> {
+async fn health_check(State(state): State<AppState>) -> Json<HealthStatus> {
     let mut details = HashMap::new();
-    details.insert("message_bus_connection".to_string(), "ok".to_string());
+    let operational_status = state.status.read().await;
+
+    let (service_status, reason) = match &*operational_status {
+        ServiceOperationalStatus::Active => (ServiceStatus::Ok, "ok".to_string()),
+        ServiceOperationalStatus::Degraded { reason } => {
+            (ServiceStatus::Degraded, reason.clone())
+        }
+    };
+
+    details.insert("operational_status".to_string(), reason);
+
     let status = HealthStatus {
         module_id: "tushare-provider-service".to_string(),
-        status: ServiceStatus::Ok,
+        status: service_status,
         version: env!("CARGO_PKG_VERSION").to_string(),
         details,
     };
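With this change the `/health` endpoint reflects runtime state instead of a hard-coded `Ok`, which is what the compose-level `service_healthy` gating depends on. Assuming the usual serde derives on `HealthStatus`/`ServiceStatus` (they live in `common_contracts` and are not shown in this diff), a degraded instance would answer roughly:

```json
{
  "module_id": "tushare-provider-service",
  "status": "Degraded",
  "version": "0.1.0",
  "details": {
    "operational_status": "Tushare API Key is not configured."
  }
}
```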
@@ -1,4 +1,5 @@
 use serde::Deserialize;
+use secrecy::SecretString;

 #[derive(Debug, Deserialize, Clone)]
 pub struct AppConfig {

@@ -6,7 +7,7 @@ pub struct AppConfig {
     pub nats_addr: String,
     pub data_persistence_service_url: String,
     pub tushare_api_url: String,
-    pub tushare_api_token: String,
+    pub tushare_api_token: Option<SecretString>,
 }

 impl AppConfig {

@@ -36,11 +37,6 @@ impl AppConfig {
                 "TUSHARE_API_URL must not be empty".to_string(),
             ));
         }
-        if cfg.tushare_api_token.trim().is_empty() || cfg.tushare_api_token.trim() == "YOUR_TUSHARE_API_TOKEN" {
-            return Err(config::ConfigError::Message(
-                "TUSHARE_API_TOKEN must be provided (non-empty, non-placeholder)".to_string(),
-            ));
-        }

         Ok(cfg)
     }
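The token is now optional at startup: a missing `TUSHARE_API_TOKEN` no longer aborts boot, it just starts the service in the degraded state until the poller (next file) delivers a key. A minimal sketch of how an `Option<SecretString>` field behaves with the `config` crate; the loader shape here is an assumption, since `AppConfig::load` is not shown in full in this diff:

```rust
use secrecy::SecretString;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct DemoConfig {
    // Deserializes to None when TUSHARE_API_TOKEN is absent, instead of failing.
    tushare_api_token: Option<SecretString>,
}

fn main() -> Result<(), config::ConfigError> {
    let cfg: DemoConfig = config::Config::builder()
        .add_source(config::Environment::default())
        .build()?
        .try_deserialize()?;
    println!("token configured: {}", cfg.tushare_api_token.is_some());
    Ok(())
}
```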
services/tushare-provider-service/src/config_poller.rs (new file, 56 lines)

@@ -0,0 +1,56 @@
+use crate::error::Result;
+use crate::state::AppState;
+use common_contracts::config_models::{DataSourceConfig, DataSourceProvider};
+use secrecy::SecretString;
+use std::collections::HashMap;
+use std::time::Duration;
+use tracing::{error, info, instrument};
+
+const POLLING_INTERVAL_SECONDS: u64 = 60;
+
+#[instrument(skip(state))]
+pub async fn run_config_poller(state: AppState) {
+    info!("Starting configuration poller...");
+    let mut interval = tokio::time::interval(Duration::from_secs(POLLING_INTERVAL_SECONDS));
+    interval.tick().await; // Initial tick happens immediately
+
+    loop {
+        if let Err(e) = poll_and_update_config(&state).await {
+            error!("Failed to poll and update config: {:?}", e);
+        }
+        interval.tick().await;
+    }
+}
+
+async fn poll_and_update_config(state: &AppState) -> Result<()> {
+    info!("Polling for data source configurations...");
+    let client = reqwest::Client::new();
+    let url = format!(
+        "{}/configs/data_sources",
+        state.config.data_persistence_service_url
+    );
+
+    let response = client.get(&url).send().await?;
+    response.error_for_status_ref()?;
+
+    let configs: HashMap<String, DataSourceConfig> = response.json().await?;
+
+    let tushare_config = configs.values().find(|cfg| {
+        matches!(cfg.provider, DataSourceProvider::Tushare) && cfg.enabled
+    });
+
+    if let Some(config) = tushare_config {
+        if let Some(api_key) = &config.api_key {
+            state.update_provider(Some(SecretString::from(api_key.clone()))).await;
+            info!("Successfully updated Tushare provider with new configuration.");
+        } else {
+            state.update_provider(None).await;
+            info!("Tushare provider is enabled but API key is missing. Service is degraded.");
+        }
+    } else {
+        state.update_provider(None).await;
+        info!("No enabled Tushare configuration found. Service is degraded.");
+    }
+
+    Ok(())
+}
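The poller expects `GET {data_persistence_service_url}/configs/data_sources` to return a map of named `DataSourceConfig` entries. Judging from the fields the code reads (`provider`, `enabled`, `api_key`), the payload would look roughly like the sketch below; exact key names and enum casing are assumptions, since the DTO lives in `common_contracts` and is not part of this diff:

```json
{
  "tushare_main": {
    "provider": "Tushare",
    "enabled": true,
    "api_key": "tus_xxxxxxxx"
  }
}
```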
@@ -1,27 +1,47 @@
+use anyhow::anyhow;
+use reqwest::Error as ReqwestError;
 use thiserror::Error;

 #[derive(Error, Debug)]
-pub enum ProviderError {
-    #[error("API request failed: {0}")]
-    ApiRequest(#[from] reqwest::Error),
-
-    #[error("Failed to parse JSON response: {0}")]
-    JsonParsing(#[from] serde_json::Error),
-
-    #[error("Tushare API returned an error: code={code}, message='{msg}'")]
-    TushareApi { code: i64, msg: String },
-
+pub enum AppError {
     #[error("Configuration error: {0}")]
     Configuration(String),

-    #[error("Data mapping error: {0}")]
+    #[error("Data parsing error: {0}")]
+    DataParsing(#[from] anyhow::Error),
+
+    #[error("Mapping error: {0}")]
     Mapping(String),

-    #[error("Persistence client error: {0}")]
-    Persistence(String),
-
     #[error("Internal error: {0}")]
-    Internal(#[from] anyhow::Error),
+    Internal(String),
+
+    #[error("Provider not available: {0}")]
+    ProviderNotAvailable(String),
+
+    #[error(transparent)]
+    Reqwest(#[from] ReqwestError),
+
+    #[error(transparent)]
+    Nats(#[from] async_nats::Error),
+
+    #[error(transparent)]
+    NatsSubscribe(#[from] async_nats::client::SubscribeError),
+
+    #[error(transparent)]
+    NatsUnsubscribe(#[from] async_nats::UnsubscribeError),
+
+    #[error(transparent)]
+    NatsPublish(#[from] async_nats::error::Error<async_nats::client::PublishErrorKind>),
+
+    #[error(transparent)]
+    SerdeJson(#[from] serde_json::Error),
 }

-pub type Result<T> = std::result::Result<T, ProviderError>;
+impl From<config::ConfigError> for AppError {
+    fn from(e: config::ConfigError) -> Self {
+        AppError::Configuration(e.to_string())
+    }
+}
+
+pub type Result<T, E = AppError> = std::result::Result<T, E>;
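A quick sketch (not part of the PR) of what the `#[from]`/`#[error(transparent)]` wiring buys: fallible calls lift library errors into `AppError` through `?` with no `map_err` at the call site, which is what lets the worker below drop its `anyhow::anyhow!` wrappers around NATS and HTTP calls:

```rust
use crate::error::Result; // Result<T, E = AppError> from the enum above

// reqwest::Error converts through the Reqwest variant and serde_json::Error
// through SerdeJson, so both `?` operators compile without explicit mapping.
async fn fetch_json(url: &str) -> Result<serde_json::Value> {
    let body = reqwest::get(url).await?.text().await?;
    let value: serde_json::Value = serde_json::from_str(&body)?;
    Ok(value)
}
```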
@@ -8,9 +8,10 @@ mod state;
 mod ts_client;
 mod tushare;
 mod worker;
+mod config_poller;

 use crate::config::AppConfig;
-use crate::error::{Result, ProviderError};
+use crate::error::{Result, AppError};
 use crate::state::AppState;
 use tracing::info;

@@ -24,12 +25,15 @@ async fn main() -> Result<()> {
     info!("Starting tushare-provider-service...");

     // Load configuration
-    let config = AppConfig::load().map_err(|e| ProviderError::Configuration(e.to_string()))?;
+    let config = AppConfig::load().map_err(|e| AppError::Configuration(e.to_string()))?;
     let port = config.server_port;

     // Initialize application state
     let app_state = AppState::new(config);

+    // --- Start the config poller ---
+    tokio::spawn(config_poller::run_config_poller(app_state.clone()));
+
     // Create the Axum router
     let app = api::create_router(app_state.clone());
@@ -8,7 +8,7 @@ use rust_decimal::prelude::*;
 use rust_decimal_macros::dec;

 use crate::{
-    error::ProviderError,
+    error::AppError,
     tushare::{
         BalanceSheet, Cashflow, Dividend, FinaIndicator, Income, Repurchase, StkHolderNumber,
     },

@@ -28,7 +28,7 @@ pub struct TushareFinancials {
 pub fn map_financial_statements(
     symbol: &str,
     raw_data: TushareFinancials,
-) -> Result<Vec<TimeSeriesFinancialDto>, ProviderError> {
+) -> Result<Vec<TimeSeriesFinancialDto>, AppError> {
     // 1. Merge all financial data by end_date
     let mut by_date = merge_financial_data(&raw_data);

@@ -251,12 +251,12 @@ fn calculate_derived_metrics(series: &mut SeriesMap) {
     series.extend(new_series);
 }

-fn flatten_series_to_dtos(symbol: &str, series: SeriesMap) -> Result<Vec<TimeSeriesFinancialDto>, ProviderError> {
+fn flatten_series_to_dtos(symbol: &str, series: SeriesMap) -> Result<Vec<TimeSeriesFinancialDto>, AppError> {
     let mut dtos: Vec<TimeSeriesFinancialDto> = Vec::new();
     for (metric_name, data_points) in series {
         for point in data_points {
             let period_date = NaiveDate::parse_from_str(&point.period, "%Y%m%d")
-                .map_err(|e| ProviderError::Mapping(format!("Invalid period '{}': {}", point.period, e)))?;
+                .map_err(|e| AppError::Mapping(format!("Invalid period '{}': {}", point.period, e)))?;
             dtos.push(TimeSeriesFinancialDto {
                 symbol: symbol.to_string(),
                 metric_name: metric_name.clone(),
@@ -1,5 +1,5 @@
-use crate::error::{Result, ProviderError};
-use crate::state::AppState;
+use crate::error::Result;
+use crate::state::{AppState, ServiceOperationalStatus};
 use common_contracts::messages::FetchCompanyDataCommand;
 use futures_util::StreamExt;
 use tracing::{error, info, warn};

@@ -7,34 +7,60 @@ use tracing::{error, info, warn};
 use std::sync::Arc;
 use tokio::sync::mpsc;
 use chrono::Utc;
+use std::time::Duration;

 const SUBJECT_NAME: &str = "data_fetch_commands";

 pub async fn run(state: AppState) -> Result<()> {
     info!("Starting NATS message consumer...");

-    let client = async_nats::connect(&state.config.nats_addr)
-        .await
-        .map_err(|e| ProviderError::Internal(anyhow::anyhow!("NATS connect failed: {}", e)))?;
-    info!("Connected to NATS.");
-    subscribe_to_data_commands(Arc::new(state), client).await
+    loop {
+        let status = state.status.read().await.clone();
+        if let ServiceOperationalStatus::Degraded { reason } = status {
+            warn!(
+                "Service is in degraded state (reason: {}). Pausing message consumption for 30s.",
+                reason
+            );
+            tokio::time::sleep(Duration::from_secs(30)).await;
+            continue;
+        }
+
+        info!("Service is Active. Connecting to NATS...");
+        match async_nats::connect(&state.config.nats_addr).await {
+            Ok(client) => {
+                info!("Successfully connected to NATS.");
+                if let Err(e) = subscribe_and_process(state.clone(), client).await {
+                    error!("NATS subscription error: {}. Reconnecting in 10s...", e);
+                }
+            }
+            Err(e) => {
+                error!("Failed to connect to NATS: {}. Retrying in 10s...", e);
+            }
+        }
+        tokio::time::sleep(Duration::from_secs(10)).await;
+    }
 }

-pub async fn subscribe_to_data_commands(app_state: Arc<AppState>, nats_client: async_nats::Client) -> Result<()> {
-    // This is a simple subscriber. For production, consider JetStream for durability.
-    let mut subscriber = nats_client
-        .subscribe(SUBJECT_NAME.to_string())
-        .await
-        .map_err(|e| ProviderError::Internal(anyhow::anyhow!("NATS subscribe failed: {}", e)))?;
+async fn subscribe_and_process(
+    state: AppState,
+    client: async_nats::Client,
+) -> Result<()> {
+    let mut subscriber = client.subscribe(SUBJECT_NAME.to_string()).await?;

     info!(
         "Consumer started, waiting for messages on subject '{}'",
         SUBJECT_NAME
     );

     while let Some(message) = subscriber.next().await {
+        let current_status = state.status.read().await.clone();
+        if matches!(current_status, ServiceOperationalStatus::Degraded {..}) {
+            warn!("Service became degraded. Disconnecting from NATS and pausing consumption.");
+            subscriber.unsubscribe().await?;
+            return Ok(());
+        }
+
         info!("Received NATS message.");
-        let state_for_closure = app_state.clone();
+        let state_for_closure = Arc::new(state.clone());

         tokio::spawn(async move {
             if let Err(e) = serde_json::from_slice::<FetchCompanyDataCommand>(&message.payload) {
@@ -1,31 +1,72 @@
-use std::sync::Arc;
-
-use dashmap::DashMap;
-use uuid::Uuid;
-
-use common_contracts::observability::TaskProgress;
-
 use crate::config::AppConfig;
 use crate::tushare::TushareDataProvider;
+use common_contracts::observability::TaskProgress;
+use dashmap::DashMap;
+use secrecy::{ExposeSecret, SecretString};
+use std::sync::Arc;
+use tokio::sync::RwLock;
+use uuid::Uuid;
+
+#[derive(Clone, Debug)]
+pub enum ServiceOperationalStatus {
+    Active,
+    Degraded { reason: String },
+}

 #[derive(Clone)]
 pub struct AppState {
     pub tasks: Arc<DashMap<Uuid, TaskProgress>>,
     pub config: Arc<AppConfig>,
-    pub tushare_provider: Arc<TushareDataProvider>,
+    pub status: Arc<RwLock<ServiceOperationalStatus>>,
+    tushare_provider: Arc<RwLock<Option<TushareDataProvider>>>,
 }

 impl AppState {
     pub fn new(config: AppConfig) -> Self {
-        let provider = Arc::new(TushareDataProvider::new(
-            config.tushare_api_url.clone(),
-            config.tushare_api_token.clone(),
-        ));
+        let (initial_provider, initial_status) =
+            if let Some(api_key) = config.tushare_api_token.as_ref() {
+                let provider = TushareDataProvider::new(
+                    config.tushare_api_url.clone(),
+                    api_key.expose_secret().clone(),
+                );
+                (Some(provider), ServiceOperationalStatus::Active)
+            } else {
+                (
+                    None,
+                    ServiceOperationalStatus::Degraded {
+                        reason: "Tushare API Key is not configured.".to_string(),
+                    },
+                )
+            };
+
         Self {
             tasks: Arc::new(DashMap::new()),
             config: Arc::new(config),
-            tushare_provider: provider,
+            status: Arc::new(RwLock::new(initial_status)),
+            tushare_provider: Arc::new(RwLock::new(initial_provider)),
         }
     }
+
+    pub async fn get_provider(&self) -> Option<TushareDataProvider> {
+        self.tushare_provider.read().await.clone()
+    }
+
+    pub async fn update_provider(&self, api_key: Option<SecretString>) {
+        let mut provider_guard = self.tushare_provider.write().await;
+        let mut status_guard = self.status.write().await;
+
+        if let Some(key) = api_key {
+            let new_provider = TushareDataProvider::new(
+                self.config.tushare_api_url.clone(),
+                key.expose_secret().clone(),
+            );
+            *provider_guard = Some(new_provider);
+            *status_guard = ServiceOperationalStatus::Active;
+        } else {
+            *provider_guard = None;
+            *status_guard = ServiceOperationalStatus::Degraded {
+                reason: "Tushare API Key is not configured.".to_string(),
+            };
+        }
+    }
 }
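A minimal sketch (not in the PR) of the behaviour the new `AppState` API is meant to guarantee, assuming a hypothetical `test_config(token: Option<&str>)` helper that builds an `AppConfig`:

```rust
#[tokio::test]
async fn swapping_the_key_flips_provider_and_status() {
    // test_config is a hypothetical helper producing an AppConfig without a token.
    let state = AppState::new(test_config(None));
    assert!(state.get_provider().await.is_none()); // boots Degraded

    // Poller found a key: provider becomes available, status flips to Active.
    state.update_provider(Some("tus_xxx".to_string().into())).await;
    assert!(state.get_provider().await.is_some());

    // Key revoked upstream: back to Degraded, provider withdrawn.
    state.update_provider(None).await;
    assert!(state.get_provider().await.is_none());
}
```

Note that `update_provider` always takes the provider write lock before the status write lock; keeping that acquisition order consistent everywhere is what avoids deadlocks if another task ever needs both.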
@@ -1,4 +1,4 @@
-use crate::error::ProviderError;
+use crate::error::AppError;
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use tracing::info;

@@ -46,7 +46,7 @@ impl TushareClient {
     api_name: &str,
     params: serde_json::Value,
     fields: &str,
-) -> Result<Vec<T>, ProviderError> {
+) -> Result<Vec<T>, AppError> {
     let request_payload = TushareRequest {
         api_name,
         token: &self.api_token,

@@ -67,15 +67,16 @@ impl TushareClient {
     let response: TushareResponse<T> = serde_json::from_str(&text)?;

     if response.code != 0 {
-        return Err(ProviderError::TushareApi {
-            code: response.code,
-            msg: response.msg,
-        });
+        return Err(AppError::DataParsing(anyhow::anyhow!(format!(
+            "Tushare API error code {}: {}",
+            response.code, response.msg
+        ))));
     }

-    let data = response.data.ok_or_else(|| ProviderError::TushareApi {
-        code: -1,
-        msg: "No data field in response".to_string(),
-    })?;
+    let data = response.data.ok_or_else(|| {
+        AppError::DataParsing(anyhow::anyhow!(
+            "Tushare response missing data field"
+        ))
+    })?;

     let items = data
@@ -4,7 +4,7 @@ use serde_json::json;
 use tokio;

 use crate::{
-    error::ProviderError,
+    error::AppError,
     mapping::{map_financial_statements, TushareFinancials},
     ts_client::TushareClient,
 };

@@ -25,7 +25,7 @@ impl TushareDataProvider {
 pub async fn fetch_all_data(
     &self,
     symbol: &str,
-) -> Result<(CompanyProfileDto, Vec<TimeSeriesFinancialDto>), ProviderError> {
+) -> Result<(CompanyProfileDto, Vec<TimeSeriesFinancialDto>), AppError> {
     let (
         stock_basic,
         stock_company,

@@ -42,18 +42,18 @@ impl TushareDataProvider {
     let ts_code = stock_basic
         .get(0)
         .map(|r| r.ts_code.clone())
-        .ok_or_else(|| ProviderError::Mapping("stock_basic missing first row".to_string()))?;
+        .ok_or_else(|| AppError::Mapping("stock_basic missing first row".to_string()))?;
     let name = stock_basic
         .get(0)
         .and_then(|r| r.name.clone())
-        .ok_or_else(|| ProviderError::Mapping("stock_basic.name missing".to_string()))?;
+        .ok_or_else(|| AppError::Mapping("stock_basic.name missing".to_string()))?;
     let industry = stock_basic.get(0).and_then(|r| r.industry.clone());
     let list_date = stock_basic
         .get(0)
         .and_then(|r| r.list_date.clone())
         .map(|d| NaiveDate::parse_from_str(&d, "%Y%m%d"))
         .transpose()
-        .map_err(|e| ProviderError::Mapping(format!("Invalid list_date: {}", e)))?;
+        .map_err(|e| AppError::Mapping(format!("Invalid list_date: {}", e)))?;

     let profile = CompanyProfileDto {
         symbol: ts_code,

@@ -94,7 +94,7 @@ impl TushareDataProvider {
         Vec<Dividend>,
         Vec<StkHolderNumber>,
     ),
-    ProviderError,
+    AppError,
 > {
     let params = json!({ "ts_code": symbol });
@@ -5,42 +5,52 @@ use common_contracts::{
     messages::{CompanyProfilePersistedEvent, FetchCompanyDataCommand, FinancialsPersistedEvent},
 };
 use tokio::sync::mpsc;
-use tracing::info;
+use tracing::{info, error};
 use chrono::Datelike;

-use crate::{error::ProviderError, persistence::PersistenceClient, state::AppState};
+use crate::{error::AppError, persistence::PersistenceClient, state::AppState};

 pub async fn run_tushare_workflow(
     state: Arc<AppState>,
     command: FetchCompanyDataCommand,
     completion_tx: mpsc::Sender<()>,
-) -> Result<(), ProviderError> {
+) -> Result<(), AppError> {
     let task_id = command.request_id;
     let symbol = command.symbol.clone();

+    let provider = match state.get_provider().await {
+        Some(p) => p,
+        None => {
+            let reason = "Execution failed: Tushare provider is not available (misconfigured).".to_string();
+            error!("{}", reason);
+            if let Some(mut task) = state.tasks.get_mut(&task_id) {
+                task.status = "Failed".to_string();
+                task.details = reason.clone();
+            }
+            return Err(AppError::ProviderNotAvailable(reason));
+        }
+    };
+
     // 1. Update task progress: Fetching data
     {
         let mut entry = state
             .tasks
             .get_mut(&task_id)
-            .ok_or_else(|| ProviderError::Internal(anyhow::anyhow!("Task not found")))?;
+            .ok_or_else(|| AppError::Internal("Task not found".to_string()))?;
         entry.status = "FetchingData".to_string();
         entry.progress_percent = 10;
         entry.details = "Starting data fetch from Tushare".to_string();
     }

     // 2. Fetch data using the provider
-    let (profile, financials) = state
-        .tushare_provider
-        .fetch_all_data(&symbol)
-        .await?;
+    let (profile, financials) = provider.fetch_all_data(&symbol).await?;

     // 3. Update task progress: Persisting data
     {
         let mut entry = state
             .tasks
             .get_mut(&task_id)
-            .ok_or_else(|| ProviderError::Internal(anyhow::anyhow!("Task not found")))?;
+            .ok_or_else(|| AppError::Internal("Task not found".to_string()))?;
         entry.status = "PersistingData".to_string();
         entry.progress_percent = 60;
         entry.details = "Data fetched, persisting to database".to_string();

@@ -60,7 +70,7 @@ pub async fn run_tushare_workflow(
     // 5. Publish events
     let nats_client = async_nats::connect(&state.config.nats_addr)
         .await
-        .map_err(|e| ProviderError::Internal(anyhow::anyhow!("NATS connection failed: {}", e)))?;
+        .map_err(|e| AppError::Internal(format!("NATS connection failed: {}", e)))?;

     publish_events(&nats_client, &command, &financials).await?;

@@ -69,7 +79,7 @@ pub async fn run_tushare_workflow(
     let mut entry = state
         .tasks
         .get_mut(&task_id)
-        .ok_or_else(|| ProviderError::Internal(anyhow::anyhow!("Task not found")))?;
+        .ok_or_else(|| AppError::Internal("Task not found".to_string()))?;
     entry.status = "Completed".to_string();
     entry.progress_percent = 100;
     entry.details = "Workflow finished successfully".to_string();

@@ -91,7 +101,7 @@ async fn persist_data(
     financials: &[TimeSeriesFinancialDto],
     state: &Arc<AppState>,
     task_id: uuid::Uuid,
-) -> Result<(), ProviderError> {
+) -> Result<(), AppError> {
     // In a real implementation, we'd use tokio::try_join! to run these in parallel.
     if let Err(e) = client.upsert_company_profile(profile.clone()).await {
         state

@@ -128,7 +138,7 @@ async fn publish_events(
     nats_client: &async_nats::Client,
     command: &FetchCompanyDataCommand,
     financials: &[TimeSeriesFinancialDto],
-) -> Result<(), ProviderError> {
+) -> Result<(), AppError> {
     let profile_event = CompanyProfilePersistedEvent {
         request_id: command.request_id,
         symbol: command.symbol.clone(),

@@ -138,8 +148,7 @@ async fn publish_events(
         "events.data.company_profile_persisted",
         serde_json::to_vec(&profile_event).unwrap().into(),
     )
-    .await
-    .map_err(|e| ProviderError::Internal(anyhow::anyhow!("Event publishing failed: {}", e)))?;
+    .await?;

     let years: std::collections::BTreeSet<u16> = financials
         .iter()

@@ -155,8 +164,7 @@ async fn publish_events(
         "events.data.financials_persisted",
         serde_json::to_vec(&financials_event).unwrap().into(),
     )
-    .await
-    .map_err(|e| ProviderError::Internal(anyhow::anyhow!("Event publishing failed: {}", e)))?;
+    .await?;

     Ok(())
 }