Fundamental_Analysis/services/tushare-provider-service/src/message_consumer.rs
Lv, Qi 60e6c8f61b feat(config): migrate all service configuration to the database
This commit completes a significant architectural refactoring: API credential management for all external services has been migrated from environment variables to centralized database configuration.

Key changes:

1.  **Unified configuration source**:
    -   `data-persistence-service` now exposes a `/api/v1/configs/data_sources` endpoint for managing data source configuration in one place.
    -   All configuration (LLM and data sources) is now managed through the database's `system_config` table, establishing a single source of truth. A minimal fetch sketch follows below.
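
As a rough illustration of how a provider service might read its credentials from the new endpoint, here is a minimal sketch. It assumes `reqwest` and `serde` as dependencies; the `DataSourceConfig` shape is hypothetical, and the authoritative contract is whatever `data-persistence-service` actually returns from `/api/v1/configs/data_sources`.

```rust
use serde::Deserialize;

// Hypothetical response shape; the real contract is defined by
// data-persistence-service's /api/v1/configs/data_sources endpoint.
#[derive(Debug, Deserialize)]
struct DataSourceConfig {
    tushare_token: Option<String>,
    finnhub_api_key: Option<String>,
    alphavantage_api_key: Option<String>,
}

// Fetch the latest data source configuration from the persistence service.
async fn fetch_data_source_config(base_url: &str) -> reqwest::Result<DataSourceConfig> {
    reqwest::Client::new()
        .get(format!("{}/api/v1/configs/data_sources", base_url))
        .send()
        .await?
        .error_for_status()?
        .json::<DataSourceConfig>()
        .await
}
```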

2.  **Improved service resilience**:
    -   Refactored `finnhub-`, `tushare-`, and `alphavantage-provider-service`.
    -   These services no longer require an API key at startup.
    -   Introduced a dynamic configuration poller (`config_poller`), so services now periodically fetch the latest configuration from the database (see the sketch after this list).
    -   Implemented a "degraded mode": when configuration is missing, a service enters the `Degraded` state and pauses message processing instead of crashing outright. Once configuration is restored, the service automatically returns to the `Active` state.
    -   The `/health` endpoint now accurately reflects the service's actual operational state.
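
A minimal sketch of the transition logic such a poller might perform, reusing the hypothetical `fetch_data_source_config` from above. The `persistence_url` field and the token check are illustrative assumptions, not the actual `config_poller` implementation:

```rust
use std::time::Duration;
use tracing::warn;

use crate::state::{AppState, ServiceOperationalStatus};

// Illustrative poller loop: re-reads configuration periodically and flips the
// shared status between Active and Degraded. The AppState and
// ServiceOperationalStatus shapes match those referenced by
// message_consumer.rs below; `persistence_url` is a hypothetical config field.
async fn config_poller(state: AppState) {
    loop {
        match fetch_data_source_config(&state.config.persistence_url).await {
            Ok(cfg) if cfg.tushare_token.is_some() => {
                *state.status.write().await = ServiceOperationalStatus::Active;
            }
            Ok(_) => {
                *state.status.write().await = ServiceOperationalStatus::Degraded {
                    reason: "tushare token not configured".to_string(),
                };
            }
            Err(e) => {
                warn!("Config fetch failed: {}; keeping previous status.", e);
            }
        }
        tokio::time::sleep(Duration::from_secs(30)).await;
    }
}
```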

3.  **Frontend usability**:
    -   Added a "Data Source Configuration" panel to the `/config` page, allowing users to update all API tokens dynamically through the UI.

4.  **Simplified deployment**:
    -   Removed all deprecated `_API_KEY` environment variables from `docker-compose.yml`, eliminating the warnings they caused at startup.

This refactoring significantly improves the system's maintainability, robustness, and user experience, and lays a solid foundation for future feature work.
2025-11-18 05:58:18 +08:00


use crate::error::Result;
use crate::state::{AppState, ServiceOperationalStatus};
use common_contracts::messages::FetchCompanyDataCommand;
use futures_util::StreamExt;
use tracing::{error, info, warn};
use std::sync::Arc;
use tokio::sync::mpsc;
use chrono::Utc;
use std::time::Duration;

const SUBJECT_NAME: &str = "data_fetch_commands";

pub async fn run(state: AppState) -> Result<()> {
    info!("Starting NATS message consumer...");
    loop {
        let status = state.status.read().await.clone();
        if let ServiceOperationalStatus::Degraded { reason } = status {
            warn!(
                "Service is in degraded state (reason: {}). Pausing message consumption for 30s.",
                reason
            );
            tokio::time::sleep(Duration::from_secs(30)).await;
            continue;
        }
        info!("Service is Active. Connecting to NATS...");
        match async_nats::connect(&state.config.nats_addr).await {
            Ok(client) => {
                info!("Successfully connected to NATS.");
                if let Err(e) = subscribe_and_process(state.clone(), client).await {
                    error!("NATS subscription error: {}. Reconnecting in 10s...", e);
                }
            }
            Err(e) => {
                error!("Failed to connect to NATS: {}. Retrying in 10s...", e);
            }
        }
        tokio::time::sleep(Duration::from_secs(10)).await;
    }
}
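
// For reference, the `ServiceOperationalStatus` and `AppState` shapes used
// above (defined in src/state.rs) are assumed to look roughly like:
//
//     pub enum ServiceOperationalStatus {
//         Active,
//         Degraded { reason: String },
//     }
//
//     pub struct AppState {
//         pub status: Arc<RwLock<ServiceOperationalStatus>>,
//         pub tasks: DashMap<Uuid, TaskProgress>,
//         pub config: ServiceConfig,
//         // ...
//     }
//
// Sketch only, inferred from usage in this file; the actual definitions live
// in crate::state.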

async fn subscribe_and_process(
    state: AppState,
    client: async_nats::Client,
) -> Result<()> {
    let mut subscriber = client.subscribe(SUBJECT_NAME.to_string()).await?;
    info!(
        "Consumer started, waiting for messages on subject '{}'",
        SUBJECT_NAME
    );
    while let Some(message) = subscriber.next().await {
        let current_status = state.status.read().await.clone();
        if matches!(current_status, ServiceOperationalStatus::Degraded { .. }) {
            warn!("Service became degraded. Disconnecting from NATS and pausing consumption.");
            subscriber.unsubscribe().await?;
            return Ok(());
        }
        info!("Received NATS message.");
        let state_for_closure = Arc::new(state.clone());
        tokio::spawn(async move {
            // Deserialize once; malformed payloads are logged and dropped.
            let command = match serde_json::from_slice::<FetchCompanyDataCommand>(&message.payload) {
                Ok(c) => c,
                Err(e) => {
                    error!("Failed to deserialize message: {}", e);
                    warn!("Received non-JSON message: {:?}", message.payload);
                    return;
                }
            };
info!("Received data fetch command for symbol: {}", command.symbol);
// Tushare is for the Chinese market ("CN")
if command.market.to_uppercase() != "CN" {
info!(
"Skipping command for symbol '{}' as its market ('{}') is not 'CN'.",
command.symbol, command.market
);
return;
}
let (tx, rx) = mpsc::channel(1);
let task_id = command.request_id;
// Initialize deterministic progress entry
state_for_closure.tasks.insert(task_id, common_contracts::observability::TaskProgress {
request_id: task_id,
task_name: format!("tushare:{}", command.symbol),
status: "Received".to_string(),
progress_percent: 0,
details: "Command received".to_string(),
started_at: Utc::now(),
});
// Spawn the workflow in a separate task
let workflow_state = state_for_closure.clone();
tokio::spawn(async move {
let workflow_state_for_error = workflow_state.clone();
let result = crate::worker::run_tushare_workflow(workflow_state, command, tx).await;
if let Err(e) = result {
error!(
"Error executing Tushare workflow for task {}: {:?}",
task_id, e
);
// Update task to failed status
if let Some(mut task) = workflow_state_for_error.tasks.get_mut(&task_id) {
task.status = "Failed".to_string();
task.details = format!("Workflow failed: {}", e);
}
}
});
// Spawn a separate task to clean up the task entry after completion or timeout
let cleanup_state = state_for_closure.clone();
tokio::spawn(async move {
let mut rx = rx;
match rx.recv().await {
Some(_) => {
info!("Task {} completed successfully, removing from map.", task_id);
cleanup_state.tasks.remove(&task_id);
}
None => {
warn!(
"Task {} completion signal not received, removing after timeout.",
task_id
);
cleanup_state.tasks.remove(&task_id);
}
}
});
});
}
Ok(())
}