Fundamental_Analysis/services/workflow-orchestrator-service/src/main.rs
Lv, Qi e9e4d0c1b3 chore: massive update covering recent refactoring and bug fixes
- fix: infinite message loop in workflow orchestrator
- feat: restore realtime LLM streaming from report generator to frontend
- refactor: major update to provider services (generic workers, workflow adapters)
- refactor: common contracts and message definitions updated
- feat: enhanced logging and observability in orchestrator
- docs: update project management tasks and status
- chore: dependency updates and config adjustments
2025-11-30 19:17:02 +08:00

use anyhow::Result;
use tracing::info;
use std::sync::Arc;
use workflow_orchestrator_service::{config, state, message_consumer, api, task_monitor};
use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt, Layer};
use tokio::sync::broadcast;
use common_contracts::messages::WorkflowEvent;
use common_contracts::subjects::NatsSubject;

#[tokio::main]
async fn main() -> Result<()> {
    // Load configuration first
    let config = config::AppConfig::load()?;

    // Initialize Log Manager
    let log_manager = Arc::new(workflow_orchestrator_service::logging::LogBufferManager::new("temp_logs"));
    log_manager.cleanup_all(); // Clean up old logs on startup

    // Initialize Realtime Log Broadcast Channel
    let (log_tx, mut log_rx) = broadcast::channel::<workflow_orchestrator_service::logging::LogEntry>(1000);

    // Initialize Tracing with custom layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_filter(EnvFilter::try_from_default_env().unwrap_or_else(|_| "info".into()));
    let file_log_layer =
        workflow_orchestrator_service::logging::FileRequestLogLayer::new(log_manager.clone(), log_tx.clone());
    tracing_subscriber::registry()
        .with(fmt_layer)
        .with(file_log_layer)
        .init();

    info!("Starting workflow-orchestrator-service...");

    // Initialize application state
    let state = Arc::new(state::AppState::new(config.clone(), log_manager, log_tx).await?);
    // Connect to NATS
    let nats_client = {
        let mut attempts = 0;
        loop {
            match async_nats::connect(&config.nats_addr).await {
                Ok(client) => break client,
                Err(e) => {
                    attempts += 1;
                    if attempts > 30 {
                        return Err(anyhow::anyhow!("Failed to connect to NATS after 30 attempts: {}", e));
                    }
                    tracing::warn!("Failed to connect to NATS: {}. Retrying in 2s... (Attempt {}/30)", e, attempts);
                    tokio::time::sleep(std::time::Duration::from_secs(2)).await;
                }
            }
        }
    };
    info!("Connected to NATS at {}", config.nats_addr);
    // Start Realtime Log Pusher: forward broadcast log entries to NATS as workflow events
    let nats_pusher = nats_client.clone();
    tokio::spawn(async move {
        info!("Starting Realtime Log Pusher...");
        while let Ok(entry) = log_rx.recv().await {
            // Convert the entry to WorkflowEvent::TaskLog.
            // entry.request_id is a string, so parse it back into a Uuid to derive the subject.
            if let Ok(req_id) = uuid::Uuid::parse_str(&entry.request_id) {
                let event = WorkflowEvent::TaskLog {
                    // Ideally the task_id would be captured too; for now use a generic "workflow"
                    task_id: "workflow".to_string(),
                    level: entry.level,
                    message: entry.message,
                    timestamp: entry.timestamp,
                };
                let subject = NatsSubject::WorkflowProgress(req_id).to_string();
                if let Ok(payload) = serde_json::to_vec(&event) {
                    if let Err(e) = nats_pusher.publish(subject, payload.into()).await {
                        tracing::error!("Failed to push realtime log to NATS: {}", e);
                    }
                }
            }
        }
    });
    // Start Message Consumer
    let state_clone = state.clone();
    let nats_clone = nats_client.clone();
    tokio::spawn(async move {
        if let Err(e) = message_consumer::run(state_clone, nats_clone).await {
            tracing::error!("Message consumer failed: {}", e);
        }
    });

    // Start Task Monitor (Watchdog)
    let state_monitor = state.clone();
    let nats_monitor = nats_client.clone();
    tokio::spawn(async move {
        task_monitor::run(state_monitor, nats_monitor).await;
    });

    // Start HTTP Server
    let app = api::create_router(state.clone());
    let addr = format!("0.0.0.0:{}", config.server_port);
    let listener = tokio::net::TcpListener::bind(&addr).await?;
    info!("HTTP server listening on {}", addr);
    axum::serve(listener, app).await?;

    Ok(())
}
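
For orientation, the log pusher above depends on a few types defined outside this file: workflow_orchestrator_service::logging::LogEntry, common_contracts::messages::WorkflowEvent, and common_contracts::subjects::NatsSubject. Below is a minimal sketch of plausible shapes for those types, inferred only from how main.rs uses them; every field name, field type, and the subject naming convention is an assumption, and the real definitions in the repository may differ.

// Hypothetical sketch only: shapes inferred from usage in main.rs, not the actual crate definitions.
use serde::{Deserialize, Serialize};
use uuid::Uuid;

// LogEntry is broadcast by FileRequestLogLayer and consumed by the realtime log pusher.
// It must be Clone to travel through tokio::sync::broadcast.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LogEntry {
    pub request_id: String, // stringified Uuid, parsed back in the pusher
    pub level: String,      // assumed String; could be an enum in the real contract
    pub message: String,
    pub timestamp: String,  // assumed String; could be a chrono type in the real contract
}

// WorkflowEvent::TaskLog is the event serialized with serde_json and published to NATS.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "type")] // tagging scheme is an assumption
pub enum WorkflowEvent {
    TaskLog {
        task_id: String,
        level: String,
        message: String,
        timestamp: String,
    },
    // ... other workflow event variants elided
}

// NatsSubject::WorkflowProgress maps a request id to a concrete NATS subject string,
// which main.rs obtains via .to_string() (i.e. through Display).
pub enum NatsSubject {
    WorkflowProgress(Uuid),
}

impl std::fmt::Display for NatsSubject {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            // Assumed subject naming convention; the real prefix may differ.
            NatsSubject::WorkflowProgress(id) => write!(f, "workflow.progress.{}", id),
        }
    }
}

Whatever their exact shapes, the contract the pusher relies on is that WorkflowEvent is serde-serializable and that NatsSubject::WorkflowProgress(request_id) renders a per-request subject string, which is what lets a subscriber (e.g. the frontend mentioned in the commit message) receive realtime log streaming for a specific workflow run.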