- Removed dependencies on analysis_results table
- Implemented workflow_history storage in Data Persistence Service
- Updated Workflow Orchestrator to save workflow snapshots to history
- Refactored Frontend to consume workflow_history and fetch reports via VGCS
- Fixed Data Providers (Tushare, YFinance) to report output paths in metadata
- Updated documentation and task status
77 lines · 2.4 KiB · Rust
use anyhow::{Result, anyhow};
use serde_json::{json, Value};
use std::time::Duration;
use tracing::debug;

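/// Minimal client for an OpenAI-compatible chat-completions API.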
pub struct LlmClient {
    http_client: reqwest::Client,
    api_base_url: String,
    api_key: String,
    model: String,
}

impl LlmClient {
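    /// Builds a client from an API base URL (e.g. `https://api.openai.com/v1`),
    /// an API key, and a model name. Trailing slashes on the URL are stripped.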
    pub fn new(api_url: String, api_key: String, model: String) -> Self {
        let api_url = api_url.trim();

        // Normalize the base URL. Handling /v1, /chat/completions, etc. is tricky,
        // so keep it simple for now: assume api_url is already the base
        // (e.g. https://api.openai.com/v1).
        let base_url = api_url.trim_end_matches('/').to_string();

        let http_client = reqwest::Client::builder()
            .timeout(Duration::from_secs(60))
            .build()
            // Falls back to a default client (without the 60 s timeout) if the
            // builder fails, rather than panicking at construction time.
            .unwrap_or_default();

        Self {
            http_client,
            api_base_url: base_url,
            api_key,
            model,
        }
    }

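    /// Sends a system + user prompt pair and returns the assistant's reply text.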
    pub async fn chat_completion(&self, system_prompt: &str, user_prompt: &str) -> Result<String> {
        // Accept either a bare base URL or one that already points at the endpoint.
        let url = if self.api_base_url.ends_with("/chat/completions") {
            self.api_base_url.clone()
        } else {
            format!("{}/chat/completions", self.api_base_url)
        };

        debug!("Sending request to LLM: {} ({})", self.model, url);

        let body = json!({
            "model": self.model,
            "messages": [
                { "role": "system", "content": system_prompt },
                { "role": "user", "content": user_prompt }
            ],
            "temperature": 0.1 // Low temperature for deterministic selection
        });

        let res = self.http_client.post(&url)
            .header("Authorization", format!("Bearer {}", self.api_key))
            .header("Content-Type", "application/json")
            .json(&body)
            .send()
            .await
            .map_err(|e| anyhow!("LLM request failed: {}", e))?;

        if !res.status().is_success() {
            let status = res.status();
            let text = res.text().await.unwrap_or_default();
            return Err(anyhow!("LLM API error {}: {}", status, text));
        }

        let json: Value = res.json().await
            .map_err(|e| anyhow!("Failed to parse LLM response: {}", e))?;

        // Index lookups on Value return Null for missing fields, so as_str()
        // funnels any malformed reply into a single error path.
        let content = json["choices"][0]["message"]["content"]
            .as_str()
            .ok_or_else(|| anyhow!("Invalid LLM response format"))?;

        Ok(content.to_string())
    }
}
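
For reference, a minimal usage sketch of the client above. The endpoint, environment variable, and model name are placeholders, not from the original file, and the `tokio` runtime is an assumption (any async runtime that reqwest supports would do):

```rust
// Hypothetical usage sketch; endpoint, env var, and model are placeholders.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = LlmClient::new(
        "https://api.openai.com/v1".to_string(), // base URL; a trailing '/' is fine
        std::env::var("OPENAI_API_KEY")?,        // assumed env var, not from the original
        "gpt-4o-mini".to_string(),               // placeholder model name
    );

    let reply = client
        .chat_completion("You are a concise assistant.", "Say hello.")
        .await?;
    println!("{reply}");
    Ok(())
}
```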
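And a small standalone sketch of the response shape `chat_completion` assumes: the OpenAI-style `choices[0].message.content` layout. The payload here is fabricated purely for illustration:

```rust
// Illustrative only: the response layout chat_completion() expects to parse.
use serde_json::json;

fn main() {
    let sample = json!({
        "choices": [
            { "message": { "role": "assistant", "content": "Hello!" } }
        ]
    });

    // Same extraction path as chat_completion(): missing fields index to Null,
    // and as_str() then returns None.
    let content = sample["choices"][0]["message"]["content"]
        .as_str()
        .expect("well-formed sample");
    assert_eq!(content, "Hello!");
}
```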