Fundamental_Analysis/backend/app/services/alpha_engine_client.py

"""
AlphaEngine Client for investment Q&A API
"""
import codecs
import json
import re
import string
import time
from typing import AsyncGenerator, Dict, List, Optional

import httpx


class AlphaEngineClient:
    """Client for the AlphaEngine investment Q&A API"""

    def __init__(
        self,
        api_url: str,
        api_key: str,
        token: str,
        user_id: int = 999041,
        model: str = "deepseek-r1",
        using_indicator: bool = True,
        start_time: str = "2024-01-01",
        doc_show_type: Optional[List[str]] = None,
        simple_tracking: bool = True
    ):
        """
        Initialize the AlphaEngine client

        Args:
            api_url: API endpoint URL
            api_key: X-API-KEY for authentication
            token: Token for authentication
            user_id: User ID
            model: Model name (default: deepseek-r1)
            using_indicator: Whether to use indicators
            start_time: Start time for data queries
            doc_show_type: Document types to show (default: ["A001", "A002", "A003", "A004"])
            simple_tracking: Whether to enable simple tracking
        """
        self.api_url = api_url.rstrip('/')
        self.api_key = api_key
        self.token = token
        self.user_id = user_id
        self.model = model
        self.using_indicator = using_indicator
        self.start_time = start_time
        self.doc_show_type = doc_show_type or ["A001", "A002", "A003", "A004"]
        self.simple_tracking = simple_tracking

    async def generate_analysis(
        self,
        analysis_type: str,
        company_name: str,
        ts_code: str,
        prompt_template: str,
        financial_data: Optional[Dict] = None,
        context: Optional[Dict] = None
    ) -> Dict:
        """
        Generate analysis using the AlphaEngine API (non-streaming)

        Args:
            analysis_type: Type of analysis
            company_name: Company name
            ts_code: Stock code
            prompt_template: Prompt template with placeholders
            financial_data: Optional financial data for context
            context: Optional dictionary with results from previous analyses

        Returns:
            Dict with analysis content and metadata
        """
        start_time = time.perf_counter_ns()

        # Build prompt from template
        prompt = self._build_prompt(
            prompt_template,
            company_name,
            ts_code,
            financial_data,
            context
        )

        # Call AlphaEngine API
        try:
            async with httpx.AsyncClient(timeout=300.0) as client:
                headers = {
                    'token': self.token,
                    'X-API-KEY': self.api_key,
                    'Content-Type': 'application/json'
                }
                payload = {
                    "msg": prompt,
                    "history": [],
                    "user_id": self.user_id,
                    "model": self.model,
                    "using_indicator": self.using_indicator,
                    "start_time": self.start_time,
                    "doc_show_type": self.doc_show_type,
                    "simple_tracking": self.simple_tracking
                }
                response = await client.post(
                    f"{self.api_url}/api/v3/finchat",
                    json=payload,
                    headers=headers
                )
                if response.status_code != 200:
                    raise Exception(
                        f"AlphaEngine API error: HTTP {response.status_code} - {response.text}"
                    )

                result_text = response.text

                # Parse response to extract the final answer
                final_answer_match = re.findall(r'\{"id":"_final","content":"(.*?)"}', result_text)
                final_answer = final_answer_match[0] if final_answer_match else result_text

                # Extract COT if available
                cot_match = re.findall(r'\{"id":"_cot","content":"(.*?)"}', result_text)
                cot = "".join(cot_match) if cot_match else ""

                # Extract tracking documents if available
                tracking_match = re.findall(
                    r'\{"id":"tracking_documents","content":\s*(\[[^]]*])}', result_text
                )
                tracking_docs = json.loads(tracking_match[0]) if tracking_match else []

                elapsed_ms = int((time.perf_counter_ns() - start_time) / 1_000_000)
                return {
                    "content": final_answer,
                    "model": self.model,
                    "tokens": {
                        "prompt_tokens": 0,  # AlphaEngine doesn't provide token usage
                        "completion_tokens": 0,
                        "total_tokens": 0,
                    },
                    "elapsed_ms": elapsed_ms,
                    "success": True,
                    "analysis_type": analysis_type,
                    "cot": cot,
                    "tracking_documents": tracking_docs,
                }
        except Exception as e:
            elapsed_ms = int((time.perf_counter_ns() - start_time) / 1_000_000)
            return {
                "content": "",
                "model": self.model,
                "tokens": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
                "elapsed_ms": elapsed_ms,
                "success": False,
                "error": str(e),
                "analysis_type": analysis_type,
            }
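
    # NOTE (assumption): the regex parsing above implies that /api/v3/finchat returns
    # a stream of JSON fragments such as
    #   {"id":"_cot","content":"...reasoning text..."}
    #   {"id":"_final","content":"...final answer..."}
    #   {"id":"tracking_documents","content": [ ... ]}
    # This shape is inferred from the parsing logic, not from official API documentation;
    # adjust the patterns if the upstream format differs.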

    async def generate_analysis_stream(
        self,
        analysis_type: str,
        company_name: str,
        ts_code: str,
        prompt_template: str,
        financial_data: Optional[Dict] = None,
        context: Optional[Dict] = None
    ) -> AsyncGenerator[str, None]:
        """
        Yield analysis content chunks using the AlphaEngine streaming API

        Yields plain text chunks as they arrive.
        """
        # Build prompt
        prompt = self._build_prompt(
            prompt_template,
            company_name,
            ts_code,
            financial_data,
            context,
        )

        try:
            async with httpx.AsyncClient(timeout=300.0) as client:
                headers = {
                    'token': self.token,
                    'X-API-KEY': self.api_key,
                    'Content-Type': 'application/json'
                }
                payload = {
                    "msg": prompt,
                    "history": [],
                    "user_id": self.user_id,
                    "model": self.model,
                    "using_indicator": self.using_indicator,
                    "start_time": self.start_time,
                    "doc_show_type": self.doc_show_type,
                    "simple_tracking": self.simple_tracking
                }
                async with client.stream(
                    "POST",
                    f"{self.api_url}/api/v3/finchat",
                    json=payload,
                    headers=headers
                ) as response:
                    if response.status_code != 200:
                        # The body must be read before .text is available on a streamed response
                        await response.aread()
                        yield f"\n\n[错误] HTTP {response.status_code}: {response.text}\n"
                        return

                    # Decode incrementally so multi-byte UTF-8 characters split across
                    # chunk boundaries are not silently dropped
                    decoder = codecs.getincrementaldecoder('utf-8')(errors='ignore')
                    async for chunk in response.aiter_bytes(chunk_size=128):
                        chunk_text = decoder.decode(chunk)
                        if chunk_text:
                            yield chunk_text
                    tail = decoder.decode(b"", final=True)
                    if tail:
                        yield tail
        except Exception as e:
            yield f"\n\n[错误] {type(e).__name__}: {str(e)}\n"

    def _build_prompt(
        self,
        prompt_template: str,
        company_name: str,
        ts_code: str,
        financial_data: Optional[Dict] = None,
        context: Optional[Dict] = None
    ) -> str:
        """Build the prompt from a template by replacing placeholders"""
        # Start with base placeholders
        placeholders = {
            "company_name": company_name,
            "ts_code": ts_code,
        }

        # Add financial data if provided
        financial_data_str = ""
        if financial_data:
            try:
                financial_data_str = json.dumps(financial_data, ensure_ascii=False, indent=2)
            except Exception:
                financial_data_str = str(financial_data)
        placeholders["financial_data"] = financial_data_str

        # Add context from previous analysis steps
        if context:
            placeholders.update(context)

        # Replace placeholders in the template; unknown keys are left untouched
        class SafeFormatter(string.Formatter):
            def get_value(self, key, args, kwargs):
                if isinstance(key, str):
                    return kwargs.get(key, f"{{{key}}}")
                return super().get_value(key, args, kwargs)

        formatter = SafeFormatter()
        return formatter.format(prompt_template, **placeholders)
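

# --- Usage sketch ------------------------------------------------------------
# A minimal, non-authoritative example of driving the client directly. The URL,
# credentials, analysis_type, stock code, and prompt template below are
# placeholders for illustration; real values come from the application's
# configuration, not from this module.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        client = AlphaEngineClient(
            api_url="https://alpha-engine.example.com",  # placeholder endpoint
            api_key="YOUR_X_API_KEY",                    # placeholder credential
            token="YOUR_TOKEN",                          # placeholder credential
        )
        result = await client.generate_analysis(
            analysis_type="profitability",               # illustrative value
            company_name="Example Co.",
            ts_code="000000.SZ",                         # placeholder stock code
            prompt_template=(
                "Analyze the profitability of {company_name} ({ts_code}).\n"
                "Financial data:\n{financial_data}"
            ),
            financial_data={"roe": 0.31},                # illustrative data point
        )
        print(result["success"], result["elapsed_ms"], result["content"][:200])

    asyncio.run(_demo())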