Compare commits

...

3 Commits

Author SHA1 Message Date
xucheng
a79efd8150 feat: Enhance configuration management with new LLM provider support and API integration
- Backend: Introduced new endpoints for LLM configuration retrieval and updates in `config.py`, allowing dynamic management of LLM provider settings.
- Updated schemas to include `AlphaEngineConfig` for better integration with the new provider.
- Frontend: Added state management for AlphaEngine API credentials in the configuration page, ensuring seamless user experience.
- Configuration files updated to reflect changes in LLM provider settings and API keys.

BREAKING CHANGE: The default LLM provider has been changed from `new_api` to `alpha_engine`, requiring updates to existing configurations.
2025-11-11 20:49:27 +08:00
xucheng
00a79499d4 Update project status documentation to reflect changes in financial data sources and internal network details; minor adjustments in user guide and font README files for consistency. 2025-11-11 18:29:35 +08:00
xucheng
70c0549a5a Update contract: rename file, add contract number, modify payment terms, add continuous development clause, fix tax descriptions for small-scale taxpayer 2025-11-11 18:27:58 +08:00
19 changed files with 1653 additions and 222 deletions

View File

@ -1,7 +1,6 @@
"""
API router for configuration management
"""
from fastapi import APIRouter, Depends, HTTPException
from typing import Dict, Any, Optional
from pydantic import BaseModel
from app.core.dependencies import get_config_manager
from app.schemas.config import ConfigResponse, ConfigUpdateRequest, ConfigTestRequest, ConfigTestResponse
@ -9,11 +8,112 @@ from app.services.config_manager import ConfigManager
router = APIRouter()
class LLMConfigUpdate(BaseModel):
    # Request body for PUT /llm: which provider to activate and, optionally,
    # which model that provider should use. When `model` is omitted only the
    # provider selection changes.
    provider: str
    model: Optional[str] = None
@router.get("/", response_model=ConfigResponse)
async def get_config(config_manager: ConfigManager = Depends(get_config_manager)):
    """Return the current merged system configuration."""
    current_config = await config_manager.get_config()
    return current_config
@router.get("/llm", response_model=Dict[str, Any])
async def get_llm_config(config_manager: ConfigManager = Depends(get_config_manager)):
    """Return the active LLM provider and model configuration."""
    return await config_manager.get_llm_config()
@router.put("/llm", response_model=Dict[str, Any])
async def update_llm_config(
    llm_update: LLMConfigUpdate,
    config_manager: ConfigManager = Depends(get_config_manager)
):
    """Update the LLM provider (and optionally its model).

    The change is written to two stores:
    - the JSON config file (authoritative; a write failure aborts with 500);
    - the database-backed dynamic config (best-effort; on error it is rolled
      back and logged, but the request still succeeds since the file saved).

    Returns the merged LLM config as seen by ConfigManager afterwards.
    """
    import json
    import os

    provider = llm_update.provider
    model = llm_update.model

    # Load the on-disk base config (<project_root>/config/config.json).
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
    config_path = os.path.join(project_root, "config", "config.json")
    base_config = {}
    if os.path.exists(config_path):
        try:
            with open(config_path, "r", encoding="utf-8") as f:
                base_config = json.load(f)
        except Exception:
            # Corrupt/unreadable file: fall back to writing a fresh config.
            pass

    # Apply provider (and optional model) to the file config. The provider
    # section is created when missing so the model is never silently dropped
    # (keeps the file store consistent with the DB store below).
    llm_section = base_config.setdefault("llm", {})
    llm_section["provider"] = provider
    if model:
        if not isinstance(llm_section.get(provider), dict):
            llm_section[provider] = {}
        llm_section[provider]["model"] = model

    # Persist to disk; this is the authoritative store.
    try:
        with open(config_path, "w", encoding="utf-8") as f:
            json.dump(base_config, f, ensure_ascii=False, indent=2)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to save config: {str(e)}")

    # Mirror the change into the database (best-effort).
    try:
        from app.models.system_config import SystemConfig
        from sqlalchemy.future import select

        result = await config_manager.db.execute(
            select(SystemConfig).where(SystemConfig.config_key == "llm")
        )
        existing_llm_config = result.scalar_one_or_none()

        if existing_llm_config:
            # Build a new dict and REASSIGN it: in-place mutation of a JSON
            # column is not tracked by SQLAlchemy unless the column uses
            # MutableDict, so reassignment guarantees the UPDATE is emitted.
            value = existing_llm_config.config_value
            new_value = dict(value) if isinstance(value, dict) else {}
            new_value["provider"] = provider
            if model:
                provider_cfg = new_value.get(provider)
                new_value[provider] = dict(provider_cfg) if isinstance(provider_cfg, dict) else {}
                new_value[provider]["model"] = model
            existing_llm_config.config_value = new_value
        else:
            new_value = {"provider": provider}
            if model:
                new_value[provider] = {"model": model}
            config_manager.db.add(SystemConfig(config_key="llm", config_value=new_value))

        await config_manager.db.commit()
    except Exception as e:
        try:
            await config_manager.db.rollback()
        except Exception:
            pass
        # Log the error but don't fail the request since the file was saved.
        import logging
        logger = logging.getLogger(__name__)
        logger.warning(f"Failed to update LLM config in database (file saved successfully): {e}")

    return await config_manager.get_llm_config()
@router.put("/", response_model=ConfigResponse)
async def update_config(
config_update: ConfigUpdateRequest,

View File

@ -8,7 +8,7 @@ from datetime import datetime, timezone, timedelta
from enum import Enum
from typing import Dict, List
from fastapi import APIRouter, HTTPException, Query
from fastapi import APIRouter, HTTPException, Query, Depends
from fastapi.responses import StreamingResponse
from app.core.config import settings
@ -24,6 +24,9 @@ from app.schemas.financial import (
)
from app.services.company_profile_client import CompanyProfileClient
from app.services.analysis_client import AnalysisClient, load_analysis_config, get_analysis_config
from app.core.dependencies import get_config_manager
from app.services.config_manager import ConfigManager
from app.services.client_factory import create_analysis_client
# Lazy DataManager loader to avoid import-time failures when optional providers/config are missing
_dm = None
@ -92,6 +95,7 @@ async def get_data_sources():
async def generate_full_analysis(
ts_code: str,
company_name: str = Query(None, description="Company name for better context"),
config_manager: ConfigManager = Depends(get_config_manager),
):
"""
Generate a full analysis report by orchestrating multiple analysis modules
@ -102,20 +106,11 @@ async def generate_full_analysis(
logger.info(f"[API] Full analysis requested for {ts_code}")
# Load base and analysis configurations
base_cfg = _load_json(BASE_CONFIG_PATH)
llm_provider = base_cfg.get("llm", {}).get("provider", "gemini")
llm_config = base_cfg.get("llm", {}).get(llm_provider, {})
api_key = llm_config.get("api_key")
base_url = llm_config.get("base_url")
if not api_key:
logger.error(f"[API] API key for {llm_provider} not configured")
raise HTTPException(
status_code=500,
detail=f"API key for {llm_provider} not configured."
)
# Load LLM configuration using ConfigManager
llm_config_result = await config_manager.get_llm_config()
default_provider = llm_config_result["provider"]
default_config = llm_config_result["config"]
global_model = llm_config_result.get("model") # 全局模型配置
analysis_config_full = load_analysis_config()
modules_config = analysis_config_full.get("analysis_modules", {})
@ -211,10 +206,15 @@ async def generate_full_analysis(
module_config = modules_config[module_type]
logger.info(f"[Orchestrator] Starting analysis for module: {module_type}")
client = AnalysisClient(
api_key=api_key,
base_url=base_url,
model=module_config.get("model", "gemini-1.5-flash")
# 统一使用全局配置,不再从模块配置读取 provider 和 model
# 使用全局 provider 和 model
model = global_model or default_config.get("model", "gemini-1.5-flash")
# Create client using factory with global config
client = create_analysis_client(
provider=default_provider,
config=default_config,
model=model
)
# Gather context from completed dependencies
@ -468,6 +468,7 @@ async def get_financials(
async def get_company_profile(
ts_code: str,
company_name: str = Query(None, description="Company name for better context"),
config_manager: ConfigManager = Depends(get_config_manager),
):
"""
Get company profile for a company using Gemini AI (non-streaming, single response)
@ -477,19 +478,26 @@ async def get_company_profile(
logger.info(f"[API] Company profile requested for {ts_code}")
# Load config
base_cfg = _load_json(BASE_CONFIG_PATH)
llm_provider = base_cfg.get("llm", {}).get("provider", "gemini")
llm_config = base_cfg.get("llm", {}).get(llm_provider, {})
# Load LLM configuration using ConfigManager
llm_config_result = await config_manager.get_llm_config()
provider = llm_config_result["provider"]
provider_config = llm_config_result["config"]
api_key = llm_config.get("api_key")
base_url = llm_config.get("base_url") # Will be None if not set, handled by client
# CompanyProfileClient only supports OpenAI-compatible APIs
if provider == "alpha_engine":
raise HTTPException(
status_code=400,
detail="Company profile generation does not support AlphaEngine provider. Please use OpenAI-compatible API."
)
api_key = provider_config.get("api_key")
base_url = provider_config.get("base_url")
if not api_key:
logger.error(f"[API] API key for {llm_provider} not configured")
logger.error(f"[API] API key for {provider} not configured")
raise HTTPException(
status_code=500,
detail=f"API key for {llm_provider} not configured."
detail=f"API key for {provider} not configured."
)
client = CompanyProfileClient(
@ -573,6 +581,7 @@ async def generate_analysis(
ts_code: str,
analysis_type: str,
company_name: str = Query(None, description="Company name for better context"),
config_manager: ConfigManager = Depends(get_config_manager),
):
"""
Generate analysis for a company using Gemini AI
@ -591,37 +600,11 @@ async def generate_analysis(
logger.info(f"[API] Analysis requested for {ts_code}, type: {analysis_type}")
# Load config
base_cfg = _load_json(BASE_CONFIG_PATH)
llm_provider = base_cfg.get("llm", {}).get("provider", "gemini")
llm_config = base_cfg.get("llm", {}).get(llm_provider, {})
api_key = llm_config.get("api_key")
base_url = llm_config.get("base_url")
if not api_key:
logger.error(f"[API] API key for {llm_provider} not configured")
raise HTTPException(
status_code=500,
detail=f"API key for {llm_provider} not configured."
)
# Get analysis configuration
analysis_cfg = get_analysis_config(analysis_type)
if not analysis_cfg:
raise HTTPException(
status_code=404,
detail=f"Analysis type '{analysis_type}' not found in configuration"
)
model = analysis_cfg.get("model", "gemini-2.5-flash")
prompt_template = analysis_cfg.get("prompt_template", "")
if not prompt_template:
raise HTTPException(
status_code=500,
detail=f"Prompt template not found for analysis type '{analysis_type}'"
)
# Load LLM configuration using ConfigManager
llm_config_result = await config_manager.get_llm_config()
default_provider = llm_config_result["provider"]
default_config = llm_config_result["config"]
global_model = llm_config_result.get("model") # 全局模型配置
# Get company name from ts_code if not provided
financial_data = None
@ -656,8 +639,30 @@ async def generate_analysis(
logger.info(f"[API] Generating {analysis_type} for {company_name}")
# Initialize analysis client with configured model
client = AnalysisClient(api_key=api_key, base_url=base_url, model=model)
# Get analysis configuration for prompt template
analysis_cfg = get_analysis_config(analysis_type)
if not analysis_cfg:
raise HTTPException(
status_code=404,
detail=f"Analysis type '{analysis_type}' not found in configuration"
)
prompt_template = analysis_cfg.get("prompt_template", "")
if not prompt_template:
raise HTTPException(
status_code=500,
detail=f"Prompt template not found for analysis type '{analysis_type}'"
)
# 统一使用全局配置,不再从模块配置读取 provider 和 model
model = global_model or default_config.get("model", "gemini-1.5-flash")
# 统一使用全局配置创建客户端
client = create_analysis_client(
provider=default_provider,
config=default_config,
model=model
)
# Prepare dependency context for single-module generation
# If the requested module declares dependencies, generate them first and inject their outputs
@ -701,12 +706,18 @@ async def generate_analysis(
# Fallback: if cycle detected, just use any order
order = list(all_required)
# Generate dependencies in order
# Generate dependencies in order - 统一使用全局配置
completed = {}
for mod in order:
cfg = modules_config.get(mod, {})
dep_ctx = {d: completed.get(d, "") for d in (cfg.get("dependencies", []) or [])}
dep_client = AnalysisClient(api_key=api_key, base_url=base_url, model=cfg.get("model", model))
# 统一使用全局配置,不再从模块配置读取
dep_client = create_analysis_client(
provider=default_provider,
config=default_config,
model=model
)
dep_result = await dep_client.generate_analysis(
analysis_type=mod,
company_name=company_name,
@ -888,6 +899,7 @@ async def stream_analysis(
ts_code: str,
analysis_type: str,
company_name: str = Query(None, description="Company name for better context"),
config_manager: ConfigManager = Depends(get_config_manager),
):
"""
Stream analysis content chunks for a given module using OpenAI-compatible streaming.
@ -899,24 +911,19 @@ async def stream_analysis(
logger.info(f"[API] Streaming analysis requested for {ts_code}, type: {analysis_type}")
# Load config
base_cfg = _load_json(BASE_CONFIG_PATH)
llm_provider = base_cfg.get("llm", {}).get("provider", "gemini")
llm_config = base_cfg.get("llm", {}).get(llm_provider, {})
api_key = llm_config.get("api_key")
base_url = llm_config.get("base_url")
if not api_key:
logger.error(f"[API] API key for {llm_provider} not configured")
raise HTTPException(status_code=500, detail=f"API key for {llm_provider} not configured.")
# Load LLM configuration using ConfigManager
llm_config_result = await config_manager.get_llm_config()
default_provider = llm_config_result["provider"]
default_config = llm_config_result["config"]
global_model = llm_config_result.get("model") # 全局模型配置
# Get analysis configuration
analysis_cfg = get_analysis_config(analysis_type)
if not analysis_cfg:
raise HTTPException(status_code=404, detail=f"Analysis type '{analysis_type}' not found in configuration")
model = analysis_cfg.get("model", "gemini-2.5-flash")
# 统一使用全局配置,不再从模块配置读取 provider 和 model
model = global_model or default_config.get("model", "gemini-1.5-flash")
prompt_template = analysis_cfg.get("prompt_template", "")
if not prompt_template:
raise HTTPException(status_code=500, detail=f"Prompt template not found for analysis type '{analysis_type}'")
@ -972,7 +979,13 @@ async def stream_analysis(
for mod in order:
cfg = modules_config.get(mod, {})
dep_ctx = {d: completed.get(d, "") for d in (cfg.get("dependencies", []) or [])}
dep_client = AnalysisClient(api_key=api_key, base_url=base_url, model=cfg.get("model", model))
# 统一使用全局配置,不再从模块配置读取
dep_client = create_analysis_client(
provider=default_provider,
config=default_config,
model=model
)
dep_result = await dep_client.generate_analysis(
analysis_type=mod,
company_name=company_name,
@ -986,7 +999,12 @@ async def stream_analysis(
except Exception:
context = {}
client = AnalysisClient(api_key=api_key, base_url=base_url, model=model)
# 统一使用全局配置创建客户端
client = create_analysis_client(
provider=default_provider,
config=default_config,
model=model
)
async def streamer():
# Optional header line to help client-side UI

View File

@ -2,7 +2,7 @@ import logging
import os
import json
from typing import Dict
from fastapi import APIRouter, BackgroundTasks, HTTPException
from fastapi import APIRouter, BackgroundTasks, HTTPException, Depends
# Lazy loader for DataManager
_dm = None
@ -23,6 +23,9 @@ def get_dm():
return _dm
from app.services.analysis_client import AnalysisClient, load_analysis_config
from app.core.dependencies import get_config_manager
from app.services.config_manager import ConfigManager
from app.services.client_factory import create_analysis_client
router = APIRouter()
logger = logging.getLogger(__name__)
@ -40,7 +43,7 @@ def _load_json(path: str) -> Dict:
except Exception:
return {}
async def run_full_analysis(org_id: str):
async def run_full_analysis(org_id: str, config_manager: ConfigManager = None):
"""
Asynchronous task to run a full analysis for a given stock.
This function is market-agnostic and relies on DataManager.
@ -48,16 +51,23 @@ async def run_full_analysis(org_id: str):
logger.info(f"Starting full analysis task for {org_id}")
# 1. Load configurations
base_cfg = _load_json(BASE_CONFIG_PATH)
llm_provider = base_cfg.get("llm", {}).get("provider", "gemini")
llm_config = base_cfg.get("llm", {}).get(llm_provider, {})
if config_manager is None:
# If called from background task, we need to create a new session
from app.core.database import AsyncSessionLocal
async with AsyncSessionLocal() as session:
config_manager = ConfigManager(db_session=session)
await _run_analysis_with_config(org_id, config_manager)
else:
await _run_analysis_with_config(org_id, config_manager)
api_key = llm_config.get("api_key")
base_url = llm_config.get("base_url")
if not api_key:
logger.error(f"API key for {llm_provider} not configured. Aborting analysis for {org_id}.")
return
async def _run_analysis_with_config(org_id: str, config_manager: ConfigManager):
"""Internal function to run analysis with a ConfigManager instance"""
# Load LLM configuration using ConfigManager
llm_config_result = await config_manager.get_llm_config()
default_provider = llm_config_result["provider"]
default_config = llm_config_result["config"]
global_model = llm_config_result.get("model") # 全局模型配置
analysis_config_full = load_analysis_config()
modules_config = analysis_config_full.get("analysis_modules", {})
@ -96,10 +106,15 @@ async def run_full_analysis(org_id: str):
analysis_results = {}
for module_type, module_config in modules_config.items():
logger.info(f"Running analysis module: {module_type} for {org_id}")
client = AnalysisClient(
api_key=api_key,
base_url=base_url,
model=module_config.get("model", "gemini-1.5-flash")
# 统一使用全局配置,不再从模块配置读取 provider 和 model
model = global_model or default_config.get("model", "gemini-1.5-flash")
# Create client using factory with global config
client = create_analysis_client(
provider=default_provider,
config=default_config,
model=model
)
# Simplified context: use results from all previously completed modules
@ -128,7 +143,7 @@ async def run_full_analysis(org_id: str):
@router.post("/{market}/{org_id}/reports/generate")
async def trigger_report_generation(market: str, org_id: str, background_tasks: BackgroundTasks):
async def trigger_report_generation(market: str, org_id: str, background_tasks: BackgroundTasks, config_manager: ConfigManager = Depends(get_config_manager)):
"""
Triggers a background task to generate a full financial report.
This endpoint is now market-agnostic.
@ -137,7 +152,8 @@ async def trigger_report_generation(market: str, org_id: str, background_tasks:
# TODO: Create a report record in the database with "generating" status here.
background_tasks.add_task(run_full_analysis, org_id)
# Pass config_manager to the background task
background_tasks.add_task(run_full_analysis, org_id, config_manager)
logger.info(f"Queued analysis task for {org_id}.")
return {"queued": True, "market": market, "org_id": org_id}

View File

@ -1,7 +1,7 @@
"""
Configuration-related Pydantic schemas
"""
from typing import Dict, Optional, Any
from typing import Dict, Optional, Any, List
from pydantic import BaseModel, Field
class DatabaseConfig(BaseModel):
@ -11,17 +11,30 @@ class NewApiConfig(BaseModel):
api_key: str = Field(..., description="New API Key")
base_url: Optional[str] = None
class AlphaEngineConfig(BaseModel):
    """Connection and query settings for the AlphaEngine LLM provider."""
    api_url: str = Field(..., description="AlphaEngine API URL")
    api_key: str = Field(..., description="AlphaEngine API Key")
    token: str = Field(..., description="AlphaEngine Token")
    user_id: int = Field(999041, description="User ID")
    model: str = Field("deepseek-r1", description="Model name")
    using_indicator: bool = Field(True, description="Whether to use indicators")
    start_time: str = Field("2024-01-01", description="Start time for data query")
    # default_factory so each instance gets its own list object; same default
    # values as before, just the idiomatic spelling for a mutable default.
    doc_show_type: List[str] = Field(
        default_factory=lambda: ["A001", "A002", "A003", "A004"],
        description="Document types",
    )
    simple_tracking: bool = Field(True, description="Whether to enable simple tracking")
class DataSourceConfig(BaseModel):
    # Credential for one external data source; keyed by source name in
    # ConfigResponse.data_sources.
    api_key: str = Field(..., description="数据源API Key")
class ConfigResponse(BaseModel):
    # Full system configuration returned by the config API.
    database: DatabaseConfig
    new_api: NewApiConfig
    # Only populated when an alpha_engine section exists in the merged config.
    alpha_engine: Optional[AlphaEngineConfig] = None
    data_sources: Dict[str, DataSourceConfig]
class ConfigUpdateRequest(BaseModel):
    # Partial update payload: only the sections provided are changed.
    database: Optional[DatabaseConfig] = None
    new_api: Optional[NewApiConfig] = None
    alpha_engine: Optional[AlphaEngineConfig] = None
    data_sources: Optional[Dict[str, DataSourceConfig]] = None
class ConfigTestRequest(BaseModel):

View File

@ -0,0 +1,260 @@
"""
AlphaEngine Client for investment Q&A API
"""
import json
import re
import time
from typing import Dict, Optional, AsyncGenerator
import httpx
from requests.exceptions import ChunkedEncodingError
class AlphaEngineClient:
    """Client for the AlphaEngine investment Q&A API (``/api/v3/finchat``).

    Supports one-shot generation (``generate_analysis``) and streaming
    (``generate_analysis_stream``). The service answers with a text body
    containing JSON fragments; the one-shot path extracts the final answer,
    chain-of-thought, and tracking documents with regexes over the raw body.
    """

    def __init__(
        self,
        api_url: str,
        api_key: str,
        token: str,
        user_id: int = 999041,
        model: str = "deepseek-r1",
        using_indicator: bool = True,
        start_time: str = "2024-01-01",
        doc_show_type: list = None,
        simple_tracking: bool = True
    ):
        """
        Initialize AlphaEngine client.

        Args:
            api_url: API endpoint URL (trailing slash is stripped)
            api_key: X-API-KEY for authentication
            token: Token for authentication
            user_id: User ID
            model: Model name (default: deepseek-r1)
            using_indicator: Whether to use indicators
            start_time: Start time for data query
            doc_show_type: Document types to show
                (default: ["A001", "A002", "A003", "A004"])
            simple_tracking: Whether to enable simple tracking
        """
        self.api_url = api_url.rstrip('/')
        self.api_key = api_key
        self.token = token
        self.user_id = user_id
        self.model = model
        self.using_indicator = using_indicator
        self.start_time = start_time
        # None default avoids a shared mutable default argument.
        self.doc_show_type = doc_show_type or ["A001", "A002", "A003", "A004"]
        self.simple_tracking = simple_tracking

    def _headers(self) -> Dict:
        # Auth headers required by the service on every call.
        return {
            'token': self.token,
            'X-API-KEY': self.api_key,
            'Content-Type': 'application/json'
        }

    def _payload(self, prompt: str) -> Dict:
        # Request body shared by streaming and non-streaming calls.
        return {
            "msg": prompt,
            "history": [],
            "user_id": self.user_id,
            "model": self.model,
            "using_indicator": self.using_indicator,
            "start_time": self.start_time,
            "doc_show_type": self.doc_show_type,
            "simple_tracking": self.simple_tracking
        }

    async def generate_analysis(
        self,
        analysis_type: str,
        company_name: str,
        ts_code: str,
        prompt_template: str,
        financial_data: Optional[Dict] = None,
        context: Optional[Dict] = None
    ) -> Dict:
        """
        Generate analysis using AlphaEngine API (non-streaming).

        Args:
            analysis_type: Type of analysis
            company_name: Company name
            ts_code: Stock code
            prompt_template: Prompt template with placeholders
            financial_data: Optional financial data for context
            context: Optional dictionary with results from previous analyses

        Returns:
            Dict with analysis content and metadata. On failure ``success``
            is False and ``error`` carries the message; never raises.
        """
        started_ns = time.perf_counter_ns()

        prompt = self._build_prompt(
            prompt_template, company_name, ts_code, financial_data, context
        )

        try:
            async with httpx.AsyncClient(timeout=300.0) as client:
                response = await client.post(
                    f"{self.api_url}/api/v3/finchat",
                    json=self._payload(prompt),
                    headers=self._headers()
                )
                if response.status_code != 200:
                    raise Exception(
                        f"AlphaEngine API error: HTTP {response.status_code} - {response.text}"
                    )

                result_text = response.text

                # NOTE(review): parsing relies on regexes over the raw body;
                # content containing '"}' could truncate a match — confirm
                # against the AlphaEngine response format.
                final_answer_match = re.findall(r'\{"id":"_final","content":"(.*?)"}', result_text)
                final_answer = final_answer_match[0] if final_answer_match else result_text

                # Chain-of-thought fragments, concatenated when present.
                cot_match = re.findall(r'\{"id":"_cot","content":"(.*?)"}', result_text)
                cot = "".join(cot_match) if cot_match else ""

                # Tracking documents arrive as an embedded JSON array.
                tracking_match = re.findall(
                    r'\{"id":"tracking_documents","content":\s*(\[[^]]*])}', result_text
                )
                tracking_docs = json.loads(tracking_match[0]) if tracking_match else []

                elapsed_ms = int((time.perf_counter_ns() - started_ns) / 1_000_000)
                return {
                    "content": final_answer,
                    "model": self.model,
                    "tokens": {
                        # AlphaEngine doesn't provide token usage.
                        "prompt_tokens": 0,
                        "completion_tokens": 0,
                        "total_tokens": 0,
                    },
                    "elapsed_ms": elapsed_ms,
                    "success": True,
                    "analysis_type": analysis_type,
                    "cot": cot,
                    "tracking_documents": tracking_docs,
                }
        except Exception as e:
            elapsed_ms = int((time.perf_counter_ns() - started_ns) / 1_000_000)
            return {
                "content": "",
                "model": self.model,
                "tokens": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
                "elapsed_ms": elapsed_ms,
                "success": False,
                "error": str(e),
                "analysis_type": analysis_type,
            }

    async def generate_analysis_stream(
        self,
        analysis_type: str,
        company_name: str,
        ts_code: str,
        prompt_template: str,
        financial_data: Optional[Dict] = None,
        context: Optional[Dict] = None
    ) -> AsyncGenerator[str, None]:
        """
        Yield analysis content chunks using AlphaEngine streaming API.

        Yields plain text chunks as they arrive; errors are surfaced as a
        final "[错误] ..." text chunk rather than raised.
        """
        import codecs

        prompt = self._build_prompt(
            prompt_template, company_name, ts_code, financial_data, context
        )

        try:
            async with httpx.AsyncClient(timeout=300.0) as client:
                async with client.stream(
                    "POST",
                    f"{self.api_url}/api/v3/finchat",
                    json=self._payload(prompt),
                    headers=self._headers()
                ) as response:
                    if response.status_code != 200:
                        # A streamed response body must be read explicitly
                        # before .text is available (httpx ResponseNotRead).
                        await response.aread()
                        yield f"\n\n[错误] HTTP {response.status_code}: {response.text}\n"
                        return
                    # Incremental decoder: a 128-byte chunk can split a
                    # multi-byte UTF-8 sequence; decoding chunks independently
                    # with errors='ignore' would drop those characters.
                    decoder = codecs.getincrementaldecoder("utf-8")("ignore")
                    async for chunk in response.aiter_bytes(chunk_size=128):
                        text = decoder.decode(chunk)
                        if text:
                            yield text
                    tail = decoder.decode(b"", final=True)
                    if tail:
                        yield tail
        except Exception as e:
            yield f"\n\n[错误] {type(e).__name__}: {str(e)}\n"

    def _build_prompt(
        self,
        prompt_template: str,
        company_name: str,
        ts_code: str,
        financial_data: Optional[Dict] = None,
        context: Optional[Dict] = None
    ) -> str:
        """Build the prompt from the template, leaving unknown placeholders intact."""
        import string

        # Base placeholders available to every template.
        placeholders = {
            "company_name": company_name,
            "ts_code": ts_code,
        }

        # Financial data is serialized to pretty-printed JSON when possible.
        financial_data_str = ""
        if financial_data:
            try:
                financial_data_str = json.dumps(financial_data, ensure_ascii=False, indent=2)
            except Exception:
                financial_data_str = str(financial_data)
        placeholders["financial_data"] = financial_data_str

        # Results from earlier analysis steps become placeholders too.
        if context:
            placeholders.update(context)

        class SafeFormatter(string.Formatter):
            # Unknown {keys} are re-emitted literally instead of raising.
            def get_value(self, key, args, kwargs):
                if isinstance(key, str):
                    return kwargs.get(key, f"{{{key}}}")
                else:
                    return super().get_value(key, args, kwargs)

        return SafeFormatter().format(prompt_template, **placeholders)

View File

@ -0,0 +1,60 @@
"""
Unified Analysis Client Factory
Creates appropriate client based on provider type
"""
from typing import Dict, Optional
from app.services.analysis_client import AnalysisClient
from app.services.alpha_engine_client import AlphaEngineClient
def create_analysis_client(
    provider: str,
    config: Dict,
    model: str = None
):
    """
    Instantiate the right analysis client for the given provider.

    Args:
        provider: Provider type ("openai", "gemini", "new_api", "alpha_engine")
        config: Provider-specific configuration dictionary
        model: Optional model name; takes precedence over the config's model

    Returns:
        AlphaEngineClient for "alpha_engine", otherwise an AnalysisClient
        speaking the OpenAI-compatible protocol.
    """
    if provider != "alpha_engine":
        # OpenAI-compatible path (openai, gemini, new_api, ...).
        return AnalysisClient(
            api_key=config.get("api_key", ""),
            base_url=config.get("base_url", ""),
            model=model or config.get("model", "gemini-1.5-flash"),
        )

    # AlphaEngine-specific wiring.
    return AlphaEngineClient(
        api_url=config.get("api_url", ""),
        api_key=config.get("api_key", ""),
        token=config.get("token", ""),
        user_id=config.get("user_id", 999041),
        model=model or config.get("model", "deepseek-r1"),
        using_indicator=config.get("using_indicator", True),
        start_time=config.get("start_time", "2024-01-01"),
        doc_show_type=config.get("doc_show_type", ["A001", "A002", "A003", "A004"]),
        simple_tracking=config.get("simple_tracking", True),
    )

View File

@ -68,14 +68,14 @@ class CompanyProfileClient:
"error": str(e),
}
def generate_profile_stream(
async def generate_profile_stream(
self,
company_name: str,
ts_code: str,
financial_data: Optional[Dict] = None
):
"""
Generate company profile using Gemini API with streaming
Generate company profile using OpenAI-compatible streaming API
Args:
company_name: Company name
@ -85,40 +85,31 @@ class CompanyProfileClient:
Yields:
Chunks of generated content
"""
import logging
logger = logging.getLogger(__name__)
logger.info(f"[CompanyProfile] Starting stream generation for {company_name} ({ts_code})")
# Build prompt
prompt = self._build_prompt(company_name, ts_code, financial_data)
logger.info(f"[CompanyProfile] Prompt built, length: {len(prompt)} chars")
# Call Gemini API with streaming
# Call OpenAI-compatible API with streaming
try:
logger.info("[CompanyProfile] Calling Gemini API with stream=True")
# Generate streaming response (sync call, but yields chunks)
response_stream = self.model.generate_content(prompt, stream=True)
logger.info("[CompanyProfile] Gemini API stream object created")
chunk_count = 0
# Stream chunks
logger.info("[CompanyProfile] Starting to iterate response stream")
for chunk in response_stream:
logger.info(f"[CompanyProfile] Received chunk from Gemini, has text: {hasattr(chunk, 'text')}")
if hasattr(chunk, 'text') and chunk.text:
chunk_count += 1
text_len = len(chunk.text)
logger.info(f"[CompanyProfile] Chunk {chunk_count}: {text_len} chars")
yield chunk.text
else:
logger.warning(f"[CompanyProfile] Chunk has no text attribute or empty, chunk: {chunk}")
logger.info(f"[CompanyProfile] Stream iteration completed. Total chunks: {chunk_count}")
stream = await self.client.chat.completions.create(
model=self.model_name,
messages=[{"role": "user", "content": prompt}],
stream=True,
)
# The SDK yields events with incremental deltas
async for event in stream:
try:
choice = event.choices[0] if getattr(event, "choices", None) else None
delta = getattr(choice, "delta", None) if choice is not None else None
content = getattr(delta, "content", None) if delta is not None else None
if content:
yield content
except Exception:
# Best-effort: ignore malformed chunks
continue
except Exception as e:
logger.error(f"[CompanyProfile] Error during streaming: {type(e).__name__}: {str(e)}", exc_info=True)
yield f"\n\n---\n\n**错误**: {type(e).__name__}: {str(e)}"
# Emit error message to the stream so the client can surface it
yield f"\n\n[错误] {type(e).__name__}: {str(e)}\n"
def _build_prompt(self, company_name: str, ts_code: str, financial_data: Optional[Dict] = None) -> str:
"""Build prompt for company profile generation"""

View File

@ -12,7 +12,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.models.system_config import SystemConfig
from app.schemas.config import ConfigResponse, ConfigUpdateRequest, DatabaseConfig, NewApiConfig, DataSourceConfig, ConfigTestResponse
from app.schemas.config import ConfigResponse, ConfigUpdateRequest, DatabaseConfig, NewApiConfig, AlphaEngineConfig, DataSourceConfig, ConfigTestResponse
class ConfigManager:
"""Manages system configuration by merging a static JSON file with dynamic settings from the database."""
@ -72,25 +72,114 @@ class ConfigManager:
# 兼容两种位置:优先使用 new_api其次回退到 llm.new_api
new_api_src = merged_config.get("new_api") or merged_config.get("llm", {}).get("new_api", {})
# 获取 alpha_engine 配置
alpha_engine_src = merged_config.get("alpha_engine") or merged_config.get("llm", {}).get("alpha_engine")
alpha_engine_config = None
if alpha_engine_src:
alpha_engine_config = AlphaEngineConfig(**alpha_engine_src)
return ConfigResponse(
database=DatabaseConfig(**merged_config.get("database", {})),
new_api=NewApiConfig(**(new_api_src or {})),
alpha_engine=alpha_engine_config,
data_sources={
k: DataSourceConfig(**v)
for k, v in merged_config.get("data_sources", {}).items()
}
)
async def get_llm_config(self, provider: str = None) -> Dict[str, Any]:
    """Resolve the effective LLM settings for one provider.

    The static file config is merged with dynamic DB overrides, and the
    requested provider's section is extracted from the merged ``llm`` block.

    Args:
        provider: Provider name such as "new_api", "gemini" or
            "alpha_engine". Falls back to the configured ``llm.provider``
            (default "new_api") when omitted.

    Returns:
        Mapping with the resolved ``provider`` name, its ``config`` section
        and the provider-level ``model`` (None when unset).
    """
    merged = self._merge_configs(
        self._load_base_config_from_file(),
        await self._load_dynamic_config_from_db(),
    )
    llm_section = merged.get("llm", {})

    # An explicit argument wins; otherwise honour the configured provider.
    chosen = provider or llm_section.get("provider", "new_api")
    provider_settings = llm_section.get(chosen, {})

    return {
        "provider": chosen,
        "config": provider_settings,
        # Surface the provider's model at top level for convenience.
        "model": provider_settings.get("model"),
    }
def _filter_empty_values(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Remove empty strings and None values from config dict, but keep 0 and False."""
filtered = {}
for key, value in config_dict.items():
if isinstance(value, dict):
filtered_value = self._filter_empty_values(value)
if filtered_value: # Only add if dict is not empty
filtered[key] = filtered_value
elif value is not None and value != "":
filtered[key] = value
return filtered
async def update_config(self, config_update: ConfigUpdateRequest) -> ConfigResponse:
"""Updates configuration in the database and returns the new merged config."""
try:
update_dict = config_update.dict(exclude_unset=True)
# 过滤空值
update_dict = self._filter_empty_values(update_dict)
# 验证配置数据
self._validate_config_data(update_dict)
# 处理 LLM 相关配置:需要保存到 llm 配置下
llm_updates = {}
if "new_api" in update_dict:
llm_updates["new_api"] = update_dict.pop("new_api")
if "alpha_engine" in update_dict:
llm_updates["alpha_engine"] = update_dict.pop("alpha_engine")
# 保存 LLM 配置
if llm_updates:
result = await self.db.execute(
select(SystemConfig).where(SystemConfig.config_key == "llm")
)
existing_llm_config = result.scalar_one_or_none()
if existing_llm_config:
if isinstance(existing_llm_config.config_value, dict):
merged_llm = self._merge_configs(existing_llm_config.config_value, llm_updates)
existing_llm_config.config_value = merged_llm
else:
existing_llm_config.config_value = llm_updates
else:
# 从文件加载基础配置,然后合并
base_config = self._load_base_config_from_file()
base_llm = base_config.get("llm", {})
merged_llm = self._merge_configs(base_llm, llm_updates)
new_llm_config = SystemConfig(config_key="llm", config_value=merged_llm)
self.db.add(new_llm_config)
# 保存其他配置database, data_sources 等)
for key, value in update_dict.items():
existing_config = await self.db.get(SystemConfig, key)
result = await self.db.execute(
select(SystemConfig).where(SystemConfig.config_key == key)
)
existing_config = result.scalar_one_or_none()
if existing_config:
# Merge with existing DB value before updating
if isinstance(existing_config.config_value, dict) and isinstance(value, dict):
@ -112,23 +201,32 @@ class ConfigManager:
"""Validate configuration data before saving."""
if "database" in config_data:
db_config = config_data["database"]
if "url" in db_config:
if "url" in db_config and db_config["url"]:
url = db_config["url"]
if not url.startswith(("postgresql://", "postgresql+asyncpg://")):
raise ValueError("数据库URL必须以 postgresql:// 或 postgresql+asyncpg:// 开头")
if "new_api" in config_data:
new_api_config = config_data["new_api"]
if "api_key" in new_api_config and len(new_api_config["api_key"]) < 10:
if "api_key" in new_api_config and new_api_config["api_key"] and len(new_api_config["api_key"]) < 10:
raise ValueError("New API Key长度不能少于10个字符")
if "base_url" in new_api_config and new_api_config["base_url"]:
base_url = new_api_config["base_url"]
if not base_url.startswith(("http://", "https://")):
raise ValueError("New API Base URL必须以 http:// 或 https:// 开头")
if "alpha_engine" in config_data:
alpha_engine_config = config_data["alpha_engine"]
if "api_key" in alpha_engine_config and alpha_engine_config["api_key"] and len(alpha_engine_config["api_key"]) < 5:
raise ValueError("AlphaEngine API Key长度不能少于5个字符")
if "api_url" in alpha_engine_config and alpha_engine_config["api_url"]:
api_url = alpha_engine_config["api_url"]
if not api_url.startswith(("http://", "https://")):
raise ValueError("AlphaEngine API URL必须以 http:// 或 https:// 开头")
if "data_sources" in config_data:
for source_name, source_config in config_data["data_sources"].items():
if "api_key" in source_config and len(source_config["api_key"]) < 10:
if "api_key" in source_config and source_config["api_key"] and len(source_config["api_key"]) < 10:
raise ValueError(f"{source_name} API Key长度不能少于10个字符")
async def test_config(self, config_type: str, config_data: Dict[str, Any]) -> ConfigTestResponse:
@ -142,6 +240,8 @@ class ConfigManager:
return await self._test_tushare(config_data)
elif config_type == "finnhub":
return await self._test_finnhub(config_data)
elif config_type == "alpha_engine":
return await self._test_alpha_engine(config_data)
else:
return ConfigTestResponse(
success=False,
@ -302,3 +402,57 @@ class ConfigManager:
success=False,
message=f"Finnhub API连接失败: {str(e)}"
)
async def _test_alpha_engine(self, config_data: Dict[str, Any]) -> ConfigTestResponse:
    """Probe the AlphaEngine finchat endpoint with a minimal request.

    Requires ``api_url``, ``api_key`` and ``token`` in *config_data*; the
    remaining payload fields fall back to defaults unless overridden by the
    caller. Any non-200 HTTP status or transport error is reported as a
    failure in the returned ConfigTestResponse.
    """
    api_url = config_data.get("api_url")
    api_key = config_data.get("api_key")
    token = config_data.get("token")

    # All three credentials are mandatory for the probe.
    if not (api_url and api_key and token):
        return ConfigTestResponse(
            success=False,
            message="AlphaEngine API URL、API Key和Token均不能为空"
        )

    headers = {
        'token': token,
        'X-API-KEY': api_key,
        'Content-Type': 'application/json'
    }
    # Minimal "ping" payload; optional fields honour caller-supplied overrides.
    probe_payload = {
        "msg": "测试连接",
        "history": [],
        "user_id": config_data.get("user_id", 999041),
        "model": config_data.get("model", "deepseek-r1"),
        "using_indicator": config_data.get("using_indicator", True),
        "start_time": config_data.get("start_time", "2024-01-01"),
        "doc_show_type": config_data.get("doc_show_type", ["A001", "A002", "A003", "A004"]),
        "simple_tracking": config_data.get("simple_tracking", True)
    }
    endpoint = f"{api_url.rstrip('/')}/api/v3/finchat"

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.post(endpoint, json=probe_payload, headers=headers)
    except Exception as e:
        return ConfigTestResponse(
            success=False,
            message=f"AlphaEngine API连接失败: {str(e)}"
        )

    if response.status_code == 200:
        return ConfigTestResponse(
            success=True,
            message="AlphaEngine API连接成功"
        )
    return ConfigTestResponse(
        success=False,
        message=f"AlphaEngine API测试失败: HTTP {response.status_code} - {response.text[:200]}"
    )

File diff suppressed because one or more lines are too long

View File

@ -1,6 +1,6 @@
{
"llm": {
"provider": "new_api",
"provider": "alpha_engine",
"gemini": {
"base_url": "",
"api_key": "YOUR_GEMINI_API_KEY"
@ -8,6 +8,22 @@
"new_api": {
"base_url": "http://192.168.3.214:3000/v1",
"api_key": "sk-DdTTQ5fdU1aFW6gnYxSNYDgFsVQg938zUcmY4vaB7oPtcNs7"
},
"alpha_engine": {
"api_url": "http://api-ai-prod.valuesimplex.tech",
"api_key": "api@shangjian!",
"token": "9b5c0b6a5e1e4e8fioouiouqiuioasaz",
"user_id": 999041,
"model": "deepseek-r1",
"using_indicator": true,
"start_time": "2024-01-01",
"doc_show_type": [
"A001",
"A002",
"A003",
"A004"
],
"simple_tracking": true
}
},
"data_sources": {

View File

@ -0,0 +1,99 @@
# coding:utf-8
import json
import re
import requests
from requests.exceptions import ChunkedEncodingError
# 请求地址
qa_url = "http://api-ai-prod.valuesimplex.tech/api/v3/finchat"
# 熵简提供的x-api-key
api_key = "api@shangjian!"
token = "9b5c0b6a5e1e4e8fioouiouqiuioasaz"
user_id = 999041
def ask(question, user_id, stream_enabled=True, simple_tracking=True):
    """Send a question to the AlphaEngine finchat endpoint and return the raw reply.

    Fix: the original body read ``stream_enabled`` and ``simple_tracking`` as
    module globals that are only assigned under the ``__main__`` guard, so
    importing this module and calling ``ask()`` raised NameError. They are now
    keyword parameters whose defaults match the script's original values, so
    existing ``ask(question, user_id)`` calls behave identically.

    Args:
        question: Natural-language question to send.
        user_id: AlphaEngine user id to attribute the request to.
        stream_enabled: When True, consume the chunked response incrementally
            and echo each chunk as it arrives; otherwise read the whole body.
        simple_tracking: Whether to request simple source tracking.

    Returns:
        The accumulated response text, or '' on a non-200 response.
    """
    # Request headers carry both the tenant token and the API key.
    headers = {
        'token': token,
        'X-API-KEY': api_key,
        'Content-Type': 'application/json'
    }
    # Build the request body.
    payload = json.dumps({
        "msg": question,
        "history": [],  # prior Q&A turns; empty list when there is no history
        "user_id": user_id,
        "model": "deepseek-r1",  # default model, do not change
        "using_indicator": True,  # whether to use indicators
        "start_time": "2024-01-01",  # start time for the data window
        "doc_show_type": ["A001", "A002", "A003", "A004"],  # document types
        "simple_tracking": simple_tracking  # simple source tracking flag
    })
    print(f"******开始提问:[{question}]")
    # Send the request; stream=True so chunks can be consumed incrementally.
    response = requests.request("POST", qa_url, data=payload, headers=headers, stream=True)
    qa_result = ''
    if response.status_code == 200:
        if stream_enabled:
            try:
                for chunk in response.iter_content(chunk_size=128):
                    # errors='ignore' never raises UnicodeDecodeError; malformed
                    # bytes are silently dropped (the old except branch was dead).
                    chunk_event = chunk.decode('utf-8', 'ignore')
                    qa_result += chunk_event
                    print(f"\033[1;32m" + chunk_event)
            except ChunkedEncodingError:
                print("Stream ended prematurely. Handling gracefully.")
        else:
            # Non-streaming: read the whole body and decode it in one go.
            qa_result = response.content
            qa_result = qa_result.decode('utf-8')
    else:
        print(f"Failed to get stream data. Status code: {response.status_code}")
    # Return the accumulated result.
    return qa_result
if __name__ == '__main__':
    # Question to ask.
    question = '科大讯飞业绩怎么样?'
    # Enable streamed ("word by word") output.
    stream_enabled = True
    # Enable simple source tracking.
    simple_tracking = True
    # Run the Q&A round-trip.
    result = ask(question, user_id)

    # Print the chain-of-thought fragments, if any.
    print("**************COT**************")
    cot_list = re.findall(r'\{"id":"_cot","content":"(.*?)"}', result)
    cot = "".join(cot_list)
    print(cot)
    print("**********************************")

    # Print the final answer; guard against a missing "_final" record
    # (the original indexed [0] unconditionally and could raise IndexError).
    print("**************最终答案**************")
    final_list = re.findall(r'\{"id":"_final","content":"(.*?)"}', result)
    if final_list:
        print(final_list[0])
    else:
        print("(no final answer found in response)")
    print("**********************************")

    if simple_tracking:
        # Print the source-tracking documents, when present.
        print("**************溯源文件**************")
        source_file = re.findall(r'\{"id":"tracking_documents","content":\s*(\[[^]]*])}', result)
        if source_file:
            print(source_file[0])
        print("**********************************")

View File

@ -5,7 +5,7 @@
### 当前功能与数据状态
- **A股财务数据**:已可正常获取并落库/展示。
- **每股口径per-share数据**:仅部分可得;个别财务指标存在异常或口径不一致的问题。
- **美股财务数据**:仅部分可得;个别财务指标存在异常或口径不一致的问题。
- 相关定义、口径说明与已知问题,请参见文档:[财务数据字典](./financial_data_dictionary.md)。
- **报告与分析**
- 首页输入公司代码与市场,点击“生成报告”后,应展示:
@ -16,7 +16,7 @@
### 系统运行与部署
- **数据库与网络**
- 数据库部署在许公司内部网络环境中;需要内网或通过跳板/映射方式访问。
- 数据库部署在许公司内部网络环境中;需要内网或通过跳板/映射方式访问。
- 本地运行可能存在连接限制。数据库当前未在本机配置,但可按需进行本机配置(需要对 LV 做调整,最终仍以本机配置为主)。
- **运行方式**
- 可将项目打包为 Docker 运行,因此也支持纯本地部署。
@ -27,13 +27,13 @@
- 目前在许成的一台虚拟机上运行,便于访问内部数据库,并通过 LV 保垒机进行映射。
### 已知问题/限制
- 股数据覆盖面不全,部分财务指标存在疑似口径或计算问题(详见“财务数据字典”)。
- 股数据覆盖面不全,部分财务指标存在疑似口径或计算问题(详见“财务数据字典”)。
- 数据库处于内网环境,外部或本地直连存在门槛;需配置 LV/隧道或切换至本机数据库配置。
- 大模型分析仍以提示词工程为主,未融合多源结构化信号。
### 后续计划(优先级由高到低)
1. **完成美股数据获取并校验正确性**(当前最紧要)
- 引入更多数据源以提升覆盖面与一致性(如同花顺 iFinD);如能接入 Bloomberg蓬勃更佳,但实现难度较高。
- 引入更多数据源以提升覆盖面与一致性(如同花顺 iFinD 密钥可以问许成SDK 在官网可以下载 );如能接入 Bloomberg 更佳,但实现难度较高。
2. **接入第三方大模型 API**(由许成购买的公司提供)
- 数据范围:新闻公告、研究报告、电话会议纪要等。
- 具体接入方式尚未确定,需进一步讨论与评估。

View File

@ -274,3 +274,5 @@ A:

View File

@ -0,0 +1,448 @@
# 技术开发合同
(委托开发)
**合同编号:** 3PRISM-202511-001
**项目名称:** 股票AI分析系统
# 委托方(甲方)
单位名称: 无锡汇蠡投资管理中心(有限合伙)
统一社会信用代码(税号): 91320211MA1TDC2M14
单位地址: 无锡市建筑西路599-63号楼四楼404室
负责人/法定代表人: 许成
联系电话: 0510-88555066
开户银行: 江苏银行无锡科技支行
银行账户: 21910188000135723
# 研究开发方(乙方)
单位名称: 上海棱芯人工智能科技有限公司
统一社会信用代码(税号): 91310104MAERNTU37D
单位地址: 上海市徐汇区云锦路181号3层301室
负责人/法定代表人: 吕琦
联系电话: 13852282381
开户银行: 招商银行股份有限公司上海徐汇滨江支行
账号: 121986556710001
---
甲乙双方本着平等自愿、互惠互利的原则,根据《中华人民共和国民法典》及相关法律法规,经友好协商,就甲方委托乙方研究开发"股票AI分析系统"事宜,达成如下协议,以资共同遵守。
# 一、项目名称与开发目标
1. **项目名称:** 股票AI分析系统
2. **开发目标:** 研发一套基于人工智能的股票分析系统,形成"数据采集与处理 + AI模型训练 + 分析预测算法 + 可视化展示"的完整方案。实现多数据源金融数据获取、实时数据处理、深度学习模型构建与优化、量化投资策略开发、风险评估与回测分析、Web界面展示与API接口交付满足分析精度、预测准确率与系统稳定性的工程化应用系统与文档。
# 二、技术成果的内容、形式和要求
1. **项目委托:** 本项目由甲方委托乙方承担"股票AI分析系统"的总体研发工作包括数据采集接口开发、AI算法模型设计、量化策略研究与实现、系统架构设计、Web应用开发、API接口设计与实现以及成套技术文件编制。
2. **保证条款:** 乙方保证其具备完成本项目所需的技术能力,并确保所交付的技术成果(包括硬件设计和软件代码)为原创或已获得合法授权,不存在任何知识产权侵权行为。
3. **研发内容与技术要求:**
**3.1 数据采集与处理系统**
* **数据源接口开发:** 集成多家金融数据提供商API包括Tushare、Finnhub、Yahoo Finance等实现股票、指数、期货等金融数据的实时获取
* **数据清洗与预处理:** 建立数据质量控制流程,包括异常值检测、缺失值填充、数据标准化与特征工程;
* **数据库设计:** 设计高性能时序数据库架构,支持海量金融数据的存储与快速查询;
* **数据管道构建:** 实现ETL流程自动化保障数据采集的稳定性和实时性
* **API网关设计** 构建统一的数据访问接口,支持多种数据格式和协议。
**3.2 大模型集成与分析系统**
* **大模型集成:** 集成Google Gemini、OpenAI GPT、腾讯混元、字节跳动豆包等主流大语言模型的API接口
* **分析模块开发:** 实现9个专业分析模块公司简介、基本面分析、看涨分析、看跌分析、市场分析、新闻分析、交易分析、内部人动向、最终结论
* **智能分析流程:** 基于大模型的多轮对话分析,支持上下文理解和专业知识推理;
* **质量保障:** 建立分析结果审核机制,确保分析内容的专业性和准确性;
* **分析优化:** 持续优化分析prompt和模型参数提升分析质量和用户体验。
**3.3 Web应用与API服务**
* **前端界面开发:** 基于React/Next.js构建现代化Web界面支持图表展示和交互操作
* **后端API设计** 使用FastAPI开发RESTful API支持数据查询、模型调用和结果展示
* **实时数据流:** 实现WebSocket连接支持实时股价更新和市场数据推送
* **用户管理系统:** 构建用户权限控制,支持多用户同时访问和个性化配置;
* **部署运维:** 设计容器化部署方案,支持分布式部署和自动扩缩容;
* **开发语言:** 以Python为主实现AI算法与数据处理Web前端采用TypeScript/React后端采用Python/FastAPI。
4. **技术文档:**
* 系统架构设计文档(含数据流图、模块设计、接口说明)
* AI算法设计文档含模型架构、训练流程、性能评估报告
* 数据处理说明书含数据源配置、ETL流程、质量控制
* API接口文档含RESTful API规范、WebSocket协议、数据格式
* 源代码包含必要的注释与SDK/示例
* 部署运维手册(含安装配置、监控告警、故障排除指南)
# 三、研究开发经费及其支付方式
1. **合同总金额:** 本合同技术开发经费总额为人民币 **壹拾万元整**(¥ **100,000.00**,含增值税,按照小规模纳税人3%征收率计算)。
2. **付款方式:** 采用两阶段付款方式先支付50%启动资金验收合格后再支付剩余50%,具体支付节点见附件《付款时间节点说明》。
3. **发票:** 乙方应在甲方每次付款前,向甲方开具等额的增值税普通发票,发票内容为"技术开发服务费"。
# 四、研究开发计划、进度和交付要求
1. **第一阶段7日内** 数据采集与处理系统开发
* 完成多数据源API接口集成与测试
* 完成数据清洗预处理流程开发;
* 建立时序数据库架构设计与实现。
2. **第二阶段7日内** 大模型集成与分析系统开发
* 完成主流大语言模型API集成与测试
* 实现9个专业分析模块的智能分析功能
* 建立分析流程自动化和质量控制机制。
3. **第三阶段7日内** Web应用与API服务开发
* 完成前端界面设计与开发;
* 实现后端API服务与实时数据流
* 构建用户管理系统与权限控制。
4. **第四阶段7日内** 系统集成测试与交付
* 完成系统集成测试与性能优化;
* 指标验证(预测准确率、响应时间、系统稳定性);
* 编制成套技术文档并完成最终交付。
# 五、验收标准与验收程序
1. **验收时间:** 乙方完成并提交最终交付物后甲方应在15个工作日内组织验收。
2. **功能与性能验收标准:**
* **数据采集与处理:**
* 多数据源接口测试合格数据获取成功率≥99.5%、响应时间<2秒
* 数据质量控制达标异常值检测率≥95%、数据完整性≥99%
* 数据库性能满足要求(查询响应时间<100ms并发处理能力1000QPS
* **大模型与分析:**
* 大模型API集成测试合格调用成功率≥99%、响应时间<5秒
* 9个分析模块功能完整公司简介、基本面分析、看涨看跌分析等
* 分析结果质量达标内容完整性≥95%、专业性评分≥80%
* 分析流程自动化运行(全程无需人工干预、异常处理完善)。
* **Web应用与服务**
* 前端界面响应时间<1秒支持并发用户500
* API接口可用性≥99.9%、平均响应时间<500ms
* 实时数据推送延迟<100ms
* **系统稳定性与可靠性:**
* 连续运行≥24小时无故障、内存泄漏<1MB/小时
* 系统可用性≥99.5%、平均故障恢复时间<30分钟
3. **交付物验收标准:**
* 所有设计文件系统架构图、数据库设计、API文档、源代码、SDK/示例与技术文档齐全、规范,与最终成果一致。
4. **验收程序:**
* 乙方提交书面验收申请及自测报告。
* 甲方根据本合同第五条第2、3款标准进行测试验证。
* 双方根据测试结果共同签署《项目验收报告》。
5. **验收异议处理:**
* 若验收不合格甲方应出具书面意见乙方应在15个工作日内进行修复并重新提交验收。若非乙方原因导致双方协商解决。
# 六、知识产权和保密条款
1. **知识产权归属:**
* **背景知识产权:** 双方在本项目前各自拥有的知识产权仍归各自所有。
* **项目成果知识产权:** 在本项目下由乙方开发完成的、与项目成果相关的全部知识产权(包括但不限于专利申请权、技术秘密、软件著作权、版权等)归甲方所有。乙方仅在为甲方提供服务期间享有使用权,服务结束后无权继续使用。
* 乙方不得将项目成果用于为其他客户提供服务,或进行任何形式的商业化利用。
2. **保密义务:**
* 双方应对在合作中知悉的对方商业秘密和技术秘密(包括本合同内容)负有保密义务。未经对方书面许可,不得向任何第三方泄露。
* 保密期限自本合同签订之日起至合同终止后 **三** 年。
# 七、技术服务和支持
1. **质保期:** 自项目最终验收合格之日起,乙方提供为期 **[ 3 ]** 个月的免费技术支持和缺陷修复服务。
2. **技术培训:** 乙方需为甲方技术人员提供一次免费的技术培训,内容包括系统操作、部署配置方法和软件架构讲解。
# 八、违约责任
1. **乙方违约:** 若乙方逾期交付,每逾期一日,应向甲方支付合同总金额 **0.1%** 的违约金,但违约金总额不超过合同总金额的 **20%**。若逾期超过30日甲方有权单方面解除合同。
2. **甲方违约:** 若甲方逾期支付款项,每逾期一日,应向乙方支付应付未付款项 **0.1%** 的违约金。
# 九、不可抗力
1. 因发生地震、战争、疫情等不可抗力事件导致本合同无法履行的遭遇不可抗力的一方应在事件发生后15日内通知对方并提供相关证明。双方可根据影响程度协商延期履行或解除合同互不承担违约责任。
# 十、争议解决
1. 凡因本合同引起的或与本合同有关的任何争议,双方应友好协商解决。
2. 协商不成的,任何一方均可向 **[甲方]** 所在地人民法院提起诉讼。
# 十一、其他约定事项
1. 本合同未尽事宜,双方可另行签订补充协议,补充协议与本合同具有同等法律效力。
2. 本合同附件是本合同不可分割的组成部分。
3. **持续开发条款:** 本项目验收合格后乙方享有优先开发权可为甲方提供持续开发服务包括但不限于引入其他数据源、接入其他API、扩展到跨市场如美国股票、日本股票、港股等。持续开发的范围、内容、时间和费用由双方另行协商确定并签订补充协议。
# 十二、合同生效
1. 本合同一式 **肆** 份,甲乙双方各执 **贰** 份,具有同等法律效力。
2. 本合同自双方签字盖章之日起生效。
---
**甲方(盖章):**
**法定代表人或授权代表(签字):**
**日期:**
---
**乙方(盖章):**
**法定代表人或授权代表(签字):**
**日期:**
---
# 附件
1. 《技术要求说明书(含验收标准)》
2. 《付款时间节点说明》
# 技术要求说明书
## 一、项目概述
本项目为"股票AI分析系统"目标是构建智能化股票分析与投资决策支持平台完成多数据源金融数据采集与处理系统集成主流大语言模型进行专业投资分析实现9个分析模块的自动化分析流程公司简介、基本面分析、看涨看跌分析等形成智能分析系统、Web可视化界面与API服务的完整方案达到分析质量、系统稳定性和用户体验等指标。
### 1.1 关键技术及其创新之处
- 多数据源金融数据融合与实时处理技术,实现海量异构数据的统一管理和高效查询;
- 大语言模型集成技术集成Google Gemini、OpenAI GPT等多主流大模型进行专业投资分析
- 智能分析框架设计实现9个专业分析模块的自动化分析流程公司简介、基本面分析、看涨看跌分析等
- 实时Web应用架构设计支持高并发访问和实时数据推送提升用户交互体验。
## 二、技术规格要求
### 2.0 主要技术指标或经济指标
- 分析质量评分≥80%(专业分析内容的质量评估);
- 系统响应时间:<500msAPI接口平均响应时间
- 数据处理能力≥1000QPS并发查询处理能力
- 系统可用性≥99.5%(全年运行时间占比);
- 用户并发支持≥500人同时在线用户数
### 2.1 数据采集与处理系统规格
#### 2.1.1 数据源接口
- **支持数据源:** Tushare、Finnhub、Yahoo Finance等多数据源
- **数据类型:** 股票价格、交易量、财务数据、宏观经济指标等;
- **更新频率:** 支持实时数据获取与批量历史数据导入;
- **API稳定性** 数据获取成功率≥99.5%、异常处理机制完善。
#### 2.1.2 数据存储架构
- **数据库类型:** 时序数据库TimescaleDB/InfluxDB+关系型数据库PostgreSQL
- **数据容量:** 支持亿级数据记录存储与快速查询;
- **备份策略:** 自动备份与灾难恢复机制;
- **性能指标:** 查询响应时间<100ms并发处理1000QPS
#### 2.1.3 数据质量控制
- **清洗规则:** 异常值检测、缺失值填充、数据标准化;
- **质量监控:** 实时监控数据完整性和准确性;
- **处理效率:** 支持TB级数据处理处理时间控制在合理范围内。
### 2.2 大模型集成与分析规格
#### 2.2.1 大模型集成
- **支持大模型:** 集成Google Gemini、OpenAI GPT、腾讯混元、字节跳动豆包等主流大语言模型
- **API接口** 提供统一的模型调用接口,支持多模型切换和负载均衡;
- **模型配置:** 支持不同模型的参数配置包括温度、最大token数等
- **调用优化:** 实现模型调用缓存、并发控制和错误重试机制。
#### 2.2.2 专业分析框架
- **分析模块:** 实现9个专业分析模块公司简介、基本面分析、看涨分析、看跌分析、市场分析、新闻分析、交易分析、内部人动向、最终结论
- **分析流程:** 基于大模型的智能分析,支持多轮对话和上下文理解;
- **质量控制:** 建立分析结果一致性检查和人工审核机制;
- **分析优化:** 支持分析prompt优化和结果格式标准化。
### 2.3 Web应用与API服务规格
#### 2.3.1 前端界面开发
- **技术栈:** React/Next.js + TypeScript + Tailwind CSS
- **功能组件:** 图表展示、数据表格、实时更新、用户交互界面;
- **响应式设计:** 支持PC端和移动端适配
- **用户体验:** 界面响应时间<1秒支持流畅的数据可视化
#### 2.3.2 后端API服务
- **框架选择:** FastAPI/Pyramid + Python支持异步处理
- **API设计** RESTful API规范支持数据查询、模型调用、结果缓存
- **安全性:** JWT认证、API限流、数据加密传输
- **性能优化:** 响应时间<500ms支持高并发访问
#### 2.3.3 系统集成与运维
- **容器化部署:** Docker + Kubernetes支持自动化部署和扩缩容
- **监控告警:** 实时监控系统状态、性能指标和错误日志;
- **数据备份:** 自动备份策略,确保数据安全和快速恢复;
- **扩展性:** 模块化设计,支持功能扩展和第三方集成。
## 三、性能指标要求
### 3.1 大模型分析性能指标
- **分析质量:** 专业分析内容完整性和准确性≥85%
- **响应效率:** 单个分析模块响应时间<30秒
- **一致性:** 相同输入多次分析结果一致性≥90%。
### 3.2 系统性能指标
- **响应时间:** API接口平均响应时间<500ms
- **并发处理:** 支持≥1000并发查询请求
- **数据吞吐:** 实时数据处理能力≥10,000条/秒。
### 3.3 稳定性与可靠性指标
- **系统可用性:** 年运行时间≥99.5%
- **故障恢复:** 平均故障恢复时间<30分钟
- **数据安全性:** 数据加密存储,满足金融行业安全标准。
## 四、开发交付要求
### 4.1 系统架构交付物
1. **设计文件:**
- 系统整体架构设计图(含数据流、模块关系、接口设计);
- 数据库设计文档(含表结构、索引设计、数据关系图);
- API接口设计文档含接口规范、数据格式、调用示例
2. **部署配置:**
- Docker容器化配置与Kubernetes部署脚本
- 环境配置说明与依赖包清单;
- 监控告警配置模板。
3. **测试环境:**
- 开发测试环境一套(含数据库、应用服务器配置);
- 测试数据样本与测试用例。
### 4.2 大模型与分析交付物
1. **源代码:**
- 数据采集处理源代码(含数据接口、清洗算法、存储逻辑);
- 大模型集成与分析源代码含API调用、多模型管理、分析流程
- Web应用源代码前端React/Next.js、后端FastAPI代码
- 编译/构建脚本与环境配置说明;
- 版本控制记录Git历史记录
2. **可执行程序:**
- 容器化应用镜像Docker images
- 大模型API调用服务与缓存系统
- 安装部署脚本与自动化运维工具。
3. **配置与文档:**
- 大模型API配置模板支持多模型切换
- 分析prompt模板与优化记录
- 分析结果质量评估报告与优化建议。
### 4.3 文档交付物
1. **技术文档:**
- 系统架构与数据库设计文档;
- 大模型集成与分析设计说明书;
- API接口与数据流说明书
- 部署运维与监控配置手册。
2. **用户文档:**
- 系统安装部署与使用手册;
- Web界面操作指南与功能说明
- API调用示例与集成指南
- 故障排除与技术支持手册。
3. **测试与验证文档:**
- 模型训练测试报告与性能评估;
- 系统功能测试用例与验收报告;
- 安全测试与渗透测试报告;
- 性能压力测试结果分析。
## 五、验收测试标准
### 5.1 数据处理与存储验收测试
1. **数据采集接口测试:**
- 多数据源API连接稳定性测试成功率≥99.5%
- 数据获取实时性测试(响应时间<2秒
- 数据质量校验测试完整性≥99%、准确性≥99%
- 异常处理机制测试(错误恢复时间<30秒
2. **数据库性能测试:**
- 数据存储与查询性能测试(响应时间<100ms
- 并发访问压力测试支持≥1000并发
- 数据备份恢复测试(恢复时间<1小时
- 数据安全性测试(加密存储、访问控制)。
### 5.2 大模型与分析验收测试
1. **大模型集成与分析功能:**
- 大模型API集成测试调用成功率≥99%、响应时间<5秒
- 9个分析模块功能测试公司简介、基本面分析、看涨看跌分析等
- 分析结果质量测试内容完整性≥95%、专业性评分≥80%
- 分析流程自动化测试(全程无需人工干预、异常处理完善)。
2. **分析优化与验证:**
- 多模型对比测试分析结果一致性≥85%、性能差异分析);
- Prompt优化效果测试分析质量提升≥10%
- 缓存机制测试重复查询响应加速≥50%)。
### 5.3 Web应用与服务验收测试
1. **端到端功能:**
- 用户注册登录与权限管理功能;
- 实时数据展示与图表交互功能;
- AI模型调用与结果展示功能
- 系统长时间稳定性测试≥24小时连续运行
2. **性能指标达成:**
- 前端界面响应时间<1秒支持500并发用户
- API接口可用性≥99.9%、平均响应时间<500ms
- 系统整体可用性≥99.5%、平均故障恢复时间<30分钟
## 六、质量保证要求
### 6.1 开发过程质量控制
- **设计评审:** 关键设计节点须进行评审
- **代码规范:** 严格遵循编码规范和最佳实践
- **测试覆盖:** 单元测试覆盖率≥80%
### 6.2 交付质量标准
- **文档完整性:** 所有交付文档必须完整、准确
- **代码质量:** 通过静态代码分析,无严重缺陷
- **产品一致性:** 交付系统与设计文档完全一致
# 付款时间节点说明
## 一、合同总金额
人民币 **壹拾万元整**(¥ **100,000.00**)
## 二、付款节点
### 2.1 付款进度安排
1. **第一笔(启动预付款):**
* **金额:** 合同总金额的 **50%** ,即人民币 **伍万元**
* **支付条件:** 合同正式签订生效后5个工作日内
* **用途:** 项目启动资金,用于前期设计、开发和实施
2. **第二笔(验收尾款):**
* **金额:** 合同总金额的 **50%** ,即人民币 **伍万元**
* **支付条件:** 项目最终交付并通过甲方验收后5个工作日内
* **验收标准:**
- 所有交付物齐全并符合合同要求
- 数据采集与处理系统正常运行
- 大模型集成与9个分析模块功能完整
- Web应用界面功能完善API服务稳定
- 系统整体性能达到技术要求说明书标准
- 完整技术文档和用户手册交付
### 2.2 付款条件说明
1. **验收标准:** 各阶段付款均需对应的验收标准全部达成
2. **验收时限:** 甲方应在乙方提交验收申请后10个工作日内完成验收
3. **逾期验收:** 甲方逾期验收的,视为自动验收通过
4. **付款延期:** 如遇法定节假日,付款期限顺延至节假日后第一个工作日
## 三、发票说明
### 3.1 发票开具要求
- **发票类型:** 增值税普通发票
- **发票内容:** 技术开发服务费
- **开具时间:** 乙方应在甲方每次付款前开具相应金额的发票
- **发票信息:** 按甲方提供的开票资料准确开具
### 3.2 税务处理
- **税率:** 乙方为小规模纳税人按照3%征收率计算增值税
- **纳税义务:** 乙方承担增值税纳税义务;甲方为一般纳税人的,可向税务机关申请抵扣
## 四、付款方式及账户信息
### 4.1 付款方式
- **付款方式:** 银行电汇转账
- **到账确认:** 以乙方银行账户实际到账时间为准
- **汇款凭证:** 甲方应保留银行转账凭证作为付款证明
### 4.2 收款账户信息
- **收款单位:** 上海棱芯人工智能科技有限公司
- **开户银行:** 招商银行股份有限公司上海徐汇滨江支行
- **银行账号:** 121986556710001
## 五、其他说明
### 5.1 合同变更
- 如需变更付款金额或时间节点,须经双方书面确认
- 因甲方原因导致的项目变更,相应调整付款安排
### 5.2 风险控制
- 乙方有权在未收到约定款项的情况下暂停相应工作
- 甲方逾期付款超过30日的乙方有权解除合同
### 5.3 争议处理
- 付款相关争议优先通过友好协商解决
- 协商不成的,**按主合同争议解决条款处理**

View File

@ -27,13 +27,20 @@ export default function ConfigPage() {
const [dbUrl, setDbUrl] = useState('');
const [newApiApiKey, setNewApiApiKey] = useState('');
const [newApiBaseUrl, setNewApiBaseUrl] = useState('');
const [alphaEngineApiUrl, setAlphaEngineApiUrl] = useState('');
const [alphaEngineApiKey, setAlphaEngineApiKey] = useState('');
const [alphaEngineToken, setAlphaEngineToken] = useState('');
const [alphaEngineUserId, setAlphaEngineUserId] = useState('');
const [tushareApiKey, setTushareApiKey] = useState('');
const [finnhubApiKey, setFinnhubApiKey] = useState('');
// 分析配置的本地状态
// 全局 LLM 配置
const [llmProvider, setLlmProvider] = useState('');
const [llmModel, setLlmModel] = useState('');
// 分析配置的本地状态(移除 provider 和 model
const [localAnalysisConfig, setLocalAnalysisConfig] = useState<Record<string, {
name: string;
model: string;
prompt_template: string;
dependencies?: string[];
}>>({});
@ -52,12 +59,40 @@ export default function ConfigPage() {
// 初始化分析配置的本地状态
useEffect(() => {
if (analysisConfig?.analysis_modules) {
setLocalAnalysisConfig(analysisConfig.analysis_modules);
// 移除每个模块的 provider 和 model 字段
const cleanedConfig: typeof localAnalysisConfig = {};
Object.entries(analysisConfig.analysis_modules).forEach(([key, value]: [string, any]) => {
cleanedConfig[key] = {
name: value.name || '',
prompt_template: value.prompt_template || '',
dependencies: value.dependencies || []
};
});
setLocalAnalysisConfig(cleanedConfig);
}
}, [analysisConfig]);
// 更新分析配置中的某个字段
const updateAnalysisField = (type: string, field: 'name' | 'model' | 'prompt_template', value: string) => {
// 初始化全局 LLM 配置(从后端获取)
useEffect(() => {
const loadLlmConfig = async () => {
try {
const response = await fetch('/api/config/llm');
if (response.ok) {
const data = await response.json();
setLlmProvider(data.provider || '');
// 从 provider 配置中获取 model
const providerConfig = data.config || {};
setLlmModel(providerConfig.model || '');
}
} catch (e) {
console.error('Failed to load LLM config:', e);
}
};
loadLlmConfig();
}, []);
// 更新分析配置中的某个字段(移除 provider 和 model
const updateAnalysisField = (type: string, field: 'name' | 'prompt_template', value: string) => {
setLocalAnalysisConfig(prev => ({
...prev,
[type]: {
@ -161,6 +196,15 @@ export default function ConfigPage() {
};
}
if (alphaEngineApiUrl || alphaEngineApiKey || alphaEngineToken) {
newConfig.alpha_engine = {
api_url: alphaEngineApiUrl || config?.alpha_engine?.api_url || '',
api_key: alphaEngineApiKey || config?.alpha_engine?.api_key || '',
token: alphaEngineToken || config?.alpha_engine?.token || '',
user_id: alphaEngineUserId ? parseInt(alphaEngineUserId) : (config?.alpha_engine?.user_id || 999041),
};
}
if (tushareApiKey || finnhubApiKey) {
newConfig.data_sources = {
...config?.data_sources,
@ -216,10 +260,23 @@ export default function ConfigPage() {
handleTest('finnhub', { api_key: finnhubApiKey || config?.data_sources?.finnhub?.api_key });
};
const handleTestAlphaEngine = () => {
handleTest('alpha_engine', {
api_url: alphaEngineApiUrl || config?.alpha_engine?.api_url,
api_key: alphaEngineApiKey || config?.alpha_engine?.api_key,
token: alphaEngineToken || config?.alpha_engine?.token,
user_id: alphaEngineUserId ? parseInt(alphaEngineUserId) : (config?.alpha_engine?.user_id || 999041)
});
};
const handleReset = () => {
setDbUrl('');
setNewApiApiKey('');
setNewApiBaseUrl('');
setAlphaEngineApiUrl('');
setAlphaEngineApiKey('');
setAlphaEngineToken('');
setAlphaEngineUserId('');
setTushareApiKey('');
setFinnhubApiKey('');
setTestResults({});
@ -345,41 +402,188 @@ export default function ConfigPage() {
<Card>
<CardHeader>
<CardTitle>AI </CardTitle>
<CardDescription>New API ( OpenAI )</CardDescription>
<CardDescription></CardDescription>
</CardHeader>
<CardContent className="space-y-4">
<div className="space-y-2">
<Label htmlFor="new-api-key">API Key</Label>
<div className="flex gap-2">
<Input
id="new-api-key"
type="password"
value={newApiApiKey}
onChange={(e) => setNewApiApiKey(e.target.value)}
placeholder="留空表示保持当前值"
className="flex-1"
/>
<Button onClick={handleTestNewApi} variant="outline">
<CardContent className="space-y-6">
{/* 全局 LLM 配置 */}
<div className="space-y-4 p-4 bg-muted/50 rounded-lg">
<h3 className="font-semibold"></h3>
<p className="text-sm text-muted-foreground">
使
</p>
<div className="space-y-4">
<div className="space-y-2">
<Label htmlFor="llm-provider"></Label>
<select
id="llm-provider"
value={llmProvider}
onChange={(e) => setLlmProvider(e.target.value)}
className="flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50"
>
<option value=""></option>
<option value="new_api">New API (OpenAI )</option>
<option value="gemini">Gemini</option>
<option value="alpha_engine">AlphaEngine</option>
</select>
</div>
<div className="space-y-2">
<Label htmlFor="llm-model"></Label>
<Input
id="llm-model"
type="text"
value={llmModel}
onChange={(e) => setLlmModel(e.target.value)}
placeholder="例如: gemini-1.5-pro, deepseek-r1"
/>
<p className="text-xs text-muted-foreground">
</p>
</div>
<Button
onClick={async () => {
if (!llmProvider) {
setSaveMessage('请先选择大模型提供商');
return;
}
try {
const response = await fetch('/api/config/llm', {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
provider: llmProvider,
model: llmModel || undefined
})
});
if (response.ok) {
setSaveMessage('全局 LLM 配置保存成功!');
} else {
const data = await response.json();
setSaveMessage(`保存失败: ${data.detail || '未知错误'}`);
}
} catch (e: any) {
setSaveMessage(`保存失败: ${e.message}`);
}
setTimeout(() => setSaveMessage(''), 5000);
}}
variant="outline"
>
LLM
</Button>
{saveMessage && saveMessage.includes('LLM') && (
<Badge variant={saveMessage.includes('成功') ? 'default' : 'destructive'}>
{saveMessage}
</Badge>
)}
</div>
{testResults.new_api && (
<Badge variant={testResults.new_api.success ? 'default' : 'destructive'}>
{testResults.new_api.message}
</Badge>
)}
</div>
<div className="space-y-2">
<Label htmlFor="new-api-base-url">Base URL</Label>
<Input
id="new-api-base-url"
type="text"
value={newApiBaseUrl}
onChange={(e) => setNewApiBaseUrl(e.target.value)}
placeholder="例如: http://localhost:3000/v1"
className="flex-1"
/>
<Separator />
<div className="space-y-4">
<div>
<Label className="text-base font-medium">New API (OpenAI )</Label>
<p className="text-sm text-muted-foreground mb-2"> OpenAI API </p>
<div className="space-y-2">
<div>
<Label htmlFor="new-api-key">API Key</Label>
<div className="flex gap-2">
<Input
id="new-api-key"
type="password"
value={newApiApiKey}
onChange={(e) => setNewApiApiKey(e.target.value)}
placeholder="留空表示保持当前值"
className="flex-1"
/>
<Button onClick={handleTestNewApi} variant="outline">
</Button>
</div>
{testResults.new_api && (
<Badge variant={testResults.new_api.success ? 'default' : 'destructive'} className="mt-2">
{testResults.new_api.message}
</Badge>
)}
</div>
<div>
<Label htmlFor="new-api-base-url">Base URL</Label>
<Input
id="new-api-base-url"
type="text"
value={newApiBaseUrl}
onChange={(e) => setNewApiBaseUrl(e.target.value)}
placeholder="例如: http://localhost:3000/v1"
className="flex-1"
/>
</div>
</div>
</div>
<Separator />
<div>
<Label className="text-base font-medium">AlphaEngine</Label>
<p className="text-sm text-muted-foreground mb-2"> API</p>
<div className="space-y-2">
<div>
<Label htmlFor="alpha-engine-api-url">API URL</Label>
<Input
id="alpha-engine-api-url"
type="text"
value={alphaEngineApiUrl}
onChange={(e) => setAlphaEngineApiUrl(e.target.value)}
placeholder="例如: http://api-ai-prod.valuesimplex.tech"
className="flex-1"
/>
</div>
<div>
<Label htmlFor="alpha-engine-api-key">API Key</Label>
<div className="flex gap-2">
<Input
id="alpha-engine-api-key"
type="password"
value={alphaEngineApiKey}
onChange={(e) => setAlphaEngineApiKey(e.target.value)}
placeholder="留空表示保持当前值"
className="flex-1"
/>
<Button onClick={handleTestAlphaEngine} variant="outline">
</Button>
</div>
{testResults.alpha_engine && (
<Badge variant={testResults.alpha_engine.success ? 'default' : 'destructive'} className="mt-2">
{testResults.alpha_engine.message}
</Badge>
)}
</div>
<div>
<Label htmlFor="alpha-engine-token">Token</Label>
<Input
id="alpha-engine-token"
type="password"
value={alphaEngineToken}
onChange={(e) => setAlphaEngineToken(e.target.value)}
placeholder="留空表示保持当前值"
className="flex-1"
/>
</div>
<div>
<Label htmlFor="alpha-engine-user-id">User ID</Label>
<Input
id="alpha-engine-user-id"
type="number"
value={alphaEngineUserId}
onChange={(e) => setAlphaEngineUserId(e.target.value)}
placeholder="默认: 999041"
className="flex-1"
/>
</div>
</div>
</div>
</div>
</CardContent>
</Card>
@ -470,19 +674,6 @@ export default function ConfigPage() {
/>
</div>
<div className="space-y-2">
<Label htmlFor={`${type}-model`}></Label>
<Input
id={`${type}-model`}
value={config.model || ''}
onChange={(e) => updateAnalysisField(type, 'model', e.target.value)}
placeholder="例如: gemini-1.5-pro"
/>
<p className="text-xs text-muted-foreground">
AI
</p>
</div>
<div className="space-y-2">
<Label></Label>
<div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 gap-2 rounded-lg border p-4">

View File

@ -20,3 +20,5 @@

View File

@ -110,6 +110,9 @@ export default function ReportPage() {
const [saving, setSaving] = useState(false)
const [saveMsg, setSaveMsg] = useState<string | null>(null)
// TradingView 显示控制
const [showTradingView, setShowTradingView] = useState(false)
const saveReport = async () => {
try {
setSaving(true)
@ -155,6 +158,9 @@ export default function ReportPage() {
return;
}
// 标记已触发分析
fullAnalysisTriggeredRef.current = true;
// 初始化/重置状态,准备顺序执行
stopRequestedRef.current = false;
abortControllerRef.current?.abort();
@ -182,12 +188,7 @@ export default function ReportPage() {
setManualRunKey((k) => k + 1);
};
useEffect(() => {
if (financials && !fullAnalysisTriggeredRef.current) {
fullAnalysisTriggeredRef.current = true;
runFullAnalysis();
}
}, [financials]);
// 移除自动开始分析的逻辑,改为手动触发
// 计算完成比例
const completionProgress = useMemo(() => {
@ -796,23 +797,53 @@ export default function ReportPage() {
</TabsList>
<TabsContent value="chart" className="space-y-4">
<h2 className="text-lg font-medium"> TradingView</h2>
<div className="flex items-center gap-3 text-sm mb-4">
<CheckCircle className="size-4 text-green-600" />
<div className="text-muted-foreground">
- {unifiedSymbol}
</div>
<div className="flex items-center justify-between mb-4">
<h2 className="text-lg font-medium"> TradingView</h2>
<Button
onClick={() => setShowTradingView(!showTradingView)}
variant={showTradingView ? "outline" : "default"}
>
{showTradingView ? '隐藏图表' : '显示图表'}
</Button>
</div>
<TradingViewWidget
symbol={unifiedSymbol}
market={marketParam}
height={500}
/>
{showTradingView ? (
<>
<div className="flex items-center gap-3 text-sm mb-4">
<CheckCircle className="size-4 text-green-600" />
<div className="text-muted-foreground">
- {unifiedSymbol}
</div>
</div>
<TradingViewWidget
symbol={unifiedSymbol}
market={marketParam}
height={500}
/>
</>
) : (
<div className="flex items-center justify-center p-8 border border-dashed rounded-lg">
<div className="text-center space-y-2">
<p className="text-muted-foreground">"显示图表" TradingView </p>
<p className="text-xs text-muted-foreground"> TradingView</p>
</div>
</div>
)}
</TabsContent>
<TabsContent value="financial" className="space-y-4">
<h2 className="text-lg font-medium"></h2>
<div className="flex items-center justify-between">
<h2 className="text-lg font-medium"></h2>
{financials && !fullAnalysisTriggeredRef.current && analysisConfig?.analysis_modules && (
<Button
onClick={runFullAnalysis}
disabled={isAnalysisRunningRef.current}
className="ml-auto"
>
</Button>
)}
</div>
<div className="flex items-center gap-3 text-sm">
{isLoading ? (
<Spinner className="size-4" />
@ -834,6 +865,25 @@ export default function ReportPage() {
</div>
)}
{financials && !fullAnalysisTriggeredRef.current && analysisConfig?.analysis_modules && (
<div className="bg-blue-50 border border-blue-200 rounded-lg p-4">
<div className="flex items-start gap-3">
<div className="flex-1">
<h3 className="font-medium text-blue-900 mb-1"></h3>
<p className="text-sm text-blue-700">
"开始分析"
</p>
</div>
<Button
onClick={runFullAnalysis}
disabled={isAnalysisRunningRef.current}
>
</Button>
</div>
</div>
)}
{financials && (

View File

@ -42,3 +42,5 @@ if (process.env.NODE_ENV !== 'production') globalForPrisma.prisma = prisma