# Covered by data-persistence-service tests (db/api). No references or compose entries.
import logging
|
|
import os
|
|
import json
|
|
from typing import Dict
|
|
from fastapi import APIRouter, BackgroundTasks, HTTPException
|
|
|
|
# Lazy loader for DataManager.
# The real manager is imported on first use so this module can still be
# imported (tests, minimal deployments) when the data layer is unavailable.
_dm = None


def get_dm():
    """Return the shared DataManager instance, resolving it on first call.

    Tries to import the real ``app.data_manager.data_manager`` singleton.
    If that import fails for any reason, logs the failure and falls back to
    an inert stub whose async methods return empty results, so callers
    degrade gracefully instead of crashing. Whichever object is chosen is
    cached in the module-level ``_dm`` and reused on later calls.
    """
    global _dm
    if _dm is not None:
        return _dm
    try:
        from app.data_manager import data_manager as real_dm
        _dm = real_dm
    except Exception:
        # Don't swallow the import failure silently — record why we are
        # degrading, then install the stub so the pipeline keeps working.
        logging.getLogger(__name__).warning(
            "Real DataManager unavailable; using stub.", exc_info=True
        )

        class _StubDM:
            async def get_stock_basic(self, stock_code: str):
                # Mirrors the real API: no basic info available.
                return None

            async def get_financial_statements(self, stock_code: str, report_dates):
                # Mirrors the real API: no statements available.
                return []

        _dm = _StubDM()
    return _dm
|
|
|
|
from app.services.analysis_client import AnalysisClient, load_analysis_config
|
|
|
|
# Router and module logger shared by the endpoints below.
router = APIRouter()
logger = logging.getLogger(__name__)

# Constants for config paths.
# REPO_ROOT walks three levels up from this file to the repository root
# (assumes this module lives at <repo>/<pkg>/<subpkg>/<module>.py — TODO confirm depth).
REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
# Base application config (LLM provider/credentials) read by run_full_analysis.
BASE_CONFIG_PATH = os.path.join(REPO_ROOT, "config", "config.json")
|
|
|
|
def _load_json(path: str) -> Dict:
|
|
if not os.path.exists(path):
|
|
return {}
|
|
try:
|
|
with open(path, "r", encoding="utf-8") as f:
|
|
return json.load(f)
|
|
except Exception:
|
|
return {}
|
|
|
|
async def run_full_analysis(org_id: str):
    """
    Asynchronous task to run a full analysis for a given stock.

    This function is market-agnostic and relies on DataManager.

    Steps:
      1. Load LLM credentials and the analysis-module configuration;
         abort early if either is missing.
      2. Resolve the company name via DataManager (falls back to org_id).
      3. Fetch annual financial statements for the last five completed years.
      4. Run each configured analysis module through AnalysisClient,
         feeding all earlier module results into later modules as context.
      5. Log the combined results (database persistence is still a TODO).

    Args:
        org_id: Stock/organization code understood by DataManager.
    """
    logger.info(f"Starting full analysis task for {org_id}")

    # 1. Load configurations
    base_cfg = _load_json(BASE_CONFIG_PATH)
    # Provider defaults to "gemini" when the config omits it; provider-specific
    # settings live under base_cfg["llm"][<provider>].
    llm_provider = base_cfg.get("llm", {}).get("provider", "gemini")
    llm_config = base_cfg.get("llm", {}).get(llm_provider, {})

    api_key = llm_config.get("api_key")
    base_url = llm_config.get("base_url")

    # Without an API key no module can run — abort once here instead of
    # failing on every per-module LLM call.
    if not api_key:
        logger.error(f"API key for {llm_provider} not configured. Aborting analysis for {org_id}.")
        return

    analysis_config_full = load_analysis_config()
    modules_config = analysis_config_full.get("analysis_modules", {})
    if not modules_config:
        logger.error(f"Analysis modules configuration not found. Aborting analysis for {org_id}.")
        return

    # 2. Fetch basic company info (name)
    try:
        basic_data = await get_dm().get_stock_basic(stock_code=org_id)
        company_name = basic_data.get("name", org_id) if basic_data else org_id
        logger.info(f"Got company name for {org_id}: {company_name}")
    except Exception as e:
        # Name lookup is cosmetic (used in prompts); keep going with the code.
        logger.warning(f"Failed to get company name for {org_id}. Using org_id as name. Error: {e}")
        company_name = org_id

    # 3. Fetch financial data
    financial_data = None
    try:
        # You might want to make the date range configurable
        from datetime import datetime
        current_year = datetime.now().year
        # Annual report dates (Dec 31) for the five most recent completed years;
        # the range excludes the current, still-unreported year.
        report_dates = [f"{year}1231" for year in range(current_year - 5, current_year)]

        financial_statements = await get_dm().get_financial_statements(stock_code=org_id, report_dates=report_dates)
        if financial_statements:
            financial_data = {"series": financial_statements}
            logger.info(f"Successfully fetched financial statements for {org_id}")
        else:
            logger.warning(f"Could not fetch financial statements for {org_id}")
    except Exception as e:
        # Modules can still run without financials; financial_data stays None.
        logger.error(f"Error fetching financial data for {org_id}: {e}")

    # 4. Execute analysis modules in order (simplified, assumes no complex dependencies for now)
    # Note: A full implementation would need the topological sort from the financial router.
    analysis_results = {}
    for module_type, module_config in modules_config.items():
        logger.info(f"Running analysis module: {module_type} for {org_id}")
        client = AnalysisClient(
            api_key=api_key,
            base_url=base_url,
            model=module_config.get("model", "gemini-1.5-flash")
        )

        # Simplified context: use results from all previously completed modules
        context = analysis_results.copy()

        result = await client.generate_analysis(
            analysis_type=module_type,
            company_name=company_name,
            ts_code=org_id,
            prompt_template=module_config.get("prompt_template", ""),
            financial_data=financial_data,
            context=context,
        )

        if result.get("success"):
            analysis_results[module_type] = result.get("content", "")
            logger.info(f"Module {module_type} for {org_id} completed successfully.")
        else:
            logger.error(f"Module {module_type} for {org_id} failed: {result.get('error')}")
            # Store error message to avoid breaking dependencies that might handle missing data
            analysis_results[module_type] = f"Error: Analysis for {module_type} failed."

    # 5. Save the final report
    # TODO: Implement database logic to save the `analysis_results` to the report record.
    logger.info(f"Full analysis for {org_id} finished. Results: {json.dumps(analysis_results, indent=2, ensure_ascii=False)}")
|
|
|
|
|
|
@router.post("/{market}/{org_id}/reports/generate")
async def trigger_report_generation(market: str, org_id: str, background_tasks: BackgroundTasks):
    """Queue a background task that generates a full financial report.

    Market-agnostic: the ``market`` path segment is only echoed back in the
    acknowledgement; the analysis itself needs just the organization code.
    """
    logger.info(f"Received report generation request for {org_id} in {market} market.")

    # TODO: Create a report record in the database with "generating" status here.

    # Hand the long-running analysis off to FastAPI's background runner so
    # this endpoint can acknowledge immediately.
    background_tasks.add_task(run_full_analysis, org_id)
    logger.info(f"Queued analysis task for {org_id}.")

    acknowledgement = dict(queued=True, market=market, org_id=org_id)
    return acknowledgement
|