Fundamental_Analysis/backend/app/services/analysis_client.py

137 lines
4.7 KiB
Python

"""
Generic Analysis Client for various analysis types using Gemini API
"""
import time
import json
import os
from typing import Dict, Optional
import google.generativeai as genai
class AnalysisClient:
"""Generic client for generating various types of analysis using Gemini API"""
def __init__(self, api_key: str, model: str = "gemini-2.5-flash"):
"""Initialize Gemini client with API key and model"""
genai.configure(api_key=api_key)
self.model_name = model
self.model = genai.GenerativeModel(model)
async def generate_analysis(
self,
analysis_type: str,
company_name: str,
ts_code: str,
prompt_template: str,
financial_data: Optional[Dict] = None
) -> Dict:
"""
Generate analysis using Gemini API (non-streaming)
Args:
analysis_type: Type of analysis (e.g., "fundamental_analysis")
company_name: Company name
ts_code: Stock code
prompt_template: Prompt template with placeholders {company_name}, {ts_code}, {financial_data}
financial_data: Optional financial data for context
Returns:
Dict with analysis content and metadata
"""
start_time = time.perf_counter_ns()
# Build prompt from template
prompt = self._build_prompt(
prompt_template,
company_name,
ts_code,
financial_data
)
# Call Gemini API (using sync API in async context)
try:
import asyncio
loop = asyncio.get_event_loop()
response = await loop.run_in_executor(
None,
lambda: self.model.generate_content(prompt)
)
# Get token usage
usage_metadata = response.usage_metadata if hasattr(response, 'usage_metadata') else None
elapsed_ms = int((time.perf_counter_ns() - start_time) / 1_000_000)
return {
"content": response.text,
"model": self.model_name,
"tokens": {
"prompt_tokens": usage_metadata.prompt_token_count if usage_metadata else 0,
"completion_tokens": usage_metadata.candidates_token_count if usage_metadata else 0,
"total_tokens": usage_metadata.total_token_count if usage_metadata else 0,
} if usage_metadata else {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"elapsed_ms": elapsed_ms,
"success": True,
"analysis_type": analysis_type,
}
except Exception as e:
elapsed_ms = int((time.perf_counter_ns() - start_time) / 1_000_000)
return {
"content": "",
"model": self.model_name,
"tokens": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"elapsed_ms": elapsed_ms,
"success": False,
"error": str(e),
"analysis_type": analysis_type,
}
def _build_prompt(
self,
prompt_template: str,
company_name: str,
ts_code: str,
financial_data: Optional[Dict] = None
) -> str:
"""Build prompt from template by replacing placeholders"""
# Format financial data as string if provided
financial_data_str = ""
if financial_data:
try:
financial_data_str = json.dumps(financial_data, ensure_ascii=False, indent=2)
except Exception:
financial_data_str = str(financial_data)
# Replace placeholders in template
prompt = prompt_template.format(
company_name=company_name,
ts_code=ts_code,
financial_data=financial_data_str
)
return prompt
def load_analysis_config() -> Dict:
"""Load analysis configuration from JSON file"""
# Get project root: backend/app/services -> project_root/config/analysis-config.json
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
config_path = os.path.join(project_root, "config", "analysis-config.json")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r", encoding="utf-8") as f:
return json.load(f)
except Exception:
return {}
def get_analysis_config(analysis_type: str) -> Optional[Dict]:
"""Get configuration for a specific analysis type"""
config = load_analysis_config()
modules = config.get("analysis_modules", {})
return modules.get(analysis_type)