- Fix `simple_test_analysis` template in E2E test setup to align with Orchestrator's data fetch logic.
- Implement and verify additional E2E scenarios:
- Scenario C: Partial Provider Failure (verified error propagation fix in Orchestrator).
- Scenario D: Invalid Symbol input.
- Scenario E: Analysis Module failure.
- Update `WorkflowStateMachine::handle_report_failed` to scope error broadcasting to the specific failed task, instead of either swallowing the error silently or broadcasting it to all tasks.
- Update testing strategy documentation to reflect completed Phase 4 testing.
- Skip Scenario B (Orchestrator Restart) as persistence is not yet implemented (decision made to defer persistence).
304 lines · 11 KiB · TypeScript
import { useState, useRef, useEffect, useMemo } from 'react';
|
||
import { useDataRequest, useTaskProgress, useAnalysisResults } from '@/hooks/useApi';
|
||
|
||
/**
 * UI-facing state for a single analysis module: its rendered content plus
 * loading/error flags derived from the backend result's `meta_data.status`.
 */
interface AnalysisState {
  // Text content returned by the backend for this module.
  content: string;
  // True while the backend reports the module as 'processing'.
  loading: boolean;
  // Error message when the backend reports 'error'; null otherwise.
  error: string | null;
  // Optional elapsed time in milliseconds for this module's run.
  elapsed_ms?: number;
}
/**
 * One row of the "Execution Details" view: the lifecycle of a single analysis
 * module within the active request batch, derived from backend results.
 */
interface AnalysisRecord {
  // Module id (the backend result's `module_id`).
  type: string;
  // Display name from the analysis config, falling back to the module id.
  name: string;
  // Lifecycle status mapped from the backend's `meta_data.status`
  // ('processing' -> 'running', 'error' -> 'error', otherwise 'done').
  status: 'pending' | 'running' | 'done' | 'error';
  // Start timestamp — NOTE(review): not populated anywhere in this file; confirm producer.
  start_ts?: string;
  // End timestamp — NOTE(review): not populated anywhere in this file; confirm producer.
  end_ts?: string;
  // Wall-clock duration in ms, taken from `meta_data.elapsed_ms` when the backend provides it.
  duration_ms?: number;
  // Token usage, taken from `meta_data.tokens` when the backend provides it.
  tokens?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
  // Error message from `meta_data.error` when the module failed.
  error?: string;
}
export function useAnalysisRunner(
|
||
financials: any,
|
||
financialConfig: any,
|
||
normalizedMarket: string,
|
||
unifiedSymbol: string,
|
||
isLoading: boolean,
|
||
error: any,
|
||
templateSets: any // Added templateSets
|
||
) {
|
||
// --- Template Logic ---
|
||
const [selectedTemplateId, setSelectedTemplateId] = useState<string>('');
|
||
const reportTemplateId = financials?.meta?.template_id;
|
||
|
||
// Sync selected template with report template when report loads
|
||
useEffect(() => {
|
||
if (reportTemplateId) {
|
||
setSelectedTemplateId(reportTemplateId);
|
||
}
|
||
}, [reportTemplateId]);
|
||
|
||
// Set default template if nothing selected and no report template
|
||
useEffect(() => {
|
||
if (!selectedTemplateId && !reportTemplateId && templateSets && Object.keys(templateSets).length > 0) {
|
||
const defaultId = Object.keys(templateSets).find(k => k.includes('standard') || k === 'default') || Object.keys(templateSets)[0];
|
||
setSelectedTemplateId(defaultId);
|
||
}
|
||
}, [templateSets, selectedTemplateId, reportTemplateId]);
|
||
|
||
// Determine active template set
|
||
const activeTemplateId = selectedTemplateId;
|
||
|
||
const activeTemplateSet = useMemo(() => {
|
||
if (!activeTemplateId || !templateSets) return null;
|
||
return templateSets[activeTemplateId] || null;
|
||
}, [activeTemplateId, templateSets]);
|
||
|
||
// Derive effective analysis config from template set, falling back to global config if needed
|
||
const activeAnalysisConfig = useMemo(() => {
|
||
if (activeTemplateSet) {
|
||
return {
|
||
...financialConfig,
|
||
analysis_modules: activeTemplateSet.modules,
|
||
};
|
||
}
|
||
return financialConfig; // Fallback to global config (legacy behavior)
|
||
}, [activeTemplateSet, financialConfig]);
|
||
|
||
// 分析类型列表
|
||
const analysisTypes = useMemo(() => {
|
||
if (!activeAnalysisConfig?.analysis_modules) return [];
|
||
return Object.keys(activeAnalysisConfig.analysis_modules);
|
||
}, [activeAnalysisConfig]);
|
||
|
||
// 分析状态管理
|
||
const [analysisStates, setAnalysisStates] = useState<Record<string, AnalysisState>>({});
|
||
|
||
const fullAnalysisTriggeredRef = useRef<boolean>(false);
|
||
const isAnalysisRunningRef = useRef<boolean>(false);
|
||
const analysisFetchedRefs = useRef<Record<string, boolean>>({});
|
||
const stopRequestedRef = useRef<boolean>(false);
|
||
const abortControllerRef = useRef<AbortController | null>(null);
|
||
const currentAnalysisTypeRef = useRef<string | null>(null);
|
||
const [manualRunKey, setManualRunKey] = useState(0);
|
||
|
||
// 当前正在执行的分析任务
|
||
const [currentAnalysisTask, setCurrentAnalysisTask] = useState<string | null>(null);
|
||
|
||
// 计时器状态
|
||
const [startTime, setStartTime] = useState<number | null>(null);
|
||
const [elapsedSeconds, setElapsedSeconds] = useState(0);
|
||
|
||
// 分析执行记录
|
||
const [analysisRecords, setAnalysisRecords] = useState<AnalysisRecord[]>([]);
|
||
|
||
// 新架构:触发分析与查看任务进度
|
||
const { trigger: triggerAnalysisRequest, isMutating: triggering } = useDataRequest();
|
||
const [requestId, setRequestId] = useState<string | null>(null);
|
||
const { progress: taskProgress } = useTaskProgress(requestId);
|
||
|
||
// 引入 Analysis Results 轮询
|
||
const { data: newAnalysisResults } = useAnalysisResults(unifiedSymbol);
|
||
|
||
// 1. Determine the Active Request ID (The one we want to display)
|
||
const activeRequestId = useMemo(() => {
|
||
// If the user manually triggered a task in this session, prioritize that
|
||
if (requestId) return requestId;
|
||
|
||
// Otherwise, default to the most recent result's request_id from the backend
|
||
// Assuming newAnalysisResults is sorted by created_at DESC
|
||
if (newAnalysisResults && newAnalysisResults.length > 0) {
|
||
return newAnalysisResults[0].request_id;
|
||
}
|
||
return null;
|
||
}, [requestId, newAnalysisResults]);
|
||
|
||
// 2. Filter results for the current batch
|
||
const currentBatchResults = useMemo(() => {
|
||
if (!newAnalysisResults || !activeRequestId) return [];
|
||
return newAnalysisResults.filter(r => r.request_id === activeRequestId);
|
||
}, [newAnalysisResults, activeRequestId]);
|
||
|
||
// 3. Sync analysisStates (Content) from current batch
|
||
// We only update if we have a result for that module in the current batch.
|
||
// If not, we leave it as is (or could clear it if we wanted strict mode).
|
||
// For now, we'll update based on what we find.
|
||
useEffect(() => {
|
||
if (!currentBatchResults) return;
|
||
|
||
setAnalysisStates(prev => {
|
||
const next = { ...prev };
|
||
let hasChanges = false;
|
||
|
||
currentBatchResults.forEach(result => {
|
||
const type = result.module_id;
|
||
const status = result.meta_data?.status || 'success';
|
||
const content = result.content;
|
||
|
||
const currentState = next[type];
|
||
|
||
// Only update if content changed or status changed
|
||
if (
|
||
!currentState ||
|
||
currentState.content !== content ||
|
||
(status === 'processing' && !currentState.loading) ||
|
||
(status === 'success' && currentState.loading) ||
|
||
(status === 'error' && !currentState.error)
|
||
) {
|
||
next[type] = {
|
||
content: content,
|
||
loading: status === 'processing',
|
||
error: status === 'error' ? result.meta_data?.error || 'Unknown error' : null,
|
||
};
|
||
hasChanges = true;
|
||
}
|
||
});
|
||
return hasChanges ? next : prev;
|
||
});
|
||
}, [currentBatchResults]);
|
||
|
||
// 4. Sync analysisRecords (Execution Details) from current batch
|
||
// This ensures Execution Details only shows the relevant modules for the current run.
|
||
useEffect(() => {
|
||
if (!currentBatchResults) return;
|
||
|
||
// If we are starting a new run (triggered), we might want to reset records initially?
|
||
// But currentBatchResults will eventually populate.
|
||
|
||
const records: AnalysisRecord[] = currentBatchResults.map(r => {
|
||
const statusStr = r.meta_data?.status;
|
||
let status: 'pending' | 'running' | 'done' | 'error' = 'done';
|
||
if (statusStr === 'processing') status = 'running';
|
||
else if (statusStr === 'error') status = 'error';
|
||
|
||
return {
|
||
type: r.module_id,
|
||
name: activeAnalysisConfig?.analysis_modules?.[r.module_id]?.name || r.module_id,
|
||
status: status,
|
||
duration_ms: r.meta_data?.elapsed_ms, // Backend needs to provide this in meta_data
|
||
error: r.meta_data?.error,
|
||
tokens: r.meta_data?.tokens // Backend needs to provide this
|
||
};
|
||
});
|
||
|
||
// Sort records to match the defined order in activeAnalysisConfig if possible
|
||
const definedOrder = Object.keys(activeAnalysisConfig?.analysis_modules || {});
|
||
records.sort((a, b) => {
|
||
const idxA = definedOrder.indexOf(a.type);
|
||
const idxB = definedOrder.indexOf(b.type);
|
||
if (idxA === -1) return 1;
|
||
if (idxB === -1) return -1;
|
||
return idxA - idxB;
|
||
});
|
||
|
||
setAnalysisRecords(records);
|
||
}, [currentBatchResults, activeAnalysisConfig]);
|
||
|
||
|
||
// 计算完成比例
|
||
const completionProgress = useMemo(() => {
|
||
const totalTasks = analysisRecords.length;
|
||
if (totalTasks === 0) return 0;
|
||
const completedTasks = analysisRecords.filter(r => r.status === 'done' || r.status === 'error').length;
|
||
return (completedTasks / totalTasks) * 100;
|
||
}, [analysisRecords]);
|
||
|
||
// 总耗时(ms)
|
||
const totalElapsedMs = useMemo(() => {
|
||
const finMs = financials?.meta?.elapsed_ms || 0;
|
||
const analysesMs = analysisRecords.reduce((sum, r) => sum + (r.duration_ms || 0), 0);
|
||
return finMs + analysesMs;
|
||
}, [financials?.meta?.elapsed_ms, analysisRecords]);
|
||
|
||
const hasRunningTask = useMemo(() => {
|
||
if (currentAnalysisTask !== null) return true;
|
||
// Also check analysisRecords derived from backend
|
||
if (analysisRecords.some(r => r.status === 'running')) return true;
|
||
return false;
|
||
}, [currentAnalysisTask, analysisRecords]);
|
||
|
||
// 全部任务是否完成
|
||
const allTasksCompleted = useMemo(() => {
|
||
if (analysisRecords.length === 0) return false;
|
||
const allDoneOrErrored = analysisRecords.every(r => r.status === 'done' || r.status === 'error');
|
||
return allDoneOrErrored && !hasRunningTask && currentAnalysisTask === null;
|
||
}, [analysisRecords, hasRunningTask, currentAnalysisTask]);
|
||
|
||
// 所有任务完成时,停止计时器
|
||
useEffect(() => {
|
||
if (allTasksCompleted) {
|
||
setStartTime(null);
|
||
}
|
||
}, [allTasksCompleted]);
|
||
|
||
useEffect(() => {
|
||
if (!startTime) return;
|
||
const interval = setInterval(() => {
|
||
const now = Date.now();
|
||
const elapsed = Math.floor((now - startTime) / 1000);
|
||
setElapsedSeconds(elapsed);
|
||
}, 1000);
|
||
return () => clearInterval(interval);
|
||
}, [startTime]);
|
||
|
||
const retryAnalysis = async (analysisType: string) => {
|
||
// Retry logic is complicated with the new backend-driven approach.
|
||
// Ideally, we should send a backend command to retry a specific module.
|
||
// For now, we can just re-trigger the whole template or alert the user.
|
||
// Or implementation TODO: Single module retry endpoint.
|
||
alert("单个模块重试功能在新架构中尚未就绪,请重新触发完整分析。");
|
||
};
|
||
|
||
const stopAll = () => {
|
||
// Clean up client-side state
|
||
stopRequestedRef.current = true;
|
||
isAnalysisRunningRef.current = false;
|
||
setStartTime(null);
|
||
// Ideally call backend to cancel job
|
||
};
|
||
|
||
const continuePending = () => {
|
||
// No-op in new architecture basically
|
||
};
|
||
|
||
const triggerAnalysis = async () => {
|
||
const reqId = await triggerAnalysisRequest(unifiedSymbol, normalizedMarket || '', selectedTemplateId);
|
||
if (reqId) {
|
||
setRequestId(reqId);
|
||
setStartTime(Date.now()); // Start timer
|
||
// Reset records to empty or wait for poll?
|
||
// Waiting for poll is safer to avoid flashing old data
|
||
setAnalysisRecords([]);
|
||
}
|
||
};
|
||
|
||
return {
|
||
activeAnalysisConfig, // Exported
|
||
analysisTypes,
|
||
analysisStates,
|
||
analysisRecords,
|
||
currentAnalysisTask,
|
||
triggerAnalysis,
|
||
triggering,
|
||
requestId,
|
||
setRequestId,
|
||
taskProgress,
|
||
startTime,
|
||
elapsedSeconds,
|
||
completionProgress,
|
||
totalElapsedMs,
|
||
stopAll,
|
||
continuePending,
|
||
retryAnalysis,
|
||
hasRunningTask,
|
||
isAnalysisRunning: hasRunningTask, // Simplified
|
||
selectedTemplateId, // Exported
|
||
setSelectedTemplateId, // Exported
|
||
};
|
||
}
|