diff --git a/backend/app/component/environment.py b/backend/app/component/environment.py index 3c43be8c..15ee6a5c 100644 --- a/backend/app/component/environment.py +++ b/backend/app/component/environment.py @@ -23,14 +23,21 @@ def set_user_env_path(env_path: str | None = None): Set user-specific environment path for current thread. If env_path is None, uses default global environment. """ + traceroot_logger.info("Setting user environment path", extra={"env_path": env_path, "exists": env_path and os.path.exists(env_path) if env_path else None}) + if env_path and os.path.exists(env_path): _thread_local.env_path = env_path # Load user-specific environment variables load_dotenv(dotenv_path=env_path, override=True) + traceroot_logger.info("User-specific environment loaded", extra={"env_path": env_path}) else: # Clear thread-local env_path to fall back to global if hasattr(_thread_local, 'env_path'): delattr(_thread_local, 'env_path') + traceroot_logger.info("Reset to default global environment") + + if env_path and not os.path.exists(env_path): + traceroot_logger.warning("User environment path does not exist, falling back to global", extra={"env_path": env_path}) def get_current_env_path() -> str: @@ -54,8 +61,8 @@ def env(key: str, default: Any) -> Any: ... def env(key: str, default=None): """ - Get environment variable. - First checks thread-local user-specific environment, + Get environment variable. + First checks thread-local user-specific environment, then falls back to global environment. """ # If we have a user-specific environment path, try to reload it to get latest values @@ -64,10 +71,14 @@ def env(key: str, default=None): from dotenv import dotenv_values user_env_values = dotenv_values(_thread_local.env_path) if key in user_env_values: - return user_env_values[key] or default - + value = user_env_values[key] or default + traceroot_logger.debug("Environment variable retrieved from user-specific config", extra={"key": key, "env_path": _thread_local.env_path, "has_value": value is not None}) + return value + # Fall back to global environment - return os.getenv(key, default) + value = os.getenv(key, default) + traceroot_logger.debug("Environment variable retrieved from global config", extra={"key": key, "has_value": value is not None, "using_default": value == default}) + return value def env_or_fail(key: str): diff --git a/backend/app/controller/health_controller.py b/backend/app/controller/health_controller.py index 296655bf..87ea5e2d 100644 --- a/backend/app/controller/health_controller.py +++ b/backend/app/controller/health_controller.py @@ -1,5 +1,8 @@ from fastapi import APIRouter from pydantic import BaseModel +from utils import traceroot_wrapper as traceroot + +logger = traceroot.get_logger("health_controller") router = APIRouter(tags=["Health"]) @@ -12,5 +15,8 @@ class HealthResponse(BaseModel): @router.get("/health", name="health check", response_model=HealthResponse) async def health_check(): """Health check endpoint for verifying backend is ready to accept requests.""" - return HealthResponse(status="ok", service="eigent") + logger.debug("Health check requested") + response = HealthResponse(status="ok", service="eigent") + logger.debug("Health check completed", extra={"status": response.status, "service": response.service}) + return response diff --git a/backend/app/service/chat_service.py b/backend/app/service/chat_service.py index 3a02b491..2c2b028e 100644 --- a/backend/app/service/chat_service.py +++ b/backend/app/service/chat_service.py @@ -362,6 +362,9 @@ async def 
step_solve(options: Chat, request: Request, task_lock: TaskLock): # Update the sync_step with new task_id if hasattr(item, 'new_task_id') and item.new_task_id: set_current_task_id(options.project_id, item.new_task_id) + # Reset summary generation flag for new tasks to ensure proper summaries + task_lock.summary_generated = False + logger.info("Reset summary_generated flag for new task", extra={"project_id": options.project_id, "new_task_id": item.new_task_id}) yield sse_json("confirmed", {"question": question}) @@ -385,32 +388,25 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock): context_for_coordinator ) - if not task_lock.summary_generated: - summary_task_agent = task_summary_agent(options) - try: - summary_task_content = await asyncio.wait_for( - summary_task(summary_task_agent, camel_task), timeout=10 - ) - task_lock.summary_generated = True - logger.info("Generated summary for first task", extra={"project_id": options.project_id}) - except asyncio.TimeoutError: - logger.warning("summary_task timeout", extra={"project_id": options.project_id, "task_id": options.task_id}) - # Fallback to a minimal summary to unblock UI - fallback_name = "Task" - content_preview = camel_task.content if hasattr(camel_task, "content") else "" - if content_preview is None: - content_preview = "" - fallback_summary = ( - (content_preview[:80] + "...") if len(content_preview) > 80 else content_preview - ) - summary_task_content = f"{fallback_name}|{fallback_summary}" - task_lock.summary_generated = True - else: - if len(question) > 100: - summary_task_content = f"Task|{question[:97]}..." - else: - summary_task_content = f"Task|{question}" - logger.info("Skipped summary generation for subsequent task", extra={"project_id": options.project_id}) + summary_task_agent = task_summary_agent(options) + try: + summary_task_content = await asyncio.wait_for( + summary_task(summary_task_agent, camel_task), timeout=10 + ) + task_lock.summary_generated = True + logger.info("Generated summary for task", extra={"project_id": options.project_id}) + except asyncio.TimeoutError: + logger.warning("summary_task timeout", extra={"project_id": options.project_id, "task_id": options.task_id}) + # Fallback to a minimal summary to unblock UI + fallback_name = "Task" + content_preview = camel_task.content if hasattr(camel_task, "content") else "" + if content_preview is None: + content_preview = "" + fallback_summary = ( + (content_preview[:80] + "...") if len(content_preview) > 80 else content_preview + ) + summary_task_content = f"{fallback_name}|{fallback_summary}" + task_lock.summary_generated = True yield to_sub_tasks(camel_task, summary_task_content) # tracer.stop() @@ -514,8 +510,10 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock): new_task_state = item.data.get('state', 'unknown') new_task_result = item.data.get('result', '') - - assert camel_task is not None + if camel_task is None: + logger.error(f"NEW_TASK_STATE action received but camel_task is None for project {options.project_id}, task {new_task_id}") + yield sse_json("error", {"message": "Cannot process new task state: current task not initialized."}) + continue old_task_content: str = camel_task.content old_task_result: str = await get_task_result_with_optional_summary(camel_task, options) @@ -588,11 +586,29 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock): coordinator_context=context_for_multi_turn ) - task_content_for_summary = new_task_content - if len(task_content_for_summary) > 
100: - new_summary_content = f"Follow-up Task|{task_content_for_summary[:97]}..." - else: - new_summary_content = f"Follow-up Task|{task_content_for_summary}" + # Generate proper LLM summary for multi-turn tasks instead of hardcoded fallback + try: + multi_turn_summary_agent = task_summary_agent(options) + new_summary_content = await asyncio.wait_for( + summary_task(multi_turn_summary_agent, camel_task), timeout=10 + ) + logger.info("Generated LLM summary for multi-turn task", extra={"project_id": options.project_id}) + except asyncio.TimeoutError: + logger.warning("Multi-turn summary_task timeout", extra={"project_id": options.project_id, "task_id": task_id}) + # Fallback to descriptive but not generic summary + task_content_for_summary = new_task_content + if len(task_content_for_summary) > 100: + new_summary_content = f"Follow-up Task|{task_content_for_summary[:97]}..." + else: + new_summary_content = f"Follow-up Task|{task_content_for_summary}" + except Exception as e: + logger.error(f"Error generating multi-turn task summary: {e}") + # Fallback to descriptive but not generic summary + task_content_for_summary = new_task_content + if len(task_content_for_summary) > 100: + new_summary_content = f"Follow-up Task|{task_content_for_summary[:97]}..." + else: + new_summary_content = f"Follow-up Task|{task_content_for_summary}" # Send the extracted events yield to_sub_tasks(camel_task, new_summary_content) @@ -666,16 +682,32 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock): ) workforce.resume() elif item.action == Action.end: - assert camel_task is not None + logger.info(f"Processing END action for project {options.project_id}, task {options.task_id}, camel_task exists: {camel_task is not None}, current status: {task_lock.status}") + + # Prevent duplicate end processing + if task_lock.status == Status.done: + logger.warning(f"END action received but task already marked as done for project {options.project_id}, task {options.task_id}. Ignoring duplicate END action.") + continue + + if camel_task is None: + logger.warning(f"END action received but camel_task is None for project {options.project_id}, task {options.task_id}. 
This may indicate multiple END actions or improper task lifecycle management.") + # Use the item data as the final result if camel_task is None + final_result: str = str(item.data) if item.data else "Task completed" + else: + final_result: str = await get_task_result_with_optional_summary(camel_task, options) + task_lock.status = Status.done - final_result: str = await get_task_result_with_optional_summary(camel_task, options) task_lock.last_task_result = final_result - task_content: str = camel_task.content - if "=== CURRENT TASK ===" in task_content: - task_content = task_content.split("=== CURRENT TASK ===")[-1].strip() - + # Handle task content - use fallback if camel_task is None + if camel_task is not None: + task_content: str = camel_task.content + if "=== CURRENT TASK ===" in task_content: + task_content = task_content.split("=== CURRENT TASK ===")[-1].strip() + else: + task_content: str = f"Task {options.task_id}" + task_lock.add_conversation('task_result', { 'task_content': task_content, 'task_result': final_result, @@ -702,8 +734,9 @@ async def step_solve(options: Chat, request: Request, task_lock: TaskLock): # Check if this might be a misrouted second question if camel_task is None: logger.warning(f"SUPPLEMENT action received but camel_task is None for project {options.project_id}") + yield sse_json("error", {"message": "Cannot supplement task: task not initialized. Please start a task first."}) + continue else: - assert camel_task is not None task_lock.status = Status.processing camel_task.add_subtask( Task( diff --git a/backend/app/service/task.py b/backend/app/service/task.py index bc8d16d7..895c7d04 100644 --- a/backend/app/service/task.py +++ b/backend/app/service/task.py @@ -283,30 +283,39 @@ class TaskLock: self.question_agent = None self.current_task_id = None + logger.info("Task lock initialized", extra={"task_id": id, "created_at": self.created_at.isoformat()}) + async def put_queue(self, data: ActionData): self.last_accessed = datetime.now() + logger.debug("Adding item to task queue", extra={"task_id": self.id, "action": data.action}) await self.queue.put(data) async def get_queue(self): self.last_accessed = datetime.now() + logger.debug("Getting item from task queue", extra={"task_id": self.id}) return await self.queue.get() async def put_human_input(self, agent: str, data: Any = None): + logger.debug("Adding human input", extra={"task_id": self.id, "agent": agent, "has_data": data is not None}) await self.human_input[agent].put(data) async def get_human_input(self, agent: str): + logger.debug("Getting human input", extra={"task_id": self.id, "agent": agent}) return await self.human_input[agent].get() def add_human_input_listen(self, agent: str): + logger.debug("Adding human input listener", extra={"task_id": self.id, "agent": agent}) self.human_input[agent] = asyncio.Queue(1) def add_background_task(self, task: asyncio.Task) -> None: r"""Add a task to track and clean up weak references""" + logger.debug("Adding background task", extra={"task_id": self.id, "background_tasks_count": len(self.background_tasks)}) self.background_tasks.add(task) task.add_done_callback(lambda t: self.background_tasks.discard(t)) async def cleanup(self): r"""Cancel all background tasks and clean up resources""" + logger.info("Starting task lock cleanup", extra={"task_id": self.id, "background_tasks_count": len(self.background_tasks)}) for task in list(self.background_tasks): if not task.done(): task.cancel() @@ -315,9 +324,11 @@ class TaskLock: except asyncio.CancelledError: pass 
self.background_tasks.clear() + logger.info("Task lock cleanup completed", extra={"task_id": self.id}) def add_conversation(self, role: str, content: str | dict): """Add a conversation entry to history""" + logger.debug("Adding conversation entry", extra={"task_id": self.id, "role": role, "content_length": len(str(content))}) self.conversation_history.append({ 'role': role, 'content': content, @@ -344,7 +355,9 @@ task_index: dict[str, weakref.ref[Task]] = {} def get_task_lock(id: str) -> TaskLock: if id not in task_locks: + logger.error("Task lock not found", extra={"task_id": id}) raise ProgramException("Task not found") + logger.debug("Task lock retrieved", extra={"task_id": id}) return task_locks[id] @@ -357,12 +370,15 @@ def set_current_task_id(project_id: str, task_id: str) -> None: """Set the current task ID for a project's task lock""" task_lock = get_task_lock(project_id) task_lock.current_task_id = task_id - logger.info(f"Updated current_task_id to {task_id} for project {project_id}") + logger.info("Updated current task ID", extra={"project_id": project_id, "task_id": task_id}) def create_task_lock(id: str) -> TaskLock: if id in task_locks: + logger.warning("Attempting to create task lock that already exists", extra={"task_id": id}) raise ProgramException("Task already exists") + + logger.info("Creating new task lock", extra={"task_id": id}) task_locks[id] = TaskLock(id=id, queue=asyncio.Queue(), human_input={}) # Start cleanup task if not running @@ -370,26 +386,31 @@ def create_task_lock(id: str) -> TaskLock: # if _cleanup_task is None or _cleanup_task.done(): # _cleanup_task = asyncio.create_task(_periodic_cleanup()) + logger.info("Task lock created successfully", extra={"task_id": id, "total_task_locks": len(task_locks)}) return task_locks[id] def get_or_create_task_lock(id: str) -> TaskLock: """Get existing task lock or create a new one if it doesn't exist""" if id in task_locks: + logger.debug("Using existing task lock", extra={"task_id": id}) return task_locks[id] + logger.info("Task lock not found, creating new one", extra={"task_id": id}) return create_task_lock(id) async def delete_task_lock(id: str): if id not in task_locks: + logger.warning("Attempting to delete non-existent task lock", extra={"task_id": id}) raise ProgramException("Task not found") # Clean up background tasks before deletion task_lock = task_locks[id] + logger.info("Cleaning up task lock", extra={"task_id": id, "background_tasks": len(task_lock.background_tasks)}) await task_lock.cleanup() del task_locks[id] - logger.debug(f"Deleted task lock {id}, remaining locks: {len(task_locks)}") + logger.info("Task lock deleted successfully", extra={"task_id": id, "remaining_task_locks": len(task_locks)}) def get_camel_task(id: str, tasks: list[Task]) -> None | Task: diff --git a/backend/app/utils/single_agent_worker.py b/backend/app/utils/single_agent_worker.py index 25f81ae6..a6e82871 100644 --- a/backend/app/utils/single_agent_worker.py +++ b/backend/app/utils/single_agent_worker.py @@ -26,6 +26,13 @@ class SingleAgentWorker(BaseSingleAgentWorker): context_utility: ContextUtility | None = None, enable_workflow_memory: bool = False, ) -> None: + logger.info("Initializing SingleAgentWorker", extra={ + "description": description, + "worker_agent_name": worker.agent_name, + "use_agent_pool": use_agent_pool, + "pool_max_size": pool_max_size, + "enable_workflow_memory": enable_workflow_memory + }) super().__init__( description=description, worker=worker, @@ -61,6 +68,12 @@ class 
SingleAgentWorker(BaseSingleAgentWorker): worker_agent = await self._get_worker_agent() worker_agent.process_task_id = task.id # type: ignore rewrite line + logger.info("Starting task processing", extra={ + "task_id": task.id, + "worker_agent_id": worker_agent.agent_id, + "dependencies_count": len(dependencies) + }) + response_content = "" final_response = None try: diff --git a/backend/app/utils/toolkit/terminal_toolkit.py b/backend/app/utils/toolkit/terminal_toolkit.py index 0868724f..4e6472c5 100644 --- a/backend/app/utils/toolkit/terminal_toolkit.py +++ b/backend/app/utils/toolkit/terminal_toolkit.py @@ -11,6 +11,9 @@ from app.service.task import Action, ActionTerminalData, Agents, get_task_lock from app.utils.listen.toolkit_listen import auto_listen_toolkit from app.utils.toolkit.abstract_toolkit import AbstractToolkit from app.service.task import process_task +from utils import traceroot_wrapper as traceroot + +logger = traceroot.get_logger("terminal_toolkit") @auto_listen_toolkit(BaseTerminalToolkit) @@ -37,11 +40,22 @@ class TerminalToolkit(BaseTerminalToolkit, AbstractToolkit): self.agent_name = agent_name if working_directory is None: working_directory = env("file_save_path", os.path.expanduser("~/.eigent/terminal/")) + + logger.info("Initializing TerminalToolkit", extra={ + "api_task_id": api_task_id, + "agent_name": self.agent_name, + "working_directory": working_directory, + "safe_mode": safe_mode, + "use_docker_backend": use_docker_backend + }) + if TerminalToolkit._thread_pool is None: TerminalToolkit._thread_pool = ThreadPoolExecutor( max_workers=1, thread_name_prefix="terminal_toolkit" ) + logger.debug("Created terminal toolkit thread pool") + super().__init__( timeout=timeout, working_directory=working_directory, @@ -62,6 +76,11 @@ class TerminalToolkit(BaseTerminalToolkit, AbstractToolkit): """ # Convert ANSI escape sequences to plain text super()._write_to_log(log_file, content) + logger.debug("Terminal output logged", extra={ + "api_task_id": self.api_task_id, + "log_file": log_file, + "content_length": len(content) + }) self._update_terminal_output(_to_plain(content)) def _update_terminal_output(self, output: str): diff --git a/backend/app/utils/workforce.py b/backend/app/utils/workforce.py index 44bd9b78..5eb77bca 100644 --- a/backend/app/utils/workforce.py +++ b/backend/app/utils/workforce.py @@ -19,7 +19,6 @@ from app.service.task import ( Action, ActionAssignTaskData, ActionEndData, - ActionNewTaskStateData, ActionTaskStateData, get_camel_task, get_task_lock, @@ -45,6 +44,14 @@ class Workforce(BaseWorkforce): use_structured_output_handler: bool = True, ) -> None: self.api_task_id = api_task_id + logger.info("Initializing workforce", extra={ + "api_task_id": api_task_id, + "description": description[:100] + "..." if len(description) > 100 else description, + "children_count": len(children) if children else 0, + "graceful_shutdown_timeout": graceful_shutdown_timeout, + "share_memory": share_memory, + "use_structured_output_handler": use_structured_output_handler + }) super().__init__( description=description, children=children, @@ -65,13 +72,20 @@ class Workforce(BaseWorkforce): coordinator_context: Optional context ONLY for coordinator agent during decomposition. This context will NOT be passed to subtasks or worker agents. """ + logger.info("Starting task decomposition", extra={ + "api_task_id": self.api_task_id, + "task_id": task.id, + "task_content": task.content[:200] + "..." 
if len(task.content) > 200 else task.content, + "has_coordinator_context": bool(coordinator_context) + }) if not validate_task_content(task.content, task.id): task.state = TaskState.FAILED task.result = "Task failed: Invalid or empty content provided" - logger.warning( - f"Task {task.id} rejected: Invalid or empty content. Content preview: '{task.content[:50]}...'" - ) + logger.warning("Task rejected: Invalid or empty content", extra={ + "task_id": task.id, + "content_preview": task.content[:50] + "..." if len(task.content) > 50 else task.content + }) raise UserException(code.error, task.result) self.reset() @@ -81,11 +95,19 @@ class Workforce(BaseWorkforce): task.state = TaskState.OPEN subtasks = asyncio.run(self.handle_decompose_append_task(task)) + logger.info("Task decomposition completed", extra={ + "api_task_id": self.api_task_id, + "task_id": task.id, + "subtasks_count": len(subtasks) + }) return subtasks async def eigent_start(self, subtasks: list[Task]): """start the workforce""" - logger.debug(f"start the workforce {subtasks=}") + logger.info("Starting workforce execution", extra={ + "api_task_id": self.api_task_id, + "subtasks_count": len(subtasks) + }) self._pending_tasks.extendleft(reversed(subtasks)) # Save initial snapshot self.save_snapshot("Initial task decomposition") @@ -93,7 +115,10 @@ class Workforce(BaseWorkforce): try: await self.start() except Exception as e: - logger.error(f"Error in workforce execution: {e}") + logger.error("Error in workforce execution", extra={ + "api_task_id": self.api_task_id, + "error": str(e) + }, exc_info=True) self._state = WorkforceState.STOPPED raise finally: @@ -294,18 +319,11 @@ class Workforce(BaseWorkforce): "failure_count": task.failure_count, } - if self._task_is_new(task_data): - await task_lock.put_queue( - ActionNewTaskStateData( - data=task_data - ) - ) - else: - await task_lock.put_queue( - ActionTaskStateData( - data=task_data - ) + await task_lock.put_queue( + ActionTaskStateData( + data=task_data ) + ) return await super()._handle_completed_task(task) @@ -339,36 +357,6 @@ class Workforce(BaseWorkforce): return result - def _task_is_new(self, item:dict) -> bool: - # Validate the task state data object first - assert isinstance(item, dict) - task_id = item.get("task_id", "") - state = item.get("state", "") - result = item.get("result", "") - failure_count = item.get("failure_count", 0) - - # Validate required fields - if not task_id: - logger.error("Missing task_id in task_state data") - return False - elif not state: - logger.error(f"Missing state in task_state data for task {task_id}") - return False - - # Ensure failure_count is an integer - try: - failure_count = int(failure_count) - except (ValueError, TypeError): - logger.error(f"Invalid failure_count in task_state data for task {task_id}: {failure_count}") - failure_count = 0 # Default to 0 if invalid - - should_send_new_task_state = ( - state == "FAILED" or - (failure_count == 0 and result.strip() == "") - ) - - return should_send_new_task_state - def stop(self) -> None: super().stop() task_lock = get_task_lock(self.api_task_id) diff --git a/config/notarize.cjs b/config/notarize.cjs index e49639db..673976b7 100644 --- a/config/notarize.cjs +++ b/config/notarize.cjs @@ -11,9 +11,9 @@ exports.default = async function notarizing(context) { // Validate required environment variables if (!process.env.APPLE_ID || !process.env.APPLE_APP_SPECIFIC_PASSWORD || !process.env.APPLE_TEAM_ID) { - console.error("Missing required environment variables for notarization"); - 
console.error("Required: APPLE_ID, APPLE_APP_SPECIFIC_PASSWORD, APPLE_TEAM_ID"); - throw new Error("Notarization failed: Missing required environment variables"); + console.warn("Missing Apple environment variables for notarization"); + console.warn("Skipping notarization. Required: APPLE_ID, APPLE_APP_SPECIFIC_PASSWORD, APPLE_TEAM_ID"); + return; } return notarize({ diff --git a/resources/scripts/download.js b/resources/scripts/download.js index 124e7cc8..aeb6993a 100644 --- a/resources/scripts/download.js +++ b/resources/scripts/download.js @@ -76,6 +76,7 @@ export async function downloadWithRedirects(url, destinationPath) { // Check if file exists and has size > 0 try { + if (fs.existsSync(destinationPath)) { const stats = fs.statSync(destinationPath) if (stats.size === 0) { @@ -89,6 +90,7 @@ export async function downloadWithRedirects(url, destinationPath) { } } catch (err) { safeReject(new Error(`Failed to verify download: ${err.message}`)) + } }) }) diff --git a/server/app/component/database.py b/server/app/component/database.py index 1a9cfb9c..96bdeec6 100644 --- a/server/app/component/database.py +++ b/server/app/component/database.py @@ -1,6 +1,14 @@ from sqlmodel import Session, create_engine from app.component.environment import env, env_or_fail +from utils import traceroot_wrapper as traceroot +logger = traceroot.get_logger("database") + +logger.info("Initializing database engine", extra={ + "database_url_prefix": env_or_fail("database_url")[:20] + "...", + "debug_mode": env("debug") == "on", + "pool_size": 36 +}) engine = create_engine( env_or_fail("database_url"), @@ -8,11 +16,19 @@ engine = create_engine( pool_size=36, ) +logger.info("Database engine initialized successfully") + def session_make(): - return Session(engine) + logger.debug("Creating new database session") + session = Session(engine) + logger.debug("Database session created successfully") + return session def session(): + logger.debug("Creating database session context") with Session(engine) as session: + logger.debug("Database session context established") yield session + logger.debug("Database session context closed") diff --git a/server/app/component/environment.py b/server/app/component/environment.py index 626a79f8..d78f868a 100644 --- a/server/app/component/environment.py +++ b/server/app/component/environment.py @@ -5,9 +5,13 @@ from fastapi import APIRouter, FastAPI from dotenv import load_dotenv import importlib from typing import Any, overload +from utils import traceroot_wrapper as traceroot +logger = traceroot.get_logger("environment") +logger.info("Loading environment variables from .env file") load_dotenv() +logger.info("Environment variables loaded successfully") @overload @@ -23,20 +27,26 @@ def env(key: str, default: Any) -> Any: ... 
def env(key: str, default=None): - return os.getenv(key, default) + value = os.getenv(key, default) + logger.debug("Environment variable accessed", extra={"key": key, "has_value": value is not None, "using_default": value == default}) + return value def env_or_fail(key: str): value = env(key) if value is None: + logger.error("Required environment variable missing", extra={"key": key}) raise Exception("can't get env config value.") + logger.debug("Required environment variable retrieved", extra={"key": key}) return value def env_not_empty(key: str): value = env(key) if not value: + logger.error("Environment variable is empty", extra={"key": key}) raise Exception("env config value can't be empty.") + logger.debug("Non-empty environment variable retrieved", extra={"key": key}) return value @@ -71,8 +81,14 @@ def auto_include_routers(api: FastAPI, prefix: str, directory: str): :param prefix: 路由前缀 :param directory: 要扫描的目录路径 """ + logger.info("Starting automatic router registration", extra={ + "prefix": prefix, + "directory": directory + }) + # 将目录转换为绝对路径 dir_path = Path(directory).resolve() + router_count = 0 # 遍历目录下所有.py文件 for root, _, files in os.walk(dir_path): @@ -81,17 +97,44 @@ def auto_include_routers(api: FastAPI, prefix: str, directory: str): # 构造完整文件路径 file_path = Path(root) / file_name + logger.debug("Processing controller file", extra={ + "file_name": file_name, + "file_path": str(file_path) + }) + # 生成模块名称 module_name = file_path.stem - # 使用importlib加载模块 - spec = importlib.util.spec_from_file_location(module_name, file_path) - if spec is None or spec.loader is None: - continue - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) + try: + # 使用importlib加载模块 + spec = importlib.util.spec_from_file_location(module_name, file_path) + if spec is None or spec.loader is None: + logger.warning("Failed to create module spec", extra={"file_path": str(file_path)}) + continue + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) - # 检查模块中是否存在router属性且是APIRouter实例 - router = getattr(module, "router", None) - if isinstance(router, APIRouter): - api.include_router(router, prefix=prefix) + # 检查模块中是否存在router属性且是APIRouter实例 + router = getattr(module, "router", None) + if isinstance(router, APIRouter): + api.include_router(router, prefix=prefix) + router_count += 1 + logger.debug("Router registered successfully", extra={ + "module_name": module_name, + "prefix": prefix + }) + else: + logger.debug("No valid router found in module", extra={"module_name": module_name}) + + except Exception as e: + logger.error("Failed to load controller module", extra={ + "module_name": module_name, + "file_path": str(file_path), + "error": str(e) + }, exc_info=True) + + logger.info("Automatic router registration completed", extra={ + "prefix": prefix, + "directory": directory, + "routers_registered": router_count + }) diff --git a/server/app/model/abstract/model.py b/server/app/model/abstract/model.py index 1e63fa7e..6211598d 100644 --- a/server/app/model/abstract/model.py +++ b/server/app/model/abstract/model.py @@ -10,6 +10,9 @@ from fastapi_babel import _ from app.exception.exception import UserException from app.component.database import engine from convert_case import snake_case +from utils import traceroot_wrapper as traceroot + +logger = traceroot.get_logger("abstract_model") class AbstractModel(SQLModel): @@ -27,6 +30,13 @@ class AbstractModel(SQLModel): options: ExecutableOption | list[ExecutableOption] | None = None, s: Session, ): + 
logger.debug("Executing query by conditions", extra={ + "model_class": cls.__name__, + "has_order_by": order_by is not None, + "limit": limit, + "offset": offset, + "has_options": options is not None + }) stmt = select(cls).where(*whereclause) if order_by is not None: stmt = stmt.order_by(order_by) @@ -44,8 +54,15 @@ class AbstractModel(SQLModel): *whereclause: ColumnExpressionArgument[bool] | bool, s: Session, ) -> bool: + logger.debug("Checking if record exists", extra={"model_class": cls.__name__}) res = s.exec(select(func.count("*")).where(*whereclause)).first() - return res is not None and res > 0 + result = res is not None and res > 0 + logger.debug("Record existence check result", extra={ + "model_class": cls.__name__, + "exists": result, + "count": res + }) + return result @classmethod def count( @@ -71,11 +88,24 @@ class AbstractModel(SQLModel): *whereclause: ColumnExpressionArgument[bool], s: Session, ): + logger.info("Deleting records by conditions", extra={"model_class": cls.__name__}) stmt = delete(cls).where(*whereclause) - s.connection().execute(stmt) + result = s.connection().execute(stmt) s.commit() + logger.info("Records deleted", extra={ + "model_class": cls.__name__, + "rows_affected": result.rowcount + }) def save(self, s: Session | None = None): + model_id = getattr(self, 'id', None) + is_new = model_id is None + logger.info("Saving model", extra={ + "model_class": self.__class__.__name__, + "model_id": model_id, + "is_new_record": is_new + }) + if s is None: with Session(engine, expire_on_commit=False) as s: s.add(self) @@ -84,7 +114,22 @@ class AbstractModel(SQLModel): s.add(self) s.commit() + logger.info("Model saved successfully", extra={ + "model_class": self.__class__.__name__, + "model_id": getattr(self, 'id', None), + "was_new_record": is_new + }) + def delete(self, s: Session): + model_id = getattr(self, 'id', None) + is_soft_delete = isinstance(self, DefaultTimes) + + logger.info("Deleting model", extra={ + "model_class": self.__class__.__name__, + "model_id": model_id, + "is_soft_delete": is_soft_delete + }) + if isinstance(self, DefaultTimes): self.deleted_at = datetime.now() self.save(s) @@ -92,6 +137,12 @@ class AbstractModel(SQLModel): s.delete(self) s.commit() + logger.info("Model deleted successfully", extra={ + "model_class": self.__class__.__name__, + "model_id": model_id, + "was_soft_delete": is_soft_delete + }) + def update_fields(self, update_dict: dict): for k, v in update_dict.items(): setattr(self, k, v) diff --git a/src/components/ChatBox/FloatingAction.tsx b/src/components/ChatBox/FloatingAction.tsx index 4a9ecb1b..160df39d 100644 --- a/src/components/ChatBox/FloatingAction.tsx +++ b/src/components/ChatBox/FloatingAction.tsx @@ -6,9 +6,9 @@ export interface FloatingActionProps { /** Current task status */ status: "running" | "pause" | "pending" | "finished"; /** Callback when pause button is clicked */ - onPause?: () => void; + // onPause?: () => void; // Commented out - temporary not needed /** Callback when resume button is clicked */ - onResume?: () => void; + // onResume?: () => void; // Commented out - temporary not needed /** Callback when skip to next is clicked */ onSkip?: () => void; /** Loading state for pause/resume actions */ @@ -19,14 +19,14 @@ export interface FloatingActionProps { export const FloatingAction = ({ status, - onPause, - onResume, + // onPause, // Commented out - temporary not needed + // onResume, // Commented out - temporary not needed onSkip, loading = false, className, }: FloatingActionProps) => { - // Only 
show when task is running or paused - if (status !== "running" && status !== "pause") { + // Only show when task is running (removed pause state) + if (status !== "running") { return null; } @@ -38,6 +38,18 @@ export const FloatingAction = ({ )} >