From 3ed134d7d5bb4d04b6117a087e56050ef8446919 Mon Sep 17 00:00:00 2001
From: Louis Knight-Webb
Date: Fri, 8 Aug 2025 13:53:27 +0100
Subject: [PATCH] Deployments (#414)

* init deployment
* refactor state
* pre executor app state refactor
* deployment in app state
* clone
* fix executors
* fix dependencies
* command runner via app_state
* clippy
* remove dependency on ENVIRONMENT from command_runner
* remove dependency on ENVIRONMENT from command_runner
* build fix
* clippy
* fmt
* features
* vscode lints for cloud
* change streaming to SSE (#338) Remove debug logging Cleanup streaming logic feat: add helper function for creating SSE stream responses for stdout/stderr
* update vscode guidance
* move start
* Fix executors
* Move command executor to separate file
* Fix imports for executors
* Partial fix test_remote
* Fix
* fmt
* Clippy
* Add back GitHub cloud only routes
* cleanup and shared types
* Prepare for separate cloud crate
* Init backend-common workspace
* Update
* WIP
* WIP
* WIP
* WIP
* WIP
* WIP
* Projects (and sqlx)
* Tasks
* WIP
* Amp
* Backend executor structs
* Task attempts outline
* Move to crates folder
* Cleanup frontend dist
* Split out executors into separate crate
* Config and sentry
* Create deployment method helper
* Router
* Config endpoints
* Projects, analytics
* Update analytics paths when keys not provided
* Tasks, task context
* Middleware, outline task attempts
* Delete backend common
* WIP container
* WIP container
* Migrate worktree_path to container_ref (generic)
* WIP container service create
* Launch container
* Fix create task
* Create worktree
* Move logic into container
* Execution outline
* Executor selection
* Use enum_dispatch to route spawn tree
* Update route errors
* Implement child calling
* Move running executions to container
* Add streaming with history
* Drop cloud WIP
* Logs
* Logs
* Refactor container logic to execution tracker
* Chunk based streaming and cleanup
* Alex/mirgate task templates (#350)
* Re-enable task templates; migrate routes; migrate args and return types
* Refactor task template routes; consolidate list functions into get_templates with query support
* Fix get_templates function
* Implement amp executor
* Gemini WIP
* Make streaming the event store reusable
* Rewrite mutex to rwlock
* Staging for normalised logs impl
* Store custom LogMsg instead of event as more flexible
* Cleanup
* WIP newline stream for amp (tested and working, needs store impl)
* refactor: move stranded `git2` logic out of `models` (#352)
* remove legacy command_executor; move git2 logic into GitService
* remove legacy cloud runner
* put back config get route
* remove dead logic
* WIP amp normalisation
* Normalized logs now save to save msg store as raw
* Refactor auth endpoints (#355)
* Re-enable auth; Change auth to use deployment Add auth service Move auth logic to service Add auth router and service integration to deployment Refactor auth service and routes to use octocrab Refactor auth error handling and improve token validation responses
* rename auth_router to router for consistency
* refactor: rename auth_service to auth for consistency (#356)
* Refactor filesystem endpoints (#357)
* feat: implement filesystem service with directory listing and git repo detection
* refactor: update filesystem routes; sort repos by last modified
* Gemini executor logs normalization
* feat: add sound file serving endpoint and implement sound file loading (#358)
* Gemini executor followup (#360)
* Sync logs to db (#359)
* Exit monitor
* Outline stream logs to DB
* Outline read from the message store
* Add execution_process_logs, store logs in DB
* Stream logs from DB
* Normalized logs from DB
* Remove erroneous .sqlx cache
* Remove execution process stdout and stderr
* Update execution process record on completion
* Emit session event for amp
* Update session ID when event is emitted
* Split local/common spawn fn
* Create initial executor session
* Move normalized logs into executors
* Store executor action
* Refactor updated_at to use microseconds
* Follow up executions (#363)
* Follow up request handler scaffold Rename coding agent initial / follow up actions
* Follow ups
* Response for follow up
* Simplify execution actions for coding agents
* fix executor selection (#362)
* refactor: move logic out of `TaskAttempt` (#361)
* re-enable /diff /pr /rebase /merge /branch-status /open-editor /delete-file endpoints
* address review comments
* remove relic
* Claude Code (#365)
* Use ApiError rather than DeploymentError type in routes (#366)
* Fix fe routes (#367)
* /api/filesystem/list -> /api/filesystem/directory
* /api/projects/:project_id/tasks -> /api/tasks
* Remove with-branch
* /api/projects/:project_id/tasks/:task_id -> /api/tasks/:task_id
* Post tasks
* Update template routes
* Update BE for github poll endpoint, FE still needs updating
* WIP freeze old types
* File picker fix
* Project types
* Solve tsc warnings
* Remove constants and FE cloud mode
* Setup for /api/info refactor
* WIP config refactor
* Remove custom mapping to coding agents
* Update settings to fix code editor
* Config fix (will need further changes once attempts types migrated)
* Tmp fix types
* Config auto deserialisation
* Alex/refactor background processes (#369)
* feat: add cleanup for orphaned executions at startup
* Fix worktree cleanup; re-add worktree cleanup queries
* refactor worktree cleanup for orphaned and externally deleted worktrees
* Fix compile error
* refactor: container creation lifecycle (#368)
* Consolidate worktree logic in the WorktreeManager
* move auxiliary logic into worktree manager
* fix compile error
* Rename core crate to server
* Fix npm run dev
* Fix fe routes 2 (#371)
* Migrate config paths
* Update sounds, refactor lib.rs
* Project FE types
* Branch
* Cleanup sound constants
* Template types
* Cleanup file search and other unused types
* Handle errors
* wip: basic mcp config editing (#351)
* Re-add notification service, move assets to common dir (#373) add config to container, add notifications into exit monitor Refactor notification service Refactor notifications
* Stderr support (#372) Refactor plain-text log processing and reuse it for gemini, stderr, and potentially other executors.
* Fix fe routes 3 (#378)
* Task attempts
* Task types
* Get single task attempt endpoint
* Task attempt response
* Branch status
* More task attempt endpoints
* Task attempt children
* Events WIP
* Stream events when task, task attempt and execution process change status
* Fixes
* Cleanup logs
* Alex/refactor pr monitor (#377)
* Refactor task status updates and add PR monitoring functionality
* Add PR monitoring service and integrate it into deployment flow Refactor GitHub token retrieval in PR creation and monitoring services Fix github pr regex
* Fix types
* refactor: dev server logic (#374)
* reimplement start dev server logic
* robust process group killing
* Fix fe routes 4 (#383)
* Add endpoint to get execution processes
* Update types for execution process
* Further execution process type cleanup
* Wipe existing logs display
* Further process related cleanup
* Update get task attempt endpoint
* Frozen type removal
* Diff types
* Display raw logs WIP
* fix: extract session id once per execution (#386)
* Fix fe routes 5 (#387)
* Display normalized logs
* Add execution-process info endpoint
* WIP load into virtualized
* Simplified unified logs
* Raw logs also use json patch now (simplifies FE keys)
* WIP
* Fix FE rendering
* Remove timestamps
* Fix conversation height
* Cleanup entry display
* Spacing
* Mark the boundaries between different execution processes in the logs
* Deduplicate entries
* Fix replace
* Fmt
* put back stop execution process endpoint (#384)
* Fix fe routes 6 (#391)
* WIP cleanup to remove related tasks and plans
* Refactor active tab
* Remove existing diff FE logic
* Rename tab
* WIP stream file events
* WIP track FS events
* Respect gitignore
* Debounced event
* Deduplicate events
* Refactor git diff
* WIP stream diffs
* Resolve issue with unstaged changes
* Diff filter by files
* Stream ongoing changes
* Remove entries when reset and json patch safe entry ids
* Update the diff tab
* Cleanup logs
* Cleanup
* Error enum
* Update create PR attempt URL
* Follow up and open in IDE
* Fix merge
* refactor: introduce `AgentProfiles` (#388)
* automatically schedule coding agent execution after setup script
* profiles implementation
* add next_action field to ExecutorAction type
* make start_next_action generic to action type Remove ProfilesManager and DefaultCommandBuilder structs
* store executor_action_type in the DB
* update shared types
* rename structs
* fix compile error
* Refactor remaining task routes (#389)
* Implement deletion functionality for execution processes and task attempts, including recursive deletion of associated logs. refactor: deletion process for task attempts and associated entities feat: Refactor task and task attempt models to remove executor field
- Removed the `executor` field from the `task_attempt` model and related queries.
- Updated the `CreateTaskAndStart` struct to encapsulate task and attempt creation.
- Modified the task creation and starting logic to accommodate the new structure.
- Adjusted SQL queries and migration scripts to reflect the removal of the executor.
- Enhanced notification service to handle executor types dynamically.
- Updated TypeScript types to align with the changes in the Rust models.
refactor: remove CreateTaskAndStart type and update related code Add TaskAttemptWithLatestProfile and alias in frontend Fix silent failure of sqlx builder Remove db migration Fix rebase errors
* Remove unneeded delete logic; move common container logic to service
* Profiles fe (#398)
* Get things compiling
* Refactor the config
* WIP fix task attempt creation
* Further config fixes
* Sounds and executors in settings
* Fix sounds
* Display profile config
* Onboarding
* Remove hardcoded agents
* Move follow up attempt params to shared
* Remove further shared types
* Remove comment (#400)
* Codex (#380)
* only trigger error message when RunReason is SetupScript (#396)
* Opencode (#385)
* Restore Gemini followups (#392)
* fix task killing (#395)
* commit changes after successful execution (#403)
* Claude-code-router (#410)
* Amp tool use (#407)
* Config upgrades (#405)
* Versioned config
* Upgrade fixes
* Save config after migration
* Scoping
* Update Executor types
* Theme types fix
* Cleanup
* Change theme selector to an enum
* Rename config schema version field
* Diff improve (#412)
* Ensure container exists
* Safe handling when ExecutorAction isn't valid JSON in DB
* Reset data when endpoint changes
* refactor: conditional notification (#408)
* conditional notification
* fix next action run_reason
* remove redundant log
* Fix GitHub auth frontend (#404)
* fix frontend github auth
* Add GitHub error handling and update dependencies
- Introduced GitHubMagicErrorStrings enum for consistent error messaging related to GitHub authentication and permissions.
- Updated the GitHubService to include a check_token method for validating tokens.
- Refactored auth and task_attempts routes to utilize the new error handling.
- Added strum_macros dependency in Cargo.toml for enum display.
* Refactor GitHub error handling and API response structure to use CreateGitHubPRErrorData
* Refactor API response handling in CreatePRDialog and update attemptsApi to return structured results
* Refactor tasksApi.createAndStart to remove projectId parameter from API call
* use SCREAMING_SNAKE_CASE for consistency
* Refactor GitHub error handling to replace CreateGitHubPRErrorData with GitHubServiceError across the codebase
* Update crates/utils/src/response.rs Co-authored-by: Gabriel Gordon-Hall
* Fix compile error
* Fix types --------- Co-authored-by: Gabriel Gordon-Hall
* Fix: (#415)
- Config location
- Serve FE from BE in prod
- Create config when it doesn't exist
- Tmp disable building the MCP
* Fix dev server route (#417)
* remove legacy logic and unused crates (#418)
* update CLAUDE.md for new project structure (#420)
* fix mcp settings page (#419)
* Fix cards not updating (vibe-kanban) (#416)
* Commit changes from coding agent for task attempt 774a2cae-a763-4117-af0e-1287a043c462
* Commit changes from coding agent for task attempt 774a2cae-a763-4117-af0e-1287a043c462
* Commit changes from coding agent for task attempt 774a2cae-a763-4117-af0e-1287a043c462
* feat: update task status management in container service
* refactor: simplify notification logic and finalize context checks in LocalContainerService
* Task attempt fe fixes (#422)
* Style tweaks
* Refactor
* Fix auto scroll
* Implement stop endpoint for all execution processes in a task attempt
* Weird race condition with amp
* Remove log
* Fix follow ups
* Re-add stop task attempt endpoint (#421)
* Re-add stop task attempt endpoint; remove legacy comments for implemented functionality
* Fix kill race condition; fix state change when dev server
* Ci fixes (#425)
* Eslint fix
* Remove #[ts(export)]
* Fix tests
* Clippy
* Prettier
* Fmt
* Version downgrade
* Fix API response
* Don't treat clippy warnings as errors
* Change crate name
* Update cargo location
* Update further refs
* Reset versions
* Bump versions
* Update binary names
* Branch fix
* Prettier
* Ensure finished event sends data (#434)
* use option_env!
when reading analytics vars (#435) * remove dead logic (#436) * update crate version across workspace (#437) * add all crates across the workspace * chore: bump version to 0.0.56 --------- Co-authored-by: Alex Netsch Co-authored-by: Gabriel Gordon-Hall Co-authored-by: Solomon Co-authored-by: Gabriel Gordon-Hall Co-authored-by: GitHub Action --- .github/workflows/pre-release.yml | 13 +- .github/workflows/test.yml | 2 +- .gitignore | 7 +- CLAUDE.md | 192 +-- Cargo.toml | 8 +- .../scripts/toast-notification.ps1 | 2 +- .../sounds/abstract-sound1.wav | Bin .../sounds/abstract-sound2.wav | Bin .../sounds/abstract-sound3.wav | Bin .../sounds/abstract-sound4.wav | Bin {backend => assets}/sounds/cow-mooing.wav | Bin .../sounds/phone-vibration.wav | Bin {backend => assets}/sounds/rooster.wav | Bin ...e5220590da6c31c77f161074fc62752d63881.json | 12 - ...a0b40ec23f3738c2e7399f067b558cf8d490e.json | 12 - ...f61abacfe62e758abe7030a6aa745140b95ca.json | 104 -- ...5ebe1d2e30cd745e59e189d56487b5639dfbb.json | 12 - ...774ecb1e10a899ef95da74066eccedca4d8b2.json | 12 - ...40407abce436cb81292d42b2dbe1e5c18eea1.json | 104 -- ...83fec1a856c6a710aff831abd2382ede76b43.json | 12 - ...99a14093f7ce2585bf9843585608f17ec575b.json | 104 -- ...8f7ef23177431eaed82dc08c94c3e5944340e.json | 12 - ...ac44252468c5226b2cdd7770f027332eed6d7.json | 104 -- ...dc9d5819225c6f8d9e077070c6e518a17f50b.json | 12 - ...1a219f3199890fa640afc946ef1a792d6d8de.json | 12 - ...6ff22bfb790f29466450735e0b8bb1bc4ec94.json | 12 - backend/src/app_state.rs | 240 --- backend/src/bin/cloud_runner.rs | 401 ----- backend/src/bin/generate_types.rs | 204 --- backend/src/bin/test_remote.rs | 659 -------- backend/src/command_runner.rs | 291 ---- backend/src/command_runner/local.rs | 703 --------- backend/src/command_runner/remote.rs | 402 ----- backend/src/execution_monitor.rs | 1193 -------------- backend/src/executor.rs | 1081 ------------- backend/src/executors/aider.rs | 935 ----------- backend/src/executors/aider/filter.rs | 269 ---- backend/src/executors/amp.rs | 658 -------- backend/src/executors/ccr.rs | 91 -- backend/src/executors/charm_opencode.rs | 99 -- backend/src/executors/claude.rs | 823 ---------- backend/src/executors/cleanup_script.rs | 121 -- backend/src/executors/codex.rs | 1001 ------------ backend/src/executors/dev_server.rs | 50 - backend/src/executors/echo.rs | 74 - backend/src/executors/gemini.rs | 697 --------- backend/src/executors/gemini/config.rs | 67 - backend/src/executors/gemini/streaming.rs | 363 ----- backend/src/executors/mod.rs | 25 - backend/src/executors/setup_script.rs | 127 -- backend/src/executors/sst_opencode.rs | 694 --------- backend/src/executors/sst_opencode/filter.rs | 184 --- backend/src/executors/sst_opencode/tools.rs | 166 -- backend/src/main.rs | 317 ---- backend/src/middleware/model_loaders.rs | 242 --- backend/src/models/api_response.rs | 35 - backend/src/models/config.rs | 433 ------ backend/src/models/project.rs | 362 ----- backend/src/models/task_attempt.rs | 1213 --------------- backend/src/routes/auth.rs | 262 ---- backend/src/routes/config.rs | 335 ---- backend/src/routes/filesystem.rs | 185 --- backend/src/routes/mod.rs | 10 - backend/src/routes/stream.rs | 244 --- backend/src/routes/task_attempts.rs | 1140 -------------- backend/src/routes/task_templates.rs | 147 -- backend/src/routes/tasks.rs | 277 ---- backend/src/services/git_service.rs | 1373 ----------------- backend/src/services/mod.rs | 13 - backend/src/services/pr_monitor.rs | 214 --- backend/src/services/process_service.rs | 944 
------------ build-npm-package.sh | 16 +- check-both.sh | 23 + ...6aaf8d3cce788d2494ff283e2fad71df0a05d.json | 0 ...aea0ad6bcc10ebd63f441d321a389f026e263.json | 0 ...f15fc4af894490bc65a4d344a7575cb0d8643.json | 0 ...c59a1cca3b6073517c982e5c08f62bc3ef4e4.json | 12 + ...d7c90b4d19b793e496c2e01593f32c5101f24.json | 12 +- ...8e9e61f0d37052e782bb5452ab8e1018d9b63.json | 0 ...d28f213b1c78fc2cf97698da877ff91d6c086.json | 12 + ...c2d3176019a68c6bb83d33008594821415a57.json | 0 ...d2f718c0fd6cf6202d5c9139fb1afda123d29.json | 12 + ...c78331f69f3637f4b8a554898b9e6ba5bba37.json | 18 +- ...d83ee3d344a5c85e1a5999247b6a44f3ae390.json | 8 +- ...c66d3298666a546e964a509538731ece90c9e.json | 0 ...9abd77695b05d1dd84ef3102930bc0fe6404f.json | 0 ...9157beb07ab9e77a03ec6fc30b4f56f9b8f6b.json | 0 ...9fd3c59fdba73c168628784f0a09488b80528.json | 38 + ...55eafbb174c8dda442081823406ac32809a94.json | 0 ...3d7e579dda37972c059e7515c4ceee4bd8dd3.json | 0 ...ce08f973355fd7809e2caaf966d207bcb7b4b.json | 74 + ...9ca89aeb448d0c1d2446c65cd43db40735e86.json | 0 ...2e0e4ea6d1432389c02468ad79f1f742d4031.json | 0 ...97932224dc325b23476cb84153d690227fd8b.json | 0 ...8526b3771cfbb0b22ae4b5d17b51af587b9e2.json | 0 ...c74c08d6c255ef598bb8ec3ff9a67f2344ab1.json | 12 + ...d91f54c47089c8e732ef80c3d1e85dfba1430.json | 12 + ...c8d99dd04c61dc468e2e4c5aa548436d13834.json | 0 ...b200038176dc8c56c49eeaaa65763a1b276eb.json | 0 ...f5efa39bf018b01b7a1f5ff6eefc9e4c55445.json | 6 +- ...af151e9adfdca26c226ad71020d291fd802d5.json | 0 ...2acbc1fb35dc2605e7be893610599b2427f2e.json | 0 ...44332ca70db3d6d6743b2601dc0d167536437.json | 0 ...646635f14e6ed9ff77d1c2225ce82e40fa03d.json | 4 +- ...7165b482838ff32a13c0da66b4160e170466b.json | 6 +- ...2ee66c9f7cd8b7f961fbda2f89fc0a1c442c2.json | 0 ...eb684840804f9279f9990cfdcb9015453d9d8.json | 104 ++ ...74b2083ceaf6c5cf82456a7d83c18323c5cec.json | 62 + ...0d73ba23020e76fd70854ac57f2da251702c3.json | 0 ...2c075b355e54836930226fc84390c5a4516f7.json | 104 ++ ...b0ce6848145d4769c31bed3fc8f492c070c06.json | 6 +- ...381a2a772537c3daa1f9d800b1ef1f191f21d.json | 12 + ...e21e174b195991eaa33722cbf5f76da84cfab.json | 0 ...c5f288ac7aaa5425cac40fc33f337e1a351f2.json | 0 ...8dcc0ea273dffea920838fc4dcc837293ba6b.json | 38 + ...9d700f06ae08821fee00c9a5b60492b05259c.json | 0 ...600ba66b9c919af26ae6ca79b1cc82d138158.json | 12 +- ...ba06cc3212ffffb8520fc7dbbcc8b60ada314.json | 0 ...20031614c831b7b085083bac20c1af314c538.json | 0 ...f560857e70cf8c1dee1eaecd75b096911cb17.json | 12 +- ...46a265d4db53be9554989a814b069d0af82f2.json | 0 ...0e26db4e65e28bba23ec26e77a1faa4dcc974.json | 74 + ...3837c3af677ab5d7a1167550ab1393ac07ea9.json | 6 +- ...9aad7fea3afb72c5104b2e2d1533825e81293.json | 36 +- ...902b25f18cd83d2ca8616bf3ec1c84728bf6f.json | 20 +- ...2d007e233e7db93aefba4fef08e7aa68f5ab7.json | 0 ...ee4442c0ab8b37aca0abf29fe5464c8539c6d.json | 74 + ...fe584fef35a015038e83a548edb59ecadaa18.json | 6 +- ...926463b9e7bf03a73cf31cafe73d046579d54.json | 48 +- ...ce8ab99a94962d5aa92117a6039201ffa6c2c.json | 12 + ...45e506c71c28c5994e430d9b0546adb15ffa2.json | 0 ...7efcf02f105564e7d97247dac1fd704312871.json | 74 + ...23e8ba438949679816316ef446e0b7b8eb3e6.json | 74 + ...7ab77ce31be2205887185900647b4bf49ea73.json | 8 +- crates/db/Cargo.toml | 24 + .../db}/migrations/20250617183714_init.sql | 0 .../20250620212427_execution_processes.sql | 0 ...emove_stdout_stderr_from_task_attempts.sql | 0 ...late_activities_to_execution_processes.sql | 0 .../20250623120000_executor_sessions.sql | 0 ...d_executor_type_to_execution_processes.sql | 0 
...50625000000_add_dev_script_to_projects.sql | 0 ...0701000000_add_branch_to_task_attempts.sql | 0 ...00001_add_pr_tracking_to_task_attempts.sql | 0 ...assistant_message_to_executor_sessions.sql | 0 ...00000_add_base_branch_to_task_attempts.sql | 0 ...250709000000_add_worktree_deleted_flag.sql | 0 .../20250710000000_add_setup_completion.sql | 0 .../20250715154859_add_task_templates.sql | 0 .../20250716143725_add_default_templates.sql | 0 ...32_update_executor_names_to_kebab_case.sql | 0 ...0250716170000_add_parent_task_to_tasks.sql | 0 ...717000000_drop_task_attempt_activities.sql | 0 ...9000000_add_cleanup_script_to_projects.sql | 0 ...eanupscript_to_process_type_constraint.sql | 0 ..._update_worktree_path_to_container_ref.sql | 8 + .../20250726210910_make_branch_optional.sql | 16 + ..._remove_command_from_execution_process.sql | 4 + ...0250727150349_remove_working_directory.sql | 3 + ...29162941_create_execution_process_logs.sql | 11 + ...ut_and_stderr_from_execution_processes.sql | 4 + ...executor_action_to_execution_processes.sql | 8 + ...0001_rename_process_type_to_run_reason.sql | 4 + ...d_execution_process_task_attempt_index.sql | 6 + ..._executor_action_type_to_task_attempts.sql | 5 + ...ix_executor_action_type_virtual_column.sql | 12 + crates/db/src/lib.rs | 76 + .../db}/src/models/execution_process.rs | 325 ++-- .../db/src/models/execution_process_logs.rs | 119 ++ .../db}/src/models/executor_session.rs | 26 +- {backend => crates/db}/src/models/mod.rs | 7 +- crates/db/src/models/project.rs | 215 +++ {backend => crates/db}/src/models/task.rs | 85 +- crates/db/src/models/task_attempt.rs | 578 +++++++ .../db}/src/models/task_template.rs | 3 - crates/deployment/Cargo.toml | 21 + crates/deployment/src/lib.rs | 180 +++ crates/executors/Cargo.toml | 37 + .../src/actions/coding_agent_follow_up.rs | 28 + .../src/actions/coding_agent_initial.rs | 25 + crates/executors/src/actions/mod.rs | 77 + crates/executors/src/actions/script.rs | 48 + crates/executors/src/command.rs | 337 ++++ crates/executors/src/executors/amp.rs | 604 ++++++++ crates/executors/src/executors/claude.rs | 1079 +++++++++++++ crates/executors/src/executors/codex.rs | 902 +++++++++++ crates/executors/src/executors/gemini.rs | 331 ++++ crates/executors/src/executors/mod.rs | 174 +++ crates/executors/src/executors/opencode.rs | 823 ++++++++++ crates/executors/src/lib.rs | 5 + crates/executors/src/logs/mod.rs | 52 + .../src/logs/plain_text_processor.rs | 438 ++++++ crates/executors/src/logs/stderr_processor.rs | 58 + .../executors/src/logs/utils/entry_index.rs | 68 + crates/executors/src/logs/utils/mod.rs | 7 + crates/executors/src/logs/utils/patch.rs | 115 ++ crates/executors/src/stdout_dup.rs | 127 ++ crates/local-deployment/Cargo.toml | 41 + crates/local-deployment/src/command.rs | 43 + crates/local-deployment/src/container.rs | 837 ++++++++++ crates/local-deployment/src/lib.rs | 148 ++ {backend => crates/server}/Cargo.toml | 52 +- {backend => crates/server}/build.rs | 2 +- crates/server/src/bin/generate_types.rs | 121 ++ .../server}/src/bin/mcp_task_server.rs | 3 +- crates/server/src/error.rs | 70 + crates/server/src/lib.rs | 9 + crates/server/src/main.rs | 76 + {backend => crates/server}/src/mcp/mod.rs | 0 .../server}/src/mcp/task_server.rs | 40 +- .../server}/src/middleware/mod.rs | 0 crates/server/src/middleware/model_loaders.rs | 205 +++ crates/server/src/routes/auth.rs | 128 ++ crates/server/src/routes/config.rs | 339 ++++ crates/server/src/routes/events.rs | 28 + .../server/src/routes/execution_processes.rs | 102 ++ 
crates/server/src/routes/filesystem.rs | 71 + crates/server/src/routes/frontend.rs | 54 + .../server}/src/routes/github.rs | 26 +- .../server}/src/routes/health.rs | 3 +- crates/server/src/routes/mod.rs | 41 + .../server}/src/routes/projects.rs | 234 +-- crates/server/src/routes/task_attempts.rs | 1022 ++++++++++++ crates/server/src/routes/task_templates.rs | 103 ++ crates/server/src/routes/tasks.rs | 224 +++ crates/services/Cargo.toml | 54 + crates/services/src/lib.rs | 1 + .../services}/src/services/analytics.rs | 38 +- crates/services/src/services/auth.rs | 131 ++ crates/services/src/services/config/mod.rs | 42 + .../src/services/config/versions/mod.rs | 2 + .../src/services/config/versions/v1.rs | 87 ++ .../src/services/config/versions/v2.rs | 389 +++++ crates/services/src/services/container.rs | 616 ++++++++ crates/services/src/services/events.rs | 197 +++ crates/services/src/services/filesystem.rs | 163 ++ .../src/services/filesystem_watcher.rs | 168 ++ crates/services/src/services/git.rs | 1190 ++++++++++++++ .../services}/src/services/github_service.rs | 97 +- crates/services/src/services/mod.rs | 13 + .../services/src/services/notification.rs | 110 +- crates/services/src/services/pr_monitor.rs | 142 ++ crates/services/src/services/sentry.rs | 33 + .../src/services}/worktree_manager.rs | 244 +-- crates/utils/Cargo.toml | 29 + crates/utils/src/assets.rs | 41 + crates/utils/src/browser.rs | 16 + crates/utils/src/diff.rs | 26 + .../src/utils.rs => crates/utils/src/lib.rs | 77 +- crates/utils/src/log_msg.rs | 58 + crates/utils/src/msg_store.rs | 175 +++ .../src/utils => crates/utils/src}/path.rs | 19 + crates/utils/src/response.rs | 41 + .../src/lib.rs => crates/utils/src/sentry.rs | 25 - .../src/utils => crates/utils/src}/shell.rs | 0 crates/utils/src/stream_lines.rs | 22 + .../src/utils => crates/utils/src}/text.rs | 0 dev_assets_seed/config.json | 4 +- frontend/package-lock.json | 73 +- frontend/package.json | 9 +- frontend/src/App.tsx | 15 +- frontend/src/components/GitHubLoginDialog.tsx | 36 +- .../DiffCard.tsx | 0 .../DisplayConversationEntry.tsx | 159 +- frontend/src/components/OnboardingDialog.tsx | 59 +- .../src/components/PrivacyOptInDialog.tsx | 2 +- frontend/src/components/config-provider.tsx | 136 +- .../context/TaskDetailsContextProvider.tsx | 335 +--- .../src/components/context/TaskPlanContext.ts | 33 - .../components/context/taskDetailsContext.ts | 48 +- frontend/src/components/logo.tsx | 6 +- frontend/src/components/logs/LogEntryRow.tsx | 62 + .../src/components/logs/ProcessStartCard.tsx | 76 + frontend/src/components/logs/StderrEntry.tsx | 13 + frontend/src/components/logs/StdoutEntry.tsx | 13 + .../src/components/projects/ProjectCard.tsx | 2 +- .../components/projects/project-detail.tsx | 11 +- .../projects/project-form-fields.tsx | 8 +- .../src/components/projects/project-form.tsx | 171 +- .../src/components/tasks/BranchSelector.tsx | 2 +- .../tasks/DeleteFileConfirmationDialog.tsx | 10 +- .../tasks/EditorSelectionDialog.tsx | 54 +- frontend/src/components/tasks/TaskCard.tsx | 12 +- .../tasks/TaskDetails/DiffChunkSection.tsx | 2 +- .../components/tasks/TaskDetails/DiffFile.tsx | 30 +- .../components/tasks/TaskDetails/DiffTab.tsx | 64 +- .../components/tasks/TaskDetails/LogsTab.tsx | 246 +-- .../TaskDetails/LogsTab/Conversation.tsx | 276 ---- .../TaskDetails/LogsTab/ConversationEntry.tsx | 56 - .../LogsTab/NormalizedConversationViewer.tsx | 92 -- .../tasks/TaskDetails/LogsTab/Prompt.tsx | 22 - .../LogsTab/SetupScriptRunning.tsx | 49 - 
.../components/tasks/TaskDetails/PlanTab.tsx | 258 ---- .../tasks/TaskDetails/ProcessCard.tsx | 204 +++ .../tasks/TaskDetails/ProcessesTab.tsx | 81 +- .../tasks/TaskDetails/RelatedTasksTab.tsx | 216 --- .../tasks/TaskDetails/TabNavigation.tsx | 67 +- .../src/components/tasks/TaskDetailsPanel.tsx | 15 +- .../components/tasks/TaskDetailsToolbar.tsx | 255 +-- .../components/tasks/TaskFollowUpSection.tsx | 20 +- .../src/components/tasks/TaskFormDialog.tsx | 81 +- .../tasks/Toolbar/CreateAttempt.tsx | 187 +-- .../tasks/Toolbar/CreatePRDialog.tsx | 83 +- .../tasks/Toolbar/CurrentAttempt.tsx | 231 +-- frontend/src/components/theme-provider.tsx | 10 +- frontend/src/components/theme-toggle.tsx | 7 +- .../components/ui/file-search-textarea.tsx | 12 +- frontend/src/components/ui/folder-picker.tsx | 4 +- frontend/src/hooks/use-system-info.ts | 40 - frontend/src/hooks/useDiffStream.ts | 38 + frontend/src/hooks/useEventSourceManager.ts | 159 ++ frontend/src/hooks/useJsonPatchStream.ts | 127 ++ frontend/src/hooks/useLogStream.ts | 74 + .../src/hooks/useNormalizedConversation.ts | 440 ------ frontend/src/hooks/useProcessConversation.ts | 92 ++ frontend/src/hooks/useProcessesLogs.ts | 115 ++ frontend/src/lib/api.ts | 500 +++--- frontend/src/lib/types.ts | 9 +- frontend/src/lib/utils.ts | 4 - frontend/src/main.tsx | 1 + frontend/src/pages/McpServers.tsx | 113 +- frontend/src/pages/Settings.tsx | 105 +- frontend/src/pages/project-tasks.tsx | 39 +- frontend/src/types/logs.ts | 17 + frontend/src/types/tabs.ts | 1 + frontend/src/utils/string.ts | 11 + package.json | 6 +- pnpm-lock.yaml | 66 +- scripts/prepare-db.js | 14 +- shared/old_frozen_types.ts | 12 + shared/types.ts | 234 ++- 331 files changed, 17971 insertions(+), 24665 deletions(-) rename {backend => assets}/scripts/toast-notification.ps1 (97%) rename {backend => assets}/sounds/abstract-sound1.wav (100%) rename {backend => assets}/sounds/abstract-sound2.wav (100%) rename {backend => assets}/sounds/abstract-sound3.wav (100%) rename {backend => assets}/sounds/abstract-sound4.wav (100%) rename {backend => assets}/sounds/cow-mooing.wav (100%) rename {backend => assets}/sounds/phone-vibration.wav (100%) rename {backend => assets}/sounds/rooster.wav (100%) delete mode 100644 backend/.sqlx/query-01b7e2bac1261d8be3d03c03df3e5220590da6c31c77f161074fc62752d63881.json delete mode 100644 backend/.sqlx/query-1c7b06ba1e112abf6b945a2ff08a0b40ec23f3738c2e7399f067b558cf8d490e.json delete mode 100644 backend/.sqlx/query-1f619f01f46859a64ded531dd0ef61abacfe62e758abe7030a6aa745140b95ca.json delete mode 100644 backend/.sqlx/query-1fca1ce14b4b20205364cd1f1f45ebe1d2e30cd745e59e189d56487b5639dfbb.json delete mode 100644 backend/.sqlx/query-36c9e3dd10648e94b949db5c91a774ecb1e10a899ef95da74066eccedca4d8b2.json delete mode 100644 backend/.sqlx/query-412bacd3477d86369082e90f52240407abce436cb81292d42b2dbe1e5c18eea1.json delete mode 100644 backend/.sqlx/query-5b902137b11022d2e1a5c4f6a9c83fec1a856c6a710aff831abd2382ede76b43.json delete mode 100644 backend/.sqlx/query-5ed1238e52e59bb5f76c0f153fd99a14093f7ce2585bf9843585608f17ec575b.json delete mode 100644 backend/.sqlx/query-8a67b3b3337248f06a57bdf8a908f7ef23177431eaed82dc08c94c3e5944340e.json delete mode 100644 backend/.sqlx/query-9472c8fb477958167f5fae40b85ac44252468c5226b2cdd7770f027332eed6d7.json delete mode 100644 backend/.sqlx/query-c614e6056b244ca07f1b9d44e7edc9d5819225c6f8d9e077070c6e518a17f50b.json delete mode 100644 backend/.sqlx/query-d2d0a1b985ebbca6a2b3e882a221a219f3199890fa640afc946ef1a792d6d8de.json delete mode 100644 
backend/.sqlx/query-ed8456646fa69ddd412441955f06ff22bfb790f29466450735e0b8bb1bc4ec94.json delete mode 100644 backend/src/app_state.rs delete mode 100644 backend/src/bin/cloud_runner.rs delete mode 100644 backend/src/bin/generate_types.rs delete mode 100644 backend/src/bin/test_remote.rs delete mode 100644 backend/src/command_runner.rs delete mode 100644 backend/src/command_runner/local.rs delete mode 100644 backend/src/command_runner/remote.rs delete mode 100644 backend/src/execution_monitor.rs delete mode 100644 backend/src/executor.rs delete mode 100644 backend/src/executors/aider.rs delete mode 100644 backend/src/executors/aider/filter.rs delete mode 100644 backend/src/executors/amp.rs delete mode 100644 backend/src/executors/ccr.rs delete mode 100644 backend/src/executors/charm_opencode.rs delete mode 100644 backend/src/executors/claude.rs delete mode 100644 backend/src/executors/cleanup_script.rs delete mode 100644 backend/src/executors/codex.rs delete mode 100644 backend/src/executors/dev_server.rs delete mode 100644 backend/src/executors/echo.rs delete mode 100644 backend/src/executors/gemini.rs delete mode 100644 backend/src/executors/gemini/config.rs delete mode 100644 backend/src/executors/gemini/streaming.rs delete mode 100644 backend/src/executors/mod.rs delete mode 100644 backend/src/executors/setup_script.rs delete mode 100644 backend/src/executors/sst_opencode.rs delete mode 100644 backend/src/executors/sst_opencode/filter.rs delete mode 100644 backend/src/executors/sst_opencode/tools.rs delete mode 100644 backend/src/main.rs delete mode 100644 backend/src/middleware/model_loaders.rs delete mode 100644 backend/src/models/api_response.rs delete mode 100644 backend/src/models/config.rs delete mode 100644 backend/src/models/project.rs delete mode 100644 backend/src/models/task_attempt.rs delete mode 100644 backend/src/routes/auth.rs delete mode 100644 backend/src/routes/config.rs delete mode 100644 backend/src/routes/filesystem.rs delete mode 100644 backend/src/routes/mod.rs delete mode 100644 backend/src/routes/stream.rs delete mode 100644 backend/src/routes/task_attempts.rs delete mode 100644 backend/src/routes/task_templates.rs delete mode 100644 backend/src/routes/tasks.rs delete mode 100644 backend/src/services/git_service.rs delete mode 100644 backend/src/services/mod.rs delete mode 100644 backend/src/services/pr_monitor.rs delete mode 100644 backend/src/services/process_service.rs create mode 100755 check-both.sh rename {backend => crates/db}/.sqlx/query-00aa2d8701f6b1ed2e84ad00b9b6aaf8d3cce788d2494ff283e2fad71df0a05d.json (100%) rename {backend => crates/db}/.sqlx/query-03f2b02ba6dc5ea2b3cf6b1004caea0ad6bcc10ebd63f441d321a389f026e263.json (100%) rename {backend => crates/db}/.sqlx/query-0923b77d137a29fc54d399a873ff15fc4af894490bc65a4d344a7575cb0d8643.json (100%) create mode 100644 crates/db/.sqlx/query-0bf539bafb9c27cb352b0e08722c59a1cca3b6073517c982e5c08f62bc3ef4e4.json rename backend/.sqlx/query-6e8b860b14decfc2227dc57213f38442943d3fbef5c8418fd6b634c6e0f5e2ea.json => crates/db/.sqlx/query-1174eecd9f26565a4f4e1e367b5d7c90b4d19b793e496c2e01593f32c5101f24.json (66%) rename {backend => crates/db}/.sqlx/query-1268afe9ca849daa6722e3df7ca8e9e61f0d37052e782bb5452ab8e1018d9b63.json (100%) create mode 100644 crates/db/.sqlx/query-129f898c089030e5ce8c41ff43fd28f213b1c78fc2cf97698da877ff91d6c086.json rename {backend => crates/db}/.sqlx/query-1b082630a9622f8667ee7a9aba2c2d3176019a68c6bb83d33008594821415a57.json (100%) create mode 100644 
crates/db/.sqlx/query-1e339e959f8d2cdac13b3e2b452d2f718c0fd6cf6202d5c9139fb1afda123d29.json rename backend/.sqlx/query-a9e93d5b09b29faf66e387e4d7596a792d81e75c4d3726e83c2963e8d7c9b56f.json => crates/db/.sqlx/query-1f1850b240af8edf2a05ad4a250c78331f69f3637f4b8a554898b9e6ba5bba37.json (66%) rename backend/.sqlx/query-4049ca413b285a05aca6b25385e9c8185575f01e9069e4e8581aa45d713f612f.json => crates/db/.sqlx/query-216193a63f7b0fb788566b63f56d83ee3d344a5c85e1a5999247b6a44f3ae390.json (78%) rename {backend => crates/db}/.sqlx/query-216efabcdaa2a6ea166e4468a6ac66d3298666a546e964a509538731ece90c9e.json (100%) rename {backend => crates/db}/.sqlx/query-2188432c66e9010684b6bb670d19abd77695b05d1dd84ef3102930bc0fe6404f.json (100%) rename {backend => crates/db}/.sqlx/query-290ce5c152be8d36e58ff42570f9157beb07ab9e77a03ec6fc30b4f56f9b8f6b.json (100%) create mode 100644 crates/db/.sqlx/query-2ec7648202fc6f496b97d9486cf9fd3c59fdba73c168628784f0a09488b80528.json rename {backend => crates/db}/.sqlx/query-36e4ba7bbd81b402d5a20b6005755eafbb174c8dda442081823406ac32809a94.json (100%) rename {backend => crates/db}/.sqlx/query-3a5b3c98a55ca183ab20c74708e3d7e579dda37972c059e7515c4ceee4bd8dd3.json (100%) create mode 100644 crates/db/.sqlx/query-3baa595eadaa8c720da7c185c5fce08f973355fd7809e2caaf966d207bcb7b4b.json rename {backend => crates/db}/.sqlx/query-3d0a1cabf2a52e9d90cdfd29c509ca89aeb448d0c1d2446c65cd43db40735e86.json (100%) rename {backend => crates/db}/.sqlx/query-3d6bd16fbce59efe30b7f67ea342e0e4ea6d1432389c02468ad79f1f742d4031.json (100%) rename {backend => crates/db}/.sqlx/query-417a8b1ff4e51de82aea0159a3b97932224dc325b23476cb84153d690227fd8b.json (100%) rename {backend => crates/db}/.sqlx/query-461cc1b0bb6fd909afc9dd2246e8526b3771cfbb0b22ae4b5d17b51af587b9e2.json (100%) create mode 100644 crates/db/.sqlx/query-4a52af0e7eedb3662a05b23e9a0c74c08d6c255ef598bb8ec3ff9a67f2344ab1.json create mode 100644 crates/db/.sqlx/query-56238751ac9cab8bd97ad787143d91f54c47089c8e732ef80c3d1e85dfba1430.json rename {backend => crates/db}/.sqlx/query-5a886026d75d515c01f347cc203c8d99dd04c61dc468e2e4c5aa548436d13834.json (100%) rename {backend => crates/db}/.sqlx/query-5ae4dea70309b2aa40d41412f70b200038176dc8c56c49eeaaa65763a1b276eb.json (100%) rename backend/.sqlx/query-93a1605f90e9672dad29b472b6ad85fa9a55ea3ffa5abcb8724b09d61be254ca.json => crates/db/.sqlx/query-62836ddbbe22ea720063ac2b8d3f5efa39bf018b01b7a1f5ff6eefc9e4c55445.json (51%) rename {backend => crates/db}/.sqlx/query-6ecfa16d0cf825aacf233544b5baf151e9adfdca26c226ad71020d291fd802d5.json (100%) rename {backend => crates/db}/.sqlx/query-72509d252c39fce77520aa816cb2acbc1fb35dc2605e7be893610599b2427f2e.json (100%) rename {backend => crates/db}/.sqlx/query-75239b2da188f749707d77f3c1544332ca70db3d6d6743b2601dc0d167536437.json (100%) rename backend/.sqlx/query-c67259be8bf4ee0cfd32167b2aa3b7fe9192809181a8171bf1c2d6df731967ae.json => crates/db/.sqlx/query-7e657b504fb7d8935fcb944f8f4646635f14e6ed9ff77d1c2225ce82e40fa03d.json (51%) rename backend/.sqlx/query-8aba98bb4d1701d1686d68371bca4edb4ba7f8b70693f86fc83860f8adda9065.json => crates/db/.sqlx/query-834bc0957cd530e4396b61311c27165b482838ff32a13c0da66b4160e170466b.json (63%) rename {backend => crates/db}/.sqlx/query-86d03eb70eef39c59296416867f2ee66c9f7cd8b7f961fbda2f89fc0a1c442c2.json (100%) create mode 100644 crates/db/.sqlx/query-8c691c79539b34f91f09e6dce51eb684840804f9279f9990cfdcb9015453d9d8.json create mode 100644 crates/db/.sqlx/query-8cc087f95fb55426ee6481bdd0f74b2083ceaf6c5cf82456a7d83c18323c5cec.json rename {backend => 
crates/db}/.sqlx/query-8f01ebd64bdcde6a090479f14810d73ba23020e76fd70854ac57f2da251702c3.json (100%) create mode 100644 crates/db/.sqlx/query-8f5d9d112659d04406c20c885f72c075b355e54836930226fc84390c5a4516f7.json rename backend/.sqlx/query-212828320e8d871ab9d83705a040b23bcf0393dc7252177fc539a74657f578ef.json => crates/db/.sqlx/query-8fcdb52af46ab995bd242392b57b0ce6848145d4769c31bed3fc8f492c070c06.json (87%) create mode 100644 crates/db/.sqlx/query-90d5b39dddf9f5c6c48cd8268f7381a2a772537c3daa1f9d800b1ef1f191f21d.json rename {backend => crates/db}/.sqlx/query-90fd607fcb2dca72239ff25e618e21e174b195991eaa33722cbf5f76da84cfab.json (100%) rename {backend => crates/db}/.sqlx/query-96036c4f9e0f48bdc5a4a4588f0c5f288ac7aaa5425cac40fc33f337e1a351f2.json (100%) create mode 100644 crates/db/.sqlx/query-97e6a03adc1c14e9ecabe7885598dcc0ea273dffea920838fc4dcc837293ba6b.json rename {backend => crates/db}/.sqlx/query-a31fff84f3b8e532fd1160447d89d700f06ae08821fee00c9a5b60492b05259c.json (100%) rename backend/.sqlx/query-92e8bdbcd80c5ff3db7a35cf79492048803ef305cbdef0d0a1fe5dc881ca8c71.json => crates/db/.sqlx/query-a500d5054ba09e64a4f98500a5c600ba66b9c919af26ae6ca79b1cc82d138158.json (64%) rename {backend => crates/db}/.sqlx/query-a5ba908419fb3e456bdd2daca41ba06cc3212ffffb8520fc7dbbcc8b60ada314.json (100%) rename {backend => crates/db}/.sqlx/query-ac5247c8d7fb86e4650c4b0eb9420031614c831b7b085083bac20c1af314c538.json (100%) rename backend/.sqlx/query-a6d2961718dbc3b1a925e549f49a159c561bef58c105529275f274b27e2eba5b.json => crates/db/.sqlx/query-acdb8488d9d698e8522a1a1a062f560857e70cf8c1dee1eaecd75b096911cb17.json (69%) rename {backend => crates/db}/.sqlx/query-b2b2c6b4d0b1a347b5c4cb63c3a46a265d4db53be9554989a814b069d0af82f2.json (100%) create mode 100644 crates/db/.sqlx/query-b8828d250bd93c1d77c97e3954b0e26db4e65e28bba23ec26e77a1faa4dcc974.json rename backend/.sqlx/query-d3b9ea1de1576af71b312924ce7f4ea8ae5dbe2ac138ea3b4470f2d5cd734846.json => crates/db/.sqlx/query-bbc3a97f21c9b6c60a64cd747843837c3af677ab5d7a1167550ab1393ac07ea9.json (52%) rename backend/.sqlx/query-58408c7a8cdeeda0bef359f1f9bd91299a339dc2b191462fc58c9736a56d5227.json => crates/db/.sqlx/query-c1b07b345d6cef9413e4dc19f139aad7fea3afb72c5104b2e2d1533825e81293.json (54%) rename backend/.sqlx/query-83d10e29f8478aff33434f9ac67068e013b888b953a2657e2bb72a6f619d04f2.json => crates/db/.sqlx/query-c1e5b46545fcef759610463d9bf902b25f18cd83d2ca8616bf3ec1c84728bf6f.json (52%) rename {backend => crates/db}/.sqlx/query-c50d2ff0b12e5bcc81e371089ee2d007e233e7db93aefba4fef08e7aa68f5ab7.json (100%) create mode 100644 crates/db/.sqlx/query-ca6acd3a57fc44e8e29e057700cee4442c0ab8b37aca0abf29fe5464c8539c6d.json rename backend/.sqlx/query-315cf28396b52c1215a53c72c57e0277d6143d8fd658f141a86d6fd0770fb539.json => crates/db/.sqlx/query-cb2d1da9c3e3ad9f09ea30165f5fe584fef35a015038e83a548edb59ecadaa18.json (64%) rename backend/.sqlx/query-9edb2c01e91fd0f0fe7b56e988c7ae0393150f50be3f419a981e035c0121dfc7.json => crates/db/.sqlx/query-cd9d629c4040d6766307998dde9926463b9e7bf03a73cf31cafe73d046579d54.json (50%) create mode 100644 crates/db/.sqlx/query-ce908743b4ad501211d530c4b25ce8ab99a94962d5aa92117a6039201ffa6c2c.json rename {backend => crates/db}/.sqlx/query-d30aa5786757f32bf2b9c5fe51a45e506c71c28c5994e430d9b0546adb15ffa2.json (100%) create mode 100644 crates/db/.sqlx/query-ecc6c9458bffcc70af47c1f55e97efcf02f105564e7d97247dac1fd704312871.json create mode 100644 crates/db/.sqlx/query-f58b737bf1deb0e8d57fca5b99423e8ba438949679816316ef446e0b7b8eb3e6.json rename 
backend/.sqlx/query-a157cf00616f703bfba21927f1eb1c9eec2a81c02da15f66efdba0b6c375de1b.json => crates/db/.sqlx/query-f9a448b2fdb1435b78a062e5ea77ab77ce31be2205887185900647b4bf49ea73.json (65%) create mode 100644 crates/db/Cargo.toml rename {backend => crates/db}/migrations/20250617183714_init.sql (100%) rename {backend => crates/db}/migrations/20250620212427_execution_processes.sql (100%) rename {backend => crates/db}/migrations/20250620214100_remove_stdout_stderr_from_task_attempts.sql (100%) rename {backend => crates/db}/migrations/20250621120000_relate_activities_to_execution_processes.sql (100%) rename {backend => crates/db}/migrations/20250623120000_executor_sessions.sql (100%) rename {backend => crates/db}/migrations/20250623130000_add_executor_type_to_execution_processes.sql (100%) rename {backend => crates/db}/migrations/20250625000000_add_dev_script_to_projects.sql (100%) rename {backend => crates/db}/migrations/20250701000000_add_branch_to_task_attempts.sql (100%) rename {backend => crates/db}/migrations/20250701000001_add_pr_tracking_to_task_attempts.sql (100%) rename {backend => crates/db}/migrations/20250701120000_add_assistant_message_to_executor_sessions.sql (100%) rename {backend => crates/db}/migrations/20250708000000_add_base_branch_to_task_attempts.sql (100%) rename {backend => crates/db}/migrations/20250709000000_add_worktree_deleted_flag.sql (100%) rename {backend => crates/db}/migrations/20250710000000_add_setup_completion.sql (100%) rename {backend => crates/db}/migrations/20250715154859_add_task_templates.sql (100%) rename {backend => crates/db}/migrations/20250716143725_add_default_templates.sql (100%) rename {backend => crates/db}/migrations/20250716161432_update_executor_names_to_kebab_case.sql (100%) rename {backend => crates/db}/migrations/20250716170000_add_parent_task_to_tasks.sql (100%) rename {backend => crates/db}/migrations/20250717000000_drop_task_attempt_activities.sql (100%) rename {backend => crates/db}/migrations/20250719000000_add_cleanup_script_to_projects.sql (100%) rename {backend => crates/db}/migrations/20250720000000_add_cleanupscript_to_process_type_constraint.sql (100%) create mode 100644 crates/db/migrations/20250726182144_update_worktree_path_to_container_ref.sql create mode 100644 crates/db/migrations/20250726210910_make_branch_optional.sql create mode 100644 crates/db/migrations/20250727124142_remove_command_from_execution_process.sql create mode 100644 crates/db/migrations/20250727150349_remove_working_directory.sql create mode 100644 crates/db/migrations/20250729162941_create_execution_process_logs.sql create mode 100644 crates/db/migrations/20250729165913_remove_stdout_and_stderr_from_execution_processes.sql create mode 100644 crates/db/migrations/20250730000000_add_executor_action_to_execution_processes.sql create mode 100644 crates/db/migrations/20250730000001_rename_process_type_to_run_reason.sql create mode 100644 crates/db/migrations/20250730124500_add_execution_process_task_attempt_index.sql create mode 100644 crates/db/migrations/20250805112332_add_executor_action_type_to_task_attempts.sql create mode 100644 crates/db/migrations/20250805122100_fix_executor_action_type_virtual_column.sql create mode 100644 crates/db/src/lib.rs rename {backend => crates/db}/src/models/execution_process.rs (58%) create mode 100644 crates/db/src/models/execution_process_logs.rs rename {backend => crates/db}/src/models/executor_session.rs (92%) rename {backend => crates/db}/src/models/mod.rs (53%) create mode 100644 crates/db/src/models/project.rs 
rename {backend => crates/db}/src/models/task.rs (83%) create mode 100644 crates/db/src/models/task_attempt.rs rename {backend => crates/db}/src/models/task_template.rs (99%) create mode 100644 crates/deployment/Cargo.toml create mode 100644 crates/deployment/src/lib.rs create mode 100644 crates/executors/Cargo.toml create mode 100644 crates/executors/src/actions/coding_agent_follow_up.rs create mode 100644 crates/executors/src/actions/coding_agent_initial.rs create mode 100644 crates/executors/src/actions/mod.rs create mode 100644 crates/executors/src/actions/script.rs create mode 100644 crates/executors/src/command.rs create mode 100644 crates/executors/src/executors/amp.rs create mode 100644 crates/executors/src/executors/claude.rs create mode 100644 crates/executors/src/executors/codex.rs create mode 100644 crates/executors/src/executors/gemini.rs create mode 100644 crates/executors/src/executors/mod.rs create mode 100644 crates/executors/src/executors/opencode.rs create mode 100644 crates/executors/src/lib.rs create mode 100644 crates/executors/src/logs/mod.rs create mode 100644 crates/executors/src/logs/plain_text_processor.rs create mode 100644 crates/executors/src/logs/stderr_processor.rs create mode 100644 crates/executors/src/logs/utils/entry_index.rs create mode 100644 crates/executors/src/logs/utils/mod.rs create mode 100644 crates/executors/src/logs/utils/patch.rs create mode 100644 crates/executors/src/stdout_dup.rs create mode 100644 crates/local-deployment/Cargo.toml create mode 100644 crates/local-deployment/src/command.rs create mode 100644 crates/local-deployment/src/container.rs create mode 100644 crates/local-deployment/src/lib.rs rename {backend => crates/server}/Cargo.toml (65%) rename {backend => crates/server}/build.rs (95%) create mode 100644 crates/server/src/bin/generate_types.rs rename {backend => crates/server}/src/bin/mcp_task_server.rs (95%) create mode 100644 crates/server/src/error.rs create mode 100644 crates/server/src/lib.rs create mode 100644 crates/server/src/main.rs rename {backend => crates/server}/src/mcp/mod.rs (100%) rename {backend => crates/server}/src/mcp/task_server.rs (96%) rename {backend => crates/server}/src/middleware/mod.rs (100%) create mode 100644 crates/server/src/middleware/model_loaders.rs create mode 100644 crates/server/src/routes/auth.rs create mode 100644 crates/server/src/routes/config.rs create mode 100644 crates/server/src/routes/events.rs create mode 100644 crates/server/src/routes/execution_processes.rs create mode 100644 crates/server/src/routes/filesystem.rs create mode 100644 crates/server/src/routes/frontend.rs rename {backend => crates/server}/src/routes/github.rs (94%) rename {backend => crates/server}/src/routes/health.rs (80%) create mode 100644 crates/server/src/routes/mod.rs rename {backend => crates/server}/src/routes/projects.rs (61%) create mode 100644 crates/server/src/routes/task_attempts.rs create mode 100644 crates/server/src/routes/task_templates.rs create mode 100644 crates/server/src/routes/tasks.rs create mode 100644 crates/services/Cargo.toml create mode 100644 crates/services/src/lib.rs rename {backend => crates/services}/src/services/analytics.rs (88%) create mode 100644 crates/services/src/services/auth.rs create mode 100644 crates/services/src/services/config/mod.rs create mode 100644 crates/services/src/services/config/versions/mod.rs create mode 100644 crates/services/src/services/config/versions/v1.rs create mode 100644 crates/services/src/services/config/versions/v2.rs create mode 100644 
crates/services/src/services/container.rs create mode 100644 crates/services/src/services/events.rs create mode 100644 crates/services/src/services/filesystem.rs create mode 100644 crates/services/src/services/filesystem_watcher.rs create mode 100644 crates/services/src/services/git.rs rename {backend => crates/services}/src/services/github_service.rs (83%) create mode 100644 crates/services/src/services/mod.rs rename backend/src/services/notification_service.rs => crates/services/src/services/notification.rs (70%) create mode 100644 crates/services/src/services/pr_monitor.rs create mode 100644 crates/services/src/services/sentry.rs rename {backend/src/utils => crates/services/src/services}/worktree_manager.rs (72%) create mode 100644 crates/utils/Cargo.toml create mode 100644 crates/utils/src/assets.rs create mode 100644 crates/utils/src/browser.rs create mode 100644 crates/utils/src/diff.rs rename backend/src/utils.rs => crates/utils/src/lib.rs (54%) create mode 100644 crates/utils/src/log_msg.rs create mode 100644 crates/utils/src/msg_store.rs rename {backend/src/utils => crates/utils/src}/path.rs (85%) create mode 100644 crates/utils/src/response.rs rename backend/src/lib.rs => crates/utils/src/sentry.rs (59%) rename {backend/src/utils => crates/utils/src}/shell.rs (100%) create mode 100644 crates/utils/src/stream_lines.rs rename {backend/src/utils => crates/utils/src}/text.rs (100%) rename frontend/src/components/{tasks/TaskDetails => NormalizedConversation}/DiffCard.tsx (100%) rename frontend/src/components/{tasks/TaskDetails => NormalizedConversation}/DisplayConversationEntry.tsx (66%) delete mode 100644 frontend/src/components/context/TaskPlanContext.ts create mode 100644 frontend/src/components/logs/LogEntryRow.tsx create mode 100644 frontend/src/components/logs/ProcessStartCard.tsx create mode 100644 frontend/src/components/logs/StderrEntry.tsx create mode 100644 frontend/src/components/logs/StdoutEntry.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/LogsTab/Conversation.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/LogsTab/ConversationEntry.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/LogsTab/NormalizedConversationViewer.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/LogsTab/Prompt.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/LogsTab/SetupScriptRunning.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/PlanTab.tsx create mode 100644 frontend/src/components/tasks/TaskDetails/ProcessCard.tsx delete mode 100644 frontend/src/components/tasks/TaskDetails/RelatedTasksTab.tsx delete mode 100644 frontend/src/hooks/use-system-info.ts create mode 100644 frontend/src/hooks/useDiffStream.ts create mode 100644 frontend/src/hooks/useEventSourceManager.ts create mode 100644 frontend/src/hooks/useJsonPatchStream.ts create mode 100644 frontend/src/hooks/useLogStream.ts delete mode 100644 frontend/src/hooks/useNormalizedConversation.ts create mode 100644 frontend/src/hooks/useProcessConversation.ts create mode 100644 frontend/src/hooks/useProcessesLogs.ts create mode 100644 frontend/src/types/logs.ts create mode 100644 frontend/src/types/tabs.ts create mode 100644 frontend/src/utils/string.ts create mode 100644 shared/old_frozen_types.ts diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 73275edd..c4edf243 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -95,7 +95,7 @@ jobs: npm version $new_version 
--no-git-tag-version --allow-same-version cd .. - cd backend && cargo set-version "$new_version" + cargo set-version --workspace "$new_version" echo "New version: $new_version" echo "new_version=$new_version" >> $GITHUB_OUTPUT @@ -105,7 +105,8 @@ jobs: run: | git config --local user.email "action@github.com" git config --local user.name "GitHub Action" - git add package.json package-lock.json npx-cli/package.json backend/Cargo.toml + git add package.json package-lock.json npx-cli/package.json + git add $(find . -name Cargo.toml) git commit -m "chore: bump version to ${{ steps.version.outputs.new_version }}" git tag -a ${{ steps.version.outputs.new_tag }} -m "Release ${{ steps.version.outputs.new_tag }}" git push @@ -221,7 +222,7 @@ jobs: - name: Build backend for target run: | - cargo build --release --target ${{ matrix.target }} -p vibe-kanban + cargo build --release --target ${{ matrix.target }} -p server cargo build --release --target ${{ matrix.target }} --bin mcp_task_server env: CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: ${{ matrix.target == 'aarch64-unknown-linux-gnu' && 'aarch64-linux-gnu-gcc' || '' }} @@ -245,10 +246,10 @@ jobs: run: | mkdir -p dist if [[ "${{ matrix.os }}" == "windows-latest-l" ]]; then - cp target/${{ matrix.target }}/release/vibe-kanban.exe dist/vibe-kanban-${{ matrix.name }}.exe + cp target/${{ matrix.target }}/release/server.exe dist/vibe-kanban-${{ matrix.name }}.exe cp target/${{ matrix.target }}/release/mcp_task_server.exe dist/vibe-kanban-mcp-${{ matrix.name }}.exe else - cp target/${{ matrix.target }}/release/vibe-kanban dist/vibe-kanban-${{ matrix.name }} + cp target/${{ matrix.target }}/release/server dist/vibe-kanban-${{ matrix.name }} cp target/${{ matrix.target }}/release/mcp_task_server dist/vibe-kanban-mcp-${{ matrix.name }} fi @@ -268,7 +269,7 @@ jobs: if: runner.os == 'macOS' uses: indygreg/apple-code-sign-action@v1 with: - input_path: target/${{ matrix.target }}/release/vibe-kanban + input_path: target/${{ matrix.target }}/release/server output_path: vibe-kanban p12_file: certificate.p12 p12_password: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5a4cfac2..3fd039ca 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -60,4 +60,4 @@ jobs: cargo fmt --all -- --check npm run generate-types:check cargo test --workspace - cargo clippy --all --all-targets --all-features -- -D warnings + cargo clippy --all --all-targets --all-features diff --git a/.gitignore b/.gitignore index 57a64ef1..d802dfe4 100644 --- a/.gitignore +++ b/.gitignore @@ -37,10 +37,6 @@ yarn-error.log* ehthumbs.db Thumbs.db -# Logs -*.log -logs/ - # Runtime data pids *.pid @@ -74,6 +70,7 @@ backend/bindings build-npm-package-codesign.sh npx-cli/dist +npx-cli/vibe-kanban-* backend/db.sqlite # Development ports file @@ -82,3 +79,5 @@ backend/db.sqlite dev_assets /frontend/.env.sentry-build-plugin .ssh + +vibe-kanban-cloud/ \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index 9b1273ae..efb8842c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -2,116 +2,128 @@ This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 
-## Development Commands +## Essential Commands -### Core Development -- `pnpm run dev` - Start both frontend (port 3000) and backend (port 3001) with live reload -- `pnpm run check` - Run cargo check and TypeScript type checking - **always run this before committing** -- `pnpm run generate-types` - Generate TypeScript types from Rust structs (run after modifying Rust types) +### Development +```bash +# Start development servers with hot reload (frontend + backend) +pnpm run dev -### Testing and Validation -- `pnpm run frontend:dev` - Start frontend development server only -- `pnpm run backend:dev` - Start Rust backend only -- `cargo test` - Run Rust unit tests from backend directory -- `cargo fmt` - Format Rust code -- `cargo clippy` - Run Rust linter +# Individual dev servers +npm run frontend:dev # Frontend only (port 3000) +npm run backend:dev # Backend only (port auto-assigned) -### Building -- `./build-npm-package.sh` - Build production package for distribution -- `cargo build --release` - Build optimized Rust binary +# Build production version +./build-npm-package.sh +``` + +### Testing & Validation +```bash +# Run all checks (frontend + backend) +npm run check + +# Frontend specific +cd frontend && npm run lint # Lint TypeScript/React code +cd frontend && npm run format:check # Check formatting +cd frontend && npx tsc --noEmit # TypeScript type checking + +# Backend specific +cargo test --workspace # Run all Rust tests +cargo test -p # Test specific crate +cargo test test_name # Run specific test +cargo fmt --all -- --check # Check Rust formatting +cargo clippy --all --all-targets --all-features -- -D warnings # Linting + +# Type generation (after modifying Rust types) +npm run generate-types # Regenerate TypeScript types from Rust +npm run generate-types:check # Verify types are up to date +``` + +### Database Operations +```bash +# SQLx migrations +sqlx migrate run # Apply migrations +sqlx database create # Create database + +# Database is auto-copied from dev_assets_seed/ on dev server start +``` ## Architecture Overview ### Tech Stack -- **Backend**: Rust with Axum web framework, SQLite + SQLX, Tokio async runtime -- **Frontend**: React 18 + TypeScript, Vite, Tailwind CSS, Radix UI -- **Package Management**: pnpm workspace monorepo -- **Type Sharing**: Rust types exported to TypeScript via `ts-rs` +- **Backend**: Rust with Axum web framework, Tokio async runtime, SQLx for database +- **Frontend**: React 18 + TypeScript + Vite, Tailwind CSS, shadcn/ui components +- **Database**: SQLite with SQLx migrations +- **Type Sharing**: ts-rs generates TypeScript types from Rust structs +- **MCP Server**: Built-in Model Context Protocol server for AI agent integration -### Core Concepts +### Project Structure +``` +crates/ +├── server/ # Axum HTTP server, API routes, MCP server +├── db/ # Database models, migrations, SQLx queries +├── executors/ # AI coding agent integrations (Claude, Gemini, etc.) +├── services/ # Business logic, GitHub, auth, git operations +├── local-deployment/ # Local deployment logic +└── utils/ # Shared utilities -**Vibe Kanban** is an AI coding agent orchestration platform that manages multiple coding agents (Claude Code, Gemini CLI, Amp, etc.) through a unified interface. +frontend/ # React application +├── src/ +│ ├── components/ # React components (TaskCard, ProjectCard, etc.) +│ ├── pages/ # Route pages +│ ├── hooks/ # Custom React hooks (useEventSourceManager, etc.) 
+│ └── lib/ # API client, utilities -**Project Structure**: -- `/backend/src/` - Rust backend with API endpoints, database, and agent executors -- `/frontend/src/` - React frontend with task management UI -- `/backend/migrations/` - SQLite database schema migrations -- `/shared-types/` - Generated TypeScript types from Rust structs +shared/types.ts # Auto-generated TypeScript types from Rust +``` -**Executor System**: Each AI agent is implemented as an executor in `/backend/src/executors/`: -- `claude.rs` - Claude Code integration -- `gemini.rs` - Google Gemini CLI -- `amp.rs` - Amp coding agent -- `dev_server.rs` - Development server management -- `echo.rs` - Test/debug executor +### Key Architectural Patterns -**Key Backend Modules**: -- `/backend/src/api/` - REST API endpoints -- `/backend/src/db/` - Database models and queries -- `/backend/src/github/` - GitHub OAuth and API integration -- `/backend/src/git/` - Git operations and worktree management -- `/backend/src/mcp/` - Model Context Protocol server implementation +1. **Event Streaming**: Server-Sent Events (SSE) for real-time updates + - Process logs stream to frontend via `/api/events/processes/:id/logs` + - Task diffs stream via `/api/events/task-attempts/:id/diff` -### Database Schema -SQLite database with core entities: -- `projects` - Coding projects with GitHub repo integration -- `tasks` - Individual tasks assigned to executors -- `processes` - Execution processes with streaming logs -- `github_users`, `github_repos` - GitHub integration data +2. **Git Worktree Management**: Each task execution gets isolated git worktree + - Managed by `WorktreeManager` service + - Automatic cleanup of orphaned worktrees -### API Architecture -- RESTful endpoints at `/api/` prefix -- WebSocket streaming for real-time task updates at `/api/stream/:process_id` -- GitHub OAuth flow with PKCE -- MCP server exposed for external tool integration +3. **Executor Pattern**: Pluggable AI agent executors + - Each executor (Claude, Gemini, etc.) implements common interface + - Actions: `coding_agent_initial`, `coding_agent_follow_up`, `script` -## Development Guidelines +4. **MCP Integration**: Vibe Kanban acts as MCP server + - Tools: `list_projects`, `list_tasks`, `create_task`, `update_task`, etc. 
+ - AI agents can manage tasks via MCP protocol -### Type management -- First ensure that `src/bin/generate_types.rs` is up to date with the types in the project -- **Always regenerate types after modifying Rust structs**: Run `pnpm run generate-types` -- Backend-first development: Define data structures in Rust, export to frontend -- Use `#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, TS)]` for shared types +### API Patterns -### Code Style -- **Rust**: Use rustfmt, follow snake_case naming, leverage tokio for async operations -- **TypeScript**: Strict mode enabled, use `@/` path aliases for imports -- **React**: Functional components with hooks, avoid class components +- REST endpoints under `/api/*` +- Frontend dev server proxies to backend (configured in vite.config.ts) +- Authentication via GitHub OAuth (device flow) +- All database queries in `crates/db/src/models/` -### Git Integration Features -- Automatic branch creation per task -- Git worktree management for concurrent development -- GitHub PR creation and monitoring -- Commit streaming and real-time git status updates +### Development Workflow -### MCP Server Integration -Built-in MCP server provides task management tools: -- `create_task`, `update_task`, `delete_task` -- `list_tasks`, `get_task`, `list_projects` -- Requires `project_id` for most operations +1. **Backend changes first**: When modifying both frontend and backend, start with backend +2. **Type generation**: Run `npm run generate-types` after modifying Rust types +3. **Database migrations**: Create in `crates/db/migrations/`, apply with `sqlx migrate run` +4. **Component patterns**: Follow existing patterns in `frontend/src/components/` -### Process Execution -- All agent executions run as managed processes with streaming logs -- Process lifecycle: queued → running → completed/failed -- Real-time updates via WebSocket connections -- Automatic cleanup of completed processes +### Testing Strategy -### Environment Configuration -- Backend runs on port 3001, frontend proxies API calls in development -- GitHub OAuth requires `GITHUB_CLIENT_ID` and `GITHUB_CLIENT_SECRET` -- Optional PostHog analytics integration -- Rust nightly toolchain required (version 2025-05-18 or later) +- **Unit tests**: Colocated with code in each crate +- **Integration tests**: In `tests/` directory of relevant crates +- **Frontend tests**: TypeScript compilation and linting only +- **CI/CD**: GitHub Actions workflow in `.github/workflows/test.yml` -## Testing Strategy -- Run `pnpm run check` to validate both Rust and TypeScript code -- Use `cargo test` for backend unit tests -- Frontend testing focuses on component integration -- Process execution testing via echo executor +### Environment Variables -## Key Dependencies -- **axum** - Web framework and routing -- **sqlx** - Database operations with compile-time query checking -- **octocrab** - GitHub API client -- **rmcp** - MCP server implementation -- **@dnd-kit** - Drag-and-drop task management -- **react-router-dom** - Frontend routing +Build-time (set when building): +- `GITHUB_CLIENT_ID`: GitHub OAuth app ID (default: Bloop AI's app) +- `POSTHOG_API_KEY`: Analytics key (optional) + +Runtime: +- `BACKEND_PORT`: Backend server port (default: auto-assign) +- `FRONTEND_PORT`: Frontend dev port (default: 3000) +- `HOST`: Backend host (default: 127.0.0.1) +- `DISABLE_WORKTREE_ORPHAN_CLEANUP`: Debug flag for worktrees \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index f02cbe3f..a9c3e024 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -1,19 +1,21 @@ [workspace] resolver = "2" -members = ["backend"] +members = ["crates/server", "crates/db", "crates/executors", "crates/services", "crates/utils", "crates/local-deployment", "crates/deployment"] [workspace.dependencies] tokio = { version = "1.0", features = ["full"] } -axum = { version = "0.7", features = ["macros"] } +axum = { version = "0.8.4", features = ["macros"] } tower-http = { version = "0.5", features = ["cors"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" anyhow = "1.0" +thiserror = "2.0.12" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } openssl-sys = { version = "0.9", features = ["vendored"] } +ts-rs = { git = "https://github.com/xazukx/ts-rs.git", branch = "use-ts-enum", features = ["uuid-impl", "chrono-impl", "no-serde-warnings"] } [profile.release] debug = true split-debuginfo = "packed" -strip = true \ No newline at end of file +strip = true diff --git a/backend/scripts/toast-notification.ps1 b/assets/scripts/toast-notification.ps1 similarity index 97% rename from backend/scripts/toast-notification.ps1 rename to assets/scripts/toast-notification.ps1 index 9719c89e..d2fc84ad 100644 --- a/backend/scripts/toast-notification.ps1 +++ b/assets/scripts/toast-notification.ps1 @@ -20,4 +20,4 @@ $Toast = [Windows.UI.Notifications.ToastNotification]::new($SerializedXml) $Toast.Tag = $AppName $Toast.Group = $AppName $Notifier = [Windows.UI.Notifications.ToastNotificationManager]::CreateToastNotifier($AppName) -$Notifier.Show($Toast) +$Notifier.Show($Toast) \ No newline at end of file diff --git a/backend/sounds/abstract-sound1.wav b/assets/sounds/abstract-sound1.wav similarity index 100% rename from backend/sounds/abstract-sound1.wav rename to assets/sounds/abstract-sound1.wav diff --git a/backend/sounds/abstract-sound2.wav b/assets/sounds/abstract-sound2.wav similarity index 100% rename from backend/sounds/abstract-sound2.wav rename to assets/sounds/abstract-sound2.wav diff --git a/backend/sounds/abstract-sound3.wav b/assets/sounds/abstract-sound3.wav similarity index 100% rename from backend/sounds/abstract-sound3.wav rename to assets/sounds/abstract-sound3.wav diff --git a/backend/sounds/abstract-sound4.wav b/assets/sounds/abstract-sound4.wav similarity index 100% rename from backend/sounds/abstract-sound4.wav rename to assets/sounds/abstract-sound4.wav diff --git a/backend/sounds/cow-mooing.wav b/assets/sounds/cow-mooing.wav similarity index 100% rename from backend/sounds/cow-mooing.wav rename to assets/sounds/cow-mooing.wav diff --git a/backend/sounds/phone-vibration.wav b/assets/sounds/phone-vibration.wav similarity index 100% rename from backend/sounds/phone-vibration.wav rename to assets/sounds/phone-vibration.wav diff --git a/backend/sounds/rooster.wav b/assets/sounds/rooster.wav similarity index 100% rename from backend/sounds/rooster.wav rename to assets/sounds/rooster.wav diff --git a/backend/.sqlx/query-01b7e2bac1261d8be3d03c03df3e5220590da6c31c77f161074fc62752d63881.json b/backend/.sqlx/query-01b7e2bac1261d8be3d03c03df3e5220590da6c31c77f161074fc62752d63881.json deleted file mode 100644 index c4e971c3..00000000 --- a/backend/.sqlx/query-01b7e2bac1261d8be3d03c03df3e5220590da6c31c77f161074fc62752d63881.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE executor_sessions\n SET session_id = $1, updated_at = datetime('now')\n WHERE execution_process_id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - 
"hash": "01b7e2bac1261d8be3d03c03df3e5220590da6c31c77f161074fc62752d63881" -} diff --git a/backend/.sqlx/query-1c7b06ba1e112abf6b945a2ff08a0b40ec23f3738c2e7399f067b558cf8d490e.json b/backend/.sqlx/query-1c7b06ba1e112abf6b945a2ff08a0b40ec23f3738c2e7399f067b558cf8d490e.json deleted file mode 100644 index 3f49ea83..00000000 --- a/backend/.sqlx/query-1c7b06ba1e112abf6b945a2ff08a0b40ec23f3738c2e7399f067b558cf8d490e.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE task_attempts SET setup_completed_at = datetime('now'), updated_at = datetime('now') WHERE id = ?", - "describe": { - "columns": [], - "parameters": { - "Right": 1 - }, - "nullable": [] - }, - "hash": "1c7b06ba1e112abf6b945a2ff08a0b40ec23f3738c2e7399f067b558cf8d490e" -} diff --git a/backend/.sqlx/query-1f619f01f46859a64ded531dd0ef61abacfe62e758abe7030a6aa745140b95ca.json b/backend/.sqlx/query-1f619f01f46859a64ded531dd0ef61abacfe62e758abe7030a6aa745140b95ca.json deleted file mode 100644 index faa7786f..00000000 --- a/backend/.sqlx/query-1f619f01f46859a64ded531dd0ef61abacfe62e758abe7030a6aa745140b95ca.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n process_type as \"process_type!: ExecutionProcessType\",\n executor_type,\n status as \"status!: ExecutionProcessStatus\",\n command, \n args, \n working_directory, \n stdout, \n stderr, \n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE status = 'running' \n ORDER BY created_at ASC", - "describe": { - "columns": [ - { - "name": "id!: Uuid", - "ordinal": 0, - "type_info": "Blob" - }, - { - "name": "task_attempt_id!: Uuid", - "ordinal": 1, - "type_info": "Blob" - }, - { - "name": "process_type!: ExecutionProcessType", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "executor_type", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "status!: ExecutionProcessStatus", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "command", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "args", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "working_directory", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "stdout", - "ordinal": 8, - "type_info": "Text" - }, - { - "name": "stderr", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "exit_code", - "ordinal": 10, - "type_info": "Integer" - }, - { - "name": "started_at!: DateTime", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "completed_at?: DateTime", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "created_at!: DateTime", - "ordinal": 13, - "type_info": "Text" - }, - { - "name": "updated_at!: DateTime", - "ordinal": 14, - "type_info": "Text" - } - ], - "parameters": { - "Right": 0 - }, - "nullable": [ - true, - false, - false, - true, - false, - false, - true, - false, - true, - true, - true, - false, - true, - false, - false - ] - }, - "hash": "1f619f01f46859a64ded531dd0ef61abacfe62e758abe7030a6aa745140b95ca" -} diff --git a/backend/.sqlx/query-1fca1ce14b4b20205364cd1f1f45ebe1d2e30cd745e59e189d56487b5639dfbb.json b/backend/.sqlx/query-1fca1ce14b4b20205364cd1f1f45ebe1d2e30cd745e59e189d56487b5639dfbb.json deleted file mode 100644 index c0b46f85..00000000 --- a/backend/.sqlx/query-1fca1ce14b4b20205364cd1f1f45ebe1d2e30cd745e59e189d56487b5639dfbb.json +++ /dev/null @@ 
-1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE task_attempts SET pr_status = $1, pr_merged_at = $2, merge_commit = $3, updated_at = datetime('now') WHERE id = $4", - "describe": { - "columns": [], - "parameters": { - "Right": 4 - }, - "nullable": [] - }, - "hash": "1fca1ce14b4b20205364cd1f1f45ebe1d2e30cd745e59e189d56487b5639dfbb" -} diff --git a/backend/.sqlx/query-36c9e3dd10648e94b949db5c91a774ecb1e10a899ef95da74066eccedca4d8b2.json b/backend/.sqlx/query-36c9e3dd10648e94b949db5c91a774ecb1e10a899ef95da74066eccedca4d8b2.json deleted file mode 100644 index 1c6fc8af..00000000 --- a/backend/.sqlx/query-36c9e3dd10648e94b949db5c91a774ecb1e10a899ef95da74066eccedca4d8b2.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE execution_processes SET stderr = COALESCE(stderr, '') || $1, updated_at = datetime('now') WHERE id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "36c9e3dd10648e94b949db5c91a774ecb1e10a899ef95da74066eccedca4d8b2" -} diff --git a/backend/.sqlx/query-412bacd3477d86369082e90f52240407abce436cb81292d42b2dbe1e5c18eea1.json b/backend/.sqlx/query-412bacd3477d86369082e90f52240407abce436cb81292d42b2dbe1e5c18eea1.json deleted file mode 100644 index 7a6a9594..00000000 --- a/backend/.sqlx/query-412bacd3477d86369082e90f52240407abce436cb81292d42b2dbe1e5c18eea1.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT \n ep.id as \"id!: Uuid\", \n ep.task_attempt_id as \"task_attempt_id!: Uuid\", \n ep.process_type as \"process_type!: ExecutionProcessType\",\n ep.executor_type,\n ep.status as \"status!: ExecutionProcessStatus\",\n ep.command, \n ep.args, \n ep.working_directory, \n ep.stdout, \n ep.stderr, \n ep.exit_code,\n ep.started_at as \"started_at!: DateTime\",\n ep.completed_at as \"completed_at?: DateTime\",\n ep.created_at as \"created_at!: DateTime\", \n ep.updated_at as \"updated_at!: DateTime\"\n FROM execution_processes ep\n JOIN task_attempts ta ON ep.task_attempt_id = ta.id\n JOIN tasks t ON ta.task_id = t.id\n WHERE ep.status = 'running' \n AND ep.process_type = 'devserver'\n AND t.project_id = $1\n ORDER BY ep.created_at ASC", - "describe": { - "columns": [ - { - "name": "id!: Uuid", - "ordinal": 0, - "type_info": "Blob" - }, - { - "name": "task_attempt_id!: Uuid", - "ordinal": 1, - "type_info": "Blob" - }, - { - "name": "process_type!: ExecutionProcessType", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "executor_type", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "status!: ExecutionProcessStatus", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "command", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "args", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "working_directory", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "stdout", - "ordinal": 8, - "type_info": "Text" - }, - { - "name": "stderr", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "exit_code", - "ordinal": 10, - "type_info": "Integer" - }, - { - "name": "started_at!: DateTime", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "completed_at?: DateTime", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "created_at!: DateTime", - "ordinal": 13, - "type_info": "Text" - }, - { - "name": "updated_at!: DateTime", - "ordinal": 14, - "type_info": "Text" - } - ], - "parameters": { - "Right": 1 - }, - "nullable": [ - true, - false, - false, - true, - false, - false, - true, - false, - true, - true, - true, - 
false, - true, - false, - false - ] - }, - "hash": "412bacd3477d86369082e90f52240407abce436cb81292d42b2dbe1e5c18eea1" -} diff --git a/backend/.sqlx/query-5b902137b11022d2e1a5c4f6a9c83fec1a856c6a710aff831abd2382ede76b43.json b/backend/.sqlx/query-5b902137b11022d2e1a5c4f6a9c83fec1a856c6a710aff831abd2382ede76b43.json deleted file mode 100644 index 48968a53..00000000 --- a/backend/.sqlx/query-5b902137b11022d2e1a5c4f6a9c83fec1a856c6a710aff831abd2382ede76b43.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE task_attempts SET worktree_path = $1, worktree_deleted = FALSE, setup_completed_at = NULL, updated_at = datetime('now') WHERE id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "5b902137b11022d2e1a5c4f6a9c83fec1a856c6a710aff831abd2382ede76b43" -} diff --git a/backend/.sqlx/query-5ed1238e52e59bb5f76c0f153fd99a14093f7ce2585bf9843585608f17ec575b.json b/backend/.sqlx/query-5ed1238e52e59bb5f76c0f153fd99a14093f7ce2585bf9843585608f17ec575b.json deleted file mode 100644 index b8eeb4f0..00000000 --- a/backend/.sqlx/query-5ed1238e52e59bb5f76c0f153fd99a14093f7ce2585bf9843585608f17ec575b.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "db_name": "SQLite", - "query": "INSERT INTO execution_processes (\n id, task_attempt_id, process_type, executor_type, status, command, args, \n working_directory, stdout, stderr, exit_code, started_at, \n completed_at, created_at, updated_at\n ) \n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) \n RETURNING \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n process_type as \"process_type!: ExecutionProcessType\",\n executor_type,\n status as \"status!: ExecutionProcessStatus\",\n command, \n args, \n working_directory, \n stdout, \n stderr, \n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"", - "describe": { - "columns": [ - { - "name": "id!: Uuid", - "ordinal": 0, - "type_info": "Blob" - }, - { - "name": "task_attempt_id!: Uuid", - "ordinal": 1, - "type_info": "Blob" - }, - { - "name": "process_type!: ExecutionProcessType", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "executor_type", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "status!: ExecutionProcessStatus", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "command", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "args", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "working_directory", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "stdout", - "ordinal": 8, - "type_info": "Text" - }, - { - "name": "stderr", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "exit_code", - "ordinal": 10, - "type_info": "Integer" - }, - { - "name": "started_at!: DateTime", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "completed_at?: DateTime", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "created_at!: DateTime", - "ordinal": 13, - "type_info": "Text" - }, - { - "name": "updated_at!: DateTime", - "ordinal": 14, - "type_info": "Text" - } - ], - "parameters": { - "Right": 15 - }, - "nullable": [ - true, - false, - false, - true, - false, - false, - true, - false, - true, - true, - true, - false, - true, - false, - false - ] - }, - "hash": "5ed1238e52e59bb5f76c0f153fd99a14093f7ce2585bf9843585608f17ec575b" -} diff --git 
a/backend/.sqlx/query-8a67b3b3337248f06a57bdf8a908f7ef23177431eaed82dc08c94c3e5944340e.json b/backend/.sqlx/query-8a67b3b3337248f06a57bdf8a908f7ef23177431eaed82dc08c94c3e5944340e.json deleted file mode 100644 index 996a68f1..00000000 --- a/backend/.sqlx/query-8a67b3b3337248f06a57bdf8a908f7ef23177431eaed82dc08c94c3e5944340e.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE executor_sessions \n SET summary = $1, updated_at = datetime('now') \n WHERE execution_process_id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "8a67b3b3337248f06a57bdf8a908f7ef23177431eaed82dc08c94c3e5944340e" -} diff --git a/backend/.sqlx/query-9472c8fb477958167f5fae40b85ac44252468c5226b2cdd7770f027332eed6d7.json b/backend/.sqlx/query-9472c8fb477958167f5fae40b85ac44252468c5226b2cdd7770f027332eed6d7.json deleted file mode 100644 index 98d4db3b..00000000 --- a/backend/.sqlx/query-9472c8fb477958167f5fae40b85ac44252468c5226b2cdd7770f027332eed6d7.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n process_type as \"process_type!: ExecutionProcessType\",\n executor_type,\n status as \"status!: ExecutionProcessStatus\",\n command, \n args, \n working_directory, \n stdout, \n stderr, \n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE task_attempt_id = $1 \n ORDER BY created_at ASC", - "describe": { - "columns": [ - { - "name": "id!: Uuid", - "ordinal": 0, - "type_info": "Blob" - }, - { - "name": "task_attempt_id!: Uuid", - "ordinal": 1, - "type_info": "Blob" - }, - { - "name": "process_type!: ExecutionProcessType", - "ordinal": 2, - "type_info": "Text" - }, - { - "name": "executor_type", - "ordinal": 3, - "type_info": "Text" - }, - { - "name": "status!: ExecutionProcessStatus", - "ordinal": 4, - "type_info": "Text" - }, - { - "name": "command", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "args", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "working_directory", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "stdout", - "ordinal": 8, - "type_info": "Text" - }, - { - "name": "stderr", - "ordinal": 9, - "type_info": "Text" - }, - { - "name": "exit_code", - "ordinal": 10, - "type_info": "Integer" - }, - { - "name": "started_at!: DateTime", - "ordinal": 11, - "type_info": "Text" - }, - { - "name": "completed_at?: DateTime", - "ordinal": 12, - "type_info": "Text" - }, - { - "name": "created_at!: DateTime", - "ordinal": 13, - "type_info": "Text" - }, - { - "name": "updated_at!: DateTime", - "ordinal": 14, - "type_info": "Text" - } - ], - "parameters": { - "Right": 1 - }, - "nullable": [ - true, - false, - false, - true, - false, - false, - true, - false, - true, - true, - true, - false, - true, - false, - false - ] - }, - "hash": "9472c8fb477958167f5fae40b85ac44252468c5226b2cdd7770f027332eed6d7" -} diff --git a/backend/.sqlx/query-c614e6056b244ca07f1b9d44e7edc9d5819225c6f8d9e077070c6e518a17f50b.json b/backend/.sqlx/query-c614e6056b244ca07f1b9d44e7edc9d5819225c6f8d9e077070c6e518a17f50b.json deleted file mode 100644 index c906387e..00000000 --- a/backend/.sqlx/query-c614e6056b244ca07f1b9d44e7edc9d5819225c6f8d9e077070c6e518a17f50b.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "DELETE FROM tasks WHERE id = $1 
AND project_id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "c614e6056b244ca07f1b9d44e7edc9d5819225c6f8d9e077070c6e518a17f50b" -} diff --git a/backend/.sqlx/query-d2d0a1b985ebbca6a2b3e882a221a219f3199890fa640afc946ef1a792d6d8de.json b/backend/.sqlx/query-d2d0a1b985ebbca6a2b3e882a221a219f3199890fa640afc946ef1a792d6d8de.json deleted file mode 100644 index 345271c9..00000000 --- a/backend/.sqlx/query-d2d0a1b985ebbca6a2b3e882a221a219f3199890fa640afc946ef1a792d6d8de.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE tasks SET status = $3, updated_at = CURRENT_TIMESTAMP WHERE id = $1 AND project_id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 3 - }, - "nullable": [] - }, - "hash": "d2d0a1b985ebbca6a2b3e882a221a219f3199890fa640afc946ef1a792d6d8de" -} diff --git a/backend/.sqlx/query-ed8456646fa69ddd412441955f06ff22bfb790f29466450735e0b8bb1bc4ec94.json b/backend/.sqlx/query-ed8456646fa69ddd412441955f06ff22bfb790f29466450735e0b8bb1bc4ec94.json deleted file mode 100644 index 896d7278..00000000 --- a/backend/.sqlx/query-ed8456646fa69ddd412441955f06ff22bfb790f29466450735e0b8bb1bc4ec94.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE execution_processes SET stdout = COALESCE(stdout, '') || $1, updated_at = datetime('now') WHERE id = $2", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "ed8456646fa69ddd412441955f06ff22bfb790f29466450735e0b8bb1bc4ec94" -} diff --git a/backend/src/app_state.rs b/backend/src/app_state.rs deleted file mode 100644 index c4598798..00000000 --- a/backend/src/app_state.rs +++ /dev/null @@ -1,240 +0,0 @@ -use std::{collections::HashMap, path::PathBuf, sync::Arc}; - -use tokio::sync::{Mutex, RwLock as TokioRwLock}; -use uuid::Uuid; - -use crate::{ - command_runner, - models::Environment, - services::{generate_user_id, AnalyticsConfig, AnalyticsService}, -}; - -#[derive(Debug)] -pub enum ExecutionType { - SetupScript, - CleanupScript, - CodingAgent, - DevServer, -} - -#[derive(Debug)] -pub struct RunningExecution { - pub task_attempt_id: Uuid, - pub _execution_type: ExecutionType, - pub child: command_runner::CommandProcess, -} - -#[derive(Debug, Clone)] -pub struct AppState { - running_executions: Arc>>, - pub db_pool: sqlx::SqlitePool, - config: Arc>, - pub analytics: Arc>, - user_id: String, - pub mode: Environment, -} - -impl AppState { - pub async fn new( - db_pool: sqlx::SqlitePool, - config: Arc>, - mode: Environment, - ) -> Self { - // Initialize analytics with user preferences - let user_enabled = { - let config_guard = config.read().await; - config_guard.analytics_enabled.unwrap_or(true) - }; - - let analytics_config = AnalyticsConfig::new(user_enabled); - let analytics = Arc::new(TokioRwLock::new(AnalyticsService::new(analytics_config))); - - Self { - running_executions: Arc::new(Mutex::new(HashMap::new())), - db_pool, - config, - analytics, - user_id: generate_user_id(), - mode, - } - } - - pub async fn update_analytics_config(&self, user_enabled: bool) { - // Check if analytics was disabled before this update - let was_analytics_disabled = { - let analytics = self.analytics.read().await; - !analytics.is_enabled() - }; - - let new_config = AnalyticsConfig::new(user_enabled); - let new_service = AnalyticsService::new(new_config); - let mut analytics = self.analytics.write().await; - *analytics = new_service; - - // If analytics was disabled and is now enabled, fire a 
session_start event - if was_analytics_disabled && analytics.is_enabled() { - analytics.track_event(&self.user_id, "session_start", None); - } - } - - // Running executions getters - pub async fn has_running_execution(&self, attempt_id: Uuid) -> bool { - let executions = self.running_executions.lock().await; - executions - .values() - .any(|exec| exec.task_attempt_id == attempt_id) - } - - pub async fn get_running_executions_for_monitor(&self) -> Vec<(Uuid, Uuid, bool, Option)> { - let mut executions = self.running_executions.lock().await; - let mut completed_executions = Vec::new(); - - for (execution_id, running_exec) in executions.iter_mut() { - match running_exec.child.try_wait().await { - Ok(Some(status)) => { - let success = status.success(); - let exit_code = status.code().map(|c| c as i64); - completed_executions.push(( - *execution_id, - running_exec.task_attempt_id, - success, - exit_code, - )); - } - Ok(None) => { - // Still running - } - Err(e) => { - tracing::error!("Error checking process status: {}", e); - completed_executions.push(( - *execution_id, - running_exec.task_attempt_id, - false, - None, - )); - } - } - } - - // Remove completed executions from the map - for (execution_id, _, _, _) in &completed_executions { - executions.remove(execution_id); - } - - completed_executions - } - - // Running executions setters - pub async fn add_running_execution(&self, execution_id: Uuid, execution: RunningExecution) { - let mut executions = self.running_executions.lock().await; - executions.insert(execution_id, execution); - } - - pub async fn stop_running_execution_by_id( - &self, - execution_id: Uuid, - ) -> Result> { - let mut executions = self.running_executions.lock().await; - let Some(exec) = executions.get_mut(&execution_id) else { - return Ok(false); - }; - - // Kill the process using CommandRunner's kill method - exec.child - .kill() - .await - .map_err(|e| Box::new(e) as Box)?; - - // only NOW remove it - executions.remove(&execution_id); - Ok(true) - } - - // Config getters - pub async fn get_sound_alerts_enabled(&self) -> bool { - let config = self.config.read().await; - config.sound_alerts - } - - pub async fn get_push_notifications_enabled(&self) -> bool { - let config = self.config.read().await; - config.push_notifications - } - - pub async fn get_sound_file(&self) -> crate::models::config::SoundFile { - let config = self.config.read().await; - config.sound_file.clone() - } - - pub fn get_config(&self) -> &Arc> { - &self.config - } - - pub async fn track_analytics_event( - &self, - event_name: &str, - properties: Option, - ) { - let analytics = self.analytics.read().await; - if analytics.is_enabled() { - analytics.track_event(&self.user_id, event_name, properties); - } else { - tracing::debug!("Analytics disabled, skipping event: {}", event_name); - } - } - - pub async fn update_sentry_scope(&self) { - let config = self.get_config().read().await; - let username = config.github.username.clone(); - let email = config.github.primary_email.clone(); - drop(config); - - let sentry_user = if username.is_some() || email.is_some() { - sentry::User { - id: Some(self.user_id.clone()), - username, - email, - ..Default::default() - } - } else { - sentry::User { - id: Some(self.user_id.clone()), - ..Default::default() - } - }; - - sentry::configure_scope(|scope| { - scope.set_user(Some(sentry_user)); - }); - } - - /// Get the workspace directory path, creating it if it doesn't exist in cloud mode - pub async fn get_workspace_path( - &self, - ) -> Result> { - if 
!self.mode.is_cloud() { - return Err("Workspace directory only available in cloud mode".into()); - } - - let workspace_path = { - let config = self.config.read().await; - match &config.workspace_dir { - Some(dir) => PathBuf::from(dir), - None => { - // Use default workspace directory - let home_dir = dirs::home_dir().ok_or("Could not find home directory")?; - home_dir.join(".vibe-kanban").join("projects") - } - } - }; - - // Create the workspace directory if it doesn't exist - if !workspace_path.exists() { - std::fs::create_dir_all(&workspace_path) - .map_err(|e| format!("Failed to create workspace directory: {}", e))?; - tracing::info!("Created workspace directory: {}", workspace_path.display()); - } - - Ok(workspace_path) - } -} diff --git a/backend/src/bin/cloud_runner.rs b/backend/src/bin/cloud_runner.rs deleted file mode 100644 index 4a40a9ec..00000000 --- a/backend/src/bin/cloud_runner.rs +++ /dev/null @@ -1,401 +0,0 @@ -use std::{collections::HashMap, sync::Arc}; - -use axum::{ - body::Body, - extract::{Path, State}, - http::StatusCode, - response::{Json, Response}, - routing::{delete, get, post}, - Router, -}; -use serde::Serialize; -use tokio::sync::Mutex; -use tokio_util::io::ReaderStream; -use tracing_subscriber::prelude::*; -use uuid::Uuid; -use vibe_kanban::command_runner::{CommandProcess, CommandRunner, CommandRunnerArgs}; - -// Structure to hold process and its streams -struct ProcessEntry { - process: CommandProcess, - // Store the actual stdout/stderr streams for direct streaming - stdout_stream: Option>, - stderr_stream: Option>, - completed: Arc>, -} - -impl std::fmt::Debug for ProcessEntry { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ProcessEntry") - .field("process", &self.process) - .field("stdout_stream", &self.stdout_stream.is_some()) - .field("stderr_stream", &self.stderr_stream.is_some()) - .field("completed", &self.completed) - .finish() - } -} - -// Application state to manage running processes -#[derive(Clone)] -struct AppState { - processes: Arc>>, -} - -// Response type for API responses -#[derive(Debug, Serialize)] -struct ApiResponse { - success: bool, - data: Option, - error: Option, -} - -impl ApiResponse { - fn success(data: T) -> Self { - Self { - success: true, - data: Some(data), - error: None, - } - } - - #[allow(dead_code)] - fn error(message: String) -> Self { - Self { - success: false, - data: None, - error: Some(message), - } - } -} - -// Response type for command creation -#[derive(Debug, Serialize)] -struct CreateCommandResponse { - process_id: String, -} - -// Response type for process status -#[derive(Debug, Serialize)] -struct ProcessStatusResponse { - process_id: String, - running: bool, - exit_code: Option, - success: Option, -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Initialize tracing - tracing_subscriber::registry() - .with( - tracing_subscriber::EnvFilter::try_from_default_env() - .unwrap_or_else(|_| "cloud_runner=info".into()), - ) - .with(tracing_subscriber::fmt::layer()) - .init(); - - // Create application state - let app_state = AppState { - processes: Arc::new(Mutex::new(HashMap::new())), - }; - - // Build router - let app = Router::new() - .route("/health", get(health_check)) - .route("/commands", post(create_command)) - .route("/commands/:process_id", delete(kill_command)) - .route("/commands/:process_id/status", get(get_process_status)) - .route("/commands/:process_id/stdout", get(get_process_stdout)) - .route("/commands/:process_id/stderr", 
get(get_process_stderr)) - .with_state(app_state); - - // Get port from environment or default to 8000 - let port = std::env::var("PORT").unwrap_or_else(|_| "8000".to_string()); - let addr = format!("0.0.0.0:{}", port); - - tracing::info!("Cloud Runner server starting on {}", addr); - - // Start the server - let listener = tokio::net::TcpListener::bind(&addr).await?; - axum::serve(listener, app).await?; - - Ok(()) -} - -// Health check endpoint -async fn health_check() -> Json> { - Json(ApiResponse::success("Cloud Runner is healthy".to_string())) -} - -// Create and start a new command -async fn create_command( - State(state): State, - Json(request): Json, -) -> Result>, StatusCode> { - tracing::info!("Creating command: {} {:?}", request.command, request.args); - - // Create a local command runner from the request - let runner = CommandRunner::from_args(request); - - // Start the process - let mut process = match runner.start().await { - Ok(process) => process, - Err(e) => { - tracing::error!("Failed to start command: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Generate unique process ID - let process_id = Uuid::new_v4().to_string(); - - // Create completion flag - let completed = Arc::new(Mutex::new(false)); - - // Get the streams from the process - we'll store them directly - let mut streams = match process.stream().await { - Ok(streams) => streams, - Err(e) => { - tracing::error!("Failed to get process streams: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Extract the streams for direct use - let stdout_stream = streams.stdout.take(); - let stderr_stream = streams.stderr.take(); - - // Spawn a task to monitor process completion - { - let process_id_for_completion = process_id.clone(); - let completed_flag = completed.clone(); - let processes_ref = state.processes.clone(); - tokio::spawn(async move { - // Wait for the process to complete - if let Ok(mut processes) = processes_ref.try_lock() { - if let Some(entry) = processes.get_mut(&process_id_for_completion) { - let _ = entry.process.wait().await; - *completed_flag.lock().await = true; - tracing::debug!("Marked process {} as completed", process_id_for_completion); - } - } - }); - } - - // Create process entry - let entry = ProcessEntry { - process, - stdout_stream, - stderr_stream, - completed: completed.clone(), - }; - - // Store the process entry - { - let mut processes = state.processes.lock().await; - processes.insert(process_id.clone(), entry); - } - - tracing::info!("Command started with process_id: {}", process_id); - - Ok(Json(ApiResponse::success(CreateCommandResponse { - process_id, - }))) -} - -// Kill a running command -async fn kill_command( - State(state): State, - Path(process_id): Path, -) -> Result>, StatusCode> { - tracing::info!("Killing command with process_id: {}", process_id); - - let mut processes = state.processes.lock().await; - - if let Some(mut entry) = processes.remove(&process_id) { - // First check if the process has already finished - match entry.process.status().await { - Ok(Some(_)) => { - // Process already finished, consider kill successful - tracing::info!( - "Process {} already completed, kill considered successful", - process_id - ); - Ok(Json(ApiResponse::success( - "Process was already completed".to_string(), - ))) - } - Ok(None) => { - // Process still running, attempt to kill - match entry.process.kill().await { - Ok(()) => { - tracing::info!("Successfully killed process: {}", process_id); - Ok(Json(ApiResponse::success( - "Process 
killed successfully".to_string(), - ))) - } - Err(e) => { - tracing::error!("Failed to kill process {}: {}", process_id, e); - - // Check if it's a "No such process" error (process finished during kill) - if e.to_string().contains("No such process") { - tracing::info!("Process {} finished during kill attempt", process_id); - Ok(Json(ApiResponse::success( - "Process finished during kill attempt".to_string(), - ))) - } else { - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } - } - } - Err(e) => { - tracing::error!("Failed to check status for process {}: {}", process_id, e); - // Still attempt to kill - match entry.process.kill().await { - Ok(()) => { - tracing::info!("Successfully killed process: {}", process_id); - Ok(Json(ApiResponse::success( - "Process killed successfully".to_string(), - ))) - } - Err(e) => { - tracing::error!("Failed to kill process {}: {}", process_id, e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } - } - } - } else { - tracing::warn!("Process not found: {}", process_id); - Err(StatusCode::NOT_FOUND) - } -} - -// Get status of a running command -async fn get_process_status( - State(state): State, - Path(process_id): Path, -) -> Result>, StatusCode> { - tracing::info!("Getting status for process_id: {}", process_id); - - let mut processes = state.processes.lock().await; - - if let Some(entry) = processes.get_mut(&process_id) { - match entry.process.status().await { - Ok(Some(exit_status)) => { - // Process has completed - let response = ProcessStatusResponse { - process_id: process_id.clone(), - running: false, - exit_code: exit_status.code(), - success: Some(exit_status.success()), - }; - Ok(Json(ApiResponse::success(response))) - } - Ok(None) => { - // Process is still running - let response = ProcessStatusResponse { - process_id: process_id.clone(), - running: true, - exit_code: None, - success: None, - }; - Ok(Json(ApiResponse::success(response))) - } - Err(e) => { - tracing::error!("Failed to get status for process {}: {}", process_id, e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } - } else { - tracing::warn!("Process not found: {}", process_id); - Err(StatusCode::NOT_FOUND) - } -} - -// Get stdout stream for a running command (direct streaming, no buffering) -async fn get_process_stdout( - State(state): State, - Path(process_id): Path, -) -> Result { - tracing::info!( - "Starting direct stdout stream for process_id: {}", - process_id - ); - - let mut processes = state.processes.lock().await; - - if let Some(entry) = processes.get_mut(&process_id) { - // Take ownership of stdout directly for streaming - if let Some(stdout) = entry.stdout_stream.take() { - drop(processes); // Release the lock early - - // Convert the AsyncRead (stdout) directly into an HTTP stream - let stream = ReaderStream::new(stdout); - - let response = Response::builder() - .header("content-type", "application/octet-stream") - .header("cache-control", "no-cache") - .body(Body::from_stream(stream)) - .map_err(|e| { - tracing::error!("Failed to build response stream: {}", e); - StatusCode::INTERNAL_SERVER_ERROR - })?; - - Ok(response) - } else { - tracing::error!( - "Stdout already taken or unavailable for process {}", - process_id - ); - Err(StatusCode::GONE) - } - } else { - tracing::warn!("Process not found for stdout: {}", process_id); - Err(StatusCode::NOT_FOUND) - } -} - -// Get stderr stream for a running command (direct streaming, no buffering) -async fn get_process_stderr( - State(state): State, - Path(process_id): Path, -) -> Result { - tracing::info!( - "Starting direct 
stderr stream for process_id: {}", - process_id - ); - - let mut processes = state.processes.lock().await; - - if let Some(entry) = processes.get_mut(&process_id) { - // Take ownership of stderr directly for streaming - if let Some(stderr) = entry.stderr_stream.take() { - drop(processes); // Release the lock early - - // Convert the AsyncRead (stderr) directly into an HTTP stream - let stream = ReaderStream::new(stderr); - - let response = Response::builder() - .header("content-type", "application/octet-stream") - .header("cache-control", "no-cache") - .body(Body::from_stream(stream)) - .map_err(|e| { - tracing::error!("Failed to build response stream: {}", e); - StatusCode::INTERNAL_SERVER_ERROR - })?; - - Ok(response) - } else { - tracing::error!( - "Stderr already taken or unavailable for process {}", - process_id - ); - Err(StatusCode::GONE) - } - } else { - tracing::warn!("Process not found for stderr: {}", process_id); - Err(StatusCode::NOT_FOUND) - } -} diff --git a/backend/src/bin/generate_types.rs b/backend/src/bin/generate_types.rs deleted file mode 100644 index dd6f02cb..00000000 --- a/backend/src/bin/generate_types.rs +++ /dev/null @@ -1,204 +0,0 @@ -use std::{env, fs, path::Path}; - -use ts_rs::TS; -// in [build-dependencies] - -fn generate_constants() -> String { - r#"// Generated constants -export const EXECUTOR_TYPES: string[] = [ - "echo", - "claude", - "claude-plan", - "amp", - "gemini", - "charm-opencode", - "claude-code-router", - "sst-opencode", - "aider", - "codex", -]; - -export const EDITOR_TYPES: EditorType[] = [ - "vscode", - "cursor", - "windsurf", - "intellij", - "zed", - "custom" -]; - -export const EXECUTOR_LABELS: Record = { - "echo": "Echo (Test Mode)", - "claude": "Claude Code", - "claude-plan": "Claude Code Plan", - "amp": "Amp", - "gemini": "Gemini", - "charm-opencode": "Charm Opencode", - "claude-code-router": "Claude Code Router", - "sst-opencode": "SST Opencode", - "aider": "Aider", - "codex": "Codex" -}; - -export const EDITOR_LABELS: Record = { - "vscode": "VS Code", - "cursor": "Cursor", - "windsurf": "Windsurf", - "intellij": "IntelliJ IDEA", - "zed": "Zed", - "custom": "Custom" -}; - -export const MCP_SUPPORTED_EXECUTORS: string[] = [ - "claude", - "amp", - "gemini", - "sst-opencode", - "charm-opencode", - "claude-code-router" -]; - -export const SOUND_FILES: SoundFile[] = [ - "abstract-sound1", - "abstract-sound2", - "abstract-sound3", - "abstract-sound4", - "cow-mooing", - "phone-vibration", - "rooster" -]; - -export const SOUND_LABELS: Record = { - "abstract-sound1": "Gentle Chime", - "abstract-sound2": "Soft Bell", - "abstract-sound3": "Digital Tone", - "abstract-sound4": "Subtle Alert", - "cow-mooing": "Cow Mooing", - "phone-vibration": "Phone Vibration", - "rooster": "Rooster Call" -};"# - .to_string() -} - -fn generate_types_content() -> String { - // 4. Friendly banner - const HEADER: &str = - "// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs).\n\ - // Do not edit this file manually.\n\ - // Auto-generated from Rust backend types using ts-rs\n\n"; - - // 5. 
Add `export` if it's missing, then join - let decls = [ - vibe_kanban::models::ApiResponse::<()>::decl(), - vibe_kanban::models::config::Config::decl(), - vibe_kanban::models::config::EnvironmentInfo::decl(), - vibe_kanban::models::config::Environment::decl(), - vibe_kanban::models::config::ThemeMode::decl(), - vibe_kanban::models::config::EditorConfig::decl(), - vibe_kanban::models::config::GitHubConfig::decl(), - vibe_kanban::models::config::EditorType::decl(), - vibe_kanban::models::config::EditorConstants::decl(), - vibe_kanban::models::config::SoundFile::decl(), - vibe_kanban::models::config::SoundConstants::decl(), - vibe_kanban::routes::config::ConfigConstants::decl(), - vibe_kanban::executor::ExecutorConfig::decl(), - vibe_kanban::executor::ExecutorConstants::decl(), - vibe_kanban::models::project::CreateProject::decl(), - vibe_kanban::models::project::CreateProjectFromGitHub::decl(), - vibe_kanban::models::project::Project::decl(), - vibe_kanban::models::project::ProjectWithBranch::decl(), - vibe_kanban::models::project::UpdateProject::decl(), - vibe_kanban::models::project::SearchResult::decl(), - vibe_kanban::models::project::SearchMatchType::decl(), - vibe_kanban::models::project::GitBranch::decl(), - vibe_kanban::models::project::CreateBranch::decl(), - vibe_kanban::models::task::CreateTask::decl(), - vibe_kanban::models::task::CreateTaskAndStart::decl(), - vibe_kanban::models::task::TaskStatus::decl(), - vibe_kanban::models::task::Task::decl(), - vibe_kanban::models::task::TaskWithAttemptStatus::decl(), - vibe_kanban::models::task::UpdateTask::decl(), - vibe_kanban::models::task_template::TaskTemplate::decl(), - vibe_kanban::models::task_template::CreateTaskTemplate::decl(), - vibe_kanban::models::task_template::UpdateTaskTemplate::decl(), - vibe_kanban::models::task_attempt::TaskAttemptStatus::decl(), - vibe_kanban::models::task_attempt::TaskAttempt::decl(), - vibe_kanban::models::task_attempt::CreateTaskAttempt::decl(), - vibe_kanban::models::task_attempt::UpdateTaskAttempt::decl(), - vibe_kanban::models::task_attempt::CreateFollowUpAttempt::decl(), - vibe_kanban::routes::filesystem::DirectoryEntry::decl(), - vibe_kanban::routes::filesystem::DirectoryListResponse::decl(), - vibe_kanban::routes::auth::DeviceStartResponse::decl(), - vibe_kanban::services::github_service::RepositoryInfo::decl(), - vibe_kanban::routes::task_attempts::ProcessLogsResponse::decl(), - vibe_kanban::models::task_attempt::DiffChunkType::decl(), - vibe_kanban::models::task_attempt::DiffChunk::decl(), - vibe_kanban::models::task_attempt::FileDiff::decl(), - vibe_kanban::models::task_attempt::WorktreeDiff::decl(), - vibe_kanban::models::task_attempt::BranchStatus::decl(), - vibe_kanban::models::task_attempt::ExecutionState::decl(), - vibe_kanban::models::task_attempt::TaskAttemptState::decl(), - vibe_kanban::models::execution_process::ExecutionProcess::decl(), - vibe_kanban::models::execution_process::ExecutionProcessSummary::decl(), - vibe_kanban::models::execution_process::ExecutionProcessStatus::decl(), - vibe_kanban::models::execution_process::ExecutionProcessType::decl(), - vibe_kanban::models::execution_process::CreateExecutionProcess::decl(), - vibe_kanban::models::execution_process::UpdateExecutionProcess::decl(), - vibe_kanban::models::executor_session::ExecutorSession::decl(), - vibe_kanban::models::executor_session::CreateExecutorSession::decl(), - vibe_kanban::models::executor_session::UpdateExecutorSession::decl(), - vibe_kanban::executor::NormalizedConversation::decl(), - 
vibe_kanban::executor::NormalizedEntry::decl(), - vibe_kanban::executor::NormalizedEntryType::decl(), - vibe_kanban::executor::ActionType::decl(), - ]; - - let body = decls - .into_iter() - .map(|d| { - let trimmed = d.trim_start(); - if trimmed.starts_with("export") { - d - } else { - format!("export {trimmed}") - } - }) - .collect::>() - .join("\n\n"); - - let constants = generate_constants(); - format!("{HEADER}{body}\n\n{constants}") -} - -fn main() { - let args: Vec = env::args().collect(); - let check_mode = args.iter().any(|arg| arg == "--check"); - - // 1. Make sure ../shared exists - let shared_path = Path::new("../shared"); - fs::create_dir_all(shared_path).expect("cannot create ../shared"); - - println!("Generating TypeScript types…"); - - // 2. Let ts-rs write its per-type files here (handy for debugging) - env::set_var("TS_RS_EXPORT_DIR", shared_path.to_str().unwrap()); - - let generated = generate_types_content(); - let types_path = shared_path.join("types.ts"); - - if check_mode { - // Read the current file - let current = fs::read_to_string(&types_path).unwrap_or_default(); - if current == generated { - println!("✅ shared/types.ts is up to date."); - std::process::exit(0); - } else { - eprintln!("❌ shared/types.ts is not up to date. Please run 'npm run generate-types' and commit the changes."); - std::process::exit(1); - } - } else { - // Write the file as before - fs::write(&types_path, generated).expect("unable to write types.ts"); - println!("✅ TypeScript types generated in ../shared/"); - } -} diff --git a/backend/src/bin/test_remote.rs b/backend/src/bin/test_remote.rs deleted file mode 100644 index da4db1dd..00000000 --- a/backend/src/bin/test_remote.rs +++ /dev/null @@ -1,659 +0,0 @@ -use std::env; - -use vibe_kanban::command_runner::CommandRunner; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Set up remote execution - env::set_var("CLOUD_EXECUTION", "1"); - env::set_var("CLOUD_SERVER_URL", "http://localhost:8000"); - - println!("🚀 Testing remote CommandRunner..."); - - // Test 1: Simple echo command - println!("\n📝 Test 1: Echo command"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("echo") - .arg("Hello from remote!") - .start() - .await?; - - println!("✅ Successfully started remote echo command!"); - - // Kill it (though echo probably finished already) - match process.kill().await { - Ok(()) => println!("✅ Successfully killed echo process"), - Err(e) => println!("⚠️ Kill failed (probably already finished): {}", e), - } - - // Test 2: Long-running command - println!("\n⏰ Test 2: Sleep command (5 seconds)"); - let mut runner2 = CommandRunner::new(); - let mut process2 = runner2.command("sleep").arg("5").start().await?; - - println!("✅ Successfully started remote sleep command!"); - - // Wait a bit then kill it - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - process2.kill().await?; - println!("✅ Successfully killed sleep process!"); - - // Test 3: Command with environment variables - println!("\n🌍 Test 3: Environment variables"); - let mut runner3 = CommandRunner::new(); - let mut process3 = runner3 - .command("printenv") - .arg("TEST_VAR") - .env("TEST_VAR", "remote_test_value") - .start() - .await?; - - println!("✅ Successfully started remote printenv command!"); - process3.kill().await.ok(); // Don't fail if already finished - - // Test 4: Working directory - println!("\n📁 Test 4: Working directory"); - let mut runner4 = CommandRunner::new(); - let mut process4 = 
runner4.command("pwd").working_dir("/tmp").start().await?; - - println!("✅ Successfully started remote pwd command!"); - process4.kill().await.ok(); // Don't fail if already finished - - // Test 5: Process Status Checking (TDD - These will FAIL initially) - println!("\n📊 Test 5: Process Status Checking (TDD)"); - - // Test 5a: Status of running process - let mut runner5a = CommandRunner::new(); - let mut process5a = runner5a.command("sleep").arg("3").start().await?; - - println!("✅ Started sleep process for status testing"); - - // This should return None (still running) - match process5a.status().await { - Ok(None) => println!("✅ Status correctly shows process still running"), - Ok(Some(status)) => println!( - "⚠️ Process finished unexpectedly with status: {:?}", - status - ), - Err(e) => println!("❌ Status check failed (expected for now): {}", e), - } - - // Test try_wait (non-blocking) - match process5a.try_wait().await { - Ok(None) => println!("✅ try_wait correctly shows process still running"), - Ok(Some(status)) => println!( - "⚠️ Process finished unexpectedly with status: {:?}", - status - ), - Err(e) => println!("❌ try_wait failed (expected for now): {}", e), - } - - // Kill the process to test status of completed process - process5a.kill().await.ok(); - - // Test 5b: Status of completed process - let mut runner5b = CommandRunner::new(); - let mut process5b = runner5b.command("echo").arg("status test").start().await?; - - println!("✅ Started echo process for completion status testing"); - - // Wait for process to complete - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - match process5b.status().await { - Ok(Some(status)) => { - println!( - "✅ Status correctly shows completed process: success={}, code={:?}", - status.success(), - status.code() - ); - } - Ok(None) => println!("⚠️ Process still running (might need more time)"), - Err(e) => println!("❌ Status check failed (expected for now): {}", e), - } - - // Test 5c: Wait for process completion - let mut runner5c = CommandRunner::new(); - let mut process5c = runner5c.command("echo").arg("wait test").start().await?; - - println!("✅ Started echo process for wait testing"); - - match process5c.wait().await { - Ok(status) => { - println!( - "✅ Wait completed successfully: success={}, code={:?}", - status.success(), - status.code() - ); - } - Err(e) => println!("❌ Wait failed (expected for now): {}", e), - } - - // Test 6: Output Streaming (TDD - These will FAIL initially) - println!("\n🌊 Test 6: Output Streaming (TDD)"); - - // Test 6a: Stdout streaming - let mut runner6a = CommandRunner::new(); - let mut process6a = runner6a - .command("echo") - .arg("Hello stdout streaming!") - .start() - .await?; - - println!("✅ Started echo process for stdout streaming test"); - - // Give the server a moment to capture output from fast commands like echo - tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; - - match process6a.stream().await { - Ok(mut stream) => { - println!("✅ Got streams from process"); - - if let Some(stdout) = &mut stream.stdout { - use tokio::io::AsyncReadExt; - let mut buffer = Vec::new(); - - match stdout.read_to_end(&mut buffer).await { - Ok(bytes_read) => { - let output = String::from_utf8_lossy(&buffer); - if bytes_read > 0 && output.contains("Hello stdout streaming") { - println!("✅ Successfully read stdout: '{}'", output.trim()); - } else if bytes_read == 0 { - println!( - "❌ No stdout data received (expected for now - empty streams)" - ); - } else { - println!("⚠️ Unexpected 
stdout content: '{}'", output); - } - } - Err(e) => println!("❌ Failed to read stdout: {}", e), - } - } else { - println!("❌ No stdout stream available (expected for now)"); - } - } - Err(e) => println!("❌ Failed to get streams: {}", e), - } - - // Test 6b: Stderr streaming - let mut runner6b = CommandRunner::new(); - let mut process6b = runner6b - .command("bash") - .arg("-c") - .arg("echo 'Error message' >&2") - .start() - .await?; - - println!("✅ Started bash process for stderr streaming test"); - - // Give the server a moment to capture output from fast commands - tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; - - match process6b.stream().await { - Ok(mut stream) => { - if let Some(stderr) = &mut stream.stderr { - use tokio::io::AsyncReadExt; - let mut buffer = Vec::new(); - - match stderr.read_to_end(&mut buffer).await { - Ok(bytes_read) => { - let output = String::from_utf8_lossy(&buffer); - if bytes_read > 0 && output.contains("Error message") { - println!("✅ Successfully read stderr: '{}'", output.trim()); - } else if bytes_read == 0 { - println!( - "❌ No stderr data received (expected for now - empty streams)" - ); - } else { - println!("⚠️ Unexpected stderr content: '{}'", output); - } - } - Err(e) => println!("❌ Failed to read stderr: {}", e), - } - } else { - println!("❌ No stderr stream available (expected for now)"); - } - } - Err(e) => println!("❌ Failed to get streams: {}", e), - } - - // Test 6c: Streaming from long-running process - let mut runner6c = CommandRunner::new(); - let mut process6c = runner6c - .command("bash") - .arg("-c") - .arg("for i in {1..3}; do echo \"Line $i\"; sleep 0.1; done") - .start() - .await?; - - println!("✅ Started bash process for streaming test"); - - // Give the server a moment to capture output from the command - tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; - - match process6c.stream().await { - Ok(mut stream) => { - if let Some(stdout) = &mut stream.stdout { - use tokio::io::AsyncReadExt; - let mut buffer = [0u8; 1024]; - - // Try to read some data (this tests real-time streaming) - match tokio::time::timeout( - tokio::time::Duration::from_secs(2), - stdout.read(&mut buffer), - ) - .await - { - Ok(Ok(bytes_read)) => { - let output = String::from_utf8_lossy(&buffer[..bytes_read]); - if bytes_read > 0 { - println!("✅ Successfully streamed output: '{}'", output.trim()); - } else { - println!("❌ No streaming data received (expected for now)"); - } - } - Ok(Err(e)) => println!("❌ Stream read error: {}", e), - Err(_) => { - println!("❌ Stream read timeout (expected for now - no real streaming)") - } - } - } else { - println!("❌ No stdout stream available for streaming test"); - } - } - Err(e) => println!("❌ Failed to get streams for streaming test: {}", e), - } - - // Clean up - process6c.kill().await.ok(); - - // Test 7: Server Status API Endpoint (TDD - These will FAIL initially) - println!("\n🔍 Test 7: Server Status API Endpoint (TDD)"); - - // Create a process first - let client = reqwest::Client::new(); - let command_request = serde_json::json!({ - "command": "sleep", - "args": ["5"], - "working_dir": null, - "env_vars": [], - "stdin": null - }); - - let response = client - .post("http://localhost:8000/commands") - .json(&command_request) - .send() - .await?; - - if response.status().is_success() { - let body: serde_json::Value = response.json().await?; - if let Some(process_id) = body["data"]["process_id"].as_str() { - println!("✅ Created process for status API test: {}", process_id); - - // Test 
7a: GET /commands/{id}/status for running process - let status_url = format!("http://localhost:8000/commands/{}/status", process_id); - match client.get(&status_url).send().await { - Ok(response) => { - if response.status().is_success() { - match response.json::().await { - Ok(status_body) => { - println!("✅ Got status response: {}", status_body); - - // Check expected structure - if let Some(data) = status_body.get("data") { - if let Some(running) = - data.get("running").and_then(|v| v.as_bool()) - { - if running { - println!( - "✅ Status correctly shows process is running" - ); - } else { - println!("⚠️ Process already finished"); - } - } else { - println!("❌ Missing 'running' field in status response"); - } - } else { - println!("❌ Missing 'data' field in status response"); - } - } - Err(e) => println!("❌ Failed to parse status JSON: {}", e), - } - } else { - println!( - "❌ Status API returned error: {} (expected for now)", - response.status() - ); - } - } - Err(e) => println!("❌ Status API request failed (expected for now): {}", e), - } - - // Kill the process - let _ = client - .delete(format!("http://localhost:8000/commands/{}", process_id)) - .send() - .await; - } - } - - // Test 7b: Status of completed process - let quick_command = serde_json::json!({ - "command": "echo", - "args": ["quick command"], - "working_dir": null, - "env_vars": [], - "stdin": null - }); - - let response = client - .post("http://localhost:8000/commands") - .json(&quick_command) - .send() - .await?; - - if response.status().is_success() { - let body: serde_json::Value = response.json().await?; - if let Some(process_id) = body["data"]["process_id"].as_str() { - println!( - "✅ Created quick process for completed status test: {}", - process_id - ); - - // Wait for it to complete - tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; - - let status_url = format!("http://localhost:8000/commands/{}/status", process_id); - match client.get(&status_url).send().await { - Ok(response) => { - if response.status().is_success() { - match response.json::().await { - Ok(status_body) => { - println!("✅ Got completed status response: {}", status_body); - - if let Some(data) = status_body.get("data") { - if let Some(exit_code) = data.get("exit_code") { - println!("✅ Status includes exit code: {}", exit_code); - } - if let Some(success) = data.get("success") { - println!("✅ Status includes success flag: {}", success); - } - } - } - Err(e) => println!("❌ Failed to parse completed status JSON: {}", e), - } - } else { - println!( - "❌ Completed status API returned error: {}", - response.status() - ); - } - } - Err(e) => println!("❌ Completed status API request failed: {}", e), - } - } - } - - // Test 7c: Status of non-existent process (error handling) - let fake_id = "non-existent-process-id"; - let status_url = format!("http://localhost:8000/commands/{}/status", fake_id); - match client.get(&status_url).send().await { - Ok(response) => { - if response.status() == reqwest::StatusCode::NOT_FOUND { - println!("✅ Status API correctly returns 404 for non-existent process"); - } else { - println!( - "❌ Status API should return 404 for non-existent process, got: {}", - response.status() - ); - } - } - Err(e) => println!("❌ Error testing non-existent process status: {}", e), - } - - // Test 8: Server Streaming API Endpoint (TDD - These will FAIL initially) - println!("\n📡 Test 8: Server Streaming API Endpoint (TDD)"); - - // Create a process that generates output - let stream_command = serde_json::json!({ - "command": 
"bash", - "args": ["-c", "for i in {1..3}; do echo \"Stream line $i\"; sleep 0.1; done"], - "working_dir": null, - "env_vars": [], - "stdin": null - }); - - let response = client - .post("http://localhost:8000/commands") - .json(&stream_command) - .send() - .await?; - - if response.status().is_success() { - let body: serde_json::Value = response.json().await?; - if let Some(process_id) = body["data"]["process_id"].as_str() { - println!("✅ Created streaming process: {}", process_id); - - // Test 8a: GET /commands/{id}/stream endpoint - let stream_url = format!("http://localhost:8000/commands/{}/stream", process_id); - match client.get(&stream_url).send().await { - Ok(response) => { - if response.status().is_success() { - println!("✅ Stream endpoint accessible"); - if let Some(content_type) = response.headers().get("content-type") { - println!("✅ Content-Type: {:?}", content_type); - } - - // Try to read the response body - match response.text().await { - Ok(text) => { - if !text.is_empty() { - println!("✅ Received streaming data: '{}'", text.trim()); - } else { - println!("❌ No streaming data received (expected for now)"); - } - } - Err(e) => println!("❌ Failed to read stream response: {}", e), - } - } else { - println!( - "❌ Stream endpoint returned error: {} (expected for now)", - response.status() - ); - } - } - Err(e) => println!("❌ Stream API request failed (expected for now): {}", e), - } - - // Clean up - let _ = client - .delete(format!("http://localhost:8000/commands/{}", process_id)) - .send() - .await; - } - } - - // Test 8b: Streaming from non-existent process - let fake_stream_url = format!("http://localhost:8000/commands/{}/stream", "fake-id"); - match client.get(&fake_stream_url).send().await { - Ok(response) => { - if response.status() == reqwest::StatusCode::NOT_FOUND { - println!("✅ Stream API correctly returns 404 for non-existent process"); - } else { - println!( - "❌ Stream API should return 404 for non-existent process, got: {}", - response.status() - ); - } - } - Err(e) => println!("❌ Error testing non-existent process stream: {}", e), - } - - // Test 9: True Chunk-Based Streaming Verification (Fixed) - println!("\n🌊 Test 9: True Chunk-Based Streaming Verification"); - - // Create a longer-running process to avoid timing issues - let stream_command = serde_json::json!({ - "command": "bash", - "args": ["-c", "for i in {1..6}; do echo \"Chunk $i at $(date +%H:%M:%S.%3N)\"; sleep 0.5; done"], - "working_dir": null, - "env_vars": [], - "stdin": null - }); - - let response = client - .post("http://localhost:8000/commands") - .json(&stream_command) - .send() - .await?; - - if response.status().is_success() { - let body: serde_json::Value = response.json().await?; - if let Some(process_id) = body["data"]["process_id"].as_str() { - println!( - "✅ Created streaming process: {} (will run ~3 seconds)", - process_id - ); - - // Test chunk-based streaming with the /stream endpoint - let stream_url = format!("http://localhost:8000/commands/{}/stream", process_id); - - // Small delay to let the process start generating output - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - let stream_response = client.get(&stream_url).send().await; - - match stream_response { - Ok(response) => { - if response.status().is_success() { - println!("✅ Stream endpoint accessible"); - - let start_time = std::time::Instant::now(); - - println!("🔍 Reading streaming response:"); - - // Try to read the response in chunks using a simpler approach - let bytes = match 
tokio::time::timeout( - tokio::time::Duration::from_secs(4), - response.bytes(), - ) - .await - { - Ok(Ok(bytes)) => bytes, - Ok(Err(e)) => { - println!(" ❌ Failed to read response: {}", e); - return Ok(()); - } - Err(_) => { - println!(" ❌ Response read timeout"); - return Ok(()); - } - }; - - let response_text = String::from_utf8_lossy(&bytes); - let lines: Vec<&str> = - response_text.lines().filter(|l| !l.is_empty()).collect(); - - println!("📊 Response analysis:"); - println!(" Total response size: {} bytes", bytes.len()); - println!(" Number of lines: {}", lines.len()); - println!( - " Read duration: {:.1}s", - start_time.elapsed().as_secs_f32() - ); - - if !lines.is_empty() { - println!(" Lines received:"); - for (i, line) in lines.iter().enumerate() { - println!(" {}: '{}'", i + 1, line); - } - } - - // The key insight: if we got multiple lines with different timestamps, - // it proves they were generated over time, even if delivered in one HTTP response - if lines.len() > 1 { - // Check if timestamps show progression - let first_line = lines[0]; - let last_line = lines[lines.len() - 1]; - - if first_line != last_line { - println!("✅ STREAMING VERIFIED: {} lines with different content/timestamps!", lines.len()); - println!( - " This proves the server captured streaming output over time" - ); - if lines.len() >= 3 { - println!(" First: '{}'", first_line); - println!(" Last: '{}'", last_line); - } - } else { - println!( - "⚠️ Multiple identical lines - may indicate buffering issue" - ); - } - } else if lines.len() == 1 { - println!("⚠️ Only 1 line received: '{}'", lines[0]); - println!( - " This suggests the process finished too quickly or timing issue" - ); - } else { - println!("❌ No output lines received"); - } - } else { - println!("❌ Stream endpoint error: {}", response.status()); - } - } - Err(e) => println!("❌ Stream request failed: {}", e), - } - - // Wait for process to complete, then verify final output - tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; - - println!("\n🔍 Verification: Testing completed process output:"); - let stdout_url = format!("http://localhost:8000/commands/{}/stdout", process_id); - match client.get(&stdout_url).send().await { - Ok(response) if response.status().is_success() => { - if let Ok(text) = response.text().await { - let final_lines: Vec<&str> = - text.lines().filter(|l| !l.is_empty()).collect(); - println!( - "✅ Final stdout: {} lines, {} bytes", - final_lines.len(), - text.len() - ); - - if final_lines.len() >= 6 { - println!( - "✅ Process completed successfully - all expected output captured" - ); - } else { - println!( - "⚠️ Expected 6 lines, got {} - process may have been interrupted", - final_lines.len() - ); - } - } - } - _ => println!("⚠️ Final stdout check failed"), - } - - // Clean up - let _ = client - .delete(format!("http://localhost:8000/commands/{}", process_id)) - .send() - .await; - } - } - - println!("\n🎉 All TDD tests completed!"); - println!("💡 Expected failures show what needs to be implemented:"); - println!(" 📊 Remote status/wait methods"); - println!(" 🌊 Real output streaming"); - println!(" 🔍 GET /commands/:id/status endpoint"); - println!(" 📡 GET /commands/:id/stream endpoint"); - println!("🔧 Time to make the tests pass! 
🚀"); - - Ok(()) -} diff --git a/backend/src/command_runner.rs b/backend/src/command_runner.rs deleted file mode 100644 index c24e6bac..00000000 --- a/backend/src/command_runner.rs +++ /dev/null @@ -1,291 +0,0 @@ -use async_trait::async_trait; -use serde::{Deserialize, Serialize}; -use tokio::io::AsyncRead; - -use crate::models::Environment; - -mod local; -mod remote; - -pub use local::LocalCommandExecutor; -pub use remote::RemoteCommandExecutor; - -// Core trait that defines the interface for command execution -#[async_trait] -pub trait CommandExecutor: Send + Sync { - /// Start a process and return a handle to it - async fn start( - &self, - request: &CommandRunnerArgs, - ) -> Result, CommandError>; -} - -// Trait for managing running processes -#[async_trait] -pub trait ProcessHandle: Send + Sync { - /// Check if the process is still running, return exit status if finished - async fn try_wait(&mut self) -> Result, CommandError>; - - /// Wait for the process to complete and return exit status - async fn wait(&mut self) -> Result; - - /// Kill the process - async fn kill(&mut self) -> Result<(), CommandError>; - - /// Get streams for stdout and stderr - async fn stream(&mut self) -> Result; - - /// Get process identifier (for debugging/logging) - fn process_id(&self) -> String; - - /// Check current status (alias for try_wait for backward compatibility) - async fn status(&mut self) -> Result, CommandError> { - self.try_wait().await - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CommandRunnerArgs { - pub command: String, - pub args: Vec, - pub working_dir: Option, - pub env_vars: Vec<(String, String)>, - pub stdin: Option, -} - -pub struct CommandRunner { - executor: Box, - command: Option, - args: Vec, - working_dir: Option, - env_vars: Vec<(String, String)>, - stdin: Option, -} -impl Default for CommandRunner { - fn default() -> Self { - Self::new() - } -} - -pub struct CommandProcess { - handle: Box, -} - -impl std::fmt::Debug for CommandProcess { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("CommandProcess") - .field("process_id", &self.handle.process_id()) - .finish() - } -} - -#[derive(Debug)] -pub enum CommandError { - SpawnFailed { - command: String, - error: std::io::Error, - }, - StatusCheckFailed { - error: std::io::Error, - }, - KillFailed { - error: std::io::Error, - }, - ProcessNotStarted, - NoCommandSet, - IoError { - error: std::io::Error, - }, -} -impl From for CommandError { - fn from(error: std::io::Error) -> Self { - CommandError::IoError { error } - } -} -impl std::fmt::Display for CommandError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - CommandError::SpawnFailed { command, error } => { - write!(f, "Failed to spawn command '{}': {}", command, error) - } - CommandError::StatusCheckFailed { error } => { - write!(f, "Failed to check command status: {}", error) - } - CommandError::KillFailed { error } => { - write!(f, "Failed to kill command: {}", error) - } - CommandError::ProcessNotStarted => { - write!(f, "Process has not been started yet") - } - CommandError::NoCommandSet => { - write!(f, "No command has been set") - } - CommandError::IoError { error } => { - write!(f, "Failed to spawn command: {}", error) - } - } - } -} - -impl std::error::Error for CommandError {} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CommandExitStatus { - /// Exit code (0 for success on most platforms) - code: Option, - /// Whether the process exited successfully - success: 
bool, - /// Unix signal that terminated the process (Unix only) - #[cfg(unix)] - signal: Option, - /// Optional remote process identifier for cloud execution - remote_process_id: Option, - /// Optional session identifier for remote execution tracking - remote_session_id: Option, -} - -impl CommandExitStatus { - /// Returns true if the process exited successfully - pub fn success(&self) -> bool { - self.success - } - - /// Returns the exit code of the process, if available - pub fn code(&self) -> Option { - self.code - } -} - -pub struct CommandStream { - pub stdout: Option>, - pub stderr: Option>, -} - -impl CommandRunner { - pub fn new() -> Self { - let env = std::env::var("ENVIRONMENT").unwrap_or_else(|_| "local".to_string()); - let mode = env.parse().unwrap_or(Environment::Local); - match mode { - Environment::Cloud => CommandRunner { - executor: Box::new(RemoteCommandExecutor::new()), - command: None, - args: Vec::new(), - working_dir: None, - env_vars: Vec::new(), - stdin: None, - }, - Environment::Local => CommandRunner { - executor: Box::new(LocalCommandExecutor::new()), - command: None, - args: Vec::new(), - working_dir: None, - env_vars: Vec::new(), - stdin: None, - }, - } - } - - pub fn command(&mut self, cmd: &str) -> &mut Self { - self.command = Some(cmd.to_string()); - self - } - - pub fn get_program(&self) -> &str { - self.command.as_deref().unwrap_or("") - } - - pub fn get_args(&self) -> &[String] { - &self.args - } - - pub fn get_current_dir(&self) -> Option<&str> { - self.working_dir.as_deref() - } - - pub fn arg(&mut self, arg: &str) -> &mut Self { - self.args.push(arg.to_string()); - self - } - - pub fn stdin(&mut self, prompt: &str) -> &mut Self { - self.stdin = Some(prompt.to_string()); - self - } - - pub fn working_dir(&mut self, dir: &str) -> &mut Self { - self.working_dir = Some(dir.to_string()); - self - } - - pub fn env(&mut self, key: &str, val: &str) -> &mut Self { - self.env_vars.push((key.to_string(), val.to_string())); - self - } - - /// Convert the current CommandRunner state to a CreateCommandRequest - pub fn to_args(&self) -> Option { - Some(CommandRunnerArgs { - command: self.command.clone()?, - args: self.args.clone(), - working_dir: self.working_dir.clone(), - env_vars: self.env_vars.clone(), - stdin: self.stdin.clone(), - }) - } - - /// Create a CommandRunner from a CreateCommandRequest, respecting the environment - #[allow(dead_code)] - pub fn from_args(request: CommandRunnerArgs) -> Self { - let mut runner = Self::new(); - runner.command(&request.command); - - for arg in &request.args { - runner.arg(arg); - } - - if let Some(dir) = &request.working_dir { - runner.working_dir(dir); - } - - for (key, value) in &request.env_vars { - runner.env(key, value); - } - - if let Some(stdin) = &request.stdin { - runner.stdin(stdin); - } - - runner - } - - pub async fn start(&self) -> Result { - let request = self.to_args().ok_or(CommandError::NoCommandSet)?; - let handle = self.executor.start(&request).await?; - - Ok(CommandProcess { handle }) - } -} - -impl CommandProcess { - #[allow(dead_code)] - pub async fn status(&mut self) -> Result, CommandError> { - self.handle.status().await - } - - pub async fn try_wait(&mut self) -> Result, CommandError> { - self.handle.try_wait().await - } - - pub async fn kill(&mut self) -> Result<(), CommandError> { - self.handle.kill().await - } - - pub async fn stream(&mut self) -> Result { - self.handle.stream().await - } - - #[allow(dead_code)] - pub async fn wait(&mut self) -> Result { - self.handle.wait().await - } -} diff 
--git a/backend/src/command_runner/local.rs b/backend/src/command_runner/local.rs deleted file mode 100644 index 0e951204..00000000 --- a/backend/src/command_runner/local.rs +++ /dev/null @@ -1,703 +0,0 @@ -use std::{process::Stdio, time::Duration}; - -use async_trait::async_trait; -use command_group::{AsyncCommandGroup, AsyncGroupChild}; -#[cfg(unix)] -use nix::{ - sys::signal::{killpg, Signal}, - unistd::{getpgid, Pid}, -}; -use tokio::process::Command; - -use crate::command_runner::{ - CommandError, CommandExecutor, CommandExitStatus, CommandRunnerArgs, CommandStream, - ProcessHandle, -}; - -pub struct LocalCommandExecutor; - -impl Default for LocalCommandExecutor { - fn default() -> Self { - Self::new() - } -} - -impl LocalCommandExecutor { - pub fn new() -> Self { - Self - } -} - -#[async_trait] -impl CommandExecutor for LocalCommandExecutor { - async fn start( - &self, - request: &CommandRunnerArgs, - ) -> Result, CommandError> { - let mut cmd = Command::new(&request.command); - - cmd.args(&request.args) - .kill_on_drop(true) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - - if let Some(dir) = &request.working_dir { - cmd.current_dir(dir); - } - - for (key, val) in &request.env_vars { - cmd.env(key, val); - } - - let mut child = cmd.group_spawn().map_err(|e| CommandError::SpawnFailed { - command: format!("{} {}", request.command, request.args.join(" ")), - error: e, - })?; - - if let Some(prompt) = &request.stdin { - // Write prompt to stdin safely - if let Some(mut stdin) = child.inner().stdin.take() { - use tokio::io::AsyncWriteExt; - stdin.write_all(prompt.as_bytes()).await?; - stdin.shutdown().await?; - } - } - - Ok(Box::new(LocalProcessHandle::new(child))) - } -} - -pub struct LocalProcessHandle { - child: Option, - process_id: String, -} - -impl LocalProcessHandle { - pub fn new(mut child: AsyncGroupChild) -> Self { - let process_id = child - .inner() - .id() - .map(|id| id.to_string()) - .unwrap_or_else(|| "unknown".to_string()); - - Self { - child: Some(child), - process_id, - } - } -} - -#[async_trait] -impl ProcessHandle for LocalProcessHandle { - async fn try_wait(&mut self) -> Result, CommandError> { - match &mut self.child { - Some(child) => match child - .inner() - .try_wait() - .map_err(|e| CommandError::StatusCheckFailed { error: e })? - { - Some(status) => Ok(Some(CommandExitStatus::from_local(status))), - None => Ok(None), - }, - None => Err(CommandError::ProcessNotStarted), - } - } - - async fn wait(&mut self) -> Result { - match &mut self.child { - Some(child) => { - let status = child - .wait() - .await - .map_err(|e| CommandError::KillFailed { error: e })?; - Ok(CommandExitStatus::from_local(status)) - } - None => Err(CommandError::ProcessNotStarted), - } - } - - async fn kill(&mut self) -> Result<(), CommandError> { - match &mut self.child { - Some(child) => { - // hit the whole process group, not just the leader - #[cfg(unix)] - { - if let Some(pid) = child.inner().id() { - let pgid = getpgid(Some(Pid::from_raw(pid as i32))).map_err(|e| { - CommandError::KillFailed { - error: std::io::Error::other(e), - } - })?; - - for sig in [Signal::SIGINT, Signal::SIGTERM, Signal::SIGKILL] { - if let Err(e) = killpg(pgid, sig) { - tracing::warn!( - "Failed to send signal {:?} to process group {}: {}", - sig, - pgid, - e - ); - } - tokio::time::sleep(Duration::from_secs(2)).await; - if child - .inner() - .try_wait() - .map_err(|e| CommandError::StatusCheckFailed { error: e })? - .is_some() - { - break; // gone! 
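// (Escalation note on the loop above: signals are sent to the whole process group in the
// order SIGINT, SIGTERM, SIGKILL, with a ~2s grace period after each send; the loop breaks
// early as soon as try_wait reports the child has been reaped.)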
- } - } - } - } - - // final fallback – command_group already targets the group - child - .kill() - .await - .map_err(|e| CommandError::KillFailed { error: e })?; - child - .wait() - .await - .map_err(|e| CommandError::KillFailed { error: e })?; // reap - - // Clear the handle after successful kill - self.child = None; - Ok(()) - } - None => Err(CommandError::ProcessNotStarted), - } - } - - async fn stream(&mut self) -> Result { - match &mut self.child { - Some(child) => { - let stdout = child.inner().stdout.take(); - let stderr = child.inner().stderr.take(); - Ok(CommandStream::from_local(stdout, stderr)) - } - None => Err(CommandError::ProcessNotStarted), - } - } - - fn process_id(&self) -> String { - self.process_id.clone() - } -} - -// Local-specific implementations for shared types -impl CommandExitStatus { - /// Create a CommandExitStatus from a std::process::ExitStatus (for local processes) - pub fn from_local(status: std::process::ExitStatus) -> Self { - Self { - code: status.code(), - success: status.success(), - #[cfg(unix)] - signal: { - use std::os::unix::process::ExitStatusExt; - status.signal() - }, - remote_process_id: None, - remote_session_id: None, - } - } -} - -impl CommandStream { - /// Create a CommandStream from local process streams - pub fn from_local( - stdout: Option, - stderr: Option, - ) -> Self { - Self { - stdout: stdout.map(|s| Box::new(s) as Box), - stderr: stderr.map(|s| Box::new(s) as Box), - } - } -} - -#[cfg(test)] -mod tests { - use std::process::Stdio; - - use command_group::{AsyncCommandGroup, AsyncGroupChild}; - use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - process::Command, - }; - - use crate::command_runner::*; - - // Helper function to create a comparison tokio::process::Command - async fn create_tokio_command( - cmd: &str, - args: &[&str], - working_dir: Option<&str>, - env_vars: &[(String, String)], - stdin_data: Option<&str>, - ) -> Result { - let mut command = Command::new(cmd); - command - .args(args) - .kill_on_drop(true) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - - if let Some(dir) = working_dir { - command.current_dir(dir); - } - - for (key, val) in env_vars { - command.env(key, val); - } - - let mut child = command.group_spawn()?; - - // Write stdin data if provided - if let Some(data) = stdin_data { - if let Some(mut stdin) = child.inner().stdin.take() { - stdin.write_all(data.as_bytes()).await?; - stdin.shutdown().await?; - } - } - - Ok(child) - } - - #[tokio::test] - async fn test_command_execution_comparison() { - // Ensure we're using local execution for this test - std::env::set_var("ENVIRONMENT", "local"); - let test_message = "hello world"; - - // Test with CommandRunner - let mut runner = CommandRunner::new(); - let mut process = runner - .command("echo") - .arg(test_message) - .start() - .await - .expect("CommandRunner should start echo command"); - - let mut stream = process.stream().await.expect("Should get stream"); - let mut stdout_data = Vec::new(); - if let Some(stdout) = &mut stream.stdout { - stdout - .read_to_end(&mut stdout_data) - .await - .expect("Should read stdout"); - } - let runner_output = String::from_utf8(stdout_data).expect("Should be valid UTF-8"); - - // Test with tokio::process::Command - let mut tokio_child = create_tokio_command("echo", &[test_message], None, &[], None) - .await - .expect("Should start tokio command"); - - let mut tokio_stdout_data = Vec::new(); - if let Some(stdout) = tokio_child.inner().stdout.take() { - let mut stdout = stdout; - stdout - 
.read_to_end(&mut tokio_stdout_data) - .await - .expect("Should read tokio stdout"); - } - let tokio_output = String::from_utf8(tokio_stdout_data).expect("Should be valid UTF-8"); - - // Both should produce the same output - assert_eq!(runner_output.trim(), tokio_output.trim()); - assert_eq!(runner_output.trim(), test_message); - } - - #[tokio::test] - async fn test_stdin_handling() { - // Ensure we're using local execution for this test - std::env::set_var("ENVIRONMENT", "local"); - let test_input = "test input data\n"; - - // Test with CommandRunner (using cat to echo stdin) - let mut runner = CommandRunner::new(); - let mut process = runner - .command("cat") - .stdin(test_input) - .start() - .await - .expect("CommandRunner should start cat command"); - - let mut stream = process.stream().await.expect("Should get stream"); - let mut stdout_data = Vec::new(); - if let Some(stdout) = &mut stream.stdout { - stdout - .read_to_end(&mut stdout_data) - .await - .expect("Should read stdout"); - } - let runner_output = String::from_utf8(stdout_data).expect("Should be valid UTF-8"); - - // Test with tokio::process::Command - let mut tokio_child = create_tokio_command("cat", &[], None, &[], Some(test_input)) - .await - .expect("Should start tokio command"); - - let mut tokio_stdout_data = Vec::new(); - if let Some(stdout) = tokio_child.inner().stdout.take() { - let mut stdout = stdout; - stdout - .read_to_end(&mut tokio_stdout_data) - .await - .expect("Should read tokio stdout"); - } - let tokio_output = String::from_utf8(tokio_stdout_data).expect("Should be valid UTF-8"); - - // Both should echo the input - assert_eq!(runner_output, tokio_output); - assert_eq!(runner_output, test_input); - } - - #[tokio::test] - async fn test_working_directory() { - // Use pwd command to check working directory - let test_dir = "/tmp"; - - // Test with CommandRunner - std::env::set_var("ENVIRONMENT", "local"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("pwd") - .working_dir(test_dir) - .start() - .await - .expect("CommandRunner should start pwd command"); - - let mut stream = process.stream().await.expect("Should get stream"); - let mut stdout_data = Vec::new(); - if let Some(stdout) = &mut stream.stdout { - stdout - .read_to_end(&mut stdout_data) - .await - .expect("Should read stdout"); - } - let runner_output = String::from_utf8(stdout_data).expect("Should be valid UTF-8"); - - // Test with tokio::process::Command - let mut tokio_child = create_tokio_command("pwd", &[], Some(test_dir), &[], None) - .await - .expect("Should start tokio command"); - - let mut tokio_stdout_data = Vec::new(); - if let Some(stdout) = tokio_child.inner().stdout.take() { - let mut stdout = stdout; - stdout - .read_to_end(&mut tokio_stdout_data) - .await - .expect("Should read tokio stdout"); - } - let tokio_output = String::from_utf8(tokio_stdout_data).expect("Should be valid UTF-8"); - - // Both should show the same working directory - assert_eq!(runner_output.trim(), tokio_output.trim()); - assert!(runner_output.trim().contains(test_dir)); - } - - #[tokio::test] - async fn test_environment_variables() { - let test_var = "TEST_VAR"; - let test_value = "test_value_123"; - - // Test with CommandRunner - std::env::set_var("ENVIRONMENT", "local"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("printenv") - .arg(test_var) - .env(test_var, test_value) - .start() - .await - .expect("CommandRunner should start printenv command"); - - let mut stream = 
process.stream().await.expect("Should get stream"); - let mut stdout_data = Vec::new(); - if let Some(stdout) = &mut stream.stdout { - stdout - .read_to_end(&mut stdout_data) - .await - .expect("Should read stdout"); - } - let runner_output = String::from_utf8(stdout_data).expect("Should be valid UTF-8"); - - // Test with tokio::process::Command - let env_vars = vec![(test_var.to_string(), test_value.to_string())]; - let mut tokio_child = create_tokio_command("printenv", &[test_var], None, &env_vars, None) - .await - .expect("Should start tokio command"); - - let mut tokio_stdout_data = Vec::new(); - if let Some(stdout) = tokio_child.inner().stdout.take() { - let mut stdout = stdout; - stdout - .read_to_end(&mut tokio_stdout_data) - .await - .expect("Should read tokio stdout"); - } - let tokio_output = String::from_utf8(tokio_stdout_data).expect("Should be valid UTF-8"); - - // Both should show the same environment variable - assert_eq!(runner_output.trim(), tokio_output.trim()); - assert_eq!(runner_output.trim(), test_value); - } - - #[tokio::test] - async fn test_process_group_creation() { - // Test that both CommandRunner and tokio::process::Command create process groups - // We'll use a sleep command that can be easily killed - - // Test with CommandRunner - std::env::set_var("ENVIRONMENT", "local"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("sleep") - .arg("10") // Sleep for 10 seconds - .start() - .await - .expect("CommandRunner should start sleep command"); - - // Check that process is running - let status = process.status().await.expect("Should check status"); - assert!(status.is_none(), "Process should still be running"); - - // Kill the process (might fail if already exited) - let _ = process.kill().await; - - // Wait a moment for the kill to take effect - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - let final_status = process.status().await.expect("Should check final status"); - assert!( - final_status.is_some(), - "Process should have exited after kill" - ); - - // Test with tokio::process::Command for comparison - let mut tokio_child = create_tokio_command("sleep", &["10"], None, &[], None) - .await - .expect("Should start tokio sleep command"); - - // Check that process is running - let tokio_status = tokio_child - .inner() - .try_wait() - .expect("Should check tokio status"); - assert!( - tokio_status.is_none(), - "Tokio process should still be running" - ); - - // Kill the tokio process - tokio_child.kill().await.expect("Should kill tokio process"); - - // Wait a moment for the kill to take effect - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - let tokio_final_status = tokio_child - .inner() - .try_wait() - .expect("Should check tokio final status"); - assert!( - tokio_final_status.is_some(), - "Tokio process should have exited after kill" - ); - } - - #[tokio::test] - async fn test_kill_operation() { - // Test killing processes with both implementations - - // Test CommandRunner kill - std::env::set_var("ENVIRONMENT", "local"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("sleep") - .arg("60") // Long sleep - .start() - .await - .expect("Should start CommandRunner sleep"); - - // Verify it's running - assert!(process - .status() - .await - .expect("Should check status") - .is_none()); - - // Kill and verify it stops (might fail if already exited) - let _ = process.kill().await; - - // Give it time to die - 
tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; - - let exit_status = process.status().await.expect("Should get exit status"); - assert!(exit_status.is_some(), "Process should have exited"); - - // Test tokio::process::Command kill for comparison - let mut tokio_child = create_tokio_command("sleep", &["60"], None, &[], None) - .await - .expect("Should start tokio sleep"); - - // Verify it's running - assert!(tokio_child - .inner() - .try_wait() - .expect("Should check tokio status") - .is_none()); - - // Kill and verify it stops - tokio_child.kill().await.expect("Should kill tokio process"); - - // Give it time to die - tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; - - let tokio_exit_status = tokio_child - .inner() - .try_wait() - .expect("Should get tokio exit status"); - assert!( - tokio_exit_status.is_some(), - "Tokio process should have exited" - ); - } - - #[tokio::test] - async fn test_status_monitoring() { - // Test status monitoring with a quick command - - // Test with CommandRunner - std::env::set_var("ENVIRONMENT", "local"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("echo") - .arg("quick test") - .start() - .await - .expect("Should start CommandRunner echo"); - - // Initially might be running or might have finished quickly - let _initial_status = process.status().await.expect("Should check initial status"); - - // Wait for completion - let exit_status = process.wait().await.expect("Should wait for completion"); - assert!(exit_status.success(), "Echo command should succeed"); - - // After wait, status should show completion - let final_status = process.status().await.expect("Should check final status"); - assert!( - final_status.is_some(), - "Should have exit status after completion" - ); - assert!( - final_status.unwrap().success(), - "Should show successful exit" - ); - - // Test with tokio::process::Command for comparison - let mut tokio_child = create_tokio_command("echo", &["quick test"], None, &[], None) - .await - .expect("Should start tokio echo"); - - // Wait for completion - let tokio_exit_status = tokio_child - .wait() - .await - .expect("Should wait for tokio completion"); - assert!( - tokio_exit_status.success(), - "Tokio echo command should succeed" - ); - - // After wait, status should show completion - let tokio_final_status = tokio_child - .inner() - .try_wait() - .expect("Should check tokio final status"); - assert!( - tokio_final_status.is_some(), - "Should have tokio exit status after completion" - ); - assert!( - tokio_final_status.unwrap().success(), - "Should show tokio successful exit" - ); - } - - #[tokio::test] - async fn test_wait_for_completion() { - // Test waiting for process completion with specific exit codes - - // Test successful command (exit code 0) - std::env::set_var("ENVIRONMENT", "local"); - let mut runner = CommandRunner::new(); - let mut process = runner - .command("true") // Command that exits with 0 - .start() - .await - .expect("Should start true command"); - - let exit_status = process - .wait() - .await - .expect("Should wait for true completion"); - assert!(exit_status.success(), "true command should succeed"); - assert_eq!(exit_status.code(), Some(0), "true should exit with code 0"); - - // Test failing command (exit code 1) - let mut runner2 = CommandRunner::new(); - let mut process2 = runner2 - .command("false") // Command that exits with 1 - .start() - .await - .expect("Should start false command"); - - let exit_status2 = process2 - .wait() - .await - 
.expect("Should wait for false completion"); - assert!(!exit_status2.success(), "false command should fail"); - assert_eq!( - exit_status2.code(), - Some(1), - "false should exit with code 1" - ); - - // Compare with tokio::process::Command - let mut tokio_child = create_tokio_command("true", &[], None, &[], None) - .await - .expect("Should start tokio true"); - - let tokio_exit_status = tokio_child - .wait() - .await - .expect("Should wait for tokio true"); - assert!(tokio_exit_status.success(), "tokio true should succeed"); - assert_eq!( - tokio_exit_status.code(), - Some(0), - "tokio true should exit with code 0" - ); - - let mut tokio_child2 = create_tokio_command("false", &[], None, &[], None) - .await - .expect("Should start tokio false"); - - let tokio_exit_status2 = tokio_child2 - .wait() - .await - .expect("Should wait for tokio false"); - assert!(!tokio_exit_status2.success(), "tokio false should fail"); - assert_eq!( - tokio_exit_status2.code(), - Some(1), - "tokio false should exit with code 1" - ); - } -} diff --git a/backend/src/command_runner/remote.rs b/backend/src/command_runner/remote.rs deleted file mode 100644 index e04d73c5..00000000 --- a/backend/src/command_runner/remote.rs +++ /dev/null @@ -1,402 +0,0 @@ -use std::{ - pin::Pin, - task::{Context, Poll}, -}; - -use async_trait::async_trait; -use tokio::io::AsyncRead; - -use crate::command_runner::{ - CommandError, CommandExecutor, CommandExitStatus, CommandRunnerArgs, CommandStream, - ProcessHandle, -}; - -pub struct RemoteCommandExecutor { - cloud_server_url: String, -} - -impl Default for RemoteCommandExecutor { - fn default() -> Self { - Self::new() - } -} - -impl RemoteCommandExecutor { - pub fn new() -> Self { - let cloud_server_url = std::env::var("CLOUD_SERVER_URL") - .unwrap_or_else(|_| "http://localhost:8000".to_string()); - Self { cloud_server_url } - } -} - -#[async_trait] -impl CommandExecutor for RemoteCommandExecutor { - async fn start( - &self, - request: &CommandRunnerArgs, - ) -> Result, CommandError> { - let client = reqwest::Client::new(); - let response = client - .post(format!("{}/commands", self.cloud_server_url)) - .json(request) - .send() - .await - .map_err(|e| CommandError::IoError { - error: std::io::Error::other(e), - })?; - - let result: serde_json::Value = - response.json().await.map_err(|e| CommandError::IoError { - error: std::io::Error::other(e), - })?; - - let process_id = - result["data"]["process_id"] - .as_str() - .ok_or_else(|| CommandError::IoError { - error: std::io::Error::other(format!( - "Missing process_id in response: {}", - result - )), - })?; - - Ok(Box::new(RemoteProcessHandle::new( - process_id.to_string(), - self.cloud_server_url.clone(), - ))) - } -} - -pub struct RemoteProcessHandle { - process_id: String, - cloud_server_url: String, -} - -impl RemoteProcessHandle { - pub fn new(process_id: String, cloud_server_url: String) -> Self { - Self { - process_id, - cloud_server_url, - } - } -} - -#[async_trait] -impl ProcessHandle for RemoteProcessHandle { - async fn try_wait(&mut self) -> Result, CommandError> { - // Make HTTP request to get status from cloud server - let client = reqwest::Client::new(); - let response = client - .get(format!( - "{}/commands/{}/status", - self.cloud_server_url, self.process_id - )) - .send() - .await - .map_err(|e| CommandError::StatusCheckFailed { - error: std::io::Error::other(e), - })?; - - if !response.status().is_success() { - if response.status() == reqwest::StatusCode::NOT_FOUND { - return Err(CommandError::StatusCheckFailed { - 
error: std::io::Error::new(std::io::ErrorKind::NotFound, "Process not found"), - }); - } else { - return Err(CommandError::StatusCheckFailed { - error: std::io::Error::other("Status check failed"), - }); - } - } - - let result: serde_json::Value = - response - .json() - .await - .map_err(|e| CommandError::StatusCheckFailed { - error: std::io::Error::other(e), - })?; - - let data = result["data"] - .as_object() - .ok_or_else(|| CommandError::StatusCheckFailed { - error: std::io::Error::other("Invalid response format"), - })?; - - let running = data["running"].as_bool().unwrap_or(false); - - if running { - Ok(None) // Still running - } else { - // Process completed, extract exit status - let exit_code = data["exit_code"].as_i64().map(|c| c as i32); - let success = data["success"].as_bool().unwrap_or(false); - - Ok(Some(CommandExitStatus::from_remote( - exit_code, - success, - Some(self.process_id.clone()), - None, - ))) - } - } - - async fn wait(&mut self) -> Result { - // Poll the status endpoint until process completes - loop { - let client = reqwest::Client::new(); - let response = client - .get(format!( - "{}/commands/{}/status", - self.cloud_server_url, self.process_id - )) - .send() - .await - .map_err(|e| CommandError::StatusCheckFailed { - error: std::io::Error::other(e), - })?; - - if !response.status().is_success() { - if response.status() == reqwest::StatusCode::NOT_FOUND { - return Err(CommandError::StatusCheckFailed { - error: std::io::Error::new( - std::io::ErrorKind::NotFound, - "Process not found", - ), - }); - } else { - return Err(CommandError::StatusCheckFailed { - error: std::io::Error::other("Status check failed"), - }); - } - } - - let result: serde_json::Value = - response - .json() - .await - .map_err(|e| CommandError::StatusCheckFailed { - error: std::io::Error::other(e), - })?; - - let data = - result["data"] - .as_object() - .ok_or_else(|| CommandError::StatusCheckFailed { - error: std::io::Error::other("Invalid response format"), - })?; - - let running = data["running"].as_bool().unwrap_or(false); - - if !running { - // Process completed, extract exit status and return - let exit_code = data["exit_code"].as_i64().map(|c| c as i32); - let success = data["success"].as_bool().unwrap_or(false); - - return Ok(CommandExitStatus::from_remote( - exit_code, - success, - Some(self.process_id.clone()), - None, - )); - } - - // Wait a bit before polling again - tokio::time::sleep(tokio::time::Duration::from_millis(20)).await; - } - } - - async fn kill(&mut self) -> Result<(), CommandError> { - let client = reqwest::Client::new(); - let response = client - .delete(format!( - "{}/commands/{}", - self.cloud_server_url, self.process_id - )) - .send() - .await - .map_err(|e| CommandError::KillFailed { - error: std::io::Error::other(e), - })?; - - if !response.status().is_success() { - if response.status() == reqwest::StatusCode::NOT_FOUND { - // Process not found, might have already finished - treat as success - return Ok(()); - } - - return Err(CommandError::KillFailed { - error: std::io::Error::other(format!( - "Remote kill failed with status: {}", - response.status() - )), - }); - } - - // Check if server indicates process was already completed - if let Ok(result) = response.json::().await { - if let Some(data) = result.get("data") { - if let Some(message) = data.as_str() { - tracing::info!("Kill result: {}", message); - } - } - } - - Ok(()) - } - - async fn stream(&mut self) -> Result { - // Create HTTP streams for stdout and stderr concurrently - let stdout_url = format!( - 
"{}/commands/{}/stdout", - self.cloud_server_url, self.process_id - ); - let stderr_url = format!( - "{}/commands/{}/stderr", - self.cloud_server_url, self.process_id - ); - - // Create both streams concurrently using tokio::try_join! - let (stdout_result, stderr_result) = - tokio::try_join!(HTTPStream::new(stdout_url), HTTPStream::new(stderr_url))?; - - let stdout_stream: Option> = - Some(Box::new(stdout_result) as Box); - let stderr_stream: Option> = - Some(Box::new(stderr_result) as Box); - - Ok(CommandStream { - stdout: stdout_stream, - stderr: stderr_stream, - }) - } - - fn process_id(&self) -> String { - self.process_id.clone() - } -} - -/// HTTP-based AsyncRead wrapper for true streaming -pub struct HTTPStream { - stream: Pin, reqwest::Error>> + Send>>, - current_chunk: Vec, - chunk_position: usize, - finished: bool, -} - -// HTTPStream needs to be Unpin to work with the AsyncRead trait bounds -impl Unpin for HTTPStream {} - -impl HTTPStream { - pub async fn new(url: String) -> Result { - let client = reqwest::Client::new(); - let response = client - .get(&url) - .send() - .await - .map_err(|e| CommandError::IoError { - error: std::io::Error::other(e), - })?; - - if !response.status().is_success() { - return Err(CommandError::IoError { - error: std::io::Error::other(format!( - "HTTP request failed with status: {}", - response.status() - )), - }); - } - - // Use chunk() method to create a stream - Ok(Self { - stream: Box::pin(futures_util::stream::unfold( - response, - |mut resp| async move { - match resp.chunk().await { - Ok(Some(chunk)) => Some((Ok(chunk.to_vec()), resp)), - Ok(None) => None, - Err(e) => Some((Err(e), resp)), - } - }, - )), - current_chunk: Vec::new(), - chunk_position: 0, - finished: false, - }) - } -} - -impl AsyncRead for HTTPStream { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut tokio::io::ReadBuf<'_>, - ) -> Poll> { - if self.finished { - return Poll::Ready(Ok(())); - } - - // First, try to read from current chunk if available - if self.chunk_position < self.current_chunk.len() { - let remaining_in_chunk = self.current_chunk.len() - self.chunk_position; - let to_read = std::cmp::min(remaining_in_chunk, buf.remaining()); - - let chunk_data = - &self.current_chunk[self.chunk_position..self.chunk_position + to_read]; - buf.put_slice(chunk_data); - self.chunk_position += to_read; - - return Poll::Ready(Ok(())); - } - - // Current chunk is exhausted, try to get the next chunk - match self.stream.as_mut().poll_next(cx) { - Poll::Ready(Some(Ok(chunk))) => { - if chunk.is_empty() { - // Empty chunk, mark as finished - self.finished = true; - Poll::Ready(Ok(())) - } else { - // New chunk available - self.current_chunk = chunk; - self.chunk_position = 0; - - // Read from the new chunk - let to_read = std::cmp::min(self.current_chunk.len(), buf.remaining()); - let chunk_data = &self.current_chunk[..to_read]; - buf.put_slice(chunk_data); - self.chunk_position = to_read; - - Poll::Ready(Ok(())) - } - } - Poll::Ready(Some(Err(e))) => Poll::Ready(Err(std::io::Error::other(e))), - Poll::Ready(None) => { - // Stream ended - self.finished = true; - Poll::Ready(Ok(())) - } - Poll::Pending => Poll::Pending, - } - } -} - -// Remote-specific implementations for shared types -impl CommandExitStatus { - /// Create a CommandExitStatus for remote processes - pub fn from_remote( - code: Option, - success: bool, - remote_process_id: Option, - remote_session_id: Option, - ) -> Self { - Self { - code, - success, - #[cfg(unix)] - signal: None, - 
remote_process_id, - remote_session_id, - } - } -} diff --git a/backend/src/execution_monitor.rs b/backend/src/execution_monitor.rs deleted file mode 100644 index 46955985..00000000 --- a/backend/src/execution_monitor.rs +++ /dev/null @@ -1,1193 +0,0 @@ -use git2::Repository; -use uuid::Uuid; - -use crate::{ - app_state::AppState, - models::{ - execution_process::{ExecutionProcess, ExecutionProcessStatus, ExecutionProcessType}, - task::{Task, TaskStatus}, - task_attempt::TaskAttempt, - }, - services::{NotificationConfig, NotificationService, ProcessService}, - utils::worktree_manager::WorktreeManager, -}; - -/// Delegation context structure -#[derive(Debug, serde::Deserialize)] -struct DelegationContext { - delegate_to: String, - operation_params: DelegationOperationParams, -} - -#[derive(Debug, serde::Deserialize)] -struct DelegationOperationParams { - task_id: uuid::Uuid, - project_id: uuid::Uuid, - attempt_id: uuid::Uuid, - additional: Option, -} - -/// Parse delegation context from process args JSON -fn parse_delegation_context(args_json: &str) -> Option { - // Parse the args JSON array - if let Ok(args_array) = serde_json::from_str::(args_json) { - if let Some(args) = args_array.as_array() { - // Look for --delegation-context flag - for (i, arg) in args.iter().enumerate() { - if let Some(arg_str) = arg.as_str() { - if arg_str == "--delegation-context" && i + 1 < args.len() { - // Next argument should be the delegation context JSON - if let Some(context_str) = args[i + 1].as_str() { - if let Ok(context) = - serde_json::from_str::(context_str) - { - return Some(context); - } - } - } - } - } - } - } - None -} - -/// Handle delegation after setup completion -async fn handle_setup_delegation(app_state: &AppState, delegation_context: DelegationContext) { - let params = &delegation_context.operation_params; - let task_id = params.task_id; - let project_id = params.project_id; - let attempt_id = params.attempt_id; - - tracing::info!( - "Delegating to {} after setup completion for attempt {}", - delegation_context.delegate_to, - attempt_id - ); - - let result = match delegation_context.delegate_to.as_str() { - "dev_server" => { - ProcessService::start_dev_server_direct( - &app_state.db_pool, - app_state, - attempt_id, - task_id, - project_id, - ) - .await - } - "coding_agent" => { - ProcessService::start_coding_agent( - &app_state.db_pool, - app_state, - attempt_id, - task_id, - project_id, - ) - .await - } - "followup" => { - let prompt = params - .additional - .as_ref() - .and_then(|a| a.get("prompt")) - .and_then(|p| p.as_str()) - .unwrap_or(""); - - ProcessService::start_followup_execution_direct( - &app_state.db_pool, - app_state, - attempt_id, - task_id, - project_id, - prompt, - ) - .await - .map(|_| ()) - } - _ => { - tracing::error!( - "Unknown delegation target: {}", - delegation_context.delegate_to - ); - return; - } - }; - - if let Err(e) = result { - tracing::error!( - "Failed to delegate to {} after setup completion: {}", - delegation_context.delegate_to, - e - ); - } else { - tracing::info!( - "Successfully delegated to {} after setup completion", - delegation_context.delegate_to - ); - } -} - -/// Commit any unstaged changes in the worktree after execution completion -async fn commit_execution_changes( - worktree_path: &str, - attempt_id: Uuid, - summary: Option<&str>, -) -> Result<(), Box> { - // Run git operations in a blocking task since git2 is synchronous - let worktree_path = worktree_path.to_string(); - let summary = summary.map(|s| s.to_string()); - 
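// The owned clones above exist because spawn_blocking requires a 'static closure;
// the borrowed &str parameters cannot be moved onto the blocking thread directly.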
tokio::task::spawn_blocking(move || { - let worktree_repo = Repository::open(&worktree_path)?; - - // Check if there are any changes to commit - let status = worktree_repo.statuses(None)?; - let has_changes = status.iter().any(|entry| { - let flags = entry.status(); - flags.contains(git2::Status::INDEX_NEW) - || flags.contains(git2::Status::INDEX_MODIFIED) - || flags.contains(git2::Status::INDEX_DELETED) - || flags.contains(git2::Status::WT_NEW) - || flags.contains(git2::Status::WT_MODIFIED) - || flags.contains(git2::Status::WT_DELETED) - }); - - if !has_changes { - return Ok::<(), Box>(()); - } - - // Get the current signature for commits - let signature = worktree_repo.signature()?; - - // Get the current HEAD commit - let head = worktree_repo.head()?; - let parent_commit = head.peel_to_commit()?; - - // Stage all changes - let mut worktree_index = worktree_repo.index()?; - worktree_index.add_all(["*"].iter(), git2::IndexAddOption::DEFAULT, None)?; - worktree_index.write()?; - - let tree_id = worktree_index.write_tree()?; - let tree = worktree_repo.find_tree(tree_id)?; - - // Create commit for the changes - let commit_message = if let Some(ref summary_msg) = summary { - summary_msg.clone() - } else { - format!("Task attempt {} - Final changes", attempt_id) - }; - worktree_repo.commit( - Some("HEAD"), - &signature, - &signature, - &commit_message, - &tree, - &[&parent_commit], - )?; - - Ok(()) - }) - .await??; - - Ok(()) -} - -/// Check if worktree has uncommitted changes and warn if so -fn check_uncommitted_changes(worktree_path: &str) { - if let Ok(repo) = Repository::open(worktree_path) { - if let Ok(statuses) = repo.statuses(None) { - // Simplified check: any status entry indicates changes - if !statuses.is_empty() { - tracing::warn!( - "Deleting worktree {} with uncommitted changes", - worktree_path - ); - } - } - } -} - -/// Delete a single git worktree and its filesystem directory using WorktreeManager -async fn delete_worktree( - worktree_path: &str, - main_repo_path: &str, - attempt_id: Uuid, -) -> Result<(), Box> { - let worktree_path_buf = std::path::PathBuf::from(worktree_path); - - // Check if worktree directory exists first - no-op if already gone - if !worktree_path_buf.exists() { - tracing::debug!( - "Worktree {} already doesn't exist, skipping cleanup", - worktree_path - ); - return Ok(()); - } - - // Check for uncommitted changes and warn - check_uncommitted_changes(worktree_path); - - match WorktreeManager::cleanup_worktree(&worktree_path_buf, Some(main_repo_path)).await { - Ok(_) => { - tracing::info!( - "Successfully cleaned up worktree for attempt {}", - attempt_id - ); - Ok(()) - } - Err(e) => { - tracing::error!( - "Failed to cleanup worktree for attempt {}: {}", - attempt_id, - e - ); - Err(Box::new(e) as Box) - } - } -} - -/// Clean up all worktrees for a specific task (immediate cleanup) -pub async fn cleanup_task_worktrees( - pool: &sqlx::SqlitePool, - task_id: Uuid, -) -> Result<(), Box> { - let task_attempts_with_project = - TaskAttempt::find_by_task_id_with_project(pool, task_id).await?; - - if task_attempts_with_project.is_empty() { - tracing::debug!("No worktrees found for task {} to clean up", task_id); - return Ok(()); - } - - tracing::info!( - "Starting immediate cleanup of {} worktrees for task {}", - task_attempts_with_project.len(), - task_id - ); - - let mut cleaned_count = 0; - let mut failed_count = 0; - - for (attempt_id, worktree_path, git_repo_path) in task_attempts_with_project { - if let Err(e) = delete_worktree(&worktree_path, 
&git_repo_path, attempt_id).await { - tracing::error!( - "Failed to cleanup worktree for attempt {}: {}", - attempt_id, - e - ); - failed_count += 1; - // Continue with other attempts even if one fails - } else { - // Mark worktree as deleted in database after successful cleanup - if let Err(e) = - crate::models::task_attempt::TaskAttempt::mark_worktree_deleted(pool, attempt_id) - .await - { - tracing::error!( - "Failed to mark worktree as deleted in database for attempt {}: {}", - attempt_id, - e - ); - } else { - cleaned_count += 1; - } - } - } - - tracing::info!( - "Completed immediate cleanup for task {}: {} worktrees cleaned, {} failed", - task_id, - cleaned_count, - failed_count - ); - - Ok(()) -} - -/// Defensively check for externally deleted worktrees and mark them as deleted in the database -async fn check_externally_deleted_worktrees(pool: &sqlx::SqlitePool) { - let active_attempts = match sqlx::query!( - r#"SELECT id as "id!: Uuid", worktree_path FROM task_attempts WHERE worktree_deleted = FALSE"# - ) - .fetch_all(pool) - .await - { - Ok(attempts) => attempts, - Err(e) => { - tracing::error!("Failed to query active task attempts for external deletion check: {}", e); - return; - } - }; - - tracing::debug!( - "Checking {} active worktrees for external deletion...", - active_attempts.len() - ); - - let mut externally_deleted_count = 0; - for record in active_attempts { - let attempt_id = record.id; - let worktree_path = &record.worktree_path; - - // Check if worktree directory exists - if !std::path::Path::new(worktree_path).exists() { - // Worktree was deleted externally, mark as deleted in database - if let Err(e) = - crate::models::task_attempt::TaskAttempt::mark_worktree_deleted(pool, attempt_id) - .await - { - tracing::error!( - "Failed to mark externally deleted worktree as deleted for attempt {}: {}", - attempt_id, - e - ); - } else { - externally_deleted_count += 1; - tracing::debug!( - "Marked externally deleted worktree as deleted for attempt {} (path: {})", - attempt_id, - worktree_path - ); - } - } - } - - if externally_deleted_count > 0 { - tracing::info!( - "Found and marked {} externally deleted worktrees", - externally_deleted_count - ); - } else { - tracing::debug!("No externally deleted worktrees found"); - } -} - -/// Find and delete orphaned worktrees that don't correspond to any task attempts -async fn cleanup_orphaned_worktrees(pool: &sqlx::SqlitePool) { - // Check if orphan cleanup is disabled via environment variable - if std::env::var("DISABLE_WORKTREE_ORPHAN_CLEANUP").is_ok() { - tracing::debug!("Orphan worktree cleanup is disabled via DISABLE_WORKTREE_ORPHAN_CLEANUP environment variable"); - return; - } - let worktree_base_dir = crate::models::task_attempt::TaskAttempt::get_worktree_base_dir(); - - // Check if base directory exists - if !worktree_base_dir.exists() { - tracing::debug!( - "Worktree base directory {} does not exist, skipping orphan cleanup", - worktree_base_dir.display() - ); - return; - } - - // Read all directories in the base directory - let entries = match std::fs::read_dir(&worktree_base_dir) { - Ok(entries) => entries, - Err(e) => { - tracing::error!( - "Failed to read worktree base directory {}: {}", - worktree_base_dir.display(), - e - ); - return; - } - }; - - let mut orphaned_count = 0; - let mut checked_count = 0; - - for entry in entries { - let entry = match entry { - Ok(entry) => entry, - Err(e) => { - tracing::warn!("Failed to read directory entry: {}", e); - continue; - } - }; - - let path = entry.path(); - - // Only process 
directories - if !path.is_dir() { - continue; - } - - let worktree_path_str = path.to_string_lossy().to_string(); - checked_count += 1; - - // Check if this worktree path exists in the database - let exists_in_db = match sqlx::query!( - "SELECT COUNT(*) as count FROM task_attempts WHERE worktree_path = ?", - worktree_path_str - ) - .fetch_one(pool) - .await - { - Ok(row) => row.count > 0, - Err(e) => { - tracing::error!( - "Failed to check database for worktree path {}: {}", - worktree_path_str, - e - ); - continue; - } - }; - - if !exists_in_db { - // This is an orphaned worktree - delete it - tracing::info!("Found orphaned worktree: {}", worktree_path_str); - - // For orphaned worktrees, we try to clean up git metadata if possible - // then remove the directory - if let Err(e) = cleanup_orphaned_worktree_directory(&path).await { - tracing::error!( - "Failed to remove orphaned worktree {}: {}", - worktree_path_str, - e - ); - } else { - orphaned_count += 1; - tracing::info!( - "Successfully removed orphaned worktree: {}", - worktree_path_str - ); - } - } - } - - if orphaned_count > 0 { - tracing::info!( - "Cleaned up {} orphaned worktrees (checked {} total directories)", - orphaned_count, - checked_count - ); - } else { - tracing::debug!( - "No orphaned worktrees found (checked {} directories)", - checked_count - ); - } -} - -/// Clean up an orphaned worktree directory, attempting to clean git metadata if possible -async fn cleanup_orphaned_worktree_directory( - worktree_path: &std::path::Path, -) -> Result<(), std::io::Error> { - // Use WorktreeManager for proper cleanup - it will try to infer the repo path - // and clean up what it can, even if the main repo is gone - match WorktreeManager::cleanup_worktree(worktree_path, None).await { - Ok(()) => { - tracing::debug!( - "WorktreeManager successfully cleaned up orphaned worktree: {}", - worktree_path.display() - ); - } - Err(e) => { - tracing::warn!( - "WorktreeManager cleanup failed for orphaned worktree {}: {}", - worktree_path.display(), - e - ); - - // If WorktreeManager cleanup failed, fall back to simple directory removal - // This ensures we delete as much as we can - if worktree_path.exists() { - tracing::debug!( - "Falling back to simple directory removal for orphaned worktree: {}", - worktree_path.display() - ); - std::fs::remove_dir_all(worktree_path).map_err(|e| { - std::io::Error::new( - e.kind(), - format!("Failed to remove orphaned worktree directory: {}", e), - ) - })?; - } - } - } - - Ok(()) -} - -pub async fn execution_monitor(app_state: AppState) { - let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(5)); - let mut cleanup_interval = tokio::time::interval(tokio::time::Duration::from_secs(1800)); // 30 minutes - - loop { - tokio::select! 
{ - _ = interval.tick() => { - // Check for completed processes FIRST to avoid race conditions - let completed_executions = app_state.get_running_executions_for_monitor().await; - - // Handle completed executions - for (execution_process_id, task_attempt_id, success, exit_code) in completed_executions { - let status_text = if success { - "completed successfully" - } else { - "failed" - }; - let exit_text = if let Some(code) = exit_code { - format!(" with exit code {}", code) - } else { - String::new() - }; - - tracing::info!( - "Execution {} {}{}", - execution_process_id, - status_text, - exit_text - ); - - // Update the execution process record - let execution_status = if success { - ExecutionProcessStatus::Completed - } else { - ExecutionProcessStatus::Failed - }; - - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - execution_process_id, - execution_status, - exit_code, - ) - .await - { - tracing::error!( - "Failed to update execution process {} completion: {}", - execution_process_id, - e - ); - } - - // Get the execution process to determine next steps - if let Ok(Some(execution_process)) = - ExecutionProcess::find_by_id(&app_state.db_pool, execution_process_id).await - { - match execution_process.process_type { - ExecutionProcessType::SetupScript => { - handle_setup_completion( - &app_state, - task_attempt_id, - execution_process, - success, - ) - .await; - } - ExecutionProcessType::CleanupScript => { - handle_cleanup_completion( - &app_state, - task_attempt_id, - execution_process_id, - execution_process, - success, - exit_code, - ) - .await; - } - ExecutionProcessType::CodingAgent => { - handle_coding_agent_completion( - &app_state, - task_attempt_id, - execution_process_id, - execution_process, - success, - exit_code, - ) - .await; - } - ExecutionProcessType::DevServer => { - handle_dev_server_completion( - &app_state, - task_attempt_id, - execution_process_id, - execution_process, - success, - exit_code, - ) - .await; - } - } - } else { - tracing::error!( - "Failed to find execution process {} for completion handling", - execution_process_id - ); - } - } - - // Check for orphaned execution processes AFTER handling completions - // Add a small delay to ensure completed processes are properly handled first - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - - let running_processes = match ExecutionProcess::find_running(&app_state.db_pool).await { - Ok(processes) => processes, - Err(e) => { - tracing::error!("Failed to query running execution processes: {}", e); - continue; - } - }; - - for process in running_processes { - // Check if this process is not actually running in the app state - if !app_state.has_running_execution(process.task_attempt_id).await { - // Additional check: if the process was recently updated, skip it to prevent race conditions - let now = chrono::Utc::now(); - let time_since_update = now - process.updated_at; - if time_since_update.num_seconds() < 10 { - // Process was updated within last 10 seconds, likely just completed - tracing::debug!( - "Skipping recently updated orphaned process {} (updated {} seconds ago)", - process.id, - time_since_update.num_seconds() - ); - continue; - } - - // This is truly an orphaned execution process - mark it as failed - tracing::info!( - "Found orphaned execution process {} for task attempt {}", - process.id, - process.task_attempt_id - ); - // This is truly an orphaned execution process - mark it as failed - tracing::info!( - "Found orphaned execution process {} for task attempt 
{}", - process.id, - process.task_attempt_id - ); - - // Update the execution process status first - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - process.id, - ExecutionProcessStatus::Failed, - None, // No exit code for orphaned processes - ) - .await - { - tracing::error!( - "Failed to update orphaned execution process {} status: {}", - process.id, - e - ); - continue; - } - - // Process marked as failed - - tracing::info!("Marked orphaned execution process {} as failed", process.id); - - // Update task status to InReview for coding agent and setup script failures - if matches!( - process.process_type, - ExecutionProcessType::CodingAgent | ExecutionProcessType::SetupScript - ) { - if let Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, process.task_attempt_id).await - { - if let Ok(Some(task)) = - Task::find_by_id(&app_state.db_pool, task_attempt.task_id).await - { - if let Err(e) = Task::update_status( - &app_state.db_pool, - task.id, - task.project_id, - TaskStatus::InReview, - ) - .await - { - tracing::error!("Failed to update task status to InReview for orphaned attempt: {}", e); - } - } - } - } - } - } - } - _ = cleanup_interval.tick() => { - tracing::info!("Starting periodic worktree cleanup..."); - - // First, defensively check for externally deleted worktrees - check_externally_deleted_worktrees(&app_state.db_pool).await; - - // Then, find and delete orphaned worktrees that don't belong to any task - cleanup_orphaned_worktrees(&app_state.db_pool).await; - - // Then, proceed with normal expired worktree cleanup - match TaskAttempt::find_expired_for_cleanup(&app_state.db_pool).await { - Ok(expired_attempts) => { - if expired_attempts.is_empty() { - tracing::debug!("No expired worktrees found"); - } else { - tracing::info!("Found {} expired worktrees to clean up", expired_attempts.len()); - for (attempt_id, worktree_path, git_repo_path) in expired_attempts { - if let Err(e) = delete_worktree(&worktree_path, &git_repo_path, attempt_id).await { - tracing::error!("Failed to cleanup expired worktree {}: {}", attempt_id, e); - } else { - // Mark worktree as deleted in database after successful cleanup - if let Err(e) = crate::models::task_attempt::TaskAttempt::mark_worktree_deleted(&app_state.db_pool, attempt_id).await { - tracing::error!("Failed to mark worktree as deleted in database for attempt {}: {}", attempt_id, e); - } else { - tracing::info!("Successfully marked worktree as deleted for attempt {}", attempt_id); - } - } - } - } - } - Err(e) => { - tracing::error!("Failed to query expired task attempts: {}", e); - } - } - } - } - } -} - -/// Handle setup script completion -async fn handle_setup_completion( - app_state: &AppState, - task_attempt_id: Uuid, - execution_process: ExecutionProcess, - success: bool, -) { - if success { - // Mark setup as completed in database - if let Err(e) = TaskAttempt::mark_setup_completed(&app_state.db_pool, task_attempt_id).await - { - tracing::error!( - "Failed to mark setup as completed for attempt {}: {}", - task_attempt_id, - e - ); - } - - // Setup completed successfully - - // Check for delegation context in process args - let delegation_result = if let Some(args_json) = &execution_process.args { - parse_delegation_context(args_json) - } else { - None - }; - - if let Some(delegation_context) = delegation_result { - // Delegate to the original operation - handle_setup_delegation(app_state, delegation_context).await; - } else { - // Fallback to original behavior - start coding agent - if let 
Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, task_attempt_id).await - { - if let Ok(Some(task)) = - Task::find_by_id(&app_state.db_pool, task_attempt.task_id).await - { - // Start the coding agent - if let Err(e) = ProcessService::start_coding_agent( - &app_state.db_pool, - app_state, - task_attempt_id, - task.id, - task.project_id, - ) - .await - { - tracing::error!( - "Failed to start coding agent after setup completion: {}", - e - ); - } - } - } - } - } else { - // Setup failed, update task status - - // Update task status to InReview since setup failed - if let Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, task_attempt_id).await - { - if let Ok(Some(task)) = Task::find_by_id(&app_state.db_pool, task_attempt.task_id).await - { - if let Err(e) = Task::update_status( - &app_state.db_pool, - task.id, - task.project_id, - TaskStatus::InReview, - ) - .await - { - tracing::error!( - "Failed to update task status to InReview after setup failure: {}", - e - ); - } - } - } - } -} - -/// Handle coding agent completion -async fn handle_coding_agent_completion( - app_state: &AppState, - task_attempt_id: Uuid, - execution_process_id: Uuid, - execution_process: ExecutionProcess, - success: bool, - exit_code: Option, -) { - // Extract and store assistant message from execution logs - let summary = if let Some(stdout) = &execution_process.stdout { - if let Some(assistant_message) = crate::executor::parse_assistant_message_from_logs(stdout) - { - if let Err(e) = crate::models::executor_session::ExecutorSession::update_summary( - &app_state.db_pool, - execution_process_id, - &assistant_message, - ) - .await - { - tracing::error!( - "Failed to update summary for execution process {}: {}", - execution_process_id, - e - ); - None - } else { - tracing::info!( - "Successfully stored summary for execution process {}", - execution_process_id - ); - Some(assistant_message) - } - } else { - None - } - } else { - None - }; - - // Note: Notifications and status updates moved to cleanup completion handler - // to ensure they only fire after all processing (including cleanup) is complete - - // Get task attempt to access worktree path for committing changes - if let Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, task_attempt_id).await - { - // Commit any unstaged changes after execution completion - if let Err(e) = commit_execution_changes( - &task_attempt.worktree_path, - task_attempt_id, - summary.as_deref(), - ) - .await - { - tracing::error!( - "Failed to commit execution changes for attempt {}: {}", - task_attempt_id, - e - ); - } else { - tracing::info!( - "Successfully committed execution changes for attempt {}", - task_attempt_id - ); - } - - // Coding agent execution completed - tracing::info!( - "Task attempt {} set to paused after coding agent completion", - task_attempt_id - ); - - // Run cleanup script if configured, otherwise immediately finalize task - if let Ok(Some(task)) = Task::find_by_id(&app_state.db_pool, task_attempt.task_id).await { - // Check if cleanup script should run - let should_run_cleanup = if let Ok(Some(project)) = - crate::models::project::Project::find_by_id(&app_state.db_pool, task.project_id) - .await - { - project - .cleanup_script - .as_ref() - .map(|script| !script.trim().is_empty()) - .unwrap_or(false) - } else { - false - }; - - if should_run_cleanup { - // Run cleanup script - completion will be handled in cleanup completion handler - if let Err(e) = - 
crate::services::process_service::ProcessService::run_cleanup_script_if_configured( - &app_state.db_pool, - app_state, - task_attempt_id, - task_attempt.task_id, - task.project_id, - ) - .await - { - tracing::error!( - "Failed to run cleanup script for attempt {}: {}", - task_attempt_id, - e - ); - // Even if cleanup fails to start, finalize the task - finalize_task_completion(app_state, task_attempt_id, &task, success, exit_code).await; - } - } else { - // No cleanup script configured, immediately finalize task - finalize_task_completion(app_state, task_attempt_id, &task, success, exit_code) - .await; - } - } - } else { - tracing::error!( - "Failed to find task attempt {} for coding agent completion", - task_attempt_id - ); - } -} - -/// Finalize task completion with notifications and status updates -async fn finalize_task_completion( - app_state: &AppState, - task_attempt_id: Uuid, - task: &crate::models::task::Task, - success: bool, - exit_code: Option, -) { - // Send notifications if enabled - let sound_enabled = app_state.get_sound_alerts_enabled().await; - let push_enabled = app_state.get_push_notifications_enabled().await; - - if sound_enabled || push_enabled { - let sound_file = app_state.get_sound_file().await; - let notification_config = NotificationConfig { - sound_enabled, - push_enabled, - }; - - let notification_service = NotificationService::new(notification_config); - - // Get task attempt for notification details - if let Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, task_attempt_id).await - { - let title = format!("Task Complete: {}", task.title); - let message = if success { - format!( - "✅ '{}' completed successfully\nBranch: {}\nExecutor: {}", - task.title, - task_attempt.branch, - task_attempt.executor.as_deref().unwrap_or("default") - ) - } else { - format!( - "❌ '{}' execution failed\nBranch: {}\nExecutor: {}", - task.title, - task_attempt.branch, - task_attempt.executor.as_deref().unwrap_or("default") - ) - }; - - notification_service - .notify(&title, &message, &sound_file) - .await; - } - } - - // Track analytics event - app_state - .track_analytics_event( - "task_attempt_finished", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "project_id": task.project_id.to_string(), - "attempt_id": task_attempt_id.to_string(), - "execution_success": success, - "exit_code": exit_code, - })), - ) - .await; - - // Update task status to InReview - if let Err(e) = Task::update_status( - &app_state.db_pool, - task.id, - task.project_id, - TaskStatus::InReview, - ) - .await - { - tracing::error!( - "Failed to update task status to InReview for completed attempt: {}", - e - ); - } -} - -/// Handle cleanup script completion -async fn handle_cleanup_completion( - app_state: &AppState, - task_attempt_id: Uuid, - execution_process_id: Uuid, - _execution_process: ExecutionProcess, - success: bool, - exit_code: Option, -) { - let exit_text = if let Some(code) = exit_code { - format!(" with exit code {}", code) - } else { - String::new() - }; - - tracing::info!( - "Cleanup script for task attempt {} completed{}", - task_attempt_id, - exit_text - ); - - // Update execution process status - let process_status = if success { - ExecutionProcessStatus::Completed - } else { - ExecutionProcessStatus::Failed - }; - - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - execution_process_id, - process_status, - exit_code, - ) - .await - { - tracing::error!( - "Failed to update cleanup script execution process status: {}", - e - ); - } 
- - // Auto-commit changes after successful cleanup script execution - if success { - if let Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, task_attempt_id).await - { - let commit_message = "Cleanup script"; - - if let Err(e) = commit_execution_changes( - &task_attempt.worktree_path, - task_attempt_id, - Some(commit_message), - ) - .await - { - tracing::error!( - "Failed to commit changes after cleanup script for attempt {}: {}", - task_attempt_id, - e - ); - } else { - tracing::info!( - "Successfully committed changes after cleanup script for attempt {}", - task_attempt_id - ); - } - } else { - tracing::error!( - "Failed to retrieve task attempt {} for cleanup commit", - task_attempt_id - ); - } - } - - // Finalize task completion after cleanup (whether successful or failed) - if let Ok(Some(task_attempt)) = - TaskAttempt::find_by_id(&app_state.db_pool, task_attempt_id).await - { - if let Ok(Some(task)) = Task::find_by_id(&app_state.db_pool, task_attempt.task_id).await { - // Get the coding agent execution process to determine original success status - let coding_success = if let Ok(processes) = - ExecutionProcess::find_by_task_attempt_id(&app_state.db_pool, task_attempt_id).await - { - // Find the most recent completed coding agent process - processes - .iter() - .filter(|p| { - p.process_type - == crate::models::execution_process::ExecutionProcessType::CodingAgent - }) - .filter(|p| { - p.status - == crate::models::execution_process::ExecutionProcessStatus::Completed - }) - .next_back() - .map(|p| p.exit_code == Some(0)) - .unwrap_or(false) - } else { - false - }; - - finalize_task_completion(app_state, task_attempt_id, &task, coding_success, exit_code) - .await; - } else { - tracing::error!( - "Failed to retrieve task {} for cleanup completion finalization", - task_attempt.task_id - ); - } - } else { - tracing::error!( - "Failed to retrieve task attempt {} for cleanup completion finalization", - task_attempt_id - ); - } -} - -/// Handle dev server completion (future functionality) -async fn handle_dev_server_completion( - app_state: &AppState, - task_attempt_id: Uuid, - execution_process_id: Uuid, - _execution_process: ExecutionProcess, - success: bool, - exit_code: Option, -) { - let exit_text = if let Some(code) = exit_code { - format!(" with exit code {}", code) - } else { - String::new() - }; - - tracing::info!( - "Dev server for task attempt {} completed{}", - task_attempt_id, - exit_text - ); - - // Update execution process status instead of creating activity - let process_status = if success { - ExecutionProcessStatus::Completed - } else { - ExecutionProcessStatus::Failed - }; - - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - execution_process_id, - process_status, - exit_code, - ) - .await - { - tracing::error!( - "Failed to update dev server execution process status: {}", - e - ); - } -} diff --git a/backend/src/executor.rs b/backend/src/executor.rs deleted file mode 100644 index 0b682933..00000000 --- a/backend/src/executor.rs +++ /dev/null @@ -1,1081 +0,0 @@ -use std::str::FromStr; - -use async_trait::async_trait; -use serde::{Deserialize, Serialize}; -use tokio::io::{AsyncBufReadExt, BufReader}; -use ts_rs::TS; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandError, CommandProcess, CommandRunner}, - executors::{ - AiderExecutor, AmpExecutor, CCRExecutor, CharmOpencodeExecutor, ClaudeExecutor, - CodexExecutor, EchoExecutor, GeminiExecutor, SetupScriptExecutor, SstOpencodeExecutor, - }, -}; - -// Constants for 
database streaming - fast for near-real-time updates
-const STDOUT_UPDATE_THRESHOLD: usize = 1;
-const BUFFER_SIZE_THRESHOLD: usize = 256;
-
-/// Normalized conversation representation for different executor formats
-#[derive(Debug, Clone, Serialize, Deserialize, TS)]
-#[ts(export)]
-pub struct NormalizedConversation {
-    pub entries: Vec<NormalizedEntry>,
-    pub session_id: Option<String>,
-    pub executor_type: String,
-    pub prompt: Option<String>,
-    pub summary: Option<String>,
-}
-
-/// Individual entry in a normalized conversation
-#[derive(Debug, Clone, Serialize, Deserialize, TS)]
-#[ts(export)]
-pub struct NormalizedEntry {
-    pub timestamp: Option<String>,
-    pub entry_type: NormalizedEntryType,
-    pub content: String,
-    #[ts(skip)]
-    pub metadata: Option<serde_json::Value>,
-}
-
-/// Types of entries in a normalized conversation
-#[derive(Debug, Clone, Serialize, Deserialize, TS)]
-#[serde(tag = "type", rename_all = "snake_case")]
-#[ts(export)]
-pub enum NormalizedEntryType {
-    UserMessage,
-    AssistantMessage,
-    ToolUse {
-        tool_name: String,
-        action_type: ActionType,
-    },
-    SystemMessage,
-    ErrorMessage,
-    Thinking,
-}
-
-/// Types of tool actions that can be performed
-#[derive(Debug, Clone, Serialize, Deserialize, TS)]
-#[serde(tag = "action", rename_all = "snake_case")]
-#[ts(export)]
-pub enum ActionType {
-    FileRead { path: String },
-    FileWrite { path: String },
-    CommandRun { command: String },
-    Search { query: String },
-    WebFetch { url: String },
-    TaskCreate { description: String },
-    PlanPresentation { plan: String },
-    Other { description: String },
-}
-
-/// Context information for spawn failures to provide comprehensive error details
-#[derive(Debug, Clone)]
-pub struct SpawnContext {
-    /// The type of executor that failed (e.g., "Claude", "Amp", "Echo")
-    pub executor_type: String,
-    /// The command that failed to spawn
-    pub command: String,
-    /// Command line arguments
-    pub args: Vec<String>,
-    /// Working directory where the command was executed
-    pub working_dir: String,
-    /// Task ID if available
-    pub task_id: Option<Uuid>,
-    /// Task title for user-friendly context
-    pub task_title: Option<String>,
-    /// Additional executor-specific context
-    pub additional_context: Option<String>,
-}
-
-impl SpawnContext {
-    /// Set the executor type (required field not available in Command)
-    pub fn with_executor_type(mut self, executor_type: impl Into<String>) -> Self {
-        self.executor_type = executor_type.into();
-        self
-    }
-
-    /// Add task context (optional, not available in Command)
-    pub fn with_task(mut self, task_id: Uuid, task_title: Option<String>) -> Self {
-        self.task_id = Some(task_id);
-        self.task_title = task_title;
-        self
-    }
-
-    /// Add additional context information (optional, not available in Command)
-    pub fn with_context(mut self, context: impl Into<String>) -> Self {
-        self.additional_context = Some(context.into());
-        self
-    }
-    /// Create SpawnContext from Command, then use builder methods for additional context
-    pub fn from_command(command: &CommandRunner, executor_type: impl Into<String>) -> Self {
-        Self::from(command).with_executor_type(executor_type)
-    }
-
-    /// Finalize the context and create an ExecutorError
-    pub fn spawn_error(self, error: CommandError) -> ExecutorError {
-        ExecutorError::spawn_failed(error, self)
-    }
-}
-
-/// Extract SpawnContext from a tokio::process::Command
-/// This automatically captures all available information from the Command object
-impl From<&CommandRunner> for SpawnContext {
-    fn from(command: &CommandRunner) -> Self {
-        let program = command.get_program().to_string();
-        let args = command.get_args().to_vec();
-
-        let working_dir = command
- .get_current_dir() - .unwrap_or("current_dir") - .to_string(); - - Self { - executor_type: "Unknown".to_string(), // Must be set using with_executor_type() - command: program, - args, - working_dir, - task_id: None, - task_title: None, - additional_context: None, - } - } -} - -#[derive(Debug)] -pub enum ExecutorError { - SpawnFailed { - error: CommandError, - context: SpawnContext, - }, - TaskNotFound, - DatabaseError(sqlx::Error), - ContextCollectionFailed(String), - GitError(String), - InvalidSessionId(String), - FollowUpNotSupported, -} - -impl std::fmt::Display for ExecutorError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ExecutorError::SpawnFailed { error, context } => { - write!(f, "Failed to spawn {} process", context.executor_type)?; - - // Add task context if available - if let Some(ref title) = context.task_title { - write!(f, " for task '{}'", title)?; - } else if let Some(task_id) = context.task_id { - write!(f, " for task {}", task_id)?; - } - - // Add command details - write!(f, ": command '{}' ", context.command)?; - if !context.args.is_empty() { - write!(f, "with args [{}] ", context.args.join(", "))?; - } - - // Add working directory - write!(f, "in directory '{}' ", context.working_dir)?; - - // Add additional context if provided - if let Some(ref additional) = context.additional_context { - write!(f, "({}) ", additional)?; - } - - // Finally, add the underlying error - write!(f, "- {}", error) - } - ExecutorError::TaskNotFound => write!(f, "Task not found"), - ExecutorError::DatabaseError(e) => write!(f, "Database error: {}", e), - ExecutorError::ContextCollectionFailed(msg) => { - write!(f, "Context collection failed: {}", msg) - } - ExecutorError::GitError(msg) => write!(f, "Git operation error: {}", msg), - ExecutorError::InvalidSessionId(msg) => write!(f, "Invalid session_id: {}", msg), - ExecutorError::FollowUpNotSupported => { - write!(f, "This executor does not support follow-up sessions") - } - } - } -} - -impl std::error::Error for ExecutorError {} - -impl From for ExecutorError { - fn from(err: sqlx::Error) -> Self { - ExecutorError::DatabaseError(err) - } -} - -impl From for ExecutorError { - fn from(err: crate::models::task_attempt::TaskAttemptError) -> Self { - match err { - crate::models::task_attempt::TaskAttemptError::Database(e) => { - ExecutorError::DatabaseError(e) - } - crate::models::task_attempt::TaskAttemptError::Git(e) => { - ExecutorError::GitError(format!("Git operation failed: {}", e)) - } - crate::models::task_attempt::TaskAttemptError::TaskNotFound => { - ExecutorError::TaskNotFound - } - crate::models::task_attempt::TaskAttemptError::ProjectNotFound => { - ExecutorError::ContextCollectionFailed("Project not found".to_string()) - } - crate::models::task_attempt::TaskAttemptError::ValidationError(msg) => { - ExecutorError::ContextCollectionFailed(format!("Validation failed: {}", msg)) - } - crate::models::task_attempt::TaskAttemptError::BranchNotFound(branch) => { - ExecutorError::GitError(format!("Branch '{}' not found", branch)) - } - crate::models::task_attempt::TaskAttemptError::GitService(e) => { - ExecutorError::GitError(format!("Git service error: {}", e)) - } - crate::models::task_attempt::TaskAttemptError::GitHubService(e) => { - ExecutorError::GitError(format!("GitHub service error: {}", e)) - } - } - } -} - -impl ExecutorError { - /// Create a new SpawnFailed error with context - pub fn spawn_failed(error: CommandError, context: SpawnContext) -> Self { - ExecutorError::SpawnFailed { 
error, context }
-    }
-}
-
-/// Trait for coding agents that can execute tasks, normalize logs, and support follow-up sessions
-#[async_trait]
-pub trait Executor: Send + Sync {
-    /// Spawn the command for a given task attempt
-    async fn spawn(
-        &self,
-        pool: &sqlx::SqlitePool,
-        task_id: Uuid,
-        worktree_path: &str,
-    ) -> Result<CommandProcess, ExecutorError>;
-
-    /// Spawn a follow-up session for executors that support it
-    ///
-    /// This method is used to continue an existing session with a new prompt.
-    /// Not all executors support follow-up sessions, so the default implementation
-    /// returns an error.
-    async fn spawn_followup(
-        &self,
-        _pool: &sqlx::SqlitePool,
-        _task_id: Uuid,
-        _session_id: &str,
-        _prompt: &str,
-        _worktree_path: &str,
-    ) -> Result<CommandProcess, ExecutorError> {
-        Err(ExecutorError::FollowUpNotSupported)
-    }
-    /// Normalize executor logs into a standard format
-    fn normalize_logs(
-        &self,
-        _logs: &str,
-        _worktree_path: &str,
-    ) -> Result<NormalizedConversation, ExecutorError> {
-        // Default implementation returns empty conversation
-        Ok(NormalizedConversation {
-            entries: vec![],
-            session_id: None,
-            executor_type: "unknown".to_string(),
-            prompt: None,
-            summary: None,
-        })
-    }
-
-    #[allow(clippy::result_large_err)]
-    async fn setup_streaming(
-        &self,
-        child: &mut CommandProcess,
-        pool: &sqlx::SqlitePool,
-        attempt_id: Uuid,
-        execution_process_id: Uuid,
-    ) -> Result<(), ExecutorError> {
-        let streams = child
-            .stream()
-            .await
-            .expect("Failed to get stdio from child process");
-        let stdout = streams
-            .stdout
-            .expect("Failed to take stdout from child process");
-        let stderr = streams
-            .stderr
-            .expect("Failed to take stderr from child process");
-
-        let pool_clone1 = pool.clone();
-        let pool_clone2 = pool.clone();
-
-        tokio::spawn(stream_output_to_db(
-            stdout,
-            pool_clone1,
-            attempt_id,
-            execution_process_id,
-            true,
-        ));
-        tokio::spawn(stream_output_to_db(
-            stderr,
-            pool_clone2,
-            attempt_id,
-            execution_process_id,
-            false,
-        ));
-
-        Ok(())
-    }
-
-    /// Execute the command and stream output to database in real-time
-    async fn execute_streaming(
-        &self,
-        pool: &sqlx::SqlitePool,
-        task_id: Uuid,
-        attempt_id: Uuid,
-        execution_process_id: Uuid,
-        worktree_path: &str,
-    ) -> Result<CommandProcess, ExecutorError> {
-        let mut child = self.spawn(pool, task_id, worktree_path).await?;
-        Self::setup_streaming(self, &mut child, pool, attempt_id, execution_process_id).await?;
-        Ok(child)
-    }
-
-    /// Execute a follow-up command and stream output to database in real-time
-    #[allow(clippy::too_many_arguments)]
-    async fn execute_followup_streaming(
-        &self,
-        pool: &sqlx::SqlitePool,
-        task_id: Uuid,
-        attempt_id: Uuid,
-        execution_process_id: Uuid,
-        session_id: &str,
-        prompt: &str,
-        worktree_path: &str,
-    ) -> Result<CommandProcess, ExecutorError> {
-        let mut child = self
-            .spawn_followup(pool, task_id, session_id, prompt, worktree_path)
-            .await?;
-        Self::setup_streaming(self, &mut child, pool, attempt_id, execution_process_id).await?;
-        Ok(child)
-    }
-}
-
-/// Runtime executor types for internal use
-#[derive(Debug, Clone)]
-pub enum ExecutorType {
-    SetupScript(String),
-    CleanupScript(String),
-    DevServer(String),
-    CodingAgent {
-        config: ExecutorConfig,
-        follow_up: Option<FollowUpInfo>,
-    },
-}
-
-/// Information needed to continue a previous session
-#[derive(Debug, Clone)]
-pub struct FollowUpInfo {
-    pub session_id: String,
-    pub prompt: String,
-}
-
-/// Configuration for different executor types
-#[derive(Debug, Clone, Serialize, Deserialize, TS)]
-#[serde(tag = "type", rename_all = "kebab-case")]
-#[ts(export)]
-pub enum ExecutorConfig {
-    Echo,
-    Claude,
-    ClaudePlan,
-    Amp,
-    Gemini,
#[serde(alias = "setup_script")] - SetupScript { - script: String, - }, - ClaudeCodeRouter, - #[serde(alias = "charmopencode")] - CharmOpencode, - #[serde(alias = "opencode")] - SstOpencode, - Aider, - Codex, -} - -// Constants for frontend -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct ExecutorConstants { - pub executor_types: Vec, - pub executor_labels: Vec, -} - -impl FromStr for ExecutorConfig { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "echo" => Ok(ExecutorConfig::Echo), - "claude" => Ok(ExecutorConfig::Claude), - "claude-plan" => Ok(ExecutorConfig::ClaudePlan), - "amp" => Ok(ExecutorConfig::Amp), - "gemini" => Ok(ExecutorConfig::Gemini), - "charm-opencode" => Ok(ExecutorConfig::CharmOpencode), - "claude-code-router" => Ok(ExecutorConfig::ClaudeCodeRouter), - "sst-opencode" => Ok(ExecutorConfig::SstOpencode), - "aider" => Ok(ExecutorConfig::Aider), - "codex" => Ok(ExecutorConfig::Codex), - "setup-script" => Ok(ExecutorConfig::SetupScript { - script: "setup script".to_string(), - }), - _ => Err(format!("Unknown executor type: {}", s)), - } - } -} - -impl ExecutorConfig { - pub fn create_executor(&self) -> Box { - match self { - ExecutorConfig::Echo => Box::new(EchoExecutor), - ExecutorConfig::Claude => Box::new(ClaudeExecutor::new()), - ExecutorConfig::ClaudePlan => Box::new(ClaudeExecutor::new_plan_mode()), - ExecutorConfig::Amp => Box::new(AmpExecutor), - ExecutorConfig::Gemini => Box::new(GeminiExecutor), - ExecutorConfig::ClaudeCodeRouter => Box::new(CCRExecutor::new()), - ExecutorConfig::CharmOpencode => Box::new(CharmOpencodeExecutor), - ExecutorConfig::SstOpencode => Box::new(SstOpencodeExecutor::new()), - ExecutorConfig::Aider => Box::new(AiderExecutor::new()), - ExecutorConfig::Codex => Box::new(CodexExecutor::new()), - ExecutorConfig::SetupScript { script } => { - Box::new(SetupScriptExecutor::new(script.clone())) - } - } - } - - pub fn config_path(&self) -> Option { - match self { - ExecutorConfig::Echo => None, - ExecutorConfig::CharmOpencode => { - dirs::home_dir().map(|home| home.join(".opencode.json")) - } - ExecutorConfig::Claude => dirs::home_dir().map(|home| home.join(".claude.json")), - ExecutorConfig::ClaudePlan => dirs::home_dir().map(|home| home.join(".claude.json")), - ExecutorConfig::ClaudeCodeRouter => { - dirs::home_dir().map(|home| home.join(".claude.json")) - } - ExecutorConfig::Amp => { - dirs::config_dir().map(|config| config.join("amp").join("settings.json")) - } - ExecutorConfig::Gemini => { - dirs::home_dir().map(|home| home.join(".gemini").join("settings.json")) - } - ExecutorConfig::SstOpencode => { - #[cfg(unix)] - { - xdg::BaseDirectories::with_prefix("opencode").get_config_file("opencode.json") - } - #[cfg(not(unix))] - { - dirs::config_dir().map(|config| config.join("opencode").join("opencode.json")) - } - } - ExecutorConfig::Aider => None, - ExecutorConfig::Codex => { - dirs::home_dir().map(|home| home.join(".codex").join("config.toml")) - } - ExecutorConfig::SetupScript { .. 
} => None, - } - } - - /// Get the JSON attribute path for MCP servers in the config file - pub fn mcp_attribute_path(&self) -> Option> { - match self { - ExecutorConfig::Echo => None, // Echo doesn't support MCP - ExecutorConfig::CharmOpencode => Some(vec!["mcpServers"]), - ExecutorConfig::SstOpencode => Some(vec!["mcp"]), - ExecutorConfig::Claude => Some(vec!["mcpServers"]), - ExecutorConfig::ClaudePlan => None, // Claude Plan shares Claude config - ExecutorConfig::Amp => Some(vec!["amp", "mcpServers"]), // Nested path for Amp - ExecutorConfig::Gemini => Some(vec!["mcpServers"]), - ExecutorConfig::ClaudeCodeRouter => Some(vec!["mcpServers"]), - ExecutorConfig::Aider => None, // Aider doesn't support MCP. https://github.com/Aider-AI/aider/issues/3314 - ExecutorConfig::Codex => None, // Codex uses TOML config, frontend doesn't handle TOML yet - ExecutorConfig::SetupScript { .. } => None, // Setup scripts don't support MCP - } - } - - /// Check if this executor supports MCP configuration - pub fn supports_mcp(&self) -> bool { - !matches!( - self, - ExecutorConfig::Echo - | ExecutorConfig::Aider - | ExecutorConfig::SetupScript { .. } - | ExecutorConfig::Codex - ) - } - - /// Get the display name for this executor - pub fn display_name(&self) -> &'static str { - match self { - ExecutorConfig::Echo => "Echo (Test Mode)", - ExecutorConfig::CharmOpencode => "Charm Opencode", - ExecutorConfig::SstOpencode => "SST Opencode", - ExecutorConfig::Claude => "Claude", - ExecutorConfig::ClaudePlan => "Claude Plan", - ExecutorConfig::Amp => "Amp", - ExecutorConfig::Gemini => "Gemini", - ExecutorConfig::ClaudeCodeRouter => "Claude Code Router", - ExecutorConfig::Aider => "Aider", - ExecutorConfig::Codex => "Codex", - ExecutorConfig::SetupScript { .. } => "Setup Script", - } - } -} - -impl std::fmt::Display for ExecutorConfig { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let s = match self { - ExecutorConfig::Echo => "echo", - ExecutorConfig::Claude => "claude", - ExecutorConfig::ClaudePlan => "claude-plan", - ExecutorConfig::Amp => "amp", - ExecutorConfig::Gemini => "gemini", - ExecutorConfig::SstOpencode => "sst-opencode", - ExecutorConfig::CharmOpencode => "charm-opencode", - ExecutorConfig::ClaudeCodeRouter => "claude-code-router", - ExecutorConfig::Aider => "aider", - ExecutorConfig::Codex => "codex", - ExecutorConfig::SetupScript { .. 
} => "setup-script", - }; - write!(f, "{}", s) - } -} - -/// Stream output from a child process to the database -pub async fn stream_output_to_db( - output: impl tokio::io::AsyncRead + Unpin, - pool: sqlx::SqlitePool, - attempt_id: Uuid, - execution_process_id: Uuid, - is_stdout: bool, -) { - if is_stdout { - stream_stdout_to_db(output, pool, attempt_id, execution_process_id).await; - } else { - stream_stderr_to_db(output, pool, attempt_id, execution_process_id).await; - } -} - -/// Stream stdout from a child process to the database (immediate updates) -async fn stream_stdout_to_db( - output: impl tokio::io::AsyncRead + Unpin, - pool: sqlx::SqlitePool, - attempt_id: Uuid, - execution_process_id: Uuid, -) { - use crate::models::{execution_process::ExecutionProcess, executor_session::ExecutorSession}; - - let mut reader = BufReader::new(output); - let mut line = String::new(); - let mut accumulated_output = String::new(); - let mut update_counter = 0; - let mut session_id_parsed = false; - - loop { - line.clear(); - match reader.read_line(&mut line).await { - Ok(0) => break, // EOF - Ok(_) => { - // Parse session ID from the first JSONL line - if !session_id_parsed { - if let Some(external_session_id) = parse_session_id_from_line(&line) { - if let Err(e) = ExecutorSession::update_session_id( - &pool, - execution_process_id, - &external_session_id, - ) - .await - { - tracing::error!( - "Failed to update session ID for execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Updated session ID {} for execution process {}", - external_session_id, - execution_process_id - ); - } - session_id_parsed = true; - } - } - accumulated_output.push_str(&line); - update_counter += 1; - - // Update database every threshold lines or when we have a significant amount of data - if update_counter >= STDOUT_UPDATE_THRESHOLD - || accumulated_output.len() > BUFFER_SIZE_THRESHOLD - { - if let Err(e) = ExecutionProcess::append_output( - &pool, - execution_process_id, - Some(&accumulated_output), - None, - ) - .await - { - tracing::error!( - "Failed to update stdout for attempt {}: {}", - attempt_id, - e - ); - } - accumulated_output.clear(); - update_counter = 0; - } - } - Err(e) => { - tracing::error!("Error reading stdout for attempt {}: {}", attempt_id, e); - break; - } - } - } - - // Flush any remaining output - if !accumulated_output.is_empty() { - if let Err(e) = ExecutionProcess::append_output( - &pool, - execution_process_id, - Some(&accumulated_output), - None, - ) - .await - { - tracing::error!("Failed to flush stdout for attempt {}: {}", attempt_id, e); - } - } -} - -/// Stream stderr from a child process to the database (buffered with timeout) -async fn stream_stderr_to_db( - output: impl tokio::io::AsyncRead + Unpin, - pool: sqlx::SqlitePool, - attempt_id: Uuid, - execution_process_id: Uuid, -) { - use tokio::time::{timeout, Duration}; - - let mut reader = BufReader::new(output); - let mut line = String::new(); - let mut accumulated_output = String::new(); - const STDERR_FLUSH_TIMEOUT_MS: u64 = 100; // Fast flush for near-real-time streaming - const STDERR_FLUSH_TIMEOUT: Duration = Duration::from_millis(STDERR_FLUSH_TIMEOUT_MS); - - loop { - line.clear(); - - // Try to read a line with a timeout - let read_result = timeout(STDERR_FLUSH_TIMEOUT, reader.read_line(&mut line)).await; - - match read_result { - Ok(Ok(0)) => { - // EOF - flush remaining output and break - break; - } - Ok(Ok(_)) => { - // Successfully read a line - just accumulate it - 
accumulated_output.push_str(&line); - } - Ok(Err(e)) => { - tracing::error!("Error reading stderr for attempt {}: {}", attempt_id, e); - break; - } - Err(_) => { - // Timeout occurred - flush accumulated output if any - if !accumulated_output.is_empty() { - flush_stderr_chunk( - &pool, - execution_process_id, - &accumulated_output, - attempt_id, - ) - .await; - accumulated_output.clear(); - } - } - } - } - - // Final flush for any remaining output - if !accumulated_output.is_empty() { - flush_stderr_chunk(&pool, execution_process_id, &accumulated_output, attempt_id).await; - } -} - -/// Flush a chunk of stderr output to the database -async fn flush_stderr_chunk( - pool: &sqlx::SqlitePool, - execution_process_id: Uuid, - content: &str, - attempt_id: Uuid, -) { - use crate::models::execution_process::ExecutionProcess; - - let trimmed = content.trim(); - if trimmed.is_empty() { - return; - } - - // Add a delimiter to separate chunks in the database - let chunk_with_delimiter = format!("{}\n---STDERR_CHUNK_BOUNDARY---\n", trimmed); - - if let Err(e) = ExecutionProcess::append_output( - pool, - execution_process_id, - None, - Some(&chunk_with_delimiter), - ) - .await - { - tracing::error!( - "Failed to flush stderr chunk for attempt {}: {}", - attempt_id, - e - ); - } else { - tracing::debug!( - "Flushed stderr chunk ({} chars) for process {}", - trimmed.len(), - execution_process_id - ); - } -} - -/// Parse assistant message from executor logs (JSONL format) -pub fn parse_assistant_message_from_logs(logs: &str) -> Option { - use serde_json::Value; - - let mut last_assistant_message = None; - - for line in logs.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Try to parse as JSON - if let Ok(json) = serde_json::from_str::(trimmed) { - // Check for Claude format: {"type":"assistant","message":{"content":[...]}} - if let Some(msg_type) = json.get("type").and_then(|t| t.as_str()) { - if msg_type == "assistant" { - if let Some(message) = json.get("message") { - if let Some(content) = message.get("content").and_then(|c| c.as_array()) { - // Extract text content from Claude assistant message - let mut text_parts = Vec::new(); - for content_item in content { - if let Some(content_type) = - content_item.get("type").and_then(|t| t.as_str()) - { - if content_type == "text" { - if let Some(text) = - content_item.get("text").and_then(|t| t.as_str()) - { - text_parts.push(text); - } - } - } - } - if !text_parts.is_empty() { - last_assistant_message = Some(text_parts.join("\n")); - } - } - } - continue; - } - } - - // Check for AMP format: {"type":"messages","messages":[[1,{"role":"assistant",...}]]} - if let Some(messages) = json.get("messages").and_then(|m| m.as_array()) { - for message_entry in messages { - if let Some(message_data) = message_entry.as_array().and_then(|arr| arr.get(1)) - { - if let Some(role) = message_data.get("role").and_then(|r| r.as_str()) { - if role == "assistant" { - if let Some(content) = - message_data.get("content").and_then(|c| c.as_array()) - { - // Extract text content from AMP assistant message - let mut text_parts = Vec::new(); - for content_item in content { - if let Some(content_type) = - content_item.get("type").and_then(|t| t.as_str()) - { - if content_type == "text" { - if let Some(text) = content_item - .get("text") - .and_then(|t| t.as_str()) - { - text_parts.push(text); - } - } - } - } - if !text_parts.is_empty() { - last_assistant_message = Some(text_parts.join("\n")); - } - } - } - } - } - } - } - } - } - - 
last_assistant_message -} - -/// Parse session_id from Claude or thread_id from Amp from the first JSONL line -fn parse_session_id_from_line(line: &str) -> Option { - use serde_json::Value; - - let trimmed = line.trim(); - if trimmed.is_empty() { - return None; - } - - // Try to parse as JSON - if let Ok(json) = serde_json::from_str::(trimmed) { - // Check for Claude session_id - if let Some(session_id) = json.get("session_id").and_then(|v| v.as_str()) { - return Some(session_id.to_string()); - } - - // Check for Amp threadID - if let Some(thread_id) = json.get("threadID").and_then(|v| v.as_str()) { - return Some(thread_id.to_string()); - } - } - - None -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::executors::{AiderExecutor, AmpExecutor, ClaudeExecutor}; - - #[test] - fn test_parse_claude_session_id() { - let claude_line = r#"{"type":"system","subtype":"init","cwd":"/private/tmp/mission-control-worktree-3abb979d-2e0e-4404-a276-c16d98a97dd5","session_id":"cc0889a2-0c59-43cc-926b-739a983888a2","tools":["Task","Bash","Glob","Grep","LS","exit_plan_mode","Read","Edit","MultiEdit","Write","NotebookRead","NotebookEdit","WebFetch","TodoRead","TodoWrite","WebSearch"],"mcp_servers":[],"model":"claude-sonnet-4-20250514","permissionMode":"bypassPermissions","apiKeySource":"/login managed key"}"#; - - assert_eq!( - parse_session_id_from_line(claude_line), - Some("cc0889a2-0c59-43cc-926b-739a983888a2".to_string()) - ); - } - - #[test] - fn test_parse_amp_thread_id() { - let amp_line = r#"{"type":"initial","threadID":"T-286f908a-2cd8-40cc-9490-da689b2f1560"}"#; - - assert_eq!( - parse_session_id_from_line(amp_line), - Some("T-286f908a-2cd8-40cc-9490-da689b2f1560".to_string()) - ); - } - - #[test] - fn test_parse_invalid_json() { - let invalid_line = "not json at all"; - assert_eq!(parse_session_id_from_line(invalid_line), None); - } - - #[test] - fn test_parse_json_without_ids() { - let other_json = r#"{"type":"other","message":"hello"}"#; - assert_eq!(parse_session_id_from_line(other_json), None); - } - - #[test] - fn test_parse_empty_line() { - assert_eq!(parse_session_id_from_line(""), None); - assert_eq!(parse_session_id_from_line(" "), None); - } - - #[test] - fn test_parse_assistant_message_from_logs() { - // Test AMP format - let amp_logs = r#"{"type":"initial","threadID":"T-e7af5516-e5a5-4754-8e34-810dc658716e"} -{"type":"messages","messages":[[0,{"role":"user","content":[{"type":"text","text":"Task title: Test task"}],"meta":{"sentAt":1751385490573}}]],"toolResults":[]} -{"type":"messages","messages":[[1,{"role":"assistant","content":[{"type":"thinking","thinking":"Testing"},{"type":"text","text":"The Pythagorean theorem states that in a right triangle, the square of the hypotenuse equals the sum of squares of the other two sides: **a² + b² = c²**."}],"state":{"type":"complete","stopReason":"end_turn"}}]],"toolResults":[]} -{"type":"state","state":"idle"} -{"type":"shutdown"}"#; - - let result = parse_assistant_message_from_logs(amp_logs); - assert!(result.is_some()); - assert!(result.as_ref().unwrap().contains("Pythagorean theorem")); - assert!(result.as_ref().unwrap().contains("a² + b² = c²")); - } - - #[test] - fn test_parse_claude_assistant_message_from_logs() { - // Test Claude format - let claude_logs = r#"{"type":"system","subtype":"init","cwd":"/private/tmp","session_id":"e988eeea-3712-46a1-82d4-84fbfaa69114","tools":[],"model":"claude-sonnet-4-20250514"} 
-{"type":"assistant","message":{"id":"msg_123","type":"message","role":"assistant","model":"claude-sonnet-4-20250514","content":[{"type":"text","text":"I'll explain the Pythagorean theorem for you.\n\nThe Pythagorean theorem states that in a right triangle, the square of the hypotenuse equals the sum of the squares of the other two sides.\n\n**Formula:** a² + b² = c²"}],"stop_reason":null},"session_id":"e988eeea-3712-46a1-82d4-84fbfaa69114"} -{"type":"result","subtype":"success","is_error":false,"duration_ms":6059,"result":"Final result"}"#; - - let result = parse_assistant_message_from_logs(claude_logs); - assert!(result.is_some()); - assert!(result.as_ref().unwrap().contains("Pythagorean theorem")); - assert!(result - .as_ref() - .unwrap() - .contains("**Formula:** a² + b² = c²")); - } - - #[test] - fn test_amp_log_normalization() { - let amp_executor = AmpExecutor; - let amp_logs = r#"{"type":"initial","threadID":"T-f8f7fec0-b330-47ab-b63a-b72c42f1ef6a"} -{"type":"messages","messages":[[0,{"role":"user","content":[{"type":"text","text":"Task title: Create and start should open task\nTask description: When I press 'create & start' on task creation dialog it should then open the task in the sidebar"}],"meta":{"sentAt":1751544747623}}]],"toolResults":[]} -{"type":"messages","messages":[[1,{"role":"assistant","content":[{"type":"thinking","thinking":"The user wants to implement a feature where pressing \"create & start\" on the task creation dialog should open the task in the sidebar."},{"type":"text","text":"I'll help you implement the \"create & start\" functionality. Let me explore the codebase to understand the current task creation and sidebar structure."},{"type":"tool_use","id":"toolu_01FQqskzGAhZaZu8H6qSs5pV","name":"todo_write","input":{"todos":[{"id":"1","content":"Explore task creation dialog component","status":"todo","priority":"high"}]}}],"state":{"type":"complete","stopReason":"tool_use"}}]],"toolResults":[]}"#; - - let result = amp_executor - .normalize_logs(amp_logs, "/tmp/test-worktree") - .unwrap(); - - assert_eq!(result.executor_type, "amp"); - assert_eq!( - result.session_id, - Some("T-f8f7fec0-b330-47ab-b63a-b72c42f1ef6a".to_string()) - ); - assert!(!result.entries.is_empty()); - - // Check that we have user message, assistant message, thinking, and tool use entries - let user_messages: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::UserMessage)) - .collect(); - assert!(!user_messages.is_empty()); - - let assistant_messages: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::AssistantMessage)) - .collect(); - assert!(!assistant_messages.is_empty()); - - let thinking_entries: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::Thinking)) - .collect(); - assert!(!thinking_entries.is_empty()); - - let tool_uses: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::ToolUse { .. })) - .collect(); - assert!(!tool_uses.is_empty()); - - // Check that tool use content is concise (not the old verbose format) - let todo_tool_use = tool_uses.iter().find(|e| match &e.entry_type { - NormalizedEntryType::ToolUse { tool_name, .. } => tool_name == "todo_write", - _ => false, - }); - assert!(todo_tool_use.is_some()); - let todo_tool_use = todo_tool_use.unwrap(); - // Should be concise, not "Tool: todo_write with input: ..." 
- assert_eq!( - todo_tool_use.content, - "TODO List:\n⏳ Explore task creation dialog component (high)" - ); - } - - #[test] - fn test_claude_log_normalization() { - let claude_executor = ClaudeExecutor::new(); - let claude_logs = r#"{"type":"system","subtype":"init","cwd":"/private/tmp/mission-control-worktree-8ff34214-7bb4-4a5a-9f47-bfdf79e20368","session_id":"499dcce4-04aa-4a3e-9e0c-ea0228fa87c9","tools":["Task","Bash","Glob","Grep","LS","exit_plan_mode","Read","Edit","MultiEdit","Write","NotebookRead","NotebookEdit","WebFetch","TodoRead","TodoWrite","WebSearch"],"mcp_servers":[],"model":"claude-sonnet-4-20250514","permissionMode":"bypassPermissions","apiKeySource":"none"} -{"type":"assistant","message":{"id":"msg_014xUHgkAhs6cRx5WVT3s7if","type":"message","role":"assistant","model":"claude-sonnet-4-20250514","content":[{"type":"text","text":"I'll help you list your projects using vibe-kanban. Let me first explore the codebase to understand how vibe-kanban works and find your projects."}],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":13497,"cache_read_input_tokens":0,"output_tokens":1,"service_tier":"standard"}},"parent_tool_use_id":null,"session_id":"499dcce4-04aa-4a3e-9e0c-ea0228fa87c9"} -{"type":"assistant","message":{"id":"msg_014xUHgkAhs6cRx5WVT3s7if","type":"message","role":"assistant","model":"claude-sonnet-4-20250514","content":[{"type":"tool_use","id":"toolu_01Br3TvXdmW6RPGpB5NihTHh","name":"Task","input":{"description":"Find vibe-kanban projects","prompt":"I need to find and list projects using vibe-kanban."}}],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":4,"cache_creation_input_tokens":13497,"cache_read_input_tokens":0,"output_tokens":1,"service_tier":"standard"}},"parent_tool_use_id":null,"session_id":"499dcce4-04aa-4a3e-9e0c-ea0228fa87c9"}"#; - - let result = claude_executor - .normalize_logs(claude_logs, "/tmp/test-worktree") - .unwrap(); - - assert_eq!(result.executor_type, "Claude Code"); - assert_eq!( - result.session_id, - Some("499dcce4-04aa-4a3e-9e0c-ea0228fa87c9".to_string()) - ); - assert!(!result.entries.is_empty()); - - // Check that we have system, assistant message, and tool use entries - let system_messages: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::SystemMessage)) - .collect(); - assert!(!system_messages.is_empty()); - - let assistant_messages: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::AssistantMessage)) - .collect(); - assert!(!assistant_messages.is_empty()); - - let tool_uses: Vec<_> = result - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::ToolUse { .. })) - .collect(); - assert!(!tool_uses.is_empty()); - - // Check that tool use content is concise (not the old verbose format) - let task_tool_use = tool_uses.iter().find(|e| match &e.entry_type { - NormalizedEntryType::ToolUse { tool_name, .. } => tool_name == "Task", - _ => false, - }); - assert!(task_tool_use.is_some()); - let task_tool_use = task_tool_use.unwrap(); - // Should be the task description, not "Tool: Task with input: ..." 
- assert_eq!(task_tool_use.content, "Find vibe-kanban projects"); - } - - #[test] - fn test_aider_executor_config_integration() { - // Test that Aider executor can be created from ExecutorConfig - let aider_config = ExecutorConfig::Aider; - let _executor = aider_config.create_executor(); - - // Test that it has the correct display name - assert_eq!(aider_config.display_name(), "Aider"); - assert_eq!(aider_config.to_string(), "aider"); - - // Test that it doesn't support MCP - assert!(!aider_config.supports_mcp()); - assert_eq!(aider_config.mcp_attribute_path(), None); - - // Test that it has the correct config path - let config_path = aider_config.config_path(); - assert!(config_path.is_none()); - - // Test that we can cast it to an AiderExecutor - // This mainly tests that the Box was created correctly - let aider_executor = AiderExecutor::new(); - let result = aider_executor.normalize_logs("", "/tmp"); - assert!(result.is_ok()); - assert_eq!(result.unwrap().executor_type, "aider"); - } -} diff --git a/backend/src/executors/aider.rs b/backend/src/executors/aider.rs deleted file mode 100644 index 0f7c1cd6..00000000 --- a/backend/src/executors/aider.rs +++ /dev/null @@ -1,935 +0,0 @@ -use async_trait::async_trait; -use serde_json::Value; -use tokio::io::{AsyncBufReadExt, BufReader}; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{ - ActionType, Executor, ExecutorError, NormalizedConversation, NormalizedEntry, - NormalizedEntryType, - }, - models::{ - execution_process::ExecutionProcess, executor_session::ExecutorSession, task::Task, - task_attempt::TaskAttempt, - }, - utils::{path::make_path_relative, shell::get_shell_command}, -}; - -// Sub-modules for utilities -pub mod filter; - -use self::filter::{parse_session_id_from_line, AiderFilter}; - -/// State for tracking diff blocks (SEARCH/REPLACE patterns) -#[derive(Debug, Clone)] -struct DiffBlockState { - /// Current mode: None, InSearch, InReplace - mode: DiffMode, - /// Accumulated content for the current diff block - content: Vec, - /// Start timestamp for the diff block - start_timestamp: Option>, - /// Buffered line that might be a file name - buffered_line: Option, - /// File name associated with current diff block - current_file: Option, -} - -#[derive(Debug, Clone, PartialEq)] -enum DiffMode { - None, - InSearch, - InReplace, -} - -impl Default for DiffBlockState { - fn default() -> Self { - Self { - mode: DiffMode::None, - content: Vec::new(), - start_timestamp: None, - buffered_line: None, - current_file: None, - } - } -} - -struct Content { - pub stdout: Option, - pub stderr: Option, -} - -/// Process a single line for session extraction and content formatting -async fn process_line_for_content( - line: &str, - session_extracted: &mut bool, - diff_state: &mut DiffBlockState, - worktree_path: &str, - pool: &sqlx::SqlitePool, - execution_process_id: uuid::Uuid, -) -> Option { - if !*session_extracted { - if let Some(session_id) = parse_session_id_from_line(line) { - if let Err(e) = - ExecutorSession::update_session_id(pool, execution_process_id, &session_id).await - { - tracing::error!( - "Failed to update session ID for execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Updated session ID {} for execution process {}", - session_id, - execution_process_id - ); - *session_extracted = true; - } - - // Don't return any content for session lines - return None; - } - } - - // Filter out noise completely - if AiderFilter::is_noise(line) { - return 
None; - } - - // Filter out user input echo - if AiderFilter::is_user_input(line) { - return None; - } - - // Handle diff block markers (SEARCH/REPLACE patterns) - if AiderFilter::is_diff_block_marker(line) { - let trimmed = line.trim(); - - match trimmed { - "<<<<<<< SEARCH" => { - // If we have a buffered line, it's the file name for this diff - if let Some(buffered) = diff_state.buffered_line.take() { - diff_state.current_file = Some(buffered); - } - - diff_state.mode = DiffMode::InSearch; - diff_state.content.clear(); - diff_state.start_timestamp = Some(chrono::Utc::now()); - return None; // Don't output individual markers - } - "=======" => { - if diff_state.mode == DiffMode::InSearch { - diff_state.mode = DiffMode::InReplace; - return None; // Don't output individual markers - } - } - ">>>>>>> REPLACE" => { - if diff_state.mode == DiffMode::InReplace { - // End of diff block - create atomic edit action - let diff_content = diff_state.content.join("\n"); - let formatted = format_diff_as_normalized_json( - &diff_content, - diff_state.current_file.as_deref(), - diff_state.start_timestamp, - worktree_path, - ); - - // Reset state - diff_state.mode = DiffMode::None; - diff_state.content.clear(); - diff_state.start_timestamp = None; - diff_state.current_file = None; - - return Some(Content { - stdout: Some(formatted), - stderr: None, - }); - } - } - _ => {} - } - return None; - } - - // If we're inside a diff block, accumulate content - if diff_state.mode != DiffMode::None { - diff_state.content.push(line.to_string()); - return None; // Don't output individual lines within diff blocks - } - - // Check if we have a buffered line from previous call - let mut result = None; - if let Some(buffered) = diff_state.buffered_line.take() { - // Output the buffered line as a normal message since current line is not a diff marker - let formatted = format_aider_content_as_normalized_json(&buffered, worktree_path); - result = Some(Content { - stdout: Some(formatted), - stderr: None, - }); - } - - // Check if line is a system message - if AiderFilter::is_system_message(line) { - // Apply scanning repo progress simplification for system messages - let processed_line = if AiderFilter::is_scanning_repo_progress(line) { - AiderFilter::simplify_scanning_repo_message(line) - } else { - line.to_string() - }; - - let formatted = format_aider_content_as_normalized_json(&processed_line, worktree_path); - - // If we had a buffered line, we need to handle both outputs - if result.is_some() { - // For now, prioritize the current system message and drop the buffered one - // TODO: In a real implementation, we might want to queue both - } - - return Some(Content { - stdout: Some(formatted), - stderr: None, - }); - } - - // Check if line is an error - if AiderFilter::is_error(line) { - let formatted = format_aider_content_as_normalized_json(line, worktree_path); - - // If we had a buffered line, prioritize the error - return Some(Content { - stdout: result.and_then(|r| r.stdout), - stderr: Some(formatted), - }); - } - - // Regular assistant message - buffer it in case next line is a diff marker - let trimmed = line.trim(); - if !trimmed.is_empty() { - diff_state.buffered_line = Some(line.to_string()); - } - - // Return any previously buffered content - result -} - -/// Stream stdout and stderr from Aider process with filtering -pub async fn stream_aider_stdout_stderr_to_db( - stdout: impl tokio::io::AsyncRead + Unpin + Send + 'static, - stderr: impl tokio::io::AsyncRead + Unpin + Send + 'static, - pool: 
sqlx::SqlitePool, - attempt_id: Uuid, - execution_process_id: Uuid, - worktree_path: String, -) { - let stdout_task = { - let pool = pool.clone(); - let worktree_path = worktree_path.clone(); - tokio::spawn(async move { - let mut reader = BufReader::new(stdout); - let mut line = String::new(); - let mut session_extracted = false; - let mut diff_state = DiffBlockState::default(); - - loop { - line.clear(); - - match reader.read_line(&mut line).await { - Ok(0) => break, // EOF - Ok(_) => { - line = line.trim_end_matches(['\r', '\n']).to_string(); - - let content = process_line_for_content( - &line, - &mut session_extracted, - &mut diff_state, - &worktree_path, - &pool, - execution_process_id, - ) - .await; - - if let Some(Content { stdout, stderr }) = content { - if let Err(e) = ExecutionProcess::append_output( - &pool, - execution_process_id, - stdout.as_deref(), - stderr.as_deref(), - ) - .await - { - tracing::error!( - "Failed to write Aider stdout line for attempt {}: {}", - attempt_id, - e - ); - } - } - } - Err(e) => { - tracing::error!("Error reading stdout for attempt {}: {}", attempt_id, e); - break; - } - } - } - - // Flush any remaining buffered content - if let Some(Content { stdout, stderr }) = - flush_buffered_content(&mut diff_state, &worktree_path) - { - if let Err(e) = ExecutionProcess::append_output( - &pool, - execution_process_id, - stdout.as_deref(), - stderr.as_deref(), - ) - .await - { - tracing::error!( - "Failed to write Aider buffered stdout line for attempt {}: {}", - attempt_id, - e - ); - } - } - }) - }; - - let stderr_task = { - let pool = pool.clone(); - let worktree_path = worktree_path.clone(); - tokio::spawn(async move { - let mut reader = BufReader::new(stderr); - let mut line = String::new(); - - loop { - line.clear(); - - match reader.read_line(&mut line).await { - Ok(0) => break, // EOF - Ok(_) => { - let trimmed = line.trim_end_matches(['\r', '\n']); - - // Apply filtering to stderr - filter out noise like "Scanning repo" progress - if !trimmed.trim().is_empty() && !AiderFilter::is_noise(trimmed) { - let formatted = - format_aider_content_as_normalized_json(trimmed, &worktree_path); - - if let Err(e) = ExecutionProcess::append_output( - &pool, - execution_process_id, - None, // No stdout content from stderr - Some(&formatted), - ) - .await - { - tracing::error!( - "Failed to write Aider stderr line for attempt {}: {}", - attempt_id, - e - ); - } - } - } - Err(e) => { - tracing::error!("Error reading stderr for attempt {}: {}", attempt_id, e); - break; - } - } - } - }) - }; - - // Wait for both tasks to complete - let _ = tokio::join!(stdout_task, stderr_task); -} - -/// Format diff content as a normalized JSON entry for atomic edit actions -fn format_diff_as_normalized_json( - _content: &str, - file_name: Option<&str>, - start_timestamp: Option>, - worktree_path: &str, -) -> String { - let timestamp = start_timestamp.unwrap_or_else(chrono::Utc::now); - let timestamp_str = timestamp.to_rfc3339_opts(chrono::SecondsFormat::Micros, true); - - let raw_path = file_name.unwrap_or("multiple_files").to_string(); - - // Normalize the path to be relative to worktree root (matching git diff format) - let path = make_path_relative(&raw_path, worktree_path); - - let normalized_entry = NormalizedEntry { - timestamp: Some(timestamp_str), - entry_type: NormalizedEntryType::ToolUse { - tool_name: "edit".to_string(), - action_type: ActionType::FileWrite { path: path.clone() }, - }, - content: format!("`{}`", path), - metadata: None, - }; - - 
serde_json::to_string(&normalized_entry).unwrap() + "\n" -} - -/// Flush any remaining buffered content when stream ends -fn flush_buffered_content(diff_state: &mut DiffBlockState, worktree_path: &str) -> Option { - if let Some(buffered) = diff_state.buffered_line.take() { - let formatted = format_aider_content_as_normalized_json(&buffered, worktree_path); - Some(Content { - stdout: Some(formatted), - stderr: None, - }) - } else { - None - } -} - -/// Format Aider content as normalized JSON entries for direct database storage -pub fn format_aider_content_as_normalized_json(content: &str, _worktree_path: &str) -> String { - let mut results = Vec::new(); - let base_timestamp = chrono::Utc::now(); - let mut entry_counter = 0u32; - - for line in content.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Generate unique timestamp for each entry by adding microseconds - let unique_timestamp = - base_timestamp + chrono::Duration::microseconds(entry_counter as i64); - let timestamp_str = unique_timestamp.to_rfc3339_opts(chrono::SecondsFormat::Micros, true); - entry_counter += 1; - - // Try to parse as existing JSON first - if let Ok(parsed_json) = serde_json::from_str::(trimmed) { - results.push(parsed_json.to_string()); - continue; - } - - if trimmed.is_empty() { - continue; - } - - // Check message type and create appropriate normalized entry - let normalized_entry = if AiderFilter::is_system_message(trimmed) { - NormalizedEntry { - timestamp: Some(timestamp_str), - entry_type: NormalizedEntryType::SystemMessage, - content: trimmed.to_string(), - metadata: None, - } - } else if AiderFilter::is_error(trimmed) { - NormalizedEntry { - timestamp: Some(timestamp_str), - entry_type: NormalizedEntryType::ErrorMessage, - content: trimmed.to_string(), - metadata: None, - } - } else { - // Regular assistant message - NormalizedEntry { - timestamp: Some(timestamp_str), - entry_type: NormalizedEntryType::AssistantMessage, - content: trimmed.to_string(), - metadata: None, - } - }; - - results.push(serde_json::to_string(&normalized_entry).unwrap()); - } - - // Ensure each JSON entry is on its own line - results.join("\n") + "\n" -} - -/// An executor that uses Aider CLI to process tasks -pub struct AiderExecutor { - executor_type: String, - command: String, -} - -impl Default for AiderExecutor { - fn default() -> Self { - Self::new() - } -} - -impl AiderExecutor { - /// Create a new AiderExecutor with default settings - pub fn new() -> Self { - Self { - executor_type: "Aider".to_string(), - command: "aider . --yes-always --no-show-model-warnings --skip-sanity-check-repo --no-stream --no-fancy-input".to_string(), - } - } -} - -#[async_trait] -impl Executor for AiderExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? 
- .ok_or(ExecutorError::TaskNotFound)?; - - let prompt = if let Some(task_description) = task.description { - format!("{}\n{}", task.title, task_description) - } else { - task.title.to_string() - }; - - // Create temporary message file - let base_dir = TaskAttempt::get_worktree_base_dir(); - let sessions_dir = base_dir.join("aider").join("aider-messages"); - if let Err(e) = tokio::fs::create_dir_all(&sessions_dir).await { - tracing::warn!( - "Failed to create temp message directory {}: {}", - sessions_dir.display(), - e - ); - } - - let message_file = sessions_dir.join(format!("task_{}.md", task_id)); - - // Generate our own session ID and store it in the database immediately - let session_id = format!("aider_task_{}", task_id); - - // Create session directory and chat history file for session persistence - let session_dir = base_dir.join("aider").join("aider-sessions"); - if let Err(e) = tokio::fs::create_dir_all(&session_dir).await { - tracing::warn!( - "Failed to create session directory {}: {}", - session_dir.display(), - e - ); - } - let chat_file = session_dir.join(format!("{}.md", session_id)); - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - let aider_command = format!( - "{} --chat-history-file {} --message-file {}", - &self.command, - chat_file.to_string_lossy(), - message_file.to_string_lossy() - ); - - // Write message file after command is prepared for better error context - tokio::fs::write(&message_file, prompt.as_bytes()) - .await - .map_err(|e| { - ExecutorError::ContextCollectionFailed(format!( - "Failed to write message file {}: {}", - message_file.display(), - e - )) - })?; - - tracing::debug!("Spawning Aider command: {}", &aider_command); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&aider_command) - .working_dir(worktree_path) - .env("COLUMNS", "1000"); // Prevent line wrapping in aider output - - let child = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_task(task_id, Some(task.title.clone())) - .with_context(format!("{} CLI execution for new task", self.executor_type)) - .spawn_error(e) - })?; - - tracing::debug!( - "Started Aider with message file {} for task {}: {:?}", - message_file.display(), - task_id, - prompt - ); - - Ok(child) - } - - /// Execute with Aider filtering for stdout and stderr - async fn execute_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - worktree_path: &str, - ) -> Result { - // Generate our own session ID and store it in the database immediately - let session_id = format!("aider_task_{}", task_id); - if let Err(e) = - ExecutorSession::update_session_id(pool, execution_process_id, &session_id).await - { - tracing::error!( - "Failed to update session ID for execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Set session ID {} for execution process {}", - session_id, - execution_process_id - ); - } - - let mut child = self.spawn(pool, task_id, worktree_path).await?; - - // Take stdout and stderr pipes for Aider filtering - let streams = child - .stream() - .await - .expect("Failed to get stdio from child process"); - let stdout = streams - .stdout - .expect("Failed to take stdout from child process"); - let stderr = streams - .stderr - .expect("Failed to take stderr from child process"); - - // Start Aider filtering task - let pool_clone = 
pool.clone(); - let worktree_path_clone = worktree_path.to_string(); - tokio::spawn(stream_aider_stdout_stderr_to_db( - stdout, - stderr, - pool_clone, - attempt_id, - execution_process_id, - worktree_path_clone, - )); - - Ok(child) - } - - fn normalize_logs( - &self, - logs: &str, - _worktree_path: &str, - ) -> Result { - let mut entries = Vec::new(); - - for line in logs.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Simple passthrough: directly deserialize normalized JSON entries - if let Ok(entry) = serde_json::from_str::(trimmed) { - entries.push(entry); - } - } - - Ok(NormalizedConversation { - entries, - session_id: None, // Session ID is stored directly in the database - executor_type: "aider".to_string(), - prompt: None, - summary: None, - }) - } - - /// Execute follow-up with Aider filtering for stdout and stderr - async fn execute_followup_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - // Update session ID for this execution process to ensure continuity - if let Err(e) = - ExecutorSession::update_session_id(pool, execution_process_id, session_id).await - { - tracing::error!( - "Failed to update session ID for followup execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Updated session ID {} for followup execution process {}", - session_id, - execution_process_id - ); - } - - let mut child = self - .spawn_followup(pool, task_id, session_id, prompt, worktree_path) - .await?; - - // Take stdout and stderr pipes for Aider filtering - let streams = child - .stream() - .await - .expect("Failed to get stdio from child process"); - let stdout = streams - .stdout - .expect("Failed to take stdout from child process"); - let stderr = streams - .stderr - .expect("Failed to take stderr from child process"); - - // Start Aider filtering task - let pool_clone = pool.clone(); - let worktree_path_clone = worktree_path.to_string(); - tokio::spawn(stream_aider_stdout_stderr_to_db( - stdout, - stderr, - pool_clone, - attempt_id, - execution_process_id, - worktree_path_clone, - )); - - Ok(child) - } - - async fn spawn_followup( - &self, - _pool: &sqlx::SqlitePool, - _task_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - let base_dir = TaskAttempt::get_worktree_base_dir(); - - // Create session directory if it doesn't exist - let session_dir = base_dir.join("aider").join("aider-sessions"); - if let Err(e) = tokio::fs::create_dir_all(&session_dir).await { - tracing::warn!( - "Failed to create session directory {}: {}", - session_dir.display(), - e - ); - } - - let chat_file = session_dir.join(format!("{}.md", session_id)); - - // Create temporary message file for the followup prompt - let sessions_dir = base_dir.join("aider").join("aider-messages"); - if let Err(e) = tokio::fs::create_dir_all(&sessions_dir).await { - tracing::warn!( - "Failed to create temp message directory {}: {}", - sessions_dir.display(), - e - ); - } - - let message_file = sessions_dir.join(format!("followup_{}.md", session_id)); - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - let aider_command = format!( - "{} --restore-chat-history --chat-history-file {} --message-file {}", - self.command, - chat_file.to_string_lossy(), - message_file.to_string_lossy() - ); - - // Write message file after command is 
prepared for better error context - tokio::fs::write(&message_file, prompt.as_bytes()) - .await - .map_err(|e| { - ExecutorError::ContextCollectionFailed(format!( - "Failed to write followup message file {}: {}", - message_file.display(), - e - )) - })?; - - tracing::debug!("Spawning Aider command: {}", &aider_command); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&aider_command) - .working_dir(worktree_path) - .env("COLUMNS", "1000"); // Prevent line wrapping in aider output - - let child = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_context(format!( - "{} CLI followup execution for session {}", - self.executor_type, session_id - )) - .spawn_error(e) - })?; - - tracing::debug!( - "Started Aider followup with message file {} and chat history {} for session {}: {:?}", - message_file.display(), - chat_file.display(), - session_id, - prompt - ); - - Ok(child) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::executors::aider::{format_aider_content_as_normalized_json, AiderExecutor}; - - #[test] - fn test_normalize_logs_with_database_format() { - let executor = AiderExecutor::new(); - - // This is what the database should contain after our streaming function processes it - let logs = r#"{"timestamp":"2025-07-21T18:04:00Z","entry_type":{"type":"system_message"},"content":"Main model: anthropic/claude-sonnet-4-20250514","metadata":null} -{"timestamp":"2025-07-21T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"I'll help you with this task.","metadata":null} -{"timestamp":"2025-07-21T18:04:02Z","entry_type":{"type":"error_message"},"content":"Error: File not found","metadata":null} -{"timestamp":"2025-07-21T18:04:03Z","entry_type":{"type":"assistant_message"},"content":"Let me try a different approach.","metadata":null}"#; - - let result = executor.normalize_logs(logs, "/path/to/repo").unwrap(); - - assert_eq!(result.entries.len(), 4); - - // First entry: system message - assert!(matches!( - result.entries[0].entry_type, - crate::executor::NormalizedEntryType::SystemMessage - )); - assert!(result.entries[0].content.contains("Main model:")); - assert!(result.entries[0].timestamp.is_some()); - - // Second entry: assistant message - assert!(matches!( - result.entries[1].entry_type, - crate::executor::NormalizedEntryType::AssistantMessage - )); - assert!(result.entries[1] - .content - .contains("help you with this task")); - - // Third entry: error message - assert!(matches!( - result.entries[2].entry_type, - crate::executor::NormalizedEntryType::ErrorMessage - )); - assert!(result.entries[2].content.contains("File not found")); - - // Fourth entry: assistant message - assert!(matches!( - result.entries[3].entry_type, - crate::executor::NormalizedEntryType::AssistantMessage - )); - assert!(result.entries[3].content.contains("different approach")); - } - - #[test] - fn test_format_aider_content_as_normalized_json() { - let content = r#"Main model: anthropic/claude-sonnet-4-20250514 -I'll help you implement this feature. 
-Error: Could not access file -Let me try a different approach."#; - - let result = format_aider_content_as_normalized_json(content, "/path/to/repo"); - let lines: Vec<&str> = result - .split('\n') - .filter(|line| !line.trim().is_empty()) - .collect(); - - // Should have 4 entries (1 system + 2 assistant + 1 error) - assert_eq!(lines.len(), 4); - - // Parse all entries and verify unique timestamps - let mut timestamps = Vec::new(); - for line in &lines { - let json: serde_json::Value = serde_json::from_str(line).unwrap(); - let timestamp = json["timestamp"].as_str().unwrap().to_string(); - timestamps.push(timestamp); - } - - // Verify all timestamps are unique (no duplicates) - let mut unique_timestamps = timestamps.clone(); - unique_timestamps.sort(); - unique_timestamps.dedup(); - assert_eq!( - timestamps.len(), - unique_timestamps.len(), - "All timestamps should be unique" - ); - - // Parse the first line (should be system message) - let first_json: serde_json::Value = serde_json::from_str(lines[0]).unwrap(); - assert_eq!(first_json["entry_type"]["type"], "system_message"); - assert!(first_json["content"] - .as_str() - .unwrap() - .contains("Main model:")); - - // Parse the second line (should be assistant message) - let second_json: serde_json::Value = serde_json::from_str(lines[1]).unwrap(); - assert_eq!(second_json["entry_type"]["type"], "assistant_message"); - assert!(second_json["content"] - .as_str() - .unwrap() - .contains("help you implement")); - - // Parse the third line (should be error message) - let third_json: serde_json::Value = serde_json::from_str(lines[2]).unwrap(); - assert_eq!(third_json["entry_type"]["type"], "error_message"); - assert!(third_json["content"] - .as_str() - .unwrap() - .contains("Could not access")); - - // Verify timestamps include microseconds for uniqueness - for timestamp in timestamps { - assert!( - timestamp.contains('.'), - "Timestamp should include microseconds: {}", - timestamp - ); - } - } - - #[test] - fn test_normalize_logs_edge_cases() { - let executor = AiderExecutor::new(); - - // Empty content - let result = executor.normalize_logs("", "/tmp").unwrap(); - assert_eq!(result.entries.len(), 0); - - // Only whitespace - let result = executor.normalize_logs(" \n\t\n ", "/tmp").unwrap(); - assert_eq!(result.entries.len(), 0); - - // Malformed JSON (current implementation skips invalid JSON) - let malformed = r#"{"timestamp":"2025-07-21T18:04:00Z","content":"incomplete"#; - let result = executor.normalize_logs(malformed, "/tmp").unwrap(); - assert_eq!(result.entries.len(), 0); // Current implementation skips invalid JSON - - // Mixed valid and invalid JSON - let mixed = r#"{"timestamp":"2025-07-21T18:04:00Z","entry_type":{"type":"assistant_message"},"content":"Valid entry","metadata":null} -Invalid line that's not JSON -{"timestamp":"2025-07-21T18:04:01Z","entry_type":{"type":"system_message"},"content":"Another valid entry","metadata":null}"#; - let result = executor.normalize_logs(mixed, "/tmp").unwrap(); - assert_eq!(result.entries.len(), 2); // Only valid JSON entries are parsed - } -} diff --git a/backend/src/executors/aider/filter.rs b/backend/src/executors/aider/filter.rs deleted file mode 100644 index 1d2b99cd..00000000 --- a/backend/src/executors/aider/filter.rs +++ /dev/null @@ -1,269 +0,0 @@ -use lazy_static::lazy_static; -use regex::Regex; - -lazy_static! 
{
- static ref AIDER_SESSION_REGEX: Regex = Regex::new(r".*\b(chat|session|sessionID|id)=([^ ]+)").unwrap();
- static ref SYSTEM_MESSAGE_REGEX: Regex = Regex::new(r"^(Main model:|Weak model:)").unwrap();
- static ref ERROR_MESSAGE_REGEX: Regex = Regex::new(r"^(Error:|ERROR:|Warning:|WARN:|Exception:|Fatal:|FATAL:|✗|❌|\[ERROR\])").unwrap();
- static ref USER_INPUT_REGEX: Regex = Regex::new(r"^>\s+").unwrap();
- static ref NOISE_REGEX: Regex = Regex::new(r"^(\s*$|Warning: Input is not a terminal|\[\[?\d+;\d+R|─{5,}|\s*\d+%\||Added .* to|You can skip|System:|Aider:|Git repo:.*|Repo-map:|>|▶|\[SYSTEM\]|Scanning repo:|Initial repo scan|Tokens:|Using [a-zA-Z0-9_.-]+ model with API key from environment|Restored previous conversation history.|.*\.git/worktrees/.*)").unwrap();
- static ref SCANNING_REPO_PROGRESS_REGEX: Regex = Regex::new(r"^Scanning repo:\s+\d+%\|.*\|\s*\d+/\d+\s+\[.*\]").unwrap();
- static ref DIFF_BLOCK_MARKERS: Regex = Regex::new(r"^(<<<<<<< SEARCH|=======|>>>>>>> REPLACE)$").unwrap();
-}
-
-/// Filter for Aider CLI output
-pub struct AiderFilter;
-
-impl AiderFilter {
- /// Check if a line is a system message
- pub fn is_system_message(line: &str) -> bool {
- let trimmed = line.trim();
- SYSTEM_MESSAGE_REGEX.is_match(trimmed)
- }
-
- /// Check if a line is an error message
- pub fn is_error(line: &str) -> bool {
- let trimmed = line.trim();
- ERROR_MESSAGE_REGEX.is_match(trimmed)
- }
-
- /// Check if a line is noise that should be filtered out
- pub fn is_noise(line: &str) -> bool {
- let trimmed = line.trim();
- NOISE_REGEX.is_match(trimmed)
- }
-
- /// Check if a line is user input (echo from stdin)
- pub fn is_user_input(line: &str) -> bool {
- let trimmed = line.trim();
- USER_INPUT_REGEX.is_match(trimmed)
- }
-
- /// Check if a line is a scanning repo progress message that should be simplified
- pub fn is_scanning_repo_progress(line: &str) -> bool {
- let trimmed = line.trim();
- SCANNING_REPO_PROGRESS_REGEX.is_match(trimmed)
- }
-
- /// Check if a line is a diff block marker (SEARCH/REPLACE blocks)
- pub fn is_diff_block_marker(line: &str) -> bool {
- let trimmed = line.trim();
- DIFF_BLOCK_MARKERS.is_match(trimmed)
- }
-
- /// Simplify scanning repo progress to just "Scanning repo"
- pub fn simplify_scanning_repo_message(line: &str) -> String {
- if Self::is_scanning_repo_progress(line) {
- "Scanning repo".to_string()
- } else {
- line.to_string()
- }
- }
-}
-
-/// Parse session_id from Aider output lines
-pub fn parse_session_id_from_line(line: &str) -> Option<String> {
- // Try regex for session ID extraction from various patterns
- if let Some(captures) = AIDER_SESSION_REGEX.captures(line) {
- if let Some(id) = captures.get(2) {
- return Some(id.as_str().to_string());
- }
- }
-
- None
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_is_system_message() {
- // Only "Main model:" and "Weak model:" are system messages
- assert!(AiderFilter::is_system_message(
- "Main model: anthropic/claude-sonnet-4-20250514"
- ));
- assert!(AiderFilter::is_system_message(
- "Weak model: anthropic/claude-3-5-haiku-20241022"
- ));
-
- // Everything else is not a system message
- assert!(!AiderFilter::is_system_message("System: Starting new chat"));
- assert!(!AiderFilter::is_system_message("Git repo:"));
- assert!(!AiderFilter::is_system_message(
- "Git repo: ../vibe-kanban/.git/worktrees/vk-streaming-fix with 280 files"
- ));
- assert!(!AiderFilter::is_system_message(
- "Using sonnet model with API key from environment"
- ));
- assert!(!AiderFilter::is_system_message(
- "I'll
help you implement this" - )); - assert!(!AiderFilter::is_system_message( - "Error: something went wrong" - )); - assert!(!AiderFilter::is_system_message("")); - } - - #[test] - fn test_is_noise() { - // Test that complete Git repo lines are treated as noise - assert!(AiderFilter::is_noise( - "Git repo: ../vibe-kanban/.git/worktrees/vk-streaming-fix with 280 files" - )); - assert!(AiderFilter::is_noise("Git repo:")); - assert!(AiderFilter::is_noise( - "Using sonnet model with API key from environment" - )); - assert!(AiderFilter::is_noise("System: Starting new chat")); - assert!(AiderFilter::is_noise("Aider: Ready to help")); - assert!(AiderFilter::is_noise( - "Repo-map: using 4096 tokens, auto refresh" - )); - - // Test non-noise messages - assert!(!AiderFilter::is_noise( - "Main model: anthropic/claude-sonnet-4" - )); - assert!(!AiderFilter::is_noise("I'll help you implement this")); - assert!(!AiderFilter::is_noise("Error: something went wrong")); - } - - #[test] - fn test_is_error() { - // Test error message detection - assert!(AiderFilter::is_error("Error: File not found")); - assert!(AiderFilter::is_error("ERROR: Permission denied")); - assert!(AiderFilter::is_error("Warning: Deprecated function")); - assert!(AiderFilter::is_error("WARN: Configuration issue")); - assert!(AiderFilter::is_error("Exception: Invalid input")); - assert!(AiderFilter::is_error("Fatal: Cannot continue")); - assert!(AiderFilter::is_error("FATAL: System failure")); - assert!(AiderFilter::is_error("✗ Command failed")); - assert!(AiderFilter::is_error("❌ Task not completed")); - assert!(AiderFilter::is_error("[ERROR] Operation failed")); - assert!(AiderFilter::is_error(" Error: Starting with spaces ")); - - // Test non-error messages - assert!(!AiderFilter::is_error("I'll help you with this")); - assert!(!AiderFilter::is_error("System: Starting chat")); - assert!(!AiderFilter::is_error("Regular message")); - assert!(!AiderFilter::is_error("")); - } - - #[test] - fn test_parse_session_id_from_line() { - // Test session ID extraction from various formats - assert_eq!( - parse_session_id_from_line("Starting chat=ses_abc123 new session"), - Some("ses_abc123".to_string()) - ); - - assert_eq!( - parse_session_id_from_line("Aider session=aider_session_456"), - Some("aider_session_456".to_string()) - ); - - assert_eq!( - parse_session_id_from_line("DEBUG sessionID=debug_789 process"), - Some("debug_789".to_string()) - ); - - assert_eq!( - parse_session_id_from_line("Session id=simple_id started"), - Some("simple_id".to_string()) - ); - - // Test no session ID - assert_eq!(parse_session_id_from_line("No session here"), None); - assert_eq!(parse_session_id_from_line(""), None); - assert_eq!(parse_session_id_from_line("session= empty"), None); - } - - #[test] - fn test_message_classification_priority() { - // Error messages are not system messages - assert!(AiderFilter::is_error("Error: System configuration invalid")); - assert!(!AiderFilter::is_system_message( - "Error: System configuration invalid" - )); - - // System messages are not errors - assert!(AiderFilter::is_system_message( - "Main model: anthropic/claude-sonnet-4" - )); - assert!(!AiderFilter::is_error( - "Main model: anthropic/claude-sonnet-4" - )); - } - - #[test] - fn test_scanning_repo_progress_detection() { - // Test scanning repo progress detection - assert!(AiderFilter::is_scanning_repo_progress( - "Scanning repo: 0%| | 0/275 [00:00>>>>>> REPLACE")); - - // Test non-markers - assert!(!AiderFilter::is_diff_block_marker("Regular code line")); - 
assert!(!AiderFilter::is_diff_block_marker("def function():")); - assert!(!AiderFilter::is_diff_block_marker("")); - assert!(!AiderFilter::is_diff_block_marker("< SEARCH")); // Missing full marker - } - - #[test] - fn test_simplify_scanning_repo_message() { - // Test simplification of progress messages - assert_eq!( - AiderFilter::simplify_scanning_repo_message( - "Scanning repo: 0%| | 0/275 [00:00, - #[serde(rename = "toolResults")] - tool_results: Vec, - }, - #[serde(rename = "initial")] - Initial { - #[serde(rename = "threadID")] - thread_id: Option, - }, - #[serde(rename = "token-usage")] - TokenUsage(serde_json::Value), - #[serde(rename = "state")] - State { state: String }, - #[serde(rename = "shutdown")] - Shutdown, - #[serde(rename = "tool-status")] - ToolStatus(serde_json::Value), -} - -#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] -pub struct AmpMessage { - pub role: String, - pub content: Vec, - pub state: Option, - pub meta: Option, -} - -#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] -pub struct AmpMeta { - #[serde(rename = "sentAt")] - pub sent_at: u64, -} - -#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] -#[serde(tag = "type")] -pub enum AmpContentItem { - #[serde(rename = "text")] - Text { text: String }, - #[serde(rename = "thinking")] - Thinking { thinking: String }, - #[serde(rename = "tool_use")] - ToolUse { - id: String, - name: String, - input: serde_json::Value, - }, - #[serde(rename = "tool_result")] - ToolResult { - #[serde(rename = "toolUseID")] - tool_use_id: String, - run: serde_json::Value, - }, -} - -impl AmpJson { - pub fn should_process(&self) -> bool { - matches!(self, AmpJson::Messages { .. }) - } - - pub fn extract_session_id(&self) -> Option { - match self { - AmpJson::Initial { thread_id } => thread_id.clone(), - _ => None, - } - } - - pub fn has_streaming_content(&self) -> bool { - match self { - AmpJson::Messages { messages, .. } => messages.iter().any(|(_index, message)| { - if let Some(state) = &message.state { - if let Some(state_type) = state.get("type").and_then(|t| t.as_str()) { - state_type == "streaming" - } else { - false - } - } else { - false - } - }), - _ => false, - } - } - - pub fn to_normalized_entries( - &self, - executor: &AmpExecutor, - worktree_path: &str, - ) -> Vec { - match self { - AmpJson::Messages { messages, .. 
} => { - if self.has_streaming_content() { - return vec![]; - } - - let mut entries = Vec::new(); - for (_index, message) in messages { - let role = &message.role; - for content_item in &message.content { - if let Some(entry) = - content_item.to_normalized_entry(role, message, executor, worktree_path) - { - entries.push(entry); - } - } - } - entries - } - _ => vec![], - } - } -} - -impl AmpContentItem { - pub fn to_normalized_entry( - &self, - role: &str, - message: &AmpMessage, - executor: &AmpExecutor, - worktree_path: &str, - ) -> Option { - use serde_json::Value; - - let timestamp = message.meta.as_ref().map(|meta| meta.sent_at.to_string()); - - match self { - AmpContentItem::Text { text } => { - let entry_type = match role { - "user" => NormalizedEntryType::UserMessage, - "assistant" => NormalizedEntryType::AssistantMessage, - _ => return None, - }; - Some(NormalizedEntry { - timestamp, - entry_type, - content: text.clone(), - metadata: Some(serde_json::to_value(self).unwrap_or(Value::Null)), - }) - } - AmpContentItem::Thinking { thinking } => Some(NormalizedEntry { - timestamp, - entry_type: NormalizedEntryType::Thinking, - content: thinking.clone(), - metadata: Some(serde_json::to_value(self).unwrap_or(Value::Null)), - }), - AmpContentItem::ToolUse { name, input, .. } => { - let action_type = executor.extract_action_type(name, input, worktree_path); - let content = - executor.generate_concise_content(name, input, &action_type, worktree_path); - - Some(NormalizedEntry { - timestamp, - entry_type: NormalizedEntryType::ToolUse { - tool_name: name.clone(), - action_type, - }, - content, - metadata: Some(serde_json::to_value(self).unwrap_or(Value::Null)), - }) - } - AmpContentItem::ToolResult { .. } => None, - } - } -} - -#[async_trait] -impl Executor for AmpExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? 
- .ok_or(ExecutorError::TaskNotFound)?;
-
- let prompt = if let Some(task_description) = task.description {
- format!(
- r#"project_id: {}
-
-Task title: {}
-Task description: {}"#,
- task.project_id, task.title, task_description
- )
- } else {
- format!(
- r#"project_id: {}
-
-Task title: {}"#,
- task.project_id, task.title
- )
- };
-
- // Use shell command for cross-platform compatibility
- let (shell_cmd, shell_arg) = get_shell_command();
- // --format=jsonl is deprecated in latest versions of Amp CLI
- let amp_command = "npx @sourcegraph/amp@0.0.1752148945-gd8844f --format=jsonl";
-
- let mut command = CommandRunner::new();
- command
- .command(shell_cmd)
- .arg(shell_arg)
- .arg(amp_command)
- .stdin(&prompt)
- .working_dir(worktree_path);
-
- let proc = command.start().await.map_err(|e| {
- executor::SpawnContext::from_command(&command, "Amp")
- .with_task(task_id, Some(task.title.clone()))
- .with_context("Amp CLI execution for new task")
- .spawn_error(e)
- })?;
-
- Ok(proc)
- }
-
- async fn spawn_followup(
- &self,
- _pool: &sqlx::SqlitePool,
- _task_id: Uuid,
- session_id: &str,
- prompt: &str,
- worktree_path: &str,
- ) -> Result<CommandProcess, ExecutorError> {
- // Use shell command for cross-platform compatibility
- let (shell_cmd, shell_arg) = get_shell_command();
- let amp_command = format!(
- "npx @sourcegraph/amp@0.0.1752148945-gd8844f threads continue {} --format=jsonl",
- session_id
- );
-
- let mut command = CommandRunner::new();
- command
- .command(shell_cmd)
- .arg(shell_arg)
- .arg(&amp_command)
- .stdin(prompt)
- .working_dir(worktree_path);
-
- let proc = command.start().await.map_err(|e| {
- crate::executor::SpawnContext::from_command(&command, "Amp")
- .with_context(format!(
- "Amp CLI followup execution for thread {}",
- session_id
- ))
- .spawn_error(e)
- })?;
-
- Ok(proc)
- }
-
- fn normalize_logs(
- &self,
- logs: &str,
- worktree_path: &str,
- ) -> Result<NormalizedConversation, ExecutorError> {
- let mut entries = Vec::new();
- let mut session_id = None;
-
- for line in logs.lines() {
- let trimmed = line.trim();
- if trimmed.is_empty() {
- continue;
- }
-
- // Try to parse as AmpMessage
- let amp_message: AmpJson = match serde_json::from_str(trimmed) {
- Ok(msg) => msg,
- Err(_) => {
- // If line isn't valid JSON, add it as raw text
- entries.push(NormalizedEntry {
- timestamp: None,
- entry_type: NormalizedEntryType::SystemMessage,
- content: format!("Raw output: {}", trimmed),
- metadata: None,
- });
- continue;
- }
- };
-
- // Extract session ID if available
- if session_id.is_none() {
- if let Some(id) = amp_message.extract_session_id() {
- session_id = Some(id);
- }
- }
-
- // Process the message if it's a type we care about
- if amp_message.should_process() {
- let new_entries = amp_message.to_normalized_entries(self, worktree_path);
- entries.extend(new_entries);
- }
- }
-
- Ok(NormalizedConversation {
- entries,
- session_id,
- executor_type: "amp".to_string(),
- prompt: None,
- summary: None,
- })
- }
-}
-
-impl AmpExecutor {
- /// Convert absolute paths to relative paths based on worktree path
- fn make_path_relative(&self, path: &str, worktree_path: &str) -> String {
- let path_obj = Path::new(path);
- let worktree_obj = Path::new(worktree_path);
-
- // If path is already relative, return as is
- if path_obj.is_relative() {
- return path.to_string();
- }
-
- // Try to make path relative to worktree path
- if let Ok(relative_path) = path_obj.strip_prefix(worktree_obj) {
- return relative_path.to_string_lossy().to_string();
- }
-
- // If we can't make it relative, return the original path
- path.to_string()
- }
-
-
fn generate_concise_content( - &self, - tool_name: &str, - input: &serde_json::Value, - action_type: &ActionType, - worktree_path: &str, - ) -> String { - match action_type { - ActionType::FileRead { path } => format!("`{}`", path), - ActionType::FileWrite { path } => format!("`{}`", path), - ActionType::CommandRun { command } => format!("`{}`", command), - ActionType::Search { query } => format!("`{}`", query), - ActionType::WebFetch { url } => format!("`{}`", url), - ActionType::PlanPresentation { plan } => format!("Plan Presentation: `{}`", plan), - ActionType::TaskCreate { description } => description.clone(), - ActionType::Other { description: _ } => { - // For other tools, try to extract key information or fall back to tool name - match tool_name.to_lowercase().as_str() { - "todowrite" | "todoread" | "todo_write" | "todo_read" => { - if let Some(todos) = input.get("todos").and_then(|t| t.as_array()) { - let mut todo_items = Vec::new(); - for todo in todos { - if let (Some(content), Some(status)) = ( - todo.get("content").and_then(|c| c.as_str()), - todo.get("status").and_then(|s| s.as_str()), - ) { - let emoji = match status { - "completed" => "✅", - "in_progress" | "in-progress" => "🔄", - "pending" | "todo" => "⏳", - _ => "📝", - }; - let priority = todo - .get("priority") - .and_then(|p| p.as_str()) - .unwrap_or("medium"); - todo_items - .push(format!("{} {} ({})", emoji, content, priority)); - } - } - if !todo_items.is_empty() { - format!("TODO List:\n{}", todo_items.join("\n")) - } else { - "Managing TODO list".to_string() - } - } else { - "Managing TODO list".to_string() - } - } - "ls" => { - if let Some(path) = input.get("path").and_then(|p| p.as_str()) { - let relative_path = self.make_path_relative(path, worktree_path); - if relative_path.is_empty() { - "List directory".to_string() - } else { - format!("List directory: `{}`", relative_path) - } - } else { - "List directory".to_string() - } - } - "glob" => { - let pattern = input.get("pattern").and_then(|p| p.as_str()).unwrap_or("*"); - let path = input.get("path").and_then(|p| p.as_str()); - - if let Some(path) = path { - let relative_path = self.make_path_relative(path, worktree_path); - format!("Find files: `{}` in `{}`", pattern, relative_path) - } else { - format!("Find files: `{}`", pattern) - } - } - "grep" => { - let pattern = input.get("pattern").and_then(|p| p.as_str()).unwrap_or(""); - let include = input.get("include").and_then(|i| i.as_str()); - let path = input.get("path").and_then(|p| p.as_str()); - - let mut parts = vec![format!("Search: `{}`", pattern)]; - if let Some(include) = include { - parts.push(format!("in `{}`", include)); - } - if let Some(path) = path { - let relative_path = self.make_path_relative(path, worktree_path); - parts.push(format!("at `{}`", relative_path)); - } - parts.join(" ") - } - "read" => { - if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - let relative_path = self.make_path_relative(file_path, worktree_path); - format!("Read file: `{}`", relative_path) - } else { - "Read file".to_string() - } - } - "write" => { - if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - let relative_path = self.make_path_relative(file_path, worktree_path); - format!("Write file: `{}`", relative_path) - } else { - "Write file".to_string() - } - } - "edit" => { - if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - let relative_path = self.make_path_relative(file_path, worktree_path); - format!("Edit file: `{}`", relative_path) - } 
else { - "Edit file".to_string() - } - } - "multiedit" => { - if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - let relative_path = self.make_path_relative(file_path, worktree_path); - format!("Multi-edit file: `{}`", relative_path) - } else { - "Multi-edit file".to_string() - } - } - "bash" => { - if let Some(command) = input.get("command").and_then(|c| c.as_str()) { - format!("Run command: `{}`", command) - } else { - "Run command".to_string() - } - } - "webfetch" => { - if let Some(url) = input.get("url").and_then(|u| u.as_str()) { - format!("Fetch URL: `{}`", url) - } else { - "Fetch URL".to_string() - } - } - "task" => { - if let Some(description) = input.get("description").and_then(|d| d.as_str()) - { - format!("Task: {}", description) - } else if let Some(prompt) = input.get("prompt").and_then(|p| p.as_str()) { - format!("Task: {}", prompt) - } else { - "Task".to_string() - } - } - _ => tool_name.to_string(), - } - } - } - } - - fn extract_action_type( - &self, - tool_name: &str, - input: &serde_json::Value, - worktree_path: &str, - ) -> ActionType { - match tool_name.to_lowercase().as_str() { - "read_file" | "read" => { - if let Some(path) = input.get("path").and_then(|p| p.as_str()) { - ActionType::FileRead { - path: self.make_path_relative(path, worktree_path), - } - } else if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - ActionType::FileRead { - path: self.make_path_relative(file_path, worktree_path), - } - } else { - ActionType::Other { - description: "File read operation".to_string(), - } - } - } - "edit_file" | "write" | "create_file" | "edit" | "multiedit" => { - if let Some(path) = input.get("path").and_then(|p| p.as_str()) { - ActionType::FileWrite { - path: self.make_path_relative(path, worktree_path), - } - } else if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - ActionType::FileWrite { - path: self.make_path_relative(file_path, worktree_path), - } - } else { - ActionType::Other { - description: "File write operation".to_string(), - } - } - } - "bash" | "run_command" => { - if let Some(cmd) = input.get("cmd").and_then(|c| c.as_str()) { - ActionType::CommandRun { - command: cmd.to_string(), - } - } else if let Some(command) = input.get("command").and_then(|c| c.as_str()) { - ActionType::CommandRun { - command: command.to_string(), - } - } else { - ActionType::Other { - description: "Command execution".to_string(), - } - } - } - "grep" | "search" => { - if let Some(pattern) = input.get("pattern").and_then(|p| p.as_str()) { - ActionType::Search { - query: pattern.to_string(), - } - } else if let Some(query) = input.get("query").and_then(|q| q.as_str()) { - ActionType::Search { - query: query.to_string(), - } - } else { - ActionType::Other { - description: "Search operation".to_string(), - } - } - } - "web_fetch" | "webfetch" => { - if let Some(url) = input.get("url").and_then(|u| u.as_str()) { - ActionType::WebFetch { - url: url.to_string(), - } - } else { - ActionType::Other { - description: "Web fetch operation".to_string(), - } - } - } - "task" => { - if let Some(description) = input.get("description").and_then(|d| d.as_str()) { - ActionType::TaskCreate { - description: description.to_string(), - } - } else if let Some(prompt) = input.get("prompt").and_then(|p| p.as_str()) { - ActionType::TaskCreate { - description: prompt.to_string(), - } - } else { - ActionType::Other { - description: "Task creation".to_string(), - } - } - } - "glob" => ActionType::Other { - description: "File pattern 
search".to_string(), - }, - "ls" => ActionType::Other { - description: "List directory".to_string(), - }, - "todowrite" | "todoread" | "todo_write" | "todo_read" => ActionType::Other { - description: "Manage TODO list".to_string(), - }, - _ => ActionType::Other { - description: format!("Tool: {}", tool_name), - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_filter_streaming_messages() { - // Test logs that simulate the actual normalize_logs behavior - let amp_executor = AmpExecutor; - let logs = r#"{"type":"messages","messages":[[7,{"role":"assistant","content":[{"type":"text","text":"Created all three files: test1.txt, test2.txt, and test3.txt"}],"state":{"type":"streaming"}}]],"toolResults":[]} -{"type":"messages","messages":[[7,{"role":"assistant","content":[{"type":"text","text":"Created all three files: test1.txt, test2.txt, and test3.txt, each with a line of text."}],"state":{"type":"streaming"}}]],"toolResults":[]} -{"type":"messages","messages":[[7,{"role":"assistant","content":[{"type":"text","text":"Created all three files: test1.txt, test2.txt, and test3.txt, each with a line of text."}],"state":{"type":"complete","stopReason":"end_turn"}}]],"toolResults":[]}"#; - - let result = amp_executor.normalize_logs(logs, "/tmp/test"); - assert!(result.is_ok()); - - let conversation = result.unwrap(); - - // Should only have 1 assistant message (the complete one) - let assistant_messages: Vec<_> = conversation - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::AssistantMessage)) - .collect(); - - assert_eq!(assistant_messages.len(), 1); - assert_eq!(assistant_messages[0].content, "Created all three files: test1.txt, test2.txt, and test3.txt, each with a line of text."); - } - - #[test] - fn test_filter_preserves_messages_without_state() { - // Test that messages without state metadata are preserved (for compatibility) - let amp_executor = AmpExecutor; - let logs = r#"{"type":"messages","messages":[[1,{"role":"assistant","content":[{"type":"text","text":"Regular message"}]}]],"toolResults":[]}"#; - - let result = amp_executor.normalize_logs(logs, "/tmp/test"); - assert!(result.is_ok()); - - let conversation = result.unwrap(); - - // Should have 1 assistant message - let assistant_messages: Vec<_> = conversation - .entries - .iter() - .filter(|e| matches!(e.entry_type, NormalizedEntryType::AssistantMessage)) - .collect(); - - assert_eq!(assistant_messages.len(), 1); - assert_eq!(assistant_messages[0].content, "Regular message"); - } -} diff --git a/backend/src/executors/ccr.rs b/backend/src/executors/ccr.rs deleted file mode 100644 index 9bc40435..00000000 --- a/backend/src/executors/ccr.rs +++ /dev/null @@ -1,91 +0,0 @@ -use async_trait::async_trait; -use uuid::Uuid; - -use crate::{ - command_runner::CommandProcess, - executor::{Executor, ExecutorError, NormalizedConversation}, - executors::ClaudeExecutor, -}; - -/// An executor that uses Claude Code Router (CCR) to process tasks -/// This is a thin wrapper around ClaudeExecutor that uses Claude Code Router instead of Claude CLI -pub struct CCRExecutor(ClaudeExecutor); - -impl Default for CCRExecutor { - fn default() -> Self { - Self::new() - } -} - -impl CCRExecutor { - pub fn new() -> Self { - Self(ClaudeExecutor::with_command( - "claude-code-router".to_string(), - "npx -y @musistudio/claude-code-router code -p --dangerously-skip-permissions --verbose --output-format=stream-json".to_string(), - )) - } -} - -#[async_trait] -impl Executor for CCRExecutor { - async fn spawn( - 
&self,
- pool: &sqlx::SqlitePool,
- task_id: Uuid,
- worktree_path: &str,
- ) -> Result<CommandProcess, ExecutorError> {
- self.0.spawn(pool, task_id, worktree_path).await
- }
-
- async fn spawn_followup(
- &self,
- pool: &sqlx::SqlitePool,
- task_id: Uuid,
- session_id: &str,
- prompt: &str,
- worktree_path: &str,
- ) -> Result<CommandProcess, ExecutorError> {
- self.0
- .spawn_followup(pool, task_id, session_id, prompt, worktree_path)
- .await
- }
-
- fn normalize_logs(
- &self,
- logs: &str,
- worktree_path: &str,
- ) -> Result<NormalizedConversation, ExecutorError> {
- let filtered_logs = filter_ccr_service_messages(logs);
- let mut result = self.0.normalize_logs(&filtered_logs, worktree_path)?;
- result.executor_type = "claude-code-router".to_string();
- Ok(result)
- }
-}
-
-/// Filter out CCR service messages that appear in stdout but shouldn't be shown to users
-/// These are informational messages from the CCR wrapper itself
-fn filter_ccr_service_messages(logs: &str) -> String {
- logs.lines()
- .filter(|line| {
- let trimmed = line.trim();
-
- // Filter out known CCR service messages
- if trimmed.eq("Service not running, starting service...")
- || trimmed.eq("claude code router service has been successfully stopped.")
- {
- return false;
- }
-
- // Filter out system init JSON that contains misleading model information
- // CCR delegates to different models, so the init model info is incorrect
- if trimmed.starts_with(r#"{"type":"system","subtype":"init""#)
- && trimmed.contains(r#""model":"#)
- {
- return false;
- }
-
- true
- })
- .collect::<Vec<_>>()
- .join("\n")
-}
diff --git a/backend/src/executors/charm_opencode.rs b/backend/src/executors/charm_opencode.rs
deleted file mode 100644
index 81e00d83..00000000
--- a/backend/src/executors/charm_opencode.rs
+++ /dev/null
@@ -1,99 +0,0 @@
-use async_trait::async_trait;
-use uuid::Uuid;
-
-use crate::{
- command_runner::{CommandProcess, CommandRunner},
- executor::{Executor, ExecutorError},
- models::task::Task,
- utils::shell::get_shell_command,
-};
-
-/// An executor that uses OpenCode to process tasks
-pub struct CharmOpencodeExecutor;
-
-#[async_trait]
-impl Executor for CharmOpencodeExecutor {
- async fn spawn(
- .ok_or(ExecutorError::TaskNotFound)?; - - let prompt = if let Some(task_description) = task.description { - format!( - r#"project_id: {} - -Task title: {} -Task description: {}"#, - task.project_id, task.title, task_description - ) - } else { - format!( - r#"project_id: {} - -Task title: {}"#, - task.project_id, task.title - ) - }; - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - let opencode_command = format!( - "opencode -p \"{}\" --output-format=json", - prompt.replace('"', "\\\"") - ); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&opencode_command) - .working_dir(worktree_path); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, "CharmOpenCode") - .with_task(task_id, Some(task.title.clone())) - .with_context("CharmOpenCode CLI execution for new task") - .spawn_error(e) - })?; - - Ok(proc) - } - - async fn spawn_followup( - &self, - _pool: &sqlx::SqlitePool, - _task_id: Uuid, - _session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - // CharmOpencode doesn't support session-based followup, so we ignore session_id - // and just run with the new prompt - let (shell_cmd, shell_arg) = get_shell_command(); - let opencode_command = format!( - "opencode -p \"{}\" --output-format=json", - prompt.replace('"', "\\\"") - ); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&opencode_command) - .working_dir(worktree_path); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, "CharmOpenCode") - .with_context("CharmOpenCode CLI followup execution") - .spawn_error(e) - })?; - - Ok(proc) - } -} diff --git a/backend/src/executors/claude.rs b/backend/src/executors/claude.rs deleted file mode 100644 index 62f2001d..00000000 --- a/backend/src/executors/claude.rs +++ /dev/null @@ -1,823 +0,0 @@ -use std::path::Path; - -use async_trait::async_trait; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{ - ActionType, Executor, ExecutorError, NormalizedConversation, NormalizedEntry, - NormalizedEntryType, - }, - models::task::Task, - utils::shell::get_shell_command, -}; - -fn create_watchkill_script(command: &str) -> String { - let claude_plan_stop_indicator = "Exit plan mode?"; - format!( - r#"#!/usr/bin/env bash -set -euo pipefail - -word="{}" -command="{}" - -exit_code=0 -while IFS= read -r line; do - printf '%s\n' "$line" - if [[ $line == *"$word"* ]]; then - exit 0 - fi -done < <($command <&0 2>&1) - -exit_code=${{PIPESTATUS[0]}} -exit "$exit_code" -"#, - claude_plan_stop_indicator, command - ) -} - -/// An executor that uses Claude CLI to process tasks -pub struct ClaudeExecutor { - executor_type: String, - command: String, -} - -impl Default for ClaudeExecutor { - fn default() -> Self { - Self::new() - } -} - -impl ClaudeExecutor { - /// Create a new ClaudeExecutor with default settings - pub fn new() -> Self { - Self { - executor_type: "Claude Code".to_string(), - command: "npx -y @anthropic-ai/claude-code@latest -p --dangerously-skip-permissions --verbose --output-format=stream-json".to_string(), - } - } - - pub fn new_plan_mode() -> Self { - let command = "npx -y @anthropic-ai/claude-code@latest -p --permission-mode=plan --verbose --output-format=stream-json"; - let script = create_watchkill_script(command); - Self { - executor_type: "ClaudePlan".to_string(), - 
command: script, - } - } - - /// Create a new ClaudeExecutor with custom settings - pub fn with_command(executor_type: String, command: String) -> Self { - Self { - executor_type, - command, - } - } -} - -#[async_trait] -impl Executor for ClaudeExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; - - let prompt = if let Some(task_description) = task.description { - format!( - r#"project_id: {} - -Task title: {} -Task description: {}"#, - task.project_id, task.title, task_description - ) - } else { - format!( - r#"project_id: {} - -Task title: {}"#, - task.project_id, task.title - ) - }; - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - // Pass prompt via stdin instead of command line to avoid shell escaping issues - let claude_command = &self.command; - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(claude_command) - .stdin(&prompt) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1"); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_task(task_id, Some(task.title.clone())) - .with_context(format!("{} CLI execution for new task", self.executor_type)) - .spawn_error(e) - })?; - Ok(proc) - } - - async fn spawn_followup( - &self, - _pool: &sqlx::SqlitePool, - _task_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - - // Determine the command based on whether this is plan mode or not - let claude_command = if self.executor_type == "ClaudePlan" { - let command = format!( - "npx -y @anthropic-ai/claude-code@latest -p --permission-mode=plan --verbose --output-format=stream-json --resume={}", - session_id - ); - create_watchkill_script(&command) - } else { - format!("{} --resume={}", self.command, session_id) - }; - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&claude_command) - .stdin(prompt) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1"); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_context(format!( - "{} CLI followup execution for session {}", - self.executor_type, session_id - )) - .spawn_error(e) - })?; - - Ok(proc) - } - - fn normalize_logs( - &self, - logs: &str, - worktree_path: &str, - ) -> Result { - use serde_json::Value; - - let mut entries = Vec::new(); - let mut session_id = None; - - for line in logs.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Try to parse as JSON - let json: Value = match serde_json::from_str(trimmed) { - Ok(json) => json, - Err(_) => { - // If line isn't valid JSON, add it as raw text - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content: format!("Raw output: {}", trimmed), - metadata: None, - }); - continue; - } - }; - - // Extract session ID - if session_id.is_none() { - if let Some(sess_id) = json.get("session_id").and_then(|v| v.as_str()) { - session_id = Some(sess_id.to_string()); - } - } - - // Process different message types - let processed = if let 
Some(msg_type) = json.get("type").and_then(|t| t.as_str()) { - match msg_type { - "assistant" => { - if let Some(message) = json.get("message") { - if let Some(content) = message.get("content").and_then(|c| c.as_array()) - { - for content_item in content { - if let Some(content_type) = - content_item.get("type").and_then(|t| t.as_str()) - { - match content_type { - "text" => { - if let Some(text) = content_item - .get("text") - .and_then(|t| t.as_str()) - { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: - NormalizedEntryType::AssistantMessage, - content: text.to_string(), - metadata: Some(content_item.clone()), - }); - } - } - "tool_use" => { - if let Some(tool_name) = content_item - .get("name") - .and_then(|n| n.as_str()) - { - let input = content_item - .get("input") - .unwrap_or(&Value::Null); - let action_type = self.extract_action_type( - tool_name, - input, - worktree_path, - ); - let content = self.generate_concise_content( - tool_name, - input, - &action_type, - worktree_path, - ); - - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::ToolUse { - tool_name: tool_name.to_string(), - action_type, - }, - content, - metadata: Some(content_item.clone()), - }); - } - } - _ => {} - } - } - } - } - } - true - } - "user" => { - if let Some(message) = json.get("message") { - if let Some(content) = message.get("content").and_then(|c| c.as_array()) - { - for content_item in content { - if let Some(content_type) = - content_item.get("type").and_then(|t| t.as_str()) - { - if content_type == "text" { - if let Some(text) = - content_item.get("text").and_then(|t| t.as_str()) - { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::UserMessage, - content: text.to_string(), - metadata: Some(content_item.clone()), - }); - } - } - } - } - } - } - true - } - "system" => { - if let Some(subtype) = json.get("subtype").and_then(|s| s.as_str()) { - if subtype == "init" { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content: format!( - "System initialized with model: {}", - json.get("model") - .and_then(|m| m.as_str()) - .unwrap_or("unknown") - ), - metadata: Some(json.clone()), - }); - } - } - true - } - _ => false, - } - } else { - false - }; - - // If JSON didn't match expected patterns, add it as unrecognized JSON - // Skip JSON with type "result" as requested - if !processed { - if let Some(msg_type) = json.get("type").and_then(|t| t.as_str()) { - if msg_type == "result" { - // Skip result entries - continue; - } - } - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content: format!("Unrecognized JSON: {}", trimmed), - metadata: Some(json), - }); - } - } - - Ok(NormalizedConversation { - entries, - session_id, - executor_type: self.executor_type.clone(), - prompt: None, - summary: None, - }) - } -} - -impl ClaudeExecutor { - /// Convert absolute paths to relative paths based on worktree path - fn make_path_relative(&self, path: &str, worktree_path: &str) -> String { - let path_obj = Path::new(path); - let worktree_path_obj = Path::new(worktree_path); - - tracing::debug!("Making path relative: {} -> {}", path, worktree_path); - - // If path is already relative, return as is - if path_obj.is_relative() { - return path.to_string(); - } - - // Try to make path relative to the worktree path - match path_obj.strip_prefix(worktree_path_obj) { - Ok(relative_path) => { - let result = 
relative_path.to_string_lossy().to_string(); - tracing::debug!("Successfully made relative: '{}' -> '{}'", path, result); - result - } - Err(_) => { - // Handle symlinks by resolving canonical paths - let canonical_path = std::fs::canonicalize(path); - let canonical_worktree = std::fs::canonicalize(worktree_path); - - match (canonical_path, canonical_worktree) { - (Ok(canon_path), Ok(canon_worktree)) => { - tracing::debug!( - "Trying canonical path resolution: '{}' -> '{}', '{}' -> '{}'", - path, - canon_path.display(), - worktree_path, - canon_worktree.display() - ); - - match canon_path.strip_prefix(&canon_worktree) { - Ok(relative_path) => { - let result = relative_path.to_string_lossy().to_string(); - tracing::debug!( - "Successfully made relative with canonical paths: '{}' -> '{}'", - path, - result - ); - result - } - Err(e) => { - tracing::warn!( - "Failed to make canonical path relative: '{}' relative to '{}', error: {}, returning original", - canon_path.display(), - canon_worktree.display(), - e - ); - path.to_string() - } - } - } - _ => { - tracing::debug!( - "Could not canonicalize paths (paths may not exist): '{}', '{}', returning original", - path, - worktree_path - ); - path.to_string() - } - } - } - } - } - - fn generate_concise_content( - &self, - tool_name: &str, - input: &serde_json::Value, - action_type: &ActionType, - worktree_path: &str, - ) -> String { - match action_type { - ActionType::FileRead { path } => format!("`{}`", path), - ActionType::FileWrite { path } => format!("`{}`", path), - ActionType::CommandRun { command } => format!("`{}`", command), - ActionType::Search { query } => format!("`{}`", query), - ActionType::WebFetch { url } => format!("`{}`", url), - ActionType::TaskCreate { description } => description.clone(), - ActionType::PlanPresentation { plan } => plan.clone(), - ActionType::Other { description: _ } => { - // For other tools, try to extract key information or fall back to tool name - match tool_name.to_lowercase().as_str() { - "todoread" | "todowrite" => { - // Extract todo list from input to show actual todos - if let Some(todos) = input.get("todos").and_then(|t| t.as_array()) { - let mut todo_items = Vec::new(); - for todo in todos { - if let Some(content) = todo.get("content").and_then(|c| c.as_str()) - { - let status = todo - .get("status") - .and_then(|s| s.as_str()) - .unwrap_or("pending"); - let status_emoji = match status { - "completed" => "✅", - "in_progress" => "🔄", - "pending" | "todo" => "⏳", - _ => "📝", - }; - let priority = todo - .get("priority") - .and_then(|p| p.as_str()) - .unwrap_or("medium"); - todo_items.push(format!( - "{} {} ({})", - status_emoji, content, priority - )); - } - } - if !todo_items.is_empty() { - format!("TODO List:\n{}", todo_items.join("\n")) - } else { - "Managing TODO list".to_string() - } - } else { - "Managing TODO list".to_string() - } - } - "ls" => { - if let Some(path) = input.get("path").and_then(|p| p.as_str()) { - let relative_path = self.make_path_relative(path, worktree_path); - if relative_path.is_empty() { - "List directory".to_string() - } else { - format!("List directory: `{}`", relative_path) - } - } else { - "List directory".to_string() - } - } - "glob" => { - let pattern = input.get("pattern").and_then(|p| p.as_str()).unwrap_or("*"); - let path = input.get("path").and_then(|p| p.as_str()); - - if let Some(search_path) = path { - format!( - "Find files: `{}` in `{}`", - pattern, - self.make_path_relative(search_path, worktree_path) - ) - } else { - format!("Find files: `{}`", pattern) - 
} - } - "codebase_search_agent" => { - if let Some(query) = input.get("query").and_then(|q| q.as_str()) { - format!("Search: {}", query) - } else { - "Codebase search".to_string() - } - } - _ => tool_name.to_string(), - } - } - } - } - - fn extract_action_type( - &self, - tool_name: &str, - input: &serde_json::Value, - worktree_path: &str, - ) -> ActionType { - match tool_name.to_lowercase().as_str() { - "read" => { - if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - ActionType::FileRead { - path: self.make_path_relative(file_path, worktree_path), - } - } else { - ActionType::Other { - description: "File read operation".to_string(), - } - } - } - "edit" | "write" | "multiedit" => { - if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { - ActionType::FileWrite { - path: self.make_path_relative(file_path, worktree_path), - } - } else if let Some(path) = input.get("path").and_then(|p| p.as_str()) { - ActionType::FileWrite { - path: self.make_path_relative(path, worktree_path), - } - } else { - ActionType::Other { - description: "File write operation".to_string(), - } - } - } - "bash" => { - if let Some(command) = input.get("command").and_then(|c| c.as_str()) { - ActionType::CommandRun { - command: command.to_string(), - } - } else { - ActionType::Other { - description: "Command execution".to_string(), - } - } - } - "grep" => { - if let Some(pattern) = input.get("pattern").and_then(|p| p.as_str()) { - ActionType::Search { - query: pattern.to_string(), - } - } else { - ActionType::Other { - description: "Search operation".to_string(), - } - } - } - "glob" => { - if let Some(pattern) = input.get("pattern").and_then(|p| p.as_str()) { - ActionType::Other { - description: format!("Find files: {}", pattern), - } - } else { - ActionType::Other { - description: "File pattern search".to_string(), - } - } - } - "webfetch" => { - if let Some(url) = input.get("url").and_then(|u| u.as_str()) { - ActionType::WebFetch { - url: url.to_string(), - } - } else { - ActionType::Other { - description: "Web fetch operation".to_string(), - } - } - } - "task" => { - if let Some(description) = input.get("description").and_then(|d| d.as_str()) { - ActionType::TaskCreate { - description: description.to_string(), - } - } else if let Some(prompt) = input.get("prompt").and_then(|p| p.as_str()) { - ActionType::TaskCreate { - description: prompt.to_string(), - } - } else { - ActionType::Other { - description: "Task creation".to_string(), - } - } - } - "exit_plan_mode" | "exitplanmode" | "exit-plan-mode" => { - if let Some(plan) = input.get("plan").and_then(|p| p.as_str()) { - ActionType::PlanPresentation { - plan: plan.to_string(), - } - } else { - ActionType::Other { - description: "Plan presentation".to_string(), - } - } - } - _ => ActionType::Other { - description: format!("Tool: {}", tool_name), - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_normalize_logs_ignores_result_type() { - let executor = ClaudeExecutor::new(); - let logs = r#"{"type":"system","subtype":"init","cwd":"/private/tmp","session_id":"e988eeea-3712-46a1-82d4-84fbfaa69114","tools":[],"model":"claude-sonnet-4-20250514"} -{"type":"assistant","message":{"id":"msg_123","type":"message","role":"assistant","model":"claude-sonnet-4-20250514","content":[{"type":"text","text":"Hello world"}],"stop_reason":null},"session_id":"e988eeea-3712-46a1-82d4-84fbfaa69114"} -{"type":"result","subtype":"success","is_error":false,"duration_ms":6059,"result":"Final result"} 
-{"type":"unknown","data":"some data"}"#; - - let result = executor.normalize_logs(logs, "/tmp/test-worktree").unwrap(); - - // Should have system message, assistant message, and unknown message - // but NOT the result message - assert_eq!(result.entries.len(), 3); - - // Check that no entry contains "result" - for entry in &result.entries { - assert!(!entry.content.contains("result")); - } - - // Check that unknown JSON is still processed - assert!(result - .entries - .iter() - .any(|e| e.content.contains("Unrecognized JSON"))); - } - - #[test] - fn test_make_path_relative() { - let executor = ClaudeExecutor::new(); - - // Test with relative path (should remain unchanged) - assert_eq!( - executor.make_path_relative("src/main.rs", "/tmp/test-worktree"), - "src/main.rs" - ); - - // Test with absolute path (should become relative if possible) - let test_worktree = "/tmp/test-worktree"; - let absolute_path = format!("{}/src/main.rs", test_worktree); - let result = executor.make_path_relative(&absolute_path, test_worktree); - assert_eq!(result, "src/main.rs"); - } - - #[test] - fn test_todo_tool_content_extraction() { - let executor = ClaudeExecutor::new(); - - // Test TodoWrite with actual todo list - let todo_input = serde_json::json!({ - "todos": [ - { - "id": "1", - "content": "Fix the navigation bug", - "status": "completed", - "priority": "high" - }, - { - "id": "2", - "content": "Add user authentication", - "status": "in_progress", - "priority": "medium" - }, - { - "id": "3", - "content": "Write documentation", - "status": "pending", - "priority": "low" - } - ] - }); - - let result = executor.generate_concise_content( - "TodoWrite", - &todo_input, - &ActionType::Other { - description: "Tool: TodoWrite".to_string(), - }, - "/tmp/test-worktree", - ); - - assert!(result.contains("TODO List:")); - assert!(result.contains("✅ Fix the navigation bug (high)")); - assert!(result.contains("🔄 Add user authentication (medium)")); - assert!(result.contains("⏳ Write documentation (low)")); - } - - #[test] - fn test_todo_tool_empty_list() { - let executor = ClaudeExecutor::new(); - - // Test TodoWrite with empty todo list - let empty_input = serde_json::json!({ - "todos": [] - }); - - let result = executor.generate_concise_content( - "TodoWrite", - &empty_input, - &ActionType::Other { - description: "Tool: TodoWrite".to_string(), - }, - "/tmp/test-worktree", - ); - - assert_eq!(result, "Managing TODO list"); - } - - #[test] - fn test_todo_tool_no_todos_field() { - let executor = ClaudeExecutor::new(); - - // Test TodoWrite with no todos field - let no_todos_input = serde_json::json!({ - "other_field": "value" - }); - - let result = executor.generate_concise_content( - "TodoWrite", - &no_todos_input, - &ActionType::Other { - description: "Tool: TodoWrite".to_string(), - }, - "/tmp/test-worktree", - ); - - assert_eq!(result, "Managing TODO list"); - } - - #[test] - fn test_glob_tool_content_extraction() { - let executor = ClaudeExecutor::new(); - - // Test Glob with pattern and path - let glob_input = serde_json::json!({ - "pattern": "**/*.ts", - "path": "/tmp/test-worktree/src" - }); - - let result = executor.generate_concise_content( - "Glob", - &glob_input, - &ActionType::Other { - description: "Find files: **/*.ts".to_string(), - }, - "/tmp/test-worktree", - ); - - assert_eq!(result, "Find files: `**/*.ts` in `src`"); - } - - #[test] - fn test_glob_tool_pattern_only() { - let executor = ClaudeExecutor::new(); - - // Test Glob with pattern only - let glob_input = serde_json::json!({ - "pattern": 
"*.js" - }); - - let result = executor.generate_concise_content( - "Glob", - &glob_input, - &ActionType::Other { - description: "Find files: *.js".to_string(), - }, - "/tmp/test-worktree", - ); - - assert_eq!(result, "Find files: `*.js`"); - } - - #[test] - fn test_ls_tool_content_extraction() { - let executor = ClaudeExecutor::new(); - - // Test LS with path - let ls_input = serde_json::json!({ - "path": "/tmp/test-worktree/components" - }); - - let result = executor.generate_concise_content( - "LS", - &ls_input, - &ActionType::Other { - description: "Tool: LS".to_string(), - }, - "/tmp/test-worktree", - ); - - assert_eq!(result, "List directory: `components`"); - } -} diff --git a/backend/src/executors/cleanup_script.rs b/backend/src/executors/cleanup_script.rs deleted file mode 100644 index c96dd710..00000000 --- a/backend/src/executors/cleanup_script.rs +++ /dev/null @@ -1,121 +0,0 @@ -use async_trait::async_trait; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{Executor, ExecutorError}, - models::{project::Project, task::Task}, - utils::shell::get_shell_command, -}; - -/// Executor for running project cleanup scripts -pub struct CleanupScriptExecutor { - pub script: String, -} - -#[async_trait] -impl Executor for CleanupScriptExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Validate the task and project exist - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; - - let _project = Project::find_by_id(pool, task.project_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; // Reuse TaskNotFound for simplicity - - let (shell_cmd, shell_arg) = get_shell_command(); - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&self.script) - .working_dir(worktree_path); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, "CleanupScript") - .with_task(task_id, Some(task.title.clone())) - .with_context("Cleanup script execution") - .spawn_error(e) - })?; - - Ok(proc) - } - - /// Normalize cleanup script logs into a readable format - fn normalize_logs( - &self, - logs: &str, - _worktree_path: &str, - ) -> Result { - let mut entries = Vec::new(); - - // Add script command as first entry - entries.push(crate::executor::NormalizedEntry { - timestamp: None, - entry_type: crate::executor::NormalizedEntryType::SystemMessage, - content: format!("Executing cleanup script:\n{}", self.script), - metadata: None, - }); - - // Process the logs - split by lines and create entries - if !logs.trim().is_empty() { - let lines: Vec<&str> = logs.lines().collect(); - let mut current_chunk = String::new(); - - for line in lines { - current_chunk.push_str(line); - current_chunk.push('\n'); - - // Create entry for every 10 lines or when we encounter an error-like line - if current_chunk.lines().count() >= 10 - || line.to_lowercase().contains("error") - || line.to_lowercase().contains("failed") - || line.to_lowercase().contains("exception") - { - let entry_type = if line.to_lowercase().contains("error") - || line.to_lowercase().contains("failed") - || line.to_lowercase().contains("exception") - { - crate::executor::NormalizedEntryType::ErrorMessage - } else { - crate::executor::NormalizedEntryType::SystemMessage - }; - - entries.push(crate::executor::NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type, - content: 
current_chunk.trim().to_string(), - metadata: None, - }); - - current_chunk.clear(); - } - } - - // Add any remaining content - if !current_chunk.trim().is_empty() { - entries.push(crate::executor::NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type: crate::executor::NormalizedEntryType::SystemMessage, - content: current_chunk.trim().to_string(), - metadata: None, - }); - } - } - - Ok(crate::executor::NormalizedConversation { - entries, - session_id: None, - executor_type: "cleanup-script".to_string(), - prompt: Some(self.script.clone()), - summary: None, - }) - } -} diff --git a/backend/src/executors/codex.rs b/backend/src/executors/codex.rs deleted file mode 100644 index beb9a2df..00000000 --- a/backend/src/executors/codex.rs +++ /dev/null @@ -1,1001 +0,0 @@ -use std::path::PathBuf; - -use async_trait::async_trait; -use regex::Regex; -use serde_json::Value; -use tokio::io::{AsyncBufReadExt, BufReader}; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{ - ActionType, Executor, ExecutorError, NormalizedConversation, NormalizedEntry, - NormalizedEntryType, - }, - models::{executor_session::ExecutorSession, task::Task}, - utils::{path::make_path_relative, shell::get_shell_command}, -}; - -/// Check if a JSON message looks like a system configuration message -/// This prevents accidentally filtering out regular messages or tool messages -fn is_system_config_message(json: &Value) -> bool { - // System config messages have configuration-like fields and lack message structure - // They should NOT have "msg" field (which indicates structured codex messages) - // They should NOT have "id" field (which indicates task-related messages) - if json.get("msg").is_some() || json.get("id").is_some() { - return false; - } - - // Check for presence of typical system configuration fields - let config_fields = [ - "model", - "reasoning effort", - "provider", - "sandbox", - "approval", - "workdir", - ]; - let found_config_fields = config_fields - .iter() - .filter(|&field| json.get(field).is_some()) - .count(); - - // Consider it a config message if we have at least 2 config fields - found_config_fields >= 2 -} - -/// Extract session ID from codex stderr output -fn extract_session_id_from_line(line: &str) -> Option { - // Look for session_id in the log format: - // 2025-07-23T15:47:59.877058Z INFO codex_exec: Codex initialized with event: Event { id: "0", msg: SessionConfigured(SessionConfiguredEvent { session_id: 3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b, model: "codex-mini-latest", history_log_id: 9104228, history_entry_count: 1 }) } - static SESSION_ID_REGEX: std::sync::OnceLock = std::sync::OnceLock::new(); - let regex = SESSION_ID_REGEX.get_or_init(|| { - Regex::new(r"session_id:\s*([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})").unwrap() - }); - - regex - .captures(line) - .and_then(|cap| cap.get(1)) - .map(|m| m.as_str().to_string()) -} - -/// Find codex rollout file path for given session_id -fn find_rollout_file_path(session_id: &str) -> Result { - let home_dir = dirs::home_dir().ok_or("Could not determine home directory")?; - let sessions_dir = home_dir.join(".codex").join("sessions"); - - // Scan the sessions directory recursively for rollout files matching the session_id - // Pattern: rollout-{YYYY}-{MM}-{DD}T{HH}-{mm}-{ss}-{session_id}.jsonl - let pattern = format!("rollout-*-{}.jsonl", session_id); - - fn scan_directory( - dir: &PathBuf, - _pattern: &str, - session_id: &str, - ) -> Result, 
String> { - if !dir.exists() { - return Ok(None); - } - - let entries = std::fs::read_dir(dir) - .map_err(|e| format!("Failed to read directory {}: {}", dir.display(), e))?; - - for entry in entries { - let entry = entry.map_err(|e| format!("Failed to read directory entry: {}", e))?; - let path = entry.path(); - - if path.is_dir() { - // Recursively search subdirectories - if let Some(found) = scan_directory(&path, _pattern, session_id)? { - return Ok(Some(found)); - } - } else if path.is_file() { - if let Some(filename) = path.file_name() { - if let Some(filename_str) = filename.to_str() { - if filename_str.contains(session_id) - && filename_str.starts_with("rollout-") - && filename_str.ends_with(".jsonl") - { - return Ok(Some(path)); - } - } - } - } - } - - Ok(None) - } - - scan_directory(&sessions_dir, &pattern, session_id)? - .ok_or_else(|| format!("Could not find rollout file for session_id: {}", session_id)) -} - -/// Stream stderr from codex process to extract session_id but don't save stderr to DB -pub async fn stream_codex_stderr_to_extract_session( - output: impl tokio::io::AsyncRead + Unpin, - pool: sqlx::SqlitePool, - execution_process_id: Uuid, -) { - let mut reader = BufReader::new(output); - let mut line = String::new(); - let mut session_extracted = false; - - loop { - line.clear(); - match reader.read_line(&mut line).await { - Ok(0) => break, // EOF - Ok(_) => { - line = line.trim_end_matches(['\r', '\n']).to_string(); - - if !session_extracted { - if let Some(session_id) = extract_session_id_from_line(&line) { - if let Err(e) = ExecutorSession::update_session_id( - &pool, - execution_process_id, - &session_id, - ) - .await - { - tracing::error!( - "Failed to update session ID for execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Updated session ID {} for execution process {}", - session_id, - execution_process_id - ); - session_extracted = true; - } - } - } - } - Err(e) => { - tracing::error!( - "Error reading stderr for execution process {}: {}", - execution_process_id, - e - ); - break; - } - } - } -} - -/// An executor that uses Codex CLI to process tasks -pub struct CodexExecutor { - executor_type: String, - command: String, -} - -impl Default for CodexExecutor { - fn default() -> Self { - Self::new() - } -} - -impl CodexExecutor { - /// Create a new CodexExecutor with default settings - pub fn new() -> Self { - Self { - executor_type: "Codex".to_string(), - command: "npx @openai/codex exec --json --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check".to_string(), - } - } -} - -#[async_trait] -impl Executor for CodexExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? 
- .ok_or(ExecutorError::TaskNotFound)?; - - let prompt = if let Some(task_description) = task.description { - format!( - r#"project_id: {} - -Task title: {} -Task description: {}"#, - task.project_id, task.title, task_description - ) - } else { - format!("project_id: {}\n{}", task.project_id, task.title) - }; - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&self.command) - .stdin(&prompt) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1") - .env("RUST_LOG", "info"); // Enable rust logging to capture session info - - let child = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_task(task_id, Some(task.title.clone())) - .with_context(format!("{} CLI execution for new task", self.executor_type)) - .spawn_error(e) - })?; - - Ok(child) - } - - async fn spawn_followup( - &self, - _pool: &sqlx::SqlitePool, - _task_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - // Find the rollout file for this session - let rollout_file_path = - find_rollout_file_path(session_id).map_err(ExecutorError::InvalidSessionId)?; - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - - let codex_command = format!( - "{} -c experimental_resume={}", - self.command, - rollout_file_path.display() - ); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&codex_command) - .stdin(prompt) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1") - .env("RUST_LOG", "info"); - - let child = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_context(format!( - "{} CLI followup execution for session {}", - self.executor_type, session_id - )) - .spawn_error(e) - })?; - - Ok(child) - } - - /// Custom streaming setup to handle stderr for session extraction - async fn execute_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - worktree_path: &str, - ) -> Result { - let mut child = self.spawn(pool, task_id, worktree_path).await?; - - // Get streams from the child process - let streams = child - .stream() - .await - .expect("Failed to get streams from child process"); - - // Take stdout for logs (standard streaming) - let stdout = streams - .stdout - .expect("Failed to take stdout from child process"); - - // Take stderr for session extraction only (don't save to DB) - let stderr = streams - .stderr - .expect("Failed to take stderr from child process"); - - let pool_clone1 = pool.clone(); - let pool_clone2 = pool.clone(); - - // Stream stdout to database (true = is_stdout) - tokio::spawn(crate::executor::stream_output_to_db( - stdout, - pool_clone1, - attempt_id, - execution_process_id, - true, - )); - - // Stream stderr for session extraction only (don't save to DB) - tokio::spawn(stream_codex_stderr_to_extract_session( - stderr, - pool_clone2, - execution_process_id, - )); - - Ok(child) - } - - /// Custom followup streaming with same stderr handling - async fn execute_followup_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - let mut child = self - .spawn_followup(pool, task_id, 
session_id, prompt, worktree_path) - .await?; - - // Get streams from the child process - let streams = child - .stream() - .await - .expect("Failed to get streams from child process"); - - // Take stdout for logs (standard streaming) - let stdout = streams - .stdout - .expect("Failed to take stdout from child process"); - - // Take stderr for session extraction only (don't save to DB) - let stderr = streams - .stderr - .expect("Failed to take stderr from child process"); - - let pool_clone1 = pool.clone(); - let pool_clone2 = pool.clone(); - - // Stream stdout to database (true = is_stdout) - tokio::spawn(crate::executor::stream_output_to_db( - stdout, - pool_clone1, - attempt_id, - execution_process_id, - true, - )); - - // Stream stderr for session extraction only (don't save to DB) - tokio::spawn(stream_codex_stderr_to_extract_session( - stderr, - pool_clone2, - execution_process_id, - )); - - Ok(child) - } - - fn normalize_logs( - &self, - logs: &str, - worktree_path: &str, - ) -> Result { - let mut entries = Vec::new(); - let mut session_id = None; - - for line in logs.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Try to parse as JSON from codex jsonl output - let json: Value = match serde_json::from_str(trimmed) { - Ok(json) => json, - Err(_) => { - // If line isn't valid JSON, add it as raw text - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content: format!("Raw output: {}", trimmed), - metadata: None, - }); - continue; - } - }; - - // Extract session ID if not already set - if session_id.is_none() { - if let Some(sess_id) = json.get("session_id").and_then(|v| v.as_str()) { - session_id = Some(sess_id.to_string()); - } - } - - // Handle initial configuration message - check if this looks like a system config - if is_system_config_message(&json) { - let mut config_params = Vec::new(); - - // Collect relevant system parameters that might be present - if let Some(model) = json.get("model").and_then(|m| m.as_str()) { - config_params.push(format!("model: {}", model)); - } - if let Some(reasoning_effort) = - json.get("reasoning effort").and_then(|r| r.as_str()) - { - config_params.push(format!("reasoning effort: {}", reasoning_effort)); - } - if let Some(provider) = json.get("provider").and_then(|p| p.as_str()) { - config_params.push(format!("provider: {}", provider)); - } - - // If we found any config parameters, create a system message - if !config_params.is_empty() { - let content = format!( - "Session Parameters:\n{}", - config_params - .iter() - .map(|param| param.to_string()) - .collect::>() - .join("\n") - ); - - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content, - metadata: Some(json.clone()), - }); - continue; - } - } - - // Handle prompt message - ignore as requested since frontend displays it - if json.get("prompt").is_some() { - continue; - } - - // Process different message types based on codex jsonl format - if let Some(msg) = json.get("msg") { - if let Some(msg_type) = msg.get("type").and_then(|t| t.as_str()) { - match msg_type { - "task_started" => { - // Skip task_started messages as requested - continue; - } - "agent_reasoning" => { - if let Some(text) = msg.get("text").and_then(|t| t.as_str()) { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::Thinking, - content: text.to_string(), - metadata: Some(json.clone()), - }); - } - } - "exec_command_begin" => { - if let 
Some(command_array) = - msg.get("command").and_then(|c| c.as_array()) - { - let command = command_array - .iter() - .filter_map(|v| v.as_str()) - .collect::>() - .join(" "); - - // Map shell command to bash tool - let (tool_name, action_type) = - if command_array.first().and_then(|v| v.as_str()) - == Some("bash") - { - ( - "bash".to_string(), - ActionType::CommandRun { - command: command.clone(), - }, - ) - } else { - ( - "shell".to_string(), - ActionType::CommandRun { - command: command.clone(), - }, - ) - }; - - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::ToolUse { - tool_name, - action_type, - }, - content: format!("`{}`", command), - metadata: Some(json.clone()), - }); - } - } - "exec_command_end" => { - // Skip command end entries to avoid duplication - continue; - } - "task_complete" => { - // Skip task_complete messages as requested - continue; - } - "token_count" => { - // Skip token count entries - continue; - } - "agent_message" => { - if let Some(message) = msg.get("message").and_then(|m| m.as_str()) { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::AssistantMessage, - content: message.to_string(), - metadata: Some(json.clone()), - }); - } - } - "patch_apply_begin" => { - // Map to internal edit tool (same as claude.rs pattern) - if let Some(changes) = msg.get("changes").and_then(|c| c.as_object()) { - // For each file being modified, create an edit tool entry - for (file_path, _change_details) in changes { - // Make path relative to worktree using utility function - let relative_path = - make_path_relative(file_path, worktree_path); - - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::ToolUse { - tool_name: "edit".to_string(), - action_type: ActionType::FileWrite { - path: relative_path.clone(), - }, - }, - content: format!("`{}`", relative_path), - metadata: Some(json.clone()), - }); - } - } - } - "patch_apply_end" => { - // Skip patch end entries to avoid duplication (similar to exec_command_end) - continue; - } - "error" => { - if let Some(error_message) = msg.get("message").and_then(|m| m.as_str()) - { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::ErrorMessage, - content: error_message.to_string(), - metadata: Some(json.clone()), - }); - } else { - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::ErrorMessage, - content: "Unknown error occurred".to_string(), - metadata: Some(json.clone()), - }); - } - } - _ => { - // Unknown message type, add as system message - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content: format!("Unknown message type: {}", msg_type), - metadata: Some(json.clone()), - }); - } - } - } - } else { - // JSON without msg field, add as unrecognized - entries.push(NormalizedEntry { - timestamp: None, - entry_type: NormalizedEntryType::SystemMessage, - content: format!("Unrecognized JSON: {}", trimmed), - metadata: Some(json), - }); - } - } - - Ok(NormalizedConversation { - entries, - session_id, - executor_type: self.executor_type.clone(), - prompt: None, - summary: None, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_extract_session_id_from_line() { - let line = "2025-07-23T15:47:59.877058Z INFO codex_exec: Codex initialized with event: Event { id: \"0\", msg: SessionConfigured(SessionConfiguredEvent { session_id: 3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b, model: 
\"codex-mini-latest\", history_log_id: 9104228, history_entry_count: 1 }) }"; - let session_id = extract_session_id_from_line(line); - assert_eq!( - session_id, - Some("3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b".to_string()) - ); - } - - #[test] - fn test_extract_session_id_no_match() { - let line = "Some random log line without session id"; - let session_id = extract_session_id_from_line(line); - assert_eq!(session_id, None); - } - - #[test] - fn test_normalize_logs_basic() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"task_started"}} -{"id":"1","msg":{"type":"agent_reasoning","text":"**Inspecting the directory tree**\n\nI want to check the root directory tree and I think using `ls -1` is acceptable since the guidelines don't explicitly forbid it, unlike `ls -R`, `find`, or `grep`. I could also consider using `rg --files`, but that might be too overwhelming if there are many files. Focusing on the top-level files and directories seems like a better approach. I'm particularly interested in `LICENSE`, `README.md`, and any relevant README files. So, let's start with `ls -1`."}} -{"id":"1","msg":{"type":"exec_command_begin","call_id":"call_I1o1QnQDtlLjGMg4Vd9HXJLd","command":["bash","-lc","ls -1"],"cwd":"/Users/user/dev/vk-wip"}} -{"id":"1","msg":{"type":"exec_command_end","call_id":"call_I1o1QnQDtlLjGMg4Vd9HXJLd","stdout":"AGENT.md\nCLAUDE.md\nCODE-OF-CONDUCT.md\nCargo.lock\nCargo.toml\nDockerfile\nLICENSE\nREADME.md\nbackend\nbuild-npm-package.sh\ndev_assets\ndev_assets_seed\nfrontend\nnode_modules\nnpx-cli\npackage-lock.json\npackage.json\npnpm-lock.yaml\npnpm-workspace.yaml\nrust-toolchain.toml\nrustfmt.toml\nscripts\nshared\ntest-npm-package.sh\n","stderr":"","exit_code":0}} -{"id":"1","msg":{"type":"task_complete","last_agent_message":"I can see the directory structure of your project. This appears to be a Rust project with a frontend/backend architecture, using pnpm for package management. The project includes various configuration files, documentation, and development assets."}}"#; - - let result = executor.normalize_logs(logs, "/tmp/test").unwrap(); - - // Should have: agent_reasoning, exec_command_begin (task_started and task_complete skipped) - assert_eq!(result.entries.len(), 2); - - // Check agent reasoning (thinking) - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::Thinking - )); - assert!(result.entries[0] - .content - .contains("Inspecting the directory tree")); - - // Check bash command - assert!(matches!( - result.entries[1].entry_type, - NormalizedEntryType::ToolUse { .. } - )); - if let NormalizedEntryType::ToolUse { - tool_name, - action_type, - } = &result.entries[1].entry_type - { - assert_eq!(tool_name, "bash"); - assert!(matches!(action_type, ActionType::CommandRun { .. })); - } - assert_eq!(result.entries[1].content, "`bash -lc ls -1`"); - } - - #[test] - fn test_normalize_logs_shell_vs_bash_mapping() { - let executor = CodexExecutor::new(); - - // Test shell command (not bash) - let shell_logs = r#"{"id":"1","msg":{"type":"exec_command_begin","call_id":"call_test","command":["sh","-c","echo hello"],"cwd":"/tmp"}}"#; - let result = executor.normalize_logs(shell_logs, "/tmp").unwrap(); - assert_eq!(result.entries.len(), 1); - - if let NormalizedEntryType::ToolUse { tool_name, .. 
} = &result.entries[0].entry_type { - assert_eq!(tool_name, "shell"); // Maps to shell, not bash - } - - // Test bash command - let bash_logs = r#"{"id":"1","msg":{"type":"exec_command_begin","call_id":"call_test","command":["bash","-c","echo hello"],"cwd":"/tmp"}}"#; - let result = executor.normalize_logs(bash_logs, "/tmp").unwrap(); - assert_eq!(result.entries.len(), 1); - - if let NormalizedEntryType::ToolUse { tool_name, .. } = &result.entries[0].entry_type { - assert_eq!(tool_name, "bash"); // Maps to bash - } - } - - #[test] - fn test_normalize_logs_token_count_skipped() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"task_started"}} -{"id":"1","msg":{"type":"token_count","input_tokens":1674,"cached_input_tokens":1627,"output_tokens":384,"reasoning_output_tokens":384,"total_tokens":2058}} -{"id":"1","msg":{"type":"task_complete","last_agent_message":"Done!"}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have: nothing (task_started, task_complete, and token_count all skipped) - assert_eq!(result.entries.len(), 0); - } - - #[test] - fn test_normalize_logs_malformed_json() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"task_started"}} -invalid json line here -{"id":"1","msg":{"type":"task_complete","last_agent_message":"Done!"}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have: raw output only (task_started and task_complete skipped) - assert_eq!(result.entries.len(), 1); - - // Check that malformed JSON becomes raw output - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::SystemMessage - )); - assert!(result.entries[0] - .content - .contains("Raw output: invalid json line here")); - } - - #[test] - fn test_find_rollout_file_path_basic() { - // Test the rollout file path logic (this is a unit test, won't actually find files) - let session_id = "00000000-0000-0000-0000-0000307f053d"; - - // This will likely fail because the directory doesn't exist, but we can test the logic - let result = find_rollout_file_path(session_id); - - // Should return an error since directory doesn't exist - assert!(result.is_err()); - assert!(result.unwrap_err().contains("Could not find rollout file")); - } - - #[test] - fn test_normalize_logs_config_message() { - let executor = CodexExecutor::new(); - let logs = r#"{"sandbox":"danger-full-access","reasoning summaries":"auto","approval":"Never","provider":"openai","reasoning effort":"medium","workdir":"/tmp","model":"codex-mini-latest"}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry for the configuration message - assert_eq!(result.entries.len(), 1); - - // Check configuration message - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::SystemMessage - )); - assert!(result.entries[0].content.contains("Session Parameters:")); - assert!(result.entries[0] - .content - .contains("model: codex-mini-latest")); - assert!(result.entries[0] - .content - .contains("reasoning effort: medium")); - assert!(result.entries[0].content.contains("provider: openai")); - } - - #[test] - fn test_normalize_logs_prompt_ignored() { - let executor = CodexExecutor::new(); - let logs = r#"{"prompt":"project_id: f61fbd6a-9552-4b68-a1fe-10561f028dfc\n \nTask title: describe this repo"} -{"id":"1","msg":{"type":"task_started"}} -{"id":"1","msg":{"type":"agent_message","message":"Hello, I'll help you with that."}}"#; - - let result = 
executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry (prompt and task_started ignored, only agent_message) - assert_eq!(result.entries.len(), 1); - - // Check that we only have agent_message - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::AssistantMessage - )); - assert_eq!(result.entries[0].content, "Hello, I'll help you with that."); - } - - #[test] - fn test_normalize_logs_error_message() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"error","message":"Missing environment variable: `OPENAI_API_KEY`. Create an API key (https://platform.openai.com) and export it as an environment variable."}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry for the error message - assert_eq!(result.entries.len(), 1); - - // Check error message - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::ErrorMessage - )); - assert!(result.entries[0] - .content - .contains("Missing environment variable: `OPENAI_API_KEY`")); - } - - #[test] - fn test_normalize_logs_error_message_no_content() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"error"}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry for the error message - assert_eq!(result.entries.len(), 1); - - // Check error message fallback - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::ErrorMessage - )); - assert_eq!(result.entries[0].content, "Unknown error occurred"); - } - - #[test] - fn test_normalize_logs_real_example() { - let executor = CodexExecutor::new(); - let logs = r#"{"sandbox":"danger-full-access","reasoning summaries":"auto","approval":"Never","provider":"openai","reasoning effort":"medium","workdir":"/private/var/folders/4m/6cwx14sx59lc2k9km5ph76gh0000gn/T/vibe-kanban-dev/vk-ec8b-describe-t","model":"codex-mini-latest"} -{"prompt":"project_id: f61fbd6a-9552-4b68-a1fe-10561f028dfc\n \nTask title: describe this repo"} -{"id":"1","msg":{"type":"task_started"}} -{"id":"1","msg":{"type":"error","message":"Missing environment variable: `OPENAI_API_KEY`. 
Create an API key (https://platform.openai.com) and export it as an environment variable."}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 2 entries: config, error (prompt and task_started ignored) - assert_eq!(result.entries.len(), 2); - - // Check configuration message - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::SystemMessage - )); - assert!(result.entries[0].content.contains("Session Parameters:")); - - // Check error message - assert!(matches!( - result.entries[1].entry_type, - NormalizedEntryType::ErrorMessage - )); - assert!(result.entries[1] - .content - .contains("Missing environment variable")); - } - - #[test] - fn test_is_system_config_message_predicate() { - use serde_json::json; - - // Should identify system config messages - let config_msg = json!({ - "sandbox": "danger-full-access", - "reasoning effort": "medium", - "provider": "openai", - "model": "codex-mini-latest" - }); - assert!(is_system_config_message(&config_msg)); - - // Should NOT identify regular codex messages with "msg" field - let regular_msg = json!({ - "id": "1", - "msg": {"type": "task_started"} - }); - assert!(!is_system_config_message(&regular_msg)); - - // Should NOT identify tool messages with "id" field - let tool_msg = json!({ - "id": "1", - "msg": {"type": "exec_command_begin", "command": ["bash", "-c", "echo hello"]} - }); - assert!(!is_system_config_message(&tool_msg)); - - // Should NOT identify prompt messages (they have only 1 field) - let prompt_msg = json!({ - "prompt": "project_id: abc\nTask title: test" - }); - assert!(!is_system_config_message(&prompt_msg)); - - // Should handle partial config messages (with at least 2 config fields) - let partial_config = json!({ - "model": "codex-mini-latest", - "provider": "openai" - }); - assert!(is_system_config_message(&partial_config)); - - // Should NOT match if only 1 config field - let single_field = json!({ - "model": "codex-mini-latest" - }); - assert!(!is_system_config_message(&single_field)); - } - - #[test] - fn test_normalize_logs_partial_config() { - let executor = CodexExecutor::new(); - // Test with just model and provider (should still work) - let logs = r#"{"model":"codex-mini-latest","provider":"openai"}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry for the configuration message - assert_eq!(result.entries.len(), 1); - - // Check configuration message contains available params - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::SystemMessage - )); - assert!(result.entries[0].content.contains("Session Parameters:")); - assert!(result.entries[0] - .content - .contains("model: codex-mini-latest")); - assert!(result.entries[0].content.contains("provider: openai")); - assert!(!result.entries[0].content.contains("reasoning_effort")); // Should not be present - } - - #[test] - fn test_normalize_logs_agent_message() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"agent_message","message":"I've made a small restructuring of the top‐level README:\n\n- **Inserted a \"Table of Contents\"** under the screenshot, linking to all major sections (Overview, Installation, Documentation, Support, Contributing, Development → Prerequisites/Running/Build, Environment Variables, Custom OAuth, and License).\n- **Appended a \"License\" section** at the bottom pointing to the Apache 2.0 LICENSE file.\n\nThese tweaks should make navigation and licensing info more discoverable. 
Let me know if you'd like any other adjustments!"}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry for the agent message - assert_eq!(result.entries.len(), 1); - - // Check agent message - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::AssistantMessage - )); - assert!(result.entries[0] - .content - .contains("I've made a small restructuring")); - assert!(result.entries[0].content.contains("Table of Contents")); - } - - #[test] - fn test_normalize_logs_patch_apply() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"patch_apply_begin","call_id":"call_zr84aWQuwJR3aWgJLkfv56Gl","auto_approved":true,"changes":{"/private/var/folders/4m/6cwx14sx59lc2k9km5ph76gh0000gn/T/vibe-kanban-dev/vk-a712-minor-rest/README.md":{"update":{"unified_diff":"@@ -18,2 +18,17 @@\n \n+## Table of Contents\n+\n+- [Overview](#overview)\n+- [Installation](#installation)","move_path":null}}}}} -{"id":"1","msg":{"type":"patch_apply_end","call_id":"call_zr84aWQuwJR3aWgJLkfv56Gl","stdout":"Success. Updated the following files:\nM /private/var/folders/4m/6cwx14sx59lc2k9km5ph76gh0000gn/T/vibe-kanban-dev/vk-a712-minor-rest/README.md\n","stderr":"","success":true}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry (patch_apply_begin, patch_apply_end skipped) - assert_eq!(result.entries.len(), 1); - - // Check edit tool use (follows claude.rs pattern) - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::ToolUse { .. } - )); - if let NormalizedEntryType::ToolUse { - tool_name, - action_type, - } = &result.entries[0].entry_type - { - assert_eq!(tool_name, "edit"); - assert!(matches!(action_type, ActionType::FileWrite { .. })); - } - assert!(result.entries[0].content.contains("README.md")); - } - - #[test] - fn test_normalize_logs_skip_task_messages() { - let executor = CodexExecutor::new(); - let logs = r#"{"id":"1","msg":{"type":"task_started"}} -{"id":"1","msg":{"type":"agent_message","message":"Hello world"}} -{"id":"1","msg":{"type":"task_complete","last_agent_message":"Done!"}}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Should have 1 entry (task_started and task_complete skipped) - assert_eq!(result.entries.len(), 1); - - // Check that only agent_message remains - assert!(matches!( - result.entries[0].entry_type, - NormalizedEntryType::AssistantMessage - )); - assert_eq!(result.entries[0].content, "Hello world"); - } -} diff --git a/backend/src/executors/dev_server.rs b/backend/src/executors/dev_server.rs deleted file mode 100644 index 2949501e..00000000 --- a/backend/src/executors/dev_server.rs +++ /dev/null @@ -1,50 +0,0 @@ -use async_trait::async_trait; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{Executor, ExecutorError}, - models::{project::Project, task::Task}, - utils::shell::get_shell_command, -}; - -/// Executor for running project dev server scripts -pub struct DevServerExecutor { - pub script: String, -} - -#[async_trait] -impl Executor for DevServerExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Validate the task and project exist - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; - - let _project = Project::find_by_id(pool, task.project_id) - .await? 
- .ok_or(ExecutorError::TaskNotFound)?; // Reuse TaskNotFound for simplicity - - let (shell_cmd, shell_arg) = get_shell_command(); - let mut runner = CommandRunner::new(); - runner - .command(shell_cmd) - .arg(shell_arg) - .arg(&self.script) - .working_dir(worktree_path); - - let process = runner.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&runner, "DevServer") - .with_task(task_id, Some(task.title.clone())) - .with_context("Development server execution") - .spawn_error(e) - })?; - - Ok(process) - } -} diff --git a/backend/src/executors/echo.rs b/backend/src/executors/echo.rs deleted file mode 100644 index 1021e729..00000000 --- a/backend/src/executors/echo.rs +++ /dev/null @@ -1,74 +0,0 @@ -use async_trait::async_trait; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{Executor, ExecutorError, SpawnContext}, - models::task::Task, - utils::shell::get_shell_command, -}; - -/// A dummy executor that echoes the task title and description -pub struct EchoExecutor; - -#[async_trait] -impl Executor for EchoExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - _worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; - - let _message = format!( - "Executing task: {} - {}", - task.title, - task.description.as_deref().unwrap_or("No description") - ); - - // For demonstration of streaming, we can use a shell command that outputs multiple lines - let (shell_cmd, shell_arg) = get_shell_command(); - let script = if shell_cmd == "cmd" { - // Windows batch script - format!( - r#"echo Starting task: {} -for /l %%i in (1,1,50) do ( - echo Progress line %%i - timeout /t 1 /nobreak > nul -) -echo Task completed: {}"#, - task.title, task.title - ) - } else { - // Unix shell script (bash/sh) - format!( - r#"echo "Starting task: {}" -for i in {{1..50}}; do - echo "Progress line $i" - sleep 1 -done -echo "Task completed: {}""#, - task.title, task.title - ) - }; - - let mut command_runner = CommandRunner::new(); - command_runner - .command(shell_cmd) - .arg(shell_arg) - .arg(&script); - - let child = command_runner.start().await.map_err(|e| { - SpawnContext::from_command(&command_runner, "Echo") - .with_task(task_id, Some(task.title.clone())) - .with_context("Shell script execution for echo demo") - .spawn_error(e) - })?; - - Ok(child) - } -} diff --git a/backend/src/executors/gemini.rs b/backend/src/executors/gemini.rs deleted file mode 100644 index b0e32475..00000000 --- a/backend/src/executors/gemini.rs +++ /dev/null @@ -1,697 +0,0 @@ -//! Gemini executor implementation -//! -//! This module provides Gemini CLI-based task execution with streaming support. 
- -mod config; -mod streaming; - -use std::time::Instant; - -use async_trait::async_trait; -use config::{ - max_chunk_size, max_display_size, max_latency_ms, max_message_size, GeminiStreamConfig, -}; -// Re-export for external use -use serde_json::Value; -pub use streaming::GeminiPatchBatch; -use streaming::GeminiStreaming; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{ - Executor, ExecutorError, NormalizedConversation, NormalizedEntry, NormalizedEntryType, - }, - models::task::Task, - utils::shell::get_shell_command, -}; - -/// An executor that uses Gemini CLI to process tasks -pub struct GeminiExecutor; - -#[async_trait] -impl Executor for GeminiExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; - - let prompt = if let Some(task_description) = task.description { - format!( - r#"project_id: {} - -Task title: {} -Task description: {}"#, - task.project_id, task.title, task_description - ) - } else { - format!( - r#"project_id: {} - -Task title: {}"#, - task.project_id, task.title - ) - }; - - let mut command = Self::create_gemini_command(worktree_path); - command.stdin(&prompt); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, "Gemini") - .with_task(task_id, Some(task.title.clone())) - .with_context("Gemini CLI execution for new task") - .spawn_error(e) - })?; - - tracing::info!("Successfully started Gemini process for task {}", task_id); - - Ok(proc) - } - - async fn execute_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - worktree_path: &str, - ) -> Result { - tracing::info!( - "Starting Gemini execution for task {} attempt {}", - task_id, - attempt_id - ); - - Self::update_session_id(pool, execution_process_id, &attempt_id.to_string()).await; - - let mut proc = self.spawn(pool, task_id, worktree_path).await?; - - tracing::info!( - "Gemini process spawned successfully for attempt {}", - attempt_id - ); - - Self::setup_streaming(pool, &mut proc, attempt_id, execution_process_id).await; - - Ok(proc) - } - - async fn spawn_followup( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - // For Gemini, session_id is the attempt_id - let attempt_id = Uuid::parse_str(session_id) - .map_err(|_| ExecutorError::InvalidSessionId(session_id.to_string()))?; - - let task = self.load_task(pool, task_id).await?; - let resume_context = self.collect_resume_context(pool, &task, attempt_id).await?; - let comprehensive_prompt = self.build_comprehensive_prompt(&task, &resume_context, prompt); - self.spawn_process(worktree_path, &comprehensive_prompt, attempt_id) - .await - } - - async fn execute_followup_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - tracing::info!( - "Starting Gemini follow-up execution for attempt {} (session {})", - attempt_id, - session_id - ); - - // For Gemini, session_id is the attempt_id - update it in the database - Self::update_session_id(pool, execution_process_id, session_id).await; - - let mut proc = self - .spawn_followup(pool, task_id, session_id, prompt, worktree_path) - .await?; - - 
tracing::info!( - "Gemini follow-up process spawned successfully for attempt {}", - attempt_id - ); - - Self::setup_streaming(pool, &mut proc, attempt_id, execution_process_id).await; - - Ok(proc) - } - - fn normalize_logs( - &self, - logs: &str, - _worktree_path: &str, - ) -> Result { - let mut entries: Vec = Vec::new(); - let mut parse_errors = Vec::new(); - - for (line_num, line) in logs.lines().enumerate() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Try to parse as JSON first (for NormalizedEntry format) - if trimmed.starts_with('{') { - match serde_json::from_str::(trimmed) { - Ok(entry) => { - entries.push(entry); - } - Err(e) => { - tracing::warn!( - "Failed to parse JSONL line {} in Gemini logs: {} - Line: {}", - line_num + 1, - e, - trimmed - ); - parse_errors.push(format!("Line {}: {}", line_num + 1, e)); - - // Create a fallback entry for unrecognized JSON - let fallback_entry = NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type: NormalizedEntryType::SystemMessage, - content: format!("Raw output: {}", trimmed), - metadata: None, - }; - entries.push(fallback_entry); - } - } - } else { - // For non-JSON lines, treat as plain text content - let text_entry = NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type: NormalizedEntryType::AssistantMessage, - content: trimmed.to_string(), - metadata: None, - }; - entries.push(text_entry); - } - } - - if !parse_errors.is_empty() { - tracing::warn!( - "Gemini normalize_logs encountered {} parse errors: {}", - parse_errors.len(), - parse_errors.join("; ") - ); - } - - tracing::debug!( - "Gemini normalize_logs processed {} lines, created {} entries", - logs.lines().count(), - entries.len() - ); - - Ok(NormalizedConversation { - entries, - session_id: None, // Session ID is managed directly via database, not extracted from logs - executor_type: "gemini".to_string(), - prompt: None, - summary: None, - }) - } - - // Note: Gemini streaming is handled by the Gemini-specific WAL system. - // See emit_content_batch() method which calls GeminiExecutor::push_patch(). 
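// The note above refers to the Gemini WAL: emit_message_patch() below pushes JSON Patch-style
// (RFC 6902) operations that target "/entries/{index}" of the normalized conversation, using
// "replace" while the current assistant message is still growing and "add" when a new message
// starts. A minimal sketch of those two payload shapes; the helper name, index, timestamps,
// and content values here are placeholders chosen only for illustration:
use serde_json::json;

fn example_wal_patches() -> (serde_json::Value, serde_json::Value) {
    // Growing message: overwrite the entry that is currently being streamed.
    let grow = json!({
        "op": "replace",
        "path": "/entries/0",
        "value": {
            "timestamp": "2025-08-08T12:00:00Z",
            "entry_type": { "type": "assistant_message" },
            "content": "partial assistant output so far",
            "metadata": null
        }
    });
    // New message: append a fresh entry at the next index.
    let new_entry = json!({
        "op": "add",
        "path": "/entries/1",
        "value": {
            "timestamp": "2025-08-08T12:00:01Z",
            "entry_type": { "type": "assistant_message" },
            "content": "start of the next assistant message",
            "metadata": null
        }
    });
    (grow, new_entry)
}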
-} - -impl GeminiExecutor { - /// Create a standardized Gemini CLI command - fn create_gemini_command(worktree_path: &str) -> CommandRunner { - let (shell_cmd, shell_arg) = get_shell_command(); - let gemini_command = "npx @google/gemini-cli@latest --yolo"; - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(gemini_command) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1"); - command - } - - /// Update executor session ID with error handling - async fn update_session_id( - pool: &sqlx::SqlitePool, - execution_process_id: Uuid, - session_id: &str, - ) { - if let Err(e) = crate::models::executor_session::ExecutorSession::update_session_id( - pool, - execution_process_id, - session_id, - ) - .await - { - tracing::error!( - "Failed to update session ID for Gemini execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Updated session ID {} for Gemini execution process {}", - session_id, - execution_process_id - ); - } - } - - /// Setup streaming for both stdout and stderr - async fn setup_streaming( - pool: &sqlx::SqlitePool, - proc: &mut CommandProcess, - attempt_id: Uuid, - execution_process_id: Uuid, - ) { - // Get stdout and stderr streams from CommandProcess - let mut stream = proc - .stream() - .await - .expect("Failed to get streams from command process"); - let stdout = stream - .stdout - .take() - .expect("Failed to get stdout from command stream"); - let stderr = stream - .stderr - .take() - .expect("Failed to get stderr from command stream"); - - // Start streaming tasks with Gemini-specific line-based message updates - let pool_clone1 = pool.clone(); - let pool_clone2 = pool.clone(); - - tokio::spawn(Self::stream_gemini_chunked( - stdout, - pool_clone1, - attempt_id, - execution_process_id, - )); - // Use default stderr streaming (no custom parsing) - tokio::spawn(crate::executor::stream_output_to_db( - stderr, - pool_clone2, - attempt_id, - execution_process_id, - false, - )); - } - - /// Push patches to the Gemini WAL system - pub fn push_patch(execution_process_id: Uuid, patches: Vec, content_length: usize) { - GeminiStreaming::push_patch(execution_process_id, patches, content_length); - } - - /// Get WAL batches for an execution process, optionally filtering by cursor - pub fn get_wal_batches( - execution_process_id: Uuid, - after_batch_id: Option, - ) -> Option> { - GeminiStreaming::get_wal_batches(execution_process_id, after_batch_id) - } - - /// Clean up WAL when execution process finishes - pub async fn finalize_execution( - pool: &sqlx::SqlitePool, - execution_process_id: Uuid, - final_buffer: &str, - ) { - GeminiStreaming::finalize_execution(pool, execution_process_id, final_buffer).await; - } - - /// Find the best boundary to split a chunk (newline preferred, sentence fallback) - pub fn find_chunk_boundary(buffer: &str, max_size: usize) -> usize { - GeminiStreaming::find_chunk_boundary(buffer, max_size) - } - - /// Conditionally flush accumulated content to database in chunks - pub async fn maybe_flush_chunk( - pool: &sqlx::SqlitePool, - execution_process_id: Uuid, - buffer: &mut String, - config: &GeminiStreamConfig, - ) { - GeminiStreaming::maybe_flush_chunk(pool, execution_process_id, buffer, config).await; - } - - /// Emit JSON patch for current message state - either "replace" for growing message or "add" for new message. 
- fn emit_message_patch( - execution_process_id: Uuid, - current_message: &str, - entry_count: &mut usize, - force_new_message: bool, - ) { - if current_message.is_empty() { - return; - } - - if force_new_message && *entry_count > 0 { - // Start new message: add new entry to array - *entry_count += 1; - let patch_vec = vec![serde_json::json!({ - "op": "add", - "path": format!("/entries/{}", *entry_count - 1), - "value": { - "timestamp": chrono::Utc::now().to_rfc3339(), - "entry_type": {"type": "assistant_message"}, - "content": current_message, - "metadata": null, - } - })]; - - Self::push_patch(execution_process_id, patch_vec, current_message.len()); - } else { - // Growing message: replace current entry - if *entry_count == 0 { - *entry_count = 1; // Initialize first message - } - - let patch_vec = vec![serde_json::json!({ - "op": "replace", - "path": format!("/entries/{}", *entry_count - 1), - "value": { - "timestamp": chrono::Utc::now().to_rfc3339(), - "entry_type": {"type": "assistant_message"}, - "content": current_message, - "metadata": null, - } - })]; - - Self::push_patch(execution_process_id, patch_vec, current_message.len()); - } - } - - /// Emit final content when stream ends - async fn emit_final_content( - execution_process_id: Uuid, - remaining_content: &str, - entry_count: &mut usize, - ) { - if !remaining_content.trim().is_empty() { - Self::emit_message_patch( - execution_process_id, - remaining_content, - entry_count, - false, // Don't force new message for final content - ); - } - } - - async fn load_task( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - ) -> Result { - Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound) - } - - async fn collect_resume_context( - &self, - pool: &sqlx::SqlitePool, - task: &Task, - attempt_id: Uuid, - ) -> Result { - crate::models::task_attempt::TaskAttempt::get_attempt_resume_context( - pool, - attempt_id, - task.id, - task.project_id, - ) - .await - .map_err(ExecutorError::from) - } - - fn build_comprehensive_prompt( - &self, - task: &Task, - resume_context: &crate::models::task_attempt::AttemptResumeContext, - prompt: &str, - ) -> String { - format!( - r#"RESUME CONTEXT FOR CONTINUING TASK -=== TASK INFORMATION === -Project ID: {} -Task ID: {} -Task Title: {} -Task Description: {} -=== EXECUTION HISTORY === -The following is the execution history from this task attempt: -{} -=== CURRENT CHANGES === -The following git diff shows changes made from the base branch to the current state: -```diff -{} -``` -=== CURRENT REQUEST === -{} -=== INSTRUCTIONS === -You are continuing work on the above task. The execution history shows what has been done previously, and the git diff shows the current state of all changes. Please continue from where the previous execution left off, taking into account all the context provided above. 
-"#, - task.project_id, - task.id, - task.title, - task.description - .as_deref() - .unwrap_or("No description provided"), - if resume_context.execution_history.trim().is_empty() { - "(No previous execution history)" - } else { - &resume_context.execution_history - }, - if resume_context.cumulative_diffs.trim().is_empty() { - "(No changes detected)" - } else { - &resume_context.cumulative_diffs - }, - prompt - ) - } - - async fn spawn_process( - &self, - worktree_path: &str, - comprehensive_prompt: &str, - attempt_id: Uuid, - ) -> Result { - tracing::info!( - "Spawning Gemini followup execution for attempt {} with resume context ({} chars)", - attempt_id, - comprehensive_prompt.len() - ); - - let mut command = GeminiExecutor::create_gemini_command(worktree_path); - command.stdin(comprehensive_prompt); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, "Gemini") - .with_context(format!( - "Gemini CLI followup execution with context for attempt {}", - attempt_id - )) - .spawn_error(e) - })?; - - tracing::info!( - "Successfully started Gemini followup process for attempt {}", - attempt_id - ); - - Ok(proc) - } - - /// Format Gemini CLI output by inserting line breaks where periods are directly - /// followed by capital letters (common Gemini CLI formatting issue). - /// Handles both intra-chunk and cross-chunk period-to-capital transitions. - fn format_gemini_output(content: &str, accumulated_message: &str) -> String { - let mut result = String::with_capacity(content.len() + 100); // Reserve some extra space for potential newlines - let chars: Vec = content.chars().collect(); - - // Check for cross-chunk boundary: previous chunk ended with period, current starts with capital - if !accumulated_message.is_empty() && !content.is_empty() { - let ends_with_period = accumulated_message.ends_with('.'); - let starts_with_capital = chars - .first() - .map(|&c| c.is_uppercase() && c.is_alphabetic()) - .unwrap_or(false); - - if ends_with_period && starts_with_capital { - result.push('\n'); - } - } - - // Handle intra-chunk period-to-capital transitions - for i in 0..chars.len() { - result.push(chars[i]); - - // Check if current char is '.' and next char is uppercase letter (no space between) - if chars[i] == '.' && i + 1 < chars.len() { - let next_char = chars[i + 1]; - if next_char.is_uppercase() && next_char.is_alphabetic() { - result.push('\n'); - } - } - } - - result - } - - /// Stream Gemini output with dual-buffer approach: chunks for UI updates, messages for storage. 
- /// - /// **Chunks** (~2KB): Frequent UI updates using "replace" patches for smooth streaming - /// **Messages** (~8KB): Logical boundaries using "add" patches for new entries - /// **Consistent WAL/DB**: Both systems see same message structure via JSON patches - pub async fn stream_gemini_chunked( - mut output: impl tokio::io::AsyncRead + Unpin, - pool: sqlx::SqlitePool, - attempt_id: Uuid, - execution_process_id: Uuid, - ) { - use tokio::io::{AsyncReadExt, BufReader}; - - let chunk_limit = max_chunk_size(); - let display_chunk_size = max_display_size(); // ~2KB for UI updates - let message_boundary_size = max_message_size(); // ~8KB for new message boundaries - let max_latency = std::time::Duration::from_millis(max_latency_ms()); - - let mut reader = BufReader::new(&mut output); - - // Dual buffers: chunk buffer for UI, message buffer for DB - let mut current_message = String::new(); // Current assistant message content - let mut db_buffer = String::new(); // Buffer for database storage (using ChunkStore) - let mut entry_count = 0usize; // Track assistant message entries - - let mut read_buf = vec![0u8; chunk_limit.min(max_chunk_size())]; // Use configurable chunk limit, capped for memory efficiency - let mut last_chunk_emit = Instant::now(); - - // Configuration for WAL and DB management - let config = GeminiStreamConfig::default(); - - tracing::info!( - "Starting dual-buffer Gemini streaming for attempt {} (chunks: {}B, messages: {}B)", - attempt_id, - display_chunk_size, - message_boundary_size - ); - - loop { - match reader.read(&mut read_buf).await { - Ok(0) => { - // EOF: emit final content and flush to database - Self::emit_final_content( - execution_process_id, - ¤t_message, - &mut entry_count, - ) - .await; - - // Flush any remaining database buffer - Self::finalize_execution(&pool, execution_process_id, &db_buffer).await; - break; - } - Ok(n) => { - // Convert bytes to string and apply Gemini-specific formatting - let raw_chunk = String::from_utf8_lossy(&read_buf[..n]); - let formatted_chunk = Self::format_gemini_output(&raw_chunk, ¤t_message); - - // Add to both buffers - current_message.push_str(&formatted_chunk); - db_buffer.push_str(&formatted_chunk); - - // 1. Check for chunk emission (frequent UI updates ~2KB) - let should_emit_chunk = current_message.len() >= display_chunk_size - || (last_chunk_emit.elapsed() >= max_latency - && !current_message.is_empty()); - - if should_emit_chunk { - // Emit "replace" patch for growing message (smooth UI) - Self::emit_message_patch( - execution_process_id, - ¤t_message, - &mut entry_count, - false, // Not forcing new message - ); - last_chunk_emit = Instant::now(); - } - - // 2. 
Check for message boundary (new assistant message ~8KB) - let should_start_new_message = current_message.len() >= message_boundary_size; - - if should_start_new_message { - // Find optimal boundary for new message - let boundary = - Self::find_chunk_boundary(¤t_message, message_boundary_size); - - if boundary > 0 && boundary < current_message.len() { - // Split at boundary: complete current message, start new one - let completed_message = current_message[..boundary].to_string(); - let remaining_content = current_message[boundary..].to_string(); - - // CRITICAL FIX: Only emit "replace" patch to complete current message - // Do NOT emit "add" patch as it shifts existing database entries - Self::emit_message_patch( - execution_process_id, - &completed_message, - &mut entry_count, - false, // Complete current message - ); - - // Store the completed message to database - // This ensures the database gets the completed content at the boundary - Self::maybe_flush_chunk( - &pool, - execution_process_id, - &mut db_buffer, - &config, - ) - .await; - - // Start fresh message with remaining content (no WAL patch yet) - // Next chunk emission will create "replace" patch for entry_count + 1 - current_message = remaining_content; - entry_count += 1; // Move to next entry index for future patches - } - } - - // 3. Flush to database (same boundary detection) - Self::maybe_flush_chunk(&pool, execution_process_id, &mut db_buffer, &config) - .await; - } - Err(e) => { - tracing::error!( - "Error reading stdout for Gemini attempt {}: {}", - attempt_id, - e - ); - break; - } - } - } - - tracing::info!( - "Dual-buffer Gemini streaming completed for attempt {} ({} messages)", - attempt_id, - entry_count - ); - } -} diff --git a/backend/src/executors/gemini/config.rs b/backend/src/executors/gemini/config.rs deleted file mode 100644 index 04675dd5..00000000 --- a/backend/src/executors/gemini/config.rs +++ /dev/null @@ -1,67 +0,0 @@ -//! Gemini executor configuration and environment variable resolution -//! -//! This module contains configuration structures and functions for the Gemini executor, -//! including environment variable resolution for runtime parameters. 
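Before the config struct, a brief sketch of the override contract these resolver helpers follow: each reads a GEMINI_CLI_* variable and silently falls back to the compile-time default when the variable is missing or does not parse. The module name and the 65536 value below are illustrative, and the sketch assumes single-threaded test execution since environment variables are process-global.

```rust
#[cfg(test)]
mod env_override_sketch {
    use super::*;

    // Resolution rule: a missing or unparsable GEMINI_CLI_MAX_CHUNK_SIZE
    // falls back to DEFAULT_MAX_CHUNK_SIZE; a valid integer overrides it.
    #[test]
    fn chunk_size_resolution() {
        std::env::remove_var("GEMINI_CLI_MAX_CHUNK_SIZE");
        assert_eq!(max_chunk_size(), DEFAULT_MAX_CHUNK_SIZE);

        std::env::set_var("GEMINI_CLI_MAX_CHUNK_SIZE", "not-a-number");
        assert_eq!(max_chunk_size(), DEFAULT_MAX_CHUNK_SIZE);

        std::env::set_var("GEMINI_CLI_MAX_CHUNK_SIZE", "65536");
        assert_eq!(max_chunk_size(), 65536);

        std::env::remove_var("GEMINI_CLI_MAX_CHUNK_SIZE");
    }
}
```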
- -/// Configuration for Gemini WAL compaction and DB chunking -#[derive(Debug, Clone)] -pub struct GeminiStreamConfig { - pub max_db_chunk_size: usize, - pub wal_compaction_threshold: usize, - pub wal_compaction_size: usize, - pub wal_compaction_interval_ms: u64, - pub max_wal_batches: usize, - pub max_wal_total_size: usize, -} - -impl Default for GeminiStreamConfig { - fn default() -> Self { - Self { - max_db_chunk_size: max_message_size(), - wal_compaction_threshold: 40, - wal_compaction_size: max_message_size() * 2, - wal_compaction_interval_ms: 30000, - max_wal_batches: 100, - max_wal_total_size: 1024 * 1024, // 1MB per process - } - } -} - -// Constants for configuration -/// Size-based streaming configuration -pub const DEFAULT_MAX_CHUNK_SIZE: usize = 5120; // bytes (read buffer size) -pub const DEFAULT_MAX_DISPLAY_SIZE: usize = 2000; // bytes (SSE emission threshold for smooth UI) -pub const DEFAULT_MAX_MESSAGE_SIZE: usize = 8000; // bytes (message boundary for new assistant entries) -pub const DEFAULT_MAX_LATENCY_MS: u64 = 50; // milliseconds - -/// Resolve MAX_CHUNK_SIZE from env or fallback -pub fn max_chunk_size() -> usize { - std::env::var("GEMINI_CLI_MAX_CHUNK_SIZE") - .ok() - .and_then(|v| v.parse::().ok()) - .unwrap_or(DEFAULT_MAX_CHUNK_SIZE) -} - -/// Resolve MAX_DISPLAY_SIZE from env or fallback -pub fn max_display_size() -> usize { - std::env::var("GEMINI_CLI_MAX_DISPLAY_SIZE") - .ok() - .and_then(|v| v.parse::().ok()) - .unwrap_or(DEFAULT_MAX_DISPLAY_SIZE) -} - -/// Resolve MAX_MESSAGE_SIZE from env or fallback -pub fn max_message_size() -> usize { - std::env::var("GEMINI_CLI_MAX_MESSAGE_SIZE") - .ok() - .and_then(|v| v.parse::().ok()) - .unwrap_or(DEFAULT_MAX_MESSAGE_SIZE) -} - -/// Resolve MAX_LATENCY_MS from env or fallback -pub fn max_latency_ms() -> u64 { - std::env::var("GEMINI_CLI_MAX_LATENCY_MS") - .ok() - .and_then(|v| v.parse::().ok()) - .unwrap_or(DEFAULT_MAX_LATENCY_MS) -} diff --git a/backend/src/executors/gemini/streaming.rs b/backend/src/executors/gemini/streaming.rs deleted file mode 100644 index 9fcee6f8..00000000 --- a/backend/src/executors/gemini/streaming.rs +++ /dev/null @@ -1,363 +0,0 @@ -//! Gemini streaming functionality with WAL and chunked storage -//! -//! This module provides real-time streaming support for Gemini execution processes -//! with Write-Ahead Log (WAL) capabilities for resumable streaming. - -use std::{collections::HashMap, sync::Mutex, time::Instant}; - -use json_patch::{patch, Patch, PatchOperation}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use uuid::Uuid; - -use super::config::GeminiStreamConfig; -use crate::{ - executor::{NormalizedEntry, NormalizedEntryType}, - models::execution_process::ExecutionProcess, -}; - -lazy_static::lazy_static! 
{ - /// Write-Ahead Log: Maps execution_process_id → WAL state (Gemini-specific) - static ref GEMINI_WAL_MAP: Mutex> = Mutex::new(HashMap::new()); -} - -/// A batch of JSON patches for Gemini streaming -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct GeminiPatchBatch { - /// Monotonic batch identifier for cursor-based streaming - pub batch_id: u64, - /// Array of JSON Patch operations (RFC 6902 format) - pub patches: Vec, - /// ISO 8601 timestamp when this batch was created - pub timestamp: String, - /// Total content length after applying all patches in this batch - pub content_length: usize, -} - -/// WAL state for a single Gemini execution process -#[derive(Debug)] -pub struct GeminiWalState { - pub batches: Vec, - pub total_content_length: usize, - pub next_batch_id: u64, - pub last_compaction: Instant, - pub last_db_flush: Instant, - pub last_access: Instant, -} - -impl Default for GeminiWalState { - fn default() -> Self { - Self::new() - } -} - -impl GeminiWalState { - pub fn new() -> Self { - let now = Instant::now(); - Self { - batches: Vec::new(), - total_content_length: 0, - next_batch_id: 1, - last_compaction: now, - last_db_flush: now, - last_access: now, - } - } -} - -/// Gemini streaming utilities -pub struct GeminiStreaming; - -impl GeminiStreaming { - /// Push patches to the Gemini WAL system - pub fn push_patch(execution_process_id: Uuid, patches: Vec, content_length: usize) { - let mut wal_map = GEMINI_WAL_MAP.lock().unwrap(); - let wal_state = wal_map.entry(execution_process_id).or_default(); - let config = GeminiStreamConfig::default(); - - // Update access time for orphan cleanup - wal_state.last_access = Instant::now(); - - // Enforce size limits - force compaction instead of clearing to prevent data loss - if wal_state.batches.len() >= config.max_wal_batches - || wal_state.total_content_length >= config.max_wal_total_size - { - tracing::warn!( - "WAL size limits exceeded for process {} (batches: {}, size: {}), forcing compaction", - execution_process_id, - wal_state.batches.len(), - wal_state.total_content_length - ); - - // Force compaction to preserve data instead of losing it - Self::compact_wal(wal_state); - - // If still over limits after compaction, keep only the most recent batches - if wal_state.batches.len() >= config.max_wal_batches { - let keep_count = config.max_wal_batches / 2; // Keep half - let remove_count = wal_state.batches.len() - keep_count; - wal_state.batches.drain(..remove_count); - tracing::warn!( - "After compaction still over limit, kept {} most recent batches", - keep_count - ); - } - } - - let batch = GeminiPatchBatch { - batch_id: wal_state.next_batch_id, - patches, - timestamp: chrono::Utc::now().to_rfc3339(), - content_length, - }; - - wal_state.next_batch_id += 1; - wal_state.batches.push(batch); - wal_state.total_content_length = content_length; - - // Check if compaction is needed - if Self::should_compact(wal_state, &config) { - Self::compact_wal(wal_state); - } - } - - /// Get WAL batches for an execution process, optionally filtering by cursor - pub fn get_wal_batches( - execution_process_id: Uuid, - after_batch_id: Option, - ) -> Option> { - GEMINI_WAL_MAP.lock().ok().and_then(|mut wal_map| { - wal_map.get_mut(&execution_process_id).map(|wal_state| { - // Update access time when WAL is retrieved - wal_state.last_access = Instant::now(); - - match after_batch_id { - Some(cursor) => { - // Return only batches with batch_id > cursor - wal_state - .batches - .iter() - .filter(|batch| batch.batch_id > cursor) - .cloned() - 
.collect() - } - None => { - // Return all batches - wal_state.batches.clone() - } - } - }) - }) - } - - /// Clean up WAL when execution process finishes - pub async fn finalize_execution( - pool: &sqlx::SqlitePool, - execution_process_id: Uuid, - final_buffer: &str, - ) { - // Flush any remaining content to database - if !final_buffer.trim().is_empty() { - Self::store_chunk_to_db(pool, execution_process_id, final_buffer).await; - } - - // Remove WAL entry - Self::purge_wal(execution_process_id); - } - - /// Remove WAL entry for a specific execution process - pub fn purge_wal(execution_process_id: Uuid) { - if let Ok(mut wal_map) = GEMINI_WAL_MAP.lock() { - wal_map.remove(&execution_process_id); - tracing::debug!( - "Cleaned up WAL for execution process {}", - execution_process_id - ); - } - } - - /// Find the best boundary to split a chunk (newline preferred, sentence fallback) - pub fn find_chunk_boundary(buffer: &str, max_size: usize) -> usize { - if buffer.len() <= max_size { - return buffer.len(); - } - - let search_window = &buffer[..max_size]; - - // First preference: newline boundary - if let Some(pos) = search_window.rfind('\n') { - return pos + 1; // Include the newline - } - - // Second preference: sentence boundary (., !, ?) - if let Some(pos) = search_window.rfind(&['.', '!', '?'][..]) { - if pos + 1 < search_window.len() { - return pos + 1; - } - } - - // Fallback: word boundary - if let Some(pos) = search_window.rfind(' ') { - return pos + 1; - } - - // Last resort: split at max_size - max_size - } - - /// Store a chunk to the database - async fn store_chunk_to_db(pool: &sqlx::SqlitePool, execution_process_id: Uuid, content: &str) { - if content.trim().is_empty() { - return; - } - - let entry = NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type: NormalizedEntryType::AssistantMessage, - content: content.to_string(), - metadata: None, - }; - - match serde_json::to_string(&entry) { - Ok(jsonl_line) => { - let formatted_line = format!("{}\n", jsonl_line); - if let Err(e) = - ExecutionProcess::append_stdout(pool, execution_process_id, &formatted_line) - .await - { - tracing::error!("Failed to store chunk to database: {}", e); - } else { - tracing::debug!("Stored {}B chunk to database", content.len()); - } - } - Err(e) => { - tracing::error!("Failed to serialize chunk: {}", e); - } - } - } - - /// Conditionally flush accumulated content to database in chunks - pub async fn maybe_flush_chunk( - pool: &sqlx::SqlitePool, - execution_process_id: Uuid, - buffer: &mut String, - config: &GeminiStreamConfig, - ) { - if buffer.len() < config.max_db_chunk_size { - return; - } - - // Find the best split point (newline preferred, sentence boundary fallback) - let split_point = Self::find_chunk_boundary(buffer, config.max_db_chunk_size); - - if split_point > 0 { - let chunk = buffer[..split_point].to_string(); - buffer.drain(..split_point); - - // Store chunk to database - Self::store_chunk_to_db(pool, execution_process_id, &chunk).await; - - // Update WAL flush time - if let Ok(mut wal_map) = GEMINI_WAL_MAP.lock() { - if let Some(wal_state) = wal_map.get_mut(&execution_process_id) { - wal_state.last_db_flush = Instant::now(); - } - } - } - } - - /// Check if WAL compaction is needed based on configured thresholds - fn should_compact(wal_state: &GeminiWalState, config: &GeminiStreamConfig) -> bool { - wal_state.batches.len() >= config.wal_compaction_threshold - || wal_state.total_content_length >= config.wal_compaction_size - || 
wal_state.last_compaction.elapsed().as_millis() as u64 - >= config.wal_compaction_interval_ms - } - - /// Compact WAL by losslessly merging older patches into a snapshot - fn compact_wal(wal_state: &mut GeminiWalState) { - // Need at least a few batches to make compaction worthwhile - if wal_state.batches.len() <= 5 { - return; - } - - // Keep the most recent 3 batches for smooth incremental updates - let recent_count = 3; - let compact_count = wal_state.batches.len() - recent_count; - - if compact_count <= 1 { - return; // Not enough to compact - } - - // Start with an empty conversation and apply all patches sequentially - let mut conversation_value = serde_json::json!({ - "entries": [], - "session_id": null, - "executor_type": "gemini", - "prompt": null, - "summary": null - }); - - let mut total_content_length = 0; - let oldest_batch_id = wal_state.batches[0].batch_id; - let compact_timestamp = chrono::Utc::now().to_rfc3339(); - - // Apply patches from oldest to newest (excluding recent ones) using json-patch crate - for batch in &wal_state.batches[..compact_count] { - // Convert Vec to json_patch::Patch - let patch_operations: Result, _> = batch - .patches - .iter() - .map(|p| serde_json::from_value(p.clone())) - .collect(); - - match patch_operations { - Ok(ops) => { - let patch_obj = Patch(ops); - if let Err(e) = patch(&mut conversation_value, &patch_obj) { - tracing::warn!("Failed to apply patch during compaction: {}, skipping", e); - continue; - } - } - Err(e) => { - tracing::warn!("Failed to deserialize patch operations: {}, skipping", e); - continue; - } - } - total_content_length = batch.content_length; // Use the final length - } - - // Extract the final entries array for the snapshot - let final_entries = conversation_value - .get("entries") - .and_then(|v| v.as_array()) - .cloned() - .unwrap_or_default(); - - // Create a single snapshot patch that replaces the entire entries array - let snapshot_patch = GeminiPatchBatch { - batch_id: oldest_batch_id, // Use the oldest batch_id to maintain cursor compatibility - patches: vec![serde_json::json!({ - "op": "replace", - "path": "/entries", - "value": final_entries - })], - timestamp: compact_timestamp, - content_length: total_content_length, - }; - - // Replace old batches with snapshot + keep recent batches - let mut new_batches = vec![snapshot_patch]; - new_batches.extend_from_slice(&wal_state.batches[compact_count..]); - wal_state.batches = new_batches; - - wal_state.last_compaction = Instant::now(); - - tracing::info!( - "Losslessly compacted WAL: {} batches → {} (1 snapshot + {} recent), preserving all content", - compact_count + recent_count, - wal_state.batches.len(), - recent_count - ); - } -} diff --git a/backend/src/executors/mod.rs b/backend/src/executors/mod.rs deleted file mode 100644 index d3bb5c71..00000000 --- a/backend/src/executors/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -pub mod aider; -pub mod amp; -pub mod ccr; -pub mod charm_opencode; -pub mod claude; -pub mod cleanup_script; -pub mod codex; -pub mod dev_server; -pub mod echo; -pub mod gemini; -pub mod setup_script; -pub mod sst_opencode; - -pub use aider::AiderExecutor; -pub use amp::AmpExecutor; -pub use ccr::CCRExecutor; -pub use charm_opencode::CharmOpencodeExecutor; -pub use claude::ClaudeExecutor; -pub use cleanup_script::CleanupScriptExecutor; -pub use codex::CodexExecutor; -pub use dev_server::DevServerExecutor; -pub use echo::EchoExecutor; -pub use gemini::GeminiExecutor; -pub use setup_script::SetupScriptExecutor; -pub use 
sst_opencode::SstOpencodeExecutor; diff --git a/backend/src/executors/setup_script.rs b/backend/src/executors/setup_script.rs deleted file mode 100644 index 562ff6b6..00000000 --- a/backend/src/executors/setup_script.rs +++ /dev/null @@ -1,127 +0,0 @@ -use async_trait::async_trait; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{Executor, ExecutorError}, - models::{project::Project, task::Task}, - utils::shell::get_shell_command, -}; - -/// Executor for running project setup scripts -pub struct SetupScriptExecutor { - pub script: String, -} - -impl SetupScriptExecutor { - pub fn new(script: String) -> Self { - Self { script } - } -} - -#[async_trait] -impl Executor for SetupScriptExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Validate the task and project exist - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; - - let _project = Project::find_by_id(pool, task.project_id) - .await? - .ok_or(ExecutorError::TaskNotFound)?; // Reuse TaskNotFound for simplicity - - let (shell_cmd, shell_arg) = get_shell_command(); - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&self.script) - .working_dir(worktree_path); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, "SetupScript") - .with_task(task_id, Some(task.title.clone())) - .with_context("Setup script execution") - .spawn_error(e) - })?; - - Ok(proc) - } - - /// Normalize setup script logs into a readable format - fn normalize_logs( - &self, - logs: &str, - _worktree_path: &str, - ) -> Result { - let mut entries = Vec::new(); - - // Add script command as first entry - entries.push(crate::executor::NormalizedEntry { - timestamp: None, - entry_type: crate::executor::NormalizedEntryType::SystemMessage, - content: format!("Executing setup script:\n{}", self.script), - metadata: None, - }); - - // Process the logs - split by lines and create entries - if !logs.trim().is_empty() { - let lines: Vec<&str> = logs.lines().collect(); - let mut current_chunk = String::new(); - - for line in lines { - current_chunk.push_str(line); - current_chunk.push('\n'); - - // Create entry for every 10 lines or when we encounter an error-like line - if current_chunk.lines().count() >= 10 - || line.to_lowercase().contains("error") - || line.to_lowercase().contains("failed") - || line.to_lowercase().contains("exception") - { - let entry_type = if line.to_lowercase().contains("error") - || line.to_lowercase().contains("failed") - || line.to_lowercase().contains("exception") - { - crate::executor::NormalizedEntryType::ErrorMessage - } else { - crate::executor::NormalizedEntryType::SystemMessage - }; - - entries.push(crate::executor::NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type, - content: current_chunk.trim().to_string(), - metadata: None, - }); - - current_chunk.clear(); - } - } - - // Add any remaining content - if !current_chunk.trim().is_empty() { - entries.push(crate::executor::NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type: crate::executor::NormalizedEntryType::SystemMessage, - content: current_chunk.trim().to_string(), - metadata: None, - }); - } - } - - Ok(crate::executor::NormalizedConversation { - entries, - session_id: None, - executor_type: "setup-script".to_string(), - prompt: Some(self.script.clone()), - summary: 
None, - }) - } -} diff --git a/backend/src/executors/sst_opencode.rs b/backend/src/executors/sst_opencode.rs deleted file mode 100644 index 2a27e296..00000000 --- a/backend/src/executors/sst_opencode.rs +++ /dev/null @@ -1,694 +0,0 @@ -use async_trait::async_trait; -use serde_json::{json, Value}; -use tokio::io::{AsyncBufReadExt, BufReader}; -use uuid::Uuid; - -use crate::{ - command_runner::{CommandProcess, CommandRunner}, - executor::{Executor, ExecutorError, NormalizedConversation, NormalizedEntry}, - models::{execution_process::ExecutionProcess, executor_session::ExecutorSession, task::Task}, - utils::shell::get_shell_command, -}; - -// Sub-modules for utilities -pub mod filter; -pub mod tools; - -use self::{ - filter::{parse_session_id_from_line, tool_usage_regex, OpenCodeFilter}, - tools::{determine_action_type, generate_tool_content, normalize_tool_name}, -}; - -struct Content { - pub stdout: Option, - pub stderr: Option, -} - -/// Process a single line for session extraction and content formatting -async fn process_line_for_content( - line: &str, - session_extracted: &mut bool, - worktree_path: &str, - pool: &sqlx::SqlitePool, - execution_process_id: uuid::Uuid, -) -> Option { - if !*session_extracted { - if let Some(session_id) = parse_session_id_from_line(line) { - if let Err(e) = - ExecutorSession::update_session_id(pool, execution_process_id, &session_id).await - { - tracing::error!( - "Failed to update session ID for execution process {}: {}", - execution_process_id, - e - ); - } else { - tracing::info!( - "Updated session ID {} for execution process {}", - session_id, - execution_process_id - ); - *session_extracted = true; - } - - // Don't return any content for session lines - return None; - } - } - - // Check if line is noise - if so, discard it - if OpenCodeFilter::is_noise(line) { - return None; - } - - if OpenCodeFilter::is_stderr(line) { - // If it's stderr, we don't need to process it further - return Some(Content { - stdout: None, - stderr: Some(line.to_string()), - }); - } - - // Format clean content as normalized JSON - let formatted = format_opencode_content_as_normalized_json(line, worktree_path); - Some(Content { - stdout: Some(formatted), - stderr: None, - }) -} - -/// Stream stderr from OpenCode process with filtering to separate clean output from noise -pub async fn stream_opencode_stderr_to_db( - output: impl tokio::io::AsyncRead + Unpin, - pool: sqlx::SqlitePool, - attempt_id: Uuid, - execution_process_id: Uuid, - worktree_path: String, -) { - let mut reader = BufReader::new(output); - let mut line = String::new(); - let mut session_extracted = false; - - loop { - line.clear(); - - match reader.read_line(&mut line).await { - Ok(0) => break, // EOF - Ok(_) => { - line = line.trim_end_matches(['\r', '\n']).to_string(); - - let content = process_line_for_content( - &line, - &mut session_extracted, - &worktree_path, - &pool, - execution_process_id, - ) - .await; - - if let Some(Content { stdout, stderr }) = content { - tracing::debug!( - "Processed OpenCode content for attempt {}: stdout={:?} stderr={:?}", - attempt_id, - stdout, - stderr, - ); - if let Err(e) = ExecutionProcess::append_output( - &pool, - execution_process_id, - stdout.as_deref(), - stderr.as_deref(), - ) - .await - { - tracing::error!( - "Failed to write OpenCode line for attempt {}: {}", - attempt_id, - e - ); - } - } - } - Err(e) => { - tracing::error!("Error reading stderr for attempt {}: {}", attempt_id, e); - break; - } - } - } -} - -/// Format OpenCode clean content as normalized JSON 
entries for direct database storage -fn format_opencode_content_as_normalized_json(content: &str, worktree_path: &str) -> String { - let mut results = Vec::new(); - let base_timestamp = chrono::Utc::now(); - let mut entry_counter = 0u32; - - for line in content.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Generate unique timestamp for each entry by adding microseconds - let unique_timestamp = - base_timestamp + chrono::Duration::microseconds(entry_counter as i64); - let timestamp_str = unique_timestamp.to_rfc3339_opts(chrono::SecondsFormat::Micros, true); - entry_counter += 1; - - // Try to parse as existing JSON first - if let Ok(parsed_json) = serde_json::from_str::(trimmed) { - results.push(parsed_json.to_string()); - continue; - } - - // Strip ANSI codes before processing - let cleaned = OpenCodeFilter::strip_ansi_codes(trimmed); - let cleaned_trim = cleaned.trim(); - - if cleaned_trim.is_empty() { - continue; - } - - // Check for tool usage patterns after ANSI stripping: | ToolName {...} - if let Some(captures) = tool_usage_regex().captures(cleaned_trim) { - if let (Some(tool_name), Some(tool_input)) = (captures.get(1), captures.get(2)) { - // Parse tool input - let input: serde_json::Value = - serde_json::from_str(tool_input.as_str()).unwrap_or(serde_json::Value::Null); - - // Normalize tool name for frontend compatibility (e.g., "Todo" → "todowrite") - let normalized_tool_name = normalize_tool_name(tool_name.as_str()); - - let normalized_entry = json!({ - "timestamp": timestamp_str, - "entry_type": { - "type": "tool_use", - "tool_name": normalized_tool_name, - "action_type": determine_action_type(&normalized_tool_name, &input, worktree_path) - }, - "content": generate_tool_content(&normalized_tool_name, &input, worktree_path), - "metadata": input - }); - results.push(normalized_entry.to_string()); - continue; - } - } - - // Regular assistant message - let normalized_entry = json!({ - "timestamp": timestamp_str, - "entry_type": { - "type": "assistant_message" - }, - "content": cleaned_trim, - "metadata": null - }); - results.push(normalized_entry.to_string()); - } - - // Ensure each JSON entry is on its own line - results.join("\n") + "\n" -} - -/// An executor that uses SST Opencode CLI to process tasks -pub struct SstOpencodeExecutor { - executor_type: String, - command: String, -} - -impl Default for SstOpencodeExecutor { - fn default() -> Self { - Self::new() - } -} - -impl SstOpencodeExecutor { - /// Create a new SstOpencodeExecutor with default settings - pub fn new() -> Self { - Self { - executor_type: "SST Opencode".to_string(), - command: "npx -y opencode-ai@latest run --print-logs".to_string(), - } - } -} - -/// An executor that resumes an SST Opencode session - -#[async_trait] -impl Executor for SstOpencodeExecutor { - async fn spawn( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - worktree_path: &str, - ) -> Result { - // Get the task to fetch its description - let task = Task::find_by_id(pool, task_id) - .await? 
- .ok_or(ExecutorError::TaskNotFound)?; - - let prompt = if let Some(task_description) = task.description { - format!( - r#"project_id: {} - -Task title: {} -Task description: {}"#, - task.project_id, task.title, task_description - ) - } else { - format!( - r#"project_id: {} - -Task title: {}"#, - task.project_id, task.title - ) - }; - - // Use shell command for cross-platform compatibility - let (shell_cmd, shell_arg) = get_shell_command(); - let opencode_command = &self.command; - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(opencode_command) - .stdin(&prompt) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1"); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_task(task_id, Some(task.title.clone())) - .with_context(format!("{} CLI execution for new task", self.executor_type)) - .spawn_error(e) - })?; - - Ok(proc) - } - - /// Execute with OpenCode filtering for stderr - async fn execute_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - worktree_path: &str, - ) -> Result { - let mut proc = self.spawn(pool, task_id, worktree_path).await?; - - // Get stderr stream from CommandProcess for OpenCode filtering - let mut stream = proc - .stream() - .await - .expect("Failed to get streams from command process"); - let stderr = stream - .stderr - .take() - .expect("Failed to get stderr from command stream"); - - // Start OpenCode stderr filtering task - let pool_clone = pool.clone(); - let worktree_path_clone = worktree_path.to_string(); - tokio::spawn(stream_opencode_stderr_to_db( - stderr, - pool_clone, - attempt_id, - execution_process_id, - worktree_path_clone, - )); - - Ok(proc) - } - - fn normalize_logs( - &self, - logs: &str, - _worktree_path: &str, - ) -> Result { - let mut entries = Vec::new(); - - for line in logs.lines() { - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Simple passthrough: directly deserialize normalized JSON entries - if let Ok(entry) = serde_json::from_str::(trimmed) { - entries.push(entry); - } - } - - Ok(NormalizedConversation { - entries, - session_id: None, // Session ID is stored directly in the database - executor_type: "sst-opencode".to_string(), - prompt: None, - summary: None, - }) - } - - /// Execute follow-up with OpenCode filtering for stderr - async fn execute_followup_streaming( - &self, - pool: &sqlx::SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - execution_process_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - let mut proc = self - .spawn_followup(pool, task_id, session_id, prompt, worktree_path) - .await?; - - // Get stderr stream from CommandProcess for OpenCode filtering - let mut stream = proc - .stream() - .await - .expect("Failed to get streams from command process"); - let stderr = stream - .stderr - .take() - .expect("Failed to get stderr from command stream"); - - // Start OpenCode stderr filtering task - let pool_clone = pool.clone(); - let worktree_path_clone = worktree_path.to_string(); - tokio::spawn(stream_opencode_stderr_to_db( - stderr, - pool_clone, - attempt_id, - execution_process_id, - worktree_path_clone, - )); - - Ok(proc) - } - - async fn spawn_followup( - &self, - _pool: &sqlx::SqlitePool, - _task_id: Uuid, - session_id: &str, - prompt: &str, - worktree_path: &str, - ) -> Result { - // Use shell command for cross-platform compatibility - let 
(shell_cmd, shell_arg) = get_shell_command(); - let opencode_command = format!("{} --session {}", self.command, session_id); - - let mut command = CommandRunner::new(); - command - .command(shell_cmd) - .arg(shell_arg) - .arg(&opencode_command) - .stdin(prompt) - .working_dir(worktree_path) - .env("NODE_NO_WARNINGS", "1"); - - let proc = command.start().await.map_err(|e| { - crate::executor::SpawnContext::from_command(&command, &self.executor_type) - .with_context(format!( - "{} CLI followup execution for session {}", - self.executor_type, session_id - )) - .spawn_error(e) - })?; - - Ok(proc) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - executor::ActionType, - executors::sst_opencode::{ - format_opencode_content_as_normalized_json, SstOpencodeExecutor, - }, - }; - - // Test the actual format that comes from the database (normalized JSON entries) - #[test] - fn test_normalize_logs_with_database_format() { - let executor = SstOpencodeExecutor::new(); - - // This is what the database should contain after our streaming function processes it - let logs = r#"{"timestamp":"2025-07-16T18:04:00Z","entry_type":{"type":"tool_use","tool_name":"read","action_type":{"action":"file_read","path":"hello.js"}},"content":"`hello.js`","metadata":{"filePath":"/path/to/repo/hello.js"}} -{"timestamp":"2025-07-16T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"I'll read the hello.js file to see its current contents.","metadata":null} -{"timestamp":"2025-07-16T18:04:02Z","entry_type":{"type":"tool_use","tool_name":"bash","action_type":{"action":"command_run","command":"ls -la"}},"content":"`ls -la`","metadata":{"command":"ls -la"}} -{"timestamp":"2025-07-16T18:04:03Z","entry_type":{"type":"assistant_message"},"content":"The file exists and contains a hello world function.","metadata":null}"#; - - let result = executor.normalize_logs(logs, "/path/to/repo").unwrap(); - - assert_eq!(result.entries.len(), 4); - - // First entry: file read tool use - assert!(matches!( - result.entries[0].entry_type, - crate::executor::NormalizedEntryType::ToolUse { .. } - )); - if let crate::executor::NormalizedEntryType::ToolUse { - tool_name, - action_type, - } = &result.entries[0].entry_type - { - assert_eq!(tool_name, "read"); - assert!(matches!(action_type, ActionType::FileRead { .. })); - } - assert_eq!(result.entries[0].content, "`hello.js`"); - assert!(result.entries[0].timestamp.is_some()); - - // Second entry: assistant message - assert!(matches!( - result.entries[1].entry_type, - crate::executor::NormalizedEntryType::AssistantMessage - )); - assert!(result.entries[1].content.contains("read the hello.js file")); - - // Third entry: bash tool use - assert!(matches!( - result.entries[2].entry_type, - crate::executor::NormalizedEntryType::ToolUse { .. } - )); - if let crate::executor::NormalizedEntryType::ToolUse { - tool_name, - action_type, - } = &result.entries[2].entry_type - { - assert_eq!(tool_name, "bash"); - assert!(matches!(action_type, ActionType::CommandRun { .. 
})); - } - - // Fourth entry: assistant message - assert!(matches!( - result.entries[3].entry_type, - crate::executor::NormalizedEntryType::AssistantMessage - )); - assert!(result.entries[3].content.contains("The file exists")); - } - - #[test] - fn test_normalize_logs_with_session_id() { - let executor = SstOpencodeExecutor::new(); - - // Test session ID in JSON metadata - current implementation always returns None for session_id - let logs = r#"{"timestamp":"2025-07-16T18:04:00Z","entry_type":{"type":"assistant_message"},"content":"Session started","metadata":null,"session_id":"ses_abc123"} -{"timestamp":"2025-07-16T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"Hello world","metadata":null}"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - assert_eq!(result.session_id, None); // Session ID is stored directly in the database - assert_eq!(result.entries.len(), 2); - } - - #[test] - fn test_normalize_logs_legacy_fallback() { - let executor = SstOpencodeExecutor::new(); - - // Current implementation doesn't handle legacy format - it only parses JSON entries - let logs = r#"INFO session=ses_legacy123 starting -| Read {"filePath":"/path/to/file.js"} -This is a plain assistant message"#; - - let result = executor.normalize_logs(logs, "/tmp").unwrap(); - - // Session ID is always None in current implementation - assert_eq!(result.session_id, None); - - // Current implementation skips non-JSON lines, so no entries will be parsed - assert_eq!(result.entries.len(), 0); - } - - #[test] - fn test_format_opencode_content_as_normalized_json() { - let content = r#"| Read {"filePath":"/path/to/repo/hello.js"} -I'll read this file to understand its contents. -| bash {"command":"ls -la"} -The file listing shows several items."#; - - let result = format_opencode_content_as_normalized_json(content, "/path/to/repo"); - let lines: Vec<&str> = result - .split('\n') - .filter(|line| !line.trim().is_empty()) - .collect(); - - // Should have 4 entries (2 tool uses + 2 assistant messages) - assert_eq!(lines.len(), 4); - - // Parse all entries and verify unique timestamps - let mut timestamps = Vec::new(); - for line in &lines { - let json: serde_json::Value = serde_json::from_str(line).unwrap(); - let timestamp = json["timestamp"].as_str().unwrap().to_string(); - timestamps.push(timestamp); - } - - // Verify all timestamps are unique (no duplicates) - let mut unique_timestamps = timestamps.clone(); - unique_timestamps.sort(); - unique_timestamps.dedup(); - assert_eq!( - timestamps.len(), - unique_timestamps.len(), - "All timestamps should be unique" - ); - - // Parse the first line (should be read tool use - normalized to lowercase) - let first_json: serde_json::Value = serde_json::from_str(lines[0]).unwrap(); - assert_eq!(first_json["entry_type"]["type"], "tool_use"); - assert_eq!(first_json["entry_type"]["tool_name"], "read"); - assert_eq!(first_json["content"], "`hello.js`"); - - // Parse the second line (should be assistant message) - let second_json: serde_json::Value = serde_json::from_str(lines[1]).unwrap(); - assert_eq!(second_json["entry_type"]["type"], "assistant_message"); - assert!(second_json["content"] - .as_str() - .unwrap() - .contains("read this file")); - - // Parse the third line (should be bash tool use) - let third_json: serde_json::Value = serde_json::from_str(lines[2]).unwrap(); - assert_eq!(third_json["entry_type"]["type"], "tool_use"); - assert_eq!(third_json["entry_type"]["tool_name"], "bash"); - assert_eq!(third_json["content"], "`ls -la`"); - - // 
Verify timestamps include microseconds for uniqueness - for timestamp in timestamps { - assert!( - timestamp.contains('.'), - "Timestamp should include microseconds: {}", - timestamp - ); - } - } - - #[test] - fn test_format_opencode_content_todo_operations() { - let content = r#"| TodoWrite {"todos":[{"id":"1","content":"Fix bug","status":"completed","priority":"high"},{"id":"2","content":"Add feature","status":"in_progress","priority":"medium"}]}"#; - - let result = format_opencode_content_as_normalized_json(content, "/tmp"); - let json: serde_json::Value = serde_json::from_str(&result).unwrap(); - - assert_eq!(json["entry_type"]["type"], "tool_use"); - assert_eq!(json["entry_type"]["tool_name"], "todowrite"); // Normalized from "TodoWrite" - assert_eq!(json["entry_type"]["action_type"]["action"], "other"); // Changed from task_create to other - - // Should contain formatted todo list - let content_str = json["content"].as_str().unwrap(); - assert!(content_str.contains("TODO List:")); - assert!(content_str.contains("✅ Fix bug (high)")); - assert!(content_str.contains("🔄 Add feature (medium)")); - } - - #[test] - fn test_format_opencode_content_todo_tool() { - // Test the "Todo" tool (case-sensitive, different from todowrite/todoread) - let content = r#"| Todo {"todos":[{"id":"1","content":"Review code","status":"pending","priority":"high"},{"id":"2","content":"Write tests","status":"in_progress","priority":"low"}]}"#; - - let result = format_opencode_content_as_normalized_json(content, "/tmp"); - let json: serde_json::Value = serde_json::from_str(&result).unwrap(); - - assert_eq!(json["entry_type"]["type"], "tool_use"); - assert_eq!(json["entry_type"]["tool_name"], "todowrite"); // Normalized from "Todo" - assert_eq!(json["entry_type"]["action_type"]["action"], "other"); // Changed from task_create to other - - // Should contain formatted todo list with proper emojis - let content_str = json["content"].as_str().unwrap(); - assert!(content_str.contains("TODO List:")); - assert!(content_str.contains("⏳ Review code (high)")); - assert!(content_str.contains("🔄 Write tests (low)")); - } - - #[test] - fn test_opencode_filter_noise_detection() { - use crate::executors::sst_opencode::filter::OpenCodeFilter; - - // Test noise detection - assert!(OpenCodeFilter::is_noise("")); - assert!(OpenCodeFilter::is_noise(" ")); - assert!(OpenCodeFilter::is_noise("█▀▀█ █▀▀█ Banner")); - assert!(OpenCodeFilter::is_noise("@ anthropic/claude-sonnet-4")); - assert!(OpenCodeFilter::is_noise("~ https://opencode.ai/s/abc123")); - assert!(OpenCodeFilter::is_noise("DEBUG some debug info")); - assert!(OpenCodeFilter::is_noise("INFO session info")); - assert!(OpenCodeFilter::is_noise("┌─────────────────┐")); - - // Test clean content detection (not noise) - assert!(!OpenCodeFilter::is_noise("| Read {\"file\":\"test.js\"}")); - assert!(!OpenCodeFilter::is_noise("Assistant response text")); - assert!(!OpenCodeFilter::is_noise("{\"type\":\"content\"}")); - assert!(!OpenCodeFilter::is_noise("session=abc123 started")); - assert!(!OpenCodeFilter::is_noise("Normal conversation text")); - } - - #[test] - fn test_normalize_logs_edge_cases() { - let executor = SstOpencodeExecutor::new(); - - // Empty content - let result = executor.normalize_logs("", "/tmp").unwrap(); - assert_eq!(result.entries.len(), 0); - - // Only whitespace - let result = executor.normalize_logs(" \n\t\n ", "/tmp").unwrap(); - assert_eq!(result.entries.len(), 0); - - // Malformed JSON (current implementation skips invalid JSON) - let malformed = 
r#"{"timestamp":"2025-01-16T18:04:00Z","content":"incomplete"#; - let result = executor.normalize_logs(malformed, "/tmp").unwrap(); - assert_eq!(result.entries.len(), 0); // Current implementation skips invalid JSON - - // Mixed valid and invalid JSON - let mixed = r#"{"timestamp":"2025-01-16T18:04:00Z","entry_type":{"type":"assistant_message"},"content":"Valid entry","metadata":null} -Invalid line that's not JSON -{"timestamp":"2025-01-16T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"Another valid entry","metadata":null}"#; - let result = executor.normalize_logs(mixed, "/tmp").unwrap(); - assert_eq!(result.entries.len(), 2); // Only valid JSON entries are parsed - } - - #[test] - fn test_ansi_code_stripping() { - use crate::executors::sst_opencode::filter::OpenCodeFilter; - - // Test ANSI escape sequence removal - let ansi_text = "\x1b[31mRed text\x1b[0m normal text"; - let cleaned = OpenCodeFilter::strip_ansi_codes(ansi_text); - assert_eq!(cleaned, "Red text normal text"); - - // Test unicode escape sequences - let unicode_ansi = "Text with \\u001b[32mgreen\\u001b[0m color"; - let cleaned = OpenCodeFilter::strip_ansi_codes(unicode_ansi); - assert_eq!(cleaned, "Text with green color"); - - // Test text without ANSI codes (unchanged) - let plain_text = "Regular text without codes"; - let cleaned = OpenCodeFilter::strip_ansi_codes(plain_text); - assert_eq!(cleaned, plain_text); - } -} diff --git a/backend/src/executors/sst_opencode/filter.rs b/backend/src/executors/sst_opencode/filter.rs deleted file mode 100644 index bf900cf1..00000000 --- a/backend/src/executors/sst_opencode/filter.rs +++ /dev/null @@ -1,184 +0,0 @@ -use lazy_static::lazy_static; -use regex::Regex; - -lazy_static! { - static ref OPENCODE_LOG_REGEX: Regex = Regex::new(r"^(INFO|DEBUG|WARN|ERROR)\s+.*").unwrap(); - static ref SESSION_ID_REGEX: Regex = Regex::new(r".*\b(id|session|sessionID)=([^ ]+)").unwrap(); - static ref TOOL_USAGE_REGEX: Regex = Regex::new(r"^\|\s*([a-zA-Z]+)\s*(.*)").unwrap(); - static ref NPM_WARN_REGEX: Regex = Regex::new(r"^npm warn .*").unwrap(); -} - -/// Filter for OpenCode stderr output -pub struct OpenCodeFilter; - -impl OpenCodeFilter { - /// Check if a line should be skipped as noise - pub fn is_noise(line: &str) -> bool { - let trimmed = line.trim(); - - // Empty lines are noise - if trimmed.is_empty() { - return true; - } - - // Strip ANSI escape codes for analysis - let cleaned = Self::strip_ansi_codes(trimmed); - let cleaned_trim = cleaned.trim(); - - // Skip tool calls - they are NOT noise - if TOOL_USAGE_REGEX.is_match(cleaned_trim) { - return false; - } - - // OpenCode log lines are noise (includes session logs) - if is_opencode_log_line(cleaned_trim) { - return true; - } - - if NPM_WARN_REGEX.is_match(cleaned_trim) { - return true; - } - - // Spinner glyphs - if cleaned_trim.len() == 1 && "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏".contains(cleaned_trim) { - return true; - } - - // Banner lines containing block glyphs (Unicode Block Elements range) - if cleaned_trim - .chars() - .any(|c| ('\u{2580}'..='\u{259F}').contains(&c)) - { - return true; - } - - // UI/stats frames using Box Drawing glyphs (U+2500-257F) - if cleaned_trim - .chars() - .any(|c| ('\u{2500}'..='\u{257F}').contains(&c)) - { - return true; - } - - // Model banner (@ with spaces) - if cleaned_trim.starts_with("@ ") { - return true; - } - - // Share link - if cleaned_trim.starts_with("~") && cleaned_trim.contains("https://opencode.ai/s/") { - return true; - } - - // Everything else (assistant messages) is NOT noise - false - } - - 
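A hedged sketch of how this filter is typically applied to captured OpenCode output before normalization; the keep_signal name and raw_output argument are hypothetical, and the real streaming path does the equivalent work line-by-line in process_line_for_content.

```rust
/// Illustrative pre-filter pass: drop noise lines and strip ANSI codes from
/// whatever remains. Not part of the original module.
fn keep_signal(raw_output: &str) -> Vec<String> {
    let mut kept = Vec::new();
    for line in raw_output.lines() {
        if OpenCodeFilter::is_noise(line) {
            continue;
        }
        kept.push(OpenCodeFilter::strip_ansi_codes(line).trim().to_string());
    }
    kept
}
```

Given the cases exercised in the tests earlier in this patch, a line such as `INFO session=ses_abc123 starting` is dropped as noise, while a tool call like `| Read {"filePath":"hello.js"}` and plain assistant text pass through for normalization.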
pub fn is_stderr(_line: &str) -> bool { - false - } - - /// Strip ANSI escape codes from text (conservative) - pub fn strip_ansi_codes(text: &str) -> String { - // Handle both unicode escape sequences and raw ANSI codes - let result = text.replace("\\u001b", "\x1b"); - - let mut cleaned = String::new(); - let mut chars = result.chars().peekable(); - - while let Some(ch) = chars.next() { - if ch == '\x1b' { - // Skip ANSI escape sequence - if chars.peek() == Some(&'[') { - chars.next(); // consume '[' - // Skip until we find a letter (end of ANSI sequence) - for next_ch in chars.by_ref() { - if next_ch.is_ascii_alphabetic() { - break; - } - } - } - } else { - cleaned.push(ch); - } - } - - cleaned - } -} - -/// Detect if a line is an OpenCode log line format using regex -pub fn is_opencode_log_line(line: &str) -> bool { - OPENCODE_LOG_REGEX.is_match(line) -} - -/// Parse session_id from OpenCode log lines -pub fn parse_session_id_from_line(line: &str) -> Option { - // Only apply to OpenCode log lines - if !is_opencode_log_line(line) { - return None; - } - - // Try regex for session ID extraction from service=session logs - if let Some(captures) = SESSION_ID_REGEX.captures(line) { - if let Some(id) = captures.get(2) { - return Some(id.as_str().to_string()); - } - } - - None -} - -/// Get the tool usage regex for parsing tool patterns -pub fn tool_usage_regex() -> &'static Regex { - &TOOL_USAGE_REGEX -} - -#[cfg(test)] -mod tests { - #[test] - fn test_session_id_extraction() { - use crate::executors::sst_opencode::filter::parse_session_id_from_line; - - // Test session ID extraction from session= format (only works on OpenCode log lines) - assert_eq!( - parse_session_id_from_line("INFO session=ses_abc123 starting"), - Some("ses_abc123".to_string()) - ); - - assert_eq!( - parse_session_id_from_line("DEBUG id=debug_id process"), - Some("debug_id".to_string()) - ); - - // Test lines without log prefix (should return None) - assert_eq!( - parse_session_id_from_line("session=simple_id chatting"), - None - ); - - // Test no session ID - assert_eq!(parse_session_id_from_line("No session here"), None); - assert_eq!(parse_session_id_from_line(""), None); - } - - #[test] - fn test_ansi_code_stripping() { - use crate::executors::sst_opencode::filter::OpenCodeFilter; - - // Test ANSI escape sequence removal - let ansi_text = "\x1b[31mRed text\x1b[0m normal text"; - let cleaned = OpenCodeFilter::strip_ansi_codes(ansi_text); - assert_eq!(cleaned, "Red text normal text"); - - // Test unicode escape sequences - let unicode_ansi = "Text with \\u001b[32mgreen\\u001b[0m color"; - let cleaned = OpenCodeFilter::strip_ansi_codes(unicode_ansi); - assert_eq!(cleaned, "Text with green color"); - - // Test text without ANSI codes (unchanged) - let plain_text = "Regular text without codes"; - let cleaned = OpenCodeFilter::strip_ansi_codes(plain_text); - assert_eq!(cleaned, plain_text); - } -} diff --git a/backend/src/executors/sst_opencode/tools.rs b/backend/src/executors/sst_opencode/tools.rs deleted file mode 100644 index 630dd319..00000000 --- a/backend/src/executors/sst_opencode/tools.rs +++ /dev/null @@ -1,166 +0,0 @@ -use serde_json::{json, Value}; - -use crate::utils::path::make_path_relative; - -/// Normalize tool names to match frontend expectations for purple box styling -pub fn normalize_tool_name(tool_name: &str) -> String { - match tool_name { - "Todo" => "todowrite".to_string(), // Generic TODO tool → todowrite - "TodoWrite" => "todowrite".to_string(), - "TodoRead" => "todoread".to_string(), - 
"ExitPlanMode" => "exitplanmode".to_string(), // Normalize ExitPlanMode to lowercase - _ => tool_name.to_lowercase(), // Convert all tool names to lowercase for consistency - } -} - -/// Helper function to determine action type for tool usage -pub fn determine_action_type(tool_name: &str, input: &Value, worktree_path: &str) -> Value { - match tool_name.to_lowercase().as_str() { - "read" => { - if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) { - json!({ - "action": "file_read", - "path": make_path_relative(file_path, worktree_path) - }) - } else { - json!({"action": "other", "description": "File read operation"}) - } - } - "write" | "edit" => { - if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) { - json!({ - "action": "file_write", - "path": make_path_relative(file_path, worktree_path) - }) - } else { - json!({"action": "other", "description": "File write operation"}) - } - } - "bash" => { - if let Some(command) = input.get("command").and_then(|c| c.as_str()) { - json!({"action": "command_run", "command": command}) - } else { - json!({"action": "other", "description": "Command execution"}) - } - } - "grep" => { - if let Some(pattern) = input.get("pattern").and_then(|p| p.as_str()) { - json!({"action": "search", "query": pattern}) - } else { - json!({"action": "other", "description": "Search operation"}) - } - } - "todowrite" | "todoread" => { - json!({"action": "other", "description": "TODO list management"}) - } - "exitplanmode" => { - // Extract the plan from the input - let plan_content = if let Some(plan) = input.get("plan").and_then(|p| p.as_str()) { - plan.to_string() - } else { - // Fallback - use the full input as plan if no specific plan field - serde_json::to_string_pretty(input).unwrap_or_default() - }; - json!({ - "action": "plan_presentation", - "plan": plan_content - }) - } - _ => json!({"action": "other", "description": format!("Tool: {}", tool_name)}), - } -} - -/// Helper function to generate concise content for tool usage -pub fn generate_tool_content(tool_name: &str, input: &Value, worktree_path: &str) -> String { - match tool_name.to_lowercase().as_str() { - "read" => { - if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) { - format!("`{}`", make_path_relative(file_path, worktree_path)) - } else { - "Read file".to_string() - } - } - "write" | "edit" => { - if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) { - format!("`{}`", make_path_relative(file_path, worktree_path)) - } else { - "Write file".to_string() - } - } - "bash" => { - if let Some(command) = input.get("command").and_then(|c| c.as_str()) { - format!("`{}`", command) - } else { - "Execute command".to_string() - } - } - "todowrite" | "todoread" => generate_todo_content(input), - "exitplanmode" => { - // Show the plan content or a summary - if let Some(plan) = input.get("plan").and_then(|p| p.as_str()) { - // Truncate long plans for display - if plan.len() > 100 { - format!("{}...", &plan[..97]) - } else { - plan.to_string() - } - } else { - "Plan presentation".to_string() - } - } - _ => format!("`{}`", tool_name), - } -} - -/// Generate formatted content for TODO tools -fn generate_todo_content(input: &Value) -> String { - // Extract todo list from input to show actual todos - if let Some(todos) = input.get("todos").and_then(|t| t.as_array()) { - let mut todo_items = Vec::new(); - for todo in todos { - if let Some(content) = todo.get("content").and_then(|c| c.as_str()) { - let status = todo - .get("status") - .and_then(|s| 
s.as_str()) - .unwrap_or("pending"); - let status_emoji = match status { - "completed" => "✅", - "in_progress" => "🔄", - "pending" | "todo" => "⏳", - _ => "📝", - }; - let priority = todo - .get("priority") - .and_then(|p| p.as_str()) - .unwrap_or("medium"); - todo_items.push(format!("{} {} ({})", status_emoji, content, priority)); - } - } - if !todo_items.is_empty() { - format!("TODO List:\n{}", todo_items.join("\n")) - } else { - "Managing TODO list".to_string() - } - } else { - "Managing TODO list".to_string() - } -} - -#[cfg(test)] -mod tests { - #[test] - fn test_normalize_tool_name() { - use crate::executors::sst_opencode::tools::normalize_tool_name; - - // Test TODO tool normalization - assert_eq!(normalize_tool_name("Todo"), "todowrite"); - assert_eq!(normalize_tool_name("TodoWrite"), "todowrite"); - assert_eq!(normalize_tool_name("TodoRead"), "todoread"); - - // Test other tools are converted to lowercase - assert_eq!(normalize_tool_name("Read"), "read"); - assert_eq!(normalize_tool_name("Write"), "write"); - assert_eq!(normalize_tool_name("bash"), "bash"); - assert_eq!(normalize_tool_name("SomeOtherTool"), "someothertool"); - } -} diff --git a/backend/src/main.rs b/backend/src/main.rs deleted file mode 100644 index 0cd77c71..00000000 --- a/backend/src/main.rs +++ /dev/null @@ -1,317 +0,0 @@ -use std::{str::FromStr, sync::Arc}; - -use axum::{ - body::Body, - http::{header, HeaderValue, StatusCode}, - middleware::from_fn_with_state, - response::{IntoResponse, Json as ResponseJson, Response}, - routing::{get, post}, - Json, Router, -}; -use sentry_tower::NewSentryLayer; -use sqlx::{sqlite::SqliteConnectOptions, SqlitePool}; -use strip_ansi_escapes::strip; -use tokio::sync::RwLock; -use tower_http::cors::CorsLayer; -use tracing_subscriber::{filter::LevelFilter, prelude::*}; -use vibe_kanban::{sentry_layer, Assets, ScriptAssets, SoundAssets}; - -mod app_state; -mod command_runner; -mod execution_monitor; -mod executor; -mod executors; -mod mcp; -mod middleware; -mod models; -mod routes; -mod services; -mod utils; - -use app_state::AppState; -use execution_monitor::execution_monitor; -use middleware::{ - load_execution_process_simple_middleware, load_project_middleware, - load_task_attempt_middleware, load_task_middleware, load_task_template_middleware, -}; -use models::{ApiResponse, Config, Environment}; -use routes::{ - auth, config, filesystem, github, health, projects, stream, task_attempts, task_templates, - tasks, -}; -use services::PrMonitorService; - -async fn echo_handler( - Json(payload): Json, -) -> ResponseJson> { - ResponseJson(ApiResponse::success(payload)) -} - -async fn static_handler(uri: axum::extract::Path) -> impl IntoResponse { - let path = uri.trim_start_matches('/'); - serve_file(path).await -} - -async fn index_handler() -> impl IntoResponse { - serve_file("index.html").await -} - -async fn serve_file(path: &str) -> impl IntoResponse { - let file = Assets::get(path); - - match file { - Some(content) => { - let mime = mime_guess::from_path(path).first_or_octet_stream(); - - Response::builder() - .status(StatusCode::OK) - .header( - header::CONTENT_TYPE, - HeaderValue::from_str(mime.as_ref()).unwrap(), - ) - .body(Body::from(content.data.into_owned())) - .unwrap() - } - None => { - // For SPA routing, serve index.html for unknown routes - if let Some(index) = Assets::get("index.html") { - Response::builder() - .status(StatusCode::OK) - .header(header::CONTENT_TYPE, HeaderValue::from_static("text/html")) - .body(Body::from(index.data.into_owned())) - .unwrap() - } 
else { - Response::builder() - .status(StatusCode::NOT_FOUND) - .body(Body::from("404 Not Found")) - .unwrap() - } - } - } -} - -async fn serve_sound_file( - axum::extract::Path(filename): axum::extract::Path, -) -> impl IntoResponse { - // Validate filename contains only expected sound files - let valid_sounds = [ - "abstract-sound1.wav", - "abstract-sound2.wav", - "abstract-sound3.wav", - "abstract-sound4.wav", - "cow-mooing.wav", - "phone-vibration.wav", - "rooster.wav", - ]; - - if !valid_sounds.contains(&filename.as_str()) { - return Response::builder() - .status(StatusCode::NOT_FOUND) - .body(Body::from("Sound file not found")) - .unwrap(); - } - - match SoundAssets::get(&filename) { - Some(content) => Response::builder() - .status(StatusCode::OK) - .header(header::CONTENT_TYPE, HeaderValue::from_static("audio/wav")) - .body(Body::from(content.data.into_owned())) - .unwrap(), - None => Response::builder() - .status(StatusCode::NOT_FOUND) - .body(Body::from("Sound file not found")) - .unwrap(), - } -} - -fn main() -> anyhow::Result<()> { - let environment = if cfg!(debug_assertions) { - "dev" - } else { - "production" - }; - let _guard = sentry::init(("https://1065a1d276a581316999a07d5dffee26@o4509603705192449.ingest.de.sentry.io/4509605576441937", sentry::ClientOptions { - release: sentry::release_name!(), - environment: Some(environment.into()), - attach_stacktrace: true, - ..Default::default() - })); - sentry::configure_scope(|scope| { - scope.set_tag("source", "server"); - }); - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap() - .block_on(async { - tracing_subscriber::registry() - .with(tracing_subscriber::fmt::layer().with_filter(LevelFilter::INFO)) - .with(sentry_layer()) - .init(); - - // Create asset directory if it doesn't exist - if !utils::asset_dir().exists() { - std::fs::create_dir_all(utils::asset_dir())?; - } - - // Database connection - let database_url = format!( - "sqlite://{}", - utils::asset_dir().join("db.sqlite").to_string_lossy() - ); - - let options = SqliteConnectOptions::from_str(&database_url)?.create_if_missing(true); - let pool = SqlitePool::connect_with(options).await?; - sqlx::migrate!("./migrations").run(&pool).await?; - - // Load configuration - let config_path = utils::config_path(); - let config = Config::load(&config_path)?; - let config_arc = Arc::new(RwLock::new(config)); - - let env = std::env::var("ENVIRONMENT") - .unwrap_or_else(|_| "local".to_string()); - let mode = env.parse().unwrap_or(Environment::Local); - tracing::info!("Running in {mode} mode" ); - - // Create app state - let app_state = AppState::new(pool.clone(), config_arc.clone(), mode).await; - - app_state.update_sentry_scope().await; - - // Track session start event - app_state.track_analytics_event("session_start", None).await; - // Start background task to check for init status and spawn processes - let state_clone = app_state.clone(); - tokio::spawn(async move { - execution_monitor(state_clone).await; - }); - - // Start PR monitoring service - let pr_monitor = PrMonitorService::new(pool.clone()); - let config_for_monitor = config_arc.clone(); - - tokio::spawn(async move { - pr_monitor.start_with_config(config_for_monitor).await; - }); - - // Public routes (no auth required) - let public_routes = Router::new() - .route("/api/health", get(health::health_check)) - .route("/api/echo", post(echo_handler)); - - // Create routers with different middleware layers - let base_routes = Router::new() - .merge(stream::stream_router()) - 
.merge(filesystem::filesystem_router()) - .merge(config::config_router()) - .merge(auth::auth_router()) - .route("/sounds/:filename", get(serve_sound_file)) - .merge( - Router::new() - .route("/execution-processes/:process_id", get(task_attempts::get_execution_process)) - .route_layer(from_fn_with_state(app_state.clone(), load_execution_process_simple_middleware)) - ); - - // Template routes with task template middleware applied selectively - let template_routes = Router::new() - .route("/templates", get(task_templates::list_templates).post(task_templates::create_template)) - .route("/templates/global", get(task_templates::list_global_templates)) - .route( - "/projects/:project_id/templates", - get(task_templates::list_project_templates), - ) - .merge( - Router::new() - .route( - "/templates/:template_id", - get(task_templates::get_template) - .put(task_templates::update_template) - .delete(task_templates::delete_template), - ) - .route_layer(from_fn_with_state(app_state.clone(), load_task_template_middleware)) - ); - - // Project routes with project middleware - let project_routes = Router::new() - .merge(projects::projects_base_router()) - .merge(projects::projects_with_id_router() - .layer(from_fn_with_state(app_state.clone(), load_project_middleware))); - - // Task routes with appropriate middleware - let task_routes = Router::new() - .merge(tasks::tasks_project_router() - .layer(from_fn_with_state(app_state.clone(), load_project_middleware))) - .merge(tasks::tasks_with_id_router() - .layer(from_fn_with_state(app_state.clone(), load_task_middleware))); - - // Task attempt routes with appropriate middleware - let task_attempt_routes = Router::new() - .merge(task_attempts::task_attempts_list_router(app_state.clone()) - .layer(from_fn_with_state(app_state.clone(), load_task_middleware))) - .merge(task_attempts::task_attempts_with_id_router(app_state.clone()) - .layer(from_fn_with_state(app_state.clone(), load_task_attempt_middleware))); - - // Conditionally add GitHub routes for cloud mode - let mut api_routes = Router::new() - .merge(base_routes) - .merge(template_routes) - .merge(project_routes) - .merge(task_routes) - .merge(task_attempt_routes); - - if mode.is_cloud() { - api_routes = api_routes.merge(github::github_router()); - tracing::info!("GitHub repository routes enabled (cloud mode)"); - } - - // All routes (no auth required) - let app_routes = Router::new() - .nest( - "/api", - api_routes - .layer(from_fn_with_state(app_state.clone(), auth::sentry_user_context_middleware)), - ); - - let app = Router::new() - .merge(public_routes) - .merge(app_routes) - // Static file serving routes - .route("/", get(index_handler)) - .route("/*path", get(static_handler)) - .with_state(app_state) - .layer(CorsLayer::permissive()) - .layer(NewSentryLayer::new_from_top()); - - let port = std::env::var("BACKEND_PORT") - .or_else(|_| std::env::var("PORT")) - .ok() - .and_then(|s| { - // remove any ANSI codes, then turn into String - let cleaned = String::from_utf8(strip(s.as_bytes())) - .expect("UTF-8 after stripping ANSI"); - cleaned.trim().parse::().ok() - }) - .unwrap_or_else(|| { - tracing::info!("No PORT environment variable set, using port 0 for auto-assignment"); - 0 - }); // Use 0 to find free port if no specific port provided - - let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); - let listener = tokio::net::TcpListener::bind(format!("{host}:{port}")).await?; - let actual_port = listener.local_addr()?.port(); // get → 53427 (example) - - tracing::info!("Server 
running on http://{host}:{actual_port}"); - - if !cfg!(debug_assertions) { - tracing::info!("Opening browser..."); - if let Err(e) = utils::open_browser(&format!("http://127.0.0.1:{actual_port}")).await { - tracing::warn!("Failed to open browser automatically: {}. Please open http://127.0.0.1:{} manually.", e, actual_port); - } - } - - axum::serve(listener, app).await?; - - Ok(()) - }) -} diff --git a/backend/src/middleware/model_loaders.rs b/backend/src/middleware/model_loaders.rs deleted file mode 100644 index 6b202e9c..00000000 --- a/backend/src/middleware/model_loaders.rs +++ /dev/null @@ -1,242 +0,0 @@ -use axum::{ - extract::{Path, State}, - http::StatusCode, - middleware::Next, - response::Response, -}; -use uuid::Uuid; - -use crate::{ - app_state::AppState, - models::{ - execution_process::ExecutionProcess, project::Project, task::Task, - task_attempt::TaskAttempt, task_template::TaskTemplate, - }, -}; - -/// Middleware that loads and injects a Project based on the project_id path parameter -pub async fn load_project_middleware( - State(app_state): State, - Path(project_id): Path, - request: axum::extract::Request, - next: Next, -) -> Result { - // Load the project from the database - let project = match Project::find_by_id(&app_state.db_pool, project_id).await { - Ok(Some(project)) => project, - Ok(None) => { - tracing::warn!("Project {} not found", project_id); - return Err(StatusCode::NOT_FOUND); - } - Err(e) => { - tracing::error!("Failed to fetch project {}: {}", project_id, e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Insert the project as an extension - let mut request = request; - request.extensions_mut().insert(project); - - // Continue with the next middleware/handler - Ok(next.run(request).await) -} - -/// Middleware that loads and injects both Project and Task based on project_id and task_id path parameters -pub async fn load_task_middleware( - State(app_state): State, - Path((project_id, task_id)): Path<(Uuid, Uuid)>, - request: axum::extract::Request, - next: Next, -) -> Result { - // Load the project first - let project = match Project::find_by_id(&app_state.db_pool, project_id).await { - Ok(Some(project)) => project, - Ok(None) => { - tracing::warn!("Project {} not found", project_id); - return Err(StatusCode::NOT_FOUND); - } - Err(e) => { - tracing::error!("Failed to fetch project {}: {}", project_id, e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Load the task and validate it belongs to the project - let task = match Task::find_by_id_and_project_id(&app_state.db_pool, task_id, project_id).await - { - Ok(Some(task)) => task, - Ok(None) => { - tracing::warn!("Task {} not found in project {}", task_id, project_id); - return Err(StatusCode::NOT_FOUND); - } - Err(e) => { - tracing::error!( - "Failed to fetch task {} in project {}: {}", - task_id, - project_id, - e - ); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Insert both models as extensions - let mut request = request; - request.extensions_mut().insert(project); - request.extensions_mut().insert(task); - - // Continue with the next middleware/handler - Ok(next.run(request).await) -} - -/// Middleware that loads and injects Project, Task, and TaskAttempt based on project_id, task_id, and attempt_id path parameters -pub async fn load_task_attempt_middleware( - State(app_state): State, - Path((project_id, task_id, attempt_id)): Path<(Uuid, Uuid, Uuid)>, - request: axum::extract::Request, - next: Next, -) -> Result { - // Load the full context in one 
call using the existing method - let context = match TaskAttempt::load_context( - &app_state.db_pool, - attempt_id, - task_id, - project_id, - ) - .await - { - Ok(context) => context, - Err(e) => { - tracing::error!( - "Failed to load context for attempt {} in task {} in project {}: {}", - attempt_id, - task_id, - project_id, - e - ); - return Err(StatusCode::NOT_FOUND); - } - }; - - // Insert all models as extensions - let mut request = request; - request.extensions_mut().insert(context.project); - request.extensions_mut().insert(context.task); - request.extensions_mut().insert(context.task_attempt); - - // Continue with the next middleware/handler - Ok(next.run(request).await) -} - -/// Simple middleware that loads and injects ExecutionProcess based on the process_id path parameter -/// without any additional validation -pub async fn load_execution_process_simple_middleware( - State(app_state): State, - Path(process_id): Path, - mut request: axum::extract::Request, - next: Next, -) -> Result { - // Load the execution process from the database - let execution_process = match ExecutionProcess::find_by_id(&app_state.db_pool, process_id).await - { - Ok(Some(process)) => process, - Ok(None) => { - tracing::warn!("ExecutionProcess {} not found", process_id); - return Err(StatusCode::NOT_FOUND); - } - Err(e) => { - tracing::error!("Failed to fetch execution process {}: {}", process_id, e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Inject the execution process into the request - request.extensions_mut().insert(execution_process); - - // Continue to the next middleware/handler - Ok(next.run(request).await) -} - -/// Middleware that loads and injects Project, Task, TaskAttempt, and ExecutionProcess -/// based on the path parameters: project_id, task_id, attempt_id, process_id -pub async fn load_execution_process_with_context_middleware( - State(app_state): State, - Path((project_id, task_id, attempt_id, process_id)): Path<(Uuid, Uuid, Uuid, Uuid)>, - request: axum::extract::Request, - next: Next, -) -> Result { - // Load the task attempt context first - let context = match TaskAttempt::load_context( - &app_state.db_pool, - attempt_id, - task_id, - project_id, - ) - .await - { - Ok(context) => context, - Err(e) => { - tracing::error!( - "Failed to load context for attempt {} in task {} in project {}: {}", - attempt_id, - task_id, - project_id, - e - ); - return Err(StatusCode::NOT_FOUND); - } - }; - - // Load the execution process - let execution_process = match ExecutionProcess::find_by_id(&app_state.db_pool, process_id).await - { - Ok(Some(process)) => process, - Ok(None) => { - tracing::warn!("ExecutionProcess {} not found", process_id); - return Err(StatusCode::NOT_FOUND); - } - Err(e) => { - tracing::error!("Failed to fetch execution process {}: {}", process_id, e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Insert all models as extensions - let mut request = request; - request.extensions_mut().insert(context.project); - request.extensions_mut().insert(context.task); - request.extensions_mut().insert(context.task_attempt); - request.extensions_mut().insert(execution_process); - - // Continue with the next middleware/handler - Ok(next.run(request).await) -} - -/// Middleware that loads and injects TaskTemplate based on the template_id path parameter -pub async fn load_task_template_middleware( - State(app_state): State, - Path(template_id): Path, - request: axum::extract::Request, - next: Next, -) -> Result { - // Load the task template from the 
database - let task_template = match TaskTemplate::find_by_id(&app_state.db_pool, template_id).await { - Ok(Some(template)) => template, - Ok(None) => { - tracing::warn!("TaskTemplate {} not found", template_id); - return Err(StatusCode::NOT_FOUND); - } - Err(e) => { - tracing::error!("Failed to fetch task template {}: {}", template_id, e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Insert the task template as an extension - let mut request = request; - request.extensions_mut().insert(task_template); - - // Continue with the next middleware/handler - Ok(next.run(request).await) -} diff --git a/backend/src/models/api_response.rs b/backend/src/models/api_response.rs deleted file mode 100644 index 00cfd3be..00000000 --- a/backend/src/models/api_response.rs +++ /dev/null @@ -1,35 +0,0 @@ -mod response { - use serde::Serialize; - use ts_rs::TS; - - #[derive(Debug, Serialize, TS)] - #[ts(export)] - pub struct ApiResponse { - success: bool, - data: Option, - message: Option, - } - - impl ApiResponse { - /// Creates a successful response, with `data` and no message. - pub fn success(data: T) -> Self { - ApiResponse { - success: true, - data: Some(data), - message: None, - } - } - - /// Creates an error response, with `message` and no data. - pub fn error(message: &str) -> Self { - ApiResponse { - success: false, - data: None, - message: Some(message.to_string()), - } - } - } -} - -// Re-export the type, but its fields remain private -pub use response::ApiResponse; diff --git a/backend/src/models/config.rs b/backend/src/models/config.rs deleted file mode 100644 index a34b68e7..00000000 --- a/backend/src/models/config.rs +++ /dev/null @@ -1,433 +0,0 @@ -use std::{path::PathBuf, str::FromStr}; - -use serde::{Deserialize, Serialize}; -use ts_rs::TS; - -use crate::executor::ExecutorConfig; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, TS)] -#[ts(export)] -#[serde(rename_all = "lowercase")] -pub enum Environment { - Local, - Cloud, -} - -impl FromStr for Environment { - type Err = String; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - "local" => Ok(Environment::Local), - "cloud" => Ok(Environment::Cloud), - _ => Err(format!("Invalid environment: {}", s)), - } - } -} - -impl Environment { - pub fn is_cloud(&self) -> bool { - matches!(self, Environment::Cloud) - } - - pub fn is_local(&self) -> bool { - matches!(self, Environment::Local) - } -} - -impl std::fmt::Display for Environment { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Environment::Local => write!(f, "local"), - Environment::Cloud => write!(f, "cloud"), - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct EnvironmentInfo { - pub os_type: String, - pub os_version: String, - pub architecture: String, - pub bitness: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct Config { - pub theme: ThemeMode, - pub executor: ExecutorConfig, - pub disclaimer_acknowledged: bool, - pub onboarding_acknowledged: bool, - pub github_login_acknowledged: bool, - pub telemetry_acknowledged: bool, - pub sound_alerts: bool, - pub sound_file: SoundFile, - pub push_notifications: bool, - pub editor: EditorConfig, - pub github: GitHubConfig, - pub analytics_enabled: Option, - pub environment: EnvironmentInfo, - pub workspace_dir: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -#[serde(rename_all = "lowercase")] -pub enum ThemeMode { - Light, 
- Dark, - System, - Purple, - Green, - Blue, - Orange, - Red, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct EditorConfig { - pub editor_type: EditorType, - pub custom_command: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct GitHubConfig { - pub pat: Option, - pub token: Option, - pub username: Option, - pub primary_email: Option, - pub default_pr_base: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -#[serde(rename_all = "lowercase")] -pub enum EditorType { - VSCode, - Cursor, - Windsurf, - IntelliJ, - Zed, - Custom, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -#[serde(rename_all = "kebab-case")] -pub enum SoundFile { - AbstractSound1, - AbstractSound2, - AbstractSound3, - AbstractSound4, - CowMooing, - PhoneVibration, - Rooster, -} - -// Constants for frontend -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct EditorConstants { - pub editor_types: Vec, - pub editor_labels: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct SoundConstants { - pub sound_files: Vec, - pub sound_labels: Vec, -} - -impl EditorConstants { - pub fn new() -> Self { - Self { - editor_types: vec![ - EditorType::VSCode, - EditorType::Cursor, - EditorType::Windsurf, - EditorType::IntelliJ, - EditorType::Zed, - EditorType::Custom, - ], - editor_labels: vec![ - "VS Code".to_string(), - "Cursor".to_string(), - "Windsurf".to_string(), - "IntelliJ IDEA".to_string(), - "Zed".to_string(), - "Custom".to_string(), - ], - } - } -} - -impl Default for EditorConstants { - fn default() -> Self { - Self::new() - } -} - -impl SoundConstants { - pub fn new() -> Self { - Self { - sound_files: vec![ - SoundFile::AbstractSound1, - SoundFile::AbstractSound2, - SoundFile::AbstractSound3, - SoundFile::AbstractSound4, - SoundFile::CowMooing, - SoundFile::PhoneVibration, - SoundFile::Rooster, - ], - sound_labels: vec![ - "Gentle Chime".to_string(), - "Soft Bell".to_string(), - "Digital Tone".to_string(), - "Subtle Alert".to_string(), - "Cow Mooing".to_string(), - "Phone Vibration".to_string(), - "Rooster Call".to_string(), - ], - } - } -} - -impl Default for SoundConstants { - fn default() -> Self { - Self::new() - } -} - -impl Default for Config { - fn default() -> Self { - let info = os_info::get(); - - Self { - theme: ThemeMode::System, - executor: ExecutorConfig::Claude, - disclaimer_acknowledged: false, - onboarding_acknowledged: false, - github_login_acknowledged: false, - telemetry_acknowledged: false, - sound_alerts: true, - sound_file: SoundFile::AbstractSound4, - push_notifications: true, - editor: EditorConfig::default(), - github: GitHubConfig::default(), - analytics_enabled: None, - environment: EnvironmentInfo { - os_type: info.os_type().to_string(), - os_version: info.version().to_string(), - architecture: info.architecture().unwrap_or("unknown").to_string(), - bitness: info.bitness().to_string(), - }, - workspace_dir: None, - } - } -} - -impl Default for EditorConfig { - fn default() -> Self { - Self { - editor_type: EditorType::VSCode, - custom_command: None, - } - } -} - -impl Default for GitHubConfig { - fn default() -> Self { - Self { - pat: None, - token: None, - username: None, - primary_email: None, - default_pr_base: Some("main".to_string()), - } - } -} - -impl EditorConfig { - pub fn get_command(&self) -> Vec { - match &self.editor_type { - EditorType::VSCode => vec!["code".to_string()], - 
EditorType::Cursor => vec!["cursor".to_string()], - EditorType::Windsurf => vec!["windsurf".to_string()], - EditorType::IntelliJ => vec!["idea".to_string()], - EditorType::Zed => vec!["zed".to_string()], - EditorType::Custom => { - if let Some(custom) = &self.custom_command { - custom.split_whitespace().map(|s| s.to_string()).collect() - } else { - vec!["code".to_string()] // fallback to VSCode - } - } - } - } -} - -impl SoundFile { - pub fn to_filename(&self) -> &'static str { - match self { - SoundFile::AbstractSound1 => "abstract-sound1.wav", - SoundFile::AbstractSound2 => "abstract-sound2.wav", - SoundFile::AbstractSound3 => "abstract-sound3.wav", - SoundFile::AbstractSound4 => "abstract-sound4.wav", - SoundFile::CowMooing => "cow-mooing.wav", - SoundFile::PhoneVibration => "phone-vibration.wav", - SoundFile::Rooster => "rooster.wav", - } - } - - /// Get or create a cached sound file with the embedded sound data - pub async fn get_path(&self) -> Result> { - use std::io::Write; - - let filename = self.to_filename(); - let cache_dir = crate::utils::cache_dir(); - let cached_path = cache_dir.join(format!("sound-{}", filename)); - - // Check if cached file already exists and is valid - if cached_path.exists() { - // Verify file has content (basic validation) - if let Ok(metadata) = std::fs::metadata(&cached_path) { - if metadata.len() > 0 { - return Ok(cached_path); - } - } - } - - // File doesn't exist or is invalid, create it - let sound_data = crate::SoundAssets::get(filename) - .ok_or_else(|| format!("Embedded sound file not found: {}", filename))? - .data; - - // Ensure cache directory exists - std::fs::create_dir_all(&cache_dir) - .map_err(|e| format!("Failed to create cache directory: {}", e))?; - - let mut file = std::fs::File::create(&cached_path) - .map_err(|e| format!("Failed to create cached sound file: {}", e))?; - - file.write_all(&sound_data) - .map_err(|e| format!("Failed to write sound data to cached file: {}", e))?; - - drop(file); // Ensure file is closed - - Ok(cached_path) - } -} - -impl Config { - pub fn load(config_path: &PathBuf) -> anyhow::Result { - if config_path.exists() { - let content = std::fs::read_to_string(config_path)?; - - // Try to deserialize as is first - match serde_json::from_str::(&content) { - Ok(mut config) => { - if config.analytics_enabled.is_none() { - config.analytics_enabled = Some(true); - } - - // Always save back to ensure new fields are written to disk - config.save(config_path)?; - Ok(config) - } - Err(_) => { - // If full deserialization fails, try to merge with defaults - match Self::load_with_defaults(&content, config_path) { - Ok(config) => Ok(config), - Err(_) => { - // Even partial loading failed - backup the corrupted file - if let Err(e) = Self::backup_corrupted_config(config_path) { - tracing::error!("Failed to backup corrupted config: {}", e); - } - - // Remove corrupted file and create a default config - if let Err(e) = std::fs::remove_file(config_path) { - tracing::error!("Failed to remove corrupted config file: {}", e); - } - - // Create and save default config - let config = Config::default(); - config.save(config_path)?; - Ok(config) - } - } - } - } - } else { - let config = Config::default(); - config.save(config_path)?; - Ok(config) - } - } - - fn load_with_defaults(content: &str, config_path: &PathBuf) -> anyhow::Result { - // Parse as generic JSON value - let existing_value: serde_json::Value = serde_json::from_str(content)?; - - // Get default config as JSON value - let default_config = Config::default(); - let 
default_value = serde_json::to_value(&default_config)?; - - // Merge existing config with defaults - let merged_value = Self::merge_json_values(default_value, existing_value); - - // Deserialize merged value back to Config - let config: Config = serde_json::from_value(merged_value)?; - - // Save the updated config with any missing defaults - config.save(config_path)?; - - Ok(config) - } - - fn merge_json_values( - mut base: serde_json::Value, - overlay: serde_json::Value, - ) -> serde_json::Value { - match (&mut base, overlay) { - (serde_json::Value::Object(base_map), serde_json::Value::Object(overlay_map)) => { - for (key, value) in overlay_map { - base_map - .entry(key) - .and_modify(|base_value| { - *base_value = - Self::merge_json_values(base_value.clone(), value.clone()); - }) - .or_insert(value); - } - base - } - (_, overlay) => overlay, // Use overlay value for non-objects - } - } - - /// Create a backup of the corrupted config file - fn backup_corrupted_config(config_path: &PathBuf) -> anyhow::Result<()> { - let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S"); - let backup_filename = format!("config_backup_{}.json", timestamp); - - let backup_path = config_path - .parent() - .unwrap_or_else(|| std::path::Path::new(".")) - .join(backup_filename); - - std::fs::copy(config_path, &backup_path)?; - tracing::info!("Corrupted config backed up to: {}", backup_path.display()); - Ok(()) - } - - pub fn save(&self, config_path: &PathBuf) -> anyhow::Result<()> { - let content = serde_json::to_string_pretty(self)?; - std::fs::write(config_path, content)?; - Ok(()) - } -} diff --git a/backend/src/models/project.rs b/backend/src/models/project.rs deleted file mode 100644 index 23752211..00000000 --- a/backend/src/models/project.rs +++ /dev/null @@ -1,362 +0,0 @@ -use chrono::{DateTime, Utc}; -use git2::{BranchType, Repository}; -use serde::{Deserialize, Serialize}; -use sqlx::{FromRow, SqlitePool}; -use ts_rs::TS; -use uuid::Uuid; - -#[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct Project { - pub id: Uuid, - pub name: String, - pub git_repo_path: String, - pub setup_script: Option, - pub dev_script: Option, - pub cleanup_script: Option, - - #[ts(type = "Date")] - pub created_at: DateTime, - #[ts(type = "Date")] - pub updated_at: DateTime, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct CreateProject { - pub name: String, - pub git_repo_path: String, - pub use_existing_repo: bool, - pub setup_script: Option, - pub dev_script: Option, - pub cleanup_script: Option, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct UpdateProject { - pub name: Option, - pub git_repo_path: Option, - pub setup_script: Option, - pub dev_script: Option, - pub cleanup_script: Option, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct CreateProjectFromGitHub { - pub repository_id: i64, - pub name: String, - pub clone_url: String, - pub setup_script: Option, - pub dev_script: Option, - pub cleanup_script: Option, -} - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub struct ProjectWithBranch { - pub id: Uuid, - pub name: String, - pub git_repo_path: String, - pub setup_script: Option, - pub dev_script: Option, - pub cleanup_script: Option, - pub current_branch: Option, - - #[ts(type = "Date")] - pub created_at: DateTime, - #[ts(type = "Date")] - pub updated_at: DateTime, -} - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub struct SearchResult { - pub path: String, - pub is_file: bool, - pub match_type: SearchMatchType, 
-} - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub enum SearchMatchType { - FileName, - DirectoryName, - FullPath, -} - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub struct GitBranch { - pub name: String, - pub is_current: bool, - pub is_remote: bool, - #[ts(type = "Date")] - pub last_commit_date: DateTime, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct CreateBranch { - pub name: String, - pub base_branch: Option, -} - -impl Project { - pub async fn find_all(pool: &SqlitePool) -> Result, sqlx::Error> { - sqlx::query_as!( - Project, - r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects ORDER BY created_at DESC"# - ) - .fetch_all(pool) - .await - } - - pub async fn find_by_id(pool: &SqlitePool, id: Uuid) -> Result, sqlx::Error> { - sqlx::query_as!( - Project, - r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects WHERE id = $1"#, - id - ) - .fetch_optional(pool) - .await - } - - pub async fn find_by_git_repo_path( - pool: &SqlitePool, - git_repo_path: &str, - ) -> Result, sqlx::Error> { - sqlx::query_as!( - Project, - r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects WHERE git_repo_path = $1"#, - git_repo_path - ) - .fetch_optional(pool) - .await - } - - pub async fn find_by_git_repo_path_excluding_id( - pool: &SqlitePool, - git_repo_path: &str, - exclude_id: Uuid, - ) -> Result, sqlx::Error> { - sqlx::query_as!( - Project, - r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects WHERE git_repo_path = $1 AND id != $2"#, - git_repo_path, - exclude_id - ) - .fetch_optional(pool) - .await - } - - pub async fn create( - pool: &SqlitePool, - data: &CreateProject, - project_id: Uuid, - ) -> Result { - sqlx::query_as!( - Project, - r#"INSERT INTO projects (id, name, git_repo_path, setup_script, dev_script, cleanup_script) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime""#, - project_id, - data.name, - data.git_repo_path, - data.setup_script, - data.dev_script, - data.cleanup_script - ) - .fetch_one(pool) - .await - } - - pub async fn update( - pool: &SqlitePool, - id: Uuid, - name: String, - git_repo_path: String, - setup_script: Option, - dev_script: Option, - cleanup_script: Option, - ) -> Result { - sqlx::query_as!( - Project, - r#"UPDATE projects SET name = $2, git_repo_path = $3, setup_script = $4, dev_script = $5, cleanup_script = $6 WHERE id = $1 RETURNING id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime""#, - id, - name, - git_repo_path, - setup_script, - dev_script, - cleanup_script - ) - .fetch_one(pool) - .await - } - - pub async fn delete(pool: &SqlitePool, id: Uuid) -> Result { - let result = sqlx::query!("DELETE FROM projects WHERE id = $1", id) - .execute(pool) - .await?; - Ok(result.rows_affected()) - } - - pub async fn exists(pool: &SqlitePool, id: Uuid) -> Result { - let result = 
sqlx::query!( - r#" - SELECT COUNT(*) as "count!: i64" - FROM projects - WHERE id = $1 - "#, - id - ) - .fetch_one(pool) - .await?; - - Ok(result.count > 0) - } - - pub fn get_current_branch(&self) -> Result { - let repo = Repository::open(&self.git_repo_path)?; - let head = repo.head()?; - - if let Some(branch_name) = head.shorthand() { - Ok(branch_name.to_string()) - } else { - Ok("HEAD".to_string()) - } - } - - pub fn with_branch_info(self) -> ProjectWithBranch { - let current_branch = self.get_current_branch().ok(); - - ProjectWithBranch { - id: self.id, - name: self.name, - git_repo_path: self.git_repo_path, - setup_script: self.setup_script, - dev_script: self.dev_script, - cleanup_script: self.cleanup_script, - current_branch, - created_at: self.created_at, - updated_at: self.updated_at, - } - } - - pub fn get_all_branches(&self) -> Result, git2::Error> { - let repo = Repository::open(&self.git_repo_path)?; - let current_branch = self.get_current_branch().unwrap_or_default(); - let mut branches = Vec::new(); - - // Helper function to get last commit date for a branch - let get_last_commit_date = |branch: &git2::Branch| -> Result, git2::Error> { - if let Some(target) = branch.get().target() { - if let Ok(commit) = repo.find_commit(target) { - let timestamp = commit.time().seconds(); - return Ok(DateTime::from_timestamp(timestamp, 0).unwrap_or_else(Utc::now)); - } - } - Ok(Utc::now()) // Default to now if we can't get the commit date - }; - - // Get local branches - let local_branches = repo.branches(Some(BranchType::Local))?; - for branch_result in local_branches { - let (branch, _) = branch_result?; - if let Some(name) = branch.name()? { - let last_commit_date = get_last_commit_date(&branch)?; - branches.push(GitBranch { - name: name.to_string(), - is_current: name == current_branch, - is_remote: false, - last_commit_date, - }); - } - } - - // Get remote branches - let remote_branches = repo.branches(Some(BranchType::Remote))?; - for branch_result in remote_branches { - let (branch, _) = branch_result?; - if let Some(name) = branch.name()? { - // Skip remote HEAD references - if !name.ends_with("/HEAD") { - let last_commit_date = get_last_commit_date(&branch)?; - branches.push(GitBranch { - name: name.to_string(), - is_current: false, - is_remote: true, - last_commit_date, - }); - } - } - } - - // Sort branches: current first, then by most recent commit date - branches.sort_by(|a, b| { - if a.is_current && !b.is_current { - std::cmp::Ordering::Less - } else if !a.is_current && b.is_current { - std::cmp::Ordering::Greater - } else { - // Sort by most recent commit date (newest first) - b.last_commit_date.cmp(&a.last_commit_date) - } - }); - - Ok(branches) - } - - pub fn create_branch( - &self, - branch_name: &str, - base_branch: Option<&str>, - ) -> Result { - let repo = Repository::open(&self.git_repo_path)?; - - // Get the base branch reference - default to current branch if not specified - let base_branch_name = match base_branch { - Some(name) => name.to_string(), - None => self - .get_current_branch() - .unwrap_or_else(|_| "HEAD".to_string()), - }; - - // Find the base commit - let base_commit = if base_branch_name == "HEAD" { - repo.head()?.peel_to_commit()? 
- } else { - // Try to find the branch as local first, then remote - let base_ref = if let Ok(local_ref) = - repo.find_reference(&format!("refs/heads/{}", base_branch_name)) - { - local_ref - } else if let Ok(remote_ref) = - repo.find_reference(&format!("refs/remotes/{}", base_branch_name)) - { - remote_ref - } else { - return Err(git2::Error::from_str(&format!( - "Base branch '{}' not found", - base_branch_name - ))); - }; - base_ref.peel_to_commit()? - }; - - // Create the new branch - let _new_branch = repo.branch(branch_name, &base_commit, false)?; - - // Get the commit date for the new branch (same as base commit) - let last_commit_date = { - let timestamp = base_commit.time().seconds(); - DateTime::from_timestamp(timestamp, 0).unwrap_or_else(Utc::now) - }; - - Ok(GitBranch { - name: branch_name.to_string(), - is_current: false, - is_remote: false, - last_commit_date, - }) - } -} diff --git a/backend/src/models/task_attempt.rs b/backend/src/models/task_attempt.rs deleted file mode 100644 index f7234b88..00000000 --- a/backend/src/models/task_attempt.rs +++ /dev/null @@ -1,1213 +0,0 @@ -use std::path::Path; - -use chrono::{DateTime, Utc}; -use git2::{BranchType, Error as GitError, Repository}; -use serde::{Deserialize, Serialize}; -use sqlx::{FromRow, SqlitePool, Type}; -use tracing::info; -use ts_rs::TS; -use uuid::Uuid; - -use super::{project::Project, task::Task}; -use crate::services::{ - CreatePrRequest, GitHubRepoInfo, GitHubService, GitHubServiceError, GitService, - GitServiceError, ProcessService, -}; - -// Constants for git diff operations -const GIT_DIFF_CONTEXT_LINES: u32 = 3; -const GIT_DIFF_INTERHUNK_LINES: u32 = 0; - -#[derive(Debug)] -pub enum TaskAttemptError { - Database(sqlx::Error), - Git(GitError), - GitService(GitServiceError), - GitHubService(GitHubServiceError), - TaskNotFound, - ProjectNotFound, - ValidationError(String), - BranchNotFound(String), -} - -impl std::fmt::Display for TaskAttemptError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - TaskAttemptError::Database(e) => write!(f, "Database error: {}", e), - TaskAttemptError::Git(e) => write!(f, "Git error: {}", e), - TaskAttemptError::GitService(e) => write!(f, "Git service error: {}", e), - TaskAttemptError::GitHubService(e) => write!(f, "GitHub service error: {}", e), - TaskAttemptError::TaskNotFound => write!(f, "Task not found"), - TaskAttemptError::ProjectNotFound => write!(f, "Project not found"), - TaskAttemptError::ValidationError(e) => write!(f, "Validation error: {}", e), - TaskAttemptError::BranchNotFound(branch) => write!(f, "Branch '{}' not found", branch), - } - } -} - -impl std::error::Error for TaskAttemptError {} - -impl From for TaskAttemptError { - fn from(err: sqlx::Error) -> Self { - TaskAttemptError::Database(err) - } -} - -impl From for TaskAttemptError { - fn from(err: GitError) -> Self { - TaskAttemptError::Git(err) - } -} - -impl From for TaskAttemptError { - fn from(err: GitServiceError) -> Self { - TaskAttemptError::GitService(err) - } -} - -impl From for TaskAttemptError { - fn from(err: GitHubServiceError) -> Self { - TaskAttemptError::GitHubService(err) - } -} - -#[derive(Debug, Clone, Type, Serialize, Deserialize, PartialEq, TS)] -#[sqlx(type_name = "task_attempt_status", rename_all = "lowercase")] -#[serde(rename_all = "lowercase")] -#[ts(export)] -pub enum TaskAttemptStatus { - SetupRunning, - SetupComplete, - SetupFailed, - ExecutorRunning, - ExecutorComplete, - ExecutorFailed, -} - -#[derive(Debug, Clone, FromRow, Serialize, 
Deserialize, TS)] -#[ts(export)] -pub struct TaskAttempt { - pub id: Uuid, - pub task_id: Uuid, // Foreign key to Task - pub worktree_path: String, - pub branch: String, // Git branch name for this task attempt - pub base_branch: String, // Base branch this attempt is based on - pub merge_commit: Option, - pub executor: Option, // Name of the executor to use - pub pr_url: Option, // GitHub PR URL - pub pr_number: Option, // GitHub PR number - pub pr_status: Option, // open, closed, merged - pub pr_merged_at: Option>, // When PR was merged - pub worktree_deleted: bool, // Flag indicating if worktree has been cleaned up - pub setup_completed_at: Option>, // When setup script was last completed - pub created_at: DateTime, - pub updated_at: DateTime, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct CreateTaskAttempt { - pub executor: Option, // Optional executor name (defaults to "echo") - pub base_branch: Option, // Optional base branch to checkout (defaults to current HEAD) -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct UpdateTaskAttempt { - // Currently no updateable fields, but keeping struct for API compatibility -} - -/// GitHub PR creation parameters -pub struct CreatePrParams<'a> { - pub attempt_id: Uuid, - pub task_id: Uuid, - pub project_id: Uuid, - pub github_token: &'a str, - pub title: &'a str, - pub body: Option<&'a str>, - pub base_branch: Option<&'a str>, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct CreateFollowUpAttempt { - pub prompt: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub enum DiffChunkType { - Equal, - Insert, - Delete, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct DiffChunk { - pub chunk_type: DiffChunkType, - pub content: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct FileDiff { - pub path: String, - pub chunks: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct WorktreeDiff { - pub files: Vec, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct BranchStatus { - pub is_behind: bool, - pub commits_behind: usize, - pub commits_ahead: usize, - pub up_to_date: bool, - pub merged: bool, - pub has_uncommitted_changes: bool, - pub base_branch_name: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub enum ExecutionState { - NotStarted, - SetupRunning, - SetupComplete, - SetupFailed, - SetupStopped, - CodingAgentRunning, - CodingAgentComplete, - CodingAgentFailed, - CodingAgentStopped, - Complete, -} - -#[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct TaskAttemptState { - pub execution_state: ExecutionState, - pub has_changes: bool, - pub has_setup_script: bool, - pub setup_process_id: Option, - pub coding_agent_process_id: Option, -} - -/// Context data for resume operations (simplified) -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AttemptResumeContext { - pub execution_history: String, - pub cumulative_diffs: String, -} - -#[derive(Debug)] -pub struct TaskAttemptContext { - pub task_attempt: TaskAttempt, - pub task: Task, - pub project: Project, -} - -impl TaskAttempt { - /// Load task attempt with full validation - ensures task_attempt belongs to task and task belongs to project - pub async fn load_context( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result { - // Single query with JOIN validation to 
ensure proper relationships - let task_attempt = sqlx::query_as!( - TaskAttempt, - r#"SELECT ta.id AS "id!: Uuid", - ta.task_id AS "task_id!: Uuid", - ta.worktree_path, - ta.branch, - ta.base_branch, - ta.merge_commit, - ta.executor, - ta.pr_url, - ta.pr_number, - ta.pr_status, - ta.pr_merged_at AS "pr_merged_at: DateTime", - ta.worktree_deleted AS "worktree_deleted!: bool", - ta.setup_completed_at AS "setup_completed_at: DateTime", - ta.created_at AS "created_at!: DateTime", - ta.updated_at AS "updated_at!: DateTime" - FROM task_attempts ta - JOIN tasks t ON ta.task_id = t.id - JOIN projects p ON t.project_id = p.id - WHERE ta.id = $1 AND t.id = $2 AND p.id = $3"#, - attempt_id, - task_id, - project_id - ) - .fetch_optional(pool) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - // Load task and project (we know they exist due to JOIN validation) - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - let project = Project::find_by_id(pool, project_id) - .await? - .ok_or(TaskAttemptError::ProjectNotFound)?; - - Ok(TaskAttemptContext { - task_attempt, - task, - project, - }) - } - - /// Helper function to mark a worktree as deleted in the database - pub async fn mark_worktree_deleted( - pool: &SqlitePool, - attempt_id: Uuid, - ) -> Result<(), sqlx::Error> { - sqlx::query!( - "UPDATE task_attempts SET worktree_deleted = TRUE, updated_at = datetime('now') WHERE id = ?", - attempt_id - ) - .execute(pool) - .await?; - Ok(()) - } - - /// Get the base directory for vibe-kanban worktrees - pub fn get_worktree_base_dir() -> std::path::PathBuf { - let dir_name = if cfg!(debug_assertions) { - "vibe-kanban-dev" - } else { - "vibe-kanban" - }; - - if cfg!(target_os = "macos") { - // macOS already uses /var/folders/... 
which is persistent storage - std::env::temp_dir().join(dir_name) - } else if cfg!(target_os = "linux") { - // Linux: use /var/tmp instead of /tmp to avoid RAM usage - std::path::PathBuf::from("/var/tmp").join(dir_name) - } else { - // Windows and other platforms: use temp dir with vibe-kanban subdirectory - std::env::temp_dir().join(dir_name) - } - } - - pub async fn find_by_id(pool: &SqlitePool, id: Uuid) -> Result, sqlx::Error> { - sqlx::query_as!( - TaskAttempt, - r#"SELECT id AS "id!: Uuid", - task_id AS "task_id!: Uuid", - worktree_path, - branch, - merge_commit, - base_branch, - executor, - pr_url, - pr_number, - pr_status, - pr_merged_at AS "pr_merged_at: DateTime", - worktree_deleted AS "worktree_deleted!: bool", - setup_completed_at AS "setup_completed_at: DateTime", - created_at AS "created_at!: DateTime", - updated_at AS "updated_at!: DateTime" - FROM task_attempts - WHERE id = $1"#, - id - ) - .fetch_optional(pool) - .await - } - - pub async fn find_by_task_id( - pool: &SqlitePool, - task_id: Uuid, - ) -> Result, sqlx::Error> { - sqlx::query_as!( - TaskAttempt, - r#"SELECT id AS "id!: Uuid", - task_id AS "task_id!: Uuid", - worktree_path, - branch, - base_branch, - merge_commit, - executor, - pr_url, - pr_number, - pr_status, - pr_merged_at AS "pr_merged_at: DateTime", - worktree_deleted AS "worktree_deleted!: bool", - setup_completed_at AS "setup_completed_at: DateTime", - created_at AS "created_at!: DateTime", - updated_at AS "updated_at!: DateTime" - FROM task_attempts - WHERE task_id = $1 - ORDER BY created_at DESC"#, - task_id - ) - .fetch_all(pool) - .await - } - - /// Find task attempts by task_id with project git repo path for cleanup operations - pub async fn find_by_task_id_with_project( - pool: &SqlitePool, - task_id: Uuid, - ) -> Result, sqlx::Error> { - let records = sqlx::query!( - r#" - SELECT ta.id as "attempt_id!: Uuid", ta.worktree_path, p.git_repo_path as "git_repo_path!" - FROM task_attempts ta - JOIN tasks t ON ta.task_id = t.id - JOIN projects p ON t.project_id = p.id - WHERE ta.task_id = $1 - "#, - task_id - ) - .fetch_all(pool) - .await?; - - Ok(records - .into_iter() - .map(|r| (r.attempt_id, r.worktree_path, r.git_repo_path)) - .collect()) - } - - /// Find task attempts that are expired (24+ hours since last activity) and eligible for worktree cleanup - /// Activity includes: execution completion, task attempt updates (including worktree recreation), - /// and any attempts that are currently in progress - pub async fn find_expired_for_cleanup( - pool: &SqlitePool, - ) -> Result, sqlx::Error> { - let records = sqlx::query!( - r#" - SELECT ta.id as "attempt_id!: Uuid", ta.worktree_path, p.git_repo_path as "git_repo_path!" 
- FROM task_attempts ta - LEFT JOIN execution_processes ep ON ta.id = ep.task_attempt_id AND ep.completed_at IS NOT NULL - JOIN tasks t ON ta.task_id = t.id - JOIN projects p ON t.project_id = p.id - WHERE ta.worktree_deleted = FALSE - -- Exclude attempts with any running processes (in progress) - AND ta.id NOT IN ( - SELECT DISTINCT ep2.task_attempt_id - FROM execution_processes ep2 - WHERE ep2.completed_at IS NULL - ) - GROUP BY ta.id, ta.worktree_path, p.git_repo_path, ta.updated_at - HAVING datetime('now', '-24 hours') > datetime( - MAX( - CASE - WHEN ep.completed_at IS NOT NULL THEN ep.completed_at - ELSE ta.updated_at - END - ) - ) - ORDER BY MAX( - CASE - WHEN ep.completed_at IS NOT NULL THEN ep.completed_at - ELSE ta.updated_at - END - ) ASC - "# - ) - .fetch_all(pool) - .await?; - - Ok(records - .into_iter() - .filter_map(|r| { - r.worktree_path - .map(|path| (r.attempt_id, path, r.git_repo_path)) - }) - .collect()) - } - - pub async fn create( - pool: &SqlitePool, - data: &CreateTaskAttempt, - task_id: Uuid, - ) -> Result { - let attempt_id = Uuid::new_v4(); - // let prefixed_id = format!("vibe-kanban-{}", attempt_id); - - // First, get the task to get the project_id - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - // Create a unique and helpful branch name - let task_title_id = crate::utils::text::git_branch_id(&task.title); - let task_attempt_branch = format!( - "vk-{}-{}", - crate::utils::text::short_uuid(&attempt_id), - task_title_id - ); - - // Generate worktree path using vibe-kanban specific directory - let worktree_path = Self::get_worktree_base_dir().join(&task_attempt_branch); - let worktree_path_str = worktree_path.to_string_lossy().to_string(); - - // Then get the project using the project_id - let project = Project::find_by_id(pool, task.project_id) - .await? - .ok_or(TaskAttemptError::ProjectNotFound)?; - - // Create GitService instance - let git_service = GitService::new(&project.git_repo_path)?; - - // Determine the resolved base branch name first - let resolved_base_branch = if let Some(ref base_branch) = data.base_branch { - base_branch.clone() - } else { - // Default to current HEAD branch name or "main" - git_service.get_default_branch_name()? 
- }; - - // Create the worktree using GitService - git_service.create_worktree( - &task_attempt_branch, - &worktree_path, - data.base_branch.as_deref(), - )?; - - // Insert the record into the database - Ok(sqlx::query_as!( - TaskAttempt, - r#"INSERT INTO task_attempts (id, task_id, worktree_path, branch, base_branch, merge_commit, executor, pr_url, pr_number, pr_status, pr_merged_at, worktree_deleted, setup_completed_at) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) - RETURNING id as "id!: Uuid", task_id as "task_id!: Uuid", worktree_path, branch, base_branch, merge_commit, executor, pr_url, pr_number, pr_status, pr_merged_at as "pr_merged_at: DateTime", worktree_deleted as "worktree_deleted!: bool", setup_completed_at as "setup_completed_at: DateTime", created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime""#, - attempt_id, - task_id, - worktree_path_str, - task_attempt_branch, - resolved_base_branch, - Option::::None, // merge_commit is always None during creation - data.executor, - Option::::None, // pr_url is None during creation - Option::::None, // pr_number is None during creation - Option::::None, // pr_status is None during creation - Option::>::None, // pr_merged_at is None during creation - false, // worktree_deleted is false during creation - Option::>::None // setup_completed_at is None during creation - ) - .fetch_one(pool) - .await?) - } - - /// Perform the actual merge operation using GitService - fn perform_merge_operation( - worktree_path: &str, - main_repo_path: &str, - branch_name: &str, - base_branch: &str, - task_title: &str, - task_description: &Option, - task_id: Uuid, - ) -> Result { - let git_service = GitService::new(main_repo_path)?; - let worktree_path = Path::new(worktree_path); - - // Extract first section of UUID (before first hyphen) - let task_uuid_str = task_id.to_string(); - let first_uuid_section = task_uuid_str.split('-').next().unwrap_or(&task_uuid_str); - - // Create commit message with task title and description - let mut commit_message = format!("{} (vibe-kanban {})", task_title, first_uuid_section); - - // Add description on next line if it exists - if let Some(description) = task_description { - if !description.trim().is_empty() { - commit_message.push_str("\n\n"); - commit_message.push_str(description); - } - } - - git_service - .merge_changes(worktree_path, branch_name, base_branch, &commit_message) - .map_err(TaskAttemptError::from) - } - - /// Perform the actual git rebase operations using GitService - fn perform_rebase_operation( - worktree_path: &str, - main_repo_path: &str, - new_base_branch: Option, - old_base_branch: String, - ) -> Result { - let git_service = GitService::new(main_repo_path)?; - let worktree_path = Path::new(worktree_path); - - git_service - .rebase_branch(worktree_path, new_base_branch.as_deref(), &old_base_branch) - .map_err(TaskAttemptError::from) - } - - /// Merge the worktree changes back to the main repository - pub async fn merge_changes( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result { - // Load context with full validation - let ctx = TaskAttempt::load_context(pool, attempt_id, task_id, project_id).await?; - - // Ensure worktree exists (recreate if needed for cold task support) - let worktree_path = - Self::ensure_worktree_exists(pool, attempt_id, project_id, "merge").await?; - - // Perform the actual merge operation - let merge_commit_id = Self::perform_merge_operation( - &worktree_path, - &ctx.project.git_repo_path, - 
&ctx.task_attempt.branch, - &ctx.task_attempt.base_branch, - &ctx.task.title, - &ctx.task.description, - ctx.task.id, - )?; - - // Update the task attempt with the merge commit - sqlx::query!( - "UPDATE task_attempts SET merge_commit = $1, updated_at = datetime('now') WHERE id = $2", - merge_commit_id, - attempt_id - ) - .execute(pool) - .await?; - - Ok(merge_commit_id) - } - - /// Start the execution flow for a task attempt (setup script + executor) - pub async fn start_execution( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - ProcessService::start_execution(pool, app_state, attempt_id, task_id, project_id).await - } - - /// Start a dev server for this task attempt - pub async fn start_dev_server( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - ProcessService::start_dev_server(pool, app_state, attempt_id, task_id, project_id).await - } - - /// Start a follow-up execution using the same executor type as the first process - /// Returns the attempt_id that was actually used (always the original attempt_id for session continuity) - pub async fn start_followup_execution( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - prompt: &str, - ) -> Result { - ProcessService::start_followup_execution( - pool, app_state, attempt_id, task_id, project_id, prompt, - ) - .await - } - - /// Ensure worktree exists, recreating from branch if needed (cold task support) - pub async fn ensure_worktree_exists( - pool: &SqlitePool, - attempt_id: Uuid, - project_id: Uuid, - context: &str, - ) -> Result { - let task_attempt = TaskAttempt::find_by_id(pool, attempt_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - // Return existing path if worktree still exists - if std::path::Path::new(&task_attempt.worktree_path).exists() { - return Ok(task_attempt.worktree_path); - } - - // Recreate worktree from branch - info!( - "Worktree {} no longer exists, recreating from branch {} for {}", - task_attempt.worktree_path, task_attempt.branch, context - ); - - let new_worktree_path = - Self::recreate_worktree_from_branch(pool, &task_attempt, project_id).await?; - - // Update database with new path, reset worktree_deleted flag, and clear setup completion - sqlx::query!( - "UPDATE task_attempts SET worktree_path = $1, worktree_deleted = FALSE, setup_completed_at = NULL, updated_at = datetime('now') WHERE id = $2", - new_worktree_path, - attempt_id - ) - .execute(pool) - .await?; - - Ok(new_worktree_path) - } - - /// Recreate a worktree from an existing branch (for cold task support) - pub async fn recreate_worktree_from_branch( - pool: &SqlitePool, - task_attempt: &TaskAttempt, - project_id: Uuid, - ) -> Result { - let project = Project::find_by_id(pool, project_id) - .await? 
- .ok_or(TaskAttemptError::ProjectNotFound)?; - - // Create GitService instance - let git_service = GitService::new(&project.git_repo_path)?; - - // Use the stored worktree path from database - this ensures we recreate in the exact same location - // where Claude originally created its session, maintaining session continuity - let stored_worktree_path = std::path::PathBuf::from(&task_attempt.worktree_path); - - let result_path = git_service - .recreate_worktree_from_branch(&task_attempt.branch, &stored_worktree_path) - .await?; - - Ok(result_path.to_string_lossy().to_string()) - } - - /// Get the git diff between the base commit and the current committed worktree state - pub async fn get_diff( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result { - // Load context with full validation - let ctx = TaskAttempt::load_context(pool, attempt_id, task_id, project_id).await?; - - // Create GitService instance - let git_service = GitService::new(&ctx.project.git_repo_path)?; - - if let Some(merge_commit_id) = &ctx.task_attempt.merge_commit { - // Task attempt has been merged - show the diff from the merge commit - git_service - .get_enhanced_diff( - Path::new(""), - Some(merge_commit_id), - &ctx.task_attempt.base_branch, - ) - .map_err(TaskAttemptError::from) - } else { - // Task attempt not yet merged - get worktree diff - // Ensure worktree exists (recreate if needed for cold task support) - let worktree_path = - Self::ensure_worktree_exists(pool, attempt_id, project_id, "diff").await?; - - git_service - .get_enhanced_diff( - Path::new(&worktree_path), - None, - &ctx.task_attempt.base_branch, - ) - .map_err(TaskAttemptError::from) - } - } - - /// Get the branch status for this task attempt - pub async fn get_branch_status( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result { - // Load context with full validation - let ctx = TaskAttempt::load_context(pool, attempt_id, task_id, project_id).await?; - - use git2::{Status, StatusOptions}; - - // Ensure worktree exists (recreate if needed for cold task support) - let main_repo = Repository::open(&ctx.project.git_repo_path)?; - let attempt_branch = ctx.task_attempt.branch.clone(); - - // ── locate the commit pointed to by the attempt branch ─────────────────────── - let attempt_ref = main_repo - // try "refs/heads/" first, then raw name - .find_reference(&format!("refs/heads/{}", attempt_branch)) - .or_else(|_| main_repo.find_reference(&attempt_branch))?; - let attempt_oid = attempt_ref.target().unwrap(); - - // ── determine the base branch & ahead/behind counts ───────────────────────── - let base_branch_name = ctx.task_attempt.base_branch.clone(); - - // 1. prefer the branch’s configured upstream, if any - if let Ok(local_branch) = main_repo.find_branch(&attempt_branch, BranchType::Local) { - if let Ok(upstream) = local_branch.upstream() { - if let Some(_name) = upstream.name()? { - if let Some(base_oid) = upstream.get().target() { - let (_ahead, _behind) = - main_repo.graph_ahead_behind(attempt_oid, base_oid)?; - // Ignore upstream since we use stored base branch - } - } - } - } - - // Calculate ahead/behind counts using the stored base branch - let (commits_ahead, commits_behind) = - if let Ok(base_branch) = main_repo.find_branch(&base_branch_name, BranchType::Local) { - if let Some(base_oid) = base_branch.get().target() { - main_repo.graph_ahead_behind(attempt_oid, base_oid)? 
- } else { - (0, 0) // Base branch has no commits - } - } else { - // Base branch doesn't exist, assume no relationship - (0, 0) - }; - - // ── detect any uncommitted / untracked changes ─────────────────────────────── - let repo_for_status = Repository::open(&ctx.project.git_repo_path)?; - - let mut status_opts = StatusOptions::new(); - status_opts - .include_untracked(true) - .recurse_untracked_dirs(true) - .include_ignored(false); - - let has_uncommitted_changes = repo_for_status - .statuses(Some(&mut status_opts))? - .iter() - .any(|e| e.status() != Status::CURRENT); - - // ── assemble & return ──────────────────────────────────────────────────────── - Ok(BranchStatus { - is_behind: commits_behind > 0, - commits_behind, - commits_ahead, - up_to_date: commits_behind == 0 && commits_ahead == 0, - merged: ctx.task_attempt.merge_commit.is_some(), - has_uncommitted_changes, - base_branch_name, - }) - } - - /// Rebase the worktree branch onto specified base branch (or current HEAD if none specified) - pub async fn rebase_attempt( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - new_base_branch: Option, - ) -> Result { - // Load context with full validation - let ctx = TaskAttempt::load_context(pool, attempt_id, task_id, project_id).await?; - - // Use the stored base branch if no new base branch is provided - let effective_base_branch = - new_base_branch.or_else(|| Some(ctx.task_attempt.base_branch.clone())); - - // Ensure worktree exists (recreate if needed for cold task support) - let worktree_path = - Self::ensure_worktree_exists(pool, attempt_id, project_id, "rebase").await?; - - let new_base_commit = Self::perform_rebase_operation( - &worktree_path, - &ctx.project.git_repo_path, - effective_base_branch.clone(), - ctx.task_attempt.base_branch.clone(), - )?; - - // Update the database with the new base branch if it was changed - if let Some(new_base_branch) = &effective_base_branch { - if new_base_branch != &ctx.task_attempt.base_branch { - // For remote branches, store the local branch name in the database - let db_branch_name = if new_base_branch.starts_with("origin/") { - new_base_branch.strip_prefix("origin/").unwrap() - } else { - new_base_branch - }; - - sqlx::query!( - "UPDATE task_attempts SET base_branch = $1, updated_at = datetime('now') WHERE id = $2", - db_branch_name, - attempt_id - ) - .execute(pool) - .await?; - } - } - - Ok(new_base_commit) - } - - /// Delete a file from the worktree and commit the change - pub async fn delete_file( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - file_path: &str, - ) -> Result { - // Load context with full validation - let ctx = TaskAttempt::load_context(pool, attempt_id, task_id, project_id).await?; - - // Ensure worktree exists (recreate if needed for cold task support) - let worktree_path_str = - Self::ensure_worktree_exists(pool, attempt_id, project_id, "delete file").await?; - - // Create GitService instance - let git_service = GitService::new(&ctx.project.git_repo_path)?; - - // Use GitService to delete file and commit - let commit_id = - git_service.delete_file_and_commit(Path::new(&worktree_path_str), file_path)?; - - Ok(commit_id) - } - - /// Create a GitHub PR for this task attempt - pub async fn create_github_pr( - pool: &SqlitePool, - params: CreatePrParams<'_>, - ) -> Result { - // Load context with full validation - let ctx = - TaskAttempt::load_context(pool, params.attempt_id, params.task_id, params.project_id) - .await?; - - // Ensure worktree exists (recreate if 
needed for cold task support) - let worktree_path = - Self::ensure_worktree_exists(pool, params.attempt_id, params.project_id, "GitHub PR") - .await?; - - // Create GitHub service instance - let github_service = GitHubService::new(params.github_token)?; - - // Use GitService to get the remote URL, then create GitHubRepoInfo - let git_service = GitService::new(&ctx.project.git_repo_path)?; - let (owner, repo_name) = git_service - .get_github_repo_info() - .map_err(|e| TaskAttemptError::ValidationError(e.to_string()))?; - let repo_info = GitHubRepoInfo { owner, repo_name }; - - // Push the branch to GitHub first - Self::push_branch_to_github( - &ctx.project.git_repo_path, - &worktree_path, - &ctx.task_attempt.branch, - params.github_token, - )?; - - // Create the PR using GitHub service - let pr_request = CreatePrRequest { - title: params.title.to_string(), - body: params.body.map(|s| s.to_string()), - head_branch: ctx.task_attempt.branch.clone(), - base_branch: params.base_branch.unwrap_or("main").to_string(), - }; - - let pr_info = github_service.create_pr(&repo_info, &pr_request).await?; - - // Update the task attempt with PR information - sqlx::query!( - "UPDATE task_attempts SET pr_url = $1, pr_number = $2, pr_status = $3, updated_at = datetime('now') WHERE id = $4", - pr_info.url, - pr_info.number, - pr_info.status, - params.attempt_id - ) - .execute(pool) - .await?; - - Ok(pr_info.url) - } - - /// Push the branch to GitHub remote - fn push_branch_to_github( - git_repo_path: &str, - worktree_path: &str, - branch_name: &str, - github_token: &str, - ) -> Result<(), TaskAttemptError> { - // Use GitService to push to GitHub - let git_service = GitService::new(git_repo_path)?; - git_service - .push_to_github(Path::new(worktree_path), branch_name, github_token) - .map_err(TaskAttemptError::from) - } - - /// Update PR status and merge commit - pub async fn update_pr_status( - pool: &SqlitePool, - attempt_id: Uuid, - status: &str, - merged_at: Option>, - merge_commit_sha: Option<&str>, - ) -> Result<(), sqlx::Error> { - sqlx::query!( - "UPDATE task_attempts SET pr_status = $1, pr_merged_at = $2, merge_commit = $3, updated_at = datetime('now') WHERE id = $4", - status, - merged_at, - merge_commit_sha, - attempt_id - ) - .execute(pool) - .await?; - - Ok(()) - } - - /// Get the current execution state for a task attempt - pub async fn get_execution_state( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result { - // Load context with full validation - let ctx = TaskAttempt::load_context(pool, attempt_id, task_id, project_id).await?; - - let has_setup_script = ctx - .project - .setup_script - .as_ref() - .map(|script| !script.trim().is_empty()) - .unwrap_or(false); - - // Get all execution processes for this attempt, ordered by created_at - let processes = - crate::models::execution_process::ExecutionProcess::find_by_task_attempt_id( - pool, attempt_id, - ) - .await?; - - // Find setup and coding agent processes - let setup_process = processes.iter().find(|p| { - matches!( - p.process_type, - crate::models::execution_process::ExecutionProcessType::SetupScript - ) - }); - - let coding_agent_process = processes.iter().find(|p| { - matches!( - p.process_type, - crate::models::execution_process::ExecutionProcessType::CodingAgent - ) - }); - - // Determine execution state based on processes - let execution_state = if let Some(setup) = setup_process { - match setup.status { - crate::models::execution_process::ExecutionProcessStatus::Running => { - 
ExecutionState::SetupRunning - } - crate::models::execution_process::ExecutionProcessStatus::Completed => { - if let Some(agent) = coding_agent_process { - match agent.status { - crate::models::execution_process::ExecutionProcessStatus::Running => { - ExecutionState::CodingAgentRunning - } - crate::models::execution_process::ExecutionProcessStatus::Completed => { - ExecutionState::CodingAgentComplete - } - crate::models::execution_process::ExecutionProcessStatus::Failed => { - ExecutionState::CodingAgentFailed - } - crate::models::execution_process::ExecutionProcessStatus::Killed => { - ExecutionState::CodingAgentStopped - } - } - } else { - ExecutionState::SetupComplete - } - } - crate::models::execution_process::ExecutionProcessStatus::Failed => { - ExecutionState::SetupFailed - } - crate::models::execution_process::ExecutionProcessStatus::Killed => { - ExecutionState::SetupStopped - } - } - } else if let Some(agent) = coding_agent_process { - // No setup script, only coding agent - match agent.status { - crate::models::execution_process::ExecutionProcessStatus::Running => { - ExecutionState::CodingAgentRunning - } - crate::models::execution_process::ExecutionProcessStatus::Completed => { - ExecutionState::CodingAgentComplete - } - crate::models::execution_process::ExecutionProcessStatus::Failed => { - ExecutionState::CodingAgentFailed - } - crate::models::execution_process::ExecutionProcessStatus::Killed => { - ExecutionState::CodingAgentStopped - } - } - } else { - // No processes started yet - ExecutionState::NotStarted - }; - - // Check if there are any changes (quick diff check) - let has_changes = match Self::get_diff(pool, attempt_id, task_id, project_id).await { - Ok(diff) => !diff.files.is_empty(), - Err(_) => false, // If diff fails, assume no changes - }; - - Ok(TaskAttemptState { - execution_state, - has_changes, - has_setup_script, - setup_process_id: setup_process.map(|p| p.id.to_string()), - coding_agent_process_id: coding_agent_process.map(|p| p.id.to_string()), - }) - } - - /// Check if setup script has been completed for this worktree - pub async fn is_setup_completed( - pool: &SqlitePool, - attempt_id: Uuid, - ) -> Result { - let task_attempt = Self::find_by_id(pool, attempt_id) - .await? 
- .ok_or(TaskAttemptError::TaskNotFound)?; - - Ok(task_attempt.setup_completed_at.is_some()) - } - - /// Mark setup script as completed for this worktree - pub async fn mark_setup_completed( - pool: &SqlitePool, - attempt_id: Uuid, - ) -> Result<(), TaskAttemptError> { - sqlx::query!( - "UPDATE task_attempts SET setup_completed_at = datetime('now'), updated_at = datetime('now') WHERE id = ?", - attempt_id - ) - .execute(pool) - .await?; - - Ok(()) - } - - /// Get execution history from current attempt only (simplified) - pub async fn get_attempt_execution_history( - pool: &SqlitePool, - attempt_id: Uuid, - ) -> Result<String, TaskAttemptError> { - // Get all coding agent processes for this attempt - let processes = - crate::models::execution_process::ExecutionProcess::find_by_task_attempt_id( - pool, attempt_id, - ) - .await?; - - // Filter to coding agent processes only and aggregate stdout - let coding_processes: Vec<_> = processes - .into_iter() - .filter(|p| { - matches!( - p.process_type, - crate::models::execution_process::ExecutionProcessType::CodingAgent - ) - }) - .collect(); - - let mut history = String::new(); - for process in coding_processes { - if let Some(stdout) = process.stdout { - if !stdout.trim().is_empty() { - history.push_str(&stdout); - history.push('\n'); - } - } - } - - Ok(history) - } - - /// Get diff between base_branch and current attempt (simplified) - pub async fn get_attempt_diff( - pool: &SqlitePool, - attempt_id: Uuid, - project_id: Uuid, - ) -> Result<String, TaskAttemptError> { - // Get the task attempt with base_branch - let attempt = Self::find_by_id(pool, attempt_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - // Get the project - let project = Project::find_by_id(pool, project_id) - .await? - .ok_or(TaskAttemptError::ProjectNotFound)?; - - // Open the main repository - let repo = Repository::open(&project.git_repo_path)?; - - // Get base branch commit - let base_branch = repo - .find_branch(&attempt.base_branch, git2::BranchType::Local) - .map_err(|_| TaskAttemptError::BranchNotFound(attempt.base_branch.clone()))?; - let base_commit = base_branch.get().peel_to_commit()?; - - // Get current branch commit - let current_branch = repo - .find_branch(&attempt.branch, git2::BranchType::Local) - .map_err(|_| TaskAttemptError::BranchNotFound(attempt.branch.clone()))?; - let current_commit = current_branch.get().peel_to_commit()?; - - // Create diff between base and current - let base_tree = base_commit.tree()?; - let current_tree = current_commit.tree()?; - - let mut diff_opts = git2::DiffOptions::new(); - diff_opts.context_lines(GIT_DIFF_CONTEXT_LINES); - diff_opts.interhunk_lines(GIT_DIFF_INTERHUNK_LINES); - - let diff = - repo.diff_tree_to_tree(Some(&base_tree), Some(&current_tree), Some(&mut diff_opts))?; - - // Convert to text format - let mut diff_text = String::new(); - diff.print(git2::DiffFormat::Patch, |_delta, _hunk, line| { - let content = std::str::from_utf8(line.content()).unwrap_or(""); - diff_text.push_str(&format!("{}{}", line.origin(), content)); - true - })?; - - Ok(diff_text) - } - - /// Get comprehensive resume context for Gemini followup execution (simplified) - pub async fn get_attempt_resume_context( - pool: &SqlitePool, - attempt_id: Uuid, - _task_id: Uuid, - project_id: Uuid, - ) -> Result<AttemptResumeContext, TaskAttemptError> { - // Get execution history from current attempt only - let execution_history = Self::get_attempt_execution_history(pool, attempt_id).await?; - - // Get diff between base_branch and current attempt - let cumulative_diffs = Self::get_attempt_diff(pool, attempt_id, project_id).await?; -
Ok(AttemptResumeContext { - execution_history, - cumulative_diffs, - }) - } -} diff --git a/backend/src/routes/auth.rs b/backend/src/routes/auth.rs deleted file mode 100644 index 3135ebf9..00000000 --- a/backend/src/routes/auth.rs +++ /dev/null @@ -1,262 +0,0 @@ -use axum::{ - extract::{Request, State}, - middleware::Next, - response::{Json as ResponseJson, Response}, - routing::{get, post}, - Json, Router, -}; -use ts_rs::TS; - -use crate::{app_state::AppState, models::ApiResponse}; - -pub fn auth_router() -> Router { - Router::new() - .route("/auth/github/device/start", post(device_start)) - .route("/auth/github/device/poll", post(device_poll)) - .route("/auth/github/check", get(github_check_token)) -} - -#[derive(serde::Deserialize)] -struct DeviceStartRequest {} - -#[derive(serde::Serialize, TS)] -#[ts(export)] -pub struct DeviceStartResponse { - pub device_code: String, - pub user_code: String, - pub verification_uri: String, - pub expires_in: u32, - pub interval: u32, -} - -#[derive(serde::Deserialize)] -struct DevicePollRequest { - device_code: String, -} - -/// POST /auth/github/device/start -async fn device_start() -> ResponseJson> { - let client_id = option_env!("GITHUB_CLIENT_ID").unwrap_or("Ov23li9bxz3kKfPOIsGm"); - - let params = [("client_id", client_id), ("scope", "user:email,repo")]; - let client = reqwest::Client::new(); - let res = client - .post("https://github.com/login/device/code") - .header("Accept", "application/json") - .form(¶ms) - .send() - .await; - let res = match res { - Ok(r) => r, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to contact GitHub: {e}" - ))); - } - }; - let json: serde_json::Value = match res.json().await { - Ok(j) => j, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to parse GitHub response: {e}" - ))); - } - }; - if let ( - Some(device_code), - Some(user_code), - Some(verification_uri), - Some(expires_in), - Some(interval), - ) = ( - json.get("device_code").and_then(|v| v.as_str()), - json.get("user_code").and_then(|v| v.as_str()), - json.get("verification_uri").and_then(|v| v.as_str()), - json.get("expires_in").and_then(|v| v.as_u64()), - json.get("interval").and_then(|v| v.as_u64()), - ) { - ResponseJson(ApiResponse::success(DeviceStartResponse { - device_code: device_code.to_string(), - user_code: user_code.to_string(), - verification_uri: verification_uri.to_string(), - expires_in: expires_in.try_into().unwrap_or(600), - interval: interval.try_into().unwrap_or(5), - })) - } else { - ResponseJson(ApiResponse::error(&format!("GitHub error: {}", json))) - } -} - -/// POST /auth/github/device/poll -async fn device_poll( - State(app_state): State, - Json(payload): Json, -) -> ResponseJson> { - let client_id = option_env!("GITHUB_CLIENT_ID").unwrap_or("Ov23li9bxz3kKfPOIsGm"); - - let params = [ - ("client_id", client_id), - ("device_code", payload.device_code.as_str()), - ("grant_type", "urn:ietf:params:oauth:grant-type:device_code"), - ]; - let client = reqwest::Client::new(); - let res = client - .post("https://github.com/login/oauth/access_token") - .header("Accept", "application/json") - .form(¶ms) - .send() - .await; - let res = match res { - Ok(r) => r, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to contact GitHub: {e}" - ))); - } - }; - let json: serde_json::Value = match res.json().await { - Ok(j) => j, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to parse GitHub response: {e}" - ))); - } - }; - if let Some(error) = 
json.get("error").and_then(|v| v.as_str()) { - // Not authorized yet, or other error - return ResponseJson(ApiResponse::error(error)); - } - let access_token = json.get("access_token").and_then(|v| v.as_str()); - if let Some(access_token) = access_token { - // Fetch user info - let user_res = client - .get("https://api.github.com/user") - .bearer_auth(access_token) - .header("User-Agent", "vibe-kanban-app") - .send() - .await; - let user_json: serde_json::Value = match user_res { - Ok(res) => match res.json().await { - Ok(json) => json, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to parse GitHub user response: {e}" - ))); - } - }, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to fetch user info: {e}" - ))); - } - }; - let username = user_json - .get("login") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()); - // Fetch user emails - let emails_res = client - .get("https://api.github.com/user/emails") - .bearer_auth(access_token) - .header("User-Agent", "vibe-kanban-app") - .send() - .await; - let emails_json: serde_json::Value = match emails_res { - Ok(res) => match res.json().await { - Ok(json) => json, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to parse GitHub emails response: {e}" - ))); - } - }, - Err(e) => { - return ResponseJson(ApiResponse::error(&format!( - "Failed to fetch user emails: {e}" - ))); - } - }; - let primary_email = emails_json - .as_array() - .and_then(|arr| { - arr.iter() - .find(|email| { - email - .get("primary") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - }) - .and_then(|email| email.get("email").and_then(|v| v.as_str())) - }) - .map(|s| s.to_string()); - // Save to config - { - let mut config = app_state.get_config().write().await; - config.github.username = username.clone(); - config.github.primary_email = primary_email.clone(); - config.github.token = Some(access_token.to_string()); - config.github_login_acknowledged = true; // Also acknowledge the GitHub login step - let config_path = crate::utils::config_path(); - if config.save(&config_path).is_err() { - return ResponseJson(ApiResponse::error("Failed to save config")); - } - } - app_state.update_sentry_scope().await; - // Identify user in PostHog - let mut props = serde_json::Map::new(); - if let Some(ref username) = username { - props.insert( - "username".to_string(), - serde_json::Value::String(username.clone()), - ); - } - if let Some(ref email) = primary_email { - props.insert( - "email".to_string(), - serde_json::Value::String(email.clone()), - ); - } - { - let props = serde_json::Value::Object(props); - app_state - .track_analytics_event("$identify", Some(props)) - .await; - } - - ResponseJson(ApiResponse::success("GitHub login successful".to_string())) - } else { - ResponseJson(ApiResponse::error("No access token yet")) - } -} - -/// GET /auth/github/check -async fn github_check_token(State(app_state): State) -> ResponseJson> { - let config = app_state.get_config().read().await; - let token = config.github.token.clone(); - drop(config); - if let Some(token) = token { - let client = reqwest::Client::new(); - let res = client - .get("https://api.github.com/user") - .bearer_auth(&token) - .header("User-Agent", "vibe-kanban-app") - .send() - .await; - match res { - Ok(r) if r.status().is_success() => ResponseJson(ApiResponse::success(())), - _ => ResponseJson(ApiResponse::error("github_token_invalid")), - } - } else { - ResponseJson(ApiResponse::error("github_token_invalid")) - } -} - -/// Middleware to 
set Sentry user context for every request -pub async fn sentry_user_context_middleware( - State(app_state): State, - req: Request, - next: Next, -) -> Response { - app_state.update_sentry_scope().await; - next.run(req).await -} diff --git a/backend/src/routes/config.rs b/backend/src/routes/config.rs deleted file mode 100644 index 434cc307..00000000 --- a/backend/src/routes/config.rs +++ /dev/null @@ -1,335 +0,0 @@ -use std::collections::HashMap; - -use axum::{ - extract::{Query, State}, - response::Json as ResponseJson, - routing::{get, post}, - Json, Router, -}; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use tokio::fs; -use ts_rs::TS; - -use crate::{ - app_state::AppState, - executor::ExecutorConfig, - models::{ - config::{Config, EditorConstants, SoundConstants}, - ApiResponse, Environment, - }, - utils, -}; - -pub fn config_router() -> Router { - Router::new() - .route("/config", get(get_config)) - .route("/config", post(update_config)) - .route("/config/constants", get(get_config_constants)) - .route("/mcp-servers", get(get_mcp_servers)) - .route("/mcp-servers", post(update_mcp_servers)) -} - -async fn get_config(State(app_state): State) -> ResponseJson> { - let mut config = app_state.get_config().read().await.clone(); - - // Update environment info dynamically - let info = os_info::get(); - config.environment.os_type = info.os_type().to_string(); - config.environment.os_version = info.version().to_string(); - config.environment.architecture = info.architecture().unwrap_or("unknown").to_string(); - config.environment.bitness = info.bitness().to_string(); - - ResponseJson(ApiResponse::success(config)) -} - -async fn update_config( - State(app_state): State, - Json(new_config): Json, -) -> ResponseJson> { - let config_path = utils::config_path(); - - match new_config.save(&config_path) { - Ok(_) => { - let mut config = app_state.get_config().write().await; - *config = new_config.clone(); - drop(config); - - app_state - .update_analytics_config(new_config.analytics_enabled.unwrap_or(true)) - .await; - - ResponseJson(ApiResponse::success(new_config)) - } - Err(e) => ResponseJson(ApiResponse::error(&format!("Failed to save config: {}", e))), - } -} - -#[derive(Debug, Serialize, Deserialize, TS)] -#[ts(export)] -pub struct ConfigConstants { - pub editor: EditorConstants, - pub sound: SoundConstants, - pub mode: Environment, -} - -async fn get_config_constants( - State(app_state): State, -) -> ResponseJson> { - let constants = ConfigConstants { - editor: EditorConstants::new(), - sound: SoundConstants::new(), - mode: app_state.mode, - }; - - ResponseJson(ApiResponse::success(constants)) -} - -#[derive(Debug, Deserialize)] -struct McpServerQuery { - executor: Option, -} - -/// Common logic for resolving executor configuration and validating MCP support -fn resolve_executor_config( - query_executor: Option, - saved_config: &ExecutorConfig, -) -> Result { - let executor_config = match query_executor { - Some(executor_type) => executor_type - .parse::() - .map_err(|e| e.to_string())?, - None => saved_config.clone(), - }; - - if !executor_config.supports_mcp() { - return Err(format!( - "{} executor does not support MCP configuration", - executor_config.display_name() - )); - } - - Ok(executor_config) -} - -async fn get_mcp_servers( - State(app_state): State, - Query(query): Query, -) -> ResponseJson> { - let saved_config = { - let config = app_state.get_config().read().await; - config.executor.clone() - }; - - let executor_config = match resolve_executor_config(query.executor, 
&saved_config) { - Ok(config) => config, - Err(message) => { - return ResponseJson(ApiResponse::error(&message)); - } - }; - - // Get the config file path for this executor - let config_path = match executor_config.config_path() { - Some(path) => path, - None => { - return ResponseJson(ApiResponse::error("Could not determine config file path")); - } - }; - - match read_mcp_servers_from_config(&config_path, &executor_config).await { - Ok(servers) => { - let response_data = serde_json::json!({ - "servers": servers, - "config_path": config_path.to_string_lossy().to_string() - }); - ResponseJson(ApiResponse::success(response_data)) - } - Err(e) => ResponseJson(ApiResponse::error(&format!( - "Failed to read MCP servers: {}", - e - ))), - } -} - -async fn update_mcp_servers( - State(app_state): State, - Query(query): Query, - Json(new_servers): Json>, -) -> ResponseJson> { - let saved_config = { - let config = app_state.get_config().read().await; - config.executor.clone() - }; - - let executor_config = match resolve_executor_config(query.executor, &saved_config) { - Ok(config) => config, - Err(message) => { - return ResponseJson(ApiResponse::error(&message)); - } - }; - - // Get the config file path for this executor - let config_path = match executor_config.config_path() { - Some(path) => path, - None => { - return ResponseJson(ApiResponse::error("Could not determine config file path")); - } - }; - - match update_mcp_servers_in_config(&config_path, &executor_config, new_servers).await { - Ok(message) => ResponseJson(ApiResponse::success(message)), - Err(e) => ResponseJson(ApiResponse::error(&format!( - "Failed to update MCP servers: {}", - e - ))), - } -} - -async fn update_mcp_servers_in_config( - file_path: &std::path::Path, - executor_config: &ExecutorConfig, - new_servers: HashMap, -) -> Result> { - // Ensure parent directory exists - if let Some(parent) = file_path.parent() { - fs::create_dir_all(parent).await?; - } - - // Read existing config file or create empty object if it doesn't exist - let file_content = fs::read_to_string(file_path) - .await - .unwrap_or_else(|_| "{}".to_string()); - let mut config: Value = serde_json::from_str(&file_content)?; - - // Get the attribute path for MCP servers - let mcp_path = executor_config.mcp_attribute_path().unwrap(); - - // Get the current server count for comparison - let old_servers = get_mcp_servers_from_config_path(&config, &mcp_path).len(); - - // Set the MCP servers using the correct attribute path - set_mcp_servers_in_config_path(&mut config, &mcp_path, &new_servers)?; - - // Write the updated config back to file - let updated_content = serde_json::to_string_pretty(&config)?; - fs::write(file_path, updated_content).await?; - - let new_count = new_servers.len(); - let message = match (old_servers, new_count) { - (0, 0) => "No MCP servers configured".to_string(), - (0, n) => format!("Added {} MCP server(s)", n), - (old, new) if old == new => format!("Updated MCP server configuration ({} server(s))", new), - (old, new) => format!( - "Updated MCP server configuration (was {}, now {})", - old, new - ), - }; - - Ok(message) -} - -async fn read_mcp_servers_from_config( - file_path: &std::path::Path, - executor_config: &ExecutorConfig, -) -> Result, Box> { - // Read the config file, return empty if it doesn't exist - let file_content = fs::read_to_string(file_path) - .await - .unwrap_or_else(|_| "{}".to_string()); - let config: Value = serde_json::from_str(&file_content)?; - - // Get the attribute path for MCP servers - let mcp_path = 
executor_config.mcp_attribute_path().unwrap(); - - // Get the servers using the correct attribute path - let servers = get_mcp_servers_from_config_path(&config, &mcp_path); - - Ok(servers) -} - -/// Helper function to get MCP servers from config using a path -fn get_mcp_servers_from_config_path(config: &Value, path: &[&str]) -> HashMap { - // Special handling for AMP - use flat key structure - if path.len() == 2 && path[0] == "amp" && path[1] == "mcpServers" { - let flat_key = format!("{}.{}", path[0], path[1]); - let current = match config.get(&flat_key) { - Some(val) => val, - None => return HashMap::new(), - }; - - // Extract the servers object - match current.as_object() { - Some(servers) => servers - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(), - None => HashMap::new(), - } - } else { - let mut current = config; - - // Navigate to the target location - for &part in path { - current = match current.get(part) { - Some(val) => val, - None => return HashMap::new(), - }; - } - - // Extract the servers object - match current.as_object() { - Some(servers) => servers - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(), - None => HashMap::new(), - } - } -} - -/// Helper function to set MCP servers in config using a path -fn set_mcp_servers_in_config_path( - config: &mut Value, - path: &[&str], - servers: &HashMap, -) -> Result<(), Box> { - // Ensure config is an object - if !config.is_object() { - *config = serde_json::json!({}); - } - - // Special handling for AMP - use flat key structure - if path.len() == 2 && path[0] == "amp" && path[1] == "mcpServers" { - let flat_key = format!("{}.{}", path[0], path[1]); - config - .as_object_mut() - .unwrap() - .insert(flat_key, serde_json::to_value(servers)?); - return Ok(()); - } - - let mut current = config; - - // Navigate/create the nested structure (all parts except the last) - for &part in &path[..path.len() - 1] { - if current.get(part).is_none() { - current - .as_object_mut() - .unwrap() - .insert(part.to_string(), serde_json::json!({})); - } - current = current.get_mut(part).unwrap(); - if !current.is_object() { - *current = serde_json::json!({}); - } - } - - // Set the final attribute - let final_attr = path.last().unwrap(); - current - .as_object_mut() - .unwrap() - .insert(final_attr.to_string(), serde_json::to_value(servers)?); - - Ok(()) -} diff --git a/backend/src/routes/filesystem.rs b/backend/src/routes/filesystem.rs deleted file mode 100644 index 1255d947..00000000 --- a/backend/src/routes/filesystem.rs +++ /dev/null @@ -1,185 +0,0 @@ -use std::{ - fs, - path::{Path, PathBuf}, -}; - -use axum::{ - extract::Query, http::StatusCode, response::Json as ResponseJson, routing::get, Router, -}; -use serde::{Deserialize, Serialize}; -use ts_rs::TS; - -use crate::{app_state::AppState, models::ApiResponse}; - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub struct DirectoryEntry { - pub name: String, - pub path: String, - pub is_directory: bool, - pub is_git_repo: bool, -} - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub struct DirectoryListResponse { - pub entries: Vec, - pub current_path: String, -} - -#[derive(Debug, Deserialize)] -pub struct ListDirectoryQuery { - path: Option, -} - -pub async fn list_directory( - Query(query): Query, -) -> Result>, StatusCode> { - let path_str = query.path.unwrap_or_else(|| { - // Default to user's home directory - dirs::home_dir() - .or_else(dirs::desktop_dir) - .or_else(dirs::document_dir) - .unwrap_or_else(|| { - if cfg!(windows) { - std::env::var("USERPROFILE") - 
.map(PathBuf::from) - .unwrap_or_else(|_| PathBuf::from("C:\\")) - } else { - PathBuf::from("/") - } - }) - .to_string_lossy() - .to_string() - }); - - let path = Path::new(&path_str); - - if !path.exists() { - return Ok(ResponseJson(ApiResponse::error("Directory does not exist"))); - } - - if !path.is_dir() { - return Ok(ResponseJson(ApiResponse::error("Path is not a directory"))); - } - - match fs::read_dir(path) { - Ok(entries) => { - let mut directory_entries = Vec::new(); - - for entry in entries.flatten() { - let path = entry.path(); - let metadata = entry.metadata().ok(); - - if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - // Skip hidden files/directories - if name.starts_with('.') && name != ".." { - continue; - } - - let is_directory = metadata.is_some_and(|m| m.is_dir()); - let is_git_repo = if is_directory { - path.join(".git").exists() - } else { - false - }; - - directory_entries.push(DirectoryEntry { - name: name.to_string(), - path: path.to_string_lossy().to_string(), - is_directory, - is_git_repo, - }); - } - } - - // Sort: directories first, then files, both alphabetically - directory_entries.sort_by(|a, b| match (a.is_directory, b.is_directory) { - (true, false) => std::cmp::Ordering::Less, - (false, true) => std::cmp::Ordering::Greater, - _ => a.name.to_lowercase().cmp(&b.name.to_lowercase()), - }); - - Ok(ResponseJson(ApiResponse::success(DirectoryListResponse { - entries: directory_entries, - current_path: path.to_string_lossy().to_string(), - }))) - } - Err(e) => { - tracing::error!("Failed to read directory: {}", e); - Ok(ResponseJson(ApiResponse::error(&format!( - "Failed to read directory: {}", - e - )))) - } - } -} - -pub async fn validate_git_path( - Query(query): Query, -) -> Result>, StatusCode> { - let path_str = query.path.ok_or(StatusCode::BAD_REQUEST)?; - let path = Path::new(&path_str); - - // Check if path exists and is a git repo - let is_valid_git_repo = path.exists() && path.is_dir() && path.join(".git").exists(); - - Ok(ResponseJson(ApiResponse::success(is_valid_git_repo))) -} - -pub async fn create_git_repo( - Query(query): Query, -) -> Result>, StatusCode> { - let path_str = query.path.ok_or(StatusCode::BAD_REQUEST)?; - let path = Path::new(&path_str); - - // Create directory if it doesn't exist - if !path.exists() { - if let Err(e) = fs::create_dir_all(path) { - tracing::error!("Failed to create directory: {}", e); - return Ok(ResponseJson(ApiResponse::error(&format!( - "Failed to create directory: {}", - e - )))); - } - } - - // Check if it's already a git repo - if path.join(".git").exists() { - return Ok(ResponseJson(ApiResponse::success(()))); - } - - // Initialize git repository - match std::process::Command::new("git") - .arg("init") - .current_dir(path) - .output() - { - Ok(output) => { - if output.status.success() { - Ok(ResponseJson(ApiResponse::success(()))) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - tracing::error!("Git init failed: {}", error_msg); - Ok(ResponseJson(ApiResponse::error(&format!( - "Git init failed: {}", - error_msg - )))) - } - } - Err(e) => { - tracing::error!("Failed to run git init: {}", e); - Ok(ResponseJson(ApiResponse::error(&format!( - "Failed to run git init: {}", - e - )))) - } - } -} - -pub fn filesystem_router() -> Router { - Router::new() - .route("/filesystem/list", get(list_directory)) - .route("/filesystem/validate-git", get(validate_git_path)) - .route("/filesystem/create-git", get(create_git_repo)) -} diff --git a/backend/src/routes/mod.rs 
b/backend/src/routes/mod.rs deleted file mode 100644 index f1f259ce..00000000 --- a/backend/src/routes/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -pub mod auth; -pub mod config; -pub mod filesystem; -pub mod github; -pub mod health; -pub mod projects; -pub mod stream; -pub mod task_attempts; -pub mod task_templates; -pub mod tasks; diff --git a/backend/src/routes/stream.rs b/backend/src/routes/stream.rs deleted file mode 100644 index a9d97630..00000000 --- a/backend/src/routes/stream.rs +++ /dev/null @@ -1,244 +0,0 @@ -use std::time::Duration; - -use axum::{ - extract::{Path, Query, State}, - response::sse::{Event, Sse}, - routing::get, - Router, -}; -use futures_util::stream::Stream; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use uuid::Uuid; - -use crate::{ - app_state::AppState, - executors::gemini::GeminiExecutor, - models::execution_process::{ExecutionProcess, ExecutionProcessStatus}, -}; - -/// Interval for DB tail polling (ms) - now blazing fast for real-time updates -const TAIL_INTERVAL_MS: u64 = 100; - -/// Structured batch data for SSE streaming -#[derive(Serialize)] -struct BatchData { - batch_id: u64, - patches: Vec, -} - -/// Query parameters for resumable SSE streaming -#[derive(Debug, Deserialize)] -pub struct StreamQuery { - /// Optional cursor to resume streaming from specific batch ID - since_batch_id: Option, -} - -/// SSE handler for incremental normalized-logs JSON-Patch streaming -/// -/// GET /api/projects/:project_id/execution-processes/:process_id/normalized-logs/stream?since_batch_id=123 -pub async fn normalized_logs_stream( - Path((_project_id, process_id)): Path<(Uuid, Uuid)>, - Query(query): Query, - State(app_state): State, -) -> Sse>> { - // Check if this is a Gemini executor (only executor with streaming support) - let is_gemini = match ExecutionProcess::find_by_id(&app_state.db_pool, process_id).await { - Ok(Some(process)) => process.executor_type.as_deref() == Some("gemini"), - _ => { - tracing::warn!( - "Failed to find execution process {} for SSE streaming", - process_id - ); - false - } - }; - - // Use blazing fast polling interval for Gemini (only streaming executor) - let poll_interval = if is_gemini { 50 } else { TAIL_INTERVAL_MS }; - - // Stream that yields patches from WAL (fast-path) or DB tail (fallback) - let stream = async_stream::stream! 
{ - // Track previous stdout length and entry count for database polling fallback - let mut last_len: usize = 0; - let mut last_entry_count: usize = query.since_batch_id.unwrap_or(1) as usize; - let mut interval = tokio::time::interval(Duration::from_millis(poll_interval)); - let mut last_seen_batch_id: u64 = query.since_batch_id.unwrap_or(0); // Cursor for WAL streaming - - // Monotonic batch ID for fallback polling (always start at 1) - let since = query.since_batch_id.unwrap_or(1); - let mut fallback_batch_id: u64 = since + 1; - - // Fast catch-up phase for resumable streaming - if let Some(since_batch) = query.since_batch_id { - if !is_gemini { - // Load current process state to get all available entries - if let Ok(Some(proc)) = ExecutionProcess::find_by_id(&app_state.db_pool, process_id).await { - if let Some(stdout) = &proc.stdout { - // Create executor and normalize logs to get all entries - if let Some(executor) = proc.executor_type - .as_deref() - .unwrap_or("unknown") - .parse::() - .ok() - .map(|cfg| cfg.create_executor()) - { - if let Ok(normalized) = executor.normalize_logs(stdout, &proc.working_directory) { - // Send all entries after since_batch_id immediately - let start_entry = since_batch as usize; - let catch_up_entries = normalized.entries.get(start_entry..).unwrap_or(&[]); - - for (i, entry) in catch_up_entries.iter().enumerate() { - let batch_data = BatchData { - batch_id: since_batch + 1 + i as u64, - patches: vec![serde_json::json!({ - "op": "add", - "path": "/entries/-", - "value": entry - })], - }; - yield Ok(Event::default().event("patch").data(serde_json::to_string(&batch_data).unwrap_or_default())); - } - - // Update cursors to current state - last_entry_count = normalized.entries.len(); - fallback_batch_id = since_batch + 1 + catch_up_entries.len() as u64; - last_len = stdout.len(); - } - } - } - } - } - } - - loop { - interval.tick().await; - - // Check process status first - let process_status = match ExecutionProcess::find_by_id(&app_state.db_pool, process_id).await { - Ok(Some(proc)) => proc.status, - _ => { - tracing::warn!("Execution process {} not found during SSE streaming", process_id); - break; - } - }; - - if is_gemini { - // Gemini streaming: Read from Gemini WAL using cursor - let cursor = if last_seen_batch_id == 0 { None } else { Some(last_seen_batch_id) }; - if let Some(new_batches) = GeminiExecutor::get_wal_batches(process_id, cursor) { - // Send any new batches since last cursor - for batch in &new_batches { - // Send full batch including batch_id for cursor tracking - let batch_data = BatchData { - batch_id: batch.batch_id, - patches: batch.patches.clone(), - }; - let json = serde_json::to_string(&batch_data).unwrap_or_default(); - yield Ok(Event::default().event("patch").data(json)); - // Update cursor to highest batch_id seen - last_seen_batch_id = batch.batch_id.max(last_seen_batch_id); - } - } - } else { - // Fallback: Database polling for non-streaming executors - // 1. Load the process - let proc = match ExecutionProcess::find_by_id(&app_state.db_pool, process_id) - .await - .ok() - .flatten() - { - Some(p) => p, - None => { - tracing::warn!("Execution process {} not found during SSE polling", process_id); - continue; - } - }; - - // 2. Grab the stdout and check if there's new content - let stdout = match proc.stdout { - Some(ref s) if s.len() > last_len && !s[last_len..].trim().is_empty() => s.clone(), - _ => continue, // no new output - }; - - // 3. 
Instantiate the right executor - let executor = match proc.executor_type - .as_deref() - .unwrap_or("unknown") - .parse::() - .ok() - .map(|cfg| cfg.create_executor()) - { - Some(exec) => exec, - None => { - tracing::warn!( - "Unknown executor '{}' for process {}", - proc.executor_type.unwrap_or_default(), - process_id - ); - continue; - } - }; - - // 4. Normalize logs - let normalized = match executor.normalize_logs(&stdout, &proc.working_directory) { - Ok(norm) => norm, - Err(err) => { - tracing::error!( - "Failed to normalize logs for process {}: {}", - process_id, - err - ); - continue; - } - }; - - if last_entry_count > normalized.entries.len() { - continue; - } - - // 5. Compute patches for any new entries - if last_entry_count >= normalized.entries.len() { - continue; - } - let new_entries = [&normalized.entries[last_entry_count]]; - let patches: Vec = new_entries - .iter() - .map(|entry| serde_json::json!({ - "op": "add", - "path": "/entries/-", - "value": entry - })) - .collect(); - - // 6. Emit the batch - let batch_data = BatchData { - batch_id: fallback_batch_id - 1, - patches, - }; - let json = serde_json::to_string(&batch_data).unwrap_or_default(); - yield Ok(Event::default().event("patch").data(json)); - - // 7. Update our cursors - fallback_batch_id += 1; - last_entry_count += 1; - last_len = stdout.len(); - } - - // Stop streaming when process completed - if process_status != ExecutionProcessStatus::Running { - break; - } - } - }; - - Sse::new(stream).keep_alive(axum::response::sse::KeepAlive::default()) -} - -/// Router exposing `/normalized-logs/stream` -pub fn stream_router() -> Router { - Router::new().route( - "/projects/:project_id/execution-processes/:process_id/normalized-logs/stream", - get(normalized_logs_stream), - ) -} diff --git a/backend/src/routes/task_attempts.rs b/backend/src/routes/task_attempts.rs deleted file mode 100644 index aefb0498..00000000 --- a/backend/src/routes/task_attempts.rs +++ /dev/null @@ -1,1140 +0,0 @@ -use axum::{ - extract::{Query, State}, - http::StatusCode, - middleware::from_fn_with_state, - response::Json as ResponseJson, - routing::get, - Extension, Json, Router, -}; -use serde::{Deserialize, Serialize}; -use sqlx::SqlitePool; -use ts_rs::TS; -use uuid::Uuid; - -use crate::{ - app_state::AppState, - executor::{ - ActionType, ExecutorConfig, NormalizedConversation, NormalizedEntry, NormalizedEntryType, - }, - middleware::{load_execution_process_with_context_middleware, load_task_attempt_middleware}, - models::{ - config::Config, - execution_process::{ - ExecutionProcess, ExecutionProcessStatus, ExecutionProcessSummary, ExecutionProcessType, - }, - project::Project, - task::{Task, TaskStatus}, - task_attempt::{ - BranchStatus, CreateFollowUpAttempt, CreatePrParams, CreateTaskAttempt, TaskAttempt, - TaskAttemptState, WorktreeDiff, - }, - ApiResponse, - }, -}; - -#[derive(Debug, Deserialize, Serialize)] -pub struct RebaseTaskAttemptRequest { - pub new_base_branch: Option, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct CreateGitHubPRRequest { - pub title: String, - pub body: Option, - pub base_branch: Option, -} - -#[derive(Debug, Serialize)] -pub struct FollowUpResponse { - pub message: String, - pub actual_attempt_id: Uuid, - pub created_new_attempt: bool, -} - -#[derive(Debug, Serialize, TS)] -#[ts(export)] -pub struct ProcessLogsResponse { - pub id: Uuid, - pub process_type: ExecutionProcessType, - pub command: String, - pub executor_type: Option, - pub status: ExecutionProcessStatus, - pub normalized_conversation: 
NormalizedConversation, -} - -// Helper to normalize logs for a process (extracted from get_execution_process_normalized_logs) -async fn normalize_process_logs( - db_pool: &SqlitePool, - process: &ExecutionProcess, -) -> NormalizedConversation { - use crate::models::{ - execution_process::ExecutionProcessType, executor_session::ExecutorSession, - }; - let executor_session = ExecutorSession::find_by_execution_process_id(db_pool, process.id) - .await - .ok() - .flatten(); - - let has_stdout = process - .stdout - .as_ref() - .map(|s| !s.trim().is_empty()) - .unwrap_or(false); - let has_stderr = process - .stderr - .as_ref() - .map(|s| !s.trim().is_empty()) - .unwrap_or(false); - - if !has_stdout && !has_stderr { - return NormalizedConversation { - entries: vec![], - session_id: None, - executor_type: process - .executor_type - .clone() - .unwrap_or("unknown".to_string()), - prompt: executor_session.as_ref().and_then(|s| s.prompt.clone()), - summary: executor_session.as_ref().and_then(|s| s.summary.clone()), - }; - } - - // Parse stdout as JSONL using executor normalization - let mut stdout_entries = Vec::new(); - if let Some(stdout) = &process.stdout { - if !stdout.trim().is_empty() { - let executor_type = process.executor_type.as_deref().unwrap_or("unknown"); - let executor_config = if process.process_type == ExecutionProcessType::SetupScript { - ExecutorConfig::SetupScript { - script: executor_session - .as_ref() - .and_then(|s| s.prompt.clone()) - .unwrap_or_else(|| "setup script".to_string()), - } - } else { - match executor_type.to_string().parse() { - Ok(config) => config, - Err(_) => { - return NormalizedConversation { - entries: vec![], - session_id: None, - executor_type: executor_type.to_string(), - prompt: executor_session.as_ref().and_then(|s| s.prompt.clone()), - summary: executor_session.as_ref().and_then(|s| s.summary.clone()), - }; - } - } - }; - let executor = executor_config.create_executor(); - let working_dir_path = match std::fs::canonicalize(&process.working_directory) { - Ok(canonical_path) => canonical_path.to_string_lossy().to_string(), - Err(_) => process.working_directory.clone(), - }; - if let Ok(normalized) = executor.normalize_logs(stdout, &working_dir_path) { - stdout_entries = normalized.entries; - } - } - } - // Parse stderr chunks separated by boundary markers - let mut stderr_entries = Vec::new(); - if let Some(stderr) = &process.stderr { - let trimmed = stderr.trim(); - if !trimmed.is_empty() { - let chunks: Vec<&str> = trimmed.split("---STDERR_CHUNK_BOUNDARY---").collect(); - for chunk in chunks { - let chunk_trimmed = chunk.trim(); - if !chunk_trimmed.is_empty() { - let filtered_content = chunk_trimmed.replace("---STDERR_CHUNK_BOUNDARY---", ""); - if !filtered_content.trim().is_empty() { - stderr_entries.push(NormalizedEntry { - timestamp: Some(chrono::Utc::now().to_rfc3339()), - entry_type: NormalizedEntryType::ErrorMessage, - content: filtered_content.trim().to_string(), - metadata: None, - }); - } - } - } - } - } - let mut all_entries = Vec::new(); - all_entries.extend(stdout_entries); - all_entries.extend(stderr_entries); - all_entries.sort_by(|a, b| match (&a.timestamp, &b.timestamp) { - (Some(a_ts), Some(b_ts)) => a_ts.cmp(b_ts), - (Some(_), None) => std::cmp::Ordering::Less, - (None, Some(_)) => std::cmp::Ordering::Greater, - (None, None) => std::cmp::Ordering::Equal, - }); - let executor_type = if process.process_type == ExecutionProcessType::SetupScript { - "setup-script".to_string() - } else { - process - .executor_type - .clone() - 
.unwrap_or("unknown".to_string()) - }; - NormalizedConversation { - entries: all_entries, - session_id: None, - executor_type, - prompt: executor_session.as_ref().and_then(|s| s.prompt.clone()), - summary: executor_session.as_ref().and_then(|s| s.summary.clone()), - } -} - -/// Get all normalized logs for all execution processes of a task attempt -pub async fn get_task_attempt_all_logs( - Extension(_project): Extension, - Extension(_task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>>, StatusCode> { - // Fetch all execution processes for this attempt - let processes = match ExecutionProcess::find_by_task_attempt_id( - &app_state.db_pool, - task_attempt.id, - ) - .await - { - Ok(list) => list, - Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR), - }; - // For each process, normalize logs - let mut result = Vec::new(); - for process in processes { - let normalized_conversation = normalize_process_logs(&app_state.db_pool, &process).await; - result.push(ProcessLogsResponse { - id: process.id, - process_type: process.process_type.clone(), - command: process.command.clone(), - executor_type: process.executor_type.clone(), - status: process.status.clone(), - normalized_conversation, - }); - } - Ok(Json(ApiResponse::success(result))) -} - -pub async fn get_task_attempts( - Extension(_project): Extension, - Extension(task): Extension, - State(app_state): State, -) -> Result>>, StatusCode> { - match TaskAttempt::find_by_task_id(&app_state.db_pool, task.id).await { - Ok(attempts) => Ok(ResponseJson(ApiResponse::success(attempts))), - Err(e) => { - tracing::error!("Failed to fetch task attempts for task {}: {}", task.id, e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn create_task_attempt( - Extension(_project): Extension, - Extension(task): Extension, - State(app_state): State, - Json(payload): Json, -) -> Result>, StatusCode> { - let executor_string = payload.executor.as_ref().map(|exec| exec.to_string()); - - match TaskAttempt::create(&app_state.db_pool, &payload, task.id).await { - Ok(attempt) => { - app_state - .track_analytics_event( - "task_attempt_started", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "executor_type": executor_string.as_deref().unwrap_or("default"), - "attempt_id": attempt.id.to_string(), - })), - ) - .await; - - // Start execution asynchronously (don't block the response) - let app_state_clone = app_state.clone(); - let attempt_id = attempt.id; - let task_id = task.id; - let project_id = _project.id; - tokio::spawn(async move { - if let Err(e) = TaskAttempt::start_execution( - &app_state_clone.db_pool, - &app_state_clone, - attempt_id, - task_id, - project_id, - ) - .await - { - tracing::error!( - "Failed to start execution for task attempt {}: {}", - attempt_id, - e - ); - } - }); - - Ok(ResponseJson(ApiResponse::success(attempt))) - } - Err(e) => { - tracing::error!("Failed to create task attempt: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn get_task_attempt_diff( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - match TaskAttempt::get_diff(&app_state.db_pool, task_attempt.id, task.id, project.id).await { - Ok(diff) => Ok(ResponseJson(ApiResponse::success(diff))), - Err(e) => { - tracing::error!( - "Failed to get diff for task attempt {}: {}", - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - 
-#[axum::debug_handler] -pub async fn merge_task_attempt( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - match TaskAttempt::merge_changes(&app_state.db_pool, task_attempt.id, task.id, project.id).await - { - Ok(_) => { - // Update task status to Done - if let Err(e) = Task::update_status( - &app_state.db_pool, - task.id, - project.id, - crate::models::task::TaskStatus::Done, - ) - .await - { - tracing::error!("Failed to update task status to Done after merge: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - - // Track task attempt merged event - app_state - .track_analytics_event( - "task_attempt_merged", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "project_id": project.id.to_string(), - "attempt_id": task_attempt.id.to_string(), - })), - ) - .await; - - Ok(ResponseJson(ApiResponse::success(()))) - } - Err(e) => { - tracing::error!("Failed to merge task attempt {}: {}", task_attempt.id, e); - Ok(ResponseJson(ApiResponse::error(&format!( - "Failed to merge: {}", - e - )))) - } - } -} - -pub async fn create_github_pr( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, - Json(request): Json, -) -> Result>, StatusCode> { - // Load the user's GitHub configuration - let config = match Config::load(&crate::utils::config_path()) { - Ok(config) => config, - Err(e) => { - tracing::error!("Failed to load config: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - let github_token = match config.github.token { - Some(token) => token, - None => { - return Ok(ResponseJson(ApiResponse::error( - "GitHub authentication not configured. 
Please sign in with GitHub.", - ))); - } - }; - - // Get the task attempt to access the stored base branch - let attempt = &task_attempt; - - let base_branch = request.base_branch.unwrap_or_else(|| { - // Use the stored base branch from the task attempt as the default - // Fall back to config default or "main" only if stored base branch is somehow invalid - if !attempt.base_branch.trim().is_empty() { - attempt.base_branch.clone() - } else { - config - .github - .default_pr_base - .unwrap_or_else(|| "main".to_string()) - } - }); - - match TaskAttempt::create_github_pr( - &app_state.db_pool, - CreatePrParams { - attempt_id: task_attempt.id, - task_id: task.id, - project_id: project.id, - github_token: &config.github.pat.unwrap_or(github_token), - title: &request.title, - body: request.body.as_deref(), - base_branch: Some(&base_branch), - }, - ) - .await - { - Ok(pr_url) => { - app_state - .track_analytics_event( - "github_pr_created", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "project_id": project.id.to_string(), - "attempt_id": task_attempt.id.to_string(), - })), - ) - .await; - - Ok(ResponseJson(ApiResponse::success(pr_url))) - } - Err(e) => { - tracing::error!( - "Failed to create GitHub PR for attempt {}: {}", - task_attempt.id, - e - ); - let message = match &e { - crate::models::task_attempt::TaskAttemptError::GitHubService( - crate::services::GitHubServiceError::TokenInvalid, - ) => Some("github_token_invalid".to_string()), - crate::models::task_attempt::TaskAttemptError::GitService( - crate::services::git_service::GitServiceError::Git(err), - ) if err - .message() - .contains("too many redirects or authentication replays") => - { - Some("insufficient_github_permissions".to_string()) // PAT is invalid - } - crate::models::task_attempt::TaskAttemptError::GitService( - crate::services::git_service::GitServiceError::Git(err), - ) if err.message().contains("status code: 403") => { - Some("insufficient_github_permissions".to_string()) - } - crate::models::task_attempt::TaskAttemptError::GitService( - crate::services::git_service::GitServiceError::Git(err), - ) if err.message().contains("status code: 404") => { - Some("github_repo_not_found_or_no_access".to_string()) - } - _ => Some(format!("Failed to create PR: {}", e)), - }; - Ok(ResponseJson(ApiResponse::error( - message.as_deref().unwrap_or("Unknown error"), - ))) - } - } -} - -#[derive(serde::Deserialize)] -pub struct OpenEditorRequest { - editor_type: Option, -} - -pub async fn open_task_attempt_in_editor( - Extension(_project): Extension, - Extension(_task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, - Json(payload): Json>, -) -> Result>, StatusCode> { - // Get the task attempt to access the worktree path - let attempt = &task_attempt; - - // Get editor command from config or override - let editor_command = { - let config_guard = app_state.get_config().read().await; - if let Some(ref request) = payload { - if let Some(ref editor_type) = request.editor_type { - // Create a temporary editor config with the override - use crate::models::config::{EditorConfig, EditorType}; - let override_editor_type = match editor_type.as_str() { - "vscode" => EditorType::VSCode, - "cursor" => EditorType::Cursor, - "windsurf" => EditorType::Windsurf, - "intellij" => EditorType::IntelliJ, - "zed" => EditorType::Zed, - "custom" => EditorType::Custom, - _ => config_guard.editor.editor_type.clone(), - }; - let temp_config = EditorConfig { - editor_type: override_editor_type, - custom_command: 
config_guard.editor.custom_command.clone(), - }; - temp_config.get_command() - } else { - config_guard.editor.get_command() - } - } else { - config_guard.editor.get_command() - } - }; - - // Open editor in the worktree directory - let mut cmd = std::process::Command::new(&editor_command[0]); - for arg in &editor_command[1..] { - cmd.arg(arg); - } - cmd.arg(&attempt.worktree_path); - - match cmd.spawn() { - Ok(_) => { - tracing::info!( - "Opened editor ({}) for task attempt {} at path: {}", - editor_command.join(" "), - task_attempt.id, - attempt.worktree_path - ); - Ok(ResponseJson(ApiResponse::success(()))) - } - Err(e) => { - tracing::error!( - "Failed to open editor ({}) for attempt {}: {}", - editor_command.join(" "), - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn get_task_attempt_branch_status( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - match TaskAttempt::get_branch_status(&app_state.db_pool, task_attempt.id, task.id, project.id) - .await - { - Ok(status) => Ok(ResponseJson(ApiResponse::success(status))), - Err(e) => { - tracing::error!( - "Failed to get branch status for task attempt {}: {}", - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -#[axum::debug_handler] -pub async fn rebase_task_attempt( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, - request_body: Option>, -) -> Result>, StatusCode> { - // Extract new base branch from request body if provided - let new_base_branch = request_body.and_then(|body| body.new_base_branch.clone()); - - match TaskAttempt::rebase_attempt( - &app_state.db_pool, - task_attempt.id, - task.id, - project.id, - new_base_branch, - ) - .await - { - Ok(_new_base_commit) => Ok(ResponseJson(ApiResponse::success(()))), - Err(e) => { - tracing::error!("Failed to rebase task attempt {}: {}", task_attempt.id, e); - Ok(ResponseJson(ApiResponse::error(&e.to_string()))) - } - } -} - -pub async fn get_task_attempt_execution_processes( - Extension(_project): Extension, - Extension(_task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>>, StatusCode> { - match ExecutionProcess::find_summaries_by_task_attempt_id(&app_state.db_pool, task_attempt.id) - .await - { - Ok(processes) => Ok(ResponseJson(ApiResponse::success(processes))), - Err(e) => { - tracing::error!( - "Failed to fetch execution processes for attempt {}: {}", - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn get_execution_process( - Extension(execution_process): Extension, -) -> Result>, StatusCode> { - Ok(ResponseJson(ApiResponse::success(execution_process))) -} - -#[axum::debug_handler] -pub async fn stop_all_execution_processes( - Extension(_project): Extension, - Extension(_task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - // Get all execution processes for the task attempt - let processes = match ExecutionProcess::find_by_task_attempt_id( - &app_state.db_pool, - task_attempt.id, - ) - .await - { - Ok(processes) => processes, - Err(e) => { - tracing::error!( - "Failed to fetch execution processes for attempt {}: {}", - task_attempt.id, - e - ); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - let mut stopped_count = 0; - let mut errors = Vec::new(); - - // Stop all 
running processes - for process in processes { - match app_state.stop_running_execution_by_id(process.id).await { - Ok(true) => { - stopped_count += 1; - - // Update the execution process status in the database - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - process.id, - crate::models::execution_process::ExecutionProcessStatus::Killed, - None, - ) - .await - { - tracing::error!("Failed to update execution process status: {}", e); - errors.push(format!("Failed to update process {} status", process.id)); - } else { - // Process stopped successfully - } - } - Ok(false) => { - // Process was not running, which is fine - } - Err(e) => { - tracing::error!("Failed to stop execution process {}: {}", process.id, e); - errors.push(format!("Failed to stop process {}: {}", process.id, e)); - } - } - } - - if !errors.is_empty() { - return Ok(ResponseJson(ApiResponse::error(&format!( - "Stopped {} processes, but encountered errors: {}", - stopped_count, - errors.join(", ") - )))); - } - - if stopped_count == 0 { - return Ok(ResponseJson(ApiResponse::success(()))); - } - - Ok(ResponseJson(ApiResponse::success(()))) -} - -#[axum::debug_handler] -pub async fn stop_execution_process( - Extension(_project): Extension, - Extension(_task): Extension, - Extension(_task_attempt): Extension, - Extension(execution_process): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - // Stop the specific execution process - let stopped = match app_state - .stop_running_execution_by_id(execution_process.id) - .await - { - Ok(stopped) => stopped, - Err(e) => { - tracing::error!( - "Failed to stop execution process {}: {}", - execution_process.id, - e - ); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - if !stopped { - return Ok(ResponseJson(ApiResponse::success(()))); - } - - // Update the execution process status in the database - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - execution_process.id, - crate::models::execution_process::ExecutionProcessStatus::Killed, - None, - ) - .await - { - tracing::error!("Failed to update execution process status: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - - // Process stopped successfully - - Ok(ResponseJson(ApiResponse::success(()))) -} - -#[derive(serde::Deserialize)] -pub struct DeleteFileQuery { - file_path: String, -} - -#[axum::debug_handler] -pub async fn delete_task_attempt_file( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - Query(query): Query, - State(app_state): State, -) -> Result>, StatusCode> { - match TaskAttempt::delete_file( - &app_state.db_pool, - task_attempt.id, - task.id, - project.id, - &query.file_path, - ) - .await - { - Ok(_commit_id) => Ok(ResponseJson(ApiResponse::success(()))), - Err(e) => { - tracing::error!( - "Failed to delete file '{}' from task attempt {}: {}", - query.file_path, - task_attempt.id, - e - ); - Ok(ResponseJson(ApiResponse::error(&e.to_string()))) - } - } -} - -pub async fn create_followup_attempt( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, - Json(payload): Json, -) -> Result>, StatusCode> { - // Start follow-up execution synchronously to catch errors - match TaskAttempt::start_followup_execution( - &app_state.db_pool, - &app_state, - task_attempt.id, - task.id, - project.id, - &payload.prompt, - ) - .await - { - Ok(actual_attempt_id) => { - let created_new_attempt = actual_attempt_id != 
task_attempt.id; - let message = if created_new_attempt { - format!( - "Follow-up execution started on new attempt {} (original worktree was deleted)", - actual_attempt_id - ) - } else { - "Follow-up execution started successfully".to_string() - }; - - Ok(ResponseJson(ApiResponse::success(FollowUpResponse { - message: message.clone(), - actual_attempt_id, - created_new_attempt, - }))) - } - Err(e) => { - tracing::error!( - "Failed to start follow-up execution for task attempt {}: {}", - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn start_dev_server( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - // Stop any existing dev servers for this project - let existing_dev_servers = - match ExecutionProcess::find_running_dev_servers_by_project(&app_state.db_pool, project.id) - .await - { - Ok(servers) => servers, - Err(e) => { - tracing::error!( - "Failed to find running dev servers for project {}: {}", - project.id, - e - ); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - for dev_server in existing_dev_servers { - tracing::info!( - "Stopping existing dev server {} for project {}", - dev_server.id, - project.id - ); - - // Stop the running process - if let Err(e) = app_state.stop_running_execution_by_id(dev_server.id).await { - tracing::error!("Failed to stop dev server {}: {}", dev_server.id, e); - } else { - // Update the execution process status in the database - if let Err(e) = ExecutionProcess::update_completion( - &app_state.db_pool, - dev_server.id, - crate::models::execution_process::ExecutionProcessStatus::Killed, - None, - ) - .await - { - tracing::error!( - "Failed to update dev server {} status: {}", - dev_server.id, - e - ); - } - } - } - - // Start dev server execution - match TaskAttempt::start_dev_server( - &app_state.db_pool, - &app_state, - task_attempt.id, - task.id, - project.id, - ) - .await - { - Ok(_) => Ok(ResponseJson(ApiResponse::success(()))), - Err(e) => { - tracing::error!( - "Failed to start dev server for task attempt {}: {}", - task_attempt.id, - e - ); - Ok(ResponseJson(ApiResponse::error(&e.to_string()))) - } - } -} - -pub async fn get_task_attempt_execution_state( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - // Get the execution state - match TaskAttempt::get_execution_state(&app_state.db_pool, task_attempt.id, task.id, project.id) - .await - { - Ok(state) => Ok(ResponseJson(ApiResponse::success(state))), - Err(e) => { - tracing::error!( - "Failed to get execution state for task attempt {}: {}", - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -/// Find plan content with context by searching through multiple processes in the same attempt -async fn find_plan_content_with_context( - pool: &SqlitePool, - attempt_id: Uuid, -) -> Result { - // Get all execution processes for this attempt - let execution_processes = - match ExecutionProcess::find_by_task_attempt_id(pool, attempt_id).await { - Ok(processes) => processes, - Err(e) => { - tracing::error!( - "Failed to fetch execution processes for attempt {}: {}", - attempt_id, - e - ); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Look for claudeplan processes (most recent first) - for claudeplan_process in execution_processes - .iter() - .rev() - .filter(|p| p.executor_type.as_deref() == 
Some("claude-plan")) - { - if let Some(stdout) = &claudeplan_process.stdout { - if !stdout.trim().is_empty() { - // Create executor and normalize logs - let executor_config = ExecutorConfig::ClaudePlan; - let executor = executor_config.create_executor(); - - // Use working directory for normalization - let working_dir_path = - match std::fs::canonicalize(&claudeplan_process.working_directory) { - Ok(canonical_path) => canonical_path.to_string_lossy().to_string(), - Err(_) => claudeplan_process.working_directory.clone(), - }; - - // Normalize logs and extract plan content - match executor.normalize_logs(stdout, &working_dir_path) { - Ok(normalized_conversation) => { - // Search for plan content in the normalized conversation - if let Some(plan_content) = normalized_conversation - .entries - .iter() - .rev() - .find_map(|entry| { - if let NormalizedEntryType::ToolUse { - action_type: ActionType::PlanPresentation { plan }, - .. - } = &entry.entry_type - { - Some(plan.clone()) - } else { - None - } - }) - { - return Ok(plan_content); - } - } - Err(_) => { - continue; - } - } - } - } - } - - tracing::error!( - "No claudeplan content found in any process in attempt {}", - attempt_id - ); - Err(StatusCode::NOT_FOUND) -} - -pub async fn approve_plan( - Extension(project): Extension, - Extension(task): Extension, - Extension(task_attempt): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - let current_task = &task; - - // Find plan content with context across the task hierarchy - let plan_content = find_plan_content_with_context(&app_state.db_pool, task_attempt.id).await?; - - use crate::models::task::CreateTask; - let new_task_id = Uuid::new_v4(); - let create_task_data = CreateTask { - project_id: project.id, - title: format!("Execute Plan: {}", current_task.title), - description: Some(plan_content), - parent_task_attempt: Some(task_attempt.id), - }; - - let new_task = match Task::create(&app_state.db_pool, &create_task_data, new_task_id).await { - Ok(task) => task, - Err(e) => { - tracing::error!("Failed to create new task: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Mark original task as completed since it now has children - if let Err(e) = - Task::update_status(&app_state.db_pool, task.id, project.id, TaskStatus::Done).await - { - tracing::error!("Failed to update original task status to Done: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } else { - tracing::info!( - "Original task {} marked as Done after plan approval (has children)", - task.id - ); - } - - Ok(ResponseJson(ApiResponse::success(FollowUpResponse { - message: format!("Plan approved and new task created: {}", new_task.title), - actual_attempt_id: new_task_id, // Return the new task ID - created_new_attempt: true, - }))) -} - -pub async fn get_task_attempt_details( - Extension(task_attempt): Extension, -) -> Result>, StatusCode> { - Ok(ResponseJson(ApiResponse::success(task_attempt))) -} - -pub async fn get_task_attempt_children( - Extension(task_attempt): Extension, - Extension(project): Extension, - State(app_state): State, -) -> Result>>, StatusCode> { - match Task::find_related_tasks_by_attempt_id(&app_state.db_pool, task_attempt.id, project.id) - .await - { - Ok(related_tasks) => Ok(ResponseJson(ApiResponse::success(related_tasks))), - Err(e) => { - tracing::error!( - "Failed to fetch children for task attempt {}: {}", - task_attempt.id, - e - ); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub fn task_attempts_list_router(_state: AppState) -> Router { - 
Router::new().route( - "/projects/:project_id/tasks/:task_id/attempts", - get(get_task_attempts).post(create_task_attempt), - ) -} - -pub fn task_attempts_with_id_router(_state: AppState) -> Router { - use axum::routing::post; - - Router::new() - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/diff", - get(get_task_attempt_diff), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/merge", - post(merge_task_attempt), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/branch-status", - get(get_task_attempt_branch_status), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/rebase", - post(rebase_task_attempt), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/open-editor", - post(open_task_attempt_in_editor), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/delete-file", - post(delete_task_attempt_file), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/create-pr", - post(create_github_pr), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/execution-processes", - get(get_task_attempt_execution_processes), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/stop", - post(stop_all_execution_processes), - ) - .merge( - Router::new() - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/execution-processes/:process_id/stop", - post(stop_execution_process), - ) - .route_layer(from_fn_with_state(_state.clone(), load_execution_process_with_context_middleware)) - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/logs", - get(get_task_attempt_all_logs), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/follow-up", - post(create_followup_attempt), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/start-dev-server", - post(start_dev_server), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id", - get(get_task_attempt_execution_state), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/approve-plan", - post(approve_plan), - ) - .route( - "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/children", - get(get_task_attempt_children), - ) - .merge( - Router::new() - .route( - "/attempts/:attempt_id/details", - get(get_task_attempt_details), - ) - .route_layer(from_fn_with_state(_state.clone(), load_task_attempt_middleware)) - ) -} diff --git a/backend/src/routes/task_templates.rs b/backend/src/routes/task_templates.rs deleted file mode 100644 index 97247618..00000000 --- a/backend/src/routes/task_templates.rs +++ /dev/null @@ -1,147 +0,0 @@ -use axum::{ - extract::{Path, State}, - http::StatusCode, - response::IntoResponse, - Extension, Json, -}; -use uuid::Uuid; - -use crate::{ - app_state::AppState, - models::{ - api_response::ApiResponse, - task_template::{CreateTaskTemplate, TaskTemplate, UpdateTaskTemplate}, - }, -}; - -pub async fn list_templates( - State(state): State, -) -> Result>)> { - match TaskTemplate::find_all(&state.db_pool).await { - Ok(templates) => Ok(Json(ApiResponse::success(templates))), - Err(e) => Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ApiResponse::error(&format!( - "Failed to fetch templates: {}", - e - ))), - )), - } -} - -pub async fn list_project_templates( - State(state): State, - Path(project_id): Path, -) -> Result>)> { - match TaskTemplate::find_by_project_id(&state.db_pool, Some(project_id)).await { - 
Ok(templates) => Ok(Json(ApiResponse::success(templates))), - Err(e) => Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ApiResponse::error(&format!( - "Failed to fetch templates: {}", - e - ))), - )), - } -} - -pub async fn list_global_templates( - State(state): State, -) -> Result>)> { - match TaskTemplate::find_by_project_id(&state.db_pool, None).await { - Ok(templates) => Ok(Json(ApiResponse::success(templates))), - Err(e) => Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ApiResponse::error(&format!( - "Failed to fetch global templates: {}", - e - ))), - )), - } -} - -pub async fn get_template( - Extension(template): Extension, -) -> Result>)> { - Ok(Json(ApiResponse::success(template))) -} - -pub async fn create_template( - State(state): State, - Json(payload): Json, -) -> Result>)> { - match TaskTemplate::create(&state.db_pool, &payload).await { - Ok(template) => Ok((StatusCode::CREATED, Json(ApiResponse::success(template)))), - Err(e) => { - if e.to_string().contains("UNIQUE constraint failed") { - Err(( - StatusCode::CONFLICT, - Json(ApiResponse::error( - "A template with this name already exists in this scope", - )), - )) - } else { - Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ApiResponse::error(&format!( - "Failed to create template: {}", - e - ))), - )) - } - } - } -} - -pub async fn update_template( - Extension(template): Extension, - State(state): State, - Json(payload): Json, -) -> Result>)> { - match TaskTemplate::update(&state.db_pool, template.id, &payload).await { - Ok(template) => Ok(Json(ApiResponse::success(template))), - Err(e) => { - if matches!(e, sqlx::Error::RowNotFound) { - Err(( - StatusCode::NOT_FOUND, - Json(ApiResponse::error("Template not found")), - )) - } else if e.to_string().contains("UNIQUE constraint failed") { - Err(( - StatusCode::CONFLICT, - Json(ApiResponse::error( - "A template with this name already exists in this scope", - )), - )) - } else { - Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ApiResponse::error(&format!( - "Failed to update template: {}", - e - ))), - )) - } - } - } -} - -pub async fn delete_template( - Extension(template): Extension, - State(state): State, -) -> Result>)> { - match TaskTemplate::delete(&state.db_pool, template.id).await { - Ok(0) => Err(( - StatusCode::NOT_FOUND, - Json(ApiResponse::error("Template not found")), - )), - Ok(_) => Ok(Json(ApiResponse::success(()))), - Err(e) => Err(( - StatusCode::INTERNAL_SERVER_ERROR, - Json(ApiResponse::error(&format!( - "Failed to delete template: {}", - e - ))), - )), - } -} diff --git a/backend/src/routes/tasks.rs b/backend/src/routes/tasks.rs deleted file mode 100644 index 4650ff67..00000000 --- a/backend/src/routes/tasks.rs +++ /dev/null @@ -1,277 +0,0 @@ -use axum::{ - extract::State, http::StatusCode, response::Json as ResponseJson, routing::get, Extension, - Json, Router, -}; -use uuid::Uuid; - -use crate::{ - app_state::AppState, - execution_monitor, - models::{ - project::Project, - task::{CreateTask, CreateTaskAndStart, Task, TaskWithAttemptStatus, UpdateTask}, - task_attempt::{CreateTaskAttempt, TaskAttempt}, - ApiResponse, - }, -}; - -pub async fn get_project_tasks( - Extension(project): Extension, - State(app_state): State, -) -> Result>>, StatusCode> { - match Task::find_by_project_id_with_attempt_status(&app_state.db_pool, project.id).await { - Ok(tasks) => Ok(ResponseJson(ApiResponse::success(tasks))), - Err(e) => { - tracing::error!("Failed to fetch tasks for project {}: {}", project.id, e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - 
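The route handlers in these deleted modules share one signature shape: Extension extractors carry the Project, Task, or TaskAttempt that the loading middleware attached to the request, State exposes the shared AppState, and the body is wrapped in ApiResponse before being returned as JSON. A minimal sketch of that shape, for orientation only (example_handler is illustrative and not part of this patch):

use axum::{extract::State, http::StatusCode, response::Json as ResponseJson, Extension};

use crate::{
    app_state::AppState,
    models::{project::Project, task::Task, ApiResponse},
};

// Illustrative handler mirroring the routes above and below: middleware-loaded
// Project/Task arrive via Extension, AppState via State, and the payload is wrapped
// in ApiResponse before being serialized.
pub async fn example_handler(
    Extension(project): Extension<Project>,
    Extension(task): Extension<Task>,
    State(app_state): State<AppState>,
) -> Result<ResponseJson<ApiResponse<()>>, StatusCode> {
    tracing::debug!("handling task {} in project {}", task.id, project.id);
    let _ = &app_state; // the real handlers use app_state.db_pool and analytics here
    Ok(ResponseJson(ApiResponse::success(())))
}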
-pub async fn get_task( - Extension(task): Extension, -) -> Result>, StatusCode> { - Ok(ResponseJson(ApiResponse::success(task))) -} - -pub async fn create_task( - Extension(project): Extension, - State(app_state): State, - Json(mut payload): Json, -) -> Result>, StatusCode> { - let id = Uuid::new_v4(); - - // Ensure the project_id in the payload matches the project from middleware - payload.project_id = project.id; - - tracing::debug!( - "Creating task '{}' in project {}", - payload.title, - project.id - ); - - match Task::create(&app_state.db_pool, &payload, id).await { - Ok(task) => { - // Track task creation event - app_state - .track_analytics_event( - "task_created", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "project_id": project.id.to_string(), - "has_description": task.description.is_some(), - })), - ) - .await; - - Ok(ResponseJson(ApiResponse::success(task))) - } - Err(e) => { - tracing::error!("Failed to create task: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn create_task_and_start( - Extension(project): Extension, - State(app_state): State, - Json(mut payload): Json, -) -> Result>, StatusCode> { - let task_id = Uuid::new_v4(); - - // Ensure the project_id in the payload matches the project from middleware - payload.project_id = project.id; - - tracing::debug!( - "Creating and starting task '{}' in project {}", - payload.title, - project.id - ); - - // Create the task first - let create_task_payload = CreateTask { - project_id: payload.project_id, - title: payload.title.clone(), - description: payload.description.clone(), - parent_task_attempt: payload.parent_task_attempt, - }; - let task = match Task::create(&app_state.db_pool, &create_task_payload, task_id).await { - Ok(task) => task, - Err(e) => { - tracing::error!("Failed to create task: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); - } - }; - - // Create task attempt - let executor_string = payload.executor.as_ref().map(|exec| exec.to_string()); - let attempt_payload = CreateTaskAttempt { - executor: executor_string.clone(), - base_branch: None, // Not supported in task creation endpoint, only in task attempts - }; - - match TaskAttempt::create(&app_state.db_pool, &attempt_payload, task_id).await { - Ok(attempt) => { - app_state - .track_analytics_event( - "task_created", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "project_id": project.id.to_string(), - "has_description": task.description.is_some(), - })), - ) - .await; - - app_state - .track_analytics_event( - "task_attempt_started", - Some(serde_json::json!({ - "task_id": task.id.to_string(), - "executor_type": executor_string.as_deref().unwrap_or("default"), - "attempt_id": attempt.id.to_string(), - })), - ) - .await; - - // Start execution asynchronously (don't block the response) - let app_state_clone = app_state.clone(); - let attempt_id = attempt.id; - tokio::spawn(async move { - if let Err(e) = TaskAttempt::start_execution( - &app_state_clone.db_pool, - &app_state_clone, - attempt_id, - task_id, - project.id, - ) - .await - { - tracing::error!( - "Failed to start execution for task attempt {}: {}", - attempt_id, - e - ); - } - }); - - Ok(ResponseJson(ApiResponse::success(task))) - } - Err(e) => { - tracing::error!("Failed to create task attempt: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn update_task( - Extension(project): Extension, - Extension(existing_task): Extension, - State(app_state): State, - Json(payload): Json, -) -> Result>, StatusCode> { 
- // Use existing values if not provided in update - let title = payload.title.unwrap_or(existing_task.title); - let description = payload.description.or(existing_task.description); - let status = payload.status.unwrap_or(existing_task.status); - let parent_task_attempt = payload - .parent_task_attempt - .or(existing_task.parent_task_attempt); - - match Task::update( - &app_state.db_pool, - existing_task.id, - project.id, - title, - description, - status, - parent_task_attempt, - ) - .await - { - Ok(task) => Ok(ResponseJson(ApiResponse::success(task))), - Err(e) => { - tracing::error!("Failed to update task: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn delete_task( - Extension(project): Extension, - Extension(task): Extension, - State(app_state): State, -) -> Result>, StatusCode> { - // Clean up all worktrees for this task before deletion - if let Err(e) = execution_monitor::cleanup_task_worktrees(&app_state.db_pool, task.id).await { - tracing::error!("Failed to cleanup worktrees for task {}: {}", task.id, e); - // Continue with deletion even if cleanup fails - } - - // Clean up all executor sessions for this task before deletion - match TaskAttempt::find_by_task_id(&app_state.db_pool, task.id).await { - Ok(task_attempts) => { - for attempt in task_attempts { - if let Err(e) = - crate::models::executor_session::ExecutorSession::delete_by_task_attempt_id( - &app_state.db_pool, - attempt.id, - ) - .await - { - tracing::error!( - "Failed to cleanup executor sessions for task attempt {}: {}", - attempt.id, - e - ); - // Continue with deletion even if session cleanup fails - } else { - tracing::debug!( - "Cleaned up executor sessions for task attempt {}", - attempt.id - ); - } - } - } - Err(e) => { - tracing::error!("Failed to get task attempts for session cleanup: {}", e); - // Continue with deletion even if we can't get task attempts - } - } - - match Task::delete(&app_state.db_pool, task.id, project.id).await { - Ok(rows_affected) => { - if rows_affected == 0 { - Err(StatusCode::NOT_FOUND) - } else { - Ok(ResponseJson(ApiResponse::success(()))) - } - } - Err(e) => { - tracing::error!("Failed to delete task: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub fn tasks_project_router() -> Router { - use axum::routing::post; - - Router::new() - .route( - "/projects/:project_id/tasks", - get(get_project_tasks).post(create_task), - ) - .route( - "/projects/:project_id/tasks/create-and-start", - post(create_task_and_start), - ) -} - -pub fn tasks_with_id_router() -> Router { - Router::new().route( - "/projects/:project_id/tasks/:task_id", - get(get_task).put(update_task).delete(delete_task), - ) -} diff --git a/backend/src/services/git_service.rs b/backend/src/services/git_service.rs deleted file mode 100644 index 79ae3f93..00000000 --- a/backend/src/services/git_service.rs +++ /dev/null @@ -1,1373 +0,0 @@ -use std::path::{Path, PathBuf}; - -use git2::{ - build::CheckoutBuilder, BranchType, CherrypickOptions, Cred, DiffOptions, Error as GitError, - FetchOptions, RemoteCallbacks, Repository, WorktreeAddOptions, -}; -use regex; -use tracing::{debug, info}; - -use crate::{ - models::task_attempt::{DiffChunk, DiffChunkType, FileDiff, WorktreeDiff}, - utils::worktree_manager::WorktreeManager, -}; - -#[derive(Debug)] -pub enum GitServiceError { - Git(GitError), - IoError(std::io::Error), - InvalidRepository(String), - BranchNotFound(String), - - MergeConflicts(String), - InvalidPath(String), - WorktreeDirty(String), -} - -impl std::fmt::Display for 
GitServiceError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - GitServiceError::Git(e) => write!(f, "Git error: {}", e), - GitServiceError::IoError(e) => write!(f, "IO error: {}", e), - GitServiceError::InvalidRepository(e) => write!(f, "Invalid repository: {}", e), - GitServiceError::BranchNotFound(e) => write!(f, "Branch not found: {}", e), - - GitServiceError::MergeConflicts(e) => write!(f, "Merge conflicts: {}", e), - GitServiceError::InvalidPath(e) => write!(f, "Invalid path: {}", e), - GitServiceError::WorktreeDirty(e) => { - write!(f, "Worktree has uncommitted changes: {}", e) - } - } - } -} - -impl std::error::Error for GitServiceError {} - -impl From for GitServiceError { - fn from(err: GitError) -> Self { - GitServiceError::Git(err) - } -} - -impl From for GitServiceError { - fn from(err: std::io::Error) -> Self { - GitServiceError::IoError(err) - } -} - -/// Service for managing Git operations in task execution workflows -pub struct GitService { - repo_path: PathBuf, -} - -impl GitService { - /// Create a new GitService for the given repository path - pub fn new>(repo_path: P) -> Result { - let repo_path = repo_path.as_ref().to_path_buf(); - - // Validate that the path exists and is a git repository - if !repo_path.exists() { - return Err(GitServiceError::InvalidPath(format!( - "Repository path does not exist: {}", - repo_path.display() - ))); - } - - // Try to open the repository to validate it - Repository::open(&repo_path).map_err(|e| { - GitServiceError::InvalidRepository(format!( - "Failed to open repository at {}: {}", - repo_path.display(), - e - )) - })?; - - Ok(Self { repo_path }) - } - - /// Open the repository - fn open_repo(&self) -> Result { - Repository::open(&self.repo_path).map_err(GitServiceError::from) - } - - /// Create a worktree with a new branch - pub fn create_worktree( - &self, - branch_name: &str, - worktree_path: &Path, - base_branch: Option<&str>, - ) -> Result<(), GitServiceError> { - let repo = self.open_repo()?; - - // Ensure parent directory exists - if let Some(parent) = worktree_path.parent() { - std::fs::create_dir_all(parent)?; - } - - // Choose base reference - let base_reference = if let Some(base_branch) = base_branch { - let branch = repo - .find_branch(base_branch, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(base_branch.to_string()))?; - branch.into_reference() - } else { - // Handle new repositories without any commits - match repo.head() { - Ok(head_ref) => head_ref, - Err(e) - if e.class() == git2::ErrorClass::Reference - && e.code() == git2::ErrorCode::UnbornBranch => - { - // Repository has no commits yet, create an initial commit - self.create_initial_commit(&repo)?; - repo.find_reference("refs/heads/main")? 
- } - Err(e) => return Err(e.into()), - } - }; - - // Create branch - repo.branch(branch_name, &base_reference.peel_to_commit()?, false)?; - - let branch = repo.find_branch(branch_name, BranchType::Local)?; - let branch_ref = branch.into_reference(); - let mut worktree_opts = WorktreeAddOptions::new(); - worktree_opts.reference(Some(&branch_ref)); - - // Create the worktree at the specified path - repo.worktree(branch_name, worktree_path, Some(&worktree_opts))?; - - // Fix commondir for Windows/WSL compatibility - let worktree_name = worktree_path - .file_name() - .and_then(|n| n.to_str()) - .unwrap_or(branch_name); - if let Err(e) = - WorktreeManager::fix_worktree_commondir_for_windows_wsl(&self.repo_path, worktree_name) - { - tracing::warn!("Failed to fix worktree commondir for Windows/WSL: {}", e); - } - - info!( - "Created worktree '{}' at path: {}", - branch_name, - worktree_path.display() - ); - Ok(()) - } - - /// Create an initial commit for empty repositories - fn create_initial_commit(&self, repo: &Repository) -> Result<(), GitServiceError> { - let signature = repo.signature().unwrap_or_else(|_| { - // Fallback if no Git config is set - git2::Signature::now("Vibe Kanban", "noreply@vibekanban.com") - .expect("Failed to create fallback signature") - }); - - let tree_id = { - let tree_builder = repo.treebuilder(None)?; - tree_builder.write()? - }; - let tree = repo.find_tree(tree_id)?; - - // Create initial commit on main branch - let _commit_id = repo.commit( - Some("refs/heads/main"), - &signature, - &signature, - "Initial commit", - &tree, - &[], - )?; - - // Set HEAD to point to main branch - repo.set_head("refs/heads/main")?; - - info!("Created initial commit for empty repository"); - Ok(()) - } - - /// Merge changes from a worktree branch back to the main repository - pub fn merge_changes( - &self, - worktree_path: &Path, - branch_name: &str, - base_branch_name: &str, - commit_message: &str, - ) -> Result { - // Open the worktree repository - let worktree_repo = Repository::open(worktree_path)?; - - // Check if worktree is dirty before proceeding - self.check_worktree_clean(&worktree_repo)?; - - // Verify the task branch exists in the worktree - let task_branch = worktree_repo - .find_branch(branch_name, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(branch_name.to_string()))?; - - // Get the base branch from the worktree - let base_branch = worktree_repo - .find_branch(base_branch_name, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(base_branch_name.to_string()))?; - - // Get commits - let base_commit = base_branch.get().peel_to_commit()?; - let task_commit = task_branch.get().peel_to_commit()?; - - // Get the signature for the merge commit - let signature = worktree_repo.signature()?; - - // Perform a squash merge - create a single commit with all changes - let squash_commit_id = self.perform_squash_merge( - &worktree_repo, - &base_commit, - &task_commit, - &signature, - commit_message, - base_branch_name, - )?; - - // Fix: Update main repo's HEAD if it's pointing to the base branch - let main_repo = self.open_repo()?; - let refname = format!("refs/heads/{}", base_branch_name); - - if let Ok(main_head) = main_repo.head() { - if let Some(branch_name) = main_head.shorthand() { - if branch_name == base_branch_name { - // Only update main repo's HEAD if it's currently on the base branch - main_repo.set_head(&refname)?; - let mut co = CheckoutBuilder::new(); - co.force(); - main_repo.checkout_head(Some(&mut co))?; - } - } - } - - 
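// At this point perform_squash_merge (defined below) has already committed the merged
// tree with the base branch commit as its only parent and advanced refs/heads/<base>
// to that commit; the checkout above only refreshes the main working tree when it was
// sitting on the base branch.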
info!("Created squash merge commit: {}", squash_commit_id); - Ok(squash_commit_id.to_string()) - } - - /// Check if the worktree is clean (no uncommitted changes to tracked files) - fn check_worktree_clean(&self, repo: &Repository) -> Result<(), GitServiceError> { - let mut status_options = git2::StatusOptions::new(); - status_options - .include_untracked(false) // Don't include untracked files - .include_ignored(false); // Don't include ignored files - - let statuses = repo.statuses(Some(&mut status_options))?; - - if !statuses.is_empty() { - let mut dirty_files = Vec::new(); - for entry in statuses.iter() { - let status = entry.status(); - // Only consider files that are actually tracked and modified - if status.intersects( - git2::Status::INDEX_MODIFIED - | git2::Status::INDEX_NEW - | git2::Status::INDEX_DELETED - | git2::Status::INDEX_RENAMED - | git2::Status::INDEX_TYPECHANGE - | git2::Status::WT_MODIFIED - | git2::Status::WT_DELETED - | git2::Status::WT_RENAMED - | git2::Status::WT_TYPECHANGE, - ) { - if let Some(path) = entry.path() { - dirty_files.push(path.to_string()); - } - } - } - - if !dirty_files.is_empty() { - return Err(GitServiceError::WorktreeDirty(dirty_files.join(", "))); - } - } - - Ok(()) - } - - /// Perform a squash merge of task branch into base branch, but fail on conflicts - fn perform_squash_merge( - &self, - repo: &Repository, - base_commit: &git2::Commit, - task_commit: &git2::Commit, - signature: &git2::Signature, - commit_message: &str, - base_branch_name: &str, - ) -> Result { - // Attempt an in-memory merge to detect conflicts - let merge_opts = git2::MergeOptions::new(); - let mut index = repo.merge_commits(base_commit, task_commit, Some(&merge_opts))?; - - // If there are conflicts, return an error - if index.has_conflicts() { - return Err(GitServiceError::MergeConflicts( - "Merge failed due to conflicts. 
Please resolve conflicts manually.".to_string(), - )); - } - - // Write the merged tree back to the repository - let tree_id = index.write_tree_to(repo)?; - let tree = repo.find_tree(tree_id)?; - - // Create a squash commit: use merged tree with base_commit as sole parent - let squash_commit_id = repo.commit( - None, // Don't update any reference yet - signature, // Author - signature, // Committer - commit_message, // Custom message - &tree, // Merged tree content - &[base_commit], // Single parent: base branch commit - )?; - - // Update the base branch reference to point to the new commit - let refname = format!("refs/heads/{}", base_branch_name); - repo.reference(&refname, squash_commit_id, true, "Squash merge")?; - - Ok(squash_commit_id) - } - - /// Rebase a worktree branch onto a new base - pub fn rebase_branch( - &self, - worktree_path: &Path, - new_base_branch: Option<&str>, - old_base_branch: &str, - ) -> Result { - let worktree_repo = Repository::open(worktree_path)?; - let main_repo = self.open_repo()?; - - // Check if there's an existing rebase in progress and abort it - let state = worktree_repo.state(); - if state == git2::RepositoryState::Rebase - || state == git2::RepositoryState::RebaseInteractive - || state == git2::RepositoryState::RebaseMerge - { - tracing::warn!("Existing rebase in progress, aborting it first"); - // Try to abort the existing rebase - if let Ok(mut existing_rebase) = worktree_repo.open_rebase(None) { - let _ = existing_rebase.abort(); - } - } - - // Get the target base branch reference - let base_branch_name = match new_base_branch { - Some(branch) => branch.to_string(), - None => main_repo - .head() - .ok() - .and_then(|head| head.shorthand().map(|s| s.to_string())) - .unwrap_or_else(|| "main".to_string()), - }; - let base_branch_name = base_branch_name.as_str(); - - // Handle remote branches by fetching them first and creating/updating local tracking branches - let local_branch_name = if base_branch_name.starts_with("origin/") { - // This is a remote branch, fetch it and create/update local tracking branch - let remote_branch_name = base_branch_name.strip_prefix("origin/").unwrap(); - - // First, fetch the latest changes from remote - self.fetch_from_remote(&main_repo)?; - - // Try to find the remote branch after fetch - let remote_branch = main_repo - .find_branch(base_branch_name, BranchType::Remote) - .map_err(|_| GitServiceError::BranchNotFound(base_branch_name.to_string()))?; - - // Check if local tracking branch exists - match main_repo.find_branch(remote_branch_name, BranchType::Local) { - Ok(mut local_branch) => { - // Local tracking branch exists, update it to match remote - let remote_commit = remote_branch.get().peel_to_commit()?; - local_branch - .get_mut() - .set_target(remote_commit.id(), "Update local branch to match remote")?; - } - Err(_) => { - // Local tracking branch doesn't exist, create it - let remote_commit = remote_branch.get().peel_to_commit()?; - main_repo.branch(remote_branch_name, &remote_commit, false)?; - } - } - - // Use the local branch name for rebase - remote_branch_name - } else { - // This is already a local branch - base_branch_name - }; - - // Get the local branch for rebase - let base_branch = main_repo - .find_branch(local_branch_name, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(local_branch_name.to_string()))?; - - let new_base_commit_id = base_branch.get().peel_to_commit()?.id(); - - // Get the HEAD commit of the worktree (the changes to rebase) - let head = worktree_repo.head()?; - let 
task_branch_commit_id = head.peel_to_commit()?.id(); - - let signature = worktree_repo.signature()?; - - // Find the old base branch - let old_base_branch_ref = if old_base_branch.starts_with("origin/") { - // Remote branch - get local tracking branch name - let remote_branch_name = old_base_branch.strip_prefix("origin/").unwrap(); - main_repo - .find_branch(remote_branch_name, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(remote_branch_name.to_string()))? - } else { - // Local branch - main_repo - .find_branch(old_base_branch, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(old_base_branch.to_string()))? - }; - - let old_base_commit_id = old_base_branch_ref.get().peel_to_commit()?.id(); - - // Find commits unique to the task branch - let unique_commits = Self::find_unique_commits( - &worktree_repo, - task_branch_commit_id, - old_base_commit_id, - new_base_commit_id, - )?; - - if !unique_commits.is_empty() { - // Reset HEAD to the new base branch - let new_base_commit = worktree_repo.find_commit(new_base_commit_id)?; - worktree_repo.reset(new_base_commit.as_object(), git2::ResetType::Hard, None)?; - - // Cherry-pick the unique commits - Self::cherry_pick_commits(&worktree_repo, &unique_commits, &signature)?; - } else { - // No unique commits to rebase, just reset to new base - let new_base_commit = worktree_repo.find_commit(new_base_commit_id)?; - worktree_repo.reset(new_base_commit.as_object(), git2::ResetType::Hard, None)?; - } - - // Get the final commit ID after rebase - let final_head = worktree_repo.head()?; - let final_commit = final_head.peel_to_commit()?; - - info!("Rebase completed. New HEAD: {}", final_commit.id()); - Ok(final_commit.id().to_string()) - } - - /// Get enhanced diff for task attempts (from merge commit or worktree) - pub fn get_enhanced_diff( - &self, - worktree_path: &Path, - merge_commit_id: Option<&str>, - base_branch: &str, - ) -> Result { - let mut files = Vec::new(); - - if let Some(merge_commit_id) = merge_commit_id { - // Task attempt has been merged - show the diff from the merge commit - self.get_merged_diff(merge_commit_id, &mut files)?; - } else { - // Task attempt not yet merged - get worktree diff - self.get_worktree_diff(worktree_path, base_branch, &mut files)?; - } - - Ok(WorktreeDiff { files }) - } - - /// Get diff from a merge commit - fn get_merged_diff( - &self, - merge_commit_id: &str, - files: &mut Vec, - ) -> Result<(), GitServiceError> { - let main_repo = self.open_repo()?; - let merge_commit = main_repo.find_commit(git2::Oid::from_str(merge_commit_id)?)?; - - // A merge commit has multiple parents - first parent is the main branch before merge, - // second parent is the branch that was merged - let parents: Vec<_> = merge_commit.parents().collect(); - - // Create diff options with more context - let mut diff_opts = DiffOptions::new(); - diff_opts.context_lines(10); - diff_opts.interhunk_lines(0); - - let diff = if parents.len() >= 2 { - let base_tree = parents[0].tree()?; - let merged_tree = parents[1].tree()?; - main_repo.diff_tree_to_tree( - Some(&base_tree), - Some(&merged_tree), - Some(&mut diff_opts), - )? - } else { - // Fast-forward merge or single parent - let base_tree = if !parents.is_empty() { - parents[0].tree()? - } else { - main_repo.find_tree(git2::Oid::zero())? - }; - let merged_tree = merge_commit.tree()?; - main_repo.diff_tree_to_tree( - Some(&base_tree), - Some(&merged_tree), - Some(&mut diff_opts), - )? 
- }; - - // Process each diff delta - diff.foreach( - &mut |delta, _progress| { - if let Some(path_str) = delta.new_file().path().and_then(|p| p.to_str()) { - let old_file = delta.old_file(); - let new_file = delta.new_file(); - - if let Ok(diff_chunks) = - self.generate_git_diff_chunks(&main_repo, &old_file, &new_file, path_str) - { - if !diff_chunks.is_empty() { - files.push(FileDiff { - path: path_str.to_string(), - chunks: diff_chunks, - }); - } else if delta.status() == git2::Delta::Added - || delta.status() == git2::Delta::Deleted - { - files.push(FileDiff { - path: path_str.to_string(), - chunks: vec![DiffChunk { - chunk_type: if delta.status() == git2::Delta::Added { - DiffChunkType::Insert - } else { - DiffChunkType::Delete - }, - content: format!( - "{} file", - if delta.status() == git2::Delta::Added { - "Added" - } else { - "Deleted" - } - ), - }], - }); - } - } - } - true - }, - None, - None, - None, - )?; - - Ok(()) - } - - /// Get diff for a worktree (before merge) - fn get_worktree_diff( - &self, - worktree_path: &Path, - base_branch: &str, - files: &mut Vec, - ) -> Result<(), GitServiceError> { - let worktree_repo = Repository::open(worktree_path)?; - let main_repo = self.open_repo()?; - - // Get the base branch commit - let base_branch_ref = main_repo - .find_branch(base_branch, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(base_branch.to_string()))?; - let base_branch_oid = base_branch_ref.get().peel_to_commit()?.id(); - - // Get the current worktree HEAD commit - let worktree_head = worktree_repo.head()?; - let worktree_head_oid = worktree_head.peel_to_commit()?.id(); - - // Find the merge base (common ancestor) between the base branch and worktree head - let base_oid = worktree_repo.merge_base(base_branch_oid, worktree_head_oid)?; - let base_commit = worktree_repo.find_commit(base_oid)?; - let base_tree = base_commit.tree()?; - - // Get the current tree from the worktree HEAD commit - let current_commit = worktree_repo.find_commit(worktree_head_oid)?; - let current_tree = current_commit.tree()?; - - // Create a diff between the base tree and current tree - let mut diff_opts = DiffOptions::new(); - diff_opts.context_lines(10); - diff_opts.interhunk_lines(0); - - let diff = worktree_repo.diff_tree_to_tree( - Some(&base_tree), - Some(¤t_tree), - Some(&mut diff_opts), - )?; - - // Process committed changes - diff.foreach( - &mut |delta, _progress| { - if let Some(path_str) = delta.new_file().path().and_then(|p| p.to_str()) { - let old_file = delta.old_file(); - let new_file = delta.new_file(); - - if let Ok(diff_chunks) = self.generate_git_diff_chunks( - &worktree_repo, - &old_file, - &new_file, - path_str, - ) { - if !diff_chunks.is_empty() { - files.push(FileDiff { - path: path_str.to_string(), - chunks: diff_chunks, - }); - } else if delta.status() == git2::Delta::Added - || delta.status() == git2::Delta::Deleted - { - files.push(FileDiff { - path: path_str.to_string(), - chunks: vec![DiffChunk { - chunk_type: if delta.status() == git2::Delta::Added { - DiffChunkType::Insert - } else { - DiffChunkType::Delete - }, - content: format!( - "{} file", - if delta.status() == git2::Delta::Added { - "Added" - } else { - "Deleted" - } - ), - }], - }); - } - } - } - true - }, - None, - None, - None, - )?; - - // Also get unstaged changes (working directory changes) - let current_tree = worktree_repo.head()?.peel_to_tree()?; - - let mut unstaged_diff_opts = DiffOptions::new(); - unstaged_diff_opts.context_lines(10); - unstaged_diff_opts.interhunk_lines(0); - 
unstaged_diff_opts.include_untracked(true); - - let unstaged_diff = worktree_repo - .diff_tree_to_workdir_with_index(Some(¤t_tree), Some(&mut unstaged_diff_opts))?; - - // Process unstaged changes - unstaged_diff.foreach( - &mut |delta, _progress| { - if let Some(path_str) = delta.new_file().path().and_then(|p| p.to_str()) { - if let Err(e) = self.process_unstaged_file( - files, - &worktree_repo, - base_oid, - worktree_path, - path_str, - &delta, - ) { - eprintln!("Error processing unstaged file {}: {:?}", path_str, e); - } - } - true - }, - None, - None, - None, - )?; - - Ok(()) - } - - /// Generate diff chunks using Git's native diff algorithm - fn generate_git_diff_chunks( - &self, - repo: &Repository, - old_file: &git2::DiffFile, - new_file: &git2::DiffFile, - file_path: &str, - ) -> Result, GitServiceError> { - let mut chunks = Vec::new(); - - // Create a patch for the single file using Git's native diff - let old_blob = if !old_file.id().is_zero() { - Some(repo.find_blob(old_file.id())?) - } else { - None - }; - - let new_blob = if !new_file.id().is_zero() { - Some(repo.find_blob(new_file.id())?) - } else { - None - }; - - // Generate patch using Git's diff algorithm - let mut diff_opts = DiffOptions::new(); - diff_opts.context_lines(10); - diff_opts.interhunk_lines(0); - - let patch = match (old_blob.as_ref(), new_blob.as_ref()) { - (Some(old_b), Some(new_b)) => git2::Patch::from_blobs( - old_b, - Some(Path::new(file_path)), - new_b, - Some(Path::new(file_path)), - Some(&mut diff_opts), - )?, - (None, Some(new_b)) => git2::Patch::from_buffers( - &[], - Some(Path::new(file_path)), - new_b.content(), - Some(Path::new(file_path)), - Some(&mut diff_opts), - )?, - (Some(old_b), None) => git2::Patch::from_blob_and_buffer( - old_b, - Some(Path::new(file_path)), - &[], - Some(Path::new(file_path)), - Some(&mut diff_opts), - )?, - (None, None) => { - return Ok(chunks); - } - }; - - // Process the patch hunks - for hunk_idx in 0..patch.num_hunks() { - let (_hunk, hunk_lines) = patch.hunk(hunk_idx)?; - - for line_idx in 0..hunk_lines { - let line = patch.line_in_hunk(hunk_idx, line_idx)?; - let content = String::from_utf8_lossy(line.content()).to_string(); - - let chunk_type = match line.origin() { - ' ' => DiffChunkType::Equal, - '+' => DiffChunkType::Insert, - '-' => DiffChunkType::Delete, - _ => continue, - }; - - chunks.push(DiffChunk { - chunk_type, - content, - }); - } - } - - Ok(chunks) - } - - /// Process unstaged file changes - fn process_unstaged_file( - &self, - files: &mut Vec, - worktree_repo: &Repository, - base_oid: git2::Oid, - worktree_path: &Path, - path_str: &str, - delta: &git2::DiffDelta, - ) -> Result<(), GitServiceError> { - // Check if we already have a diff for this file from committed changes - if let Some(existing_file) = files.iter_mut().find(|f| f.path == path_str) { - // File already has committed changes, create a combined diff - let base_content = self.get_base_file_content(worktree_repo, base_oid, path_str)?; - let working_content = self.get_working_file_content(worktree_path, path_str, delta)?; - - if base_content != working_content { - if let Ok(combined_chunks) = - self.create_combined_diff_chunks(&base_content, &working_content, path_str) - { - existing_file.chunks = combined_chunks; - } - } - } else { - // File only has unstaged changes - let base_content = self.get_base_file_content(worktree_repo, base_oid, path_str)?; - let working_content = self.get_working_file_content(worktree_path, path_str, delta)?; - - if base_content != working_content || 
delta.status() != git2::Delta::Modified { - if let Ok(chunks) = - self.create_combined_diff_chunks(&base_content, &working_content, path_str) - { - if !chunks.is_empty() { - files.push(FileDiff { - path: path_str.to_string(), - chunks, - }); - } - } else if delta.status() != git2::Delta::Modified { - // Fallback for added/deleted files - files.push(FileDiff { - path: path_str.to_string(), - chunks: vec![DiffChunk { - chunk_type: if delta.status() == git2::Delta::Added { - DiffChunkType::Insert - } else { - DiffChunkType::Delete - }, - content: format!( - "{} file", - if delta.status() == git2::Delta::Added { - "Added" - } else { - "Deleted" - } - ), - }], - }); - } - } - } - - Ok(()) - } - - /// Get the content of a file at the base commit - fn get_base_file_content( - &self, - repo: &Repository, - base_oid: git2::Oid, - path_str: &str, - ) -> Result { - if let Ok(base_commit) = repo.find_commit(base_oid) { - if let Ok(base_tree) = base_commit.tree() { - if let Ok(entry) = base_tree.get_path(Path::new(path_str)) { - if let Ok(blob) = repo.find_blob(entry.id()) { - return Ok(String::from_utf8_lossy(blob.content()).to_string()); - } - } - } - } - Ok(String::new()) - } - - /// Get the content of a file in the working directory - fn get_working_file_content( - &self, - worktree_path: &Path, - path_str: &str, - delta: &git2::DiffDelta, - ) -> Result { - if delta.status() != git2::Delta::Deleted { - let file_path = worktree_path.join(path_str); - std::fs::read_to_string(&file_path).map_err(GitServiceError::from) - } else { - Ok(String::new()) - } - } - - /// Create diff chunks from two text contents - fn create_combined_diff_chunks( - &self, - old_content: &str, - new_content: &str, - path_str: &str, - ) -> Result, GitServiceError> { - let mut diff_opts = DiffOptions::new(); - diff_opts.context_lines(10); - diff_opts.interhunk_lines(0); - - let patch = git2::Patch::from_buffers( - old_content.as_bytes(), - Some(Path::new(path_str)), - new_content.as_bytes(), - Some(Path::new(path_str)), - Some(&mut diff_opts), - )?; - - let mut chunks = Vec::new(); - - for hunk_idx in 0..patch.num_hunks() { - let (_hunk, hunk_lines) = patch.hunk(hunk_idx)?; - - for line_idx in 0..hunk_lines { - let line = patch.line_in_hunk(hunk_idx, line_idx)?; - let content = String::from_utf8_lossy(line.content()).to_string(); - - let chunk_type = match line.origin() { - ' ' => DiffChunkType::Equal, - '+' => DiffChunkType::Insert, - '-' => DiffChunkType::Delete, - _ => continue, - }; - - chunks.push(DiffChunk { - chunk_type, - content, - }); - } - } - - Ok(chunks) - } - - /// Delete a file from the repository and commit the change - pub fn delete_file_and_commit( - &self, - worktree_path: &Path, - file_path: &str, - ) -> Result { - let repo = Repository::open(worktree_path)?; - - // Get the absolute path to the file within the worktree - let file_full_path = worktree_path.join(file_path); - - // Check if file exists and delete it - if file_full_path.exists() { - std::fs::remove_file(&file_full_path).map_err(|e| { - GitServiceError::IoError(std::io::Error::other(format!( - "Failed to delete file {}: {}", - file_path, e - ))) - })?; - - debug!("Deleted file: {}", file_path); - } else { - info!("File {} does not exist, skipping deletion", file_path); - } - - // Stage the deletion - let mut index = repo.index()?; - index.remove_path(Path::new(file_path))?; - index.write()?; - - // Create a commit for the file deletion - let signature = repo.signature()?; - let tree_id = index.write_tree()?; - let tree = repo.find_tree(tree_id)?; 
- - // Get the current HEAD commit - let head = repo.head()?; - let parent_commit = head.peel_to_commit()?; - - let commit_message = format!("Delete file: {}", file_path); - let commit_id = repo.commit( - Some("HEAD"), - &signature, - &signature, - &commit_message, - &tree, - &[&parent_commit], - )?; - - info!("File {} deleted and committed: {}", file_path, commit_id); - - Ok(commit_id.to_string()) - } - - /// Get the default branch name for the repository - pub fn get_default_branch_name(&self) -> Result { - let repo = self.open_repo()?; - - let result = match repo.head() { - Ok(head_ref) => Ok(head_ref.shorthand().unwrap_or("main").to_string()), - Err(e) - if e.class() == git2::ErrorClass::Reference - && e.code() == git2::ErrorCode::UnbornBranch => - { - Ok("main".to_string()) // Repository has no commits yet - } - Err(_) => Ok("main".to_string()), // Fallback - }; - result - } - - /// Recreate a worktree from an existing branch (for cold task support) - pub async fn recreate_worktree_from_branch( - &self, - branch_name: &str, - stored_worktree_path: &Path, - ) -> Result { - let repo = self.open_repo()?; - - // Verify branch exists before proceeding - let _branch = repo - .find_branch(branch_name, BranchType::Local) - .map_err(|_| GitServiceError::BranchNotFound(branch_name.to_string()))?; - drop(_branch); - - let stored_worktree_path_str = stored_worktree_path.to_string_lossy().to_string(); - - info!( - "Recreating worktree using stored path: {} (branch: {})", - stored_worktree_path_str, branch_name - ); - - // Clean up existing directory if it exists to avoid git sync issues - if stored_worktree_path.exists() { - debug!( - "Removing existing directory before worktree recreation: {}", - stored_worktree_path_str - ); - std::fs::remove_dir_all(stored_worktree_path).map_err(|e| { - GitServiceError::IoError(std::io::Error::other(format!( - "Failed to remove existing worktree directory {}: {}", - stored_worktree_path_str, e - ))) - })?; - } - - // Ensure parent directory exists - critical for session continuity - if let Some(parent) = stored_worktree_path.parent() { - std::fs::create_dir_all(parent).map_err(|e| { - GitServiceError::IoError(std::io::Error::other(format!( - "Failed to create parent directory for worktree path {}: {}", - stored_worktree_path_str, e - ))) - })?; - } - - // Extract repository path for WorktreeManager - let repo_path = repo - .workdir() - .ok_or_else(|| { - GitServiceError::InvalidRepository( - "Repository has no working directory".to_string(), - ) - })? - .to_str() - .ok_or_else(|| { - GitServiceError::InvalidRepository("Repository path is not valid UTF-8".to_string()) - })? 
- .to_string(); - - WorktreeManager::ensure_worktree_exists( - repo_path, - branch_name.to_string(), - stored_worktree_path.to_path_buf(), - ) - .await - .map_err(|e| { - GitServiceError::IoError(std::io::Error::other(format!( - "WorktreeManager error: {}", - e - ))) - })?; - - info!( - "Successfully recreated worktree at original path: {} -> {}", - branch_name, stored_worktree_path_str - ); - Ok(stored_worktree_path.to_path_buf()) - } - - /// Extract GitHub owner and repo name from git repo path - pub fn get_github_repo_info(&self) -> Result<(String, String), GitServiceError> { - let repo = self.open_repo()?; - let remote = repo.find_remote("origin").map_err(|_| { - GitServiceError::InvalidRepository("No 'origin' remote found".to_string()) - })?; - - let url = remote.url().ok_or_else(|| { - GitServiceError::InvalidRepository("Remote origin has no URL".to_string()) - })?; - - // Parse GitHub URL (supports both HTTPS and SSH formats) - let github_regex = regex::Regex::new(r"github\.com[:/]([^/]+)/(.+?)(?:\.git)?/?$") - .map_err(|e| GitServiceError::InvalidRepository(format!("Regex error: {}", e)))?; - - if let Some(captures) = github_regex.captures(url) { - let owner = captures.get(1).unwrap().as_str().to_string(); - let repo_name = captures.get(2).unwrap().as_str().to_string(); - Ok((owner, repo_name)) - } else { - Err(GitServiceError::InvalidRepository(format!( - "Not a GitHub repository: {}", - url - ))) - } - } - - /// Push the branch to GitHub remote - pub fn push_to_github( - &self, - worktree_path: &Path, - branch_name: &str, - github_token: &str, - ) -> Result<(), GitServiceError> { - let repo = Repository::open(worktree_path)?; - - // Get the remote - let remote = repo.find_remote("origin")?; - let remote_url = remote.url().ok_or_else(|| { - GitServiceError::InvalidRepository("Remote origin has no URL".to_string()) - })?; - - // Convert SSH URL to HTTPS URL if necessary - let https_url = if remote_url.starts_with("git@github.com:") { - // Convert git@github.com:owner/repo.git to https://github.com/owner/repo.git - remote_url.replace("git@github.com:", "https://github.com/") - } else if remote_url.starts_with("ssh://git@github.com/") { - // Convert ssh://git@github.com/owner/repo.git to https://github.com/owner/repo.git - remote_url.replace("ssh://git@github.com/", "https://github.com/") - } else { - remote_url.to_string() - }; - - // Create a temporary remote with HTTPS URL for pushing - let temp_remote_name = "temp_https_origin"; - - // Remove any existing temp remote - let _ = repo.remote_delete(temp_remote_name); - - // Create temporary HTTPS remote - let mut temp_remote = repo.remote(temp_remote_name, &https_url)?; - - // Create refspec for pushing the branch - let refspec = format!("refs/heads/{}:refs/heads/{}", branch_name, branch_name); - - // Set up authentication callback using the GitHub token - let mut callbacks = git2::RemoteCallbacks::new(); - callbacks.credentials(|_url, username_from_url, _allowed_types| { - git2::Cred::userpass_plaintext(username_from_url.unwrap_or("git"), github_token) - }); - - // Configure push options - let mut push_options = git2::PushOptions::new(); - push_options.remote_callbacks(callbacks); - - // Push the branch - let push_result = temp_remote.push(&[&refspec], Some(&mut push_options)); - - // Clean up the temporary remote - let _ = repo.remote_delete(temp_remote_name); - - // Check push result - push_result?; - - info!("Pushed branch {} to GitHub using HTTPS", branch_name); - Ok(()) - } - - /// Fetch from remote repository, with SSH 
authentication callbacks - fn fetch_from_remote(&self, repo: &Repository) -> Result<(), GitServiceError> { - // Find the “origin” remote - let mut remote = repo.find_remote("origin").map_err(|_| { - GitServiceError::Git(git2::Error::from_str("Remote 'origin' not found")) - })?; - - // Prepare callbacks for authentication - let mut callbacks = RemoteCallbacks::new(); - callbacks.credentials(|_url, username_from_url, _| { - // Try SSH agent first - if let Some(username) = username_from_url { - if let Ok(cred) = Cred::ssh_key_from_agent(username) { - return Ok(cred); - } - } - // Fallback to key file (~/.ssh/id_rsa) - let home = dirs::home_dir() - .ok_or_else(|| git2::Error::from_str("Could not find home directory"))?; - let key_path = home.join(".ssh").join("id_rsa"); - Cred::ssh_key(username_from_url.unwrap_or("git"), None, &key_path, None) - }); - - // Set up fetch options with our callbacks - let mut fetch_opts = FetchOptions::new(); - fetch_opts.remote_callbacks(callbacks); - - // Actually fetch (no specific refspecs = fetch all configured) - remote - .fetch(&[] as &[&str], Some(&mut fetch_opts), None) - .map_err(GitServiceError::Git)?; - Ok(()) - } - - /// Find the merge-base between two commits - fn get_merge_base( - repo: &Repository, - commit1: git2::Oid, - commit2: git2::Oid, - ) -> Result<git2::Oid, GitServiceError> { - repo.merge_base(commit1, commit2) - .map_err(GitServiceError::Git) - } - - /// Find commits that are unique to the task branch (not in either base branch) - fn find_unique_commits( - repo: &Repository, - task_branch_commit: git2::Oid, - old_base_commit: git2::Oid, - new_base_commit: git2::Oid, - ) -> Result<Vec<git2::Oid>, GitServiceError> { - // Find merge-base between task branch and old base branch - let task_old_base_merge_base = - Self::get_merge_base(repo, task_branch_commit, old_base_commit)?; - - // Find merge-base between old base and new base - let old_new_base_merge_base = Self::get_merge_base(repo, old_base_commit, new_base_commit)?; - - // Get all commits from task branch back to the merge-base with old base - let mut walker = repo.revwalk()?; - walker.push(task_branch_commit)?; - walker.hide(task_old_base_merge_base)?; - - let mut task_commits = Vec::new(); - for commit_id in walker { - let commit_id = commit_id?; - - // Check if this commit is not in the old base branch lineage - // (i.e., it's not between old_new_base_merge_base and old_base_commit) - let is_in_old_base = repo - .graph_descendant_of(commit_id, old_new_base_merge_base) - .unwrap_or(false) - && repo - .graph_descendant_of(old_base_commit, commit_id) - .unwrap_or(false); - - if !is_in_old_base { - task_commits.push(commit_id); - } - } - - // Reverse to get chronological order for cherry-picking - task_commits.reverse(); - Ok(task_commits) - } - - /// Cherry-pick specific commits onto a new base - fn cherry_pick_commits( - repo: &Repository, - commits: &[git2::Oid], - signature: &git2::Signature, - ) -> Result<(), GitServiceError> { - for &commit_id in commits { - let commit = repo.find_commit(commit_id)?; - - // Cherry-pick the commit - let mut cherrypick_opts = CherrypickOptions::new(); - repo.cherrypick(&commit, Some(&mut cherrypick_opts))?; - - // Check for conflicts - let mut index = repo.index()?; - if index.has_conflicts() { - return Err(GitServiceError::MergeConflicts(format!( - "Cherry-pick failed due to conflicts on commit {}", - commit_id - ))); - } - - // Commit the cherry-pick - let tree_id = index.write_tree()?; - let tree = repo.find_tree(tree_id)?; - let head_commit = repo.head()?.peel_to_commit()?; -
repo.commit( - Some("HEAD"), - signature, - signature, - commit.message().unwrap_or("Cherry-picked commit"), - &tree, - &[&head_commit], - )?; - } - - Ok(()) - } - - /// Clone a repository to the specified directory - pub fn clone_repository( - clone_url: &str, - target_path: &Path, - token: Option<&str>, - ) -> Result { - if let Some(parent) = target_path.parent() { - std::fs::create_dir_all(parent)?; - } - - // Set up callbacks for authentication if token is provided - let mut callbacks = RemoteCallbacks::new(); - if let Some(token) = token { - callbacks.credentials(|_url, username_from_url, _allowed_types| { - Cred::userpass_plaintext(username_from_url.unwrap_or("git"), token) - }); - } else { - // Fallback to SSH agent and key file authentication - callbacks.credentials(|_url, username_from_url, _| { - // Try SSH agent first - if let Some(username) = username_from_url { - if let Ok(cred) = Cred::ssh_key_from_agent(username) { - return Ok(cred); - } - } - // Fallback to key file (~/.ssh/id_rsa) - let home = dirs::home_dir() - .ok_or_else(|| git2::Error::from_str("Could not find home directory"))?; - let key_path = home.join(".ssh").join("id_rsa"); - Cred::ssh_key(username_from_url.unwrap_or("git"), None, &key_path, None) - }); - } - - // Set up fetch options with our callbacks - let mut fetch_opts = FetchOptions::new(); - fetch_opts.remote_callbacks(callbacks); - - // Create a repository builder with fetch options - let mut builder = git2::build::RepoBuilder::new(); - builder.fetch_options(fetch_opts); - - let repo = builder.clone(clone_url, target_path)?; - - tracing::info!( - "Successfully cloned repository from {} to {}", - clone_url, - target_path.display() - ); - - Ok(repo) - } -} - -#[cfg(test)] -mod tests { - use tempfile::TempDir; - - use super::*; - - fn create_test_repo() -> (TempDir, Repository) { - let temp_dir = TempDir::new().unwrap(); - let repo = Repository::init(temp_dir.path()).unwrap(); - - // Configure the repository - let mut config = repo.config().unwrap(); - config.set_str("user.name", "Test User").unwrap(); - config.set_str("user.email", "test@example.com").unwrap(); - - (temp_dir, repo) - } - - #[test] - fn test_git_service_creation() { - let (temp_dir, _repo) = create_test_repo(); - let _git_service = GitService::new(temp_dir.path()).unwrap(); - } - - #[test] - fn test_invalid_repository_path() { - let result = GitService::new("/nonexistent/path"); - assert!(result.is_err()); - } - - #[test] - fn test_default_branch_name() { - let (temp_dir, _repo) = create_test_repo(); - let git_service = GitService::new(temp_dir.path()).unwrap(); - let branch_name = git_service.get_default_branch_name().unwrap(); - assert_eq!(branch_name, "main"); - } -} diff --git a/backend/src/services/mod.rs b/backend/src/services/mod.rs deleted file mode 100644 index 20a1c8dc..00000000 --- a/backend/src/services/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -pub mod analytics; -pub mod git_service; -pub mod github_service; -pub mod notification_service; -pub mod pr_monitor; -pub mod process_service; - -pub use analytics::{generate_user_id, AnalyticsConfig, AnalyticsService}; -pub use git_service::{GitService, GitServiceError}; -pub use github_service::{CreatePrRequest, GitHubRepoInfo, GitHubService, GitHubServiceError}; -pub use notification_service::{NotificationConfig, NotificationService}; -pub use pr_monitor::PrMonitorService; -pub use process_service::ProcessService; diff --git a/backend/src/services/pr_monitor.rs b/backend/src/services/pr_monitor.rs deleted file mode 100644 index 
aa0ab72f..00000000 --- a/backend/src/services/pr_monitor.rs +++ /dev/null @@ -1,214 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use sqlx::SqlitePool; - -use tokio::{sync::RwLock, time::interval}; - -use tracing::{debug, error, info, warn}; - -use uuid::Uuid; - -use crate::{ - models::{ - config::Config, - task::{Task, TaskStatus}, - task_attempt::TaskAttempt, - }, - services::{GitHubRepoInfo, GitHubService, GitService}, -}; - -/// Service to monitor GitHub PRs and update task status when they are merged -pub struct PrMonitorService { - pool: SqlitePool, - poll_interval: Duration, -} - -#[derive(Debug)] -pub struct PrInfo { - pub attempt_id: Uuid, - pub task_id: Uuid, - pub project_id: Uuid, - pub pr_number: i64, - pub repo_owner: String, - pub repo_name: String, - pub github_token: String, -} - -impl PrMonitorService { - pub fn new(pool: SqlitePool) -> Self { - Self { - pool, - poll_interval: Duration::from_secs(60), // Check every minute - } - } - - /// Start the PR monitoring service with config - pub async fn start_with_config(&self, config: Arc<RwLock<Config>>) { - info!( - "Starting PR monitoring service with interval {:?}", - self.poll_interval - ); - - let mut interval = interval(self.poll_interval); - - loop { - interval.tick().await; - - // Get GitHub token from config - let github_token = { - let config_read = config.read().await; - if config_read.github.pat.is_some() { - config_read.github.pat.clone() - } else { - config_read.github.token.clone() - } - }; - - match github_token { - Some(token) => { - if let Err(e) = self.check_all_open_prs_with_token(&token).await { - error!("Error checking PRs: {}", e); - } - } - None => { - debug!("No GitHub token configured, skipping PR monitoring"); - } - } - } - } - - /// Check all open PRs for updates with the provided GitHub token - async fn check_all_open_prs_with_token( - &self, - github_token: &str, - ) -> Result<(), Box> { - let open_prs = self.get_open_prs_with_token(github_token).await?; - - if open_prs.is_empty() { - debug!("No open PRs to check"); - return Ok(()); - } - - info!("Checking {} open PRs", open_prs.len()); - - for pr_info in open_prs { - if let Err(e) = self.check_pr_status(&pr_info).await { - error!( - "Error checking PR #{} for attempt {}: {}", - pr_info.pr_number, pr_info.attempt_id, e - ); - } - } - - Ok(()) - } - - /// Get all task attempts with open PRs using the provided GitHub token - async fn get_open_prs_with_token( - &self, - github_token: &str, - ) -> Result<Vec<PrInfo>, sqlx::Error> { - let rows = sqlx::query!( - r#"SELECT - ta.id as "attempt_id!: Uuid", - ta.task_id as "task_id!: Uuid", - ta.pr_number as "pr_number!: i64", - ta.pr_url, - t.project_id as "project_id!: Uuid", - p.git_repo_path - FROM task_attempts ta - JOIN tasks t ON ta.task_id = t.id - JOIN projects p ON t.project_id = p.id - WHERE ta.pr_status = 'open' AND ta.pr_number IS NOT NULL"# - ) - .fetch_all(&self.pool) - .await?; - - let mut pr_infos = Vec::new(); - - for row in rows { - // Get GitHub repo info from local git repository - match GitService::new(&row.git_repo_path) { - Ok(git_service) => match git_service.get_github_repo_info() { - Ok((owner, repo_name)) => { - pr_infos.push(PrInfo { - attempt_id: row.attempt_id, - task_id: row.task_id, - project_id: row.project_id, - pr_number: row.pr_number, - repo_owner: owner, - repo_name, - github_token: github_token.to_string(), - }); - } - Err(e) => { - warn!( - "Could not extract repo info from git path {}: {}", - row.git_repo_path, e - ); - } - }, - Err(e) => { - warn!( - "Could not create git service for path {}:
{}", - row.git_repo_path, e - ); - } - } - } - - Ok(pr_infos) - } - - /// Check the status of a specific PR - async fn check_pr_status( - &self, - pr_info: &PrInfo, - ) -> Result<(), Box> { - let github_service = GitHubService::new(&pr_info.github_token)?; - - let repo_info = GitHubRepoInfo { - owner: pr_info.repo_owner.clone(), - repo_name: pr_info.repo_name.clone(), - }; - - let pr_status = github_service - .update_pr_status(&repo_info, pr_info.pr_number) - .await?; - - debug!( - "PR #{} status: {} (was open)", - pr_info.pr_number, pr_status.status - ); - - // Update the PR status in the database - if pr_status.status != "open" { - // Extract merge commit SHA if the PR was merged - let merge_commit_sha = pr_status.merge_commit_sha.as_deref(); - - TaskAttempt::update_pr_status( - &self.pool, - pr_info.attempt_id, - &pr_status.status, - pr_status.merged_at, - merge_commit_sha, - ) - .await?; - - // If the PR was merged, update the task status to done - if pr_status.merged { - info!( - "PR #{} was merged, updating task {} to done", - pr_info.pr_number, pr_info.task_id - ); - - Task::update_status( - &self.pool, - pr_info.task_id, - pr_info.project_id, - TaskStatus::Done, - ) - .await?; - } - } - - Ok(()) - } -} diff --git a/backend/src/services/process_service.rs b/backend/src/services/process_service.rs deleted file mode 100644 index f4d61eb5..00000000 --- a/backend/src/services/process_service.rs +++ /dev/null @@ -1,944 +0,0 @@ -use std::str::FromStr; - -use sqlx::SqlitePool; -use tracing::{debug, info}; -use uuid::Uuid; - -use crate::{ - command_runner, - executor::Executor, - models::{ - execution_process::{CreateExecutionProcess, ExecutionProcess, ExecutionProcessType}, - executor_session::{CreateExecutorSession, ExecutorSession}, - project::Project, - task::Task, - task_attempt::{TaskAttempt, TaskAttemptError}, - }, - utils::shell::get_shell_command, -}; - -/// Service responsible for managing process execution lifecycle -pub struct ProcessService; - -impl ProcessService { - /// Run cleanup script if project has one configured - pub async fn run_cleanup_script_if_configured( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - // Get project to check if cleanup script exists - let project = Project::find_by_id(pool, project_id) - .await? 
- .ok_or(TaskAttemptError::ProjectNotFound)?; - - if Self::should_run_cleanup_script(&project) { - // Get worktree path - let task_attempt = TaskAttempt::find_by_id(pool, attempt_id).await?.ok_or( - TaskAttemptError::ValidationError("Task attempt not found".to_string()), - )?; - - tracing::info!( - "Running cleanup script for project {} in attempt {}", - project_id, - attempt_id - ); - - Self::start_cleanup_script( - pool, - app_state, - attempt_id, - task_id, - &project, - &task_attempt.worktree_path, - ) - .await?; - } else { - tracing::debug!("No cleanup script configured for project {}", project_id); - } - - Ok(()) - } - - /// Automatically run setup if needed, then continue with the specified operation - pub async fn auto_setup_and_execute( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - operation: &str, // "dev_server", "coding_agent", or "followup" - operation_params: Option, - ) -> Result<(), TaskAttemptError> { - // Check if setup is completed for this worktree - let setup_completed = TaskAttempt::is_setup_completed(pool, attempt_id).await?; - - // Get project to check if setup script exists - let project = Project::find_by_id(pool, project_id) - .await? - .ok_or(TaskAttemptError::ProjectNotFound)?; - - let needs_setup = Self::should_run_setup_script(&project) && !setup_completed; - - if needs_setup { - // Run setup with delegation to the original operation - Self::execute_setup_with_delegation( - pool, - app_state, - attempt_id, - task_id, - project_id, - operation, - operation_params, - ) - .await - } else { - // Setup not needed or already completed, continue with original operation - match operation { - "dev_server" => { - Self::start_dev_server_direct(pool, app_state, attempt_id, task_id, project_id) - .await - } - "coding_agent" => { - Self::start_coding_agent(pool, app_state, attempt_id, task_id, project_id).await - } - "followup" => { - let prompt = operation_params - .as_ref() - .and_then(|p| p.get("prompt")) - .and_then(|p| p.as_str()) - .unwrap_or(""); - Self::start_followup_execution_direct( - pool, app_state, attempt_id, task_id, project_id, prompt, - ) - .await - .map(|_| ()) - } - _ => Err(TaskAttemptError::ValidationError(format!( - "Unknown operation: {}", - operation - ))), - } - } - } - - /// Execute setup script with delegation context for continuing after completion - async fn execute_setup_with_delegation( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - delegate_to: &str, - operation_params: Option, - ) -> Result<(), TaskAttemptError> { - let (task_attempt, project) = - Self::load_execution_context(pool, attempt_id, project_id).await?; - - // Create delegation context for execution monitor - let delegation_context = serde_json::json!({ - "delegate_to": delegate_to, - "operation_params": { - "task_id": task_id, - "project_id": project_id, - "attempt_id": attempt_id, - "additional": operation_params - } - }); - - // Create modified setup script execution with delegation context in args - let setup_script = project.setup_script.as_ref().unwrap(); - let process_id = Uuid::new_v4(); - - // Create execution process record with delegation context - let _execution_process = Self::create_execution_process_record_with_delegation( - pool, - attempt_id, - process_id, - setup_script, - &task_attempt.worktree_path, - delegation_context, - ) - .await?; - - // Setup script starting with delegation - - tracing::info!( - "Starting 
setup script with delegation to {} for task attempt {}", - delegate_to, - attempt_id - ); - - // Execute the setup script - let child = Self::execute_setup_script_process( - setup_script, - pool, - task_id, - attempt_id, - process_id, - &task_attempt.worktree_path, - ) - .await?; - - // Register for monitoring - Self::register_for_monitoring( - app_state, - process_id, - attempt_id, - &ExecutionProcessType::SetupScript, - child, - ) - .await; - - tracing::info!( - "Started setup execution with delegation {} for task attempt {}", - process_id, - attempt_id - ); - Ok(()) - } - - /// Start the execution flow for a task attempt (setup script + executor) - pub async fn start_execution( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - use crate::models::task::{Task, TaskStatus}; - - // Load required entities - let (task_attempt, project) = - Self::load_execution_context(pool, attempt_id, project_id).await?; - - // Update task status to indicate execution has started - Task::update_status(pool, task_id, project_id, TaskStatus::InProgress).await?; - - // Determine execution sequence based on project configuration - if Self::should_run_setup_script(&project) { - Self::start_setup_script( - pool, - app_state, - attempt_id, - task_id, - &project, - &task_attempt.worktree_path, - ) - .await - } else { - Self::start_coding_agent(pool, app_state, attempt_id, task_id, project_id).await - } - } - - /// Start the coding agent after setup is complete or if no setup is needed - pub async fn start_coding_agent( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - _project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - let task_attempt = TaskAttempt::find_by_id(pool, attempt_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - let executor_config = Self::resolve_executor_config(&task_attempt.executor); - - Self::start_process_execution( - pool, - app_state, - attempt_id, - task_id, - crate::executor::ExecutorType::CodingAgent { - config: executor_config, - follow_up: None, - }, - "Starting executor".to_string(), - ExecutionProcessType::CodingAgent, - &task_attempt.worktree_path, - ) - .await - } - - /// Start a dev server for this task attempt (with automatic setup) - pub async fn start_dev_server( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - // Ensure worktree exists (recreate if needed for cold task support) - let _worktree_path = - TaskAttempt::ensure_worktree_exists(pool, attempt_id, project_id, "dev server").await?; - - // Use automatic setup logic - Self::auto_setup_and_execute( - pool, - app_state, - attempt_id, - task_id, - project_id, - "dev_server", - None, - ) - .await - } - - /// Start a dev server directly without setup check (internal method) - pub async fn start_dev_server_direct( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - ) -> Result<(), TaskAttemptError> { - // Ensure worktree exists (recreate if needed for cold task support) - let worktree_path = - TaskAttempt::ensure_worktree_exists(pool, attempt_id, project_id, "dev server").await?; - - // Get the project to access the dev_script - let project = Project::find_by_id(pool, project_id) - .await? 
- .ok_or(TaskAttemptError::TaskNotFound)?; - - let dev_script = project.dev_script.ok_or_else(|| { - TaskAttemptError::ValidationError( - "No dev script configured for this project".to_string(), - ) - })?; - - if dev_script.trim().is_empty() { - return Err(TaskAttemptError::ValidationError( - "Dev script is empty".to_string(), - )); - } - - let result = Self::start_process_execution( - pool, - app_state, - attempt_id, - task_id, - crate::executor::ExecutorType::DevServer(dev_script), - "Starting dev server".to_string(), - ExecutionProcessType::DevServer, - &worktree_path, - ) - .await; - - if result.is_ok() { - app_state - .track_analytics_event( - "dev_server_started", - Some(serde_json::json!({ - "task_id": task_id.to_string(), - "project_id": project_id.to_string(), - "attempt_id": attempt_id.to_string() - })), - ) - .await; - } - - result - } - - /// Start a follow-up execution using the same executor type as the first process (with automatic setup) - /// Returns the attempt_id that was actually used (always the original attempt_id for session continuity) - pub async fn start_followup_execution( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - prompt: &str, - ) -> Result { - use crate::models::task::{Task, TaskStatus}; - - // Get the current task attempt to check if worktree is deleted - let current_attempt = TaskAttempt::find_by_id(pool, attempt_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - let actual_attempt_id = attempt_id; - - if current_attempt.worktree_deleted { - info!( - "Resurrecting deleted attempt {} (branch: {}) for followup execution - maintaining session continuity", - attempt_id, current_attempt.branch - ); - } else { - info!( - "Continuing followup execution on active attempt {} (branch: {})", - attempt_id, current_attempt.branch - ); - } - - // Update task status to indicate follow-up execution has started - Task::update_status(pool, task_id, project_id, TaskStatus::InProgress).await?; - - // Ensure worktree exists (recreate if needed for cold task support) - // This will resurrect the worktree at the exact same path for session continuity - let _worktree_path = - TaskAttempt::ensure_worktree_exists(pool, actual_attempt_id, project_id, "followup") - .await?; - - // Use automatic setup logic with followup parameters - let operation_params = serde_json::json!({ - "prompt": prompt - }); - - Self::auto_setup_and_execute( - pool, - app_state, - attempt_id, - task_id, - project_id, - "followup", - Some(operation_params), - ) - .await?; - - Ok(actual_attempt_id) - } - - /// Start a follow-up execution directly without setup check (internal method) - pub async fn start_followup_execution_direct( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project_id: Uuid, - prompt: &str, - ) -> Result { - // Ensure worktree exists (recreate if needed for cold task support) - // This will resurrect the worktree at the exact same path for session continuity - let worktree_path = - TaskAttempt::ensure_worktree_exists(pool, attempt_id, project_id, "followup").await?; - - // Find the most recent coding agent execution process to get the executor type - // Look up processes from the ORIGINAL attempt to find the session - let execution_processes = - ExecutionProcess::find_by_task_attempt_id(pool, attempt_id).await?; - let most_recent_coding_agent = execution_processes - .iter() - .rev() // Reverse to get most recent first (since they're ordered by 
created_at ASC) - .find(|p| matches!(p.process_type, ExecutionProcessType::CodingAgent)) - .ok_or_else(|| { - tracing::error!( - "No previous coding agent execution found for task attempt {}. Found {} processes: {:?}", - attempt_id, - execution_processes.len(), - execution_processes.iter().map(|p| format!("{:?}", p.process_type)).collect::>() - ); - TaskAttemptError::ValidationError("No previous coding agent execution found for follow-up".to_string()) - })?; - - // Get the executor session to find the session ID - // This looks up the session from the original attempt's processes - let executor_session = - ExecutorSession::find_by_execution_process_id(pool, most_recent_coding_agent.id) - .await? - .ok_or_else(|| { - tracing::error!( - "No executor session found for execution process {} (task attempt {})", - most_recent_coding_agent.id, - attempt_id - ); - TaskAttemptError::ValidationError( - "No executor session found for follow-up".to_string(), - ) - })?; - - let executor_config: crate::executor::ExecutorConfig = match most_recent_coding_agent - .executor_type - .as_deref() - { - Some(executor_str) => executor_str.parse().unwrap(), - _ => { - tracing::error!( - "Invalid or missing executor type '{}' for execution process {} (task attempt {})", - most_recent_coding_agent.executor_type.as_deref().unwrap_or("None"), - most_recent_coding_agent.id, - attempt_id - ); - return Err(TaskAttemptError::ValidationError(format!( - "Invalid executor type for follow-up: {}", - most_recent_coding_agent - .executor_type - .as_deref() - .unwrap_or("None") - ))); - } - }; - - // Try to use follow-up with session ID, but fall back to new session if it fails - let followup_executor = if let Some(session_id) = &executor_session.session_id { - // First try with session ID for continuation - debug!( - "SESSION_FOLLOWUP: Attempting follow-up execution with session ID: {} (attempt: {}, worktree: {})", - session_id, attempt_id, worktree_path - ); - crate::executor::ExecutorType::CodingAgent { - config: executor_config.clone(), - follow_up: Some(crate::executor::FollowUpInfo { - session_id: session_id.clone(), - prompt: prompt.to_string(), - }), - } - } else { - // No session ID available, start new session - tracing::warn!( - "SESSION_FOLLOWUP: No session ID available for follow-up execution on attempt {}, starting new session (worktree: {})", - attempt_id, worktree_path - ); - crate::executor::ExecutorType::CodingAgent { - config: executor_config.clone(), - follow_up: None, - } - }; - - // Try to start the follow-up execution - let execution_result = Self::start_process_execution( - pool, - app_state, - attempt_id, - task_id, - followup_executor, - "Starting follow-up executor".to_string(), - ExecutionProcessType::CodingAgent, - &worktree_path, - ) - .await; - - // If follow-up execution failed and we tried to use a session ID, - // fall back to a new session - if execution_result.is_err() && executor_session.session_id.is_some() { - tracing::warn!( - "SESSION_FOLLOWUP: Follow-up execution with session ID '{}' failed for attempt {}, falling back to new session. 
Error: {:?}", - executor_session.session_id.as_ref().unwrap(), - attempt_id, - execution_result.as_ref().err() - ); - - // Create a new session instead of trying to resume - let new_session_executor = crate::executor::ExecutorType::CodingAgent { - config: executor_config, - follow_up: None, - }; - - Self::start_process_execution( - pool, - app_state, - attempt_id, - task_id, - new_session_executor, - "Starting new executor session (follow-up session failed)".to_string(), - ExecutionProcessType::CodingAgent, - &worktree_path, - ) - .await?; - } else { - // Either it succeeded or we already tried without session ID - execution_result?; - } - - Ok(attempt_id) - } - - /// Unified function to start any type of process execution - #[allow(clippy::too_many_arguments)] - pub async fn start_process_execution( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - executor_type: crate::executor::ExecutorType, - activity_note: String, - process_type: ExecutionProcessType, - worktree_path: &str, - ) -> Result<(), TaskAttemptError> { - let process_id = Uuid::new_v4(); - - // Create execution process record - let _execution_process = Self::create_execution_process_record( - pool, - attempt_id, - process_id, - &executor_type, - process_type.clone(), - worktree_path, - ) - .await?; - - // Create executor session for coding agents - if matches!(process_type, ExecutionProcessType::CodingAgent) { - // Extract follow-up prompt if this is a follow-up execution - let followup_prompt = match &executor_type { - crate::executor::ExecutorType::CodingAgent { - follow_up: Some(ref info), - .. - } => Some(info.prompt.clone()), - _ => None, - }; - Self::create_executor_session_record( - pool, - attempt_id, - task_id, - process_id, - followup_prompt, - ) - .await?; - } - - // Process started successfully - - tracing::info!("Starting {} for task attempt {}", activity_note, attempt_id); - - // Execute the process - let child = Self::execute_process( - &executor_type, - pool, - task_id, - attempt_id, - process_id, - worktree_path, - ) - .await?; - - // Register for monitoring - Self::register_for_monitoring(app_state, process_id, attempt_id, &process_type, child) - .await; - - tracing::info!( - "Started execution {} for task attempt {}", - process_id, - attempt_id - ); - Ok(()) - } - - /// Load the execution context (task attempt and project) with validation - async fn load_execution_context( - pool: &SqlitePool, - attempt_id: Uuid, - project_id: Uuid, - ) -> Result<(TaskAttempt, Project), TaskAttemptError> { - let task_attempt = TaskAttempt::find_by_id(pool, attempt_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - - let project = Project::find_by_id(pool, project_id) - .await? 
- .ok_or(TaskAttemptError::ProjectNotFound)?; - - Ok((task_attempt, project)) - } - - /// Check if setup script should be executed - fn should_run_setup_script(project: &Project) -> bool { - project - .setup_script - .as_ref() - .map(|script| !script.trim().is_empty()) - .unwrap_or(false) - } - - fn should_run_cleanup_script(project: &Project) -> bool { - project - .cleanup_script - .as_ref() - .map(|script| !script.trim().is_empty()) - .unwrap_or(false) - } - - /// Start the setup script execution - async fn start_setup_script( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project: &Project, - worktree_path: &str, - ) -> Result<(), TaskAttemptError> { - let setup_script = project.setup_script.as_ref().unwrap(); - - Self::start_process_execution( - pool, - app_state, - attempt_id, - task_id, - crate::executor::ExecutorType::SetupScript(setup_script.clone()), - "Starting setup script".to_string(), - ExecutionProcessType::SetupScript, - worktree_path, - ) - .await - } - - /// Start the cleanup script execution - async fn start_cleanup_script( - pool: &SqlitePool, - app_state: &crate::app_state::AppState, - attempt_id: Uuid, - task_id: Uuid, - project: &Project, - worktree_path: &str, - ) -> Result<(), TaskAttemptError> { - let cleanup_script = project.cleanup_script.as_ref().unwrap(); - - Self::start_process_execution( - pool, - app_state, - attempt_id, - task_id, - crate::executor::ExecutorType::CleanupScript(cleanup_script.clone()), - "Starting cleanup script".to_string(), - ExecutionProcessType::CleanupScript, - worktree_path, - ) - .await - } - - /// Resolve executor configuration from string name - fn resolve_executor_config(executor_name: &Option) -> crate::executor::ExecutorConfig { - if let Some(name) = executor_name { - crate::executor::ExecutorConfig::from_str(name).unwrap_or_else(|_| { - tracing::warn!( - "Unknown executor type '{}', defaulting to EchoExecutor", - name - ); - crate::executor::ExecutorConfig::Echo - }) - } else { - tracing::warn!("No executor type specified, defaulting to EchoExecutor"); - crate::executor::ExecutorConfig::Echo - } - } - - /// Create execution process database record - async fn create_execution_process_record( - pool: &SqlitePool, - attempt_id: Uuid, - process_id: Uuid, - executor_type: &crate::executor::ExecutorType, - process_type: ExecutionProcessType, - worktree_path: &str, - ) -> Result { - let (shell_cmd, shell_arg) = get_shell_command(); - let (command, args, executor_type_string) = match executor_type { - crate::executor::ExecutorType::SetupScript(_) => ( - shell_cmd.to_string(), - Some(serde_json::to_string(&[shell_arg, "setup-script"]).unwrap()), - Some("setup-script".to_string()), - ), - crate::executor::ExecutorType::CleanupScript(_) => ( - shell_cmd.to_string(), - Some(serde_json::to_string(&[shell_arg, "cleanup-script"]).unwrap()), - Some("cleanup-script".to_string()), - ), - crate::executor::ExecutorType::DevServer(_) => ( - shell_cmd.to_string(), - Some(serde_json::to_string(&[shell_arg, "dev_server"]).unwrap()), - None, // Dev servers don't have an executor type - ), - crate::executor::ExecutorType::CodingAgent { config, follow_up } => { - let command = if follow_up.is_some() { - "followup_executor".to_string() - } else { - "executor".to_string() - }; - (command, None, Some(format!("{}", config))) - } - }; - - let create_process = CreateExecutionProcess { - task_attempt_id: attempt_id, - process_type, - executor_type: executor_type_string, - command, - args, - 
working_directory: worktree_path.to_string(), - }; - - ExecutionProcess::create(pool, &create_process, process_id) - .await - .map_err(TaskAttemptError::from) - } - - /// Create executor session record for coding agents - async fn create_executor_session_record( - pool: &SqlitePool, - attempt_id: Uuid, - task_id: Uuid, - process_id: Uuid, - followup_prompt: Option, - ) -> Result<(), TaskAttemptError> { - // Use follow-up prompt if provided, otherwise get the task to create prompt - let prompt = if let Some(followup_prompt) = followup_prompt { - followup_prompt - } else { - let task = Task::find_by_id(pool, task_id) - .await? - .ok_or(TaskAttemptError::TaskNotFound)?; - format!("{}\n\n{}", task.title, task.description.unwrap_or_default()) - }; - - let session_id = Uuid::new_v4(); - let create_session = CreateExecutorSession { - task_attempt_id: attempt_id, - execution_process_id: process_id, - prompt: Some(prompt), - }; - - ExecutorSession::create(pool, &create_session, session_id) - .await - .map(|_| ()) - .map_err(TaskAttemptError::from) - } - - /// Execute the process based on type - async fn execute_process( - executor_type: &crate::executor::ExecutorType, - pool: &SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - process_id: Uuid, - worktree_path: &str, - ) -> Result { - use crate::executors::{CleanupScriptExecutor, DevServerExecutor, SetupScriptExecutor}; - - let result = match executor_type { - crate::executor::ExecutorType::SetupScript(script) => { - let executor = SetupScriptExecutor { - script: script.clone(), - }; - executor - .execute_streaming(pool, task_id, attempt_id, process_id, worktree_path) - .await - } - crate::executor::ExecutorType::CleanupScript(script) => { - let executor = CleanupScriptExecutor { - script: script.clone(), - }; - executor - .execute_streaming(pool, task_id, attempt_id, process_id, worktree_path) - .await - } - crate::executor::ExecutorType::DevServer(script) => { - let executor = DevServerExecutor { - script: script.clone(), - }; - executor - .execute_streaming(pool, task_id, attempt_id, process_id, worktree_path) - .await - } - crate::executor::ExecutorType::CodingAgent { config, follow_up } => { - let executor = config.create_executor(); - - if let Some(ref follow_up_info) = follow_up { - executor - .execute_followup_streaming( - pool, - task_id, - attempt_id, - process_id, - &follow_up_info.session_id, - &follow_up_info.prompt, - worktree_path, - ) - .await - } else { - executor - .execute_streaming(pool, task_id, attempt_id, process_id, worktree_path) - .await - } - } - }; - - result.map_err(|e| TaskAttemptError::Git(git2::Error::from_str(&e.to_string()))) - } - - /// Register process for monitoring - async fn register_for_monitoring( - app_state: &crate::app_state::AppState, - process_id: Uuid, - attempt_id: Uuid, - process_type: &ExecutionProcessType, - child: command_runner::CommandProcess, - ) { - let execution_type = match process_type { - ExecutionProcessType::SetupScript => crate::app_state::ExecutionType::SetupScript, - ExecutionProcessType::CleanupScript => crate::app_state::ExecutionType::CleanupScript, - ExecutionProcessType::CodingAgent => crate::app_state::ExecutionType::CodingAgent, - ExecutionProcessType::DevServer => crate::app_state::ExecutionType::DevServer, - }; - - app_state - .add_running_execution( - process_id, - crate::app_state::RunningExecution { - task_attempt_id: attempt_id, - _execution_type: execution_type, - child, - }, - ) - .await; - } - - /// Create execution process database record with delegation context - 
async fn create_execution_process_record_with_delegation( - pool: &SqlitePool, - attempt_id: Uuid, - process_id: Uuid, - _setup_script: &str, - worktree_path: &str, - delegation_context: serde_json::Value, - ) -> Result { - let (shell_cmd, shell_arg) = get_shell_command(); - - // Store delegation context in args for execution monitor to read - let args_with_delegation = serde_json::json!([ - shell_arg, - "setup-script", - "--delegation-context", - delegation_context.to_string() - ]); - - let create_process = CreateExecutionProcess { - task_attempt_id: attempt_id, - process_type: ExecutionProcessType::SetupScript, - executor_type: Some("setup-script".to_string()), - command: shell_cmd.to_string(), - args: Some(args_with_delegation.to_string()), - working_directory: worktree_path.to_string(), - }; - - ExecutionProcess::create(pool, &create_process, process_id) - .await - .map_err(TaskAttemptError::from) - } - - /// Execute setup script process specifically - async fn execute_setup_script_process( - setup_script: &str, - pool: &SqlitePool, - task_id: Uuid, - attempt_id: Uuid, - process_id: Uuid, - worktree_path: &str, - ) -> Result { - use crate::executors::SetupScriptExecutor; - - let executor = SetupScriptExecutor { - script: setup_script.to_string(), - }; - - executor - .execute_streaming(pool, task_id, attempt_id, process_id, worktree_path) - .await - .map_err(|e| TaskAttemptError::Git(git2::Error::from_str(&e.to_string()))) - } -} diff --git a/build-npm-package.sh b/build-npm-package.sh index e63fc892..55c32a51 100755 --- a/build-npm-package.sh +++ b/build-npm-package.sh @@ -10,24 +10,24 @@ echo "🔨 Building frontend..." (cd frontend && npm run build) echo "🔨 Building Rust binaries..." -cargo build --release --manifest-path backend/Cargo.toml -cargo build --release --bin mcp_task_server --manifest-path backend/Cargo.toml +cargo build --release --manifest-path Cargo.toml +# cargo build --release --bin mcp_task_server --manifest-path Cargo.toml echo "📦 Creating distribution package..." # Copy the main binary -cp target/release/vibe-kanban vibe-kanban -cp target/release/mcp_task_server vibe-kanban-mcp +cp target/release/server vibe-kanban +# cp target/release/mcp_task_server vibe-kanban-mcp zip vibe-kanban.zip vibe-kanban -zip vibe-kanban-mcp.zip vibe-kanban-mcp +# zip vibe-kanban-mcp.zip vibe-kanban-mcp -rm vibe-kanban vibe-kanban-mcp +rm vibe-kanban #vibe-kanban-mcp mv vibe-kanban.zip npx-cli/dist/macos-arm64/vibe-kanban.zip -mv vibe-kanban-mcp.zip npx-cli/dist/macos-arm64/vibe-kanban-mcp.zip +# mv vibe-kanban-mcp.zip npx-cli/dist/macos-arm64/vibe-kanban-mcp.zip echo "✅ NPM package ready!" echo "📁 Files created:" echo " - npx-cli/dist/macos-arm64/vibe-kanban.zip" -echo " - npx-cli/dist/macos-arm64/vibe-kanban-mcp.zip" \ No newline at end of file +# echo " - npx-cli/dist/macos-arm64/vibe-kanban-mcp.zip" \ No newline at end of file diff --git a/check-both.sh b/check-both.sh new file mode 100755 index 00000000..87857191 --- /dev/null +++ b/check-both.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# ─ load up your Rust/Cargo from ~/.cargo/env ─ +if [ -f "$HOME/.cargo/env" ]; then + # this is where `cargo` typically lives + source "$HOME/.cargo/env" +fi + +# now run both checks +cargo check --workspace --message-format=json "$@" +cargo check --workspace --message-format=json --features cloud "$@" + +# Add this to .vscode/settings.json to lint both cloud and non-cloud +# { +# // rust-analyzer will still do its usual code‑lens, inlay, etc. 
based +# // on whatever "cargo.features" you pick here (can be [] for no-features, +# // or ["foo"] for a specific feature). +# "rust-analyzer.cargo.features": "all", +# // overrideCommand must emit JSON diagnostics. We're just calling our +# // script which in turn calls cargo twice. +# "rust-analyzer.check.overrideCommand": [ +# "${workspaceFolder}/check-both.sh" +# ] +# } \ No newline at end of file diff --git a/backend/.sqlx/query-00aa2d8701f6b1ed2e84ad00b9b6aaf8d3cce788d2494ff283e2fad71df0a05d.json b/crates/db/.sqlx/query-00aa2d8701f6b1ed2e84ad00b9b6aaf8d3cce788d2494ff283e2fad71df0a05d.json similarity index 100% rename from backend/.sqlx/query-00aa2d8701f6b1ed2e84ad00b9b6aaf8d3cce788d2494ff283e2fad71df0a05d.json rename to crates/db/.sqlx/query-00aa2d8701f6b1ed2e84ad00b9b6aaf8d3cce788d2494ff283e2fad71df0a05d.json diff --git a/backend/.sqlx/query-03f2b02ba6dc5ea2b3cf6b1004caea0ad6bcc10ebd63f441d321a389f026e263.json b/crates/db/.sqlx/query-03f2b02ba6dc5ea2b3cf6b1004caea0ad6bcc10ebd63f441d321a389f026e263.json similarity index 100% rename from backend/.sqlx/query-03f2b02ba6dc5ea2b3cf6b1004caea0ad6bcc10ebd63f441d321a389f026e263.json rename to crates/db/.sqlx/query-03f2b02ba6dc5ea2b3cf6b1004caea0ad6bcc10ebd63f441d321a389f026e263.json diff --git a/backend/.sqlx/query-0923b77d137a29fc54d399a873ff15fc4af894490bc65a4d344a7575cb0d8643.json b/crates/db/.sqlx/query-0923b77d137a29fc54d399a873ff15fc4af894490bc65a4d344a7575cb0d8643.json similarity index 100% rename from backend/.sqlx/query-0923b77d137a29fc54d399a873ff15fc4af894490bc65a4d344a7575cb0d8643.json rename to crates/db/.sqlx/query-0923b77d137a29fc54d399a873ff15fc4af894490bc65a4d344a7575cb0d8643.json diff --git a/crates/db/.sqlx/query-0bf539bafb9c27cb352b0e08722c59a1cca3b6073517c982e5c08f62bc3ef4e4.json b/crates/db/.sqlx/query-0bf539bafb9c27cb352b0e08722c59a1cca3b6073517c982e5c08f62bc3ef4e4.json new file mode 100644 index 00000000..1668e749 --- /dev/null +++ b/crates/db/.sqlx/query-0bf539bafb9c27cb352b0e08722c59a1cca3b6073517c982e5c08f62bc3ef4e4.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE tasks SET status = $2, updated_at = CURRENT_TIMESTAMP WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Right": 2 + }, + "nullable": [] + }, + "hash": "0bf539bafb9c27cb352b0e08722c59a1cca3b6073517c982e5c08f62bc3ef4e4" +} diff --git a/backend/.sqlx/query-6e8b860b14decfc2227dc57213f38442943d3fbef5c8418fd6b634c6e0f5e2ea.json b/crates/db/.sqlx/query-1174eecd9f26565a4f4e1e367b5d7c90b4d19b793e496c2e01593f32c5101f24.json similarity index 66% rename from backend/.sqlx/query-6e8b860b14decfc2227dc57213f38442943d3fbef5c8418fd6b634c6e0f5e2ea.json rename to crates/db/.sqlx/query-1174eecd9f26565a4f4e1e367b5d7c90b4d19b793e496c2e01593f32c5101f24.json index 0ce1b7b0..64f8c018 100644 --- a/backend/.sqlx/query-6e8b860b14decfc2227dc57213f38442943d3fbef5c8418fd6b634c6e0f5e2ea.json +++ b/crates/db/.sqlx/query-1174eecd9f26565a4f4e1e367b5d7c90b4d19b793e496c2e01593f32c5101f24.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "INSERT INTO task_attempts (id, task_id, worktree_path, branch, base_branch, merge_commit, executor, pr_url, pr_number, pr_status, pr_merged_at, worktree_deleted, setup_completed_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)\n RETURNING id as \"id!: Uuid\", task_id as \"task_id!: Uuid\", worktree_path, branch, base_branch, merge_commit, executor, pr_url, pr_number, pr_status, pr_merged_at as \"pr_merged_at: DateTime\", worktree_deleted as \"worktree_deleted!: bool\", setup_completed_at as 
\"setup_completed_at: DateTime\", created_at as \"created_at!: DateTime\", updated_at as \"updated_at!: DateTime\"", + "query": "INSERT INTO task_attempts (id, task_id, container_ref, branch, base_branch, merge_commit, base_coding_agent, pr_url, pr_number, pr_status, pr_merged_at, worktree_deleted, setup_completed_at)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)\n RETURNING id as \"id!: Uuid\", task_id as \"task_id!: Uuid\", container_ref, branch, base_branch, merge_commit, base_coding_agent as \"base_coding_agent!\", pr_url, pr_number, pr_status, pr_merged_at as \"pr_merged_at: DateTime\", worktree_deleted as \"worktree_deleted!: bool\", setup_completed_at as \"setup_completed_at: DateTime\", created_at as \"created_at!: DateTime\", updated_at as \"updated_at!: DateTime\"", "describe": { "columns": [ { @@ -14,7 +14,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 2, "type_info": "Text" }, @@ -34,7 +34,7 @@ "type_info": "Text" }, { - "name": "executor", + "name": "base_coding_agent!", "ordinal": 6, "type_info": "Text" }, @@ -85,8 +85,8 @@ "nullable": [ true, false, - false, - false, + true, + true, false, true, true, @@ -100,5 +100,5 @@ false ] }, - "hash": "6e8b860b14decfc2227dc57213f38442943d3fbef5c8418fd6b634c6e0f5e2ea" + "hash": "1174eecd9f26565a4f4e1e367b5d7c90b4d19b793e496c2e01593f32c5101f24" } diff --git a/backend/.sqlx/query-1268afe9ca849daa6722e3df7ca8e9e61f0d37052e782bb5452ab8e1018d9b63.json b/crates/db/.sqlx/query-1268afe9ca849daa6722e3df7ca8e9e61f0d37052e782bb5452ab8e1018d9b63.json similarity index 100% rename from backend/.sqlx/query-1268afe9ca849daa6722e3df7ca8e9e61f0d37052e782bb5452ab8e1018d9b63.json rename to crates/db/.sqlx/query-1268afe9ca849daa6722e3df7ca8e9e61f0d37052e782bb5452ab8e1018d9b63.json diff --git a/crates/db/.sqlx/query-129f898c089030e5ce8c41ff43fd28f213b1c78fc2cf97698da877ff91d6c086.json b/crates/db/.sqlx/query-129f898c089030e5ce8c41ff43fd28f213b1c78fc2cf97698da877ff91d6c086.json new file mode 100644 index 00000000..8c0859b1 --- /dev/null +++ b/crates/db/.sqlx/query-129f898c089030e5ce8c41ff43fd28f213b1c78fc2cf97698da877ff91d6c086.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE task_attempts SET container_ref = $1, updated_at = $2 WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "129f898c089030e5ce8c41ff43fd28f213b1c78fc2cf97698da877ff91d6c086" +} diff --git a/backend/.sqlx/query-1b082630a9622f8667ee7a9aba2c2d3176019a68c6bb83d33008594821415a57.json b/crates/db/.sqlx/query-1b082630a9622f8667ee7a9aba2c2d3176019a68c6bb83d33008594821415a57.json similarity index 100% rename from backend/.sqlx/query-1b082630a9622f8667ee7a9aba2c2d3176019a68c6bb83d33008594821415a57.json rename to crates/db/.sqlx/query-1b082630a9622f8667ee7a9aba2c2d3176019a68c6bb83d33008594821415a57.json diff --git a/crates/db/.sqlx/query-1e339e959f8d2cdac13b3e2b452d2f718c0fd6cf6202d5c9139fb1afda123d29.json b/crates/db/.sqlx/query-1e339e959f8d2cdac13b3e2b452d2f718c0fd6cf6202d5c9139fb1afda123d29.json new file mode 100644 index 00000000..af14b65d --- /dev/null +++ b/crates/db/.sqlx/query-1e339e959f8d2cdac13b3e2b452d2f718c0fd6cf6202d5c9139fb1afda123d29.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "DELETE FROM tasks WHERE id = $1", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "1e339e959f8d2cdac13b3e2b452d2f718c0fd6cf6202d5c9139fb1afda123d29" +} diff --git 
a/backend/.sqlx/query-a9e93d5b09b29faf66e387e4d7596a792d81e75c4d3726e83c2963e8d7c9b56f.json b/crates/db/.sqlx/query-1f1850b240af8edf2a05ad4a250c78331f69f3637f4b8a554898b9e6ba5bba37.json similarity index 66% rename from backend/.sqlx/query-a9e93d5b09b29faf66e387e4d7596a792d81e75c4d3726e83c2963e8d7c9b56f.json rename to crates/db/.sqlx/query-1f1850b240af8edf2a05ad4a250c78331f69f3637f4b8a554898b9e6ba5bba37.json index 8b27451c..da50ab4f 100644 --- a/backend/.sqlx/query-a9e93d5b09b29faf66e387e4d7596a792d81e75c4d3726e83c2963e8d7c9b56f.json +++ b/crates/db/.sqlx/query-1f1850b240af8edf2a05ad4a250c78331f69f3637f4b8a554898b9e6ba5bba37.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT id AS \"id!: Uuid\",\n task_id AS \"task_id!: Uuid\",\n worktree_path,\n branch,\n base_branch,\n merge_commit,\n executor,\n pr_url,\n pr_number,\n pr_status,\n pr_merged_at AS \"pr_merged_at: DateTime\",\n worktree_deleted AS \"worktree_deleted!: bool\",\n setup_completed_at AS \"setup_completed_at: DateTime\",\n created_at AS \"created_at!: DateTime\",\n updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts\n WHERE task_id = $1\n ORDER BY created_at DESC", + "query": "SELECT id AS \"id!: Uuid\",\n task_id AS \"task_id!: Uuid\",\n container_ref,\n branch,\n merge_commit,\n base_branch,\n base_coding_agent AS \"base_coding_agent!\",\n pr_url,\n pr_number,\n pr_status,\n pr_merged_at AS \"pr_merged_at: DateTime\",\n worktree_deleted AS \"worktree_deleted!: bool\",\n setup_completed_at AS \"setup_completed_at: DateTime\",\n created_at AS \"created_at!: DateTime\",\n updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts\n WHERE rowid = $1", "describe": { "columns": [ { @@ -14,7 +14,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 2, "type_info": "Text" }, @@ -24,17 +24,17 @@ "type_info": "Text" }, { - "name": "base_branch", + "name": "merge_commit", "ordinal": 4, "type_info": "Text" }, { - "name": "merge_commit", + "name": "base_branch", "ordinal": 5, "type_info": "Text" }, { - "name": "executor", + "name": "base_coding_agent!", "ordinal": 6, "type_info": "Text" }, @@ -85,12 +85,12 @@ "nullable": [ true, false, - false, - false, - false, true, true, true, + false, + true, + true, true, true, true, @@ -100,5 +100,5 @@ false ] }, - "hash": "a9e93d5b09b29faf66e387e4d7596a792d81e75c4d3726e83c2963e8d7c9b56f" + "hash": "1f1850b240af8edf2a05ad4a250c78331f69f3637f4b8a554898b9e6ba5bba37" } diff --git a/backend/.sqlx/query-4049ca413b285a05aca6b25385e9c8185575f01e9069e4e8581aa45d713f612f.json b/crates/db/.sqlx/query-216193a63f7b0fb788566b63f56d83ee3d344a5c85e1a5999247b6a44f3ae390.json similarity index 78% rename from backend/.sqlx/query-4049ca413b285a05aca6b25385e9c8185575f01e9069e4e8581aa45d713f612f.json rename to crates/db/.sqlx/query-216193a63f7b0fb788566b63f56d83ee3d344a5c85e1a5999247b6a44f3ae390.json index 648e8544..f014637f 100644 --- a/backend/.sqlx/query-4049ca413b285a05aca6b25385e9c8185575f01e9069e4e8581aa45d713f612f.json +++ b/crates/db/.sqlx/query-216193a63f7b0fb788566b63f56d83ee3d344a5c85e1a5999247b6a44f3ae390.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "\n SELECT ta.id as \"attempt_id!: Uuid\", ta.worktree_path, p.git_repo_path as \"git_repo_path!\"\n FROM task_attempts ta\n JOIN tasks t ON ta.task_id = t.id\n JOIN projects p ON t.project_id = p.id\n WHERE ta.task_id = $1\n ", + "query": "\n SELECT ta.id as \"attempt_id!: Uuid\", ta.container_ref, p.git_repo_path as \"git_repo_path!\"\n FROM task_attempts ta\n JOIN tasks t ON ta.task_id = 
t.id\n JOIN projects p ON t.project_id = p.id\n WHERE ta.task_id = $1\n ", "describe": { "columns": [ { @@ -9,7 +9,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 1, "type_info": "Text" }, @@ -24,9 +24,9 @@ }, "nullable": [ true, - false, + true, false ] }, - "hash": "4049ca413b285a05aca6b25385e9c8185575f01e9069e4e8581aa45d713f612f" + "hash": "216193a63f7b0fb788566b63f56d83ee3d344a5c85e1a5999247b6a44f3ae390" } diff --git a/backend/.sqlx/query-216efabcdaa2a6ea166e4468a6ac66d3298666a546e964a509538731ece90c9e.json b/crates/db/.sqlx/query-216efabcdaa2a6ea166e4468a6ac66d3298666a546e964a509538731ece90c9e.json similarity index 100% rename from backend/.sqlx/query-216efabcdaa2a6ea166e4468a6ac66d3298666a546e964a509538731ece90c9e.json rename to crates/db/.sqlx/query-216efabcdaa2a6ea166e4468a6ac66d3298666a546e964a509538731ece90c9e.json diff --git a/backend/.sqlx/query-2188432c66e9010684b6bb670d19abd77695b05d1dd84ef3102930bc0fe6404f.json b/crates/db/.sqlx/query-2188432c66e9010684b6bb670d19abd77695b05d1dd84ef3102930bc0fe6404f.json similarity index 100% rename from backend/.sqlx/query-2188432c66e9010684b6bb670d19abd77695b05d1dd84ef3102930bc0fe6404f.json rename to crates/db/.sqlx/query-2188432c66e9010684b6bb670d19abd77695b05d1dd84ef3102930bc0fe6404f.json diff --git a/backend/.sqlx/query-290ce5c152be8d36e58ff42570f9157beb07ab9e77a03ec6fc30b4f56f9b8f6b.json b/crates/db/.sqlx/query-290ce5c152be8d36e58ff42570f9157beb07ab9e77a03ec6fc30b4f56f9b8f6b.json similarity index 100% rename from backend/.sqlx/query-290ce5c152be8d36e58ff42570f9157beb07ab9e77a03ec6fc30b4f56f9b8f6b.json rename to crates/db/.sqlx/query-290ce5c152be8d36e58ff42570f9157beb07ab9e77a03ec6fc30b4f56f9b8f6b.json diff --git a/crates/db/.sqlx/query-2ec7648202fc6f496b97d9486cf9fd3c59fdba73c168628784f0a09488b80528.json b/crates/db/.sqlx/query-2ec7648202fc6f496b97d9486cf9fd3c59fdba73c168628784f0a09488b80528.json new file mode 100644 index 00000000..4f038c2e --- /dev/null +++ b/crates/db/.sqlx/query-2ec7648202fc6f496b97d9486cf9fd3c59fdba73c168628784f0a09488b80528.json @@ -0,0 +1,38 @@ +{ + "db_name": "SQLite", + "query": "SELECT \n execution_id as \"execution_id!: Uuid\",\n logs,\n byte_size,\n inserted_at as \"inserted_at!: DateTime\"\n FROM execution_process_logs \n WHERE execution_id = $1", + "describe": { + "columns": [ + { + "name": "execution_id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "logs", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "byte_size", + "ordinal": 2, + "type_info": "Integer" + }, + { + "name": "inserted_at!: DateTime", + "ordinal": 3, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true, + false, + false, + false + ] + }, + "hash": "2ec7648202fc6f496b97d9486cf9fd3c59fdba73c168628784f0a09488b80528" +} diff --git a/backend/.sqlx/query-36e4ba7bbd81b402d5a20b6005755eafbb174c8dda442081823406ac32809a94.json b/crates/db/.sqlx/query-36e4ba7bbd81b402d5a20b6005755eafbb174c8dda442081823406ac32809a94.json similarity index 100% rename from backend/.sqlx/query-36e4ba7bbd81b402d5a20b6005755eafbb174c8dda442081823406ac32809a94.json rename to crates/db/.sqlx/query-36e4ba7bbd81b402d5a20b6005755eafbb174c8dda442081823406ac32809a94.json diff --git a/backend/.sqlx/query-3a5b3c98a55ca183ab20c74708e3d7e579dda37972c059e7515c4ceee4bd8dd3.json b/crates/db/.sqlx/query-3a5b3c98a55ca183ab20c74708e3d7e579dda37972c059e7515c4ceee4bd8dd3.json similarity index 100% rename from 
backend/.sqlx/query-3a5b3c98a55ca183ab20c74708e3d7e579dda37972c059e7515c4ceee4bd8dd3.json rename to crates/db/.sqlx/query-3a5b3c98a55ca183ab20c74708e3d7e579dda37972c059e7515c4ceee4bd8dd3.json diff --git a/crates/db/.sqlx/query-3baa595eadaa8c720da7c185c5fce08f973355fd7809e2caaf966d207bcb7b4b.json b/crates/db/.sqlx/query-3baa595eadaa8c720da7c185c5fce08f973355fd7809e2caaf966d207bcb7b4b.json new file mode 100644 index 00000000..e493c8d2 --- /dev/null +++ b/crates/db/.sqlx/query-3baa595eadaa8c720da7c185c5fce08f973355fd7809e2caaf966d207bcb7b4b.json @@ -0,0 +1,74 @@ +{ + "db_name": "SQLite", + "query": "SELECT \n ep.id as \"id!: Uuid\", \n ep.task_attempt_id as \"task_attempt_id!: Uuid\", \n ep.run_reason as \"run_reason!: ExecutionProcessRunReason\",\n ep.executor_action as \"executor_action!: sqlx::types::Json\",\n ep.status as \"status!: ExecutionProcessStatus\",\n ep.exit_code,\n ep.started_at as \"started_at!: DateTime\",\n ep.completed_at as \"completed_at?: DateTime\",\n ep.created_at as \"created_at!: DateTime\", \n ep.updated_at as \"updated_at!: DateTime\"\n FROM execution_processes ep\n JOIN task_attempts ta ON ep.task_attempt_id = ta.id\n JOIN tasks t ON ta.task_id = t.id\n WHERE ep.status = 'running' \n AND ep.run_reason = 'devserver'\n AND t.project_id = $1\n ORDER BY ep.created_at ASC", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_attempt_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "run_reason!: ExecutionProcessRunReason", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "executor_action!: sqlx::types::Json", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "status!: ExecutionProcessStatus", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "exit_code", + "ordinal": 5, + "type_info": "Integer" + }, + { + "name": "started_at!: DateTime", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "completed_at?: DateTime", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at!: DateTime", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 9, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true, + false, + false, + false, + false, + true, + false, + true, + false, + false + ] + }, + "hash": "3baa595eadaa8c720da7c185c5fce08f973355fd7809e2caaf966d207bcb7b4b" +} diff --git a/backend/.sqlx/query-3d0a1cabf2a52e9d90cdfd29c509ca89aeb448d0c1d2446c65cd43db40735e86.json b/crates/db/.sqlx/query-3d0a1cabf2a52e9d90cdfd29c509ca89aeb448d0c1d2446c65cd43db40735e86.json similarity index 100% rename from backend/.sqlx/query-3d0a1cabf2a52e9d90cdfd29c509ca89aeb448d0c1d2446c65cd43db40735e86.json rename to crates/db/.sqlx/query-3d0a1cabf2a52e9d90cdfd29c509ca89aeb448d0c1d2446c65cd43db40735e86.json diff --git a/backend/.sqlx/query-3d6bd16fbce59efe30b7f67ea342e0e4ea6d1432389c02468ad79f1f742d4031.json b/crates/db/.sqlx/query-3d6bd16fbce59efe30b7f67ea342e0e4ea6d1432389c02468ad79f1f742d4031.json similarity index 100% rename from backend/.sqlx/query-3d6bd16fbce59efe30b7f67ea342e0e4ea6d1432389c02468ad79f1f742d4031.json rename to crates/db/.sqlx/query-3d6bd16fbce59efe30b7f67ea342e0e4ea6d1432389c02468ad79f1f742d4031.json diff --git a/backend/.sqlx/query-417a8b1ff4e51de82aea0159a3b97932224dc325b23476cb84153d690227fd8b.json b/crates/db/.sqlx/query-417a8b1ff4e51de82aea0159a3b97932224dc325b23476cb84153d690227fd8b.json similarity index 100% rename from 
backend/.sqlx/query-417a8b1ff4e51de82aea0159a3b97932224dc325b23476cb84153d690227fd8b.json rename to crates/db/.sqlx/query-417a8b1ff4e51de82aea0159a3b97932224dc325b23476cb84153d690227fd8b.json diff --git a/backend/.sqlx/query-461cc1b0bb6fd909afc9dd2246e8526b3771cfbb0b22ae4b5d17b51af587b9e2.json b/crates/db/.sqlx/query-461cc1b0bb6fd909afc9dd2246e8526b3771cfbb0b22ae4b5d17b51af587b9e2.json similarity index 100% rename from backend/.sqlx/query-461cc1b0bb6fd909afc9dd2246e8526b3771cfbb0b22ae4b5d17b51af587b9e2.json rename to crates/db/.sqlx/query-461cc1b0bb6fd909afc9dd2246e8526b3771cfbb0b22ae4b5d17b51af587b9e2.json diff --git a/crates/db/.sqlx/query-4a52af0e7eedb3662a05b23e9a0c74c08d6c255ef598bb8ec3ff9a67f2344ab1.json b/crates/db/.sqlx/query-4a52af0e7eedb3662a05b23e9a0c74c08d6c255ef598bb8ec3ff9a67f2344ab1.json new file mode 100644 index 00000000..9bdd2e18 --- /dev/null +++ b/crates/db/.sqlx/query-4a52af0e7eedb3662a05b23e9a0c74c08d6c255ef598bb8ec3ff9a67f2344ab1.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE executor_sessions \n SET summary = $1, updated_at = $2 \n WHERE execution_process_id = $3", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "4a52af0e7eedb3662a05b23e9a0c74c08d6c255ef598bb8ec3ff9a67f2344ab1" +} diff --git a/crates/db/.sqlx/query-56238751ac9cab8bd97ad787143d91f54c47089c8e732ef80c3d1e85dfba1430.json b/crates/db/.sqlx/query-56238751ac9cab8bd97ad787143d91f54c47089c8e732ef80c3d1e85dfba1430.json new file mode 100644 index 00000000..0c235f5c --- /dev/null +++ b/crates/db/.sqlx/query-56238751ac9cab8bd97ad787143d91f54c47089c8e732ef80c3d1e85dfba1430.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "INSERT INTO execution_process_logs (execution_id, logs, byte_size, inserted_at)\n VALUES ($1, $2, $3, datetime('now', 'subsec'))\n ON CONFLICT (execution_id) DO UPDATE\n SET logs = logs || $2,\n byte_size = byte_size + $3,\n inserted_at = datetime('now', 'subsec')", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "56238751ac9cab8bd97ad787143d91f54c47089c8e732ef80c3d1e85dfba1430" +} diff --git a/backend/.sqlx/query-5a886026d75d515c01f347cc203c8d99dd04c61dc468e2e4c5aa548436d13834.json b/crates/db/.sqlx/query-5a886026d75d515c01f347cc203c8d99dd04c61dc468e2e4c5aa548436d13834.json similarity index 100% rename from backend/.sqlx/query-5a886026d75d515c01f347cc203c8d99dd04c61dc468e2e4c5aa548436d13834.json rename to crates/db/.sqlx/query-5a886026d75d515c01f347cc203c8d99dd04c61dc468e2e4c5aa548436d13834.json diff --git a/backend/.sqlx/query-5ae4dea70309b2aa40d41412f70b200038176dc8c56c49eeaaa65763a1b276eb.json b/crates/db/.sqlx/query-5ae4dea70309b2aa40d41412f70b200038176dc8c56c49eeaaa65763a1b276eb.json similarity index 100% rename from backend/.sqlx/query-5ae4dea70309b2aa40d41412f70b200038176dc8c56c49eeaaa65763a1b276eb.json rename to crates/db/.sqlx/query-5ae4dea70309b2aa40d41412f70b200038176dc8c56c49eeaaa65763a1b276eb.json diff --git a/backend/.sqlx/query-93a1605f90e9672dad29b472b6ad85fa9a55ea3ffa5abcb8724b09d61be254ca.json b/crates/db/.sqlx/query-62836ddbbe22ea720063ac2b8d3f5efa39bf018b01b7a1f5ff6eefc9e4c55445.json similarity index 51% rename from backend/.sqlx/query-93a1605f90e9672dad29b472b6ad85fa9a55ea3ffa5abcb8724b09d61be254ca.json rename to crates/db/.sqlx/query-62836ddbbe22ea720063ac2b8d3f5efa39bf018b01b7a1f5ff6eefc9e4c55445.json index fe9ab658..861c331b 100644 --- a/backend/.sqlx/query-93a1605f90e9672dad29b472b6ad85fa9a55ea3ffa5abcb8724b09d61be254ca.json +++ 
b/crates/db/.sqlx/query-62836ddbbe22ea720063ac2b8d3f5efa39bf018b01b7a1f5ff6eefc9e4c55445.json @@ -1,10 +1,10 @@ { "db_name": "SQLite", - "query": "SELECT COUNT(*) as count FROM task_attempts WHERE worktree_path = ?", + "query": "SELECT EXISTS(SELECT 1 FROM task_attempts WHERE container_ref = ?) as \"exists!: bool\"", "describe": { "columns": [ { - "name": "count", + "name": "exists!: bool", "ordinal": 0, "type_info": "Integer" } @@ -16,5 +16,5 @@ false ] }, - "hash": "93a1605f90e9672dad29b472b6ad85fa9a55ea3ffa5abcb8724b09d61be254ca" + "hash": "62836ddbbe22ea720063ac2b8d3f5efa39bf018b01b7a1f5ff6eefc9e4c55445" } diff --git a/backend/.sqlx/query-6ecfa16d0cf825aacf233544b5baf151e9adfdca26c226ad71020d291fd802d5.json b/crates/db/.sqlx/query-6ecfa16d0cf825aacf233544b5baf151e9adfdca26c226ad71020d291fd802d5.json similarity index 100% rename from backend/.sqlx/query-6ecfa16d0cf825aacf233544b5baf151e9adfdca26c226ad71020d291fd802d5.json rename to crates/db/.sqlx/query-6ecfa16d0cf825aacf233544b5baf151e9adfdca26c226ad71020d291fd802d5.json diff --git a/backend/.sqlx/query-72509d252c39fce77520aa816cb2acbc1fb35dc2605e7be893610599b2427f2e.json b/crates/db/.sqlx/query-72509d252c39fce77520aa816cb2acbc1fb35dc2605e7be893610599b2427f2e.json similarity index 100% rename from backend/.sqlx/query-72509d252c39fce77520aa816cb2acbc1fb35dc2605e7be893610599b2427f2e.json rename to crates/db/.sqlx/query-72509d252c39fce77520aa816cb2acbc1fb35dc2605e7be893610599b2427f2e.json diff --git a/backend/.sqlx/query-75239b2da188f749707d77f3c1544332ca70db3d6d6743b2601dc0d167536437.json b/crates/db/.sqlx/query-75239b2da188f749707d77f3c1544332ca70db3d6d6743b2601dc0d167536437.json similarity index 100% rename from backend/.sqlx/query-75239b2da188f749707d77f3c1544332ca70db3d6d6743b2601dc0d167536437.json rename to crates/db/.sqlx/query-75239b2da188f749707d77f3c1544332ca70db3d6d6743b2601dc0d167536437.json diff --git a/backend/.sqlx/query-c67259be8bf4ee0cfd32167b2aa3b7fe9192809181a8171bf1c2d6df731967ae.json b/crates/db/.sqlx/query-7e657b504fb7d8935fcb944f8f4646635f14e6ed9ff77d1c2225ce82e40fa03d.json similarity index 51% rename from backend/.sqlx/query-c67259be8bf4ee0cfd32167b2aa3b7fe9192809181a8171bf1c2d6df731967ae.json rename to crates/db/.sqlx/query-7e657b504fb7d8935fcb944f8f4646635f14e6ed9ff77d1c2225ce82e40fa03d.json index d4c4941e..26c4ad90 100644 --- a/backend/.sqlx/query-c67259be8bf4ee0cfd32167b2aa3b7fe9192809181a8171bf1c2d6df731967ae.json +++ b/crates/db/.sqlx/query-7e657b504fb7d8935fcb944f8f4646635f14e6ed9ff77d1c2225ce82e40fa03d.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "UPDATE execution_processes \n SET status = $1, exit_code = $2, completed_at = $3, updated_at = datetime('now') \n WHERE id = $4", + "query": "UPDATE execution_processes \n SET status = $1, exit_code = $2, completed_at = $3\n WHERE id = $4", "describe": { "columns": [], "parameters": { @@ -8,5 +8,5 @@ }, "nullable": [] }, - "hash": "c67259be8bf4ee0cfd32167b2aa3b7fe9192809181a8171bf1c2d6df731967ae" + "hash": "7e657b504fb7d8935fcb944f8f4646635f14e6ed9ff77d1c2225ce82e40fa03d" } diff --git a/backend/.sqlx/query-8aba98bb4d1701d1686d68371bca4edb4ba7f8b70693f86fc83860f8adda9065.json b/crates/db/.sqlx/query-834bc0957cd530e4396b61311c27165b482838ff32a13c0da66b4160e170466b.json similarity index 63% rename from backend/.sqlx/query-8aba98bb4d1701d1686d68371bca4edb4ba7f8b70693f86fc83860f8adda9065.json rename to crates/db/.sqlx/query-834bc0957cd530e4396b61311c27165b482838ff32a13c0da66b4160e170466b.json index e36fffa6..91b1fb5f 100644 --- 
a/backend/.sqlx/query-8aba98bb4d1701d1686d68371bca4edb4ba7f8b70693f86fc83860f8adda9065.json +++ b/crates/db/.sqlx/query-834bc0957cd530e4396b61311c27165b482838ff32a13c0da66b4160e170466b.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT DISTINCT t.id as \"id!: Uuid\", t.project_id as \"project_id!: Uuid\", t.title, t.description, t.status as \"status!: TaskStatus\", t.parent_task_attempt as \"parent_task_attempt: Uuid\", t.created_at as \"created_at!: DateTime\", t.updated_at as \"updated_at!: DateTime\"\n FROM tasks t\n WHERE (\n -- Find children: tasks that have this attempt as parent\n t.parent_task_attempt = $1 AND t.project_id = $2\n ) OR (\n -- Find parent: task that owns the parent attempt of current task\n EXISTS (\n SELECT 1 FROM tasks current_task \n JOIN task_attempts parent_attempt ON current_task.parent_task_attempt = parent_attempt.id\n WHERE parent_attempt.task_id = t.id \n AND parent_attempt.id = $1 \n AND current_task.project_id = $2\n )\n )\n -- Exclude the current task itself to prevent circular references\n AND t.id != (SELECT task_id FROM task_attempts WHERE id = $1)\n ORDER BY t.created_at DESC", + "query": "SELECT DISTINCT t.id as \"id!: Uuid\", t.project_id as \"project_id!: Uuid\", t.title, t.description, t.status as \"status!: TaskStatus\", t.parent_task_attempt as \"parent_task_attempt: Uuid\", t.created_at as \"created_at!: DateTime\", t.updated_at as \"updated_at!: DateTime\"\n FROM tasks t\n WHERE (\n -- Find children: tasks that have this attempt as parent\n t.parent_task_attempt = $1\n ) OR (\n -- Find parent: task that owns the parent attempt of current task\n EXISTS (\n SELECT 1 FROM tasks current_task \n JOIN task_attempts parent_attempt ON current_task.parent_task_attempt = parent_attempt.id\n WHERE parent_attempt.task_id = t.id \n )\n )\n -- Exclude the current task itself to prevent circular references\n AND t.id != (SELECT task_id FROM task_attempts WHERE id = $1)\n ORDER BY t.created_at DESC", "describe": { "columns": [ { @@ -45,7 +45,7 @@ } ], "parameters": { - "Right": 2 + "Right": 1 }, "nullable": [ true, @@ -58,5 +58,5 @@ false ] }, - "hash": "8aba98bb4d1701d1686d68371bca4edb4ba7f8b70693f86fc83860f8adda9065" + "hash": "834bc0957cd530e4396b61311c27165b482838ff32a13c0da66b4160e170466b" } diff --git a/backend/.sqlx/query-86d03eb70eef39c59296416867f2ee66c9f7cd8b7f961fbda2f89fc0a1c442c2.json b/crates/db/.sqlx/query-86d03eb70eef39c59296416867f2ee66c9f7cd8b7f961fbda2f89fc0a1c442c2.json similarity index 100% rename from backend/.sqlx/query-86d03eb70eef39c59296416867f2ee66c9f7cd8b7f961fbda2f89fc0a1c442c2.json rename to crates/db/.sqlx/query-86d03eb70eef39c59296416867f2ee66c9f7cd8b7f961fbda2f89fc0a1c442c2.json diff --git a/crates/db/.sqlx/query-8c691c79539b34f91f09e6dce51eb684840804f9279f9990cfdcb9015453d9d8.json b/crates/db/.sqlx/query-8c691c79539b34f91f09e6dce51eb684840804f9279f9990cfdcb9015453d9d8.json new file mode 100644 index 00000000..1c504d19 --- /dev/null +++ b/crates/db/.sqlx/query-8c691c79539b34f91f09e6dce51eb684840804f9279f9990cfdcb9015453d9d8.json @@ -0,0 +1,104 @@ +{ + "db_name": "SQLite", + "query": "SELECT id AS \"id!: Uuid\",\n task_id AS \"task_id!: Uuid\",\n container_ref,\n branch,\n base_branch,\n merge_commit,\n base_coding_agent AS \"base_coding_agent!\",\n pr_url,\n pr_number,\n pr_status,\n pr_merged_at AS \"pr_merged_at: DateTime\",\n worktree_deleted AS \"worktree_deleted!: bool\",\n setup_completed_at AS \"setup_completed_at: DateTime\",\n created_at AS \"created_at!: DateTime\",\n updated_at AS \"updated_at!: 
DateTime\"\n FROM task_attempts\n WHERE task_id = $1\n ORDER BY created_at DESC", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "container_ref", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "branch", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "base_branch", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "merge_commit", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "base_coding_agent!", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "pr_url", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "pr_number", + "ordinal": 8, + "type_info": "Integer" + }, + { + "name": "pr_status", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "pr_merged_at: DateTime", + "ordinal": 10, + "type_info": "Datetime" + }, + { + "name": "worktree_deleted!: bool", + "ordinal": 11, + "type_info": "Bool" + }, + { + "name": "setup_completed_at: DateTime", + "ordinal": 12, + "type_info": "Datetime" + }, + { + "name": "created_at!: DateTime", + "ordinal": 13, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 14, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true, + false, + true, + true, + false, + true, + true, + true, + true, + true, + true, + false, + true, + false, + false + ] + }, + "hash": "8c691c79539b34f91f09e6dce51eb684840804f9279f9990cfdcb9015453d9d8" +} diff --git a/crates/db/.sqlx/query-8cc087f95fb55426ee6481bdd0f74b2083ceaf6c5cf82456a7d83c18323c5cec.json b/crates/db/.sqlx/query-8cc087f95fb55426ee6481bdd0f74b2083ceaf6c5cf82456a7d83c18323c5cec.json new file mode 100644 index 00000000..83753159 --- /dev/null +++ b/crates/db/.sqlx/query-8cc087f95fb55426ee6481bdd0f74b2083ceaf6c5cf82456a7d83c18323c5cec.json @@ -0,0 +1,62 @@ +{ + "db_name": "SQLite", + "query": "SELECT id as \"id!: Uuid\", project_id as \"project_id!: Uuid\", title, description, status as \"status!: TaskStatus\", parent_task_attempt as \"parent_task_attempt: Uuid\", created_at as \"created_at!: DateTime\", updated_at as \"updated_at!: DateTime\"\n FROM tasks \n WHERE rowid = $1", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "project_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "title", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "description", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "status!: TaskStatus", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "parent_task_attempt: Uuid", + "ordinal": 5, + "type_info": "Blob" + }, + { + "name": "created_at!: DateTime", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 7, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true, + false, + false, + true, + false, + true, + false, + false + ] + }, + "hash": "8cc087f95fb55426ee6481bdd0f74b2083ceaf6c5cf82456a7d83c18323c5cec" +} diff --git a/backend/.sqlx/query-8f01ebd64bdcde6a090479f14810d73ba23020e76fd70854ac57f2da251702c3.json b/crates/db/.sqlx/query-8f01ebd64bdcde6a090479f14810d73ba23020e76fd70854ac57f2da251702c3.json similarity index 100% rename from backend/.sqlx/query-8f01ebd64bdcde6a090479f14810d73ba23020e76fd70854ac57f2da251702c3.json rename to crates/db/.sqlx/query-8f01ebd64bdcde6a090479f14810d73ba23020e76fd70854ac57f2da251702c3.json diff --git 
a/crates/db/.sqlx/query-8f5d9d112659d04406c20c885f72c075b355e54836930226fc84390c5a4516f7.json b/crates/db/.sqlx/query-8f5d9d112659d04406c20c885f72c075b355e54836930226fc84390c5a4516f7.json new file mode 100644 index 00000000..2ddbbbed --- /dev/null +++ b/crates/db/.sqlx/query-8f5d9d112659d04406c20c885f72c075b355e54836930226fc84390c5a4516f7.json @@ -0,0 +1,104 @@ +{ + "db_name": "SQLite", + "query": "SELECT id AS \"id!: Uuid\",\n task_id AS \"task_id!: Uuid\",\n container_ref,\n branch,\n base_branch,\n merge_commit,\n base_coding_agent AS \"base_coding_agent!\",\n pr_url,\n pr_number,\n pr_status,\n pr_merged_at AS \"pr_merged_at: DateTime\",\n worktree_deleted AS \"worktree_deleted!: bool\",\n setup_completed_at AS \"setup_completed_at: DateTime\",\n created_at AS \"created_at!: DateTime\",\n updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts\n ORDER BY created_at DESC", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "container_ref", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "branch", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "base_branch", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "merge_commit", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "base_coding_agent!", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "pr_url", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "pr_number", + "ordinal": 8, + "type_info": "Integer" + }, + { + "name": "pr_status", + "ordinal": 9, + "type_info": "Text" + }, + { + "name": "pr_merged_at: DateTime", + "ordinal": 10, + "type_info": "Datetime" + }, + { + "name": "worktree_deleted!: bool", + "ordinal": 11, + "type_info": "Bool" + }, + { + "name": "setup_completed_at: DateTime", + "ordinal": 12, + "type_info": "Datetime" + }, + { + "name": "created_at!: DateTime", + "ordinal": 13, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 14, + "type_info": "Text" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + true, + false, + true, + true, + false, + true, + true, + true, + true, + true, + true, + false, + true, + false, + false + ] + }, + "hash": "8f5d9d112659d04406c20c885f72c075b355e54836930226fc84390c5a4516f7" +} diff --git a/backend/.sqlx/query-212828320e8d871ab9d83705a040b23bcf0393dc7252177fc539a74657f578ef.json b/crates/db/.sqlx/query-8fcdb52af46ab995bd242392b57b0ce6848145d4769c31bed3fc8f492c070c06.json similarity index 87% rename from backend/.sqlx/query-212828320e8d871ab9d83705a040b23bcf0393dc7252177fc539a74657f578ef.json rename to crates/db/.sqlx/query-8fcdb52af46ab995bd242392b57b0ce6848145d4769c31bed3fc8f492c070c06.json index c8cc615c..b2757d7c 100644 --- a/backend/.sqlx/query-212828320e8d871ab9d83705a040b23bcf0393dc7252177fc539a74657f578ef.json +++ b/crates/db/.sqlx/query-8fcdb52af46ab995bd242392b57b0ce6848145d4769c31bed3fc8f492c070c06.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "\n SELECT ta.id as \"attempt_id!: Uuid\", ta.worktree_path, p.git_repo_path as \"git_repo_path!\"\n FROM task_attempts ta\n LEFT JOIN execution_processes ep ON ta.id = ep.task_attempt_id AND ep.completed_at IS NOT NULL\n JOIN tasks t ON ta.task_id = t.id\n JOIN projects p ON t.project_id = p.id\n WHERE ta.worktree_deleted = FALSE\n -- Exclude attempts with any running processes (in progress)\n AND ta.id NOT IN (\n SELECT DISTINCT ep2.task_attempt_id\n FROM execution_processes ep2\n WHERE 
ep2.completed_at IS NULL\n )\n GROUP BY ta.id, ta.worktree_path, p.git_repo_path, ta.updated_at\n HAVING datetime('now', '-24 hours') > datetime(\n MAX(\n CASE\n WHEN ep.completed_at IS NOT NULL THEN ep.completed_at\n ELSE ta.updated_at\n END\n )\n )\n ORDER BY MAX(\n CASE\n WHEN ep.completed_at IS NOT NULL THEN ep.completed_at\n ELSE ta.updated_at\n END\n ) ASC\n ", + "query": "\n SELECT ta.id as \"attempt_id!: Uuid\", ta.container_ref, p.git_repo_path as \"git_repo_path!\"\n FROM task_attempts ta\n LEFT JOIN execution_processes ep ON ta.id = ep.task_attempt_id AND ep.completed_at IS NOT NULL\n JOIN tasks t ON ta.task_id = t.id\n JOIN projects p ON t.project_id = p.id\n WHERE ta.worktree_deleted = FALSE\n -- Exclude attempts with any running processes (in progress)\n AND ta.id NOT IN (\n SELECT DISTINCT ep2.task_attempt_id\n FROM execution_processes ep2\n WHERE ep2.completed_at IS NULL\n )\n GROUP BY ta.id, ta.container_ref, p.git_repo_path, ta.updated_at\n HAVING datetime('now', '-24 hours') > datetime(\n MAX(\n CASE\n WHEN ep.completed_at IS NOT NULL THEN ep.completed_at\n ELSE ta.updated_at\n END\n )\n )\n ORDER BY MAX(\n CASE\n WHEN ep.completed_at IS NOT NULL THEN ep.completed_at\n ELSE ta.updated_at\n END\n ) ASC\n ", "describe": { "columns": [ { @@ -9,7 +9,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 1, "type_info": "Text" }, @@ -28,5 +28,5 @@ true ] }, - "hash": "212828320e8d871ab9d83705a040b23bcf0393dc7252177fc539a74657f578ef" + "hash": "8fcdb52af46ab995bd242392b57b0ce6848145d4769c31bed3fc8f492c070c06" } diff --git a/crates/db/.sqlx/query-90d5b39dddf9f5c6c48cd8268f7381a2a772537c3daa1f9d800b1ef1f191f21d.json b/crates/db/.sqlx/query-90d5b39dddf9f5c6c48cd8268f7381a2a772537c3daa1f9d800b1ef1f191f21d.json new file mode 100644 index 00000000..bc95e384 --- /dev/null +++ b/crates/db/.sqlx/query-90d5b39dddf9f5c6c48cd8268f7381a2a772537c3daa1f9d800b1ef1f191f21d.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE executor_sessions\n SET session_id = $1, updated_at = $2\n WHERE execution_process_id = $3", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "90d5b39dddf9f5c6c48cd8268f7381a2a772537c3daa1f9d800b1ef1f191f21d" +} diff --git a/backend/.sqlx/query-90fd607fcb2dca72239ff25e618e21e174b195991eaa33722cbf5f76da84cfab.json b/crates/db/.sqlx/query-90fd607fcb2dca72239ff25e618e21e174b195991eaa33722cbf5f76da84cfab.json similarity index 100% rename from backend/.sqlx/query-90fd607fcb2dca72239ff25e618e21e174b195991eaa33722cbf5f76da84cfab.json rename to crates/db/.sqlx/query-90fd607fcb2dca72239ff25e618e21e174b195991eaa33722cbf5f76da84cfab.json diff --git a/backend/.sqlx/query-96036c4f9e0f48bdc5a4a4588f0c5f288ac7aaa5425cac40fc33f337e1a351f2.json b/crates/db/.sqlx/query-96036c4f9e0f48bdc5a4a4588f0c5f288ac7aaa5425cac40fc33f337e1a351f2.json similarity index 100% rename from backend/.sqlx/query-96036c4f9e0f48bdc5a4a4588f0c5f288ac7aaa5425cac40fc33f337e1a351f2.json rename to crates/db/.sqlx/query-96036c4f9e0f48bdc5a4a4588f0c5f288ac7aaa5425cac40fc33f337e1a351f2.json diff --git a/crates/db/.sqlx/query-97e6a03adc1c14e9ecabe7885598dcc0ea273dffea920838fc4dcc837293ba6b.json b/crates/db/.sqlx/query-97e6a03adc1c14e9ecabe7885598dcc0ea273dffea920838fc4dcc837293ba6b.json new file mode 100644 index 00000000..0875fb2d --- /dev/null +++ b/crates/db/.sqlx/query-97e6a03adc1c14e9ecabe7885598dcc0ea273dffea920838fc4dcc837293ba6b.json @@ -0,0 +1,38 @@ +{ + "db_name": "SQLite", + "query": "INSERT INTO 
execution_process_logs (execution_id, logs, byte_size, inserted_at)\n VALUES ($1, $2, $3, $4)\n ON CONFLICT (execution_id) DO UPDATE\n SET logs = EXCLUDED.logs, \n byte_size = EXCLUDED.byte_size,\n inserted_at = EXCLUDED.inserted_at\n RETURNING \n execution_id as \"execution_id!: Uuid\",\n logs,\n byte_size,\n inserted_at as \"inserted_at!: DateTime\"", + "describe": { + "columns": [ + { + "name": "execution_id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "logs", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "byte_size", + "ordinal": 2, + "type_info": "Integer" + }, + { + "name": "inserted_at!: DateTime", + "ordinal": 3, + "type_info": "Text" + } + ], + "parameters": { + "Right": 4 + }, + "nullable": [ + true, + false, + false, + false + ] + }, + "hash": "97e6a03adc1c14e9ecabe7885598dcc0ea273dffea920838fc4dcc837293ba6b" +} diff --git a/backend/.sqlx/query-a31fff84f3b8e532fd1160447d89d700f06ae08821fee00c9a5b60492b05259c.json b/crates/db/.sqlx/query-a31fff84f3b8e532fd1160447d89d700f06ae08821fee00c9a5b60492b05259c.json similarity index 100% rename from backend/.sqlx/query-a31fff84f3b8e532fd1160447d89d700f06ae08821fee00c9a5b60492b05259c.json rename to crates/db/.sqlx/query-a31fff84f3b8e532fd1160447d89d700f06ae08821fee00c9a5b60492b05259c.json diff --git a/backend/.sqlx/query-92e8bdbcd80c5ff3db7a35cf79492048803ef305cbdef0d0a1fe5dc881ca8c71.json b/crates/db/.sqlx/query-a500d5054ba09e64a4f98500a5c600ba66b9c919af26ae6ca79b1cc82d138158.json similarity index 64% rename from backend/.sqlx/query-92e8bdbcd80c5ff3db7a35cf79492048803ef305cbdef0d0a1fe5dc881ca8c71.json rename to crates/db/.sqlx/query-a500d5054ba09e64a4f98500a5c600ba66b9c919af26ae6ca79b1cc82d138158.json index 7782656e..5d963554 100644 --- a/backend/.sqlx/query-92e8bdbcd80c5ff3db7a35cf79492048803ef305cbdef0d0a1fe5dc881ca8c71.json +++ b/crates/db/.sqlx/query-a500d5054ba09e64a4f98500a5c600ba66b9c919af26ae6ca79b1cc82d138158.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT ta.id AS \"id!: Uuid\",\n ta.task_id AS \"task_id!: Uuid\",\n ta.worktree_path,\n ta.branch,\n ta.base_branch,\n ta.merge_commit,\n ta.executor,\n ta.pr_url,\n ta.pr_number,\n ta.pr_status,\n ta.pr_merged_at AS \"pr_merged_at: DateTime\",\n ta.worktree_deleted AS \"worktree_deleted!: bool\",\n ta.setup_completed_at AS \"setup_completed_at: DateTime\",\n ta.created_at AS \"created_at!: DateTime\",\n ta.updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts ta\n JOIN tasks t ON ta.task_id = t.id\n JOIN projects p ON t.project_id = p.id\n WHERE ta.id = $1 AND t.id = $2 AND p.id = $3", + "query": "SELECT ta.id AS \"id!: Uuid\",\n ta.task_id AS \"task_id!: Uuid\",\n ta.container_ref,\n ta.branch,\n ta.base_branch,\n ta.merge_commit,\n ta.base_coding_agent AS \"base_coding_agent!\",\n ta.pr_url,\n ta.pr_number,\n ta.pr_status,\n ta.pr_merged_at AS \"pr_merged_at: DateTime\",\n ta.worktree_deleted AS \"worktree_deleted!: bool\",\n ta.setup_completed_at AS \"setup_completed_at: DateTime\",\n ta.created_at AS \"created_at!: DateTime\",\n ta.updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts ta\n JOIN tasks t ON ta.task_id = t.id\n JOIN projects p ON t.project_id = p.id\n WHERE ta.id = $1 AND t.id = $2 AND p.id = $3", "describe": { "columns": [ { @@ -14,7 +14,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 2, "type_info": "Text" }, @@ -34,7 +34,7 @@ "type_info": "Text" }, { - "name": "executor", + "name": "base_coding_agent!", "ordinal": 6, "type_info": "Text" }, @@ -85,8 +85,8 @@ 
"nullable": [ true, false, - false, - false, + true, + true, false, true, true, @@ -100,5 +100,5 @@ false ] }, - "hash": "92e8bdbcd80c5ff3db7a35cf79492048803ef305cbdef0d0a1fe5dc881ca8c71" + "hash": "a500d5054ba09e64a4f98500a5c600ba66b9c919af26ae6ca79b1cc82d138158" } diff --git a/backend/.sqlx/query-a5ba908419fb3e456bdd2daca41ba06cc3212ffffb8520fc7dbbcc8b60ada314.json b/crates/db/.sqlx/query-a5ba908419fb3e456bdd2daca41ba06cc3212ffffb8520fc7dbbcc8b60ada314.json similarity index 100% rename from backend/.sqlx/query-a5ba908419fb3e456bdd2daca41ba06cc3212ffffb8520fc7dbbcc8b60ada314.json rename to crates/db/.sqlx/query-a5ba908419fb3e456bdd2daca41ba06cc3212ffffb8520fc7dbbcc8b60ada314.json diff --git a/backend/.sqlx/query-ac5247c8d7fb86e4650c4b0eb9420031614c831b7b085083bac20c1af314c538.json b/crates/db/.sqlx/query-ac5247c8d7fb86e4650c4b0eb9420031614c831b7b085083bac20c1af314c538.json similarity index 100% rename from backend/.sqlx/query-ac5247c8d7fb86e4650c4b0eb9420031614c831b7b085083bac20c1af314c538.json rename to crates/db/.sqlx/query-ac5247c8d7fb86e4650c4b0eb9420031614c831b7b085083bac20c1af314c538.json diff --git a/backend/.sqlx/query-a6d2961718dbc3b1a925e549f49a159c561bef58c105529275f274b27e2eba5b.json b/crates/db/.sqlx/query-acdb8488d9d698e8522a1a1a062f560857e70cf8c1dee1eaecd75b096911cb17.json similarity index 69% rename from backend/.sqlx/query-a6d2961718dbc3b1a925e549f49a159c561bef58c105529275f274b27e2eba5b.json rename to crates/db/.sqlx/query-acdb8488d9d698e8522a1a1a062f560857e70cf8c1dee1eaecd75b096911cb17.json index 098b19e9..a195c918 100644 --- a/backend/.sqlx/query-a6d2961718dbc3b1a925e549f49a159c561bef58c105529275f274b27e2eba5b.json +++ b/crates/db/.sqlx/query-acdb8488d9d698e8522a1a1a062f560857e70cf8c1dee1eaecd75b096911cb17.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT id AS \"id!: Uuid\",\n task_id AS \"task_id!: Uuid\",\n worktree_path,\n branch,\n merge_commit,\n base_branch,\n executor,\n pr_url,\n pr_number,\n pr_status,\n pr_merged_at AS \"pr_merged_at: DateTime\",\n worktree_deleted AS \"worktree_deleted!: bool\",\n setup_completed_at AS \"setup_completed_at: DateTime\",\n created_at AS \"created_at!: DateTime\",\n updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts\n WHERE id = $1", + "query": "SELECT id AS \"id!: Uuid\",\n task_id AS \"task_id!: Uuid\",\n container_ref,\n branch,\n merge_commit,\n base_branch,\n base_coding_agent AS \"base_coding_agent!\",\n pr_url,\n pr_number,\n pr_status,\n pr_merged_at AS \"pr_merged_at: DateTime\",\n worktree_deleted AS \"worktree_deleted!: bool\",\n setup_completed_at AS \"setup_completed_at: DateTime\",\n created_at AS \"created_at!: DateTime\",\n updated_at AS \"updated_at!: DateTime\"\n FROM task_attempts\n WHERE id = $1", "describe": { "columns": [ { @@ -14,7 +14,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 2, "type_info": "Text" }, @@ -34,7 +34,7 @@ "type_info": "Text" }, { - "name": "executor", + "name": "base_coding_agent!", "ordinal": 6, "type_info": "Text" }, @@ -85,8 +85,8 @@ "nullable": [ true, false, - false, - false, + true, + true, true, false, true, @@ -100,5 +100,5 @@ false ] }, - "hash": "a6d2961718dbc3b1a925e549f49a159c561bef58c105529275f274b27e2eba5b" + "hash": "acdb8488d9d698e8522a1a1a062f560857e70cf8c1dee1eaecd75b096911cb17" } diff --git a/backend/.sqlx/query-b2b2c6b4d0b1a347b5c4cb63c3a46a265d4db53be9554989a814b069d0af82f2.json b/crates/db/.sqlx/query-b2b2c6b4d0b1a347b5c4cb63c3a46a265d4db53be9554989a814b069d0af82f2.json similarity index 100% 
rename from backend/.sqlx/query-b2b2c6b4d0b1a347b5c4cb63c3a46a265d4db53be9554989a814b069d0af82f2.json rename to crates/db/.sqlx/query-b2b2c6b4d0b1a347b5c4cb63c3a46a265d4db53be9554989a814b069d0af82f2.json diff --git a/crates/db/.sqlx/query-b8828d250bd93c1d77c97e3954b0e26db4e65e28bba23ec26e77a1faa4dcc974.json b/crates/db/.sqlx/query-b8828d250bd93c1d77c97e3954b0e26db4e65e28bba23ec26e77a1faa4dcc974.json new file mode 100644 index 00000000..da8920a4 --- /dev/null +++ b/crates/db/.sqlx/query-b8828d250bd93c1d77c97e3954b0e26db4e65e28bba23ec26e77a1faa4dcc974.json @@ -0,0 +1,74 @@ +{ + "db_name": "SQLite", + "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE status = 'running' \n ORDER BY created_at ASC", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_attempt_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "run_reason!: ExecutionProcessRunReason", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "executor_action!: sqlx::types::Json", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "status!: ExecutionProcessStatus", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "exit_code", + "ordinal": 5, + "type_info": "Integer" + }, + { + "name": "started_at!: DateTime", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "completed_at?: DateTime", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at!: DateTime", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 9, + "type_info": "Text" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + true, + false, + false, + false, + false, + true, + false, + true, + false, + false + ] + }, + "hash": "b8828d250bd93c1d77c97e3954b0e26db4e65e28bba23ec26e77a1faa4dcc974" +} diff --git a/backend/.sqlx/query-d3b9ea1de1576af71b312924ce7f4ea8ae5dbe2ac138ea3b4470f2d5cd734846.json b/crates/db/.sqlx/query-bbc3a97f21c9b6c60a64cd747843837c3af677ab5d7a1167550ab1393ac07ea9.json similarity index 52% rename from backend/.sqlx/query-d3b9ea1de1576af71b312924ce7f4ea8ae5dbe2ac138ea3b4470f2d5cd734846.json rename to crates/db/.sqlx/query-bbc3a97f21c9b6c60a64cd747843837c3af677ab5d7a1167550ab1393ac07ea9.json index 51fc4464..e43f9225 100644 --- a/backend/.sqlx/query-d3b9ea1de1576af71b312924ce7f4ea8ae5dbe2ac138ea3b4470f2d5cd734846.json +++ b/crates/db/.sqlx/query-bbc3a97f21c9b6c60a64cd747843837c3af677ab5d7a1167550ab1393ac07ea9.json @@ -1,12 +1,12 @@ { "db_name": "SQLite", - "query": "UPDATE executor_sessions \n SET prompt = $1, updated_at = datetime('now') \n WHERE id = $2", + "query": "UPDATE executor_sessions \n SET prompt = $1, updated_at = $2 \n WHERE id = $3", "describe": { "columns": [], "parameters": { - "Right": 2 + "Right": 3 }, "nullable": [] }, - "hash": "d3b9ea1de1576af71b312924ce7f4ea8ae5dbe2ac138ea3b4470f2d5cd734846" + "hash": "bbc3a97f21c9b6c60a64cd747843837c3af677ab5d7a1167550ab1393ac07ea9" } diff --git a/backend/.sqlx/query-58408c7a8cdeeda0bef359f1f9bd91299a339dc2b191462fc58c9736a56d5227.json 
b/crates/db/.sqlx/query-c1b07b345d6cef9413e4dc19f139aad7fea3afb72c5104b2e2d1533825e81293.json similarity index 54% rename from backend/.sqlx/query-58408c7a8cdeeda0bef359f1f9bd91299a339dc2b191462fc58c9736a56d5227.json rename to crates/db/.sqlx/query-c1b07b345d6cef9413e4dc19f139aad7fea3afb72c5104b2e2d1533825e81293.json index 4214e7d9..59817903 100644 --- a/backend/.sqlx/query-58408c7a8cdeeda0bef359f1f9bd91299a339dc2b191462fc58c9736a56d5227.json +++ b/crates/db/.sqlx/query-c1b07b345d6cef9413e4dc19f139aad7fea3afb72c5104b2e2d1533825e81293.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n process_type as \"process_type!: ExecutionProcessType\",\n executor_type,\n status as \"status!: ExecutionProcessStatus\",\n command, \n args, \n working_directory, \n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE task_attempt_id = $1 \n ORDER BY created_at ASC", + "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE rowid = $1", "describe": { "columns": [ { @@ -14,12 +14,12 @@ "type_info": "Blob" }, { - "name": "process_type!: ExecutionProcessType", + "name": "run_reason!: ExecutionProcessRunReason", "ordinal": 2, "type_info": "Text" }, { - "name": "executor_type", + "name": "executor_action!: sqlx::types::Json", "ordinal": 3, "type_info": "Text" }, @@ -28,44 +28,29 @@ "ordinal": 4, "type_info": "Text" }, - { - "name": "command", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "args", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "working_directory", - "ordinal": 7, - "type_info": "Text" - }, { "name": "exit_code", - "ordinal": 8, + "ordinal": 5, "type_info": "Integer" }, { "name": "started_at!: DateTime", - "ordinal": 9, + "ordinal": 6, "type_info": "Text" }, { "name": "completed_at?: DateTime", - "ordinal": 10, + "ordinal": 7, "type_info": "Text" }, { "name": "created_at!: DateTime", - "ordinal": 11, + "ordinal": 8, "type_info": "Text" }, { "name": "updated_at!: DateTime", - "ordinal": 12, + "ordinal": 9, "type_info": "Text" } ], @@ -76,17 +61,14 @@ true, false, false, - true, false, false, true, false, true, false, - true, - false, false ] }, - "hash": "58408c7a8cdeeda0bef359f1f9bd91299a339dc2b191462fc58c9736a56d5227" + "hash": "c1b07b345d6cef9413e4dc19f139aad7fea3afb72c5104b2e2d1533825e81293" } diff --git a/backend/.sqlx/query-83d10e29f8478aff33434f9ac67068e013b888b953a2657e2bb72a6f619d04f2.json b/crates/db/.sqlx/query-c1e5b46545fcef759610463d9bf902b25f18cd83d2ca8616bf3ec1c84728bf6f.json similarity index 52% rename from backend/.sqlx/query-83d10e29f8478aff33434f9ac67068e013b888b953a2657e2bb72a6f619d04f2.json rename to crates/db/.sqlx/query-c1e5b46545fcef759610463d9bf902b25f18cd83d2ca8616bf3ec1c84728bf6f.json index 28ea4ef6..75fc2785 100644 --- a/backend/.sqlx/query-83d10e29f8478aff33434f9ac67068e013b888b953a2657e2bb72a6f619d04f2.json +++ 
b/crates/db/.sqlx/query-c1e5b46545fcef759610463d9bf902b25f18cd83d2ca8616bf3ec1c84728bf6f.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT \n ta.id as \"attempt_id!: Uuid\",\n ta.task_id as \"task_id!: Uuid\",\n ta.pr_number as \"pr_number!: i64\",\n ta.pr_url,\n t.project_id as \"project_id!: Uuid\",\n p.git_repo_path\n FROM task_attempts ta\n JOIN tasks t ON ta.task_id = t.id \n JOIN projects p ON t.project_id = p.id\n WHERE ta.pr_status = 'open' AND ta.pr_number IS NOT NULL", + "query": "SELECT \n ta.id as \"attempt_id!: Uuid\",\n ta.task_id as \"task_id!: Uuid\",\n ta.pr_number as \"pr_number!: i64\",\n ta.pr_url as \"pr_url!: String\"\n FROM task_attempts ta\n WHERE ta.pr_status = 'open' AND ta.pr_number IS NOT NULL", "describe": { "columns": [ { @@ -19,19 +19,9 @@ "type_info": "Integer" }, { - "name": "pr_url", + "name": "pr_url!: String", "ordinal": 3, "type_info": "Text" - }, - { - "name": "project_id!: Uuid", - "ordinal": 4, - "type_info": "Blob" - }, - { - "name": "git_repo_path", - "ordinal": 5, - "type_info": "Text" } ], "parameters": { @@ -41,10 +31,8 @@ true, false, true, - true, - false, - false + true ] }, - "hash": "83d10e29f8478aff33434f9ac67068e013b888b953a2657e2bb72a6f619d04f2" + "hash": "c1e5b46545fcef759610463d9bf902b25f18cd83d2ca8616bf3ec1c84728bf6f" } diff --git a/backend/.sqlx/query-c50d2ff0b12e5bcc81e371089ee2d007e233e7db93aefba4fef08e7aa68f5ab7.json b/crates/db/.sqlx/query-c50d2ff0b12e5bcc81e371089ee2d007e233e7db93aefba4fef08e7aa68f5ab7.json similarity index 100% rename from backend/.sqlx/query-c50d2ff0b12e5bcc81e371089ee2d007e233e7db93aefba4fef08e7aa68f5ab7.json rename to crates/db/.sqlx/query-c50d2ff0b12e5bcc81e371089ee2d007e233e7db93aefba4fef08e7aa68f5ab7.json diff --git a/crates/db/.sqlx/query-ca6acd3a57fc44e8e29e057700cee4442c0ab8b37aca0abf29fe5464c8539c6d.json b/crates/db/.sqlx/query-ca6acd3a57fc44e8e29e057700cee4442c0ab8b37aca0abf29fe5464c8539c6d.json new file mode 100644 index 00000000..32dcdd26 --- /dev/null +++ b/crates/db/.sqlx/query-ca6acd3a57fc44e8e29e057700cee4442c0ab8b37aca0abf29fe5464c8539c6d.json @@ -0,0 +1,74 @@ +{ + "db_name": "SQLite", + "query": "INSERT INTO execution_processes (\n id, task_attempt_id, run_reason, executor_action, status, \n exit_code, started_at, \n completed_at, created_at, updated_at\n ) \n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \n RETURNING \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_attempt_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "run_reason!: ExecutionProcessRunReason", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "executor_action!: sqlx::types::Json", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "status!: ExecutionProcessStatus", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "exit_code", + "ordinal": 5, + "type_info": "Integer" + }, + { + "name": "started_at!: DateTime", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "completed_at?: DateTime", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at!: 
DateTime", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 9, + "type_info": "Text" + } + ], + "parameters": { + "Right": 10 + }, + "nullable": [ + true, + false, + false, + false, + false, + true, + false, + true, + false, + false + ] + }, + "hash": "ca6acd3a57fc44e8e29e057700cee4442c0ab8b37aca0abf29fe5464c8539c6d" +} diff --git a/backend/.sqlx/query-315cf28396b52c1215a53c72c57e0277d6143d8fd658f141a86d6fd0770fb539.json b/crates/db/.sqlx/query-cb2d1da9c3e3ad9f09ea30165f5fe584fef35a015038e83a548edb59ecadaa18.json similarity index 64% rename from backend/.sqlx/query-315cf28396b52c1215a53c72c57e0277d6143d8fd658f141a86d6fd0770fb539.json rename to crates/db/.sqlx/query-cb2d1da9c3e3ad9f09ea30165f5fe584fef35a015038e83a548edb59ecadaa18.json index 733fc5f3..c7164bcf 100644 --- a/backend/.sqlx/query-315cf28396b52c1215a53c72c57e0277d6143d8fd658f141a86d6fd0770fb539.json +++ b/crates/db/.sqlx/query-cb2d1da9c3e3ad9f09ea30165f5fe584fef35a015038e83a548edb59ecadaa18.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT\n t.id AS \"id!: Uuid\",\n t.project_id AS \"project_id!: Uuid\",\n t.title,\n t.description,\n t.status AS \"status!: TaskStatus\",\n t.parent_task_attempt AS \"parent_task_attempt: Uuid\",\n t.created_at AS \"created_at!: DateTime\",\n t.updated_at AS \"updated_at!: DateTime\",\n\n CASE WHEN EXISTS (\n SELECT 1\n FROM task_attempts ta\n JOIN execution_processes ep\n ON ep.task_attempt_id = ta.id\n WHERE ta.task_id = t.id\n AND ep.status = 'running'\n AND ep.process_type IN ('setupscript','cleanupscript','codingagent')\n LIMIT 1\n ) THEN 1 ELSE 0 END AS \"has_in_progress_attempt!: i64\",\n\n CASE WHEN EXISTS (\n SELECT 1\n FROM task_attempts ta\n WHERE ta.task_id = t.id\n AND ta.merge_commit IS NOT NULL\n LIMIT 1\n ) THEN 1 ELSE 0 END AS \"has_merged_attempt!: i64\",\n\n CASE WHEN (\n SELECT ep.status\n FROM task_attempts ta\n JOIN execution_processes ep\n ON ep.task_attempt_id = ta.id\n WHERE ta.task_id = t.id\n AND ep.process_type IN ('setupscript','cleanupscript','codingagent')\n ORDER BY ep.created_at DESC\n LIMIT 1\n ) IN ('failed','killed') THEN 1 ELSE 0 END\n AS \"last_attempt_failed!: i64\",\n\n ( SELECT ta.executor\n FROM task_attempts ta\n WHERE ta.task_id = t.id\n ORDER BY ta.created_at DESC\n LIMIT 1\n ) AS \"latest_attempt_executor\"\n\nFROM tasks t\nWHERE t.project_id = $1\nORDER BY t.created_at DESC", + "query": "SELECT\n t.id AS \"id!: Uuid\",\n t.project_id AS \"project_id!: Uuid\",\n t.title,\n t.description,\n t.status AS \"status!: TaskStatus\",\n t.parent_task_attempt AS \"parent_task_attempt: Uuid\",\n t.created_at AS \"created_at!: DateTime\",\n t.updated_at AS \"updated_at!: DateTime\",\n\n CASE WHEN EXISTS (\n SELECT 1\n FROM task_attempts ta\n JOIN execution_processes ep\n ON ep.task_attempt_id = ta.id\n WHERE ta.task_id = t.id\n AND ep.status = 'running'\n AND ep.run_reason IN ('setupscript','cleanupscript','codingagent')\n LIMIT 1\n ) THEN 1 ELSE 0 END AS \"has_in_progress_attempt!: i64\",\n\n CASE WHEN EXISTS (\n SELECT 1\n FROM task_attempts ta\n WHERE ta.task_id = t.id\n AND ta.merge_commit IS NOT NULL\n LIMIT 1\n ) THEN 1 ELSE 0 END AS \"has_merged_attempt!: i64\",\n\n CASE WHEN (\n SELECT ep.status\n FROM task_attempts ta\n JOIN execution_processes ep\n ON ep.task_attempt_id = ta.id\n WHERE ta.task_id = t.id\n AND ep.run_reason IN ('setupscript','cleanupscript','codingagent')\n ORDER BY ep.created_at DESC\n LIMIT 1\n ) IN ('failed','killed') THEN 1 ELSE 0 END\n AS \"last_attempt_failed!: i64\",\n\n ( 
SELECT ta.base_coding_agent\n FROM task_attempts ta\n WHERE ta.task_id = t.id\n ORDER BY ta.created_at DESC\n LIMIT 1\n ) AS \"base_coding_agent!: String\"\n\nFROM tasks t\nWHERE t.project_id = $1\nORDER BY t.created_at DESC", "describe": { "columns": [ { @@ -59,7 +59,7 @@ "type_info": "Integer" }, { - "name": "latest_attempt_executor", + "name": "base_coding_agent!: String", "ordinal": 11, "type_info": "Text" } @@ -82,5 +82,5 @@ true ] }, - "hash": "315cf28396b52c1215a53c72c57e0277d6143d8fd658f141a86d6fd0770fb539" + "hash": "cb2d1da9c3e3ad9f09ea30165f5fe584fef35a015038e83a548edb59ecadaa18" } diff --git a/backend/.sqlx/query-9edb2c01e91fd0f0fe7b56e988c7ae0393150f50be3f419a981e035c0121dfc7.json b/crates/db/.sqlx/query-cd9d629c4040d6766307998dde9926463b9e7bf03a73cf31cafe73d046579d54.json similarity index 50% rename from backend/.sqlx/query-9edb2c01e91fd0f0fe7b56e988c7ae0393150f50be3f419a981e035c0121dfc7.json rename to crates/db/.sqlx/query-cd9d629c4040d6766307998dde9926463b9e7bf03a73cf31cafe73d046579d54.json index d2b42366..97fe29e9 100644 --- a/backend/.sqlx/query-9edb2c01e91fd0f0fe7b56e988c7ae0393150f50be3f419a981e035c0121dfc7.json +++ b/crates/db/.sqlx/query-cd9d629c4040d6766307998dde9926463b9e7bf03a73cf31cafe73d046579d54.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n process_type as \"process_type!: ExecutionProcessType\",\n executor_type,\n status as \"status!: ExecutionProcessStatus\",\n command, \n args, \n working_directory, \n stdout, \n stderr, \n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE id = $1", + "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE id = $1", "describe": { "columns": [ { @@ -14,12 +14,12 @@ "type_info": "Blob" }, { - "name": "process_type!: ExecutionProcessType", + "name": "run_reason!: ExecutionProcessRunReason", "ordinal": 2, "type_info": "Text" }, { - "name": "executor_type", + "name": "executor_action!: sqlx::types::Json", "ordinal": 3, "type_info": "Text" }, @@ -28,54 +28,29 @@ "ordinal": 4, "type_info": "Text" }, - { - "name": "command", - "ordinal": 5, - "type_info": "Text" - }, - { - "name": "args", - "ordinal": 6, - "type_info": "Text" - }, - { - "name": "working_directory", - "ordinal": 7, - "type_info": "Text" - }, - { - "name": "stdout", - "ordinal": 8, - "type_info": "Text" - }, - { - "name": "stderr", - "ordinal": 9, - "type_info": "Text" - }, { "name": "exit_code", - "ordinal": 10, + "ordinal": 5, "type_info": "Integer" }, { "name": "started_at!: DateTime", - "ordinal": 11, + "ordinal": 6, "type_info": "Text" }, { "name": "completed_at?: DateTime", - "ordinal": 12, + "ordinal": 7, "type_info": "Text" }, { "name": "created_at!: DateTime", - "ordinal": 13, + "ordinal": 8, "type_info": "Text" }, { "name": "updated_at!: DateTime", - "ordinal": 14, + "ordinal": 9, "type_info": "Text" } ], @@ -86,19 +61,14 @@ true, false, false, - true, false, false, true, 
false, true, - true, - true, - false, - true, false, false ] }, - "hash": "9edb2c01e91fd0f0fe7b56e988c7ae0393150f50be3f419a981e035c0121dfc7" + "hash": "cd9d629c4040d6766307998dde9926463b9e7bf03a73cf31cafe73d046579d54" } diff --git a/crates/db/.sqlx/query-ce908743b4ad501211d530c4b25ce8ab99a94962d5aa92117a6039201ffa6c2c.json b/crates/db/.sqlx/query-ce908743b4ad501211d530c4b25ce8ab99a94962d5aa92117a6039201ffa6c2c.json new file mode 100644 index 00000000..383ee706 --- /dev/null +++ b/crates/db/.sqlx/query-ce908743b4ad501211d530c4b25ce8ab99a94962d5aa92117a6039201ffa6c2c.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE task_attempts SET branch = $1, updated_at = $2 WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Right": 3 + }, + "nullable": [] + }, + "hash": "ce908743b4ad501211d530c4b25ce8ab99a94962d5aa92117a6039201ffa6c2c" +} diff --git a/backend/.sqlx/query-d30aa5786757f32bf2b9c5fe51a45e506c71c28c5994e430d9b0546adb15ffa2.json b/crates/db/.sqlx/query-d30aa5786757f32bf2b9c5fe51a45e506c71c28c5994e430d9b0546adb15ffa2.json similarity index 100% rename from backend/.sqlx/query-d30aa5786757f32bf2b9c5fe51a45e506c71c28c5994e430d9b0546adb15ffa2.json rename to crates/db/.sqlx/query-d30aa5786757f32bf2b9c5fe51a45e506c71c28c5994e430d9b0546adb15ffa2.json diff --git a/crates/db/.sqlx/query-ecc6c9458bffcc70af47c1f55e97efcf02f105564e7d97247dac1fd704312871.json b/crates/db/.sqlx/query-ecc6c9458bffcc70af47c1f55e97efcf02f105564e7d97247dac1fd704312871.json new file mode 100644 index 00000000..3dbb4dcb --- /dev/null +++ b/crates/db/.sqlx/query-ecc6c9458bffcc70af47c1f55e97efcf02f105564e7d97247dac1fd704312871.json @@ -0,0 +1,74 @@ +{ + "db_name": "SQLite", + "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE task_attempt_id = $1 \n ORDER BY created_at ASC", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_attempt_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "run_reason!: ExecutionProcessRunReason", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "executor_action!: sqlx::types::Json", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "status!: ExecutionProcessStatus", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "exit_code", + "ordinal": 5, + "type_info": "Integer" + }, + { + "name": "started_at!: DateTime", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "completed_at?: DateTime", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at!: DateTime", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 9, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true, + false, + false, + false, + false, + true, + false, + true, + false, + false + ] + }, + "hash": "ecc6c9458bffcc70af47c1f55e97efcf02f105564e7d97247dac1fd704312871" +} diff --git a/crates/db/.sqlx/query-f58b737bf1deb0e8d57fca5b99423e8ba438949679816316ef446e0b7b8eb3e6.json b/crates/db/.sqlx/query-f58b737bf1deb0e8d57fca5b99423e8ba438949679816316ef446e0b7b8eb3e6.json new file mode 
100644 index 00000000..9e0d0b39 --- /dev/null +++ b/crates/db/.sqlx/query-f58b737bf1deb0e8d57fca5b99423e8ba438949679816316ef446e0b7b8eb3e6.json @@ -0,0 +1,74 @@ +{ + "db_name": "SQLite", + "query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime\",\n completed_at as \"completed_at?: DateTime\",\n created_at as \"created_at!: DateTime\", \n updated_at as \"updated_at!: DateTime\"\n FROM execution_processes \n WHERE task_attempt_id = $1 \n AND executor_action_type = $2\n ORDER BY created_at DESC \n LIMIT 1", + "describe": { + "columns": [ + { + "name": "id!: Uuid", + "ordinal": 0, + "type_info": "Blob" + }, + { + "name": "task_attempt_id!: Uuid", + "ordinal": 1, + "type_info": "Blob" + }, + { + "name": "run_reason!: ExecutionProcessRunReason", + "ordinal": 2, + "type_info": "Text" + }, + { + "name": "executor_action!: sqlx::types::Json", + "ordinal": 3, + "type_info": "Text" + }, + { + "name": "status!: ExecutionProcessStatus", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "exit_code", + "ordinal": 5, + "type_info": "Integer" + }, + { + "name": "started_at!: DateTime", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "completed_at?: DateTime", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at!: DateTime", + "ordinal": 8, + "type_info": "Text" + }, + { + "name": "updated_at!: DateTime", + "ordinal": 9, + "type_info": "Text" + } + ], + "parameters": { + "Right": 2 + }, + "nullable": [ + true, + false, + false, + false, + false, + true, + false, + true, + false, + false + ] + }, + "hash": "f58b737bf1deb0e8d57fca5b99423e8ba438949679816316ef446e0b7b8eb3e6" +} diff --git a/backend/.sqlx/query-a157cf00616f703bfba21927f1eb1c9eec2a81c02da15f66efdba0b6c375de1b.json b/crates/db/.sqlx/query-f9a448b2fdb1435b78a062e5ea77ab77ce31be2205887185900647b4bf49ea73.json similarity index 65% rename from backend/.sqlx/query-a157cf00616f703bfba21927f1eb1c9eec2a81c02da15f66efdba0b6c375de1b.json rename to crates/db/.sqlx/query-f9a448b2fdb1435b78a062e5ea77ab77ce31be2205887185900647b4bf49ea73.json index b214d0d1..222f0e2c 100644 --- a/backend/.sqlx/query-a157cf00616f703bfba21927f1eb1c9eec2a81c02da15f66efdba0b6c375de1b.json +++ b/crates/db/.sqlx/query-f9a448b2fdb1435b78a062e5ea77ab77ce31be2205887185900647b4bf49ea73.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "SELECT id as \"id!: Uuid\", worktree_path FROM task_attempts WHERE worktree_deleted = FALSE", + "query": "SELECT id as \"id!: Uuid\", container_ref FROM task_attempts WHERE worktree_deleted = FALSE", "describe": { "columns": [ { @@ -9,7 +9,7 @@ "type_info": "Blob" }, { - "name": "worktree_path", + "name": "container_ref", "ordinal": 1, "type_info": "Text" } @@ -19,8 +19,8 @@ }, "nullable": [ true, - false + true ] }, - "hash": "a157cf00616f703bfba21927f1eb1c9eec2a81c02da15f66efdba0b6c375de1b" + "hash": "f9a448b2fdb1435b78a062e5ea77ab77ce31be2205887185900647b4bf49ea73" } diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml new file mode 100644 index 00000000..ca6c4ef6 --- /dev/null +++ b/crates/db/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "db" +version = "0.0.56" +edition = "2024" + +[dependencies] +utils = { path = "../utils" } +executors = { path = "../executors" } +tokio = { workspace = true } +tokio-util = { version = "0.7", features = ["io"] } 
+thiserror = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +anyhow = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] } +chrono = { version = "0.4", features = ["serde"] } +uuid = { version = "1.0", features = ["v4", "serde"] } +ts-rs = { workspace = true } +async-trait = "0.1" +regex = "1.11.1" +sentry-tracing = { version = "0.41.0", features = ["backtrace"] } +futures-util = "0.3" diff --git a/backend/migrations/20250617183714_init.sql b/crates/db/migrations/20250617183714_init.sql similarity index 100% rename from backend/migrations/20250617183714_init.sql rename to crates/db/migrations/20250617183714_init.sql diff --git a/backend/migrations/20250620212427_execution_processes.sql b/crates/db/migrations/20250620212427_execution_processes.sql similarity index 100% rename from backend/migrations/20250620212427_execution_processes.sql rename to crates/db/migrations/20250620212427_execution_processes.sql diff --git a/backend/migrations/20250620214100_remove_stdout_stderr_from_task_attempts.sql b/crates/db/migrations/20250620214100_remove_stdout_stderr_from_task_attempts.sql similarity index 100% rename from backend/migrations/20250620214100_remove_stdout_stderr_from_task_attempts.sql rename to crates/db/migrations/20250620214100_remove_stdout_stderr_from_task_attempts.sql diff --git a/backend/migrations/20250621120000_relate_activities_to_execution_processes.sql b/crates/db/migrations/20250621120000_relate_activities_to_execution_processes.sql similarity index 100% rename from backend/migrations/20250621120000_relate_activities_to_execution_processes.sql rename to crates/db/migrations/20250621120000_relate_activities_to_execution_processes.sql diff --git a/backend/migrations/20250623120000_executor_sessions.sql b/crates/db/migrations/20250623120000_executor_sessions.sql similarity index 100% rename from backend/migrations/20250623120000_executor_sessions.sql rename to crates/db/migrations/20250623120000_executor_sessions.sql diff --git a/backend/migrations/20250623130000_add_executor_type_to_execution_processes.sql b/crates/db/migrations/20250623130000_add_executor_type_to_execution_processes.sql similarity index 100% rename from backend/migrations/20250623130000_add_executor_type_to_execution_processes.sql rename to crates/db/migrations/20250623130000_add_executor_type_to_execution_processes.sql diff --git a/backend/migrations/20250625000000_add_dev_script_to_projects.sql b/crates/db/migrations/20250625000000_add_dev_script_to_projects.sql similarity index 100% rename from backend/migrations/20250625000000_add_dev_script_to_projects.sql rename to crates/db/migrations/20250625000000_add_dev_script_to_projects.sql diff --git a/backend/migrations/20250701000000_add_branch_to_task_attempts.sql b/crates/db/migrations/20250701000000_add_branch_to_task_attempts.sql similarity index 100% rename from backend/migrations/20250701000000_add_branch_to_task_attempts.sql rename to crates/db/migrations/20250701000000_add_branch_to_task_attempts.sql diff --git a/backend/migrations/20250701000001_add_pr_tracking_to_task_attempts.sql b/crates/db/migrations/20250701000001_add_pr_tracking_to_task_attempts.sql similarity index 100% rename from backend/migrations/20250701000001_add_pr_tracking_to_task_attempts.sql rename to crates/db/migrations/20250701000001_add_pr_tracking_to_task_attempts.sql diff --git 
a/backend/migrations/20250701120000_add_assistant_message_to_executor_sessions.sql b/crates/db/migrations/20250701120000_add_assistant_message_to_executor_sessions.sql similarity index 100% rename from backend/migrations/20250701120000_add_assistant_message_to_executor_sessions.sql rename to crates/db/migrations/20250701120000_add_assistant_message_to_executor_sessions.sql diff --git a/backend/migrations/20250708000000_add_base_branch_to_task_attempts.sql b/crates/db/migrations/20250708000000_add_base_branch_to_task_attempts.sql similarity index 100% rename from backend/migrations/20250708000000_add_base_branch_to_task_attempts.sql rename to crates/db/migrations/20250708000000_add_base_branch_to_task_attempts.sql diff --git a/backend/migrations/20250709000000_add_worktree_deleted_flag.sql b/crates/db/migrations/20250709000000_add_worktree_deleted_flag.sql similarity index 100% rename from backend/migrations/20250709000000_add_worktree_deleted_flag.sql rename to crates/db/migrations/20250709000000_add_worktree_deleted_flag.sql diff --git a/backend/migrations/20250710000000_add_setup_completion.sql b/crates/db/migrations/20250710000000_add_setup_completion.sql similarity index 100% rename from backend/migrations/20250710000000_add_setup_completion.sql rename to crates/db/migrations/20250710000000_add_setup_completion.sql diff --git a/backend/migrations/20250715154859_add_task_templates.sql b/crates/db/migrations/20250715154859_add_task_templates.sql similarity index 100% rename from backend/migrations/20250715154859_add_task_templates.sql rename to crates/db/migrations/20250715154859_add_task_templates.sql diff --git a/backend/migrations/20250716143725_add_default_templates.sql b/crates/db/migrations/20250716143725_add_default_templates.sql similarity index 100% rename from backend/migrations/20250716143725_add_default_templates.sql rename to crates/db/migrations/20250716143725_add_default_templates.sql diff --git a/backend/migrations/20250716161432_update_executor_names_to_kebab_case.sql b/crates/db/migrations/20250716161432_update_executor_names_to_kebab_case.sql similarity index 100% rename from backend/migrations/20250716161432_update_executor_names_to_kebab_case.sql rename to crates/db/migrations/20250716161432_update_executor_names_to_kebab_case.sql diff --git a/backend/migrations/20250716170000_add_parent_task_to_tasks.sql b/crates/db/migrations/20250716170000_add_parent_task_to_tasks.sql similarity index 100% rename from backend/migrations/20250716170000_add_parent_task_to_tasks.sql rename to crates/db/migrations/20250716170000_add_parent_task_to_tasks.sql diff --git a/backend/migrations/20250717000000_drop_task_attempt_activities.sql b/crates/db/migrations/20250717000000_drop_task_attempt_activities.sql similarity index 100% rename from backend/migrations/20250717000000_drop_task_attempt_activities.sql rename to crates/db/migrations/20250717000000_drop_task_attempt_activities.sql diff --git a/backend/migrations/20250719000000_add_cleanup_script_to_projects.sql b/crates/db/migrations/20250719000000_add_cleanup_script_to_projects.sql similarity index 100% rename from backend/migrations/20250719000000_add_cleanup_script_to_projects.sql rename to crates/db/migrations/20250719000000_add_cleanup_script_to_projects.sql diff --git a/backend/migrations/20250720000000_add_cleanupscript_to_process_type_constraint.sql b/crates/db/migrations/20250720000000_add_cleanupscript_to_process_type_constraint.sql similarity index 100% rename from 
backend/migrations/20250720000000_add_cleanupscript_to_process_type_constraint.sql rename to crates/db/migrations/20250720000000_add_cleanupscript_to_process_type_constraint.sql diff --git a/crates/db/migrations/20250726182144_update_worktree_path_to_container_ref.sql b/crates/db/migrations/20250726182144_update_worktree_path_to_container_ref.sql new file mode 100644 index 00000000..5948fbfc --- /dev/null +++ b/crates/db/migrations/20250726182144_update_worktree_path_to_container_ref.sql @@ -0,0 +1,8 @@ +-- Add migration script here + +ALTER TABLE task_attempts ADD COLUMN container_ref TEXT; -- nullable +UPDATE task_attempts SET container_ref = worktree_path; + +-- If you might have triggers or indexes on worktree_path, drop them before this step. + +ALTER TABLE task_attempts DROP COLUMN worktree_path; \ No newline at end of file diff --git a/crates/db/migrations/20250726210910_make_branch_optional.sql b/crates/db/migrations/20250726210910_make_branch_optional.sql new file mode 100644 index 00000000..3572146e --- /dev/null +++ b/crates/db/migrations/20250726210910_make_branch_optional.sql @@ -0,0 +1,16 @@ +-- Add migration script here + +-- 1) Create replacement column (nullable TEXT) +ALTER TABLE task_attempts ADD COLUMN branch_new TEXT; -- nullable + +-- 2) Copy existing values +UPDATE task_attempts SET branch_new = branch; + +-- If you have indexes/triggers/constraints that reference "branch", +-- drop them before the next two steps and recreate them afterwards. + +-- 3) Remove the old non-nullable column +ALTER TABLE task_attempts DROP COLUMN branch; + +-- 4) Keep the original column name +ALTER TABLE task_attempts RENAME COLUMN branch_new TO branch; diff --git a/crates/db/migrations/20250727124142_remove_command_from_execution_process.sql b/crates/db/migrations/20250727124142_remove_command_from_execution_process.sql new file mode 100644 index 00000000..f17da047 --- /dev/null +++ b/crates/db/migrations/20250727124142_remove_command_from_execution_process.sql @@ -0,0 +1,4 @@ +-- Add migration script here + +ALTER TABLE execution_processes DROP COLUMN command; +ALTER TABLE execution_processes DROP COLUMN args; \ No newline at end of file diff --git a/crates/db/migrations/20250727150349_remove_working_directory.sql b/crates/db/migrations/20250727150349_remove_working_directory.sql new file mode 100644 index 00000000..69279d45 --- /dev/null +++ b/crates/db/migrations/20250727150349_remove_working_directory.sql @@ -0,0 +1,3 @@ +-- Add migration script here + +ALTER TABLE execution_processes DROP COLUMN working_directory; \ No newline at end of file diff --git a/crates/db/migrations/20250729162941_create_execution_process_logs.sql b/crates/db/migrations/20250729162941_create_execution_process_logs.sql new file mode 100644 index 00000000..ae753267 --- /dev/null +++ b/crates/db/migrations/20250729162941_create_execution_process_logs.sql @@ -0,0 +1,11 @@ +PRAGMA foreign_keys = ON; + +CREATE TABLE execution_process_logs ( + execution_id BLOB PRIMARY KEY, + logs TEXT NOT NULL, -- JSONL format (one LogMsg per line) + byte_size INTEGER NOT NULL, + inserted_at TEXT NOT NULL DEFAULT (datetime('now', 'subsec')), + FOREIGN KEY (execution_id) REFERENCES execution_processes(id) ON DELETE CASCADE +); + +CREATE INDEX idx_execution_process_logs_inserted_at ON execution_process_logs(inserted_at); diff --git a/crates/db/migrations/20250729165913_remove_stdout_and_stderr_from_execution_processes.sql b/crates/db/migrations/20250729165913_remove_stdout_and_stderr_from_execution_processes.sql new file mode 
100644 index 00000000..5b662d03 --- /dev/null +++ b/crates/db/migrations/20250729165913_remove_stdout_and_stderr_from_execution_processes.sql @@ -0,0 +1,4 @@ +-- Add migration script here + +ALTER TABLE execution_processes DROP COLUMN stdout; +ALTER TABLE execution_processes DROP COLUMN stderr; \ No newline at end of file diff --git a/crates/db/migrations/20250730000000_add_executor_action_to_execution_processes.sql b/crates/db/migrations/20250730000000_add_executor_action_to_execution_processes.sql new file mode 100644 index 00000000..5655cc74 --- /dev/null +++ b/crates/db/migrations/20250730000000_add_executor_action_to_execution_processes.sql @@ -0,0 +1,8 @@ +PRAGMA foreign_keys = ON; + +-- Clear existing execution_processes records since we can't meaningfully migrate them +-- (old records lack the actual script content and prompts needed for ExecutorActions) +DELETE FROM execution_processes; + +-- Add executor_action column to execution_processes table for storing full ExecutorActions JSON +ALTER TABLE execution_processes ADD COLUMN executor_action TEXT NOT NULL DEFAULT ''; diff --git a/crates/db/migrations/20250730000001_rename_process_type_to_run_reason.sql b/crates/db/migrations/20250730000001_rename_process_type_to_run_reason.sql new file mode 100644 index 00000000..4dea70f4 --- /dev/null +++ b/crates/db/migrations/20250730000001_rename_process_type_to_run_reason.sql @@ -0,0 +1,4 @@ +PRAGMA foreign_keys = ON; + +-- Rename process_type column to run_reason for better semantic clarity +ALTER TABLE execution_processes RENAME COLUMN process_type TO run_reason; diff --git a/crates/db/migrations/20250730124500_add_execution_process_task_attempt_index.sql b/crates/db/migrations/20250730124500_add_execution_process_task_attempt_index.sql new file mode 100644 index 00000000..84551adf --- /dev/null +++ b/crates/db/migrations/20250730124500_add_execution_process_task_attempt_index.sql @@ -0,0 +1,6 @@ +ALTER TABLE execution_processes +ADD COLUMN executor_action_type TEXT + GENERATED ALWAYS AS (json_extract(executor_action, '$.type')) VIRTUAL; + +CREATE INDEX idx_execution_processes_task_attempt_type_created +ON execution_processes (task_attempt_id, executor_action_type, created_at DESC); \ No newline at end of file diff --git a/crates/db/migrations/20250805112332_add_executor_action_type_to_task_attempts.sql b/crates/db/migrations/20250805112332_add_executor_action_type_to_task_attempts.sql new file mode 100644 index 00000000..2fa7f435 --- /dev/null +++ b/crates/db/migrations/20250805112332_add_executor_action_type_to_task_attempts.sql @@ -0,0 +1,5 @@ +-- Remove unused executor_type column from execution_processes +ALTER TABLE execution_processes DROP COLUMN executor_type; + +ALTER TABLE task_attempts RENAME COLUMN executor TO base_coding_agent; + diff --git a/crates/db/migrations/20250805122100_fix_executor_action_type_virtual_column.sql b/crates/db/migrations/20250805122100_fix_executor_action_type_virtual_column.sql new file mode 100644 index 00000000..499a11de --- /dev/null +++ b/crates/db/migrations/20250805122100_fix_executor_action_type_virtual_column.sql @@ -0,0 +1,12 @@ +-- Drop the existing virtual column and index +DROP INDEX IF EXISTS idx_execution_processes_task_attempt_type_created; +ALTER TABLE execution_processes DROP COLUMN executor_action_type; + +-- Recreate the virtual column with the correct JSON path +ALTER TABLE execution_processes +ADD COLUMN executor_action_type TEXT + GENERATED ALWAYS AS (json_extract(executor_action, '$.typ.type')) VIRTUAL; + +-- Recreate the index 
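Note: the two migrations above add executor_action_type as a VIRTUAL generated column over the stored ExecutorAction JSON, and the fix migration re-points json_extract at '$.typ.type' because the serialized action evidently nests its variant tag one level down; the CREATE INDEX that follows recreates the index over the corrected column. A hedged sketch of the JSON shape the corrected path assumes (field names beyond "typ"/"type" and all values are illustrative, not taken from this patch):

    // Hedged sketch: what json_extract(executor_action, '$.typ.type') would see per row,
    // assuming the ExecutorAction serializes with its variant tag under a "typ" object.
    use serde_json::json;

    fn main() {
        let executor_action = json!({
            "typ": { "type": "CodingAgentInitialRequest" } // illustrative tag value
        });
        // The generated column effectively computes this for every row:
        let action_type = executor_action.pointer("/typ/type").and_then(|v| v.as_str());
        assert_eq!(action_type, Some("CodingAgentInitialRequest"));
    }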
+CREATE INDEX idx_execution_processes_task_attempt_type_created +ON execution_processes (task_attempt_id, executor_action_type, created_at DESC); diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs new file mode 100644 index 00000000..69133c69 --- /dev/null +++ b/crates/db/src/lib.rs @@ -0,0 +1,76 @@ +use std::{str::FromStr, sync::Arc}; + +use sqlx::{ + Error, Pool, Sqlite, SqlitePool, + sqlite::{SqliteConnectOptions, SqliteConnection, SqlitePoolOptions}, +}; +use utils::assets::asset_dir; + +pub mod models; + +#[derive(Clone)] +pub struct DBService { + pub pool: Pool<Sqlite>, +} + +impl DBService { + pub async fn new() -> Result<Self, Error> { + let database_url = format!( + "sqlite://{}", + asset_dir().join("db.sqlite").to_string_lossy() + ); + let options = SqliteConnectOptions::from_str(&database_url)?.create_if_missing(true); + let pool = SqlitePool::connect_with(options).await?; + sqlx::migrate!("./migrations").run(&pool).await?; + Ok(DBService { pool }) + } + + pub async fn new_with_after_connect<F>(after_connect: F) -> Result<Self, Error> + where + F: for<'a> Fn( + &'a mut SqliteConnection, + ) -> std::pin::Pin< + Box<dyn std::future::Future<Output = Result<(), Error>> + Send + 'a>, + > + Send + + Sync + + 'static, + { + let pool = Self::create_pool(Some(Arc::new(after_connect))).await?; + Ok(DBService { pool }) + } + + async fn create_pool<F>(after_connect: Option<Arc<F>>) -> Result<Pool<Sqlite>, Error> + where + F: for<'a> Fn( + &'a mut SqliteConnection, + ) -> std::pin::Pin< + Box<dyn std::future::Future<Output = Result<(), Error>> + Send + 'a>, + > + Send + + Sync + + 'static, + { + let database_url = format!( + "sqlite://{}", + asset_dir().join("db.sqlite").to_string_lossy() + ); + let options = SqliteConnectOptions::from_str(&database_url)?.create_if_missing(true); + + let pool = if let Some(hook) = after_connect { + SqlitePoolOptions::new() + .after_connect(move |conn, _meta| { + let hook = hook.clone(); + Box::pin(async move { + hook(conn).await?; + Ok(()) + }) + }) + .connect_with(options) + .await? + } else { + SqlitePool::connect_with(options).await?
+ }; + + sqlx::migrate!("./migrations").run(&pool).await?; + Ok(pool) + } +} diff --git a/backend/src/models/execution_process.rs b/crates/db/src/models/execution_process.rs similarity index 58% rename from backend/src/models/execution_process.rs rename to crates/db/src/models/execution_process.rs index eb81a03d..df6d1142 100644 --- a/backend/src/models/execution_process.rs +++ b/crates/db/src/models/execution_process.rs @@ -1,31 +1,16 @@ use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize, Serializer}; +use executors::actions::{ExecutorAction, ExecutorActionKind}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; use sqlx::{FromRow, SqlitePool, Type}; use ts_rs::TS; use uuid::Uuid; -use crate::app_state::ExecutionType; - -/// Filter out stderr boundary markers from output -fn filter_stderr_boundary_markers(stderr: &Option) -> Option { - stderr - .as_ref() - .map(|s| s.replace("---STDERR_CHUNK_BOUNDARY---", "")) -} - -/// Custom serializer for stderr field that filters out boundary markers -fn serialize_filtered_stderr(stderr: &Option, serializer: S) -> Result -where - S: Serializer, -{ - let filtered = filter_stderr_boundary_markers(stderr); - filtered.serialize(serializer) -} +use super::{task::Task, task_attempt::TaskAttempt}; #[derive(Debug, Clone, Type, Serialize, Deserialize, PartialEq, TS)] #[sqlx(type_name = "execution_process_status", rename_all = "lowercase")] #[serde(rename_all = "lowercase")] -#[ts(export)] pub enum ExecutionProcessStatus { Running, Completed, @@ -34,52 +19,23 @@ pub enum ExecutionProcessStatus { } #[derive(Debug, Clone, Type, Serialize, Deserialize, PartialEq, TS)] -#[sqlx(type_name = "execution_process_type", rename_all = "lowercase")] +#[sqlx(type_name = "execution_process_run_reason", rename_all = "lowercase")] #[serde(rename_all = "lowercase")] -#[ts(export)] -pub enum ExecutionProcessType { +pub enum ExecutionProcessRunReason { SetupScript, CleanupScript, CodingAgent, DevServer, } -impl From for ExecutionProcessType { - fn from(exec_type: ExecutionType) -> Self { - match exec_type { - ExecutionType::SetupScript => ExecutionProcessType::SetupScript, - ExecutionType::CleanupScript => ExecutionProcessType::CleanupScript, - ExecutionType::CodingAgent => ExecutionProcessType::CodingAgent, - ExecutionType::DevServer => ExecutionProcessType::DevServer, - } - } -} - -impl From for ExecutionType { - fn from(exec_type: ExecutionProcessType) -> Self { - match exec_type { - ExecutionProcessType::SetupScript => ExecutionType::SetupScript, - ExecutionProcessType::CleanupScript => ExecutionType::CleanupScript, - ExecutionProcessType::CodingAgent => ExecutionType::CodingAgent, - ExecutionProcessType::DevServer => ExecutionType::DevServer, - } - } -} - #[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] -#[ts(export)] pub struct ExecutionProcess { pub id: Uuid, pub task_attempt_id: Uuid, - pub process_type: ExecutionProcessType, - pub executor_type: Option, // "echo", "claude", "amp", etc. 
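Note: new_with_after_connect in crates/db/src/lib.rs above lets the caller run a closure on every connection the pool opens before it is handed out. A minimal usage sketch; the PRAGMA chosen here is an assumption for illustration, not something this patch configures:

    // Hedged sketch: supplying an after-connect hook to DBService.
    use std::{future::Future, pin::Pin};

    use db::DBService;
    use sqlx::{Executor, sqlite::SqliteConnection};

    fn enable_foreign_keys(
        conn: &mut SqliteConnection,
    ) -> Pin<Box<dyn Future<Output = Result<(), sqlx::Error>> + Send + '_>> {
        Box::pin(async move {
            // Runs once for every pooled connection.
            conn.execute("PRAGMA foreign_keys = ON;").await?;
            Ok(())
        })
    }

    async fn open_db() -> Result<DBService, sqlx::Error> {
        DBService::new_with_after_connect(enable_foreign_keys).await
    }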
- only for CodingAgent processes + pub run_reason: ExecutionProcessRunReason, + #[ts(skip)] + pub executor_action: sqlx::types::Json, pub status: ExecutionProcessStatus, - pub command: String, - pub args: Option, // JSON array of arguments - pub working_directory: String, - pub stdout: Option, - #[serde(serialize_with = "serialize_filtered_stderr")] - pub stderr: Option, pub exit_code: Option, pub started_at: DateTime, pub completed_at: Option>, @@ -88,18 +44,13 @@ pub struct ExecutionProcess { } #[derive(Debug, Deserialize, TS)] -#[ts(export)] pub struct CreateExecutionProcess { pub task_attempt_id: Uuid, - pub process_type: ExecutionProcessType, - pub executor_type: Option, - pub command: String, - pub args: Option, - pub working_directory: String, + pub executor_action: ExecutorAction, + pub run_reason: ExecutionProcessRunReason, } #[derive(Debug, Deserialize, TS)] -#[ts(export)] #[allow(dead_code)] pub struct UpdateExecutionProcess { pub status: Option, @@ -108,16 +59,13 @@ pub struct UpdateExecutionProcess { } #[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] -#[ts(export)] pub struct ExecutionProcessSummary { pub id: Uuid, pub task_attempt_id: Uuid, - pub process_type: ExecutionProcessType, - pub executor_type: Option, // "echo", "claude", "amp", etc. - only for CodingAgent processes + pub run_reason: ExecutionProcessRunReason, + #[ts(skip)] + pub executor_action: sqlx::types::Json, pub status: ExecutionProcessStatus, - pub command: String, - pub args: Option, // JSON array of arguments - pub working_directory: String, pub exit_code: Option, pub started_at: DateTime, pub completed_at: Option>, @@ -125,6 +73,20 @@ pub struct ExecutionProcessSummary { pub updated_at: DateTime, } +#[derive(Debug)] +pub struct ExecutionContext { + pub execution_process: ExecutionProcess, + pub task_attempt: TaskAttempt, + pub task: Task, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ExecutorActionField { + ExecutorAction(ExecutorAction), + Other(Value), +} + impl ExecutionProcess { /// Find execution process by ID pub async fn find_by_id(pool: &SqlitePool, id: Uuid) -> Result, sqlx::Error> { @@ -133,14 +95,9 @@ impl ExecutionProcess { r#"SELECT id as "id!: Uuid", task_attempt_id as "task_attempt_id!: Uuid", - process_type as "process_type!: ExecutionProcessType", - executor_type, + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", status as "status!: ExecutionProcessStatus", - command, - args, - working_directory, - stdout, - stderr, exit_code, started_at as "started_at!: DateTime", completed_at as "completed_at?: DateTime", @@ -154,6 +111,29 @@ impl ExecutionProcess { .await } + /// Find execution process by rowid + pub async fn find_by_rowid(pool: &SqlitePool, rowid: i64) -> Result, sqlx::Error> { + sqlx::query_as!( + ExecutionProcess, + r#"SELECT + id as "id!: Uuid", + task_attempt_id as "task_attempt_id!: Uuid", + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", + status as "status!: ExecutionProcessStatus", + exit_code, + started_at as "started_at!: DateTime", + completed_at as "completed_at?: DateTime", + created_at as "created_at!: DateTime", + updated_at as "updated_at!: DateTime" + FROM execution_processes + WHERE rowid = $1"#, + rowid + ) + .fetch_optional(pool) + .await + } + /// Find all execution processes for a task attempt pub async fn find_by_task_attempt_id( pool: &SqlitePool, @@ -164,14 +144,9 @@ impl 
ExecutionProcess { r#"SELECT id as "id!: Uuid", task_attempt_id as "task_attempt_id!: Uuid", - process_type as "process_type!: ExecutionProcessType", - executor_type, + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", status as "status!: ExecutionProcessStatus", - command, - args, - working_directory, - stdout, - stderr, exit_code, started_at as "started_at!: DateTime", completed_at as "completed_at?: DateTime", @@ -196,12 +171,9 @@ impl ExecutionProcess { r#"SELECT id as "id!: Uuid", task_attempt_id as "task_attempt_id!: Uuid", - process_type as "process_type!: ExecutionProcessType", - executor_type, + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", status as "status!: ExecutionProcessStatus", - command, - args, - working_directory, exit_code, started_at as "started_at!: DateTime", completed_at as "completed_at?: DateTime", @@ -223,14 +195,9 @@ impl ExecutionProcess { r#"SELECT id as "id!: Uuid", task_attempt_id as "task_attempt_id!: Uuid", - process_type as "process_type!: ExecutionProcessType", - executor_type, + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", status as "status!: ExecutionProcessStatus", - command, - args, - working_directory, - stdout, - stderr, exit_code, started_at as "started_at!: DateTime", completed_at as "completed_at?: DateTime", @@ -254,14 +221,9 @@ impl ExecutionProcess { r#"SELECT ep.id as "id!: Uuid", ep.task_attempt_id as "task_attempt_id!: Uuid", - ep.process_type as "process_type!: ExecutionProcessType", - ep.executor_type, + ep.run_reason as "run_reason!: ExecutionProcessRunReason", + ep.executor_action as "executor_action!: sqlx::types::Json", ep.status as "status!: ExecutionProcessStatus", - ep.command, - ep.args, - ep.working_directory, - ep.stdout, - ep.stderr, ep.exit_code, ep.started_at as "started_at!: DateTime", ep.completed_at as "completed_at?: DateTime", @@ -271,7 +233,7 @@ impl ExecutionProcess { JOIN task_attempts ta ON ep.task_attempt_id = ta.id JOIN tasks t ON ta.task_id = t.id WHERE ep.status = 'running' - AND ep.process_type = 'devserver' + AND ep.run_reason = 'devserver' AND t.project_id = $1 ORDER BY ep.created_at ASC"#, project_id @@ -280,6 +242,38 @@ impl ExecutionProcess { .await } + /// Find latest execution process by task attempt and executor action type + pub async fn find_latest_by_task_attempt_and_action_type( + pool: &SqlitePool, + task_attempt_id: Uuid, + executor_action: &ExecutorActionKind, + ) -> Result, sqlx::Error> { + let executor_action_kind = executor_action.to_string(); + sqlx::query_as!( + ExecutionProcess, + r#"SELECT + id as "id!: Uuid", + task_attempt_id as "task_attempt_id!: Uuid", + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", + status as "status!: ExecutionProcessStatus", + exit_code, + started_at as "started_at!: DateTime", + completed_at as "completed_at?: DateTime", + created_at as "created_at!: DateTime", + updated_at as "updated_at!: DateTime" + FROM execution_processes + WHERE task_attempt_id = $1 + AND executor_action_type = $2 + ORDER BY created_at DESC + LIMIT 1"#, + task_attempt_id, + executor_action_kind + ) + .fetch_optional(pool) + .await + } + /// Create a new execution process pub async fn create( pool: &SqlitePool, @@ -287,26 +281,22 @@ impl ExecutionProcess { process_id: Uuid, ) -> Result { let now = Utc::now(); + let 
executor_action_json = sqlx::types::Json(&data.executor_action); sqlx::query_as!( ExecutionProcess, r#"INSERT INTO execution_processes ( - id, task_attempt_id, process_type, executor_type, status, command, args, - working_directory, stdout, stderr, exit_code, started_at, + id, task_attempt_id, run_reason, executor_action, status, + exit_code, started_at, completed_at, created_at, updated_at ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id as "id!: Uuid", task_attempt_id as "task_attempt_id!: Uuid", - process_type as "process_type!: ExecutionProcessType", - executor_type, + run_reason as "run_reason!: ExecutionProcessRunReason", + executor_action as "executor_action!: sqlx::types::Json", status as "status!: ExecutionProcessStatus", - command, - args, - working_directory, - stdout, - stderr, exit_code, started_at as "started_at!: DateTime", completed_at as "completed_at?: DateTime", @@ -314,14 +304,9 @@ impl ExecutionProcess { updated_at as "updated_at!: DateTime""#, process_id, data.task_attempt_id, - data.process_type, - data.executor_type, + data.run_reason, + executor_action_json, ExecutionProcessStatus::Running, - data.command, - data.args, - data.working_directory, - None::, // stdout - None::, // stderr None::, // exit_code now, // started_at None::>, // completed_at @@ -331,6 +316,14 @@ impl ExecutionProcess { .fetch_one(pool) .await } + pub async fn was_killed(pool: &SqlitePool, id: Uuid) -> bool { + if let Ok(exp_process) = Self::find_by_id(pool, id).await + && exp_process.is_some_and(|ep| ep.status == ExecutionProcessStatus::Killed) + { + return true; + } + false + } /// Update execution process status and completion info pub async fn update_completion( @@ -347,7 +340,7 @@ impl ExecutionProcess { sqlx::query!( r#"UPDATE execution_processes - SET status = $1, exit_code = $2, completed_at = $3, updated_at = datetime('now') + SET status = $1, exit_code = $2, completed_at = $3 WHERE id = $4"#, status, exit_code, @@ -360,60 +353,6 @@ impl ExecutionProcess { Ok(()) } - /// Append to stdout for this execution process (for streaming updates) - pub async fn append_stdout( - pool: &SqlitePool, - id: Uuid, - stdout_append: &str, - ) -> Result<(), sqlx::Error> { - sqlx::query!( - "UPDATE execution_processes SET stdout = COALESCE(stdout, '') || $1, updated_at = datetime('now') WHERE id = $2", - stdout_append, - id - ) - .execute(pool) - .await?; - - Ok(()) - } - - /// Append to stderr for this execution process (for streaming updates) - pub async fn append_stderr( - pool: &SqlitePool, - id: Uuid, - stderr_append: &str, - ) -> Result<(), sqlx::Error> { - sqlx::query!( - "UPDATE execution_processes SET stderr = COALESCE(stderr, '') || $1, updated_at = datetime('now') WHERE id = $2", - stderr_append, - id - ) - .execute(pool) - .await?; - - Ok(()) - } - - /// Append to both stdout and stderr for this execution process - pub async fn append_output( - pool: &SqlitePool, - id: Uuid, - stdout_append: Option<&str>, - stderr_append: Option<&str>, - ) -> Result<(), sqlx::Error> { - if let Some(stdout_data) = stdout_append { - Self::append_stdout(pool, id, stdout_data).await?; - } - - if let Some(stderr_data) = stderr_append { - Self::append_stderr(pool, id, stderr_data).await?; - } - - Ok(()) - } - - /// Delete execution processes for a task attempt (cleanup) - #[allow(dead_code)] pub async fn delete_by_task_attempt_id( pool: &SqlitePool, task_attempt_id: Uuid, @@ -424,7 +363,47 @@ impl ExecutionProcess { ) 
.execute(pool) .await?; - Ok(()) } + + pub fn executor_action(&self) -> Result<&ExecutorAction, anyhow::Error> { + match &self.executor_action.0 { + ExecutorActionField::ExecutorAction(action) => Ok(action), + ExecutorActionField::Other(_) => Err(anyhow::anyhow!( + "Executor action is not a valid ExecutorAction JSON object" + )), + } + } + + /// Get the parent TaskAttempt for this execution process + pub async fn parent_task_attempt( + &self, + pool: &SqlitePool, + ) -> Result, sqlx::Error> { + TaskAttempt::find_by_id(pool, self.task_attempt_id).await + } + + /// Load execution context with related task attempt and task + pub async fn load_context( + pool: &SqlitePool, + exec_id: Uuid, + ) -> Result { + let execution_process = Self::find_by_id(pool, exec_id) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + let task_attempt = TaskAttempt::find_by_id(pool, execution_process.task_attempt_id) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + let task = Task::find_by_id(pool, task_attempt.task_id) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + Ok(ExecutionContext { + execution_process, + task_attempt, + task, + }) + } } diff --git a/crates/db/src/models/execution_process_logs.rs b/crates/db/src/models/execution_process_logs.rs new file mode 100644 index 00000000..b30c7bb6 --- /dev/null +++ b/crates/db/src/models/execution_process_logs.rs @@ -0,0 +1,119 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use sqlx::{FromRow, SqlitePool}; +use ts_rs::TS; +use utils::log_msg::LogMsg; +use uuid::Uuid; + +#[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] +pub struct ExecutionProcessLogs { + pub execution_id: Uuid, + pub logs: String, // JSONL format + pub byte_size: i64, + pub inserted_at: DateTime, +} + +#[derive(Debug, Deserialize, TS)] +pub struct CreateExecutionProcessLogs { + pub execution_id: Uuid, + pub logs: String, + pub byte_size: i64, +} + +impl ExecutionProcessLogs { + /// Find logs by execution process ID + pub async fn find_by_execution_id( + pool: &SqlitePool, + execution_id: Uuid, + ) -> Result, sqlx::Error> { + sqlx::query_as!( + ExecutionProcessLogs, + r#"SELECT + execution_id as "execution_id!: Uuid", + logs, + byte_size, + inserted_at as "inserted_at!: DateTime" + FROM execution_process_logs + WHERE execution_id = $1"#, + execution_id + ) + .fetch_optional(pool) + .await + } + + /// Create or update execution process logs + pub async fn upsert( + pool: &SqlitePool, + data: &CreateExecutionProcessLogs, + ) -> Result { + let now = Utc::now(); + + sqlx::query_as!( + ExecutionProcessLogs, + r#"INSERT INTO execution_process_logs (execution_id, logs, byte_size, inserted_at) + VALUES ($1, $2, $3, $4) + ON CONFLICT (execution_id) DO UPDATE + SET logs = EXCLUDED.logs, + byte_size = EXCLUDED.byte_size, + inserted_at = EXCLUDED.inserted_at + RETURNING + execution_id as "execution_id!: Uuid", + logs, + byte_size, + inserted_at as "inserted_at!: DateTime""#, + data.execution_id, + data.logs, + data.byte_size, + now + ) + .fetch_one(pool) + .await + } + + /// Parse JSONL logs back into Vec + pub fn parse_logs(&self) -> Result, serde_json::Error> { + let mut messages = Vec::new(); + for line in self.logs.lines() { + if !line.trim().is_empty() { + let msg: LogMsg = serde_json::from_str(line)?; + messages.push(msg); + } + } + Ok(messages) + } + + /// Convert Vec to JSONL format + pub fn serialize_logs(messages: &[LogMsg]) -> Result { + let mut jsonl = String::new(); + for msg in messages { + let line = serde_json::to_string(msg)?; + jsonl.push_str(&line); + 
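Note: executor_action is stored as Json<ExecutorActionField>, an untagged enum, so rows whose JSON no longer matches the current ExecutorAction shape still deserialize (as Other) instead of failing the whole query; the executor_action() accessor above then surfaces that case as an error. A short usage sketch combining it with load_context (function and variable names outside this patch are illustrative):

    // Hedged sketch: resolving an execution process with its attempt and task,
    // then borrowing the typed executor action if the stored JSON is still valid.
    use db::models::execution_process::ExecutionProcess;
    use sqlx::SqlitePool;
    use uuid::Uuid;

    async fn describe(pool: &SqlitePool, exec_id: Uuid) -> anyhow::Result<()> {
        let ctx = ExecutionProcess::load_context(pool, exec_id).await?;
        let action = ctx.execution_process.executor_action()?; // errors on legacy/invalid JSON
        tracing::info!(
            "attempt {} / task {} ran {:?}",
            ctx.task_attempt.id,
            ctx.task.id,
            ctx.execution_process.run_reason
        );
        let _ = action; // the typed ExecutorAction would be dispatched from here
        Ok(())
    }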
jsonl.push('\n'); + } + Ok(jsonl) + } + + /// Append a JSONL line to the logs for an execution process + pub async fn append_log_line( + pool: &SqlitePool, + execution_id: Uuid, + jsonl_line: &str, + ) -> Result<(), sqlx::Error> { + let byte_size = jsonl_line.len() as i64; + sqlx::query!( + r#"INSERT INTO execution_process_logs (execution_id, logs, byte_size, inserted_at) + VALUES ($1, $2, $3, datetime('now', 'subsec')) + ON CONFLICT (execution_id) DO UPDATE + SET logs = logs || $2, + byte_size = byte_size + $3, + inserted_at = datetime('now', 'subsec')"#, + execution_id, + jsonl_line, + byte_size + ) + .execute(pool) + .await?; + + Ok(()) + } +} diff --git a/backend/src/models/executor_session.rs b/crates/db/src/models/executor_session.rs similarity index 92% rename from backend/src/models/executor_session.rs rename to crates/db/src/models/executor_session.rs index 4206881b..4dcefc17 100644 --- a/backend/src/models/executor_session.rs +++ b/crates/db/src/models/executor_session.rs @@ -5,7 +5,6 @@ use ts_rs::TS; use uuid::Uuid; #[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] -#[ts(export)] pub struct ExecutorSession { pub id: Uuid, pub task_attempt_id: Uuid, @@ -18,7 +17,6 @@ pub struct ExecutorSession { } #[derive(Debug, Deserialize, TS)] -#[ts(export)] pub struct CreateExecutorSession { pub task_attempt_id: Uuid, pub execution_process_id: Uuid, @@ -26,7 +24,6 @@ pub struct CreateExecutorSession { } #[derive(Debug, Deserialize, TS)] -#[ts(export)] #[allow(dead_code)] pub struct UpdateExecutorSession { pub session_id: Option, @@ -117,7 +114,9 @@ impl ExecutorSession { tracing::debug!( "Creating executor session: id={}, task_attempt_id={}, execution_process_id={}, external_session_id=None (will be set later)", - session_id, data.task_attempt_id, data.execution_process_id + session_id, + data.task_attempt_id, + data.execution_process_id ); sqlx::query_as!( @@ -155,11 +154,13 @@ impl ExecutorSession { execution_process_id: Uuid, external_session_id: &str, ) -> Result<(), sqlx::Error> { + let now = Utc::now(); sqlx::query!( r#"UPDATE executor_sessions - SET session_id = $1, updated_at = datetime('now') - WHERE execution_process_id = $2"#, + SET session_id = $1, updated_at = $2 + WHERE execution_process_id = $3"#, external_session_id, + now, execution_process_id ) .execute(pool) @@ -175,11 +176,13 @@ impl ExecutorSession { id: Uuid, prompt: &str, ) -> Result<(), sqlx::Error> { + let now = Utc::now(); sqlx::query!( r#"UPDATE executor_sessions - SET prompt = $1, updated_at = datetime('now') - WHERE id = $2"#, + SET prompt = $1, updated_at = $2 + WHERE id = $3"#, prompt, + now, id ) .execute(pool) @@ -194,11 +197,13 @@ impl ExecutorSession { execution_process_id: Uuid, summary: &str, ) -> Result<(), sqlx::Error> { + let now = Utc::now(); sqlx::query!( r#"UPDATE executor_sessions - SET summary = $1, updated_at = datetime('now') - WHERE execution_process_id = $2"#, + SET summary = $1, updated_at = $2 + WHERE execution_process_id = $3"#, summary, + now, execution_process_id ) .execute(pool) @@ -208,7 +213,6 @@ impl ExecutorSession { } /// Delete executor sessions for a task attempt (cleanup) - #[allow(dead_code)] pub async fn delete_by_task_attempt_id( pool: &SqlitePool, task_attempt_id: Uuid, diff --git a/backend/src/models/mod.rs b/crates/db/src/models/mod.rs similarity index 53% rename from backend/src/models/mod.rs rename to crates/db/src/models/mod.rs index e1907e0c..e740b690 100644 --- a/backend/src/models/mod.rs +++ b/crates/db/src/models/mod.rs @@ -1,12 +1,7 @@ -pub mod 
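Note: logs are now persisted per execution process as JSONL (one serialized LogMsg per line), appended incrementally via append_log_line and parsed back with parse_logs. A minimal read-back sketch; the function name is illustrative:

    // Hedged sketch: loading the persisted JSONL log history for one execution process.
    use db::models::execution_process_logs::ExecutionProcessLogs;
    use sqlx::SqlitePool;
    use uuid::Uuid;

    async fn print_log_stats(pool: &SqlitePool, execution_id: Uuid) -> anyhow::Result<()> {
        if let Some(record) = ExecutionProcessLogs::find_by_execution_id(pool, execution_id).await? {
            let msgs = record.parse_logs()?; // one LogMsg per JSONL line
            println!("{} log messages, {} bytes stored", msgs.len(), record.byte_size);
        }
        Ok(())
    }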
api_response; -pub mod config; pub mod execution_process; +pub mod execution_process_logs; pub mod executor_session; pub mod project; pub mod task; pub mod task_attempt; - pub mod task_template; - -pub use api_response::ApiResponse; -pub use config::{Config, Environment}; diff --git a/crates/db/src/models/project.rs b/crates/db/src/models/project.rs new file mode 100644 index 00000000..35d4db09 --- /dev/null +++ b/crates/db/src/models/project.rs @@ -0,0 +1,215 @@ +use std::path::PathBuf; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use sqlx::{FromRow, SqlitePool}; +use thiserror::Error; +use ts_rs::TS; +use uuid::Uuid; + +#[derive(Debug, Error)] +pub enum ProjectError { + #[error(transparent)] + Database(#[from] sqlx::Error), + #[error("Project not found")] + ProjectNotFound, + #[error("Project with git repository path already exists")] + GitRepoPathExists, + #[error("Failed to check existing git repository path: {0}")] + GitRepoCheckFailed(String), + #[error("Failed to create project: {0}")] + CreateFailed(String), +} + +#[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] +pub struct Project { + pub id: Uuid, + pub name: String, + pub git_repo_path: PathBuf, + pub setup_script: Option, + pub dev_script: Option, + pub cleanup_script: Option, + + #[ts(type = "Date")] + pub created_at: DateTime, + #[ts(type = "Date")] + pub updated_at: DateTime, +} + +#[derive(Debug, Deserialize, TS)] +pub struct CreateProject { + pub name: String, + pub git_repo_path: String, + pub use_existing_repo: bool, + pub setup_script: Option, + pub dev_script: Option, + pub cleanup_script: Option, +} + +#[derive(Debug, Deserialize, TS)] +pub struct UpdateProject { + pub name: Option, + pub git_repo_path: Option, + pub setup_script: Option, + pub dev_script: Option, + pub cleanup_script: Option, +} + +#[derive(Debug, Serialize, TS)] +pub struct ProjectWithBranch { + pub id: Uuid, + pub name: String, + pub git_repo_path: PathBuf, + pub setup_script: Option, + pub dev_script: Option, + pub cleanup_script: Option, + pub current_branch: Option, + + #[ts(type = "Date")] + pub created_at: DateTime, + #[ts(type = "Date")] + pub updated_at: DateTime, +} + +impl ProjectWithBranch { + pub fn from_project(project: Project, current_branch: Option) -> Self { + Self { + id: project.id, + name: project.name, + git_repo_path: project.git_repo_path, + setup_script: project.setup_script, + dev_script: project.dev_script, + cleanup_script: project.cleanup_script, + current_branch, + created_at: project.created_at, + updated_at: project.updated_at, + } + } +} + +#[derive(Debug, Serialize, TS)] +pub struct SearchResult { + pub path: String, + pub is_file: bool, + pub match_type: SearchMatchType, +} + +#[derive(Debug, Serialize, TS)] +pub enum SearchMatchType { + FileName, + DirectoryName, + FullPath, +} + +impl Project { + pub async fn find_all(pool: &SqlitePool) -> Result, sqlx::Error> { + sqlx::query_as!( + Project, + r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects ORDER BY created_at DESC"# + ) + .fetch_all(pool) + .await + } + + pub async fn find_by_id(pool: &SqlitePool, id: Uuid) -> Result, sqlx::Error> { + sqlx::query_as!( + Project, + r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects WHERE id = $1"#, + id + ) + .fetch_optional(pool) 
+ .await + } + + pub async fn find_by_git_repo_path( + pool: &SqlitePool, + git_repo_path: &str, + ) -> Result, sqlx::Error> { + sqlx::query_as!( + Project, + r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects WHERE git_repo_path = $1"#, + git_repo_path + ) + .fetch_optional(pool) + .await + } + + pub async fn find_by_git_repo_path_excluding_id( + pool: &SqlitePool, + git_repo_path: &str, + exclude_id: Uuid, + ) -> Result, sqlx::Error> { + sqlx::query_as!( + Project, + r#"SELECT id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" FROM projects WHERE git_repo_path = $1 AND id != $2"#, + git_repo_path, + exclude_id + ) + .fetch_optional(pool) + .await + } + + pub async fn create( + pool: &SqlitePool, + data: &CreateProject, + project_id: Uuid, + ) -> Result { + sqlx::query_as!( + Project, + r#"INSERT INTO projects (id, name, git_repo_path, setup_script, dev_script, cleanup_script) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime""#, + project_id, + data.name, + data.git_repo_path, + data.setup_script, + data.dev_script, + data.cleanup_script + ) + .fetch_one(pool) + .await + } + + pub async fn update( + pool: &SqlitePool, + id: Uuid, + name: String, + git_repo_path: String, + setup_script: Option, + dev_script: Option, + cleanup_script: Option, + ) -> Result { + sqlx::query_as!( + Project, + r#"UPDATE projects SET name = $2, git_repo_path = $3, setup_script = $4, dev_script = $5, cleanup_script = $6 WHERE id = $1 RETURNING id as "id!: Uuid", name, git_repo_path, setup_script, dev_script, cleanup_script, created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime""#, + id, + name, + git_repo_path, + setup_script, + dev_script, + cleanup_script + ) + .fetch_one(pool) + .await + } + + pub async fn delete(pool: &SqlitePool, id: Uuid) -> Result { + let result = sqlx::query!("DELETE FROM projects WHERE id = $1", id) + .execute(pool) + .await?; + Ok(result.rows_affected()) + } + + pub async fn exists(pool: &SqlitePool, id: Uuid) -> Result { + let result = sqlx::query!( + r#" + SELECT COUNT(*) as "count!: i64" + FROM projects + WHERE id = $1 + "#, + id + ) + .fetch_one(pool) + .await?; + + Ok(result.count > 0) + } +} diff --git a/backend/src/models/task.rs b/crates/db/src/models/task.rs similarity index 83% rename from backend/src/models/task.rs rename to crates/db/src/models/task.rs index faa5e4a0..8f3bf169 100644 --- a/backend/src/models/task.rs +++ b/crates/db/src/models/task.rs @@ -4,10 +4,11 @@ use sqlx::{FromRow, SqlitePool, Type}; use ts_rs::TS; use uuid::Uuid; +use super::project::Project; + #[derive(Debug, Clone, Type, Serialize, Deserialize, PartialEq, TS)] #[sqlx(type_name = "task_status", rename_all = "lowercase")] #[serde(rename_all = "lowercase")] -#[ts(export)] pub enum TaskStatus { Todo, InProgress, @@ -17,7 +18,6 @@ pub enum TaskStatus { } #[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] -#[ts(export)] pub struct Task { pub id: Uuid, pub project_id: Uuid, // Foreign key to Project @@ -30,7 +30,6 @@ pub struct Task { } #[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] pub struct TaskWithAttemptStatus { pub id: Uuid, pub project_id: Uuid, @@ -43,11 +42,10 @@ 
pub struct TaskWithAttemptStatus { pub has_in_progress_attempt: bool, pub has_merged_attempt: bool, pub last_attempt_failed: bool, - pub latest_attempt_executor: Option, + pub base_coding_agent: String, } #[derive(Debug, Deserialize, TS)] -#[ts(export)] pub struct CreateTask { pub project_id: Uuid, pub title: String, @@ -56,17 +54,6 @@ pub struct CreateTask { } #[derive(Debug, Deserialize, TS)] -#[ts(export)] -pub struct CreateTaskAndStart { - pub project_id: Uuid, - pub title: String, - pub description: Option, - pub parent_task_attempt: Option, - pub executor: Option, -} - -#[derive(Debug, Deserialize, TS)] -#[ts(export)] pub struct UpdateTask { pub title: Option, pub description: Option, @@ -75,6 +62,18 @@ pub struct UpdateTask { } impl Task { + pub fn to_prompt(&self) -> String { + if let Some(description) = &self.description { + format!("Title: {}\n\nDescription:{}", &self.title, description) + } else { + self.title.clone() + } + } + + pub async fn parent_project(&self, pool: &SqlitePool) -> Result, sqlx::Error> { + Project::find_by_id(pool, self.project_id).await + } + pub async fn find_by_project_id_with_attempt_status( pool: &SqlitePool, project_id: Uuid, @@ -97,7 +96,7 @@ impl Task { ON ep.task_attempt_id = ta.id WHERE ta.task_id = t.id AND ep.status = 'running' - AND ep.process_type IN ('setupscript','cleanupscript','codingagent') + AND ep.run_reason IN ('setupscript','cleanupscript','codingagent') LIMIT 1 ) THEN 1 ELSE 0 END AS "has_in_progress_attempt!: i64", @@ -115,18 +114,18 @@ impl Task { JOIN execution_processes ep ON ep.task_attempt_id = ta.id WHERE ta.task_id = t.id - AND ep.process_type IN ('setupscript','cleanupscript','codingagent') + AND ep.run_reason IN ('setupscript','cleanupscript','codingagent') ORDER BY ep.created_at DESC LIMIT 1 ) IN ('failed','killed') THEN 1 ELSE 0 END AS "last_attempt_failed!: i64", - ( SELECT ta.executor + ( SELECT ta.base_coding_agent FROM task_attempts ta - WHERE ta.task_id = t.id + WHERE ta.task_id = t.id ORDER BY ta.created_at DESC - LIMIT 1 - ) AS "latest_attempt_executor" + LIMIT 1 + ) AS "base_coding_agent!: String" FROM tasks t WHERE t.project_id = $1 @@ -150,7 +149,7 @@ ORDER BY t.created_at DESC"#, has_in_progress_attempt: rec.has_in_progress_attempt != 0, has_merged_attempt: rec.has_merged_attempt != 0, last_attempt_failed: rec.last_attempt_failed != 0, - latest_attempt_executor: rec.latest_attempt_executor, + base_coding_agent: rec.base_coding_agent, }) .collect(); @@ -169,6 +168,18 @@ ORDER BY t.created_at DESC"#, .await } + pub async fn find_by_rowid(pool: &SqlitePool, rowid: i64) -> Result, sqlx::Error> { + sqlx::query_as!( + Task, + r#"SELECT id as "id!: Uuid", project_id as "project_id!: Uuid", title, description, status as "status!: TaskStatus", parent_task_attempt as "parent_task_attempt: Uuid", created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime" + FROM tasks + WHERE rowid = $1"#, + rowid + ) + .fetch_optional(pool) + .await + } + pub async fn find_by_id_and_project_id( pool: &SqlitePool, id: Uuid, @@ -216,7 +227,6 @@ ORDER BY t.created_at DESC"#, status: TaskStatus, parent_task_attempt: Option, ) -> Result { - let status_value = status as TaskStatus; sqlx::query_as!( Task, r#"UPDATE tasks @@ -227,7 +237,7 @@ ORDER BY t.created_at DESC"#, project_id, title, description, - status_value, + status, parent_task_attempt ) .fetch_one(pool) @@ -237,29 +247,22 @@ ORDER BY t.created_at DESC"#, pub async fn update_status( pool: &SqlitePool, id: Uuid, - project_id: Uuid, status: TaskStatus, ) -> Result<(), 
sqlx::Error> { - let status_value = status as TaskStatus; sqlx::query!( - "UPDATE tasks SET status = $3, updated_at = CURRENT_TIMESTAMP WHERE id = $1 AND project_id = $2", + "UPDATE tasks SET status = $2, updated_at = CURRENT_TIMESTAMP WHERE id = $1", id, - project_id, - status_value + status ) .execute(pool) .await?; Ok(()) } - pub async fn delete(pool: &SqlitePool, id: Uuid, project_id: Uuid) -> Result { - let result = sqlx::query!( - "DELETE FROM tasks WHERE id = $1 AND project_id = $2", - id, - project_id - ) - .execute(pool) - .await?; + pub async fn delete(pool: &SqlitePool, id: Uuid) -> Result { + let result = sqlx::query!("DELETE FROM tasks WHERE id = $1", id) + .execute(pool) + .await?; Ok(result.rows_affected()) } @@ -281,7 +284,6 @@ ORDER BY t.created_at DESC"#, pub async fn find_related_tasks_by_attempt_id( pool: &SqlitePool, attempt_id: Uuid, - project_id: Uuid, ) -> Result, sqlx::Error> { // Find both children and parent for this attempt sqlx::query_as!( @@ -290,22 +292,19 @@ ORDER BY t.created_at DESC"#, FROM tasks t WHERE ( -- Find children: tasks that have this attempt as parent - t.parent_task_attempt = $1 AND t.project_id = $2 + t.parent_task_attempt = $1 ) OR ( -- Find parent: task that owns the parent attempt of current task EXISTS ( SELECT 1 FROM tasks current_task JOIN task_attempts parent_attempt ON current_task.parent_task_attempt = parent_attempt.id WHERE parent_attempt.task_id = t.id - AND parent_attempt.id = $1 - AND current_task.project_id = $2 ) ) -- Exclude the current task itself to prevent circular references AND t.id != (SELECT task_id FROM task_attempts WHERE id = $1) ORDER BY t.created_at DESC"#, attempt_id, - project_id ) .fetch_all(pool) .await diff --git a/crates/db/src/models/task_attempt.rs b/crates/db/src/models/task_attempt.rs new file mode 100644 index 00000000..9354f6d9 --- /dev/null +++ b/crates/db/src/models/task_attempt.rs @@ -0,0 +1,578 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use sqlx::{FromRow, SqlitePool, Type}; +use thiserror::Error; +use ts_rs::TS; +use uuid::Uuid; + +use super::{project::Project, task::Task}; + +#[derive(Debug)] +pub struct PrInfo { + pub attempt_id: Uuid, + pub task_id: Uuid, + pub pr_number: i64, + pub repo_owner: String, + pub repo_name: String, +} + +impl PrInfo { + pub fn from_task_attempt_data( + attempt_id: Uuid, + task_id: Uuid, + pr_number: i64, + pr_url: &str, + ) -> Result { + let re = regex::Regex::new(r"github\.com/(?P[^/]+)/(?P[^/]+)").unwrap(); + let caps = re + .captures(pr_url) + .ok_or_else(|| sqlx::Error::ColumnNotFound("Invalid URL format".into()))?; + + let owner = caps.name("owner").unwrap().as_str().to_string(); + let repo_name = caps.name("repo").unwrap().as_str().to_string(); + + Ok(Self { + attempt_id, + task_id, + pr_number, + repo_owner: owner, + repo_name, + }) + } +} + +#[derive(Debug, Error)] +pub enum TaskAttemptError { + #[error(transparent)] + Database(#[from] sqlx::Error), + #[error("Task not found")] + TaskNotFound, + #[error("Project not found")] + ProjectNotFound, + #[error("Validation error: {0}")] + ValidationError(String), + #[error("Branch not found: {0}")] + BranchNotFound(String), +} + +#[derive(Debug, Clone, Type, Serialize, Deserialize, PartialEq, TS)] +#[sqlx(type_name = "task_attempt_status", rename_all = "lowercase")] +#[serde(rename_all = "lowercase")] +pub enum TaskAttemptStatus { + SetupRunning, + SetupComplete, + SetupFailed, + ExecutorRunning, + ExecutorComplete, + ExecutorFailed, +} + +#[derive(Debug, Clone, FromRow, Serialize, 
Deserialize, TS)] +pub struct TaskAttempt { + pub id: Uuid, + pub task_id: Uuid, // Foreign key to Task + pub container_ref: Option, // Path to a worktree (local), or cloud container id + pub branch: Option, // Git branch name for this task attempt + pub base_branch: String, // Base branch this attempt is based on + pub merge_commit: Option, + pub base_coding_agent: String, // Name of the base coding agent to use ("AMP", "CLAUDE_CODE", + // "GEMINI", etc.) + pub pr_url: Option, // GitHub PR URL + pub pr_number: Option, // GitHub PR number + pub pr_status: Option, // open, closed, merged + pub pr_merged_at: Option>, // When PR was merged + pub worktree_deleted: bool, // Flag indicating if worktree has been cleaned up + pub setup_completed_at: Option>, // When setup script was last completed + pub created_at: DateTime, + pub updated_at: DateTime, +} + +/// GitHub PR creation parameters +pub struct CreatePrParams<'a> { + pub attempt_id: Uuid, + pub task_id: Uuid, + pub project_id: Uuid, + pub github_token: &'a str, + pub title: &'a str, + pub body: Option<&'a str>, + pub base_branch: Option<&'a str>, +} + +#[derive(Debug, Deserialize, TS)] +pub struct CreateFollowUpAttempt { + pub prompt: String, +} + +/// Context data for resume operations (simplified) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AttemptResumeContext { + pub execution_history: String, + pub cumulative_diffs: String, +} + +#[derive(Debug)] +pub struct TaskAttemptContext { + pub task_attempt: TaskAttempt, + pub task: Task, + pub project: Project, +} + +#[derive(Debug, Deserialize, TS)] +pub struct CreateTaskAttempt { + pub base_coding_agent: String, + pub base_branch: String, +} + +impl TaskAttempt { + pub async fn parent_task(&self, pool: &SqlitePool) -> Result, sqlx::Error> { + Task::find_by_id(pool, self.task_id).await + } + + /// Fetch all task attempts, optionally filtered by task_id. Newest first. 
+ pub async fn fetch_all( + pool: &SqlitePool, + task_id: Option, + ) -> Result, TaskAttemptError> { + let attempts = match task_id { + Some(tid) => sqlx::query_as!( + TaskAttempt, + r#"SELECT id AS "id!: Uuid", + task_id AS "task_id!: Uuid", + container_ref, + branch, + base_branch, + merge_commit, + base_coding_agent AS "base_coding_agent!", + pr_url, + pr_number, + pr_status, + pr_merged_at AS "pr_merged_at: DateTime", + worktree_deleted AS "worktree_deleted!: bool", + setup_completed_at AS "setup_completed_at: DateTime", + created_at AS "created_at!: DateTime", + updated_at AS "updated_at!: DateTime" + FROM task_attempts + WHERE task_id = $1 + ORDER BY created_at DESC"#, + tid + ) + .fetch_all(pool) + .await + .map_err(TaskAttemptError::Database)?, + None => sqlx::query_as!( + TaskAttempt, + r#"SELECT id AS "id!: Uuid", + task_id AS "task_id!: Uuid", + container_ref, + branch, + base_branch, + merge_commit, + base_coding_agent AS "base_coding_agent!", + pr_url, + pr_number, + pr_status, + pr_merged_at AS "pr_merged_at: DateTime", + worktree_deleted AS "worktree_deleted!: bool", + setup_completed_at AS "setup_completed_at: DateTime", + created_at AS "created_at!: DateTime", + updated_at AS "updated_at!: DateTime" + FROM task_attempts + ORDER BY created_at DESC"# + ) + .fetch_all(pool) + .await + .map_err(TaskAttemptError::Database)?, + }; + + Ok(attempts) + } + + /// Load task attempt with full validation - ensures task_attempt belongs to task and task belongs to project + pub async fn load_context( + pool: &SqlitePool, + attempt_id: Uuid, + task_id: Uuid, + project_id: Uuid, + ) -> Result { + // Single query with JOIN validation to ensure proper relationships + let task_attempt = sqlx::query_as!( + TaskAttempt, + r#"SELECT ta.id AS "id!: Uuid", + ta.task_id AS "task_id!: Uuid", + ta.container_ref, + ta.branch, + ta.base_branch, + ta.merge_commit, + ta.base_coding_agent AS "base_coding_agent!", + ta.pr_url, + ta.pr_number, + ta.pr_status, + ta.pr_merged_at AS "pr_merged_at: DateTime", + ta.worktree_deleted AS "worktree_deleted!: bool", + ta.setup_completed_at AS "setup_completed_at: DateTime", + ta.created_at AS "created_at!: DateTime", + ta.updated_at AS "updated_at!: DateTime" + FROM task_attempts ta + JOIN tasks t ON ta.task_id = t.id + JOIN projects p ON t.project_id = p.id + WHERE ta.id = $1 AND t.id = $2 AND p.id = $3"#, + attempt_id, + task_id, + project_id + ) + .fetch_optional(pool) + .await? + .ok_or(TaskAttemptError::TaskNotFound)?; + + // Load task and project (we know they exist due to JOIN validation) + let task = Task::find_by_id(pool, task_id) + .await? + .ok_or(TaskAttemptError::TaskNotFound)?; + + let project = Project::find_by_id(pool, project_id) + .await? 
+ .ok_or(TaskAttemptError::ProjectNotFound)?; + + Ok(TaskAttemptContext { + task_attempt, + task, + project, + }) + } + + /// Update container reference + pub async fn update_container_ref( + pool: &SqlitePool, + attempt_id: Uuid, + container_ref: &str, + ) -> Result<(), sqlx::Error> { + let now = Utc::now(); + sqlx::query!( + "UPDATE task_attempts SET container_ref = $1, updated_at = $2 WHERE id = $3", + container_ref, + now, + attempt_id + ) + .execute(pool) + .await?; + Ok(()) + } + + pub async fn update_branch( + pool: &SqlitePool, + attempt_id: Uuid, + branch: &str, + ) -> Result<(), sqlx::Error> { + let now = Utc::now(); + sqlx::query!( + "UPDATE task_attempts SET branch = $1, updated_at = $2 WHERE id = $3", + branch, + now, + attempt_id + ) + .execute(pool) + .await?; + Ok(()) + } + + /// Helper function to mark a worktree as deleted in the database + pub async fn mark_worktree_deleted( + pool: &SqlitePool, + attempt_id: Uuid, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + "UPDATE task_attempts SET worktree_deleted = TRUE, updated_at = datetime('now') WHERE id = ?", + attempt_id + ) + .execute(pool) + .await?; + Ok(()) + } + + pub async fn find_by_id(pool: &SqlitePool, id: Uuid) -> Result, sqlx::Error> { + sqlx::query_as!( + TaskAttempt, + r#"SELECT id AS "id!: Uuid", + task_id AS "task_id!: Uuid", + container_ref, + branch, + merge_commit, + base_branch, + base_coding_agent AS "base_coding_agent!", + pr_url, + pr_number, + pr_status, + pr_merged_at AS "pr_merged_at: DateTime", + worktree_deleted AS "worktree_deleted!: bool", + setup_completed_at AS "setup_completed_at: DateTime", + created_at AS "created_at!: DateTime", + updated_at AS "updated_at!: DateTime" + FROM task_attempts + WHERE id = $1"#, + id + ) + .fetch_optional(pool) + .await + } + + pub async fn find_by_rowid(pool: &SqlitePool, rowid: i64) -> Result, sqlx::Error> { + sqlx::query_as!( + TaskAttempt, + r#"SELECT id AS "id!: Uuid", + task_id AS "task_id!: Uuid", + container_ref, + branch, + merge_commit, + base_branch, + base_coding_agent AS "base_coding_agent!", + pr_url, + pr_number, + pr_status, + pr_merged_at AS "pr_merged_at: DateTime", + worktree_deleted AS "worktree_deleted!: bool", + setup_completed_at AS "setup_completed_at: DateTime", + created_at AS "created_at!: DateTime", + updated_at AS "updated_at!: DateTime" + FROM task_attempts + WHERE rowid = $1"#, + rowid + ) + .fetch_optional(pool) + .await + } + + // pub async fn find_by_task_id( + // pool: &SqlitePool, + // task_id: Uuid, + // ) -> Result, sqlx::Error> { + // sqlx::query_as!( + // TaskAttempt, + // r#"SELECT id AS "id!: Uuid", + // task_id AS "task_id!: Uuid", + // worktree_path, + // branch, + // base_branch, + // merge_commit, + // executor, + // pr_url, + // pr_number, + // pr_status, + // pr_merged_at AS "pr_merged_at: DateTime", + // worktree_deleted AS "worktree_deleted!: bool", + // setup_completed_at AS "setup_completed_at: DateTime", + // created_at AS "created_at!: DateTime", + // updated_at AS "updated_at!: DateTime" + // FROM task_attempts + // WHERE task_id = $1 + // ORDER BY created_at DESC"#, + // task_id + // ) + // .fetch_all(pool) + // .await + // } + + /// Find task attempts by task_id with project git repo path for cleanup operations + pub async fn find_by_task_id_with_project( + pool: &SqlitePool, + task_id: Uuid, + ) -> Result, String)>, sqlx::Error> { + let records = sqlx::query!( + r#" + SELECT ta.id as "attempt_id!: Uuid", ta.container_ref, p.git_repo_path as "git_repo_path!" 
+ FROM task_attempts ta + JOIN tasks t ON ta.task_id = t.id + JOIN projects p ON t.project_id = p.id + WHERE ta.task_id = $1 + "#, + task_id + ) + .fetch_all(pool) + .await?; + + Ok(records + .into_iter() + .map(|r| (r.attempt_id, r.container_ref, r.git_repo_path)) + .collect()) + } + + pub async fn find_by_worktree_deleted( + pool: &SqlitePool, + ) -> Result, sqlx::Error> { + let records = sqlx::query!( + r#"SELECT id as "id!: Uuid", container_ref FROM task_attempts WHERE worktree_deleted = FALSE"#, + ) + .fetch_all(pool).await?; + Ok(records + .into_iter() + .filter_map(|r| r.container_ref.map(|path| (r.id, path))) + .collect()) + } + + pub async fn container_ref_exists( + pool: &SqlitePool, + container_ref: &str, + ) -> Result { + let result = sqlx::query!( + r#"SELECT EXISTS(SELECT 1 FROM task_attempts WHERE container_ref = ?) as "exists!: bool""#, + container_ref + ) + .fetch_one(pool) + .await?; + + Ok(result.exists) + } + + /// Find task attempts that are expired (24+ hours since last activity) and eligible for worktree cleanup + /// Activity includes: execution completion, task attempt updates (including worktree recreation), + /// and any attempts that are currently in progress + pub async fn find_expired_for_cleanup( + pool: &SqlitePool, + ) -> Result, sqlx::Error> { + let records = sqlx::query!( + r#" + SELECT ta.id as "attempt_id!: Uuid", ta.container_ref, p.git_repo_path as "git_repo_path!" + FROM task_attempts ta + LEFT JOIN execution_processes ep ON ta.id = ep.task_attempt_id AND ep.completed_at IS NOT NULL + JOIN tasks t ON ta.task_id = t.id + JOIN projects p ON t.project_id = p.id + WHERE ta.worktree_deleted = FALSE + -- Exclude attempts with any running processes (in progress) + AND ta.id NOT IN ( + SELECT DISTINCT ep2.task_attempt_id + FROM execution_processes ep2 + WHERE ep2.completed_at IS NULL + ) + GROUP BY ta.id, ta.container_ref, p.git_repo_path, ta.updated_at + HAVING datetime('now', '-24 hours') > datetime( + MAX( + CASE + WHEN ep.completed_at IS NOT NULL THEN ep.completed_at + ELSE ta.updated_at + END + ) + ) + ORDER BY MAX( + CASE + WHEN ep.completed_at IS NOT NULL THEN ep.completed_at + ELSE ta.updated_at + END + ) ASC + "# + ) + .fetch_all(pool) + .await?; + + Ok(records + .into_iter() + .filter_map(|r| { + r.container_ref + .map(|path| (r.attempt_id, path, r.git_repo_path)) + }) + .collect()) + } + + pub async fn create( + pool: &SqlitePool, + data: &CreateTaskAttempt, + task_id: Uuid, + ) -> Result { + let attempt_id = Uuid::new_v4(); + // let prefixed_id = format!("vibe-kanban-{}", attempt_id); + // Insert the record into the database + Ok(sqlx::query_as!( + TaskAttempt, + r#"INSERT INTO task_attempts (id, task_id, container_ref, branch, base_branch, merge_commit, base_coding_agent, pr_url, pr_number, pr_status, pr_merged_at, worktree_deleted, setup_completed_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + RETURNING id as "id!: Uuid", task_id as "task_id!: Uuid", container_ref, branch, base_branch, merge_commit, base_coding_agent as "base_coding_agent!", pr_url, pr_number, pr_status, pr_merged_at as "pr_merged_at: DateTime", worktree_deleted as "worktree_deleted!: bool", setup_completed_at as "setup_completed_at: DateTime", created_at as "created_at!: DateTime", updated_at as "updated_at!: DateTime""#, + attempt_id, + task_id, + Option::::None, // Container isn't known yet + Option::::None, // branch name isn't known yet + data.base_branch, + Option::::None, // merge_commit is always None during creation + data.base_coding_agent, + 
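Note: create (continued just below) intentionally leaves container_ref and branch NULL; the container layer fills them in later via update_container_ref and update_branch once a worktree or cloud container actually exists. A hedged sketch of that sequence, with illustrative agent name, paths, and branch names:

    // Hedged sketch: the attempt lifecycle implied by the model above.
    use db::models::task_attempt::{CreateTaskAttempt, TaskAttempt};
    use sqlx::SqlitePool;
    use uuid::Uuid;

    async fn start_attempt(pool: &SqlitePool, task_id: Uuid) -> anyhow::Result<TaskAttempt> {
        let attempt = TaskAttempt::create(
            pool,
            &CreateTaskAttempt {
                base_coding_agent: "CLAUDE_CODE".to_string(), // illustrative agent name
                base_branch: "main".to_string(),
            },
            task_id,
        )
        .await?;

        // Later, once the container service has created a worktree and branch:
        TaskAttempt::update_container_ref(pool, attempt.id, "/tmp/worktrees/vk-123").await?;
        TaskAttempt::update_branch(pool, attempt.id, "vk/attempt-123").await?;
        Ok(attempt)
    }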
Option::::None, // pr_url is None during creation + Option::::None, // pr_number is None during creation + Option::::None, // pr_status is None during creation + Option::>::None, // pr_merged_at is None during creation + false, // worktree_deleted is false during creation + Option::>::None // setup_completed_at is None during creation + ) + .fetch_one(pool) + .await?) + } + + /// Update the task attempt with the merge commit ID + pub async fn update_merge_commit( + pool: &SqlitePool, + attempt_id: Uuid, + merge_commit_id: &str, + ) -> Result<(), TaskAttemptError> { + sqlx::query!( + "UPDATE task_attempts SET merge_commit = $1, updated_at = datetime('now') WHERE id = $2", + merge_commit_id, + attempt_id + ) + .execute(pool) + .await?; + + Ok(()) + } + + pub async fn update_base_branch( + pool: &SqlitePool, + attempt_id: Uuid, + new_base_branch: &str, + ) -> Result<(), TaskAttemptError> { + sqlx::query!( + "UPDATE task_attempts SET base_branch = $1, updated_at = datetime('now') WHERE id = $2", + new_base_branch, + attempt_id, + ) + .execute(pool) + .await?; + + Ok(()) + } + + /// Update PR status for a task attempt + pub async fn update_pr_status( + pool: &SqlitePool, + attempt_id: Uuid, + pr_url: String, + pr_number: i64, + pr_status: String, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + "UPDATE task_attempts SET pr_url = $1, pr_number = $2, pr_status = $3, updated_at = datetime('now') WHERE id = $4", + pr_url, + pr_number, + pr_status, + attempt_id + ) + .execute(pool) + .await?; + + Ok(()) + } + + pub async fn get_open_prs(pool: &SqlitePool) -> Result, sqlx::Error> { + let rows = sqlx::query!( + r#"SELECT + ta.id as "attempt_id!: Uuid", + ta.task_id as "task_id!: Uuid", + ta.pr_number as "pr_number!: i64", + ta.pr_url as "pr_url!: String" + FROM task_attempts ta + WHERE ta.pr_status = 'open' AND ta.pr_number IS NOT NULL"# + ) + .fetch_all(pool) + .await?; + Ok(rows + .into_iter() + .filter_map(|r| { + PrInfo::from_task_attempt_data(r.attempt_id, r.task_id, r.pr_number, &r.pr_url).ok() + }) + .collect()) + } +} diff --git a/backend/src/models/task_template.rs b/crates/db/src/models/task_template.rs similarity index 99% rename from backend/src/models/task_template.rs rename to crates/db/src/models/task_template.rs index 08c9b903..d0c6febe 100644 --- a/backend/src/models/task_template.rs +++ b/crates/db/src/models/task_template.rs @@ -5,7 +5,6 @@ use ts_rs::TS; use uuid::Uuid; #[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)] -#[ts(export)] pub struct TaskTemplate { pub id: Uuid, pub project_id: Option, // None for global templates @@ -17,7 +16,6 @@ pub struct TaskTemplate { } #[derive(Debug, Deserialize, TS)] -#[ts(export)] pub struct CreateTaskTemplate { pub project_id: Option, pub title: String, @@ -26,7 +24,6 @@ pub struct CreateTaskTemplate { } #[derive(Debug, Deserialize, TS)] -#[ts(export)] pub struct UpdateTaskTemplate { pub title: Option, pub description: Option, diff --git a/crates/deployment/Cargo.toml b/crates/deployment/Cargo.toml new file mode 100644 index 00000000..b8a1f98c --- /dev/null +++ b/crates/deployment/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "deployment" +version = "0.0.56" +edition = "2024" + +[dependencies] +db = { path = "../db" } +utils = { path = "../utils" } +services = { path = "../services" } +executors = { path = "../executors" } +async-trait = "0.1" +thiserror = { workspace = true } +anyhow = { workspace = true } +tokio = { workspace = true } +sqlx = "0.8.6" +serde_json = { workspace = true } +tracing = { workspace = true } +git2 = 
"^0.18.1" +futures = "0.3.31" +axum = { workspace = true } + diff --git a/crates/deployment/src/lib.rs b/crates/deployment/src/lib.rs new file mode 100644 index 00000000..fdde9f02 --- /dev/null +++ b/crates/deployment/src/lib.rs @@ -0,0 +1,180 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::Error as AnyhowError; +use async_trait::async_trait; +use axum::response::sse::Event; +use db::{ + DBService, + models::{ + execution_process::{ExecutionProcess, ExecutionProcessRunReason, ExecutionProcessStatus}, + task::{Task, TaskStatus}, + task_attempt::{TaskAttempt, TaskAttemptError}, + }, +}; +use executors::executors::ExecutorError; +use futures::{StreamExt, TryStreamExt}; +use git2::Error as Git2Error; +use serde_json::Value; +use services::services::{ + analytics::AnalyticsService, + auth::{AuthError, AuthService}, + config::{Config, ConfigError}, + container::{ContainerError, ContainerService}, + events::{EventError, EventService}, + filesystem::{FilesystemError, FilesystemService}, + filesystem_watcher::FilesystemWatcherError, + git::{GitService, GitServiceError}, + pr_monitor::PrMonitorService, + sentry::SentryService, + worktree_manager::WorktreeError, +}; +use sqlx::{Error as SqlxError, types::Uuid}; +use thiserror::Error; +use tokio::sync::RwLock; +use utils::msg_store::MsgStore; + +#[derive(Debug, Error)] +pub enum DeploymentError { + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + Sqlx(#[from] SqlxError), + #[error(transparent)] + Git2(#[from] Git2Error), + #[error(transparent)] + GitServiceError(#[from] GitServiceError), + #[error(transparent)] + FilesystemWatcherError(#[from] FilesystemWatcherError), + #[error(transparent)] + TaskAttempt(#[from] TaskAttemptError), + #[error(transparent)] + Container(#[from] ContainerError), + #[error(transparent)] + Executor(#[from] ExecutorError), + #[error(transparent)] + Auth(#[from] AuthError), + #[error(transparent)] + Filesystem(#[from] FilesystemError), + #[error(transparent)] + Worktree(#[from] WorktreeError), + #[error(transparent)] + Event(#[from] EventError), + #[error(transparent)] + Config(#[from] ConfigError), + #[error(transparent)] + Other(#[from] AnyhowError), +} + +#[async_trait] +pub trait Deployment: Clone + Send + Sync + 'static { + async fn new() -> Result; + + fn user_id(&self) -> &str; + + fn shared_types() -> Vec; + + fn config(&self) -> &Arc>; + + fn sentry(&self) -> &SentryService; + + fn db(&self) -> &DBService; + + fn analytics(&self) -> &Option; + + fn container(&self) -> &impl ContainerService; + + fn auth(&self) -> &AuthService; + + fn git(&self) -> &GitService; + + fn filesystem(&self) -> &FilesystemService; + + fn msg_stores(&self) -> &Arc>>>; + + fn events(&self) -> &EventService; + + async fn update_sentry_scope(&self) -> Result<(), DeploymentError> { + let user_id = self.user_id(); + let config = self.config().read().await; + let username = config.github.username.as_deref(); + let email = config.github.primary_email.as_deref(); + + self.sentry().update_scope(user_id, username, email).await; + + Ok(()) + } + + async fn spawn_pr_monitor_service(&self) -> tokio::task::JoinHandle<()> { + let db = self.db().clone(); + let config = self.config().clone(); + PrMonitorService::spawn(db, config).await + } + + async fn track_if_analytics_allowed(&self, event_name: &str, properties: Value) { + if let Some(true) = self.config().read().await.analytics_enabled { + // Does the user allow analytics? + if let Some(analytics) = self.analytics() { + // Is analytics setup? 
+ analytics.track_event(self.user_id(), event_name, Some(properties.clone())); + } + } + } + + /// Cleanup executions marked as running in the db, call at startup + async fn cleanup_orphan_executions(&self) -> Result<(), DeploymentError> { + let running_processes = ExecutionProcess::find_running(&self.db().pool).await?; + for process in running_processes { + tracing::info!( + "Found orphaned execution process {} for task attempt {}", + process.id, + process.task_attempt_id + ); + // Update the execution process status first + if let Err(e) = ExecutionProcess::update_completion( + &self.db().pool, + process.id, + ExecutionProcessStatus::Failed, + None, // No exit code for orphaned processes + ) + .await + { + tracing::error!( + "Failed to update orphaned execution process {} status: {}", + process.id, + e + ); + continue; + } + // Process marked as failed + tracing::info!("Marked orphaned execution process {} as failed", process.id); + // Update task status to InReview for coding agent and setup script failures + if matches!( + process.run_reason, + ExecutionProcessRunReason::CodingAgent + | ExecutionProcessRunReason::SetupScript + | ExecutionProcessRunReason::CleanupScript + ) && let Ok(Some(task_attempt)) = + TaskAttempt::find_by_id(&self.db().pool, process.task_attempt_id).await + && let Ok(Some(task)) = task_attempt.parent_task(&self.db().pool).await + && let Err(e) = + Task::update_status(&self.db().pool, task.id, TaskStatus::InReview).await + { + tracing::error!( + "Failed to update task status to InReview for orphaned attempt: {}", + e + ); + } + } + Ok(()) + } + + async fn stream_events( + &self, + ) -> futures::stream::BoxStream<'static, Result> { + self.events() + .msg_store() + .history_plus_stream() + .map_ok(|m| m.to_sse_event()) + .boxed() + } +} diff --git a/crates/executors/Cargo.toml b/crates/executors/Cargo.toml new file mode 100644 index 00000000..922a57fc --- /dev/null +++ b/crates/executors/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "executors" +version = "0.0.56" +edition = "2024" + +[dependencies] +utils = { path = "../utils" } +tokio = { workspace = true } +tokio-util = { version = "0.7", features = ["io"] } +bytes = "1.0" +serde = { workspace = true } +serde_json = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +chrono = { version = "0.4", features = ["serde"] } +uuid = { version = "1.0", features = ["v4", "serde"] } +ts-rs = { workspace = true } +dirs = "5.0" +xdg = "3.0" +async-trait = "0.1" +rust-embed = "8.2" +directories = "6.0.0" +command-group = { version = "5.0", features = ["with-tokio"] } +regex = "1.11.1" +sentry-tracing = { version = "0.41.0", features = ["backtrace"] } +lazy_static = "1.4" +json-patch = "2.0" +strum_macros = "0.27.2" +thiserror = { workspace = true } +enum_dispatch = "0.3.13" +futures-io = "0.3.31" +tokio-stream = { version = "0.1.17", features = ["io-util"] } +futures = "0.3.31" +strum = "0.27.2" +bon = "3.6" +fork_stream = "0.1.0" +os_pipe = "1.2" diff --git a/crates/executors/src/actions/coding_agent_follow_up.rs b/crates/executors/src/actions/coding_agent_follow_up.rs new file mode 100644 index 00000000..f38f4491 --- /dev/null +++ b/crates/executors/src/actions/coding_agent_follow_up.rs @@ -0,0 +1,28 @@ +use std::path::PathBuf; + +use async_trait::async_trait; +use command_group::AsyncGroupChild; +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +use crate::{ + actions::Executable, + executors::{CodingAgent, ExecutorError, StandardCodingAgentExecutor}, +}; + +#[derive(Debug, 
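The Deployment trait's track_if_analytics_allowed helper above layers two opt-in checks: the user's analytics_enabled config flag and the analytics service actually being configured. A hedged call-site sketch; the handler name and event payload are illustrative, not part of this patch:

use serde_json::json;

async fn on_task_created<D: Deployment>(deployment: &D) {
    // No-op unless the user enabled analytics and an analytics service exists.
    deployment
        .track_if_analytics_allowed("task_created", json!({ "source": "api" }))
        .await;
}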
Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct CodingAgentFollowUpRequest { + pub prompt: String, + pub session_id: String, + pub profile: String, +} + +#[async_trait] +impl Executable for CodingAgentFollowUpRequest { + async fn spawn(&self, current_dir: &PathBuf) -> Result { + let executor = CodingAgent::from_profile_str(&self.profile)?; + executor + .spawn_follow_up(current_dir, &self.prompt, &self.session_id) + .await + } +} diff --git a/crates/executors/src/actions/coding_agent_initial.rs b/crates/executors/src/actions/coding_agent_initial.rs new file mode 100644 index 00000000..740a6132 --- /dev/null +++ b/crates/executors/src/actions/coding_agent_initial.rs @@ -0,0 +1,25 @@ +use std::path::PathBuf; + +use async_trait::async_trait; +use command_group::AsyncGroupChild; +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +use crate::{ + actions::Executable, + executors::{CodingAgent, ExecutorError, StandardCodingAgentExecutor}, +}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct CodingAgentInitialRequest { + pub prompt: String, + pub profile: String, +} + +#[async_trait] +impl Executable for CodingAgentInitialRequest { + async fn spawn(&self, current_dir: &PathBuf) -> Result { + let executor = CodingAgent::from_profile_str(&self.profile)?; + executor.spawn(current_dir, &self.prompt).await + } +} diff --git a/crates/executors/src/actions/mod.rs b/crates/executors/src/actions/mod.rs new file mode 100644 index 00000000..fe57c5ce --- /dev/null +++ b/crates/executors/src/actions/mod.rs @@ -0,0 +1,77 @@ +use std::path::PathBuf; + +use async_trait::async_trait; +use command_group::AsyncGroupChild; +use enum_dispatch::enum_dispatch; +use serde::{Deserialize, Serialize}; +use strum_macros::{Display, EnumDiscriminants}; +use ts_rs::TS; + +use crate::{ + actions::{ + coding_agent_follow_up::CodingAgentFollowUpRequest, + coding_agent_initial::CodingAgentInitialRequest, script::ScriptRequest, + }, + executors::ExecutorError, +}; +pub mod coding_agent_follow_up; +pub mod coding_agent_initial; +pub mod script; + +#[enum_dispatch] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS, EnumDiscriminants, Display)] +#[serde(tag = "type")] +#[strum_discriminants(name(ExecutorActionKind), derive(Display))] +pub enum ExecutorActionType { + CodingAgentInitialRequest, + CodingAgentFollowUpRequest, + ScriptRequest, +} + +impl ExecutorActionType { + /// Get the action type as a string (matches the JSON "type" field) + pub fn action_type(&self) -> &'static str { + match self { + ExecutorActionType::CodingAgentInitialRequest(_) => "CodingAgentInitialRequest", + ExecutorActionType::CodingAgentFollowUpRequest(_) => "CodingAgentFollowUpRequest", + ExecutorActionType::ScriptRequest(_) => "ScriptRequest", + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct ExecutorAction { + pub typ: ExecutorActionType, + pub next_action: Option>, +} + +impl ExecutorAction { + pub fn new(typ: ExecutorActionType, next_action: Option>) -> Self { + Self { typ, next_action } + } + + pub fn typ(&self) -> &ExecutorActionType { + &self.typ + } + + pub fn next_action(&self) -> Option<&Box> { + self.next_action.as_ref() + } + + pub fn action_type(&self) -> &'static str { + self.typ.action_type() + } +} + +#[async_trait] +#[enum_dispatch(ExecutorActionType)] +pub trait Executable { + async fn spawn(&self, current_dir: &PathBuf) -> Result; +} + +#[async_trait] +impl Executable for ExecutorAction { + async fn spawn(&self, current_dir: &PathBuf) -> Result { + 
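Spawning an ExecutorAction delegates to its wrapped request (the statement just below), and enum_dispatch routes that call to the matching variant's Executable impl; next_action lets callers queue a follow-on step such as a cleanup script after a coding-agent run. A hedged construction sketch using the types above; the prompt, profile, and script values are illustrative:

use crate::actions::{
    coding_agent_initial::CodingAgentInitialRequest,
    script::{ScriptContext, ScriptRequest, ScriptRequestLanguage},
    ExecutorAction, ExecutorActionType,
};

fn example_action_chain() -> ExecutorAction {
    // Run a cleanup script once the coding agent finishes.
    let cleanup = ExecutorAction::new(
        ExecutorActionType::ScriptRequest(ScriptRequest {
            script: "cargo fmt".to_string(),
            language: ScriptRequestLanguage::Bash,
            context: ScriptContext::CleanupScript,
        }),
        None,
    );
    ExecutorAction::new(
        ExecutorActionType::CodingAgentInitialRequest(CodingAgentInitialRequest {
            prompt: "Fix the failing test".to_string(),
            profile: "claude-code".to_string(),
        }),
        Some(Box::new(cleanup)),
    )
}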
self.typ.spawn(current_dir).await + } +} diff --git a/crates/executors/src/actions/script.rs b/crates/executors/src/actions/script.rs new file mode 100644 index 00000000..6c855860 --- /dev/null +++ b/crates/executors/src/actions/script.rs @@ -0,0 +1,48 @@ +use std::path::PathBuf; + +use async_trait::async_trait; +use command_group::{AsyncCommandGroup, AsyncGroupChild}; +use serde::{Deserialize, Serialize}; +use tokio::process::Command; +use ts_rs::TS; +use utils::shell::get_shell_command; + +use crate::{actions::Executable, executors::ExecutorError}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub enum ScriptRequestLanguage { + Bash, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub enum ScriptContext { + SetupScript, + CleanupScript, + DevServer, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct ScriptRequest { + pub script: String, + pub language: ScriptRequestLanguage, + pub context: ScriptContext, +} + +#[async_trait] +impl Executable for ScriptRequest { + async fn spawn(&self, current_dir: &PathBuf) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .arg(shell_arg) + .arg(&self.script) + .current_dir(current_dir); + + let child = command.group_spawn()?; + + Ok(child) + } +} diff --git a/crates/executors/src/command.rs b/crates/executors/src/command.rs new file mode 100644 index 00000000..f8263e8f --- /dev/null +++ b/crates/executors/src/command.rs @@ -0,0 +1,337 @@ +use std::{ + collections::{HashMap, HashSet}, + fs, + sync::OnceLock, +}; + +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +use crate::executors::BaseCodingAgent; + +static PROFILES_CACHE: OnceLock = OnceLock::new(); + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct CommandBuilder { + /// Base executable command (e.g., "npx -y @anthropic-ai/claude-code@latest") + pub base: String, + /// Optional parameters to append to the base command + pub params: Option>, +} + +impl CommandBuilder { + pub fn new>(base: S) -> Self { + Self { + base: base.into(), + params: None, + } + } + + pub fn params(mut self, params: I) -> Self + where + I: IntoIterator, + I::Item: Into, + { + self.params = Some(params.into_iter().map(|p| p.into()).collect()); + self + } + + pub fn build_initial(&self) -> String { + let mut parts = vec![self.base.clone()]; + if let Some(ref params) = self.params { + parts.extend(params.clone()); + } + parts.join(" ") + } + + pub fn build_follow_up(&self, additional_args: &[String]) -> String { + let mut parts = vec![self.base.clone()]; + if let Some(ref params) = self.params { + parts.extend(params.clone()); + } + parts.extend(additional_args.iter().cloned()); + parts.join(" ") + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct AgentProfile { + /// Unique identifier for this profile (e.g., "MyClaudeCode", "FastAmp") + pub label: String, + /// The executor type this profile configures + pub agent: BaseCodingAgent, + /// Command builder configuration + pub command: CommandBuilder, +} + +impl AgentProfile { + pub fn claude_code() -> Self { + Self { + label: "claude-code".to_string(), + agent: BaseCodingAgent::ClaudeCode, + command: CommandBuilder::new("npx -y @anthropic-ai/claude-code@latest").params(vec![ + "-p", + "--dangerously-skip-permissions", + "--verbose", + "--output-format=stream-json", + ]), + 
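CommandBuilder composes the final shell string by joining base and params with spaces, and build_follow_up appends extra arguments on top. For the claude-code profile defined here, the result looks like the following sketch (the session id is illustrative); the test_command_builder test later in this patch exercises the same behaviour on a toy command:

fn claude_code_command_strings() {
    let cmd = AgentProfile::claude_code().command;
    assert_eq!(
        cmd.build_initial(),
        "npx -y @anthropic-ai/claude-code@latest -p --dangerously-skip-permissions --verbose --output-format=stream-json"
    );
    assert_eq!(
        cmd.build_follow_up(&["--resume".to_string(), "abc123".to_string()]),
        "npx -y @anthropic-ai/claude-code@latest -p --dangerously-skip-permissions --verbose --output-format=stream-json --resume abc123"
    );
}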
} + } + + pub fn claude_code_plan() -> Self { + Self { + label: "claude-code-plan".to_string(), + agent: BaseCodingAgent::ClaudeCode, + command: CommandBuilder::new("npx -y @anthropic-ai/claude-code@latest").params(vec![ + "-p", + "--permission-mode=plan", + "--verbose", + "--output-format=stream-json", + ]), + } + } + + pub fn claude_code_router() -> Self { + Self { + label: "claude-code-router".to_string(), + agent: BaseCodingAgent::ClaudeCode, + command: CommandBuilder::new("npx -y @musistudio/claude-code-router code").params( + vec![ + "-p", + "--dangerously-skip-permissions", + "--verbose", + "--output-format=stream-json", + ], + ), + } + } + + pub fn amp() -> Self { + Self { + label: "amp".to_string(), + agent: BaseCodingAgent::Amp, + command: CommandBuilder::new("npx -y @sourcegraph/amp@0.0.1752148945-gd8844f") + .params(vec!["--format=jsonl"]), + } + } + + pub fn gemini() -> Self { + Self { + label: "gemini".to_string(), + agent: BaseCodingAgent::Gemini, + command: CommandBuilder::new("npx -y @google/gemini-cli@latest").params(vec!["--yolo"]), + } + } + + pub fn codex() -> Self { + Self { + label: "codex".to_string(), + agent: BaseCodingAgent::Codex, + command: CommandBuilder::new("npx -y @openai/codex exec").params(vec![ + "--json", + "--dangerously-bypass-approvals-and-sandbox", + "--skip-git-repo-check", + ]), + } + } + + pub fn opencode() -> Self { + Self { + label: "opencode".to_string(), + agent: BaseCodingAgent::Opencode, + command: CommandBuilder::new("npx -y opencode-ai@latest run") + .params(vec!["--print-logs"]), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct AgentProfiles { + pub profiles: Vec, +} + +impl AgentProfiles { + pub fn get_cached() -> &'static AgentProfiles { + PROFILES_CACHE.get_or_init(Self::load) + } + + fn load() -> Self { + let mut profiles = Self::from_defaults(); + + if let Err(e) = profiles.extend_from_file() { + if e.kind() != std::io::ErrorKind::NotFound { + tracing::warn!("Failed to load additional profiles: {}", e); + } + } else { + tracing::info!("Loaded additional profiles from profiles.json"); + } + + profiles + } + + pub fn from_defaults() -> Self { + Self { + profiles: vec![ + AgentProfile::claude_code(), + AgentProfile::claude_code_plan(), + AgentProfile::claude_code_router(), + AgentProfile::amp(), + AgentProfile::gemini(), + AgentProfile::codex(), + AgentProfile::opencode(), + ], + } + } + + pub fn extend_from_file(&mut self) -> Result<(), std::io::Error> { + let profiles_path = utils::assets::profiles_path(); + if !profiles_path.exists() { + return Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("Profiles file not found at {profiles_path:?}"), + )); + } + + let content = fs::read_to_string(&profiles_path)?; + + let user_profiles: Self = serde_json::from_str(&content).map_err(|e| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Failed to parse profiles.json: {e}"), + ) + })?; + + let default_labels: HashSet = + self.profiles.iter().map(|p| p.label.clone()).collect(); + + // Only add user profiles with unique labels + for user_profile in user_profiles.profiles { + if !default_labels.contains(&user_profile.label) { + self.profiles.push(user_profile); + } else { + tracing::debug!( + "Skipping user profile '{}' - default with same label exists", + user_profile.label + ); + } + } + + Ok(()) + } + + pub fn get_profile(&self, label: &str) -> Option<&AgentProfile> { + self.profiles.iter().find(|p| p.label == label) + } + + pub fn get_profiles_for_agent(&self, agent: 
&BaseCodingAgent) -> Vec<&AgentProfile> { + self.profiles.iter().filter(|p| &p.agent == agent).collect() + } + + pub fn to_map(&self) -> HashMap { + self.profiles + .iter() + .map(|p| (p.label.clone(), p.clone())) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_command_builder() { + let builder = CommandBuilder::new("npx claude").params(vec!["--verbose", "--json"]); + assert_eq!(builder.build_initial(), "npx claude --verbose --json"); + assert_eq!( + builder.build_follow_up(&["--resume".to_string(), "session123".to_string()]), + "npx claude --verbose --json --resume session123" + ); + } + + #[test] + fn test_default_profiles() { + let profiles = AgentProfiles::from_defaults(); + assert!(profiles.profiles.len() == 7); + + let claude_profile = profiles.get_profile("claude-code").unwrap(); + assert_eq!(claude_profile.agent, BaseCodingAgent::ClaudeCode); + assert!( + claude_profile + .command + .build_initial() + .contains("claude-code") + ); + assert!( + claude_profile + .command + .build_initial() + .contains("--dangerously-skip-permissions") + ); + + let amp_profile = profiles.get_profile("amp").unwrap(); + assert_eq!(amp_profile.agent, BaseCodingAgent::Amp); + assert!(amp_profile.command.build_initial().contains("amp")); + assert!( + amp_profile + .command + .build_initial() + .contains("--format=jsonl") + ); + + let gemini_profile = profiles.get_profile("gemini").unwrap(); + assert_eq!(gemini_profile.agent, BaseCodingAgent::Gemini); + assert!(gemini_profile.command.build_initial().contains("gemini")); + assert!(gemini_profile.command.build_initial().contains("--yolo")); + + let codex_profile = profiles.get_profile("codex").unwrap(); + assert_eq!(codex_profile.agent, BaseCodingAgent::Codex); + assert!(codex_profile.command.build_initial().contains("codex")); + assert!(codex_profile.command.build_initial().contains("--json")); + + let opencode_profile = profiles.get_profile("opencode").unwrap(); + assert_eq!(opencode_profile.agent, BaseCodingAgent::Opencode); + assert!( + opencode_profile + .command + .build_initial() + .contains("opencode-ai") + ); + assert!(opencode_profile.command.build_initial().contains("run")); + assert!( + opencode_profile + .command + .build_initial() + .contains("--print-logs") + ); + + let claude_code_router_profile = profiles.get_profile("claude-code-router").unwrap(); + assert_eq!( + claude_code_router_profile.agent, + BaseCodingAgent::ClaudeCode + ); + assert!( + claude_code_router_profile + .command + .build_initial() + .contains("@musistudio/claude-code-router") + ); + assert!( + claude_code_router_profile + .command + .build_initial() + .contains("--dangerously-skip-permissions") + ); + } + + #[test] + fn test_profiles_for_agent() { + let profiles = AgentProfiles::from_defaults(); + + let claude_profiles = profiles.get_profiles_for_agent(&BaseCodingAgent::ClaudeCode); + assert_eq!(claude_profiles.len(), 3); // default, plan mode, and claude-code-router + + let amp_profiles = profiles.get_profiles_for_agent(&BaseCodingAgent::Amp); + assert_eq!(amp_profiles.len(), 1); + } +} diff --git a/crates/executors/src/executors/amp.rs b/crates/executors/src/executors/amp.rs new file mode 100644 index 00000000..ce481528 --- /dev/null +++ b/crates/executors/src/executors/amp.rs @@ -0,0 +1,604 @@ +use std::{collections::HashMap, path::PathBuf, process::Stdio, sync::Arc, time::Duration}; + +use async_trait::async_trait; +use command_group::{AsyncCommandGroup, AsyncGroupChild}; +use futures::StreamExt; +use json_patch::Patch; +use 
serde::{Deserialize, Serialize}; +use tokio::{io::AsyncWriteExt, process::Command}; +use ts_rs::TS; +use utils::{ + log_msg::LogMsg, msg_store::MsgStore, path::make_path_relative, shell::get_shell_command, +}; + +use crate::{ + command::{AgentProfiles, CommandBuilder}, + executors::{ExecutorError, StandardCodingAgentExecutor}, + logs::{ + ActionType, NormalizedEntry, NormalizedEntryType, + stderr_processor::normalize_stderr_logs, + utils::{EntryIndexProvider, patch::ConversationPatch}, + }, +}; + +/// An executor that uses Amp to process tasks +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct Amp { + command_builder: CommandBuilder, +} + +impl Default for Amp { + fn default() -> Self { + Self::new() + } +} + +#[async_trait] +impl StandardCodingAgentExecutor for Amp { + async fn spawn( + &self, + current_dir: &PathBuf, + prompt: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let amp_command = self.command_builder.build_initial(); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) // <-- open a pipe + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(amp_command); + + let mut child = command.group_spawn()?; + + // feed the prompt in, then close the pipe so `amp` sees EOF + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await.unwrap(); + stdin.shutdown().await.unwrap(); // or `drop(stdin);` + } + + Ok(child) + } + + async fn spawn_follow_up( + &self, + current_dir: &PathBuf, + prompt: &str, + session_id: &str, + ) -> Result { + // Use shell command for cross-platform compatibility + let (shell_cmd, shell_arg) = get_shell_command(); + let amp_command = self.command_builder.build_follow_up(&[ + "threads".to_string(), + "continue".to_string(), + session_id.to_string(), + ]); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(&_command); + + let mut child = command.group_spawn()?; + + // Feed the prompt in, then close the pipe so amp sees EOF + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + fn normalize_logs(&self, raw_logs_msg_store: Arc, current_dir: &PathBuf) { + let entry_index_provider = EntryIndexProvider::new(); + + // Process stderr logs using the standard stderr processor + normalize_stderr_logs(raw_logs_msg_store.clone(), entry_index_provider.clone()); + + // Process stdout logs (Amp's JSON output) + let current_dir = current_dir.clone(); + tokio::spawn(async move { + let mut s = raw_logs_msg_store.history_plus_stream(); + let mut buf = String::new(); + // 1 amp message id = multiple patch entry ids + let mut seen_amp_message_ids: HashMap> = HashMap::new(); + while let Some(Ok(m)) = s.next().await { + let chunk = match m { + LogMsg::Stdout(x) => x, + LogMsg::JsonPatch(_) | LogMsg::SessionId(_) | LogMsg::Stderr(_) => { + continue; + } + LogMsg::Finished => break, + }; + buf.push_str(&chunk); + + // Print complete lines; keep the trailing partial (if any) + for line in buf + .split_inclusive('\n') + .filter(|l| l.ends_with('\n')) + .map(str::to_owned) + .collect::>() + { + let trimmed = line.trim(); + match serde_json::from_str(trimmed) { + Ok(amp_json) => { + match amp_json { + AmpJson::Messages { + messages, + tool_results: _, 
+ } => { + for (amp_message_id, message) in messages { + let role = &message.role; + + for (content_index, content_item) in + message.content.iter().enumerate() + { + let mut has_patch_ids = + seen_amp_message_ids.get_mut(&_message_id); + + if let Some(entry) = content_item.to_normalized_entry( + role, + &message, + ¤t_dir.to_string_lossy(), + ) { + let patch: Patch = match &mut has_patch_ids { + None => { + let new_id = entry_index_provider.next(); + seen_amp_message_ids + .entry(amp_message_id) + .or_default() + .push(new_id); + ConversationPatch::add_normalized_entry( + new_id, entry, + ) + } + Some(patch_ids) => { + match patch_ids.get(content_index) { + Some(patch_id) => { + ConversationPatch::replace( + *patch_id, entry, + ) + } + None => { + let new_id = + entry_index_provider.next(); + patch_ids.push(new_id); + ConversationPatch::add_normalized_entry(new_id, entry) + } + } + } + }; + + raw_logs_msg_store.push_patch(patch); + // TODO: debug this race condition + tokio::time::sleep(Duration::from_millis(1)).await; + } + } + } + } + AmpJson::Initial { thread_id } => { + if let Some(thread_id) = thread_id { + raw_logs_msg_store.push_session_id(thread_id); + } + } + _ => {} + } + } + Err(_) => { + let trimmed = line.trim(); + if !trimmed.is_empty() { + let entry = NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: format!("Raw output: {trimmed}"), + metadata: None, + }; + + let new_id = entry_index_provider.next(); + let patch = ConversationPatch::add_normalized_entry(new_id, entry); + raw_logs_msg_store.push_patch(patch); + // TODO: debug this race condition + tokio::time::sleep(Duration::from_millis(1)).await; + } + } + }; + } + buf = buf.rsplit('\n').next().unwrap_or("").to_owned(); + } + if !buf.is_empty() { + print!("{buf}"); + } + }); + } +} + +impl Amp { + pub fn new() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("amp") + .expect("Default amp profile should exist"); + + Self::with_command_builder(profile.command.clone()) + } + + pub fn with_command_builder(command_builder: CommandBuilder) -> Self { + Self { command_builder } + } +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +#[serde(tag = "type")] +pub enum AmpJson { + #[serde(rename = "messages")] + Messages { + messages: Vec<(usize, AmpMessage)>, + #[serde(rename = "toolResults")] + tool_results: Vec, + }, + #[serde(rename = "initial")] + Initial { + #[serde(rename = "threadID")] + thread_id: Option, + }, + #[serde(rename = "token-usage")] + TokenUsage(serde_json::Value), + #[serde(rename = "state")] + State { state: String }, + #[serde(rename = "shutdown")] + Shutdown, + #[serde(rename = "tool-status")] + ToolStatus(serde_json::Value), +} + +impl AmpJson { + pub fn should_process(&self) -> bool { + matches!(self, AmpJson::Messages { .. }) + } + + pub fn extract_session_id(&self) -> Option { + match self { + AmpJson::Initial { thread_id } => thread_id.clone(), + _ => None, + } + } + + pub fn has_streaming_content(&self) -> bool { + match self { + AmpJson::Messages { messages, .. 
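Both the Amp and Claude normalizers use the same chunk-to-lines discipline seen in the loop above: append each stdout chunk to a buffer, process only complete lines, and carry the trailing partial line over to the next chunk. A standalone sketch of that pattern; the function name is illustrative:

fn drain_complete_lines(buf: &mut String, chunk: &str) -> Vec<String> {
    buf.push_str(chunk);
    let complete: Vec<String> = buf
        .split_inclusive('\n')
        .filter(|l| l.ends_with('\n'))
        .map(|l| l.trim_end_matches('\n').to_string())
        .collect();
    // Whatever follows the last newline stays buffered for the next chunk.
    *buf = buf.rsplit('\n').next().unwrap_or("").to_string();
    complete
}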
} => messages.iter().any(|(_index, message)| { + if let Some(state) = &message.state { + if let Some(state_type) = state.get("type").and_then(|t| t.as_str()) { + state_type == "streaming" + } else { + false + } + } else { + false + } + }), + _ => false, + } + } +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +pub struct AmpMessage { + pub role: String, + pub content: Vec, + pub state: Option, + pub meta: Option, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +pub struct AmpMeta { + #[serde(rename = "sentAt")] + pub sent_at: u64, +} + +/// Tool data combining name and input +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +#[serde(tag = "name", content = "input")] +pub enum AmpToolData { + #[serde(alias = "read", alias = "read_file")] + Read { + #[serde(alias = "file_path")] + path: String, + }, + #[serde(alias = "create_file")] + CreateFile { + #[serde(alias = "file_path")] + path: String, + #[serde(alias = "file_content")] + content: Option, + }, + #[serde(alias = "edit_file", alias = "edit", alias = "undo_edit")] + EditFile { + #[serde(alias = "file_path")] + path: String, + #[serde(default)] + old_str: Option, + #[serde(default)] + new_str: Option, + }, + #[serde(alias = "bash")] + Bash { + #[serde(alias = "cmd")] + command: String, + }, + #[serde(alias = "grep", alias = "codebase_search_agent", alias = "Grep")] + Search { + #[serde(alias = "query")] + pattern: String, + #[serde(default)] + include: Option, + #[serde(default)] + path: Option, + }, + #[serde(alias = "read_web_page")] + ReadWebPage { url: String }, + #[serde(alias = "web_search")] + WebSearch { query: String }, + #[serde(alias = "task", alias = "Task")] + Task { + #[serde(alias = "prompt")] + description: String, + }, + #[serde(alias = "glob")] + Glob { + pattern: String, + #[serde(default)] + path: Option, + }, + #[serde(alias = "ls", alias = "list_directory")] + List { + #[serde(default)] + path: Option, + }, + #[serde(alias = "todo_write", alias = "todo_read")] + Todo { + #[serde(default)] + todos: Option>, + }, + /// Generic fallback for unknown tools + #[serde(untagged)] + Unknown { + #[serde(flatten)] + data: std::collections::HashMap, + }, +} + +impl AmpToolData { + pub fn get_name(&self) -> &str { + match self { + AmpToolData::Read { .. } => "read", + AmpToolData::CreateFile { .. } => "create_file", + AmpToolData::EditFile { .. } => "edit_file", + AmpToolData::Bash { .. } => "bash", + AmpToolData::Search { .. } => "search", + AmpToolData::ReadWebPage { .. } => "read_web_page", + AmpToolData::WebSearch { .. } => "web_search", + AmpToolData::Task { .. } => "task", + AmpToolData::Glob { .. } => "glob", + AmpToolData::List { .. } => "list", + AmpToolData::Todo { .. 
} => "todo", + AmpToolData::Unknown { data } => data + .get("name") + .and_then(|v| v.as_str()) + .unwrap_or("unknown"), + } + } +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +pub struct TodoItem { + pub content: String, + pub status: String, + #[serde(default)] + pub priority: Option, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] +#[serde(tag = "type")] +pub enum AmpContentItem { + #[serde(rename = "text")] + Text { text: String }, + #[serde(rename = "thinking")] + Thinking { thinking: String }, + #[serde(rename = "tool_use")] + ToolUse { + id: String, + #[serde(flatten)] + tool_data: AmpToolData, + }, + #[serde(rename = "tool_result")] + ToolResult { + #[serde(rename = "toolUseID")] + tool_use_id: String, + run: serde_json::Value, + }, +} + +impl AmpContentItem { + pub fn to_normalized_entry( + &self, + role: &str, + message: &AmpMessage, + worktree_path: &str, + ) -> Option { + use serde_json::Value; + + let timestamp = message.meta.as_ref().map(|meta| meta.sent_at.to_string()); + + match self { + AmpContentItem::Text { text } => { + let entry_type = match role { + "user" => NormalizedEntryType::UserMessage, + "assistant" => NormalizedEntryType::AssistantMessage, + _ => return None, + }; + Some(NormalizedEntry { + timestamp, + entry_type, + content: text.clone(), + metadata: Some(serde_json::to_value(self).unwrap_or(Value::Null)), + }) + } + AmpContentItem::Thinking { thinking } => Some(NormalizedEntry { + timestamp, + entry_type: NormalizedEntryType::Thinking, + content: thinking.clone(), + metadata: Some(serde_json::to_value(self).unwrap_or(Value::Null)), + }), + AmpContentItem::ToolUse { tool_data, .. } => { + let name = tool_data.get_name(); + let input = tool_data; + let action_type = Self::extract_action_type(name, input, worktree_path); + let content = + Self::generate_concise_content(name, input, &action_type, worktree_path); + + Some(NormalizedEntry { + timestamp, + entry_type: NormalizedEntryType::ToolUse { + tool_name: name.to_string(), + action_type, + }, + content, + metadata: Some(serde_json::to_value(self).unwrap_or(Value::Null)), + }) + } + AmpContentItem::ToolResult { .. } => None, + } + } + + fn extract_action_type( + tool_name: &str, + input: &AmpToolData, + worktree_path: &str, + ) -> ActionType { + match input { + AmpToolData::Read { path, .. } => ActionType::FileRead { + path: make_path_relative(path, worktree_path), + }, + AmpToolData::CreateFile { path, .. } => ActionType::FileWrite { + path: make_path_relative(path, worktree_path), + }, + AmpToolData::EditFile { path, .. } => ActionType::FileWrite { + path: make_path_relative(path, worktree_path), + }, + AmpToolData::Bash { command, .. } => ActionType::CommandRun { + command: command.clone(), + }, + AmpToolData::Search { pattern, .. } => ActionType::Search { + query: pattern.clone(), + }, + AmpToolData::ReadWebPage { url, .. } => ActionType::WebFetch { url: url.clone() }, + AmpToolData::WebSearch { query, .. } => ActionType::WebFetch { url: query.clone() }, + AmpToolData::Task { description, .. } => ActionType::TaskCreate { + description: description.clone(), + }, + AmpToolData::Glob { .. } => ActionType::Other { + description: "File pattern search".to_string(), + }, + AmpToolData::List { .. } => ActionType::Other { + description: "List directory".to_string(), + }, + AmpToolData::Todo { .. } => ActionType::Other { + description: "Manage TODO list".to_string(), + }, + AmpToolData::Unknown { .. 
} => ActionType::Other { + description: format!("Tool: {tool_name}"), + }, + } + } + + fn generate_concise_content( + tool_name: &str, + input: &AmpToolData, + action_type: &ActionType, + worktree_path: &str, + ) -> String { + match action_type { + ActionType::FileRead { path } => format!("`{path}`"), + ActionType::FileWrite { path } => format!("`{path}`"), + ActionType::CommandRun { command } => format!("`{command}`"), + ActionType::Search { query } => format!("`{query}`"), + ActionType::WebFetch { url } => format!("`{url}`"), + ActionType::PlanPresentation { plan } => format!("Plan Presentation: `{plan}`"), + ActionType::TaskCreate { description } => description.clone(), + ActionType::Other { description: _ } => { + // For other tools, try to extract key information or fall back to tool name + match input { + AmpToolData::Todo { todos, .. } => { + if let Some(todos) = todos { + let mut todo_items = Vec::new(); + for todo in todos { + let emoji = match todo.status.as_str() { + "completed" => "✅", + "in_progress" | "in-progress" => "🔄", + "pending" | "todo" => "⏳", + _ => "📝", + }; + let priority = todo.priority.as_deref().unwrap_or("medium"); + todo_items + .push(format!("{} {} ({})", emoji, todo.content, priority)); + } + if !todo_items.is_empty() { + format!("TODO List:\n{}", todo_items.join("\n")) + } else { + "Managing TODO list".to_string() + } + } else { + "Managing TODO list".to_string() + } + } + AmpToolData::List { path, .. } => { + if let Some(path) = path { + let relative_path = make_path_relative(path, worktree_path); + if relative_path.is_empty() { + "List directory".to_string() + } else { + format!("List directory: `{relative_path}`") + } + } else { + "List directory".to_string() + } + } + AmpToolData::Glob { pattern, path, .. } => { + if let Some(path) = path { + let relative_path = make_path_relative(path, worktree_path); + format!("Find files: `{pattern}` in `{relative_path}`") + } else { + format!("Find files: `{pattern}`") + } + } + AmpToolData::Search { + pattern, + include, + path, + .. 
+ } => { + let mut parts = vec![format!("Search: `{}`", pattern)]; + if let Some(include) = include { + parts.push(format!("in `{include}`")); + } + if let Some(path) = path { + let relative_path = make_path_relative(path, worktree_path); + parts.push(format!("at `{relative_path}`")); + } + parts.join(" ") + } + _ => tool_name.to_string(), + } + } + } + } +} diff --git a/crates/executors/src/executors/claude.rs b/crates/executors/src/executors/claude.rs new file mode 100644 index 00000000..5caa20e5 --- /dev/null +++ b/crates/executors/src/executors/claude.rs @@ -0,0 +1,1079 @@ +use std::{path::PathBuf, process::Stdio, sync::Arc}; + +use async_trait::async_trait; +use command_group::{AsyncCommandGroup, AsyncGroupChild}; +use futures::StreamExt; +use serde::{Deserialize, Serialize}; +use tokio::{io::AsyncWriteExt, process::Command}; +use ts_rs::TS; +use utils::{ + log_msg::LogMsg, msg_store::MsgStore, path::make_path_relative, shell::get_shell_command, +}; + +use crate::{ + command::{AgentProfiles, CommandBuilder}, + executors::{ExecutorError, StandardCodingAgentExecutor}, + logs::{ + ActionType, NormalizedEntry, NormalizedEntryType, + stderr_processor::normalize_stderr_logs, + utils::{EntryIndexProvider, patch::ConversationPatch}, + }, +}; + +/// An executor that uses Claude CLI to process tasks +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct ClaudeCode { + executor_type: String, + command_builder: CommandBuilder, +} + +impl Default for ClaudeCode { + fn default() -> Self { + Self::new() + } +} + +#[async_trait] +impl StandardCodingAgentExecutor for ClaudeCode { + async fn spawn( + &self, + current_dir: &PathBuf, + prompt: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let claude_command = self.command_builder.build_initial(); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(&claude_command); + + let mut child = command.group_spawn()?; + + // Feed the prompt in, then close the pipe so Claude sees EOF + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + async fn spawn_follow_up( + &self, + current_dir: &PathBuf, + prompt: &str, + session_id: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + // Build follow-up command with --resume {session_id} + let claude_command = self + .command_builder + .build_follow_up(&["--resume".to_string(), session_id.to_string()]); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(&claude_command); + + let mut child = command.group_spawn()?; + + // Feed the followup prompt in, then close the pipe + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + fn normalize_logs(&self, msg_store: Arc, current_dir: &PathBuf) { + let entry_index_provider = EntryIndexProvider::new(); + + // Process stdout logs (Claude's JSON output) + ClaudeLogProcessor::process_logs( + self, + msg_store.clone(), + current_dir, + entry_index_provider.clone(), + ); + + // Process stderr logs using the standard stderr processor + normalize_stderr_logs(msg_store, entry_index_provider); + } +} + +impl 
ClaudeCode { + /// Create a new Claude executor with default settings + pub fn new() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("claude-code") + .expect("Default claude-code profile should exist"); + + Self::with_command_builder(profile.label.clone(), profile.command.clone()) + } + + /// Create a new Claude executor in plan mode with watchkill script + pub fn new_plan_mode() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("claude-code-plan") + .expect("Default claude-code-plan profile should exist"); + + let base_command = profile.command.build_initial(); + // Note: We'll need to update this to handle watchkill script properly + // For now, we'll create a custom command builder + let watchkill_command = create_watchkill_script(&base_command); + Self { + executor_type: "ClaudePlan".to_string(), + command_builder: CommandBuilder::new(watchkill_command), + } + } + + /// Create a new Claude executor using claude-code-router + pub fn new_claude_code_router() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("claude-code-router") + .expect("Default claude-code-router profile should exist"); + + Self::with_command_builder(profile.label.clone(), profile.command.clone()) + } + + /// Create a new Claude executor with custom command builder + pub fn with_command_builder(executor_type: String, command_builder: CommandBuilder) -> Self { + Self { + executor_type, + command_builder, + } + } +} + +fn create_watchkill_script(command: &str) -> String { + let claude_plan_stop_indicator = concat!("Exit ", "plan mode?"); // Use concat!() as a workaround to avoid killing plan mode when this file is read. + format!( + r#"#!/usr/bin/env bash +set -euo pipefail + +word="{claude_plan_stop_indicator}" +command="{command}" + +exit_code=0 +while IFS= read -r line; do + printf '%s\n' "$line" + if [[ $line == *"$word"* ]]; then + exit 0 + fi +done < <($command <&0 2>&1) + +exit_code=${{PIPESTATUS[0]}} +exit "$exit_code" +"# + ) +} + +/// Handles log processing and interpretation for Claude executor +struct ClaudeLogProcessor { + model_name: Option, +} + +impl ClaudeLogProcessor { + fn new() -> Self { + Self { model_name: None } + } + + /// Process raw logs and convert them to normalized entries with patches + fn process_logs( + _executor: &ClaudeCode, + msg_store: Arc, + current_dir: &PathBuf, + entry_index_provider: EntryIndexProvider, + ) { + let current_dir_clone = current_dir.clone(); + tokio::spawn(async move { + let mut stream = msg_store.history_plus_stream(); + let mut buffer = String::new(); + let worktree_path = current_dir_clone.to_string_lossy().to_string(); + let mut session_id_extracted = false; + let mut processor = Self::new(); + + while let Some(Ok(msg)) = stream.next().await { + let chunk = match msg { + LogMsg::Stdout(x) => x, + LogMsg::JsonPatch(_) | LogMsg::SessionId(_) | LogMsg::Stderr(_) => continue, + LogMsg::Finished => break, + }; + + buffer.push_str(&chunk); + + // Process complete JSON lines + for line in buffer + .split_inclusive('\n') + .filter(|l| l.ends_with('\n')) + .map(str::to_owned) + .collect::>() + { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + // Filter out claude-code-router service messages + if trimmed.starts_with("Service not running, starting service") + || trimmed + .contains("claude code router service has been successfully stopped") + { + continue; + } + + match serde_json::from_str::(trimmed) { + Ok(claude_json) => { + // Extract session ID if present + if !session_id_extracted 
+ && let Some(session_id) = Self::extract_session_id(&claude_json) + { + msg_store.push_session_id(session_id); + session_id_extracted = true; + } + + // Convert to normalized entries and create patches + for entry in + processor.to_normalized_entries(&claude_json, &worktree_path) + { + let patch_id = entry_index_provider.next(); + let patch = + ConversationPatch::add_normalized_entry(patch_id, entry); + msg_store.push_patch(patch); + } + } + Err(_) => { + // Handle non-JSON output as raw system message + if !trimmed.is_empty() { + let entry = NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: format!("Raw output: {trimmed}"), + metadata: None, + }; + + let patch_id = entry_index_provider.next(); + let patch = + ConversationPatch::add_normalized_entry(patch_id, entry); + msg_store.push_patch(patch); + } + } + } + } + + // Keep the partial line in the buffer + buffer = buffer.rsplit('\n').next().unwrap_or("").to_owned(); + } + + // Handle any remaining content in buffer + if !buffer.trim().is_empty() { + let entry = NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: format!("Raw output: {}", buffer.trim()), + metadata: None, + }; + + let patch_id = entry_index_provider.next(); + let patch = ConversationPatch::add_normalized_entry(patch_id, entry); + msg_store.push_patch(patch); + } + }); + } + + /// Extract session ID from Claude JSON + fn extract_session_id(claude_json: &ClaudeJson) -> Option { + match claude_json { + ClaudeJson::System { session_id, .. } => session_id.clone(), + ClaudeJson::Assistant { session_id, .. } => session_id.clone(), + ClaudeJson::User { session_id, .. } => session_id.clone(), + ClaudeJson::ToolUse { session_id, .. } => session_id.clone(), + ClaudeJson::ToolResult { session_id, .. } => session_id.clone(), + ClaudeJson::Result { .. } => None, + ClaudeJson::Unknown => None, + } + } + + /// Convert Claude JSON to normalized entries + fn to_normalized_entries( + &mut self, + claude_json: &ClaudeJson, + worktree_path: &str, + ) -> Vec { + match claude_json { + ClaudeJson::System { subtype, .. } => { + let content = match subtype.as_deref() { + Some("init") => { + // Skip system init messages because it doesn't contain the actual model that will be used in assistant messages in case of claude-code-router. + // We'll send system initialized message with first assistant message that has a model field. + return vec![]; + } + Some(subtype) => format!("System: {subtype}"), + None => "System message".to_string(), + }; + + vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content, + metadata: Some( + serde_json::to_value(claude_json).unwrap_or(serde_json::Value::Null), + ), + }] + } + ClaudeJson::Assistant { message, .. } => { + let mut entries = Vec::new(); + + if self.model_name.is_none() + && let Some(model) = message.model.as_ref() + { + self.model_name = Some(model.clone()); + entries.push(NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: format!("System initialized with model: {model}"), + metadata: None, + }); + } + + for content_item in &message.content { + if let Some(entry) = Self::content_item_to_normalized_entry( + content_item, + "assistant", + worktree_path, + ) { + entries.push(entry); + } + } + entries + } + ClaudeJson::User { message, .. 
} => { + let mut entries = Vec::new(); + for content_item in &message.content { + if let Some(entry) = + Self::content_item_to_normalized_entry(content_item, "user", worktree_path) + { + entries.push(entry); + } + } + entries + } + ClaudeJson::ToolUse { + tool_name, input, .. + } => { + let action_type = Self::extract_action_type(tool_name, input, worktree_path); + let content = + Self::generate_concise_content(tool_name, input, &action_type, worktree_path); + + vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name: tool_name.clone(), + action_type, + }, + content, + metadata: Some( + serde_json::to_value(claude_json).unwrap_or(serde_json::Value::Null), + ), + }] + } + ClaudeJson::ToolResult { .. } => { + // TODO: Add proper ToolResult support to NormalizedEntry when the type system supports it + vec![] + } + ClaudeJson::Result { .. } => { + // Skip result messages + vec![] + } + ClaudeJson::Unknown => { + vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: "Unrecognized JSON message from Claude".to_string(), + metadata: None, + }] + } + } + } + + /// Convert Claude content item to normalized entry + fn content_item_to_normalized_entry( + content_item: &ClaudeContentItem, + role: &str, + worktree_path: &str, + ) -> Option { + match content_item { + ClaudeContentItem::Text { text } => { + let entry_type = match role { + "user" => NormalizedEntryType::UserMessage, + "assistant" => NormalizedEntryType::AssistantMessage, + _ => return None, + }; + Some(NormalizedEntry { + timestamp: None, + entry_type, + content: text.clone(), + metadata: Some( + serde_json::to_value(content_item).unwrap_or(serde_json::Value::Null), + ), + }) + } + ClaudeContentItem::Thinking { thinking } => Some(NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::Thinking, + content: thinking.clone(), + metadata: Some( + serde_json::to_value(content_item).unwrap_or(serde_json::Value::Null), + ), + }), + ClaudeContentItem::ToolUse { name, input, .. } => { + let action_type = Self::extract_action_type(name, input, worktree_path); + let content = + Self::generate_concise_content(name, input, &action_type, worktree_path); + + Some(NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name: name.clone(), + action_type, + }, + content, + metadata: Some( + serde_json::to_value(content_item).unwrap_or(serde_json::Value::Null), + ), + }) + } + ClaudeContentItem::ToolResult { .. 
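Every ToolUse entry above is funnelled through extract_action_type, which lower-cases the tool name and pulls the relevant field out of the JSON input, falling back to ActionType::Other for anything unrecognised. A small sketch in the style of the in-module tests further below; the input value is illustrative:

#[test]
fn bash_tool_maps_to_command_run() {
    let input = serde_json::json!({ "command": "cargo test" });
    let action = ClaudeLogProcessor::extract_action_type("Bash", &input, "/tmp/worktree");
    assert!(matches!(action, ActionType::CommandRun { ref command } if command == "cargo test"));
}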
} => { + // TODO: Add proper ToolResult support to NormalizedEntry when the type system supports it + None + } + } + } + + /// Extract action type from tool usage for better categorization + fn extract_action_type( + tool_name: &str, + input: &serde_json::Value, + worktree_path: &str, + ) -> ActionType { + match tool_name.to_lowercase().as_str() { + "read" => { + if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { + ActionType::FileRead { + path: make_path_relative(file_path, worktree_path), + } + } else { + ActionType::Other { + description: "File read operation".to_string(), + } + } + } + "edit" | "write" | "multiedit" => { + if let Some(file_path) = input.get("file_path").and_then(|p| p.as_str()) { + ActionType::FileWrite { + path: make_path_relative(file_path, worktree_path), + } + } else if let Some(path) = input.get("path").and_then(|p| p.as_str()) { + ActionType::FileWrite { + path: make_path_relative(path, worktree_path), + } + } else { + ActionType::Other { + description: "File write operation".to_string(), + } + } + } + "bash" => { + if let Some(command) = input.get("command").and_then(|c| c.as_str()) { + ActionType::CommandRun { + command: command.to_string(), + } + } else { + ActionType::Other { + description: "Command execution".to_string(), + } + } + } + "grep" => { + if let Some(pattern) = input.get("pattern").and_then(|p| p.as_str()) { + ActionType::Search { + query: pattern.to_string(), + } + } else { + ActionType::Other { + description: "Search operation".to_string(), + } + } + } + "webfetch" => { + if let Some(url) = input.get("url").and_then(|u| u.as_str()) { + ActionType::WebFetch { + url: url.to_string(), + } + } else { + ActionType::Other { + description: "Web fetch operation".to_string(), + } + } + } + "task" => { + if let Some(description) = input.get("description").and_then(|d| d.as_str()) { + ActionType::TaskCreate { + description: description.to_string(), + } + } else if let Some(prompt) = input.get("prompt").and_then(|p| p.as_str()) { + ActionType::TaskCreate { + description: prompt.to_string(), + } + } else { + ActionType::Other { + description: "Task creation".to_string(), + } + } + } + "exit_plan_mode" | "exitplanmode" | "exit-plan-mode" => { + if let Some(plan) = input.get("plan").and_then(|p| p.as_str()) { + ActionType::PlanPresentation { + plan: plan.to_string(), + } + } else { + ActionType::Other { + description: "Plan presentation".to_string(), + } + } + } + _ => ActionType::Other { + description: format!("Tool: {tool_name}"), + }, + } + } + + /// Generate concise, readable content for tool usage + fn generate_concise_content( + tool_name: &str, + input: &serde_json::Value, + action_type: &ActionType, + worktree_path: &str, + ) -> String { + match action_type { + ActionType::FileRead { path } => format!("`{path}`"), + ActionType::FileWrite { path } => format!("`{path}`"), + ActionType::CommandRun { command } => format!("`{command}`"), + ActionType::Search { query } => format!("`{query}`"), + ActionType::WebFetch { url } => format!("`{url}`"), + ActionType::TaskCreate { description } => description.clone(), + ActionType::PlanPresentation { plan } => plan.clone(), + ActionType::Other { description: _ } => match tool_name.to_lowercase().as_str() { + "todoread" | "todowrite" => { + if let Some(todos) = input.get("todos").and_then(|t| t.as_array()) { + let mut todo_items = Vec::new(); + for todo in todos { + if let Some(content) = todo.get("content").and_then(|c| c.as_str()) { + let status = todo + .get("status") + .and_then(|s| 
s.as_str()) + .unwrap_or("pending"); + let status_emoji = match status { + "completed" => "✅", + "in_progress" => "🔄", + "pending" | "todo" => "⏳", + _ => "📝", + }; + let priority = todo + .get("priority") + .and_then(|p| p.as_str()) + .unwrap_or("medium"); + todo_items.push(format!("{status_emoji} {content} ({priority})")); + } + } + if !todo_items.is_empty() { + format!("TODO List:\n{}", todo_items.join("\n")) + } else { + "Managing TODO list".to_string() + } + } else { + "Managing TODO list".to_string() + } + } + "ls" => { + if let Some(path) = input.get("path").and_then(|p| p.as_str()) { + let relative_path = make_path_relative(path, worktree_path); + if relative_path.is_empty() { + "List directory".to_string() + } else { + format!("List directory: `{relative_path}`") + } + } else { + "List directory".to_string() + } + } + "glob" => { + let pattern = input.get("pattern").and_then(|p| p.as_str()).unwrap_or("*"); + let path = input.get("path").and_then(|p| p.as_str()); + + if let Some(search_path) = path { + format!( + "Find files: `{}` in `{}`", + pattern, + make_path_relative(search_path, worktree_path) + ) + } else { + format!("Find files: `{pattern}`") + } + } + "codebase_search_agent" => { + if let Some(query) = input.get("query").and_then(|q| q.as_str()) { + format!("Search: {query}") + } else { + "Codebase search".to_string() + } + } + _ => tool_name.to_string(), + }, + } + } +} + +// Data structures for parsing Claude's JSON output format +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)] +#[serde(tag = "type")] +pub enum ClaudeJson { + #[serde(rename = "system")] + System { + subtype: Option, + session_id: Option, + cwd: Option, + tools: Option>, + model: Option, + }, + #[serde(rename = "assistant")] + Assistant { + message: ClaudeMessage, + session_id: Option, + }, + #[serde(rename = "user")] + User { + message: ClaudeMessage, + session_id: Option, + }, + #[serde(rename = "tool_use")] + ToolUse { + tool_name: String, + input: serde_json::Value, + session_id: Option, + }, + #[serde(rename = "tool_result")] + ToolResult { + result: serde_json::Value, + is_error: Option, + session_id: Option, + }, + #[serde(rename = "result")] + Result { + subtype: Option, + is_error: Option, + duration_ms: Option, + result: Option, + }, + // Catch-all for unknown message types + #[serde(other)] + Unknown, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)] +pub struct ClaudeMessage { + pub id: Option, + #[serde(rename = "type")] + pub message_type: Option, + pub role: String, + pub model: Option, + pub content: Vec, + pub stop_reason: Option, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)] +#[serde(tag = "type")] +pub enum ClaudeContentItem { + #[serde(rename = "text")] + Text { text: String }, + #[serde(rename = "thinking")] + Thinking { thinking: String }, + #[serde(rename = "tool_use")] + ToolUse { + id: String, + name: String, + input: serde_json::Value, + }, + #[serde(rename = "tool_result")] + ToolResult { + tool_use_id: String, + content: serde_json::Value, + is_error: Option, + }, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_claude_json_parsing() { + let system_json = + r#"{"type":"system","subtype":"init","session_id":"abc123","model":"claude-sonnet-4"}"#; + let parsed: ClaudeJson = serde_json::from_str(system_json).unwrap(); + + assert_eq!( + ClaudeLogProcessor::extract_session_id(&parsed), + Some("abc123".to_string()) + ); + + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + 
assert_eq!(entries.len(), 0); + + let assistant_json = r#" + {"type":"assistant","message":{"type":"message","role":"assistant","model":"claude-sonnet-4-20250514","content":[{"type":"text","text":"Hi! I'm Claude Code."}]}}"#; + let parsed: ClaudeJson = serde_json::from_str(assistant_json).unwrap(); + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + + assert_eq!(entries.len(), 2); + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::SystemMessage + )); + assert_eq!( + entries[0].content, + "System initialized with model: claude-sonnet-4-20250514" + ); + } + + #[test] + fn test_assistant_message_parsing() { + let assistant_json = r#"{"type":"assistant","message":{"role":"assistant","content":[{"type":"text","text":"Hello world"}]},"session_id":"abc123"}"#; + let parsed: ClaudeJson = serde_json::from_str(assistant_json).unwrap(); + + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + assert_eq!(entries.len(), 1); + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::AssistantMessage + )); + assert_eq!(entries[0].content, "Hello world"); + } + + #[test] + fn test_result_message_ignored() { + let result_json = r#"{"type":"result","subtype":"success","is_error":false,"duration_ms":6059,"result":"Final result"}"#; + let parsed: ClaudeJson = serde_json::from_str(result_json).unwrap(); + + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + assert_eq!(entries.len(), 0); // Should be ignored like in old implementation + } + + #[test] + fn test_thinking_content() { + let thinking_json = r#"{"type":"assistant","message":{"role":"assistant","content":[{"type":"thinking","thinking":"Let me think about this..."}]}}"#; + let parsed: ClaudeJson = serde_json::from_str(thinking_json).unwrap(); + + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + assert_eq!(entries.len(), 1); + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::Thinking + )); + assert_eq!(entries[0].content, "Let me think about this..."); + } + + #[test] + fn test_todo_tool_content_extraction() { + // Test TodoWrite with actual todo list + let todo_input = serde_json::json!({ + "todos": [ + { + "id": "1", + "content": "Fix the navigation bug", + "status": "completed", + "priority": "high" + }, + { + "id": "2", + "content": "Add user authentication", + "status": "in_progress", + "priority": "medium" + }, + { + "id": "3", + "content": "Write documentation", + "status": "pending", + "priority": "low" + } + ] + }); + + let action_type = + ClaudeLogProcessor::extract_action_type("TodoWrite", &todo_input, "/tmp/test-worktree"); + let result = ClaudeLogProcessor::generate_concise_content( + "TodoWrite", + &todo_input, + &action_type, + "/tmp/test-worktree", + ); + + assert!(result.contains("TODO List:")); + assert!(result.contains("✅ Fix the navigation bug (high)")); + assert!(result.contains("🔄 Add user authentication (medium)")); + assert!(result.contains("⏳ Write documentation (low)")); + } + + #[test] + fn test_todo_tool_empty_list() { + // Test TodoWrite with empty todo list + let empty_input = serde_json::json!({ + "todos": [] + }); + + let action_type = ClaudeLogProcessor::extract_action_type( + "TodoWrite", + &empty_input, + "/tmp/test-worktree", + ); + let result = ClaudeLogProcessor::generate_concise_content( + "TodoWrite", + &empty_input, + &action_type, + "/tmp/test-worktree", + ); + + assert_eq!(result, "Managing TODO list"); + } + + #[test] + fn test_todo_tool_no_todos_field() { + // 
Test TodoWrite with no todos field + let no_todos_input = serde_json::json!({ + "other_field": "value" + }); + + let action_type = ClaudeLogProcessor::extract_action_type( + "TodoWrite", + &no_todos_input, + "/tmp/test-worktree", + ); + let result = ClaudeLogProcessor::generate_concise_content( + "TodoWrite", + &no_todos_input, + &action_type, + "/tmp/test-worktree", + ); + + assert_eq!(result, "Managing TODO list"); + } + + #[test] + fn test_glob_tool_content_extraction() { + // Test Glob with pattern and path + let glob_input = serde_json::json!({ + "pattern": "**/*.ts", + "path": "/tmp/test-worktree/src" + }); + + let action_type = + ClaudeLogProcessor::extract_action_type("Glob", &glob_input, "/tmp/test-worktree"); + let result = ClaudeLogProcessor::generate_concise_content( + "Glob", + &glob_input, + &action_type, + "/tmp/test-worktree", + ); + + assert_eq!(result, "Find files: `**/*.ts` in `src`"); + } + + #[test] + fn test_glob_tool_pattern_only() { + // Test Glob with pattern only + let glob_input = serde_json::json!({ + "pattern": "*.js" + }); + + let action_type = + ClaudeLogProcessor::extract_action_type("Glob", &glob_input, "/tmp/test-worktree"); + let result = ClaudeLogProcessor::generate_concise_content( + "Glob", + &glob_input, + &action_type, + "/tmp/test-worktree", + ); + + assert_eq!(result, "Find files: `*.js`"); + } + + #[test] + fn test_ls_tool_content_extraction() { + // Test LS with path + let ls_input = serde_json::json!({ + "path": "/tmp/test-worktree/components" + }); + + let action_type = + ClaudeLogProcessor::extract_action_type("LS", &ls_input, "/tmp/test-worktree"); + let result = ClaudeLogProcessor::generate_concise_content( + "LS", + &ls_input, + &action_type, + "/tmp/test-worktree", + ); + + assert_eq!(result, "List directory: `components`"); + } + + #[test] + fn test_path_relative_conversion() { + // Test with relative path (should remain unchanged) + let relative_result = make_path_relative("src/main.rs", "/tmp/test-worktree"); + assert_eq!(relative_result, "src/main.rs"); + + // Test with absolute path (should become relative if possible) + let test_worktree = "/tmp/test-worktree"; + let absolute_path = format!("{}/src/main.rs", test_worktree); + let absolute_result = make_path_relative(&absolute_path, test_worktree); + assert_eq!(absolute_result, "src/main.rs"); + } + + #[tokio::test] + async fn test_streaming_patch_generation() { + use std::sync::Arc; + + use utils::msg_store::MsgStore; + + let executor = ClaudeCode::new(); + let msg_store = Arc::new(MsgStore::new()); + let current_dir = std::path::PathBuf::from("/tmp/test-worktree"); + + // Push some test messages + msg_store.push_stdout( + r#"{"type":"system","subtype":"init","session_id":"test123"}"#.to_string(), + ); + msg_store.push_stdout(r#"{"type":"assistant","message":{"role":"assistant","content":[{"type":"text","text":"Hello"}]}}"#.to_string()); + msg_store.push_finished(); + + // Start normalization (this spawns async task) + executor.normalize_logs(msg_store.clone(), ¤t_dir); + + // Give some time for async processing + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Check that the history now contains patch messages + let history = msg_store.get_history(); + let patch_count = history + .iter() + .filter(|msg| matches!(msg, utils::log_msg::LogMsg::JsonPatch(_))) + .count(); + assert!( + patch_count > 0, + "Expected JsonPatch messages to be generated from streaming processing" + ); + } + + #[test] + fn test_session_id_extraction() { + let system_json = 
r#"{"type":"system","session_id":"test-session-123"}"#; + let parsed: ClaudeJson = serde_json::from_str(system_json).unwrap(); + + assert_eq!( + ClaudeLogProcessor::extract_session_id(&parsed), + Some("test-session-123".to_string()) + ); + + let tool_use_json = + r#"{"type":"tool_use","tool_name":"read","input":{},"session_id":"another-session"}"#; + let parsed_tool: ClaudeJson = serde_json::from_str(tool_use_json).unwrap(); + + assert_eq!( + ClaudeLogProcessor::extract_session_id(&parsed_tool), + Some("another-session".to_string()) + ); + } + + #[test] + fn test_tool_result_parsing_ignored() { + let tool_result_json = r#"{"type":"tool_result","result":"File content here","is_error":false,"session_id":"test123"}"#; + let parsed: ClaudeJson = serde_json::from_str(tool_result_json).unwrap(); + + // Test session ID extraction from ToolResult still works + assert_eq!( + ClaudeLogProcessor::extract_session_id(&parsed), + Some("test123".to_string()) + ); + + // ToolResult messages should be ignored (produce no entries) until proper support is added + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + assert_eq!(entries.len(), 0); + } + + #[test] + fn test_content_item_tool_result_ignored() { + let assistant_with_tool_result = r#"{"type":"assistant","message":{"role":"assistant","content":[{"type":"tool_result","tool_use_id":"tool_123","content":"Operation completed","is_error":false}]}}"#; + let parsed: ClaudeJson = serde_json::from_str(assistant_with_tool_result).unwrap(); + + // ToolResult content items should be ignored (produce no entries) until proper support is added + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + assert_eq!(entries.len(), 0); + } + + #[test] + fn test_mixed_content_with_thinking_ignores_tool_result() { + let complex_assistant_json = r#"{"type":"assistant","message":{"role":"assistant","content":[{"type":"thinking","thinking":"I need to read the file first"},{"type":"text","text":"I'll help you with that"},{"type":"tool_result","tool_use_id":"tool_789","content":"Success","is_error":false}]}}"#; + let parsed: ClaudeJson = serde_json::from_str(complex_assistant_json).unwrap(); + + let entries = ClaudeLogProcessor::new().to_normalized_entries(&parsed, ""); + // Only thinking and text entries should be processed, tool_result ignored + assert_eq!(entries.len(), 2); + + // Check thinking entry + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::Thinking + )); + assert_eq!(entries[0].content, "I need to read the file first"); + + // Check assistant message + assert!(matches!( + entries[1].entry_type, + NormalizedEntryType::AssistantMessage + )); + assert_eq!(entries[1].content, "I'll help you with that"); + + // ToolResult entry is ignored - no third entry + } + + #[test] + fn test_claude_executor_command_building() { + // Test default executor produces correct command + let executor = ClaudeCode::new(); + let command = executor.command_builder.build_initial(); + assert!(command.contains("npx -y @anthropic-ai/claude-code@latest")); + assert!(command.contains("-p")); + assert!(command.contains("--dangerously-skip-permissions")); + assert!(command.contains("--verbose")); + assert!(command.contains("--output-format=stream-json")); + + // Test follow-up command + let follow_up = executor + .command_builder + .build_follow_up(&["--resume".to_string(), "test-session-123".to_string()]); + assert!(follow_up.contains("--resume test-session-123")); + assert!(follow_up.contains("-p")); // Still contains base params 
+ } +} diff --git a/crates/executors/src/executors/codex.rs b/crates/executors/src/executors/codex.rs new file mode 100644 index 00000000..1cbadd64 --- /dev/null +++ b/crates/executors/src/executors/codex.rs @@ -0,0 +1,902 @@ +use std::{path::PathBuf, process::Stdio, sync::Arc}; + +use async_trait::async_trait; +use command_group::{AsyncCommandGroup, AsyncGroupChild}; +use futures::StreamExt; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use tokio::{io::AsyncWriteExt, process::Command}; +use ts_rs::TS; +use utils::{msg_store::MsgStore, path::make_path_relative, shell::get_shell_command}; + +use crate::{ + command::{AgentProfiles, CommandBuilder}, + executors::{ExecutorError, StandardCodingAgentExecutor}, + logs::{ + ActionType, NormalizedEntry, NormalizedEntryType, + utils::{EntryIndexProvider, patch::ConversationPatch}, + }, +}; + +/// Handles session management for Codex executor +pub struct SessionHandler; + +impl SessionHandler { + /// Start monitoring stderr lines for session ID extraction + pub fn start_session_id_extraction(msg_store: Arc) { + tokio::spawn(async move { + let mut stderr_lines_stream = msg_store.stderr_lines_stream(); + + while let Some(Ok(line)) = stderr_lines_stream.next().await { + if let Some(session_id) = Self::extract_session_id_from_line(&line) { + msg_store.push_session_id(session_id); + } + } + }); + } + + /// Extract session ID from codex stderr output + pub fn extract_session_id_from_line(line: &str) -> Option { + // Look for session_id in the log format: + // 2025-07-23T15:47:59.877058Z INFO codex_exec: Codex initialized with event: Event { id: "0", msg: SessionConfigured(SessionConfiguredEvent { session_id: 3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b, model: "codex-mini-latest", history_log_id: 9104228, history_entry_count: 1 }) } + static SESSION_ID_REGEX: std::sync::OnceLock = std::sync::OnceLock::new(); + let regex = SESSION_ID_REGEX.get_or_init(|| { + Regex::new(r"session_id:\s*([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})").unwrap() + }); + + regex + .captures(line) + .and_then(|cap| cap.get(1)) + .map(|m| m.as_str().to_string()) + } + + /// Find codex rollout file path for given session_id. Used during follow-up execution. + pub fn find_rollout_file_path(session_id: &str) -> Result { + let home_dir = dirs::home_dir().ok_or("Could not determine home directory")?; + let sessions_dir = home_dir.join(".codex").join("sessions"); + + // Scan the sessions directory recursively for rollout files matching the session_id + // Pattern: rollout-{YYYY}-{MM}-{DD}T{HH}-{mm}-{ss}-{session_id}.jsonl + Self::scan_directory(&sessions_dir, session_id) + } + + // Helper for `find_rollout_file_path`. 
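+    // It accepts entries whose file name starts with "rollout-", ends with ".jsonl", and
+    // contains the session id, e.g. (hypothetical path, reusing the session id from the
+    // log example above):
+    //   ~/.codex/sessions/rollout-2025-07-23T15-47-59-3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b.jsonl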
+ // Recursively scan directory for rollout files matching the session_id + fn scan_directory(dir: &PathBuf, session_id: &str) -> Result { + if !dir.exists() { + return Err(format!( + "Sessions directory does not exist: {}", + dir.display() + )); + } + + let entries = std::fs::read_dir(dir) + .map_err(|e| format!("Failed to read directory {}: {}", dir.display(), e))?; + + for entry in entries { + let entry = entry.map_err(|e| format!("Failed to read directory entry: {e}"))?; + let path = entry.path(); + + if path.is_dir() { + // Recursively search subdirectories + if let Ok(found) = Self::scan_directory(&path, session_id) { + return Ok(found); + } + } else if path.is_file() + && let Some(filename) = path.file_name() + && let Some(filename_str) = filename.to_str() + && filename_str.contains(session_id) + && filename_str.starts_with("rollout-") + && filename_str.ends_with(".jsonl") + { + return Ok(path); + } + } + + Err(format!( + "Could not find rollout file for session_id: {session_id}" + )) + } +} + +/// An executor that uses Codex CLI to process tasks +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct Codex { + command_builder: CommandBuilder, +} + +impl Default for Codex { + fn default() -> Self { + Self::new() + } +} + +impl Codex { + /// Create a new Codex executor with default settings + pub fn new() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("codex") + .expect("Default codex profile should exist"); + + Self::with_command_builder(profile.command.clone()) + } + + /// Create a new Codex executor with custom command builder + pub fn with_command_builder(command_builder: CommandBuilder) -> Self { + Self { command_builder } + } +} + +#[async_trait] +impl StandardCodingAgentExecutor for Codex { + async fn spawn( + &self, + current_dir: &PathBuf, + prompt: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let codex_command = self.command_builder.build_initial(); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(&codex_command) + .env("NODE_NO_WARNINGS", "1") + .env("RUST_LOG", "info"); + + let mut child = command.group_spawn()?; + + // Feed the prompt in, then close the pipe so codex sees EOF + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + async fn spawn_follow_up( + &self, + current_dir: &PathBuf, + prompt: &str, + session_id: &str, + ) -> Result { + // Find the rollout file for the given session_id using SessionHandler + let rollout_file_path = + SessionHandler::find_rollout_file_path(session_id).map_err(|e| { + ExecutorError::SpawnError(std::io::Error::new(std::io::ErrorKind::NotFound, e)) + })?; + + let (shell_cmd, shell_arg) = get_shell_command(); + let codex_command = self.command_builder.build_follow_up(&[ + "-c".to_string(), + format!("experimental_resume={}", rollout_file_path.display()), + ]); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(&codex_command) + .env("NODE_NO_WARNINGS", "1") + .env("RUST_LOG", "info"); + + let mut child = command.group_spawn()?; + + // Feed the prompt in, then close the pipe so codex sees EOF + if let Some(mut stdin) = child.inner().stdin.take() { + 
stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + fn normalize_logs(&self, msg_store: Arc, current_dir: &PathBuf) { + let entry_index_provider = EntryIndexProvider::new(); + + // Process stderr logs for session extraction only (errors come through JSONL) + SessionHandler::start_session_id_extraction(msg_store.clone()); + + // Process stdout logs (Codex's JSONL output) + let current_dir = current_dir.clone(); + tokio::spawn(async move { + let mut stream = msg_store.stdout_lines_stream(); + + while let Some(Ok(line)) = stream.next().await { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + if let Ok(entries) = serde_json::from_str::(trimmed).map(|codex_json| { + codex_json + .to_normalized_entries(¤t_dir) + .unwrap_or_default() + }) { + for entry in entries { + let new_id = entry_index_provider.next(); + let patch = ConversationPatch::add_normalized_entry(new_id, entry); + msg_store.push_patch(patch); + } + } else { + // Handle malformed JSON as raw output + let entry = NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: format!("Raw output: {trimmed}"), + metadata: None, + }; + + let new_id = entry_index_provider.next(); + let patch = ConversationPatch::add_normalized_entry(new_id, entry); + msg_store.push_patch(patch); + } + } + }); + } +} + +// Data structures for parsing Codex's JSON output format +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)] +#[serde(untagged)] +pub enum CodexJson { + /// Structured message with id and msg fields + StructuredMessage { id: String, msg: CodexMsgContent }, + /// Prompt message (user input) + Prompt { prompt: String }, + /// System configuration message (first message with config fields) + SystemConfig { + #[serde(default)] + model: Option, + #[serde(rename = "reasoning effort", default)] + reasoning_effort: Option, + #[serde(default)] + provider: Option, + #[serde(default)] + sandbox: Option, + #[serde(default)] + approval: Option, + #[serde(default)] + workdir: Option, + #[serde(rename = "reasoning summaries", default)] + reasoning_summaries: Option, + #[serde(flatten)] + other_fields: std::collections::HashMap, + }, +} + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)] +#[serde(tag = "type")] +pub enum CodexMsgContent { + #[serde(rename = "agent_message")] + AgentMessage { message: String }, + #[serde(rename = "agent_reasoning")] + AgentReasoning { text: String }, + #[serde(rename = "error")] + Error { message: Option }, + #[serde(rename = "exec_command_begin")] + ExecCommandBegin { + call_id: Option, + command: Vec, + cwd: Option, + }, + #[serde(rename = "exec_command_end")] + ExecCommandEnd { + call_id: Option, + stdout: Option, + stderr: Option, + success: Option, + }, + #[serde(rename = "patch_apply_begin")] + PatchApplyBegin { + call_id: Option, + auto_approved: Option, + changes: std::collections::HashMap, + }, + #[serde(rename = "patch_apply_end")] + PatchApplyEnd { + call_id: Option, + stdout: Option, + stderr: Option, + success: Option, + }, + #[serde(rename = "mcp_tool_call_begin")] + McpToolCallBegin { + call_id: String, + server: String, + tool: String, + arguments: serde_json::Value, + }, + #[serde(rename = "mcp_tool_call_end")] + McpToolCallEnd { + call_id: String, + result: serde_json::Value, + }, + #[serde(rename = "task_started")] + TaskStarted, + #[serde(rename = "task_complete")] + TaskComplete { last_agent_message: Option }, + #[serde(rename = "token_count")] + TokenCount { + input_tokens: 
Option, + cached_input_tokens: Option, + output_tokens: Option, + reasoning_output_tokens: Option, + total_tokens: Option, + }, + // Catch-all for unknown message types + #[serde(other)] + Unknown, +} + +impl CodexJson { + /// Convert to normalized entries + pub fn to_normalized_entries(&self, current_dir: &PathBuf) -> Option> { + match self { + CodexJson::SystemConfig { .. } => self.format_config_message().map(|content| { + vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content, + metadata: Some(serde_json::to_value(self).unwrap_or(serde_json::Value::Null)), + }] + }), + CodexJson::Prompt { .. } => None, // Skip prompt messages + CodexJson::StructuredMessage { msg, .. } => { + let this = &msg; + let metadata = serde_json::to_value(self).unwrap_or(serde_json::Value::Null); + + match this { + CodexMsgContent::AgentMessage { message } => Some(vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::AssistantMessage, + content: message.clone(), + metadata: Some(metadata), + }]), + CodexMsgContent::AgentReasoning { text } => Some(vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::Thinking, + content: text.clone(), + metadata: Some(metadata), + }]), + CodexMsgContent::Error { message } => { + let error_message = message + .clone() + .unwrap_or_else(|| "Unknown error occurred".to_string()); + Some(vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ErrorMessage, + content: error_message, + metadata: Some(metadata), + }]) + } + CodexMsgContent::ExecCommandBegin { command, .. } => { + let command_str = command.join(" "); + + // Map shell commands to tool names (following Claude pattern) + let tool_name = if command_str.contains("bash") { + "bash" + } else { + "shell" + }; + + Some(vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name: tool_name.to_string(), + action_type: ActionType::CommandRun { + command: command_str.clone(), + }, + }, + content: format!("`{command_str}`"), + metadata: Some(metadata), + }]) + } + CodexMsgContent::PatchApplyBegin { changes, .. } => { + if let Some((file_path, _change_data)) = changes.iter().next() { + // Make path relative to current directory + let relative_path = + make_path_relative(file_path, ¤t_dir.to_string_lossy()); + + return Some(vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name: "edit".to_string(), + action_type: ActionType::FileWrite { + path: relative_path.clone(), + }, + }, + content: relative_path, + metadata: Some(metadata), + }]); + } + None + } + CodexMsgContent::McpToolCallBegin { + server, + tool, + call_id: _, + .. + } => { + let tool_name = format!("mcp_{tool}"); + let content = tool.clone(); + + Some(vec![NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name, + action_type: ActionType::Other { + description: format!("MCP tool call to {tool} from {server}"), + }, + }, + content, + metadata: Some(metadata), + }]) + } + // Ignored message types + CodexMsgContent::ExecCommandEnd { .. } + | CodexMsgContent::PatchApplyEnd { .. } + | CodexMsgContent::McpToolCallEnd { .. } + | CodexMsgContent::TaskStarted + | CodexMsgContent::TaskComplete { .. } + | CodexMsgContent::TokenCount { .. 
} + | CodexMsgContent::Unknown => None, + } + } + } + } + + /// Format system configuration message for display + fn format_config_message(&self) -> Option { + if let CodexJson::SystemConfig { + model, + reasoning_effort, + provider, + sandbox: _, + approval: _, + workdir: _, + reasoning_summaries: _, + other_fields: _, + } = self + { + let mut params = vec![]; + + if let Some(model) = model { + params.push(format!("model: {model}")); + } + if let Some(provider) = provider { + params.push(format!("provider: {provider}")); + } + if let Some(reasoning_effort) = reasoning_effort { + params.push(format!("reasoning effort: {reasoning_effort}")); + } + + if params.is_empty() { + None + } else { + Some(params.join(" ").to_string()) + } + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::logs::{ActionType, NormalizedEntry, NormalizedEntryType}; + + /// Test helper that directly tests the JSON parsing functions + fn parse_test_json_lines(input: &str) -> Vec { + let current_dir = PathBuf::from("/tmp"); + let mut entries = Vec::new(); + + for line in input.lines() { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + if let Ok(parsed_entries) = + serde_json::from_str::(trimmed).map(|codex_json| { + codex_json + .to_normalized_entries(¤t_dir) + .unwrap_or_default() + }) + { + entries.extend(parsed_entries); + } else { + // Handle malformed JSON as raw output + entries.push(NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: format!("Raw output: {}", trimmed), + metadata: None, + }); + } + } + + entries + } + + /// Test helper for testing CodexJson deserialization + fn test_codex_json_parsing(json_str: &str) -> Result { + serde_json::from_str(json_str) + } + + #[test] + fn test_extract_session_id_from_line() { + let line = "2025-07-23T15:47:59.877058Z INFO codex_exec: Codex initialized with event: Event { id: \"0\", msg: SessionConfigured(SessionConfiguredEvent { session_id: 3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b, model: \"codex-mini-latest\", history_log_id: 9104228, history_entry_count: 1 }) }"; + + let session_id = SessionHandler::extract_session_id_from_line(line); + assert_eq!( + session_id, + Some("3cdcc4df-c7c3-4cca-8902-48c3d4a0f96b".to_string()) + ); + } + + #[test] + fn test_extract_session_id_no_match() { + let line = "Some random log line without session id"; + let session_id = SessionHandler::extract_session_id_from_line(line); + assert_eq!(session_id, None); + } + + #[test] + fn test_normalize_logs_basic() { + let logs = r#"{"id":"1","msg":{"type":"task_started"}} +{"id":"1","msg":{"type":"agent_reasoning","text":"**Inspecting the directory tree**\n\nI want to check the root directory tree and I think using `ls -1` is acceptable since the guidelines don't explicitly forbid it, unlike `ls -R`, `find`, or `grep`. I could also consider using `rg --files`, but that might be too overwhelming if there are many files. Focusing on the top-level files and directories seems like a better approach. I'm particularly interested in `LICENSE`, `README.md`, and any relevant README files. 
So, let's start with `ls -1`."}} +{"id":"1","msg":{"type":"exec_command_begin","call_id":"call_I1o1QnQDtlLjGMg4Vd9HXJLd","command":["bash","-lc","ls -1"],"cwd":"/Users/user/dev/vk-wip"}} +{"id":"1","msg":{"type":"exec_command_end","call_id":"call_I1o1QnQDtlLjGMg4Vd9HXJLd","stdout":"AGENT.md\nCLAUDE.md\nCODE-OF-CONDUCT.md\nCargo.lock\nCargo.toml\nDockerfile\nLICENSE\nREADME.md\nbackend\nbuild-npm-package.sh\ndev_assets\ndev_assets_seed\nfrontend\nnode_modules\nnpx-cli\npackage-lock.json\npackage.json\npnpm-lock.yaml\npnpm-workspace.yaml\nrust-toolchain.toml\nrustfmt.toml\nscripts\nshared\ntest-npm-package.sh\n","stderr":"","exit_code":0}} +{"id":"1","msg":{"type":"task_complete","last_agent_message":"I can see the directory structure of your project. This appears to be a Rust project with a frontend/backend architecture, using pnpm for package management. The project includes various configuration files, documentation, and development assets."}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have: agent_reasoning, exec_command_begin (task_started and task_complete skipped) + assert_eq!(entries.len(), 2); + + // Check agent reasoning (thinking) + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::Thinking + )); + assert!(entries[0].content.contains("Inspecting the directory tree")); + + // Check bash command + assert!(matches!( + entries[1].entry_type, + NormalizedEntryType::ToolUse { .. } + )); + if let NormalizedEntryType::ToolUse { + tool_name, + action_type, + } = &entries[1].entry_type + { + assert_eq!(tool_name, "bash"); + assert!(matches!(action_type, ActionType::CommandRun { .. })); + } + assert_eq!(entries[1].content, "`bash -lc ls -1`"); + } + + #[test] + fn test_normalize_logs_shell_vs_bash_mapping() { + // Test shell command (not bash) + let shell_logs = r#"{"id":"1","msg":{"type":"exec_command_begin","call_id":"call_test","command":["sh","-c","echo hello"],"cwd":"/tmp"}}"#; + let entries = parse_test_json_lines(shell_logs); + assert_eq!(entries.len(), 1); + + if let NormalizedEntryType::ToolUse { tool_name, .. } = &entries[0].entry_type { + assert_eq!(tool_name, "shell"); // Maps to shell, not bash + } + + // Test bash command + let bash_logs = r#"{"id":"1","msg":{"type":"exec_command_begin","call_id":"call_test","command":["bash","-c","echo hello"],"cwd":"/tmp"}}"#; + let entries = parse_test_json_lines(bash_logs); + assert_eq!(entries.len(), 1); + + if let NormalizedEntryType::ToolUse { tool_name, .. 
} = &entries[0].entry_type { + assert_eq!(tool_name, "bash"); // Maps to bash + } + } + + #[test] + fn test_normalize_logs_token_count_skipped() { + let logs = r#"{"id":"1","msg":{"type":"task_started"}} +{"id":"1","msg":{"type":"token_count","input_tokens":1674,"cached_input_tokens":1627,"output_tokens":384,"reasoning_output_tokens":384,"total_tokens":2058}} +{"id":"1","msg":{"type":"task_complete","last_agent_message":"Done!"}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have: nothing (task_started, task_complete, and token_count all skipped) + assert_eq!(entries.len(), 0); + } + + #[test] + fn test_normalize_logs_malformed_json() { + let logs = r#"{"id":"1","msg":{"type":"task_started"}} +invalid json line here +{"id":"1","msg":{"type":"task_complete","last_agent_message":"Done!"}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have: raw output only (task_started and task_complete skipped) + assert_eq!(entries.len(), 1); + + // Check that malformed JSON becomes raw output + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::SystemMessage + )); + assert!( + entries[0] + .content + .contains("Raw output: invalid json line here") + ); + } + + #[test] + fn test_normalize_logs_prompt_ignored() { + let logs = r#"{"prompt":"project_id: f61fbd6a-9552-4b68-a1fe-10561f028dfc\n \nTask title: describe this repo"} +{"id":"1","msg":{"type":"task_started"}} +{"id":"1","msg":{"type":"agent_message","message":"Hello, I'll help you with that."}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry (prompt and task_started ignored, only agent_message) + assert_eq!(entries.len(), 1); + + // Check that we only have agent_message + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::AssistantMessage + )); + assert_eq!(entries[0].content, "Hello, I'll help you with that."); + } + + #[test] + fn test_normalize_logs_error_message() { + let logs = r#"{"id":"1","msg":{"type":"error","message":"Missing environment variable: `OPENAI_API_KEY`. Create an API key (https://platform.openai.com) and export it as an environment variable."}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry for the error message + assert_eq!(entries.len(), 1); + + // Check error message + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::ErrorMessage + )); + assert!( + entries[0] + .content + .contains("Missing environment variable: `OPENAI_API_KEY`") + ); + } + + #[test] + fn test_normalize_logs_error_message_no_content() { + let logs = r#"{"id":"1","msg":{"type":"error"}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry for the error message + assert_eq!(entries.len(), 1); + + // Check error message fallback + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::ErrorMessage + )); + assert_eq!(entries[0].content, "Unknown error occurred"); + } + + #[test] + fn test_normalize_logs_real_example() { + let logs = r#"{"sandbox":"danger-full-access","reasoning summaries":"auto","approval":"Never","provider":"openai","reasoning effort":"medium","workdir":"/private/var/folders/4m/6cwx14sx59lc2k9km5ph76gh0000gn/T/vibe-kanban-dev/vk-ec8b-describe-t","model":"codex-mini-latest"} +{"prompt":"project_id: f61fbd6a-9552-4b68-a1fe-10561f028dfc\n \nTask title: describe this repo"} +{"id":"1","msg":{"type":"task_started"}} +{"id":"1","msg":{"type":"error","message":"Missing environment variable: `OPENAI_API_KEY`. 
Create an API key (https://platform.openai.com) and export it as an environment variable."}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 2 entries: config, error (prompt and task_started ignored) + assert_eq!(entries.len(), 2); + + // Check configuration message + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::SystemMessage + )); + assert!(entries[0].content.contains("model")); + + // Check error message + assert!(matches!( + entries[1].entry_type, + NormalizedEntryType::ErrorMessage + )); + assert!(entries[1].content.contains("Missing environment variable")); + } + + #[test] + fn test_normalize_logs_partial_config() { + // Test with just model and provider (should still work) + let logs = r#"{"model":"codex-mini-latest","provider":"openai"}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry for the configuration message + assert_eq!(entries.len(), 1); + + // Check configuration message contains available params + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::SystemMessage + )); + } + + #[test] + fn test_normalize_logs_agent_message() { + let logs = r#"{"id":"1","msg":{"type":"agent_message","message":"I've made a small restructuring of the top‐level README:\n\n- **Inserted a \"Table of Contents\"** under the screenshot, linking to all major sections (Overview, Installation, Documentation, Support, Contributing, Development → Prerequisites/Running/Build, Environment Variables, Custom OAuth, and License).\n- **Appended a \"License\" section** at the bottom pointing to the Apache 2.0 LICENSE file.\n\nThese tweaks should make navigation and licensing info more discoverable. Let me know if you'd like any other adjustments!"}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry for the agent message + assert_eq!(entries.len(), 1); + + // Check agent message + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::AssistantMessage + )); + assert!( + entries[0] + .content + .contains("I've made a small restructuring") + ); + assert!(entries[0].content.contains("Table of Contents")); + } + + #[test] + fn test_normalize_logs_patch_apply() { + let logs = r#"{"id":"1","msg":{"type":"patch_apply_begin","call_id":"call_zr84aWQuwJR3aWgJLkfv56Gl","auto_approved":true,"changes":{"/private/var/folders/4m/6cwx14sx59lc2k9km5ph76gh0000gn/T/vibe-kanban-dev/vk-a712-minor-rest/README.md":{"update":{"unified_diff":"@@ -18,2 +18,17 @@\n \n+## Table of Contents\n+\n+- [Overview](#overview)\n+- [Installation](#installation)","move_path":null}}}}} +{"id":"1","msg":{"type":"patch_apply_end","call_id":"call_zr84aWQuwJR3aWgJLkfv56Gl","stdout":"Success. Updated the following files:\nM /private/var/folders/4m/6cwx14sx59lc2k9km5ph76gh0000gn/T/vibe-kanban-dev/vk-a712-minor-rest/README.md\n","stderr":"","success":true}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry (patch_apply_begin, patch_apply_end skipped) + assert_eq!(entries.len(), 1); + + // Check edit tool use (follows claude.rs pattern) + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::ToolUse { .. } + )); + if let NormalizedEntryType::ToolUse { + tool_name, + action_type, + } = &entries[0].entry_type + { + assert_eq!(tool_name, "edit"); + assert!(matches!(action_type, ActionType::FileWrite { .. 
})); + } + assert!(entries[0].content.contains("README.md")); + } + + #[test] + fn test_normalize_logs_skip_task_messages() { + let logs = r#"{"id":"1","msg":{"type":"task_started"}} +{"id":"1","msg":{"type":"agent_message","message":"Hello world"}} +{"id":"1","msg":{"type":"task_complete","last_agent_message":"Done!"}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 1 entry (task_started and task_complete skipped) + assert_eq!(entries.len(), 1); + + // Check that only agent_message remains + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::AssistantMessage + )); + assert_eq!(entries[0].content, "Hello world"); + } + + #[test] + fn test_normalize_logs_mcp_tool_calls() { + let logs = r#"{"id":"1","msg":{"type":"mcp_tool_call_begin","call_id":"call_KHwEJyaUuL5D8sO7lPfImx7I","server":"vibe_kanban","tool":"list_projects","arguments":{}}} +{"id":"1","msg":{"type":"mcp_tool_call_end","call_id":"call_KHwEJyaUuL5D8sO7lPfImx7I","result":{"Ok":{"content":[{"text":"Projects listed successfully"}],"isError":false}}}} +{"id":"1","msg":{"type":"agent_message","message":"Here are your projects"}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 2 entries (mcp_tool_call_begin and agent_message, mcp_tool_call_end skipped) + assert_eq!(entries.len(), 2); + + // Check MCP tool call begin + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::ToolUse { .. } + )); + if let NormalizedEntryType::ToolUse { + tool_name, + action_type, + } = &entries[0].entry_type + { + assert_eq!(tool_name, "mcp_list_projects"); + assert!(matches!(action_type, ActionType::Other { .. })); + } + + // Check agent message + assert!(matches!( + entries[1].entry_type, + NormalizedEntryType::AssistantMessage + )); + assert_eq!(entries[1].content, "Here are your projects"); + } + + #[test] + fn test_normalize_logs_mcp_tool_call_multiple() { + let logs = r#"{"id":"1","msg":{"type":"mcp_tool_call_begin","call_id":"call_1","server":"vibe_kanban","tool":"create_task","arguments":{"title":"Test task"}}} +{"id":"1","msg":{"type":"mcp_tool_call_end","call_id":"call_1","result":{"Ok":{"content":[{"text":"Task created"}],"isError":false}}}} +{"id":"1","msg":{"type":"mcp_tool_call_begin","call_id":"call_2","server":"vibe_kanban","tool":"list_tasks","arguments":{}}} +{"id":"1","msg":{"type":"mcp_tool_call_end","call_id":"call_2","result":{"Ok":{"content":[{"text":"Tasks listed"}],"isError":false}}}}"#; + + let entries = parse_test_json_lines(logs); + + // Should have 2 entries (both mcp_tool_call_begin events, mcp_tool_call_end events skipped) + assert_eq!(entries.len(), 2); + + // Check first MCP tool call + if let NormalizedEntryType::ToolUse { tool_name, .. } = &entries[0].entry_type { + assert_eq!(tool_name, "mcp_create_task"); + } + assert!(entries[0].content.contains("create_task")); + + // Check second MCP tool call + if let NormalizedEntryType::ToolUse { tool_name, .. } = &entries[1].entry_type { + assert_eq!(tool_name, "mcp_list_tasks"); + } + assert!(entries[1].content.contains("list_tasks")); + } + + #[test] + fn test_codex_json_system_config_parsing() { + let config_json = r#"{"sandbox":"danger-full-access","reasoning summaries":"auto","approval":"Never","provider":"openai","reasoning effort":"medium","workdir":"/tmp","model":"codex-mini-latest"}"#; + + let parsed = test_codex_json_parsing(config_json).unwrap(); + assert!(matches!(parsed, CodexJson::SystemConfig { .. 
})); + + let current_dir = PathBuf::from("/tmp"); + let entries = parsed.to_normalized_entries(¤t_dir).unwrap(); + assert_eq!(entries.len(), 1); + assert!(matches!( + entries[0].entry_type, + NormalizedEntryType::SystemMessage + )); + assert!(entries[0].content.contains("model: codex-mini-latest")); + } + + #[test] + fn test_codex_json_prompt_parsing() { + let prompt_json = r#"{"prompt":"project_id: f61fbd6a-9552-4b68-a1fe-10561f028dfc\n\nTask title: describe this repo"}"#; + + let parsed = test_codex_json_parsing(prompt_json).unwrap(); + assert!(matches!(parsed, CodexJson::Prompt { .. })); + + let current_dir = PathBuf::from("/tmp"); + let entries = parsed.to_normalized_entries(¤t_dir); + assert!(entries.is_none()); // Should return None + } +} diff --git a/crates/executors/src/executors/gemini.rs b/crates/executors/src/executors/gemini.rs new file mode 100644 index 00000000..b3c44237 --- /dev/null +++ b/crates/executors/src/executors/gemini.rs @@ -0,0 +1,331 @@ +use std::{path::PathBuf, process::Stdio, sync::Arc}; + +use async_trait::async_trait; +use command_group::{AsyncCommandGroup, AsyncGroupChild}; +use futures::{StreamExt, stream::BoxStream}; +use serde::{Deserialize, Serialize}; +use tokio::{ + fs::{self, OpenOptions}, + io::{AsyncReadExt, AsyncWriteExt}, + process::Command, +}; +use ts_rs::TS; +use utils::{msg_store::MsgStore, shell::get_shell_command}; + +use crate::{ + command::{AgentProfiles, CommandBuilder}, + executors::{ExecutorError, StandardCodingAgentExecutor}, + logs::{ + NormalizedEntry, NormalizedEntryType, plain_text_processor::PlainTextLogProcessor, + stderr_processor::normalize_stderr_logs, utils::EntryIndexProvider, + }, + stdout_dup, +}; + +/// An executor that uses Gemini to process tasks +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct Gemini { + command_builder: CommandBuilder, +} + +impl Default for Gemini { + fn default() -> Self { + Self::new() + } +} + +#[async_trait] +impl StandardCodingAgentExecutor for Gemini { + async fn spawn( + &self, + current_dir: &PathBuf, + prompt: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let gemini_command = self.command_builder.build_initial(); + + let mut command = Command::new(shell_cmd); + + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(gemini_command) + .env("NODE_NO_WARNINGS", "1"); + + let mut child = command.group_spawn()?; + + // Write prompt to stdin + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + // Duplicate stdout for session logging + let duplicate_stdout = stdout_dup::duplicate_stdout(&mut child)?; + tokio::spawn(Self::record_session( + duplicate_stdout, + current_dir.clone(), + prompt.to_string(), + false, + )); + + Ok(child) + } + + async fn spawn_follow_up( + &self, + current_dir: &PathBuf, + prompt: &str, + _session_id: &str, + ) -> Result { + // Build comprehensive prompt with session context + let followup_prompt = Self::build_followup_prompt(current_dir, prompt).await?; + + let (shell_cmd, shell_arg) = get_shell_command(); + let gemini_command = self.command_builder.build_follow_up(&[]); + + let mut command = Command::new(shell_cmd); + + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(gemini_command) + .env("NODE_NO_WARNINGS", "1"); + + 
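+        // Gemini has no native session-resume flag here (the session_id argument is unused);
+        // the follow-up context is instead replayed through the prompt assembled by
+        // `build_followup_prompt` above.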
let mut child = command.group_spawn()?; + + // Write comprehensive prompt to stdin + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(followup_prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + // Duplicate stdout for session logging (resume existing session) + let duplicate_stdout = stdout_dup::duplicate_stdout(&mut child)?; + tokio::spawn(Self::record_session( + duplicate_stdout, + current_dir.clone(), + prompt.to_string(), + true, + )); + + Ok(child) + } + + /// Parses both stderr and stdout logs for Gemini executor using PlainTextLogProcessor. + /// + /// - Stderr: uses the standard stderr log processor, which formats stderr output as ErrorMessage entries. + /// - Stdout: applies custom `format_chunk` to insert line breaks on period-to-capital transitions, + /// then create assitant messages from the output. + /// + /// Each entry is converted into an `AssistantMessage` or `ErrorMessage` and emitted as patches. + /// + /// # Example + /// + /// ```rust,ignore + /// gemini.normalize_logs(msg_store.clone(), &worktree_path); + /// ``` + /// + /// Subsequent queries to `msg_store` will receive JSON patches representing parsed log entries. + /// Sets up log normalization for the Gemini executor: + /// - stderr via [`normalize_stderr_logs`] + /// - stdout via [`PlainTextLogProcessor`] with Gemini-specific formatting and default heuristics + fn normalize_logs(&self, msg_store: Arc, worktree_path: &PathBuf) { + let entry_index_counter = EntryIndexProvider::new(); + normalize_stderr_logs(msg_store.clone(), entry_index_counter.clone()); + + // Send session ID to msg_store to enable follow-ups + msg_store.push_session_id( + worktree_path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(), + ); + + // Normalize Agent logs + tokio::spawn(async move { + let mut stdout = msg_store.stdout_chunked_stream(); + + // Create a processor with Gemini-specific formatting + let mut processor = PlainTextLogProcessor::builder() + .normalized_entry_producer(Box::new(|content: String| NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::AssistantMessage, + content, + metadata: None, + })) + .format_chunk(Box::new(|partial_line: Option<&str>, chunk: String| { + Self::format_stdout_chunk(&chunk, partial_line.unwrap_or("")) + })) + .index_provider(entry_index_counter) + .build(); + + while let Some(Ok(chunk)) = stdout.next().await { + for patch in processor.process(chunk) { + msg_store.push_patch(patch); + } + } + }); + } +} + +impl Gemini { + /// Make Gemini output more readable by inserting line breaks where periods are directly + /// followed by capital letters (common Gemini CLI formatting issue). + /// Handles both intra-chunk and cross-chunk period-to-capital transitions. + fn format_stdout_chunk(content: &str, accumulated_message: &str) -> String { + let mut result = String::with_capacity(content.len() + 100); + let chars: Vec = content.chars().collect(); + + // Check for cross-chunk boundary: previous chunk ended with period, current starts with capital + if !accumulated_message.is_empty() && !content.is_empty() { + let ends_with_period = accumulated_message.ends_with('.'); + let starts_with_capital = chars + .first() + .map(|&c| c.is_uppercase() && c.is_alphabetic()) + .unwrap_or(false); + + if ends_with_period && starts_with_capital { + result.push('\n'); + } + } + + // Handle intra-chunk period-to-capital transitions + for i in 0..chars.len() { + result.push(chars[i]); + + // Check if current char is '.' 
and next char is uppercase letter (no space between) + if chars[i] == '.' && i + 1 < chars.len() { + let next_char = chars[i + 1]; + if next_char.is_uppercase() && next_char.is_alphabetic() { + result.push('\n'); + } + } + } + + result + } + + async fn record_session( + mut stdout_stream: BoxStream<'static, std::io::Result>, + current_dir: PathBuf, + prompt: String, + resume_session: bool, + ) { + let file_path = + Self::get_sessions_base_dir().join(current_dir.file_name().unwrap_or_default()); + + // Ensure the directory exists + if let Some(parent) = file_path.parent() { + let _ = fs::create_dir_all(parent).await; + } + + // If not resuming session, delete the file first + if !resume_session { + let _ = fs::remove_file(&file_path).await; + } + + // Always append from here on + let mut file = match OpenOptions::new() + .create(true) + .append(true) + .open(&file_path) + .await + { + Ok(file) => file, + Err(_) => { + tracing::error!("Failed to open session file: {:?}", file_path); + return; + } + }; + + // Write user message as normalized entry + let mut user_message_json = serde_json::to_string(&NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::UserMessage, + content: prompt, + metadata: None, + }) + .unwrap_or_default(); + user_message_json.push('\n'); + let _ = file.write_all(user_message_json.as_bytes()).await; + + // Read stdout incrementally and append assistant message + let mut stdout_content = String::new(); + + // Read stdout until the process finishes + while let Some(Ok(chunk)) = stdout_stream.next().await { + stdout_content.push_str(&chunk); + } + + let mut assistant_message_json = serde_json::to_string(&NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::AssistantMessage, + content: stdout_content, + metadata: None, + }) + .unwrap_or_default(); + assistant_message_json.push('\n'); + let _ = file.write_all(assistant_message_json.as_bytes()).await; + } + + /// Build comprehensive prompt with session context for follow-up execution + async fn build_followup_prompt( + current_dir: &PathBuf, + prompt: &str, + ) -> Result { + let session_file_path = + Self::get_sessions_base_dir().join(current_dir.file_name().unwrap_or_default()); + + // Read existing session context + let session_context = fs::read_to_string(&session_file_path).await.map_err(|e| { + ExecutorError::FollowUpNotSupported(format!( + "No existing Gemini session found for this worktree. Session file not found at {session_file_path:?}: {e}" + )) + })?; + + Ok(format!( + r#"RESUME CONTEXT FOR CONTINUING TASK + +=== EXECUTION HISTORY === +The following is the conversation history from this session: +{session_context} + +=== CURRENT REQUEST === +{prompt} + +=== INSTRUCTIONS === +You are continuing work on the above task. The execution history shows the previous conversation in this session. Please continue from where the previous execution left off, taking into account all the context provided above. 
+"# + )) + } + + fn get_sessions_base_dir() -> PathBuf { + utils::path::get_vibe_kanban_temp_dir().join("gemini_sessions") + } +} + +impl Gemini { + /// Create a new Gemini executor with default settings + pub fn new() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("gemini") + .expect("Default gemini profile should exist"); + + Self::with_command_builder(profile.command.clone()) + } + + /// Create a new Gemini executor with custom command builder + pub fn with_command_builder(command_builder: CommandBuilder) -> Self { + Self { command_builder } + } +} diff --git a/crates/executors/src/executors/mod.rs b/crates/executors/src/executors/mod.rs new file mode 100644 index 00000000..e6d6a239 --- /dev/null +++ b/crates/executors/src/executors/mod.rs @@ -0,0 +1,174 @@ +use std::{path::PathBuf, sync::Arc}; + +use async_trait::async_trait; +use command_group::AsyncGroupChild; +use enum_dispatch::enum_dispatch; +use futures_io::Error as FuturesIoError; +use serde::{Deserialize, Serialize}; +use strum_macros::EnumDiscriminants; +use thiserror::Error; +use ts_rs::TS; +use utils::msg_store::MsgStore; + +use crate::{ + command::AgentProfiles, + executors::{amp::Amp, claude::ClaudeCode, codex::Codex, gemini::Gemini, opencode::Opencode}, +}; + +pub mod amp; +pub mod claude; +pub mod codex; +pub mod gemini; +pub mod opencode; + +#[derive(Debug, Error)] +pub enum ExecutorError { + #[error("Follow-up is not supported: {0}")] + FollowUpNotSupported(String), + #[error(transparent)] + SpawnError(#[from] FuturesIoError), + #[error("Unknown executor type: {0}")] + UnknownExecutorType(String), + #[error("I/O error: {0}")] + Io(std::io::Error), +} + +fn unknown_executor_error(s: &str) -> ExecutorError { + ExecutorError::UnknownExecutorType(format!("Unknown executor type: {s}.")) +} + +#[enum_dispatch] +#[derive( + Debug, Clone, Serialize, Deserialize, PartialEq, TS, EnumDiscriminants, strum_macros::EnumString, +)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] +#[strum(parse_err_ty = ExecutorError, parse_err_fn = unknown_executor_error)] +#[strum_discriminants( + name(BaseCodingAgent), + derive(strum_macros::Display, Serialize, Deserialize, TS), + strum(serialize_all = "SCREAMING_SNAKE_CASE"), + ts(use_ts_enum), + serde(rename_all = "SCREAMING_SNAKE_CASE") +)] +pub enum CodingAgent { + // Echo, + #[serde(alias = "claude")] + ClaudeCode, + // ClaudePlan, + Amp, + Gemini, + Codex, + // ClaudeCodeRouter, + Opencode, + // Aider, +} + +impl CodingAgent { + /// Create an executor from a profile string + /// Handles both default profiles ("claude-code", "amp", "gemini") and custom profiles + pub fn from_profile_str(profile: &str) -> Result { + match profile { + "claude-code" => Ok(CodingAgent::ClaudeCode(ClaudeCode::new())), + "claude-code-plan" => Ok(CodingAgent::ClaudeCode(ClaudeCode::new_plan_mode())), + "claude-code-router" => { + Ok(CodingAgent::ClaudeCode(ClaudeCode::new_claude_code_router())) + } + "amp" => Ok(CodingAgent::Amp(Amp::new())), + "gemini" => Ok(CodingAgent::Gemini(Gemini::new())), + "codex" => Ok(CodingAgent::Codex(Codex::new())), + "opencode" => Ok(CodingAgent::Opencode(Opencode::new())), + _ => { + // Try to load from AgentProfiles + if let Some(agent_profile) = AgentProfiles::get_cached().get_profile(profile) { + match agent_profile.agent { + BaseCodingAgent::ClaudeCode => { + Ok(CodingAgent::ClaudeCode(ClaudeCode::with_command_builder( + profile.to_string(), + agent_profile.command.clone(), + ))) + } + BaseCodingAgent::Amp => 
Ok(CodingAgent::Amp(Amp::with_command_builder( + agent_profile.command.clone(), + ))), + BaseCodingAgent::Gemini => Ok(CodingAgent::Gemini( + Gemini::with_command_builder(agent_profile.command.clone()), + )), + BaseCodingAgent::Codex => Ok(CodingAgent::Codex( + Codex::with_command_builder(agent_profile.command.clone()), + )), + BaseCodingAgent::Opencode => Ok(CodingAgent::Opencode( + Opencode::with_command_builder(agent_profile.command.clone()), + )), + } + } else { + Err(ExecutorError::UnknownExecutorType(format!( + "Unknown profile: {profile}" + ))) + } + } + } + } +} + +impl BaseCodingAgent { + /// Get the JSON attribute path for MCP servers in the config file + /// Returns None if the executor doesn't support MCP + pub fn mcp_attribute_path(&self) -> Option> { + match self { + //ExecutorConfig::CharmOpencode => Some(vec!["mcpServers"]), + Self::Opencode => Some(vec!["mcp"]), + Self::ClaudeCode => Some(vec!["mcpServers"]), + //ExecutorConfig::ClaudePlan => None, // Claude Plan shares Claude config + Self::Amp => Some(vec!["amp", "mcpServers"]), // Nested path for Amp + Self::Gemini => Some(vec!["mcpServers"]), + //ExecutorConfig::Aider => None, // Aider doesn't support MCP. https://github.com/Aider-AI/aider/issues/3314 + Self::Codex => None, // Codex uses TOML config, frontend doesn't handle TOML yet + } + } + + pub fn supports_mcp(&self) -> bool { + self.mcp_attribute_path().is_some() + } + + pub fn config_path(&self) -> Option { + match self { + //ExecutorConfig::CharmOpencode => { + //dirs::home_dir().map(|home| home.join(".opencode.json")) + //} + Self::ClaudeCode => dirs::home_dir().map(|home| home.join(".claude.json")), + //ExecutorConfig::ClaudePlan => dirs::home_dir().map(|home| home.join(".claude.json")), + Self::Opencode => { + #[cfg(unix)] + { + xdg::BaseDirectories::with_prefix("opencode").get_config_file("opencode.json") + } + #[cfg(not(unix))] + { + dirs::config_dir().map(|config| config.join("opencode").join("opencode.json")) + } + } + //ExecutorConfig::Aider => None, + Self::Codex => dirs::home_dir().map(|home| home.join(".codex").join("config.toml")), + Self::Amp => dirs::config_dir().map(|config| config.join("amp").join("settings.json")), + Self::Gemini => dirs::home_dir().map(|home| home.join(".gemini").join("settings.json")), + } + } +} + +#[async_trait] +#[enum_dispatch(CodingAgent)] +pub trait StandardCodingAgentExecutor { + async fn spawn( + &self, + current_dir: &PathBuf, + prompt: &str, + ) -> Result; + async fn spawn_follow_up( + &self, + current_dir: &PathBuf, + prompt: &str, + session_id: &str, + ) -> Result; + fn normalize_logs(&self, _raw_logs_event_store: Arc, _worktree_path: &PathBuf); +} diff --git a/crates/executors/src/executors/opencode.rs b/crates/executors/src/executors/opencode.rs new file mode 100644 index 00000000..279e389d --- /dev/null +++ b/crates/executors/src/executors/opencode.rs @@ -0,0 +1,823 @@ +use std::{fmt, path::PathBuf, process::Stdio, sync::Arc}; + +use async_trait::async_trait; +use command_group::{AsyncCommandGroup, AsyncGroupChild}; +use fork_stream::StreamExt as _; +use futures::{StreamExt, future::ready, stream::BoxStream}; +use lazy_static::lazy_static; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use tokio::{io::AsyncWriteExt, process::Command}; +use ts_rs::TS; +use utils::{msg_store::MsgStore, path::make_path_relative, shell::get_shell_command}; + +use crate::{ + command::{AgentProfiles, CommandBuilder}, + executors::{ExecutorError, StandardCodingAgentExecutor}, + logs::{ + ActionType, NormalizedEntry, 
NormalizedEntryType, + plain_text_processor::{MessageBoundary, PlainTextLogProcessor}, + utils::EntryIndexProvider, + }, +}; + +/// An executor that uses OpenCode to process tasks +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct Opencode { + command_builder: CommandBuilder, +} + +impl Default for Opencode { + fn default() -> Self { + Self::new() + } +} + +impl Opencode { + pub fn new() -> Self { + let profile = AgentProfiles::get_cached() + .get_profile("opencode") + .expect("Default opencode profile should exist"); + + Self::with_command_builder(profile.command.clone()) + } + + pub fn with_command_builder(command_builder: CommandBuilder) -> Self { + Self { command_builder } + } +} + +#[async_trait] +impl StandardCodingAgentExecutor for Opencode { + async fn spawn( + &self, + current_dir: &PathBuf, + prompt: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let opencode_command = self.command_builder.build_initial(); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) // Keep stdout but we won't use it + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(opencode_command) + .env("NODE_NO_WARNINGS", "1"); + + let mut child = command.group_spawn()?; + + // Write prompt to stdin + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + async fn spawn_follow_up( + &self, + current_dir: &PathBuf, + prompt: &str, + session_id: &str, + ) -> Result { + let (shell_cmd, shell_arg) = get_shell_command(); + let opencode_command = self + .command_builder + .build_follow_up(&["--session".to_string(), session_id.to_string()]); + + let mut command = Command::new(shell_cmd); + command + .kill_on_drop(true) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) // Keep stdout but we won't use it + .stderr(Stdio::piped()) + .current_dir(current_dir) + .arg(shell_arg) + .arg(&opencode_command) + .env("NODE_NO_WARNINGS", "1"); + + let mut child = command.group_spawn()?; + + // Write prompt to stdin + if let Some(mut stdin) = child.inner().stdin.take() { + stdin.write_all(prompt.as_bytes()).await?; + stdin.shutdown().await?; + } + + Ok(child) + } + + /// Normalize logs for OpenCode executor + /// + /// This implementation uses three separate threads: + /// 1. Session ID thread: read by line, search for session ID format, store it. + /// 2. Error log recognition thread: read by line, identify error log lines, store them as error messages. + /// 3. Main normalizer thread: read stderr by line, filter out log lines, send lines (with '\n' appended) to plain text normalizer, + /// then define predicate for split and create appropriate normalized entry (either assistant or tool call). + fn normalize_logs(&self, msg_store: Arc, worktree_path: &PathBuf) { + let entry_index_counter = EntryIndexProvider::new(); + let worktree_path = worktree_path.clone(); + + let stderr_lines = msg_store + .stderr_lines_stream() + .filter_map(|res| ready(res.ok())) + .map(|line| LogUtils::strip_ansi_codes(&line)) + .fork(); + + // Log line: INFO 2025-08-05T10:17:26 +1ms service=session id=ses_786439b6dffe4bLqNBS4fGd7mJ + // error line: ! 
some error message + let log_lines = stderr_lines + .clone() + .filter(|line| { + ready(OPENCODE_LOG_REGEX.is_match(line) || LogUtils::is_error_line(line)) + }) + .boxed(); + + // Process log lines, which contain error messages and session ID + tokio::spawn(Self::process_opencode_log_lines( + log_lines, + msg_store.clone(), + entry_index_counter.clone(), + )); + + let agent_logs = stderr_lines + .filter(|line| { + ready( + !LogUtils::is_noise(line) + && !OPENCODE_LOG_REGEX.is_match(line) + && !LogUtils::is_error_line(line), + ) + }) + .boxed(); + + // Normalize agent logs + tokio::spawn(Self::process_agent_logs( + agent_logs, + worktree_path, + entry_index_counter, + msg_store, + )); + } +} +impl Opencode { + async fn process_opencode_log_lines( + mut log_lines: BoxStream<'_, String>, + msg_store: Arc, + entry_index_counter: EntryIndexProvider, + ) { + let mut session_id_extracted = false; + while let Some(line) = log_lines.next().await { + if line.starts_with("ERROR") + || line.starts_with("WARN") + || LogUtils::is_error_line(&line) + { + let entry = NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ErrorMessage, + content: line.clone(), + metadata: None, + }; + + // Create a patch for this single entry + let patch = crate::logs::utils::ConversationPatch::add_normalized_entry( + entry_index_counter.next(), + entry, + ); + msg_store.push_patch(patch); + } else if !session_id_extracted + && let Some(session_id) = LogUtils::parse_session_id_from_line(&line) + { + msg_store.push_session_id(session_id); + session_id_extracted = true; + } + } + } + + async fn process_agent_logs( + mut agent_logs: BoxStream<'_, String>, + worktree_path: PathBuf, + entry_index_counter: EntryIndexProvider, + msg_store: Arc, + ) { + // Create processor for stderr content + let mut processor = PlainTextLogProcessor::builder() + .normalized_entry_producer(Box::new(move |content: String| { + Self::create_normalized_entry(content, &worktree_path.clone()) + })) + .message_boundary_predicate(Box::new(|lines: &[String]| Self::detect_tool_call(lines))) + .index_provider(entry_index_counter.clone()) + .build(); + + while let Some(line) = agent_logs.next().await { + debug_assert!(!line.ends_with('\n')); + + // Process the line through the plain text processor + for patch in processor.process(line + "\n") { + msg_store.push_patch(patch); + } + } + } + + /// Create normalized entry from content + pub fn create_normalized_entry(content: String, worktree_path: &PathBuf) -> NormalizedEntry { + // Check if this is a tool call + if let Some(tool_call) = ToolCall::parse(&content) { + let tool_name = tool_call.tool.name(); + let action_type = + ToolUtils::determine_action_type(&tool_call.tool, &worktree_path.to_string_lossy()); + let tool_content = + ToolUtils::generate_tool_content(&tool_call.tool, &worktree_path.to_string_lossy()); + + return NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name, + action_type, + }, + content: tool_content, + metadata: Some(tool_call.arguments()), + }; + } + + // Default to assistant message + NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::AssistantMessage, + content, + metadata: None, + } + } + + /// Detect message boundaries for tool calls and other content using serde deserialization + pub fn detect_tool_call(lines: &[String]) -> Option { + for (i, line) in lines.iter().enumerate() { + if ToolCall::is_tool_line(line) { + if i == 0 { + // separate tool call from subsequent content + return 
Some(MessageBoundary::Split(1)); + } else { + // separate tool call from previous content + return Some(MessageBoundary::Split(i)); + } + } + } + None + } +} + +// ============================================================================= +// TOOL DEFINITIONS +// ============================================================================= + +/// Represents different types of tools that can be called by OpenCode +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(tag = "tool_name", content = "arguments")] +pub enum Tool { + #[serde(rename = "read")] + Read { + #[serde(rename = "filePath")] + file_path: String, + #[serde(default)] + offset: Option, + #[serde(default)] + limit: Option, + }, + #[serde(rename = "write")] + Write { + #[serde(rename = "filePath")] + file_path: String, + content: String, + }, + #[serde(rename = "edit")] + Edit { + #[serde(rename = "filePath")] + file_path: String, + #[serde(rename = "oldString")] + old_string: String, + #[serde(rename = "newString")] + new_string: String, + #[serde(rename = "replaceAll", default)] + replace_all: Option, + }, + #[serde(rename = "bash")] + Bash { + command: String, + #[serde(default)] + timeout: Option, + #[serde(default)] + description: Option, + }, + #[serde(rename = "grep")] + Grep { + pattern: String, + #[serde(default)] + path: Option, + #[serde(default)] + include: Option, + }, + #[serde(rename = "glob")] + Glob { + pattern: String, + #[serde(default)] + path: Option, + }, + #[serde(rename = "todowrite")] + TodoWrite { todos: Vec }, + #[serde(rename = "todoread")] + TodoRead, + #[serde(rename = "list")] + List { + #[serde(default)] + path: Option, + #[serde(default)] + ignore: Option>, + }, + #[serde(rename = "webfetch")] + WebFetch { + url: String, + #[serde(default)] + format: Option, + #[serde(default)] + timeout: Option, + }, + /// Catch-all for unknown tools (including MCP tools) + Other { + tool_name: String, + arguments: serde_json::Value, + }, +} + +/// TODO information structure +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +pub struct TodoInfo { + pub content: String, + pub status: String, + #[serde(default)] + pub priority: Option, +} + +/// Web fetch format options +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, TS)] +#[serde(rename_all = "lowercase")] +pub enum WebFetchFormat { + Text, + Markdown, + Html, +} + +impl Tool { + /// Get the tool name as a string + pub fn name(&self) -> String { + match self { + Tool::Read { .. } => "read".to_string(), + Tool::Write { .. } => "write".to_string(), + Tool::Edit { .. } => "edit".to_string(), + Tool::Bash { .. } => "bash".to_string(), + Tool::Grep { .. } => "grep".to_string(), + Tool::Glob { .. } => "glob".to_string(), + Tool::TodoWrite { .. } => "todowrite".to_string(), + Tool::TodoRead => "todoread".to_string(), + Tool::List { .. } => "list".to_string(), + Tool::WebFetch { .. } => "webfetch".to_string(), + Tool::Other { tool_name, .. 
} => tool_name.clone(), + } + } + + /// Get the tool arguments as JSON value + pub fn arguments(&self) -> serde_json::Value { + match self { + Tool::Read { + file_path, + offset, + limit, + } => { + let mut args = serde_json::json!({ "filePath": file_path }); + if let Some(offset) = offset { + args["offset"] = (*offset).into(); + } + if let Some(limit) = limit { + args["limit"] = (*limit).into(); + } + args + } + Tool::Write { file_path, content } => { + serde_json::json!({ "filePath": file_path, "content": content }) + } + Tool::Edit { + file_path, + old_string, + new_string, + replace_all, + } => { + let mut args = serde_json::json!({ + "filePath": file_path, + "oldString": old_string, + "newString": new_string + }); + if let Some(replace_all) = replace_all { + args["replaceAll"] = (*replace_all).into(); + } + args + } + Tool::Bash { + command, + timeout, + description, + } => { + let mut args = serde_json::json!({ "command": command }); + if let Some(timeout) = timeout { + args["timeout"] = (*timeout).into(); + } + if let Some(description) = description { + args["description"] = description.clone().into(); + } + args + } + Tool::Grep { + pattern, + path, + include, + } => { + let mut args = serde_json::json!({ "pattern": pattern }); + if let Some(path) = path { + args["path"] = path.clone().into(); + } + if let Some(include) = include { + args["include"] = include.clone().into(); + } + args + } + Tool::Glob { pattern, path } => { + let mut args = serde_json::json!({ "pattern": pattern }); + if let Some(path) = path { + args["path"] = path.clone().into(); + } + args + } + Tool::TodoWrite { todos } => { + serde_json::json!({ "todos": todos }) + } + Tool::TodoRead => serde_json::Value::Null, + Tool::List { path, ignore } => { + let mut args = serde_json::Value::Object(serde_json::Map::new()); + if let Some(path) = path { + args["path"] = path.clone().into(); + } + if let Some(ignore) = ignore { + args["ignore"] = ignore.clone().into(); + } + args + } + Tool::WebFetch { + url, + format, + timeout, + } => { + let mut args = serde_json::json!({ "url": url }); + if let Some(format) = format { + args["format"] = match format { + WebFetchFormat::Text => "text".into(), + WebFetchFormat::Markdown => "markdown".into(), + WebFetchFormat::Html => "html".into(), + }; + } + if let Some(timeout) = timeout { + args["timeout"] = (*timeout).into(); + } + args + } + Tool::Other { arguments, .. 
} => arguments.clone(), + } + } +} + +// ============================================================================= +// TOOL CALL PARSING +// ============================================================================= + +/// Represents a parsed tool call line from OpenCode output +#[derive(Debug, Clone, PartialEq)] +pub struct ToolCall { + pub tool: Tool, +} + +impl ToolCall { + /// Parse a tool call from a string that starts with | + pub fn parse(line: &str) -> Option { + let line: &str = line.trim_end(); + if !line.starts_with('|') { + return None; + } + + // Remove the | and any surrounding whitespace + let content = line[1..].trim(); + + // Split into tool name and JSON arguments + let parts: Vec<&str> = content.splitn(2, char::is_whitespace).collect(); + if parts.len() != 2 { + return None; + } + + let tool_name = parts[0].to_string().to_lowercase(); + let args_str = parts[1].trim(); + + // Try to parse the arguments as JSON + let arguments: serde_json::Value = match serde_json::from_str(args_str) { + Ok(args) => args, + Err(_) => return None, + }; + + // Create a JSON object that matches our Tool enum's serde format + let tool_json = serde_json::json!({ + "tool_name": tool_name, + "arguments": arguments + }); + + // Let serde deserialize the tool automatically + match serde_json::from_value::(tool_json) { + Ok(tool) => Some(ToolCall { tool }), + Err(_) => { + // If serde parsing fails, fall back to Other variant + Some(ToolCall { + tool: Tool::Other { + tool_name, + arguments, + }, + }) + } + } + } + + /// Check if a line is a valid tool line + pub fn is_tool_line(line: &str) -> bool { + Self::parse(line).is_some() + } + + /// Get the tool name + pub fn tool_name(&self) -> String { + self.tool.name() + } + + /// Get the tool arguments as JSON + pub fn arguments(&self) -> serde_json::Value { + self.tool.arguments() + } +} + +impl fmt::Display for ToolCall { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "| {} {}", self.tool.name(), self.tool.arguments()) + } +} + +// ============================================================================= +// TOOL UTILITIES +// ============================================================================= + +/// Utilities for processing tool calls +pub struct ToolUtils; + +impl ToolUtils { + pub fn normalize_tool_name(tool_name: &str) -> String { + tool_name.to_lowercase() + } + + /// Helper function to determine action type for tool usage + pub fn determine_action_type(tool: &Tool, worktree_path: &str) -> ActionType { + match tool { + Tool::Read { file_path, .. } => ActionType::FileRead { + path: make_path_relative(file_path, worktree_path), + }, + Tool::Write { file_path, .. } | Tool::Edit { file_path, .. } => ActionType::FileWrite { + path: make_path_relative(file_path, worktree_path), + }, + Tool::Bash { command, .. } => ActionType::CommandRun { + command: command.clone(), + }, + Tool::Grep { pattern, .. } => ActionType::Search { + query: pattern.clone(), + }, + Tool::Glob { pattern, .. } => ActionType::Search { + query: format!("glob: {pattern}"), + }, + Tool::List { .. } => ActionType::Other { + description: "Directory listing".to_string(), + }, + Tool::WebFetch { url, .. } => ActionType::Other { + description: format!("Web fetch: {url}"), + }, + Tool::TodoWrite { .. } | Tool::TodoRead => ActionType::Other { + description: "TODO list management".to_string(), + }, + Tool::Other { tool_name, .. 
} => { + // Handle MCP tools (format: client_name_tool_name) + if tool_name.contains('_') { + ActionType::Other { + description: format!("MCP tool: {tool_name}"), + } + } else { + ActionType::Other { + description: format!("Tool: {tool_name}"), + } + } + } + } + } + + /// Helper function to generate concise content for tool usage + pub fn generate_tool_content(tool: &Tool, worktree_path: &str) -> String { + match tool { + Tool::Read { file_path, .. } => { + format!("`{}`", make_path_relative(file_path, worktree_path)) + } + Tool::Write { file_path, .. } | Tool::Edit { file_path, .. } => { + format!("`{}`", make_path_relative(file_path, worktree_path)) + } + Tool::Bash { command, .. } => { + format!("`{command}`") + } + Tool::Grep { + pattern, + path, + include, + } => { + let search_path = path.as_deref().unwrap_or("."); + match include { + Some(include_pattern) => { + format!("`{pattern}` in `{search_path}` ({include_pattern})") + } + None => format!("`{pattern}` in `{search_path}`"), + } + } + Tool::Glob { pattern, path } => { + let search_path = path.as_deref().unwrap_or("."); + format!("glob `{pattern}` in `{search_path}`") + } + Tool::List { path, .. } => { + if let Some(path) = path { + format!("`{}`", make_path_relative(path, worktree_path)) + } else { + "List directory".to_string() + } + } + Tool::WebFetch { url, .. } => { + format!("fetch `{url}`") + } + Tool::TodoWrite { todos } => Self::generate_todo_content(todos), + Tool::TodoRead => "Managing TODO list".to_string(), + Tool::Other { tool_name, .. } => { + // Handle MCP tools (format: client_name_tool_name) + if tool_name.contains('_') { + format!("MCP: `{tool_name}`") + } else { + format!("`{tool_name}`") + } + } + } + } + + /// Generate formatted content for TODO tools from TodoInfo struct + fn generate_todo_content(todos: &[TodoInfo]) -> String { + if todos.is_empty() { + return "Managing TODO list".to_string(); + } + + let mut todo_items = Vec::new(); + for todo in todos { + let status_emoji = match todo.status.as_str() { + "completed" => "✅", + "in_progress" => "🔄", + "pending" | "todo" => "⏳", + _ => "📝", + }; + let priority = todo.priority.as_deref().unwrap_or("medium"); + todo_items.push(format!("{} {} ({})", status_emoji, todo.content, priority)); + } + format!("TODO List:\n{}", todo_items.join("\n")) + } +} + +// ============================================================================= +// Log interpretation UTILITIES +// ============================================================================= + +lazy_static! { + // Accurate regex for OpenCode log lines: LEVEL timestamp +ms ... 
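+    // Example of a line the regexes below are intended to match (same sample as in the
+    // normalize_logs comment above; the exact field layout may vary between OpenCode versions):
+    //   INFO  2025-08-05T10:17:26 +1ms service=session id=ses_786439b6dffe4bLqNBS4fGd7mJ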
+ static ref OPENCODE_LOG_REGEX: Regex = Regex::new(r"^(INFO|DEBUG|WARN|ERROR)\s+\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\s+\+\d+\s*ms.*").unwrap(); + static ref SESSION_ID_REGEX: Regex = Regex::new(r".*\b(id|session|sessionID)=([^ ]+)").unwrap(); + static ref NPM_WARN_REGEX: Regex = Regex::new(r"^npm warn .*").unwrap(); +} + +/// Log utilities for OpenCode processing +pub struct LogUtils; + +impl LogUtils { + /// Strip ANSI escape codes from text (conservative) + pub fn strip_ansi_codes(text: &str) -> String { + // Handle both unicode escape sequences and raw ANSI codes + let result = text.replace("\\u001b", "\x1b"); + + let mut cleaned = String::new(); + let mut chars = result.chars().peekable(); + + while let Some(ch) = chars.next() { + if ch == '\x1b' { + // Skip ANSI escape sequence + if chars.peek() == Some(&'[') { + chars.next(); // consume '[' + // Skip until we find a letter (end of ANSI sequence) + for next_ch in chars.by_ref() { + if next_ch.is_ascii_alphabetic() { + break; + } + } + } + } else { + cleaned.push(ch); + } + } + + cleaned + } + + /// Check if a line should be skipped as noise + pub fn is_noise(line: &str) -> bool { + // Empty lines are noise + if line.is_empty() { + return true; + } + + let line = line.trim(); + + if NPM_WARN_REGEX.is_match(line) { + return true; + } + + // Spinner glyphs + if line.len() == 1 && "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏".contains(line) { + return true; + } + + // Banner lines containing block glyphs (Unicode Block Elements range) + if line + .chars() + .take(1) + .any(|c| ('\u{2580}'..='\u{259F}').contains(&c)) + { + return true; + } + + // UI/stats frames using Box Drawing glyphs (U+2500-257F) + if line + .chars() + .take(1) + .any(|c| ('\u{2500}'..='\u{257F}').contains(&c)) + { + return true; + } + + // Model banner (@ with spaces) + if line.starts_with("@ ") { + return true; + } + + // Share link + if line.starts_with("~ https://opencode.ai/s/") { + return true; + } + + // Everything else is NOT noise + false + } + + /// Detect if a line is an OpenCode log line format using regex + pub fn is_opencode_log_line(line: &str) -> bool { + OPENCODE_LOG_REGEX.is_match(line) + } + + pub fn is_error_line(line: &str) -> bool { + line.starts_with("! 
") + } + + /// Parse session_id from OpenCode log lines + pub fn parse_session_id_from_line(line: &str) -> Option { + // Only apply to OpenCode log lines + if !Self::is_opencode_log_line(line) { + return None; + } + + // Try regex for session ID extraction from service=session logs + if let Some(captures) = SESSION_ID_REGEX.captures(line) + && let Some(id) = captures.get(2) + { + return Some(id.as_str().to_string()); + } + + None + } +} diff --git a/crates/executors/src/lib.rs b/crates/executors/src/lib.rs new file mode 100644 index 00000000..46831052 --- /dev/null +++ b/crates/executors/src/lib.rs @@ -0,0 +1,5 @@ +pub mod actions; +pub mod command; +pub mod executors; +pub mod logs; +pub mod stdout_dup; diff --git a/crates/executors/src/logs/mod.rs b/crates/executors/src/logs/mod.rs new file mode 100644 index 00000000..db5cda14 --- /dev/null +++ b/crates/executors/src/logs/mod.rs @@ -0,0 +1,52 @@ +use serde::{Deserialize, Serialize}; +use ts_rs::TS; + +pub mod plain_text_processor; +pub mod stderr_processor; +pub mod utils; + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct NormalizedConversation { + pub entries: Vec, + pub session_id: Option, + pub executor_type: String, + pub prompt: Option, + pub summary: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum NormalizedEntryType { + UserMessage, + AssistantMessage, + ToolUse { + tool_name: String, + action_type: ActionType, + }, + SystemMessage, + ErrorMessage, + Thinking, +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct NormalizedEntry { + pub timestamp: Option, + pub entry_type: NormalizedEntryType, + pub content: String, + #[ts(skip)] + pub metadata: Option, +} + +/// Types of tool actions that can be performed +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +#[serde(tag = "action", rename_all = "snake_case")] +pub enum ActionType { + FileRead { path: String }, + FileWrite { path: String }, + CommandRun { command: String }, + Search { query: String }, + WebFetch { url: String }, + TaskCreate { description: String }, + PlanPresentation { plan: String }, + Other { description: String }, +} diff --git a/crates/executors/src/logs/plain_text_processor.rs b/crates/executors/src/logs/plain_text_processor.rs new file mode 100644 index 00000000..8a232d3c --- /dev/null +++ b/crates/executors/src/logs/plain_text_processor.rs @@ -0,0 +1,438 @@ +//! Reusable log processor for plain-text streams with flexible clustering and formatting. +//! +//! Clusters messages into entries based on configurable size and time-gap heuristics, and supports +//! pluggable formatters for transforming or annotating chunks (e.g., inserting line breaks or parsing tool calls). +//! +//! Capable of handling mixed-format streams, including interleaved tool calls and assistant messages, +//! with custom split predicates to detect embedded markers and emit separate entries. +//! +//! ## Use cases +//! - **stderr_processor**: Cluster stderr lines by time gap and format as `ErrorMessage` log entries. +//! See [`stderr_processor::normalize_stderr_logs`]. +//! - **Gemini executor**: Post-process Gemini CLI output to make it prettier, then format it as assistant messages clustered by size. +//! See [`crate::executors::gemini::Gemini::format_stdout_chunk`]. +//! - **Tool call support**: detect lines starting with a distinct marker via `message_boundary_predicate` to separate tool invocations. 
+use std::{ + time::{Duration, Instant}, + vec, +}; + +use bon::bon; +use json_patch::Patch; + +use super::{ + NormalizedEntry, + utils::{ConversationPatch, EntryIndexProvider}, +}; + +/// Controls message boundary for advanced executors. +/// The main use-case is to support mixed-content log streams where tool calls and assistant messages are interleaved. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum MessageBoundary { + /// Conclude the current message entry at the given line. + /// Useful when we detect a message of a different kind than the current one, e.g., when a tool call starts we need to close the current assistant message. + Split(usize), + /// Request more content. Signals that the current entry is incomplete and should not be emitted yet. + /// This should only be the case in tool calls, as assistant messages can be partially emitted. + IncompleteContent, +} + +/// Internal buffer for collecting streaming text into individual lines. +/// Maintains line and size information for heuristics and processing. +#[derive(Debug)] +struct PlainTextBuffer { + /// All lines including last partial line. Complete lines have trailing \n, partial line doesn't + lines: Vec, + /// Current buffered length + total_len: usize, +} + +impl PlainTextBuffer { + /// Create a new empty buffer + pub fn new() -> Self { + Self { + lines: Vec::new(), + total_len: 0, + } + } + + /// Ingest a new text chunk into the buffer. + pub fn ingest(&mut self, text_chunk: String) { + debug_assert!(!text_chunk.is_empty()); + + // Add a new lines or grow the current partial line + let current_partial = if self.lines.last().is_some_and(|l| !l.ends_with('\n')) { + let partial = self.lines.pop().unwrap(); + self.total_len = self.total_len.saturating_sub(partial.len()); + partial + } else { + String::new() + }; + + // Process chunk + let combined_text = current_partial + &text_chunk; + let size = combined_text.len(); + + // Append new lines + let parts: Vec = combined_text + .split_inclusive('\n') + .map(ToString::to_string) + .collect(); + self.lines.extend(parts); + self.total_len += size; + } + + /// Remove and return the first `n` buffered lines, + pub fn drain_lines(&mut self, n: usize) -> Vec { + let n = n.min(self.lines.len()); + let drained: Vec = self.lines.drain(..n).collect(); + + // Update total_bytes + for line in &drained { + self.total_len = self.total_len.saturating_sub(line.len()); + } + + drained + } + + /// Remove and return lines until the content length is at least `len`. + /// Useful for size-based splitting of content. + pub fn drain_size(&mut self, len: usize) -> Vec { + let mut drained_len = 0; + let mut lines_to_drain = 0; + + for line in &self.lines { + if drained_len >= len && lines_to_drain > 0 { + break; + } + drained_len += line.len(); + lines_to_drain += 1; + } + + self.drain_lines(lines_to_drain) + } + + /// Empty the buffer, removing and returning all content, + pub fn flush(&mut self) -> Vec { + let result = self.lines.drain(..).collect(); + self.total_len = 0; + result + } + + /// Return the total number of lines. + pub fn line_count(&self) -> usize { + self.lines.len() + } + + /// Return the total length of content. + pub fn total_len(&self) -> usize { + self.total_len + } + + /// View lines. + pub fn lines(&self) -> &[String] { + &self.lines + } + + /// Get the current parial line. + pub fn partial_line(&self) -> Option<&str> { + if let Some(last) = self.lines.last() + && !last.ends_with('\n') + { + return Some(last); + } + None + } + + /// Check if the buffer is empty. 
+ pub fn is_empty(&self) -> bool { + debug_assert!(self.lines.len() == 0 || self.total_len > 0); + self.total_len == 0 + } +} + +impl Default for PlainTextBuffer { + fn default() -> Self { + Self::new() + } +} + +/// Optional content formatting function. Can be used post-process raw output before creating normalized entries. +pub type FormatChunkFn = Box, String) -> String + Send + 'static>; + +/// Optional predicate function to determine message boundaries. This enables detecting tool calls interleaved with assistant messages. +pub type MessageBoundaryPredicateFn = + Box Option + Send + 'static>; + +/// Function to create a `NormalizedEntry` from content. +pub type NormalizedEntryProducerFn = Box NormalizedEntry + Send + 'static>; + +/// High-level plain text log processor with configurable formatting and splitting +pub struct PlainTextLogProcessor { + buffer: PlainTextBuffer, + index_provider: EntryIndexProvider, + entry_size_threshold: Option, + time_gap: Option, + format_chunk: Option, + message_boundary_predicate: Option, + normalized_entry_producer: NormalizedEntryProducerFn, + last_chunk_arrival_time: Instant, // time since last chunk arrived + current_entry_index: Option, +} + +impl PlainTextLogProcessor { + /// Process incoming text and return JSON patches for any complete entries + pub fn process(&mut self, text_chunk: String) -> Vec { + if text_chunk.is_empty() { + return vec![]; + } + + if !self.buffer.is_empty() { + // If the new content arrived after the (**Optional**) time threshold between messages, we consider it a new entry. + // Useful for stderr streams where we want to group related lines into a single entry. + if self + .time_gap + .is_some_and(|time_gap| self.last_chunk_arrival_time.elapsed() >= time_gap) + { + let lines = self.buffer.flush(); + if !lines.is_empty() { + return vec![self.create_patch(lines)]; + } + self.current_entry_index = None; + } + } + + self.last_chunk_arrival_time = Instant::now(); + + let formatted_chunk = if let Some(format_chunk) = self.format_chunk.as_ref() { + format_chunk(self.buffer.partial_line(), text_chunk) + } else { + text_chunk + }; + + if formatted_chunk.is_empty() { + return vec![]; + } + + // Let the buffer handle text buffering + self.buffer.ingest(formatted_chunk); + + let mut patches = Vec::new(); + + // Check if we have a custom message boundary predicate + loop { + let message_boundary_predicate = self + .message_boundary_predicate + .as_ref() + .and_then(|predicate| predicate(self.buffer.lines())); + + match message_boundary_predicate { + // Predicate decided to conclude the current entry at `line_idx` + Some(MessageBoundary::Split(line_idx)) => { + let lines = self.buffer.drain_lines(line_idx); + if !lines.is_empty() { + patches.push(self.create_patch(lines)); + // Move to next entry after split + self.current_entry_index = None; + } + } + // Predicate decided that current content cannot be sent yet. + Some(MessageBoundary::IncompleteContent) => { + // Stop processing, wait for more content. + // Partial updates will be disabled. + return patches; + } + None => { + // No more splits, break and continue to size/latency heuristics + break; + } + } + } + + // Check message size. If entry is large enough, break it into smaller entries. + if let Some(size_threshold) = self.entry_size_threshold { + // Check message size. If entry is large enough, create a new entry. 
+ while self.buffer.total_len() >= size_threshold { + let lines = self.buffer.drain_size(size_threshold); + if lines.is_empty() { + break; + } + patches.push(self.create_patch(lines)); + // Move to next entry after size split + self.current_entry_index = None; + } + } + + // Send partial udpdates + if !self.buffer.is_empty() { + // Stream updates without consuming buffer + patches.push(self.create_patch(self.buffer.lines().to_vec())); + } + patches + } + + /// Create patch + fn create_patch(&mut self, lines: Vec) -> Patch { + let content = lines.concat(); + let entry = (self.normalized_entry_producer)(content); + + let added = self.current_entry_index.is_some(); + let index = if let Some(idx) = self.current_entry_index { + idx + } else { + // If no current index, get next from provider + let idx = self.index_provider.next(); + self.current_entry_index = Some(idx); + idx + }; + + if !added { + ConversationPatch::add_normalized_entry(index, entry) + } else { + ConversationPatch::replace(index, entry) + } + } +} + +#[bon] +impl PlainTextLogProcessor { + /// Create a builder for configuring PlainTextLogProcessor. + /// + /// # Parameters + /// * `normalized_entry_producer` - Required function to convert text content into a `NormalizedEntry`. + /// * `size_threshold` - Optional size threshold for individual entries. Once an entry content exceeds this size, a new entry is created. + /// * `time_gap` - Optional time gap between individual entries. When new content arrives after this duration, it is considered a new entry. + /// * `format_chunk` - Optional function to fix raw output before creating normalized entries. + /// * `message_boundary_predicate` - Optional function to determine custom message boundaries. Useful when content is heterogeneous (e.g., tool calls interleaved with assistant messages). + /// * `index_provider` - Required sharable atomic counter for tracking entry indices. + /// + /// When both `size_threshold` and `time_gap` are `None`, a default size threshold of 8 KiB is used. 
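+    ///
+    /// Illustrative sketch of a time-gap configuration (mirrors the stderr use case):
+    /// ```rust,ignore
+    /// let processor = PlainTextLogProcessor::builder()
+    ///     .normalized_entry_producer(Box::new(|content: String| NormalizedEntry {
+    ///         timestamp: None,
+    ///         entry_type: NormalizedEntryType::ErrorMessage,
+    ///         content,
+    ///         metadata: None,
+    ///     }))
+    ///     .time_gap(Duration::from_secs(2))
+    ///     .index_provider(EntryIndexProvider::new())
+    ///     .build();
+    /// ```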
+ #[builder] + pub fn new( + normalized_entry_producer: impl Fn(String) -> NormalizedEntry + 'static + Send, + size_threshold: Option, + time_gap: Option, + format_chunk: Option, String) -> String + 'static + Send>>, + message_boundary_predicate: Option< + Box Option + 'static + Send>, + >, + index_provider: EntryIndexProvider, + ) -> Self { + Self { + buffer: PlainTextBuffer::new(), + index_provider, + entry_size_threshold: if size_threshold.is_none() && time_gap.is_none() { + Some(8 * 1024) // Default 8KiB when neither is set + } else { + size_threshold + }, + time_gap, + format_chunk: format_chunk.map(|f| { + Box::new(f) as Box, String) -> String + Send + 'static> + }), + message_boundary_predicate: message_boundary_predicate.map(|p| { + Box::new(p) as Box Option + Send + 'static> + }), + normalized_entry_producer: Box::new(normalized_entry_producer), + last_chunk_arrival_time: Instant::now(), + current_entry_index: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::logs::NormalizedEntryType; + + #[test] + fn test_plain_buffer_flush() { + let mut buffer = PlainTextBuffer::new(); + + buffer.ingest("line1\npartial".to_string()); + assert_eq!(buffer.line_count(), 2); + + let lines = buffer.flush(); + assert_eq!(lines, vec!["line1\n", "partial"]); + assert_eq!(buffer.line_count(), 0); + } + + #[test] + fn test_plain_buffer_len() { + let mut buffer = PlainTextBuffer::new(); + + buffer.ingest("abc\ndef\n".to_string()); + assert_eq!(buffer.total_len(), 8); // "abc\n" + "def\n" + + buffer.drain_lines(1); + assert_eq!(buffer.total_len(), 4); // "def\n" + } + + #[test] + fn test_drain_until_size() { + let mut buffer = PlainTextBuffer::new(); + + buffer.ingest("short\nlonger line\nvery long line here\n".to_string()); + + // Drain until we have at least 10 bytes + let drained = buffer.drain_size(10); + assert_eq!(drained.len(), 2); // "short\n" (6) + "longer line\n" (12) = 18 bytes total + assert_eq!(drained, vec!["short\n", "longer line\n"]); + } + + #[test] + fn test_processor_simple() { + let producer = |content: String| -> NormalizedEntry { + NormalizedEntry { + timestamp: None, // Avoid creating artificial timestamps during normalization + entry_type: NormalizedEntryType::SystemMessage, + content: content.to_string(), + metadata: None, + } + }; + + let mut processor = PlainTextLogProcessor::builder() + .normalized_entry_producer(producer) + .index_provider(EntryIndexProvider::new()) + .build(); + + let patches = processor.process("hello world\n".to_string()); + assert_eq!(patches.len(), 1); + } + + #[test] + fn test_processor_custom_log_formatter() { + // Example Level 1 producer that parses tool calls + let tool_producer = |content: String| -> NormalizedEntry { + if content.starts_with("TOOL:") { + let tool_name = content.strip_prefix("TOOL:").unwrap_or("unknown").trim(); + NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ToolUse { + tool_name: tool_name.to_string(), + action_type: super::super::ActionType::Other { + description: tool_name.to_string(), + }, + }, + content, + metadata: None, + } + } else { + NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::SystemMessage, + content: content.to_string(), + metadata: None, + } + } + }; + + let mut processor = PlainTextLogProcessor::builder() + .normalized_entry_producer(tool_producer) + .index_provider(EntryIndexProvider::new()) + .build(); + + let patches = processor.process("TOOL: file_read\n".to_string()); + assert_eq!(patches.len(), 1); + } +} diff --git 
a/crates/executors/src/logs/stderr_processor.rs b/crates/executors/src/logs/stderr_processor.rs new file mode 100644 index 00000000..562a9617 --- /dev/null +++ b/crates/executors/src/logs/stderr_processor.rs @@ -0,0 +1,58 @@ +//! Standard stderr log processor for executors +//! +//! Uses `PlainTextLogProcessor` with a 2-second `latency_threshold` to split stderr streams into entries. +//! Each entry is normalized as `ErrorMessage` and emitted as JSON patches to the message store. +//! +//! Example: +//! ```rust,ignore +//! normalize_stderr_logs(msg_store.clone(), EntryIndexProvider::new()); +//! ``` +//! +use std::{sync::Arc, time::Duration}; + +use futures::StreamExt; +use utils::msg_store::MsgStore; + +use super::{NormalizedEntry, NormalizedEntryType, plain_text_processor::PlainTextLogProcessor}; +use crate::logs::utils::EntryIndexProvider; + +/// Standard stderr log normalizer that uses PlainTextLogProcessor to stream error logs. +/// +/// Splits stderr output into discrete entries based on a latency threshold (2s) to group +/// related lines into a single error entry. Each entry is normalized as an `ErrorMessage` +/// and emitted as JSON patches for downstream consumption (e.g., UI or log aggregation). +/// +/// # Options +/// - `latency_threshold`: 2 seconds to separate error messages based on time gaps. +/// - `normalized_entry_producer`: maps each chunk into an `ErrorMessage` entry. +/// +/// # Use case +/// Intended for executor stderr streams, grouping multi-line errors into cohesive entries +/// instead of emitting each line separately. +/// +/// # Arguments +/// * `msg_store` - the message store providing a stream of stderr chunks and accepting patches. +/// * `entry_index_provider` - provider of incremental entry indices for patch ordering. +pub fn normalize_stderr_logs(msg_store: Arc, entry_index_provider: EntryIndexProvider) { + tokio::spawn(async move { + let mut stderr = msg_store.stderr_chunked_stream(); + + // Create a processor with time-based emission for stderr + let mut processor = PlainTextLogProcessor::builder() + .normalized_entry_producer(Box::new(|content: String| NormalizedEntry { + timestamp: None, + entry_type: NormalizedEntryType::ErrorMessage, + content, + metadata: None, + })) + .time_gap(Duration::from_secs(2)) // Break messages if they are 2 seconds apart + .index_provider(entry_index_provider) + .build(); + + while let Some(Ok(chunk)) = stderr.next().await { + for patch in processor.process(chunk) { + msg_store.push_patch(patch); + } + } + }); +} diff --git a/crates/executors/src/logs/utils/entry_index.rs b/crates/executors/src/logs/utils/entry_index.rs new file mode 100644 index 00000000..559c6ec3 --- /dev/null +++ b/crates/executors/src/logs/utils/entry_index.rs @@ -0,0 +1,68 @@ +//! 
Entry Index Provider for thread-safe monotonic indexing + +use std::sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, +}; + +/// Thread-safe provider for monotonically increasing entry indexes +#[derive(Debug, Clone)] +pub struct EntryIndexProvider(Arc); + +impl EntryIndexProvider { + /// Create a new index provider starting from 0 + pub fn new() -> Self { + Self(Arc::new(AtomicUsize::new(0))) + } + + /// Get the next available index + pub fn next(&self) -> usize { + self.0.fetch_add(1, Ordering::Relaxed) + } + + /// Get the current index without incrementing + pub fn current(&self) -> usize { + self.0.load(Ordering::Relaxed) + } +} + +impl Default for EntryIndexProvider { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_entry_index_provider() { + let provider = EntryIndexProvider::new(); + assert_eq!(provider.next(), 0); + assert_eq!(provider.next(), 1); + assert_eq!(provider.next(), 2); + } + + #[test] + fn test_entry_index_provider_clone() { + let provider1 = EntryIndexProvider::new(); + let provider2 = provider1.clone(); + + assert_eq!(provider1.next(), 0); + assert_eq!(provider2.next(), 1); + assert_eq!(provider1.next(), 2); + } + + #[test] + fn test_current_index() { + let provider = EntryIndexProvider::new(); + assert_eq!(provider.current(), 0); + + provider.next(); + assert_eq!(provider.current(), 1); + + provider.next(); + assert_eq!(provider.current(), 2); + } +} diff --git a/crates/executors/src/logs/utils/mod.rs b/crates/executors/src/logs/utils/mod.rs new file mode 100644 index 00000000..18042ac7 --- /dev/null +++ b/crates/executors/src/logs/utils/mod.rs @@ -0,0 +1,7 @@ +//! Utility modules for executor framework + +pub mod entry_index; +pub mod patch; + +pub use entry_index::EntryIndexProvider; +pub use patch::ConversationPatch; diff --git a/crates/executors/src/logs/utils/patch.rs b/crates/executors/src/logs/utils/patch.rs new file mode 100644 index 00000000..a4887af0 --- /dev/null +++ b/crates/executors/src/logs/utils/patch.rs @@ -0,0 +1,115 @@ +use json_patch::Patch; +use serde::{Deserialize, Serialize}; +use serde_json::{from_value, json}; +use ts_rs::TS; +use utils::diff::FileDiff; + +use crate::logs::NormalizedEntry; + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq, TS)] +#[serde(rename_all = "lowercase")] +enum PatchOperation { + Add, + Replace, + Remove, +} + +#[derive(Serialize, TS)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE", tag = "type", content = "content")] +pub enum PatchType { + NormalizedEntry(NormalizedEntry), + Stdout(String), + Stderr(String), + FileDiff(FileDiff), +} + +#[derive(Serialize)] +struct PatchEntry { + op: PatchOperation, + path: String, + value: PatchType, +} + +fn escape_json_pointer_segment(s: &str) -> String { + s.replace('~', "~0").replace('/', "~1") +} + +/// Helper functions to create JSON patches for conversation entries +pub struct ConversationPatch; + +impl ConversationPatch { + /// Create an ADD patch for a new conversation entry at the given index + pub fn add_normalized_entry(entry_index: usize, entry: NormalizedEntry) -> Patch { + let patch_entry = PatchEntry { + op: PatchOperation::Add, + path: format!("/entries/{entry_index}"), + value: PatchType::NormalizedEntry(entry), + }; + + from_value(json!([patch_entry])).unwrap() + } + + /// Create an ADD patch for a new string at the given index + pub fn add_stdout(entry_index: usize, entry: String) -> Patch { + let patch_entry = PatchEntry { + op: PatchOperation::Add, + path: 
format!("/entries/{entry_index}"), + value: PatchType::Stdout(entry), + }; + + from_value(json!([patch_entry])).unwrap() + } + + /// Create an ADD patch for a new string at the given index + pub fn add_stderr(entry_index: usize, entry: String) -> Patch { + let patch_entry = PatchEntry { + op: PatchOperation::Add, + path: format!("/entries/{entry_index}"), + value: PatchType::Stderr(entry), + }; + + from_value(json!([patch_entry])).unwrap() + } + + /// Create an ADD patch for a new file diff at the given index + pub fn add_file_diff(file_diff: FileDiff) -> Patch { + let patch_entry = PatchEntry { + op: PatchOperation::Add, + path: format!("/entries/{}", escape_json_pointer_segment(&file_diff.path)), + value: PatchType::FileDiff(file_diff), + }; + + from_value(json!([patch_entry])).unwrap() + } + + /// Create an ADD patch for a new file diff at the given index + pub fn replace_file_diff(file_diff: FileDiff) -> Patch { + let patch_entry = PatchEntry { + op: PatchOperation::Replace, + path: format!("/entries/{}", escape_json_pointer_segment(&file_diff.path)), + value: PatchType::FileDiff(file_diff), + }; + + from_value(json!([patch_entry])).unwrap() + } + + /// Create a REMOVE patch for removing a file diff + pub fn remove_file_diff(path: &str) -> Patch { + from_value(json!([{ + "op": PatchOperation::Remove, + "path": format!("/entries/{}", escape_json_pointer_segment(path)) + }])) + .unwrap() + } + + /// Create a REPLACE patch for updating an existing conversation entry at the given index + pub fn replace(entry_index: usize, entry: NormalizedEntry) -> Patch { + let patch_entry = PatchEntry { + op: PatchOperation::Replace, + path: format!("/entries/{entry_index}"), + value: PatchType::NormalizedEntry(entry), + }; + + from_value(json!([patch_entry])).unwrap() + } +} diff --git a/crates/executors/src/stdout_dup.rs b/crates/executors/src/stdout_dup.rs new file mode 100644 index 00000000..dbc512b4 --- /dev/null +++ b/crates/executors/src/stdout_dup.rs @@ -0,0 +1,127 @@ +//! Cross-platform stdout duplication utility for child processes +//! +//! Provides a single function to duplicate a child process's stdout stream. +//! Supports Unix and Windows platforms. + +#[cfg(unix)] +use std::os::unix::io::{FromRawFd, IntoRawFd, OwnedFd}; +#[cfg(windows)] +use std::os::windows::io::{FromRawHandle, IntoRawHandle, OwnedHandle}; + +use command_group::AsyncGroupChild; +use futures::{StreamExt, stream::BoxStream}; +use tokio::io::{AsyncWrite, AsyncWriteExt}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_util::io::ReaderStream; + +use crate::executors::ExecutorError; + +/// Duplicate stdout from AsyncGroupChild. +/// +/// Creates a stream that mirrors stdout of child process without consuming it. +/// +/// # Returns +/// A stream of `io::Result` that receives a copy of all stdout data. +pub fn duplicate_stdout( + child: &mut AsyncGroupChild, +) -> Result>, ExecutorError> { + // The implementation strategy is: + // 1. create a new file descriptor. + // 2. read the original stdout file descriptor. + // 3. write the data to both the new file descriptor and a duplicate stream. 
+ + // Take the original stdout + let original_stdout = child.inner().stdout.take().ok_or_else(|| { + ExecutorError::Io(std::io::Error::new( + std::io::ErrorKind::NotFound, + "Child process has no stdout", + )) + })?; + + // Create a new file descriptor in a cross-platform way (using os_pipe crate) + let (pipe_reader, pipe_writer) = os_pipe::pipe().map_err(|e| { + ExecutorError::Io(std::io::Error::other(format!("Failed to create pipe: {e}"))) + })?; + // Use fd as new child stdout + child.inner().stdout = Some(wrap_fd_as_child_stdout(pipe_reader)?); + + // Obtain writer from fd + let mut fd_writer = wrap_fd_as_tokio_writer(pipe_writer)?; + + // Create the duplicate stdout stream + let (dup_writer, dup_reader) = + tokio::sync::mpsc::unbounded_channel::>(); + + // Read original stdout and write to both new ChildStdout and duplicate stream + tokio::spawn(async move { + let mut stdout_stream = ReaderStream::new(original_stdout); + + while let Some(res) = stdout_stream.next().await { + match res { + Ok(data) => { + let _ = fd_writer.write_all(&data).await; + + let string_chunk = String::from_utf8_lossy(&data).into_owned(); + let _ = dup_writer.send(Ok(string_chunk)); + } + Err(err) => { + tracing::error!("Error reading from child stdout: {}", err); + let _ = dup_writer.send(Err(err)); + } + } + } + }); + + // Return the channel receiver as a boxed stream + Ok(Box::pin(UnboundedReceiverStream::new(dup_reader))) +} + +// ========================================= +// OS file descriptor helper functions +// ========================================= + +/// Convert os_pipe::PipeReader to tokio::process::ChildStdout +fn wrap_fd_as_child_stdout( + pipe_reader: os_pipe::PipeReader, +) -> Result { + #[cfg(unix)] + { + // On Unix: PipeReader -> raw fd -> OwnedFd -> std::process::ChildStdout -> tokio::process::ChildStdout + let raw_fd = pipe_reader.into_raw_fd(); + let owned_fd = unsafe { OwnedFd::from_raw_fd(raw_fd) }; + let std_stdout = std::process::ChildStdout::from(owned_fd); + tokio::process::ChildStdout::from_std(std_stdout).map_err(ExecutorError::Io) + } + + #[cfg(windows)] + { + // On Windows: PipeReader -> raw handle -> OwnedHandle -> std::process::ChildStdout -> tokio::process::ChildStdout + let raw_handle = pipe_reader.into_raw_handle(); + let owned_handle = unsafe { OwnedHandle::from_raw_handle(raw_handle) }; + let std_stdout = std::process::ChildStdout::from(owned_handle); + tokio::process::ChildStdout::from_std(std_stdout).map_err(ExecutorError::Io) + } +} + +/// Convert os_pipe::PipeWriter to a tokio file for async writing +fn wrap_fd_as_tokio_writer( + pipe_writer: os_pipe::PipeWriter, +) -> Result { + #[cfg(unix)] + { + // On Unix: PipeWriter -> raw fd -> OwnedFd -> std::fs::File -> tokio::fs::File + let raw_fd = pipe_writer.into_raw_fd(); + let owned_fd = unsafe { OwnedFd::from_raw_fd(raw_fd) }; + let std_file = std::fs::File::from(owned_fd); + Ok(tokio::fs::File::from_std(std_file)) + } + + #[cfg(windows)] + { + // On Windows: PipeWriter -> raw handle -> OwnedHandle -> std::fs::File -> tokio::fs::File + let raw_handle = pipe_writer.into_raw_handle(); + let owned_handle = unsafe { OwnedHandle::from_raw_handle(raw_handle) }; + let std_file = std::fs::File::from(owned_handle); + Ok(tokio::fs::File::from_std(std_file)) + } +} diff --git a/crates/local-deployment/Cargo.toml b/crates/local-deployment/Cargo.toml new file mode 100644 index 00000000..93405e6c --- /dev/null +++ b/crates/local-deployment/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "local-deployment" +version = "0.0.56" +edition 
= "2024" + +[dependencies] +db = { path = "../db" } +executors = { path="../executors" } +deployment = { path = "../deployment" } +services = { path = "../services" } +utils = { path = "../utils" } +tokio-util = { version = "0.7", features = ["io"] } +bytes = "1.0" +axum = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +anyhow = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] } +chrono = { version = "0.4", features = ["serde"] } +uuid = { version = "1.0", features = ["v4", "serde"] } +ts-rs = { workspace = true } +async-trait = "0.1" +rust-embed = "8.2" +pathdiff = "0.2.1" +ignore = "0.4" +command-group = { version = "5.0", features = ["with-tokio"] } +nix = { version = "0.29", features = ["signal", "process"] } +openssl-sys = { workspace = true } +regex = "1.11.1" +notify-rust = "4.11" +notify = "8.2.0" +sentry = { version = "0.41.0", features = ["anyhow", "backtrace", "panic", "debug-images"] } +sentry-tracing = { version = "0.41.0", features = ["backtrace"] } +reqwest = { version = "0.12", features = ["json"] } +futures = "0.3" +async-stream = "0.3" +json-patch = "2.0" +tokio = { workspace = true } +tokio-stream = { version = "0.1.17", features = ["sync"] } diff --git a/crates/local-deployment/src/command.rs b/crates/local-deployment/src/command.rs new file mode 100644 index 00000000..9d865720 --- /dev/null +++ b/crates/local-deployment/src/command.rs @@ -0,0 +1,43 @@ +use command_group::AsyncGroupChild; +#[cfg(unix)] +use nix::{ + sys::signal::{Signal, killpg}, + unistd::{Pid, getpgid}, +}; +use services::services::container::ContainerError; +use tokio::time::Duration; + +pub async fn kill_process_group(child: &mut AsyncGroupChild) -> Result<(), ContainerError> { + // hit the whole process group, not just the leader + #[cfg(unix)] + { + if let Some(pid) = child.inner().id() { + let pgid = getpgid(Some(Pid::from_raw(pid as i32))) + .map_err(|e| ContainerError::KillFailed(std::io::Error::other(e)))?; + + for sig in [Signal::SIGINT, Signal::SIGTERM, Signal::SIGKILL] { + if let Err(e) = killpg(pgid, sig) { + tracing::warn!( + "Failed to send signal {:?} to process group {}: {}", + sig, + pgid, + e + ); + } + tokio::time::sleep(Duration::from_secs(2)).await; + if child + .inner() + .try_wait() + .map_err(ContainerError::Io)? 
+ .is_some() + { + break; + } + } + } + } + + let _ = child.kill().await; + let _ = child.wait().await; + Ok(()) +} diff --git a/crates/local-deployment/src/container.rs b/crates/local-deployment/src/container.rs new file mode 100644 index 00000000..178a172f --- /dev/null +++ b/crates/local-deployment/src/container.rs @@ -0,0 +1,837 @@ +use std::{ + collections::{HashMap, HashSet}, + io, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; + +use anyhow::anyhow; +use async_stream::try_stream; +use async_trait::async_trait; +use axum::response::sse::Event; +use command_group::AsyncGroupChild; +use db::{ + DBService, + models::{ + execution_process::{ + ExecutionContext, ExecutionProcess, ExecutionProcessRunReason, ExecutionProcessStatus, + }, + executor_session::ExecutorSession, + project::Project, + task::{Task, TaskStatus}, + task_attempt::TaskAttempt, + }, +}; +use deployment::DeploymentError; +use executors::{ + actions::{Executable, ExecutorAction}, + logs::utils::ConversationPatch, +}; +use futures::{StreamExt, TryStreamExt, stream::select}; +use serde_json::json; +use services::services::{ + analytics::AnalyticsContext, + config::Config, + container::{ContainerError, ContainerRef, ContainerService}, + filesystem_watcher, + git::GitService, + notification::NotificationService, + worktree_manager::WorktreeManager, +}; +use tokio::{sync::RwLock, task::JoinHandle}; +use tokio_util::io::ReaderStream; +use utils::{ + log_msg::LogMsg, + msg_store::MsgStore, + text::{git_branch_id, short_uuid}, +}; +use uuid::Uuid; + +use crate::command; + +#[derive(Clone)] +pub struct LocalContainerService { + db: DBService, + child_store: Arc>>>>, + msg_stores: Arc>>>, + config: Arc>, + git: GitService, + analytics: Option, +} + +impl LocalContainerService { + pub fn new( + db: DBService, + msg_stores: Arc>>>, + config: Arc>, + git: GitService, + analytics: Option, + ) -> Self { + let child_store = Arc::new(RwLock::new(HashMap::new())); + + LocalContainerService { + db, + child_store, + msg_stores, + config, + git, + analytics, + } + } + + pub async fn get_child_from_store(&self, id: &Uuid) -> Option>> { + let map = self.child_store.read().await; + map.get(id).cloned() + } + + pub async fn add_child_to_store(&self, id: Uuid, exec: AsyncGroupChild) { + let mut map = self.child_store.write().await; + map.insert(id, Arc::new(RwLock::new(exec))); + } + + pub async fn remove_child_from_store(&self, id: &Uuid) { + let mut map = self.child_store.write().await; + map.remove(id); + } + + /// A context is finalized when + /// - The next action is None (no follow-up actions) + /// - The run reason is not DevServer + fn should_finalize(ctx: &ExecutionContext) -> bool { + ctx.execution_process + .executor_action() + .unwrap() + .next_action + .is_none() + && (!matches!( + ctx.execution_process.run_reason, + ExecutionProcessRunReason::DevServer + )) + } + + /// Defensively check for externally deleted worktrees and mark them as deleted in the database + async fn check_externally_deleted_worktrees(db: &DBService) -> Result<(), DeploymentError> { + let active_attempts = TaskAttempt::find_by_worktree_deleted(&db.pool).await?; + tracing::debug!( + "Checking {} active worktrees for external deletion...", + active_attempts.len() + ); + for (attempt_id, worktree_path) in active_attempts { + // Check if worktree directory exists + if !std::path::Path::new(&worktree_path).exists() { + // Worktree was deleted externally, mark as deleted in database + if let Err(e) = TaskAttempt::mark_worktree_deleted(&db.pool, 
attempt_id).await { + tracing::error!( + "Failed to mark externally deleted worktree as deleted for attempt {}: {}", + attempt_id, + e + ); + } else { + tracing::info!( + "Marked externally deleted worktree as deleted for attempt {} (path: {})", + attempt_id, + worktree_path + ); + } + } + } + Ok(()) + } + + /// Find and delete orphaned worktrees that don't correspond to any task attempts + async fn cleanup_orphaned_worktrees(&self) { + // Check if orphan cleanup is disabled via environment variable + if std::env::var("DISABLE_WORKTREE_ORPHAN_CLEANUP").is_ok() { + tracing::debug!( + "Orphan worktree cleanup is disabled via DISABLE_WORKTREE_ORPHAN_CLEANUP environment variable" + ); + return; + } + let worktree_base_dir = WorktreeManager::get_worktree_base_dir(); + if !worktree_base_dir.exists() { + tracing::debug!( + "Worktree base directory {} does not exist, skipping orphan cleanup", + worktree_base_dir.display() + ); + return; + } + let entries = match std::fs::read_dir(&worktree_base_dir) { + Ok(entries) => entries, + Err(e) => { + tracing::error!( + "Failed to read worktree base directory {}: {}", + worktree_base_dir.display(), + e + ); + return; + } + }; + for entry in entries { + let entry = match entry { + Ok(entry) => entry, + Err(e) => { + tracing::warn!("Failed to read directory entry: {}", e); + continue; + } + }; + let path = entry.path(); + // Only process directories + if !path.is_dir() { + continue; + } + + let worktree_path_str = path.to_string_lossy().to_string(); + if let Ok(false) = + TaskAttempt::container_ref_exists(&self.db().pool, &worktree_path_str).await + { + // This is an orphaned worktree - delete it + tracing::info!("Found orphaned worktree: {}", worktree_path_str); + if let Err(e) = WorktreeManager::cleanup_worktree(&path, None).await { + tracing::error!( + "Failed to remove orphaned worktree {}: {}", + worktree_path_str, + e + ); + } else { + tracing::info!( + "Successfully removed orphaned worktree: {}", + worktree_path_str + ); + } + } + } + } + + pub async fn cleanup_expired_attempt( + db: &DBService, + attempt_id: Uuid, + worktree_path: PathBuf, + git_repo_path: PathBuf, + ) -> Result<(), DeploymentError> { + WorktreeManager::cleanup_worktree(&worktree_path, Some(&git_repo_path)).await?; + // Mark worktree as deleted in database after successful cleanup + TaskAttempt::mark_worktree_deleted(&db.pool, attempt_id).await?; + tracing::info!("Successfully marked worktree as deleted for attempt {attempt_id}",); + Ok(()) + } + + pub async fn cleanup_expired_attempts(db: &DBService) -> Result<(), DeploymentError> { + let expired_attempts = TaskAttempt::find_expired_for_cleanup(&db.pool).await?; + if expired_attempts.is_empty() { + tracing::debug!("No expired worktrees found"); + return Ok(()); + } + tracing::info!( + "Found {} expired worktrees to clean up", + expired_attempts.len() + ); + for (attempt_id, worktree_path, git_repo_path) in expired_attempts { + Self::cleanup_expired_attempt( + db, + attempt_id, + PathBuf::from(worktree_path), + PathBuf::from(git_repo_path), + ) + .await + .unwrap_or_else(|e| { + tracing::error!("Failed to clean up expired attempt {attempt_id}: {e}",); + }); + } + Ok(()) + } + + pub async fn spawn_worktree_cleanup(&self) { + let db = self.db.clone(); + let mut cleanup_interval = tokio::time::interval(tokio::time::Duration::from_secs(1800)); // 30 minutes + self.cleanup_orphaned_worktrees().await; + tokio::spawn(async move { + loop { + cleanup_interval.tick().await; + tracing::info!("Starting periodic worktree cleanup..."); + 
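+                // First reconcile DB state for worktrees removed outside the app, then delete
+                // attempts whose worktrees have expired.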
Self::check_externally_deleted_worktrees(&db) + .await + .unwrap_or_else(|e| { + tracing::error!("Failed to check externally deleted worktrees: {}", e); + }); + Self::cleanup_expired_attempts(&db) + .await + .unwrap_or_else(|e| { + tracing::error!("Failed to clean up expired worktree attempts: {}", e) + }); + } + }); + } + + /// Spawn a background task that polls the child process for completion and + /// cleans up the execution entry when it exits. + pub fn spawn_exit_monitor(&self, exec_id: &Uuid) -> JoinHandle<()> { + let exec_id = *exec_id; + let child_store = self.child_store.clone(); + let msg_stores = self.msg_stores.clone(); + let db = self.db.clone(); + let config = self.config.clone(); + let container = self.clone(); + let analytics = self.analytics.clone(); + + tokio::spawn(async move { + loop { + let status_opt = { + let child_lock = { + let map = child_store.read().await; + map.get(&exec_id) + .cloned() + .unwrap_or_else(|| panic!("Child handle missing for {exec_id}")) + }; + + let mut child_handler = child_lock.write().await; + match child_handler.try_wait() { + Ok(Some(status)) => Some(Ok(status)), + Ok(None) => None, + Err(e) => Some(Err(e)), + } + }; + + // Update execution process and cleanup if exit + if let Some(status_result) = status_opt { + // Update execution process record with completion info + let (exit_code, status) = match status_result { + Ok(exit_status) => { + let code = exit_status.code().unwrap_or(-1) as i64; + let status = if exit_status.success() { + ExecutionProcessStatus::Completed + } else { + ExecutionProcessStatus::Failed + }; + (Some(code), status) + } + Err(_) => (None, ExecutionProcessStatus::Failed), + }; + + if !ExecutionProcess::was_killed(&db.pool, exec_id).await + && let Err(e) = ExecutionProcess::update_completion( + &db.pool, + exec_id, + status.clone(), + exit_code, + ) + .await + { + tracing::error!("Failed to update execution process completion: {}", e); + } + + if let Ok(ctx) = ExecutionProcess::load_context(&db.pool, exec_id).await { + if matches!( + ctx.execution_process.status, + ExecutionProcessStatus::Completed + ) && exit_code == Some(0) + { + if let Err(e) = container.try_commit_changes(&ctx).await { + tracing::error!("Failed to commit changes after execution: {}", e); + } + + // If the process exited successfully, start the next action + if let Err(e) = container.try_start_next_action(&ctx).await { + tracing::error!( + "Failed to start next action after completion: {}", + e + ); + } + } + + if Self::should_finalize(&ctx) { + if let Err(e) = + Task::update_status(&db.pool, ctx.task.id, TaskStatus::InReview) + .await + { + tracing::error!("Failed to update task status to InReview: {e}"); + } + let notify_cfg = config.read().await.notifications.clone(); + NotificationService::notify_execution_halted(notify_cfg, &ctx).await; + } + + // Fire event when CodingAgent execution has finished + if matches!( + &ctx.execution_process.run_reason, + ExecutionProcessRunReason::CodingAgent + ) && let Some(analytics) = &analytics + { + analytics.analytics_service.track_event(&analytics.user_id, "task_attempt_finished", Some(json!({ + "task_id": ctx.task.id.to_string(), + "project_id": ctx.task.project_id.to_string(), + "attempt_id": ctx.task_attempt.id.to_string(), + "execution_success": matches!(ctx.execution_process.status, ExecutionProcessStatus::Completed), + "exit_code": ctx.execution_process.exit_code, + }))); + } + } + + // Cleanup msg store + if let Some(msg_arc) = msg_stores.write().await.remove(&exec_id) { + msg_arc.push_finished(); + 
tokio::time::sleep(Duration::from_millis(50)).await; // Wait for the finish message to propagate + match Arc::try_unwrap(msg_arc) { + Ok(inner) => drop(inner), + Err(arc) => tracing::error!( + "There are still {} strong Arcs to MsgStore for {}", + Arc::strong_count(&arc), + exec_id + ), + } + } + + // Cleanup child handle + child_store.write().await.remove(&exec_id); + break; + } + + // still running, sleep and try again + tokio::time::sleep(Duration::from_millis(250)).await; + } + }) + } + + pub fn dir_name_from_task_attempt(attempt_id: &Uuid, task_title: &str) -> String { + let task_title_id = git_branch_id(task_title); + format!("vk-{}-{}", short_uuid(attempt_id), task_title_id) + } + + async fn track_child_msgs_in_store(&self, id: Uuid, child: &mut AsyncGroupChild) { + let store = Arc::new(MsgStore::new()); + + let out = child.inner().stdout.take().expect("no stdout"); + let err = child.inner().stderr.take().expect("no stderr"); + + // Map stdout bytes -> LogMsg::Stdout + let out = ReaderStream::new(out) + .map_ok(|chunk| LogMsg::Stdout(String::from_utf8_lossy(&chunk).into_owned())); + + // Map stderr bytes -> LogMsg::Stderr + let err = ReaderStream::new(err) + .map_ok(|chunk| LogMsg::Stderr(String::from_utf8_lossy(&chunk).into_owned())); + + // If you have a JSON Patch source, map it to LogMsg::JsonPatch too, then select all three. + + // Merge and forward into the store + let merged = select(out, err); // Stream> + store.clone().spawn_forwarder(merged); + + let mut map = self.msg_stores().write().await; + map.insert(id, store); + } +} + +#[async_trait] +impl ContainerService for LocalContainerService { + fn msg_stores(&self) -> &Arc>>> { + &self.msg_stores + } + + fn db(&self) -> &DBService { + &self.db + } + + fn git(&self) -> &GitService { + &self.git + } + + fn task_attempt_to_current_dir(&self, task_attempt: &TaskAttempt) -> PathBuf { + PathBuf::from(task_attempt.container_ref.clone().unwrap_or_default()) + } + + /// Create a container + async fn create(&self, task_attempt: &TaskAttempt) -> Result { + let task = task_attempt + .parent_task(&self.db.pool) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + let task_branch_name = + LocalContainerService::dir_name_from_task_attempt(&task_attempt.id, &task.title); + let worktree_path = WorktreeManager::get_worktree_base_dir().join(&task_branch_name); + + let project = task + .parent_project(&self.db.pool) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + WorktreeManager::create_worktree( + &project.git_repo_path, + &task_branch_name, + &worktree_path, + Some(&task_attempt.base_branch), + true, // create new branch + ) + .await?; + + // Update both container_ref and branch in the database + TaskAttempt::update_container_ref( + &self.db.pool, + task_attempt.id, + &worktree_path.to_string_lossy(), + ) + .await?; + + TaskAttempt::update_branch(&self.db.pool, task_attempt.id, &task_branch_name).await?; + + Ok(worktree_path.to_string_lossy().to_string()) + } + + async fn delete_inner(&self, task_attempt: &TaskAttempt) -> Result<(), ContainerError> { + // cleanup the container, here that means deleting the worktree + let task = task_attempt + .parent_task(&self.db.pool) + .await?
+ .ok_or(sqlx::Error::RowNotFound)?; + let git_repo_path = match Project::find_by_id(&self.db.pool, task.project_id).await { + Ok(Some(project)) => Some(project.git_repo_path.clone()), + Ok(None) => None, + Err(e) => { + tracing::error!("Failed to fetch project {}: {}", task.project_id, e); + None + } + }; + WorktreeManager::cleanup_worktree( + &PathBuf::from(task_attempt.container_ref.clone().unwrap_or_default()), + git_repo_path.as_deref(), + ) + .await + .unwrap_or_else(|e| { + tracing::warn!( + "Failed to clean up worktree for task attempt {}: {}", + task_attempt.id, + e + ); + }); + Ok(()) + } + + async fn ensure_container_exists( + &self, + task_attempt: &TaskAttempt, + ) -> Result { + // Get required context + let task = task_attempt + .parent_task(&self.db.pool) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + let project = task + .parent_project(&self.db.pool) + .await? + .ok_or(sqlx::Error::RowNotFound)?; + + let container_ref = task_attempt.container_ref.as_ref().ok_or_else(|| { + ContainerError::Other(anyhow!("Container ref not found for task attempt")) + })?; + let worktree_path = PathBuf::from(container_ref); + + let branch_name = task_attempt + .branch + .as_ref() + .ok_or_else(|| ContainerError::Other(anyhow!("Branch not found for task attempt")))?; + + WorktreeManager::ensure_worktree_exists( + &project.git_repo_path, + branch_name, + &worktree_path, + ) + .await?; + + Ok(container_ref.to_string()) + } + + async fn start_execution_inner( + &self, + task_attempt: &TaskAttempt, + execution_process: &ExecutionProcess, + executor_action: &ExecutorAction, + ) -> Result<(), ContainerError> { + // Get the worktree path + let container_ref = task_attempt + .container_ref + .as_ref() + .ok_or(ContainerError::Other(anyhow!( + "Container ref not found for task attempt" + )))?; + let current_dir = PathBuf::from(container_ref); + + // Create the child and stream, add to execution tracker + let mut child = executor_action.spawn(&current_dir).await?; + + self.track_child_msgs_in_store(execution_process.id, &mut child) + .await; + + self.add_child_to_store(execution_process.id, child).await; + + // Spawn exit monitor + let _hn = self.spawn_exit_monitor(&execution_process.id); + + Ok(()) + } + + async fn stop_execution( + &self, + execution_process: &ExecutionProcess, + ) -> Result<(), ContainerError> { + let child = self + .get_child_from_store(&execution_process.id) + .await + .ok_or_else(|| { + ContainerError::Other(anyhow!("Child process not found for execution")) + })?; + ExecutionProcess::update_completion( + &self.db.pool, + execution_process.id, + ExecutionProcessStatus::Killed, + None, + ) + .await?; + + // Kill the child process and remove from the store + { + let mut child_guard = child.write().await; + if let Err(e) = command::kill_process_group(&mut child_guard).await { + tracing::error!( + "Failed to stop execution process {}: {}", + execution_process.id, + e + ); + return Err(e); + } + } + self.remove_child_from_store(&execution_process.id).await; + + // Mark the process finished in the MsgStore + if let Some(msg) = self.msg_stores.write().await.remove(&execution_process.id) { + msg.push_finished(); + } + + // Update task status to InReview when execution is stopped + if let Ok(ctx) = ExecutionProcess::load_context(&self.db.pool, execution_process.id).await + && !matches!( + ctx.execution_process.run_reason, + ExecutionProcessRunReason::DevServer + ) + && let Err(e) = + Task::update_status(&self.db.pool, ctx.task.id, TaskStatus::InReview).await + { + tracing::error!("Failed to
update task status to InReview: {e}"); + } + + tracing::debug!( + "Execution process {} stopped successfully", + execution_process.id + ); + + Ok(()) + } + + async fn get_diff( + &self, + task_attempt: &TaskAttempt, + ) -> Result>, ContainerError> + { + let container_ref = self.ensure_container_exists(task_attempt).await?; + + let worktree_dir = PathBuf::from(&container_ref); + + // Return error if directory doesn't exist + if !worktree_dir.exists() { + return Err(ContainerError::Other(anyhow!( + "Worktree directory not found" + ))); + } + + let project_git_repo_path = task_attempt + .parent_task(&self.db().pool) + .await? + .ok_or(ContainerError::Other(anyhow!("Parent task not found")))? + .parent_project(&self.db().pool) + .await? + .ok_or(ContainerError::Other(anyhow!("Parent project not found")))? + .git_repo_path; + + // Fast-exit for merged attempts - they never change + if let Some(merge_commit_id) = &task_attempt.merge_commit { + let existing_diff = self.git().get_enhanced_diff( + &project_git_repo_path, + std::path::Path::new(""), + Some(merge_commit_id.as_str()), + &task_attempt.base_branch, + None::<&[&str]>, + )?; + + let stream = futures::stream::iter(existing_diff.files.into_iter().map(|file_diff| { + let patch = ConversationPatch::add_file_diff(file_diff); + let event = LogMsg::JsonPatch(patch).to_sse_event(); + Ok::<_, std::io::Error>(event) + })) + .chain(futures::stream::once(async { + Ok::<_, std::io::Error>(LogMsg::Finished.to_sse_event()) + })) + .boxed(); + + return Ok(stream); + } + + // Get initial diff + let initial_diff = self.git().get_enhanced_diff( + &project_git_repo_path, + &worktree_dir, + None, + &task_attempt.base_branch, + None::<&[&str]>, + )?; + + // Create initial stream + let initial_stream = + futures::stream::iter(initial_diff.files.into_iter().map(|file_diff| { + let patch = ConversationPatch::add_file_diff(file_diff); + let event = LogMsg::JsonPatch(patch).to_sse_event(); + Ok::<_, std::io::Error>(event) + })); + + // Create live diff stream for ongoing changes + let git_service = self.git().clone(); + let project_repo_path = project_git_repo_path.clone(); + let base_branch = task_attempt.base_branch.clone(); + let worktree_path = worktree_dir.clone(); + + let live_stream = try_stream! 
{ + // Create filesystem watcher + let (_debouncer, mut rx, canonical_worktree_path) = filesystem_watcher::async_watcher(worktree_path.clone()) + .map_err(|e| io::Error::other(e.to_string()))?; + + while let Some(res) = rx.next().await { + match res { + Ok(events) => { + // Extract changed file paths relative to worktree + let changed_paths: Vec = events + .iter() + .flat_map(|event| &event.paths) + .filter_map(|path| { + // Try canonical first, fall back to original for non-macOS paths + path.strip_prefix(&canonical_worktree_path) + .or_else(|_| path.strip_prefix(&worktree_path)) + .ok() + .map(|p| p.to_string_lossy().replace('\\', "/")) + }) + .collect(); + + if !changed_paths.is_empty() { + // Generate diff for only the changed files + let diff = git_service.get_enhanced_diff( + &project_repo_path, + &worktree_path, + None, + &base_branch, + Some(&changed_paths), + ).map_err(|e| io::Error::other(e.to_string()))?; + + // Track which files still have diffs + let mut still_dirty: HashSet = HashSet::new(); + + // Send ADD/REPLACE messages for files that still have diffs + for file_diff in diff.files { + still_dirty.insert(file_diff.path.clone()); + let patch = ConversationPatch::add_file_diff(file_diff); + let event = LogMsg::JsonPatch(patch).to_sse_event(); + yield event; + } + + // Send REMOVE messages for files that changed but no longer have diffs + for path in &changed_paths { + if !still_dirty.contains(path) { + let patch = ConversationPatch::remove_file_diff(path); + let event = LogMsg::JsonPatch(patch).to_sse_event(); + yield event; + } + } + } + } + Err(errors) => { + // Convert filesystem watcher errors to io::Error + let error_msg = errors + .iter() + .map(|e| e.to_string()) + .collect::>() + .join("; "); + Err(io::Error::other(error_msg))?; + } + } + } + }; + + // Combine initial snapshot with live updates + let combined_stream = select(initial_stream, live_stream); + Ok(combined_stream.boxed()) + } + + async fn try_commit_changes(&self, ctx: &ExecutionContext) -> Result<(), ContainerError> { + if !matches!( + ctx.execution_process.run_reason, + ExecutionProcessRunReason::CodingAgent | ExecutionProcessRunReason::CleanupScript, + ) { + return Ok(()); + } + + let message = match ctx.execution_process.run_reason { + ExecutionProcessRunReason::CodingAgent => { + // Try to retrieve the task summary from the executor session + // otherwise fallback to default message + match ExecutorSession::find_by_execution_process_id( + &self.db().pool, + ctx.execution_process.id, + ) + .await + { + Ok(Some(session)) if session.summary.is_some() => session.summary.unwrap(), + Ok(_) => { + tracing::debug!( + "No summary found for execution process {}, using default message", + ctx.execution_process.id + ); + format!( + "Commit changes from coding agent for task attempt {}", + ctx.task_attempt.id + ) + } + Err(e) => { + tracing::debug!( + "Failed to retrieve summary for execution process {}: {}", + ctx.execution_process.id, + e + ); + format!( + "Commit changes from coding agent for task attempt {}", + ctx.task_attempt.id + ) + } + } + } + ExecutionProcessRunReason::CleanupScript => { + format!( + "Cleanup script changes for task attempt {}", + ctx.task_attempt.id + ) + } + _ => Err(ContainerError::Other(anyhow::anyhow!( + "Invalid run reason for commit" + )))?, + }; + + let container_ref = ctx.task_attempt.container_ref.as_ref().ok_or_else(|| { + ContainerError::Other(anyhow::anyhow!("Container reference not found")) + })?; + + tracing::debug!( + "Committing changes for task attempt {} at path {:?}: 
'{}'", + ctx.task_attempt.id, + &container_ref, + message + ); + + Ok(self.git().commit(Path::new(container_ref), &message)?) + } +} diff --git a/crates/local-deployment/src/lib.rs b/crates/local-deployment/src/lib.rs new file mode 100644 index 00000000..4a18834a --- /dev/null +++ b/crates/local-deployment/src/lib.rs @@ -0,0 +1,148 @@ +use std::{collections::HashMap, sync::Arc}; + +use async_trait::async_trait; +use db::DBService; +use deployment::{Deployment, DeploymentError}; +use services::services::{ + analytics::{AnalyticsConfig, AnalyticsContext, AnalyticsService, generate_user_id}, + auth::AuthService, + config::{Config, load_config_from_file, save_config_to_file}, + container::ContainerService, + events::EventService, + filesystem::FilesystemService, + git::GitService, + sentry::SentryService, +}; +use tokio::sync::RwLock; +use utils::{assets::config_path, msg_store::MsgStore}; +use uuid::Uuid; + +use crate::container::LocalContainerService; + +mod command; +pub mod container; + +#[derive(Clone)] +pub struct LocalDeployment { + config: Arc>, + sentry: SentryService, + user_id: String, + db: DBService, + analytics: Option, + msg_stores: Arc>>>, + container: LocalContainerService, + git: GitService, + auth: AuthService, + filesystem: FilesystemService, + events: EventService, +} + +#[async_trait] +impl Deployment for LocalDeployment { + async fn new() -> Result { + let raw_config = load_config_from_file(&config_path()).await; + // Immediately save config, as it may have just been migrated + save_config_to_file(&raw_config, &config_path()).await?; + + let config = Arc::new(RwLock::new(raw_config)); + let sentry = SentryService::new(); + let user_id = generate_user_id(); + let analytics = AnalyticsConfig::new().map(AnalyticsService::new); + let git = GitService::new(); + let msg_stores = Arc::new(RwLock::new(HashMap::new())); + let auth = AuthService::new(); + let filesystem = FilesystemService::new(); + + // Create shared components for EventService + let events_msg_store = Arc::new(MsgStore::new()); + let events_entry_count = Arc::new(RwLock::new(0)); + + // Create DB with event hooks + let db = { + let hook = EventService::create_hook( + events_msg_store.clone(), + events_entry_count.clone(), + DBService::new().await?, // Temporary DB service for the hook + ); + DBService::new_with_after_connect(hook).await? 
+ }; + + // We need to make analytics accessible to the ContainerService + // TODO: Handle this more gracefully + let analytics_ctx = analytics.as_ref().map(|s| AnalyticsContext { + user_id: user_id.clone(), + analytics_service: s.clone(), + }); + let container = LocalContainerService::new( + db.clone(), + msg_stores.clone(), + config.clone(), + git.clone(), + analytics_ctx, + ); + container.spawn_worktree_cleanup().await; + + let events = EventService::new(db.clone(), events_msg_store, events_entry_count); + + Ok(Self { + config, + sentry, + user_id, + db, + analytics, + msg_stores, + container, + git, + auth, + filesystem, + events, + }) + } + + fn user_id(&self) -> &str { + &self.user_id + } + + fn shared_types() -> Vec { + vec![] + } + + fn config(&self) -> &Arc> { + &self.config + } + + fn sentry(&self) -> &SentryService { + &self.sentry + } + + fn db(&self) -> &DBService { + &self.db + } + + fn analytics(&self) -> &Option { + &self.analytics + } + + fn container(&self) -> &impl ContainerService { + &self.container + } + fn auth(&self) -> &AuthService { + &self.auth + } + + fn git(&self) -> &GitService { + &self.git + } + + fn filesystem(&self) -> &FilesystemService { + &self.filesystem + } + + fn msg_stores(&self) -> &Arc>>> { + &self.msg_stores + } + + fn events(&self) -> &EventService { + &self.events + } +} diff --git a/backend/Cargo.toml b/crates/server/Cargo.toml similarity index 65% rename from backend/Cargo.toml rename to crates/server/Cargo.toml index bea7d2bb..7a81688b 100644 --- a/backend/Cargo.toml +++ b/crates/server/Cargo.toml @@ -1,27 +1,22 @@ [package] -name = "vibe-kanban" +name = "server" version = "0.0.56" edition = "2021" -default-run = "vibe-kanban" -build = "build.rs" - -[lib] -name = "vibe_kanban" -path = "src/lib.rs" - -[[bin]] -name = "cloud-runner" -path = "src/bin/cloud_runner.rs" +default-run = "server" [lints.clippy] uninlined-format-args = "allow" [dependencies] +deployment = { path = "../deployment" } +executors = { path = "../executors" } +local-deployment = { path = "../local-deployment" } +utils = { path = "../utils" } +db = { path = "../db" } +services = { path = "../services" } tokio = { workspace = true } tokio-util = { version = "0.7", features = ["io"] } -bytes = "1.0" axum = { workspace = true } -tower-http = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } anyhow = { workspace = true } @@ -30,38 +25,26 @@ tracing-subscriber = { workspace = true } sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] } chrono = { version = "0.4", features = ["serde"] } uuid = { version = "1.0", features = ["v4", "serde"] } -ts-rs = { version = "9.0", features = ["uuid-impl", "chrono-impl", "no-serde-warnings"] } -dirs = "5.0" -xdg = "3.0" -git2 = "0.18" +ts-rs = { workspace = true } async-trait = "0.1" -libc = "0.2" -rust-embed = "8.2" -mime_guess = "2.0" -directories = "6.0.0" -open = "5.3.2" -pathdiff = "0.2.1" -ignore = "0.4" command-group = { version = "5.0", features = ["with-tokio"] } nix = { version = "0.29", features = ["signal", "process"] } openssl-sys = { workspace = true } rmcp = { version = "0.2.1", features = ["server", "transport-io"] } schemars = "0.8" regex = "1.11.1" -notify-rust = "4.11" -octocrab = "0.44" -os_info = "3.12.0" sentry = { version = "0.41.0", features = ["anyhow", "backtrace", "panic", "debug-images"] } -sentry-tower = "0.41.0" sentry-tracing = { version = "0.41.0", features = ["backtrace"] } -reqwest = { version = "0.11", features = ["json"] } +reqwest = { 
version = "0.12", features = ["json"] } strip-ansi-escapes = "0.2.1" -urlencoding = "2.1.3" -lazy_static = "1.4" +thiserror = { workspace = true } +os_info = "3.12.0" futures-util = "0.3" -async-stream = "0.3" -json-patch = "2.0" -backon = "1.5.1" +ignore = "0.4" +git2 = "0.18" +mime_guess = "2.0" +rust-embed = "8.2" +octocrab = "0.44" [dev-dependencies] tempfile = "3.8" @@ -69,5 +52,4 @@ tower = { version = "0.4", features = ["util"] } [build-dependencies] dotenv = "0.15" -ts-rs = { version = "9.0", features = ["uuid-impl", "chrono-impl", "no-serde-warnings"] } diff --git a/backend/build.rs b/crates/server/build.rs similarity index 95% rename from backend/build.rs rename to crates/server/build.rs index d1ca3cc4..b2b12760 100644 --- a/backend/build.rs +++ b/crates/server/build.rs @@ -17,7 +17,7 @@ fn main() { } // Create frontend/dist directory if it doesn't exist - let dist_path = Path::new("../frontend/dist"); + let dist_path = Path::new("../../frontend/dist"); if !dist_path.exists() { println!("cargo:warning=Creating dummy frontend/dist directory for compilation"); fs::create_dir_all(dist_path).unwrap(); diff --git a/crates/server/src/bin/generate_types.rs b/crates/server/src/bin/generate_types.rs new file mode 100644 index 00000000..d3dcce7d --- /dev/null +++ b/crates/server/src/bin/generate_types.rs @@ -0,0 +1,121 @@ +use std::{env, fs, path::Path}; + +use ts_rs::TS; + +fn generate_types_content() -> String { + // 4. Friendly banner + const HEADER: &str = "// This file was generated by `crates/core/src/bin/generate_types.rs`.\n +// Do not edit this file manually.\n +// If you are an AI, and you absolutely have to edit this file, please confirm with the user first."; + + let decls: Vec = vec![ + services::services::filesystem::DirectoryEntry::decl(), + services::services::filesystem::DirectoryListResponse::decl(), + db::models::project::Project::decl(), + db::models::project::ProjectWithBranch::decl(), + db::models::project::CreateProject::decl(), + db::models::project::UpdateProject::decl(), + db::models::project::SearchResult::decl(), + db::models::project::SearchMatchType::decl(), + db::models::task_template::TaskTemplate::decl(), + db::models::task_template::CreateTaskTemplate::decl(), + db::models::task_template::UpdateTaskTemplate::decl(), + db::models::task::TaskStatus::decl(), + db::models::task::Task::decl(), + db::models::task::TaskWithAttemptStatus::decl(), + db::models::task::CreateTask::decl(), + db::models::task::UpdateTask::decl(), + utils::response::ApiResponse::<()>::decl(), + server::routes::config::UserSystemInfo::decl(), + server::routes::config::Environment::decl(), + server::routes::task_attempts::CreateFollowUpAttempt::decl(), + server::routes::task_attempts::CreateGitHubPrRequest::decl(), + services::services::github_service::GitHubServiceError::decl(), + services::services::config::Config::decl(), + services::services::config::NotificationConfig::decl(), + services::services::config::ThemeMode::decl(), + services::services::config::EditorConfig::decl(), + services::services::config::EditorType::decl(), + services::services::config::GitHubConfig::decl(), + services::services::config::SoundFile::decl(), + services::services::auth::DeviceFlowStartResponse::decl(), + server::routes::auth::DevicePollStatus::decl(), + server::routes::auth::CheckTokenResponse::decl(), + services::services::git::GitBranch::decl(), + services::services::git::BranchStatus::decl(), + utils::diff::WorktreeDiff::decl(), + utils::diff::FileDiff::decl(), + utils::diff::DiffChunk::decl(), + 
utils::diff::DiffChunkType::decl(), + services::services::github_service::RepositoryInfo::decl(), + executors::executors::BaseCodingAgent::decl(), + executors::command::CommandBuilder::decl(), + executors::command::AgentProfile::decl(), + executors::command::AgentProfiles::decl(), + executors::actions::coding_agent_initial::CodingAgentInitialRequest::decl(), + executors::actions::coding_agent_follow_up::CodingAgentFollowUpRequest::decl(), + server::routes::task_attempts::CreateTaskAttemptBody::decl(), + server::routes::task_attempts::RebaseTaskAttemptRequest::decl(), + db::models::task_attempt::TaskAttempt::decl(), + db::models::execution_process::ExecutionProcess::decl(), + db::models::execution_process::ExecutionProcessSummary::decl(), + db::models::execution_process::ExecutionProcessStatus::decl(), + db::models::execution_process::ExecutionProcessRunReason::decl(), + services::services::events::EventPatch::decl(), + services::services::events::EventPatchInner::decl(), + services::services::events::RecordTypes::decl(), + executors::logs::NormalizedConversation::decl(), + executors::logs::NormalizedEntry::decl(), + executors::logs::NormalizedEntryType::decl(), + executors::logs::ActionType::decl(), + executors::logs::utils::patch::PatchType::decl(), + ]; + + let body = decls + .into_iter() + .map(|d| { + let trimmed = d.trim_start(); + if trimmed.starts_with("export") { + d + } else { + format!("export {trimmed}") + } + }) + .collect::>() + .join("\n\n"); + + format!("{HEADER}\n\n{body}") +} + +fn main() { + let args: Vec = env::args().collect(); + let check_mode = args.iter().any(|arg| arg == "--check"); + + // 1. Make sure ../shared exists + let shared_path = Path::new("shared"); + fs::create_dir_all(shared_path).expect("cannot create shared"); + + println!("Generating TypeScript types…"); + + // 2. Let ts-rs write its per-type files here (handy for debugging) + env::set_var("TS_RS_EXPORT_DIR", shared_path.to_str().unwrap()); + + let generated = generate_types_content(); + let types_path = shared_path.join("types.ts"); + + if check_mode { + // Read the current file + let current = fs::read_to_string(&types_path).unwrap_or_default(); + if current == generated { + println!("✅ shared/types.ts is up to date."); + std::process::exit(0); + } else { + eprintln!("❌ shared/types.ts is not up to date. 
Please run 'npm run generate-types' and commit the changes."); + std::process::exit(1); + } + } else { + // Write the file as before + fs::write(&types_path, generated).expect("unable to write types.ts"); + println!("✅ TypeScript types generated in shared/"); + } +} diff --git a/backend/src/bin/mcp_task_server.rs b/crates/server/src/bin/mcp_task_server.rs similarity index 95% rename from backend/src/bin/mcp_task_server.rs rename to crates/server/src/bin/mcp_task_server.rs index d5065459..e22c06ab 100644 --- a/backend/src/bin/mcp_task_server.rs +++ b/crates/server/src/bin/mcp_task_server.rs @@ -1,9 +1,10 @@ use std::str::FromStr; use rmcp::{transport::stdio, ServiceExt}; +use server::mcp::task_server::TaskServer; use sqlx::{sqlite::SqliteConnectOptions, SqlitePool}; use tracing_subscriber::{prelude::*, EnvFilter}; -use vibe_kanban::{mcp::task_server::TaskServer, sentry_layer, utils::asset_dir}; +use utils::{assets::asset_dir, sentry::sentry_layer}; fn main() -> anyhow::Result<()> { let environment = if cfg!(debug_assertions) { diff --git a/crates/server/src/error.rs b/crates/server/src/error.rs new file mode 100644 index 00000000..39bc3efa --- /dev/null +++ b/crates/server/src/error.rs @@ -0,0 +1,70 @@ +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + Json, +}; +use db::models::{project::ProjectError, task_attempt::TaskAttemptError}; +use deployment::DeploymentError; +use executors::executors::ExecutorError; +use git2::Error as Git2Error; +use services::services::{ + auth::AuthError, config::ConfigError, container::ContainerError, git::GitServiceError, + github_service::GitHubServiceError, worktree_manager::WorktreeError, +}; +use thiserror::Error; +use utils::response::ApiResponse; + +#[derive(Debug, Error, ts_rs::TS)] +#[ts(type = "string")] +pub enum ApiError { + #[error(transparent)] + Project(#[from] ProjectError), + #[error(transparent)] + TaskAttempt(#[from] TaskAttemptError), + #[error(transparent)] + GitService(#[from] GitServiceError), + #[error(transparent)] + GitHubService(#[from] GitHubServiceError), + #[error(transparent)] + Auth(#[from] AuthError), + #[error(transparent)] + Deployment(#[from] DeploymentError), + #[error(transparent)] + Container(#[from] ContainerError), + #[error(transparent)] + Executor(#[from] ExecutorError), + #[error(transparent)] + Database(#[from] sqlx::Error), + #[error(transparent)] + Worktree(#[from] WorktreeError), + #[error(transparent)] + Config(#[from] ConfigError), +} + +impl From for ApiError { + fn from(err: Git2Error) -> Self { + ApiError::GitService(GitServiceError::from(err)) + } +} + +impl IntoResponse for ApiError { + fn into_response(self) -> Response { + let (status_code, error_type) = match &self { + ApiError::Project(_) => (StatusCode::INTERNAL_SERVER_ERROR, "ProjectError"), + ApiError::TaskAttempt(_) => (StatusCode::INTERNAL_SERVER_ERROR, "TaskAttemptError"), + ApiError::GitService(_) => (StatusCode::INTERNAL_SERVER_ERROR, "GitServiceError"), + ApiError::GitHubService(_) => (StatusCode::INTERNAL_SERVER_ERROR, "GitHubServiceError"), + ApiError::Auth(_) => (StatusCode::INTERNAL_SERVER_ERROR, "AuthError"), + ApiError::Deployment(_) => (StatusCode::INTERNAL_SERVER_ERROR, "DeploymentError"), + ApiError::Container(_) => (StatusCode::INTERNAL_SERVER_ERROR, "ContainerError"), + ApiError::Executor(_) => (StatusCode::INTERNAL_SERVER_ERROR, "ExecutorError"), + ApiError::Database(_) => (StatusCode::INTERNAL_SERVER_ERROR, "DatabaseError"), + ApiError::Worktree(_) => (StatusCode::INTERNAL_SERVER_ERROR, "WorktreeError"), + 
ApiError::Config(_) => (StatusCode::INTERNAL_SERVER_ERROR, "ConfigError"), + }; + + let error_message = format!("{}: {}", error_type, self); + let response = ApiResponse::<()>::error(&error_message); + (status_code, Json(response)).into_response() + } +} diff --git a/crates/server/src/lib.rs b/crates/server/src/lib.rs new file mode 100644 index 00000000..2bd5f5c6 --- /dev/null +++ b/crates/server/src/lib.rs @@ -0,0 +1,9 @@ +pub mod error; +pub mod mcp; +pub mod middleware; +pub mod routes; + +// #[cfg(feature = "cloud")] +// type DeploymentImpl = vibe_kanban_cloud::deployment::CloudDeployment; +// #[cfg(not(feature = "cloud"))] +pub type DeploymentImpl = local_deployment::LocalDeployment; diff --git a/crates/server/src/main.rs b/crates/server/src/main.rs new file mode 100644 index 00000000..1af6d787 --- /dev/null +++ b/crates/server/src/main.rs @@ -0,0 +1,76 @@ +use anyhow::{self, Error as AnyhowError}; +use deployment::{Deployment, DeploymentError}; +use server::{routes, DeploymentImpl}; +use sqlx::Error as SqlxError; +use strip_ansi_escapes::strip; +use thiserror::Error; +use tracing_subscriber::{prelude::*, EnvFilter}; +use utils::{assets::asset_dir, browser::open_browser, sentry::sentry_layer}; + +#[derive(Debug, Error)] +pub enum VibeKanbanError { + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + Sqlx(#[from] SqlxError), + #[error(transparent)] + Deployment(#[from] DeploymentError), + #[error(transparent)] + Other(#[from] AnyhowError), +} + +#[tokio::main] +async fn main() -> Result<(), VibeKanbanError> { + let log_level = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()); + let filter_string = format!( + "warn,server={level},services={level},db={level},executors={level},deployment={level},local_deployment={level},utils={level}", + level = log_level + ); + let env_filter = EnvFilter::try_new(filter_string).expect("Failed to create tracing filter"); + tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer().with_filter(env_filter)) + .with(sentry_layer()) + .init(); + + // Create asset directory if it doesn't exist + if !asset_dir().exists() { + std::fs::create_dir_all(asset_dir())?; + } + + let deployment = DeploymentImpl::new().await?; + deployment.update_sentry_scope().await?; + deployment.cleanup_orphan_executions().await?; + deployment.spawn_pr_monitor_service().await; + + let app_router = routes::router(deployment); + + let port = std::env::var("BACKEND_PORT") + .or_else(|_| std::env::var("PORT")) + .ok() + .and_then(|s| { + // remove any ANSI codes, then turn into String + let cleaned = + String::from_utf8(strip(s.as_bytes())).expect("UTF-8 after stripping ANSI"); + cleaned.trim().parse::().ok() + }) + .unwrap_or_else(|| { + tracing::info!("No PORT environment variable set, using port 0 for auto-assignment"); + 0 + }); // Use 0 to find free port if no specific port provided + + let host = std::env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); + let listener = tokio::net::TcpListener::bind(format!("{host}:{port}")).await?; + let actual_port = listener.local_addr()?.port(); // get → 53427 (example) + + tracing::info!("Server running on http://{host}:{actual_port}"); + + if !cfg!(debug_assertions) { + tracing::info!("Opening browser..."); + if let Err(e) = open_browser(&format!("http://127.0.0.1:{actual_port}")).await { + tracing::warn!("Failed to open browser automatically: {}. 
Please open http://127.0.0.1:{} manually.", e, actual_port); + } + } + + axum::serve(listener, app_router).await?; + Ok(()) +} diff --git a/backend/src/mcp/mod.rs b/crates/server/src/mcp/mod.rs similarity index 100% rename from backend/src/mcp/mod.rs rename to crates/server/src/mcp/mod.rs diff --git a/backend/src/mcp/task_server.rs b/crates/server/src/mcp/task_server.rs similarity index 96% rename from backend/src/mcp/task_server.rs rename to crates/server/src/mcp/task_server.rs index b7315f32..5d347efc 100644 --- a/backend/src/mcp/task_server.rs +++ b/crates/server/src/mcp/task_server.rs @@ -1,5 +1,9 @@ -use std::future::Future; +use std::{future::Future, path::PathBuf}; +use db::models::{ + project::Project, + task::{CreateTask, Task, TaskStatus}, +}; use rmcp::{ handler::server::tool::{Parameters, ToolRouter}, model::{ @@ -12,11 +16,6 @@ use serde_json; use sqlx::SqlitePool; use uuid::Uuid; -use crate::models::{ - project::Project, - task::{CreateTask, Task, TaskStatus}, -}; - #[derive(Debug, Deserialize, schemars::JsonSchema)] pub struct CreateTaskRequest { #[schemars(description = "The ID of the project to create the task in. This is required!")] @@ -41,13 +40,13 @@ pub struct ProjectSummary { #[schemars(description = "The name of the project")] pub name: String, #[schemars(description = "The path to the git repository")] - pub git_repo_path: String, + pub git_repo_path: PathBuf, #[schemars(description = "Optional setup script for the project")] pub setup_script: Option, + #[schemars(description = "Optional cleanup script for the project")] + pub cleanup_script: Option, #[schemars(description = "Optional development script for the project")] pub dev_script: Option, - #[schemars(description = "Current git branch (if available)")] - pub current_branch: Option, #[schemars(description = "When the project was created")] pub created_at: String, #[schemars(description = "When the project was last updated")] @@ -307,18 +306,15 @@ impl TaskServer { let count = projects.len(); let project_summaries: Vec = projects .into_iter() - .map(|project| { - let project_with_branch = project.with_branch_info(); - ProjectSummary { - id: project_with_branch.id.to_string(), - name: project_with_branch.name, - git_repo_path: project_with_branch.git_repo_path, - setup_script: project_with_branch.setup_script, - dev_script: project_with_branch.dev_script, - current_branch: project_with_branch.current_branch, - created_at: project_with_branch.created_at.to_rfc3339(), - updated_at: project_with_branch.updated_at.to_rfc3339(), - } + .map(|project| ProjectSummary { + id: project.id.to_string(), + name: project.name, + git_repo_path: project.git_repo_path, + setup_script: project.setup_script, + cleanup_script: project.cleanup_script, + dev_script: project.dev_script, + created_at: project.created_at.to_rfc3339(), + updated_at: project.updated_at.to_rfc3339(), }) .collect(); @@ -662,7 +658,7 @@ impl TaskServer { match Task::exists(&self.pool, task_uuid, project_uuid).await { Ok(true) => { // Delete the task - match Task::delete(&self.pool, task_uuid, project_uuid).await { + match Task::delete(&self.pool, task_uuid).await { Ok(rows_affected) => { if rows_affected > 0 { let response = DeleteTaskResponse { diff --git a/backend/src/middleware/mod.rs b/crates/server/src/middleware/mod.rs similarity index 100% rename from backend/src/middleware/mod.rs rename to crates/server/src/middleware/mod.rs diff --git a/crates/server/src/middleware/model_loaders.rs b/crates/server/src/middleware/model_loaders.rs new file mode 100644 
index 00000000..ac9f72ae --- /dev/null +++ b/crates/server/src/middleware/model_loaders.rs @@ -0,0 +1,205 @@ +use axum::{ + extract::{Path, Request, State}, + http::StatusCode, + middleware::Next, + response::Response, +}; +use db::models::{ + execution_process::ExecutionProcess, project::Project, task::Task, task_attempt::TaskAttempt, + task_template::TaskTemplate, +}; +use deployment::Deployment; +use uuid::Uuid; + +use crate::DeploymentImpl; + +pub async fn load_project_middleware( + State(deployment): State, + Path(project_id): Path, + request: Request, + next: Next, +) -> Result { + // Load the project from the database + let project = match Project::find_by_id(&deployment.db().pool, project_id).await { + Ok(Some(project)) => project, + Ok(None) => { + tracing::warn!("Project {} not found", project_id); + return Err(StatusCode::NOT_FOUND); + } + Err(e) => { + tracing::error!("Failed to fetch project {}: {}", project_id, e); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + }; + + // Insert the project as an extension + let mut request = request; + request.extensions_mut().insert(project); + + // Continue with the next middleware/handler + Ok(next.run(request).await) +} + +pub async fn load_task_middleware( + State(deployment): State, + Path(task_id): Path, + request: Request, + next: Next, +) -> Result { + // Load the task and validate it belongs to the project + let task = match Task::find_by_id(&deployment.db().pool, task_id).await { + Ok(Some(task)) => task, + Ok(None) => { + tracing::warn!("Task {} not found", task_id); + return Err(StatusCode::NOT_FOUND); + } + Err(e) => { + tracing::error!("Failed to fetch task {}: {}", task_id, e); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + }; + + // Insert both models as extensions + let mut request = request; + request.extensions_mut().insert(task); + + // Continue with the next middleware/handler + Ok(next.run(request).await) +} + +pub async fn load_task_attempt_middleware( + State(deployment): State, + Path(task_attempt_id): Path, + mut request: Request, + next: Next, +) -> Result { + // Load the TaskAttempt from the database + let attempt = match TaskAttempt::find_by_id(&deployment.db().pool, task_attempt_id).await { + Ok(Some(a)) => a, + Ok(None) => { + tracing::warn!("TaskAttempt {} not found", task_attempt_id); + return Err(StatusCode::NOT_FOUND); + } + Err(e) => { + tracing::error!("Failed to fetch TaskAttempt {}: {}", task_attempt_id, e); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + }; + + // Insert the attempt into extensions + request.extensions_mut().insert(attempt); + + // Continue on + Ok(next.run(request).await) +} + +pub async fn load_execution_process_middleware( + State(deployment): State, + Path(process_id): Path, + mut request: Request, + next: Next, +) -> Result { + // Load the execution process from the database + let execution_process = + match ExecutionProcess::find_by_id(&deployment.db().pool, process_id).await { + Ok(Some(process)) => process, + Ok(None) => { + tracing::warn!("ExecutionProcess {} not found", process_id); + return Err(StatusCode::NOT_FOUND); + } + Err(e) => { + tracing::error!("Failed to fetch execution process {}: {}", process_id, e); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + }; + + // Inject the execution process into the request + request.extensions_mut().insert(execution_process); + + // Continue to the next middleware/handler + Ok(next.run(request).await) +} + +// TODO: fix +// Middleware that loads and injects Project, Task, TaskAttempt, and 
ExecutionProcess +// based on the path parameters: project_id, task_id, attempt_id, process_id +// pub async fn load_execution_process_with_context_middleware( +// State(deployment): State, +// Path((project_id, task_id, attempt_id, process_id)): Path<(Uuid, Uuid, Uuid, Uuid)>, +// request: axum::extract::Request, +// next: Next, +// ) -> Result { +// // Load the task attempt context first +// let context = match TaskAttempt::load_context( +// &deployment.db().pool, +// attempt_id, +// task_id, +// project_id, +// ) +// .await +// { +// Ok(context) => context, +// Err(e) => { +// tracing::error!( +// "Failed to load context for attempt {} in task {} in project {}: {}", +// attempt_id, +// task_id, +// project_id, +// e +// ); +// return Err(StatusCode::NOT_FOUND); +// } +// }; + +// // Load the execution process +// let execution_process = match ExecutionProcess::find_by_id(&deployment.db().pool, process_id).await +// { +// Ok(Some(process)) => process, +// Ok(None) => { +// tracing::warn!("ExecutionProcess {} not found", process_id); +// return Err(StatusCode::NOT_FOUND); +// } +// Err(e) => { +// tracing::error!("Failed to fetch execution process {}: {}", process_id, e); +// return Err(StatusCode::INTERNAL_SERVER_ERROR); +// } +// }; + +// // Insert all models as extensions +// let mut request = request; +// request.extensions_mut().insert(context.project); +// request.extensions_mut().insert(context.task); +// request.extensions_mut().insert(context.task_attempt); +// request.extensions_mut().insert(execution_process); + +// // Continue with the next middleware/handler +// Ok(next.run(request).await) +// } + +// Middleware that loads and injects TaskTemplate based on the template_id path parameter +pub async fn load_task_template_middleware( + State(deployment): State, + Path(template_id): Path, + request: axum::extract::Request, + next: Next, +) -> Result { + // Load the task template from the database + let task_template = match TaskTemplate::find_by_id(&deployment.db().pool, template_id).await { + Ok(Some(template)) => template, + Ok(None) => { + tracing::warn!("TaskTemplate {} not found", template_id); + return Err(StatusCode::NOT_FOUND); + } + Err(e) => { + tracing::error!("Failed to fetch task template {}: {}", template_id, e); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + }; + + // Insert the task template as an extension + let mut request = request; + request.extensions_mut().insert(task_template); + + // Continue with the next middleware/handler + Ok(next.run(request).await) +} diff --git a/crates/server/src/routes/auth.rs b/crates/server/src/routes/auth.rs new file mode 100644 index 00000000..77e028cc --- /dev/null +++ b/crates/server/src/routes/auth.rs @@ -0,0 +1,128 @@ +use axum::{ + extract::{Request, State}, + http::StatusCode, + middleware::{from_fn_with_state, Next}, + response::{Json as ResponseJson, Response}, + routing::{get, post}, + Router, +}; +use deployment::Deployment; +use octocrab::auth::Continue; +use serde::{Deserialize, Serialize}; +use services::services::{ + auth::{AuthError, DeviceFlowStartResponse}, + config::save_config_to_file, + github_service::{GitHubService, GitHubServiceError}, +}; +use utils::response::ApiResponse; + +use crate::{error::ApiError, DeploymentImpl}; + +pub fn router(deployment: &DeploymentImpl) -> Router { + Router::new() + .route("/auth/github/device/start", post(device_start)) + .route("/auth/github/device/poll", post(device_poll)) + .route("/auth/github/check", get(github_check_token)) + .layer(from_fn_with_state( + 
deployment.clone(), + sentry_user_context_middleware, + )) +} + +/// POST /auth/github/device/start +async fn device_start( + State(deployment): State, +) -> Result>, ApiError> { + let device_start_response = deployment.auth().device_start().await?; + Ok(ResponseJson(ApiResponse::success(device_start_response))) +} + +#[derive(Serialize, Deserialize, ts_rs::TS)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[ts(use_ts_enum)] +pub enum DevicePollStatus { + SlowDown, + AuthorizationPending, + Success, +} + +#[derive(Serialize, Deserialize, ts_rs::TS)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[ts(use_ts_enum)] +pub enum CheckTokenResponse { + Valid, + Invalid, +} + +/// POST /auth/github/device/poll +async fn device_poll( + State(deployment): State, +) -> Result>, ApiError> { + let user_info = match deployment.auth().device_poll().await { + Ok(info) => info, + Err(AuthError::Pending(Continue::SlowDown)) => { + return Ok(ResponseJson(ApiResponse::success( + DevicePollStatus::SlowDown, + ))); + } + Err(AuthError::Pending(Continue::AuthorizationPending)) => { + return Ok(ResponseJson(ApiResponse::success( + DevicePollStatus::AuthorizationPending, + ))); + } + Err(e) => return Err(e.into()), + }; + // Save to config + { + let config_path = utils::assets::config_path(); + let mut config = deployment.config().write().await; + config.github.username = Some(user_info.username.clone()); + config.github.primary_email = user_info.primary_email.clone(); + config.github.oauth_token = Some(user_info.token.to_string()); + config.github_login_acknowledged = true; // Also acknowledge the GitHub login step + save_config_to_file(&config.clone(), &config_path).await?; + } + let _ = deployment.update_sentry_scope().await; + let props = serde_json::json!({ + "username": user_info.username, + "email": user_info.primary_email, + }); + deployment + .track_if_analytics_allowed("$identify", props) + .await; + Ok(ResponseJson(ApiResponse::success( + DevicePollStatus::Success, + ))) +} + +/// GET /auth/github/check +async fn github_check_token( + State(deployment): State, +) -> Result>, ApiError> { + let gh_config = deployment.config().read().await.github.clone(); + let Some(token) = gh_config.token() else { + return Ok(ResponseJson(ApiResponse::success( + CheckTokenResponse::Invalid, + ))); + }; + let gh = GitHubService::new(&token)?; + match gh.check_token().await { + Ok(()) => Ok(ResponseJson(ApiResponse::success( + CheckTokenResponse::Valid, + ))), + Err(GitHubServiceError::TokenInvalid) => Ok(ResponseJson(ApiResponse::success( + CheckTokenResponse::Invalid, + ))), + Err(e) => Err(e.into()), + } +} + +/// Middleware to set Sentry user context for every request +pub async fn sentry_user_context_middleware( + State(deployment): State, + req: Request, + next: Next, +) -> Result { + let _ = deployment.update_sentry_scope().await; + Ok(next.run(req).await) +} diff --git a/crates/server/src/routes/config.rs b/crates/server/src/routes/config.rs new file mode 100644 index 00000000..4a553ea5 --- /dev/null +++ b/crates/server/src/routes/config.rs @@ -0,0 +1,339 @@ +use std::collections::HashMap; + +use axum::{ + body::Body, + extract::{Path, Query, State}, + http, + response::{Json as ResponseJson, Response}, + routing::{get, put}, + Json, Router, +}; +use deployment::{Deployment, DeploymentError}; +use executors::{command::AgentProfiles, executors::BaseCodingAgent}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use services::services::config::{save_config_to_file, Config, SoundFile}; +use 
tokio::fs; +use ts_rs::TS; +use utils::{assets::config_path, response::ApiResponse}; + +use crate::{error::ApiError, DeploymentImpl}; + +pub fn router() -> Router { + Router::new() + .route("/info", get(get_user_system_info)) + .route("/config", put(update_config)) + .route("/sounds/{sound}", get(get_sound)) + .route("/mcp-config", get(get_mcp_servers).post(update_mcp_servers)) +} + +#[derive(Debug, Serialize, Deserialize, TS)] +pub struct Environment { + pub os_type: String, + pub os_version: String, + pub os_architecture: String, + pub bitness: String, +} + +impl Default for Environment { + fn default() -> Self { + Self::new() + } +} + +impl Environment { + pub fn new() -> Self { + let info = os_info::get(); + Environment { + os_type: info.os_type().to_string(), + os_version: info.version().to_string(), + os_architecture: info.architecture().unwrap_or("unknown").to_string(), + bitness: info.bitness().to_string(), + } + } +} + +#[derive(Debug, Serialize, Deserialize, TS)] +pub struct UserSystemInfo { + pub config: Config, + #[serde(flatten)] + pub profiles: AgentProfiles, + pub environment: Environment, +} + +// TODO: update frontend, BE schema has changed, this replaces GET /config and /config/constants +#[axum::debug_handler] +async fn get_user_system_info( + State(deployment): State, +) -> ResponseJson> { + let config = deployment.config().read().await; + + let user_system_info = UserSystemInfo { + config: config.clone(), + profiles: AgentProfiles::get_cached().clone(), + environment: Environment::new(), + }; + + ResponseJson(ApiResponse::success(user_system_info)) +} + +async fn update_config( + State(deployment): State, + Json(new_config): Json, +) -> ResponseJson> { + let config_path = config_path(); + + match save_config_to_file(&new_config, &config_path).await { + Ok(_) => { + let mut config = deployment.config().write().await; + *config = new_config.clone(); + drop(config); + + ResponseJson(ApiResponse::success(new_config)) + } + Err(e) => ResponseJson(ApiResponse::error(&format!("Failed to save config: {}", e))), + } +} + +async fn get_sound(Path(sound): Path) -> Result { + let sound = sound.serve().await.map_err(DeploymentError::Other)?; + let response = Response::builder() + .status(http::StatusCode::OK) + .header( + http::header::CONTENT_TYPE, + http::HeaderValue::from_static("audio/wav"), + ) + .body(Body::from(sound.data.into_owned())) + .unwrap(); + Ok(response) +} + +#[derive(Debug, Deserialize)] +struct McpServerQuery { + base_coding_agent: Option, +} + +async fn get_mcp_servers( + State(deployment): State, + Query(query): Query, +) -> Result>, ApiError> { + let agent = match query.base_coding_agent { + Some(executor) => executor, + None => { + let config = deployment.config().read().await; + let profile = executors::command::AgentProfiles::get_cached() + .get_profile(&config.profile) + .expect("Corrupted config"); + profile.agent + } + }; + + if !agent.supports_mcp() { + return Ok(ResponseJson(ApiResponse::error( + "This executor does not support MCP servers", + ))); + } + + // Get the config file path for this executor + let config_path = match agent.config_path() { + Some(path) => path, + None => { + return Ok(ResponseJson(ApiResponse::error( + "Could not determine config file path", + ))); + } + }; + + match read_mcp_servers_from_config(&config_path, &agent).await { + Ok(servers) => { + let response_data = serde_json::json!({ + "servers": servers, + "config_path": config_path.to_string_lossy().to_string() + }); + Ok(ResponseJson(ApiResponse::success(response_data))) 
+ } + Err(e) => Ok(ResponseJson(ApiResponse::error(&format!( + "Failed to read MCP servers: {}", + e + )))), + } +} + +async fn update_mcp_servers( + State(deployment): State, + Query(query): Query, + Json(new_servers): Json>, +) -> Result>, ApiError> { + let agent = match query.base_coding_agent { + Some(executor) => executor, + None => { + let config = deployment.config().read().await; + let profile = executors::command::AgentProfiles::get_cached() + .get_profile(&config.profile) + .expect("Corrupted config"); + profile.agent + } + }; + + if !agent.supports_mcp() { + return Ok(ResponseJson(ApiResponse::error( + "This executor does not support MCP servers", + ))); + } + + // Get the config file path for this executor + let config_path = match agent.config_path() { + Some(path) => path, + None => { + return Ok(ResponseJson(ApiResponse::error( + "Could not determine config file path", + ))); + } + }; + + match update_mcp_servers_in_config(&config_path, &agent, new_servers).await { + Ok(message) => Ok(ResponseJson(ApiResponse::success(message))), + Err(e) => Ok(ResponseJson(ApiResponse::error(&format!( + "Failed to update MCP servers: {}", + e + )))), + } +} + +async fn update_mcp_servers_in_config( + config_path: &std::path::Path, + agent: &BaseCodingAgent, + new_servers: HashMap, +) -> Result> { + // Ensure parent directory exists + if let Some(parent) = config_path.parent() { + fs::create_dir_all(parent).await?; + } + + // Read existing config file or create empty object if it doesn't exist + let file_content = fs::read_to_string(config_path) + .await + .unwrap_or_else(|_| "{}".to_string()); + let mut config: Value = serde_json::from_str(&file_content)?; + + let mcp_path = agent.mcp_attribute_path().unwrap(); + + // Get the current server count for comparison + let old_servers = get_mcp_servers_from_config_path(agent, &config, &mcp_path).len(); + + // Set the MCP servers using the correct attribute path + set_mcp_servers_in_config_path(agent, &mut config, &mcp_path, &new_servers)?; + + // Write the updated config back to file + let updated_content = serde_json::to_string_pretty(&config)?; + fs::write(config_path, updated_content).await?; + + let new_count = new_servers.len(); + let message = match (old_servers, new_count) { + (0, 0) => "No MCP servers configured".to_string(), + (0, n) => format!("Added {} MCP server(s)", n), + (old, new) if old == new => format!("Updated MCP server configuration ({} server(s))", new), + (old, new) => format!( + "Updated MCP server configuration (was {}, now {})", + old, new + ), + }; + + Ok(message) +} + +async fn read_mcp_servers_from_config( + config_path: &std::path::Path, + agent: &BaseCodingAgent, +) -> Result, Box> { + let file_content = fs::read_to_string(config_path) + .await + .unwrap_or_else(|_| "{}".to_string()); + let raw_config: Value = serde_json::from_str(&file_content)?; + let mcp_path = agent.mcp_attribute_path().unwrap(); + let servers = get_mcp_servers_from_config_path(agent, &raw_config, &mcp_path); + Ok(servers) +} + +/// Helper function to get MCP servers from config using a path +fn get_mcp_servers_from_config_path( + agent: &BaseCodingAgent, + raw_config: &Value, + path: &[&str], +) -> HashMap { + // Special handling for AMP - use flat key structure + let current = if matches!(agent, BaseCodingAgent::Amp) { + let flat_key = format!("{}.{}", path[0], path[1]); + let current = match raw_config.get(&flat_key) { + Some(val) => val, + None => return HashMap::new(), + }; + current + } else { + let mut current = raw_config; + for &part in 
path { + current = match current.get(part) { + Some(val) => val, + None => return HashMap::new(), + }; + } + current + }; + + // Extract the servers object + match current.as_object() { + Some(servers) => servers + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + None => HashMap::new(), + } +} + +/// Helper function to set MCP servers in config using a path +fn set_mcp_servers_in_config_path( + agent: &BaseCodingAgent, + raw_config: &mut Value, + path: &[&str], + servers: &HashMap, +) -> Result<(), Box> { + // Ensure config is an object + if !raw_config.is_object() { + *raw_config = serde_json::json!({}); + } + + // Special handling for AMP - use flat key structure + if matches!(agent, BaseCodingAgent::Amp) { + let flat_key = format!("{}.{}", path[0], path[1]); + raw_config + .as_object_mut() + .unwrap() + .insert(flat_key, serde_json::to_value(servers)?); + return Ok(()); + } + + let mut current = raw_config; + + // Navigate/create the nested structure (all parts except the last) + for &part in &path[..path.len() - 1] { + if current.get(part).is_none() { + current + .as_object_mut() + .unwrap() + .insert(part.to_string(), serde_json::json!({})); + } + current = current.get_mut(part).unwrap(); + if !current.is_object() { + *current = serde_json::json!({}); + } + } + + // Set the final attribute + let final_attr = path.last().unwrap(); + current + .as_object_mut() + .unwrap() + .insert(final_attr.to_string(), serde_json::to_value(servers)?); + + Ok(()) +} diff --git a/crates/server/src/routes/events.rs b/crates/server/src/routes/events.rs new file mode 100644 index 00000000..6e74fc64 --- /dev/null +++ b/crates/server/src/routes/events.rs @@ -0,0 +1,28 @@ +use axum::{ + extract::State, + response::{ + sse::{Event, KeepAlive}, + Sse, + }, + routing::get, + BoxError, Router, +}; +use deployment::Deployment; +use futures_util::TryStreamExt; + +use crate::DeploymentImpl; + +pub async fn events( + State(deployment): State, +) -> Result>>, axum::http::StatusCode> +{ + // Ask the container service for a combined "history + live" stream + let stream = deployment.stream_events().await; + Ok(Sse::new(stream.map_err(|e| -> BoxError { e.into() })).keep_alive(KeepAlive::default())) +} + +pub fn router(_: &DeploymentImpl) -> Router { + let events_router = Router::new().route("/", get(events)); + + Router::new().nest("/events", events_router) +} diff --git a/crates/server/src/routes/execution_processes.rs b/crates/server/src/routes/execution_processes.rs new file mode 100644 index 00000000..a5a91812 --- /dev/null +++ b/crates/server/src/routes/execution_processes.rs @@ -0,0 +1,102 @@ +use axum::{ + extract::{Path, Query, State}, + middleware::from_fn_with_state, + response::{ + sse::{Event, KeepAlive}, + Json as ResponseJson, Sse, + }, + routing::{get, post}, + BoxError, Extension, Router, +}; +use db::models::execution_process::ExecutionProcess; +use deployment::Deployment; +use futures_util::TryStreamExt; +use serde::Deserialize; +use services::services::container::ContainerService; +use utils::response::ApiResponse; +use uuid::Uuid; + +use crate::{error::ApiError, middleware::load_execution_process_middleware, DeploymentImpl}; + +#[derive(Debug, Deserialize)] +pub struct ExecutionProcessQuery { + pub task_attempt_id: Uuid, +} + +pub async fn get_execution_processes( + State(deployment): State, + Query(query): Query, +) -> Result>>, ApiError> { + let pool = &deployment.db().pool; + let execution_processes = + ExecutionProcess::find_by_task_attempt_id(pool, query.task_attempt_id).await?; + 
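+ // Raw and normalized logs for each process are streamed separately via the SSE routes registered below.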
+ Ok(ResponseJson(ApiResponse::success(execution_processes))) +} + +pub async fn get_execution_process_by_id( + Extension(execution_process): Extension, + State(_deployment): State, +) -> Result>, ApiError> { + Ok(ResponseJson(ApiResponse::success(execution_process))) +} + +pub async fn stream_raw_logs( + State(deployment): State, + Path(exec_id): Path, +) -> Result>>, axum::http::StatusCode> +{ + // Ask the container service for a combined "history + live" stream + let stream = deployment + .container() + .stream_raw_logs(&exec_id) + .await + .ok_or(axum::http::StatusCode::NOT_FOUND)?; + + Ok(Sse::new(stream.map_err(|e| -> BoxError { e.into() })).keep_alive(KeepAlive::default())) +} + +pub async fn stream_normalized_logs( + State(deployment): State, + Path(exec_id): Path, +) -> Result>>, axum::http::StatusCode> +{ + // Ask the container service for a combined "history + live" stream + let stream = deployment + .container() + .stream_normalized_logs(&exec_id) + .await + .ok_or(axum::http::StatusCode::NOT_FOUND)?; + + Ok(Sse::new(stream.map_err(|e| -> BoxError { e.into() })).keep_alive(KeepAlive::default())) +} + +pub async fn stop_execution_process( + Extension(execution_process): Extension, + State(deployment): State, +) -> Result>, ApiError> { + deployment + .container() + .stop_execution(&execution_process) + .await?; + + Ok(ResponseJson(ApiResponse::success(()))) +} + +pub fn router(deployment: &DeploymentImpl) -> Router { + let task_attempt_id_router = Router::new() + .route("/", get(get_execution_process_by_id)) + .route("/stop", post(stop_execution_process)) + .route("/raw-logs", get(stream_raw_logs)) + .route("/normalized-logs", get(stream_normalized_logs)) + .layer(from_fn_with_state( + deployment.clone(), + load_execution_process_middleware, + )); + + let task_attempts_router = Router::new() + .route("/", get(get_execution_processes)) + .nest("/{id}", task_attempt_id_router); + + Router::new().nest("/execution-processes", task_attempts_router) +} diff --git a/crates/server/src/routes/filesystem.rs b/crates/server/src/routes/filesystem.rs new file mode 100644 index 00000000..572191c0 --- /dev/null +++ b/crates/server/src/routes/filesystem.rs @@ -0,0 +1,71 @@ +use axum::{ + extract::{Query, State}, + response::Json as ResponseJson, + routing::get, + Router, +}; +use deployment::Deployment; +use serde::Deserialize; +use services::services::filesystem::{DirectoryEntry, DirectoryListResponse, FilesystemError}; +use utils::response::ApiResponse; + +use crate::{error::ApiError, DeploymentImpl}; + +#[derive(Debug, Deserialize)] +pub struct ListDirectoryQuery { + path: Option, +} + +pub async fn list_directory( + State(deployment): State, + Query(query): Query, +) -> Result>, ApiError> { + match deployment.filesystem().list_directory(query.path).await { + Ok(response) => Ok(ResponseJson(ApiResponse::success(response))), + Err(FilesystemError::DirectoryDoesNotExist) => { + Ok(ResponseJson(ApiResponse::error("Directory does not exist"))) + } + Err(FilesystemError::PathIsNotDirectory) => { + Ok(ResponseJson(ApiResponse::error("Path is not a directory"))) + } + Err(FilesystemError::Io(e)) => { + tracing::error!("Failed to read directory: {}", e); + Ok(ResponseJson(ApiResponse::error(&format!( + "Failed to read directory: {}", + e + )))) + } + } +} + +pub async fn list_git_repos( + State(deployment): State, + Query(query): Query, +) -> Result>>, ApiError> { + match deployment + .filesystem() + .list_git_repos(query.path, Some(4)) + .await + { + Ok(response) => 
Ok(ResponseJson(ApiResponse::success(response))), + Err(FilesystemError::DirectoryDoesNotExist) => { + Ok(ResponseJson(ApiResponse::error("Directory does not exist"))) + } + Err(FilesystemError::PathIsNotDirectory) => { + Ok(ResponseJson(ApiResponse::error("Path is not a directory"))) + } + Err(FilesystemError::Io(e)) => { + tracing::error!("Failed to read directory: {}", e); + Ok(ResponseJson(ApiResponse::error(&format!( + "Failed to read directory: {}", + e + )))) + } + } +} + +pub fn router() -> Router { + Router::new() + .route("/filesystem/directory", get(list_directory)) + .route("/filesystem/git-repos", get(list_git_repos)) +} diff --git a/crates/server/src/routes/frontend.rs b/crates/server/src/routes/frontend.rs new file mode 100644 index 00000000..a5f019cc --- /dev/null +++ b/crates/server/src/routes/frontend.rs @@ -0,0 +1,54 @@ +use axum::{ + body::Body, + http::HeaderValue, + response::{IntoResponse, Response}, +}; +use reqwest::{header, StatusCode}; +use rust_embed::RustEmbed; + +#[derive(RustEmbed)] +#[folder = "../../frontend/dist"] +pub struct Assets; + +pub async fn serve_frontend(uri: axum::extract::Path) -> impl IntoResponse { + let path = uri.trim_start_matches('/'); + serve_file(path).await +} + +pub async fn serve_frontend_root() -> impl IntoResponse { + serve_file("index.html").await +} + +async fn serve_file(path: &str) -> impl IntoResponse { + let file = Assets::get(path); + + match file { + Some(content) => { + let mime = mime_guess::from_path(path).first_or_octet_stream(); + + Response::builder() + .status(StatusCode::OK) + .header( + header::CONTENT_TYPE, + HeaderValue::from_str(mime.as_ref()).unwrap(), + ) + .body(Body::from(content.data.into_owned())) + .unwrap() + } + None => { + // For SPA routing, serve index.html for unknown routes + if let Some(index) = Assets::get("index.html") { + Response::builder() + .status(StatusCode::OK) + .header(header::CONTENT_TYPE, HeaderValue::from_static("text/html")) + .body(Body::from(index.data.into_owned())) + .unwrap() + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::from("404 Not Found")) + .unwrap() + } + } + } +} diff --git a/backend/src/routes/github.rs b/crates/server/src/routes/github.rs similarity index 94% rename from backend/src/routes/github.rs rename to crates/server/src/routes/github.rs index 014fcb0d..5e8fc26e 100644 --- a/backend/src/routes/github.rs +++ b/crates/server/src/routes/github.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "cloud")] + use axum::{ extract::{Query, State}, http::StatusCode, @@ -5,12 +7,14 @@ use axum::{ routing::{get, post}, Json, Router, }; +use serde::Deserialize; +use ts_rs::TS; use uuid::Uuid; use crate::{ app_state::AppState, models::{ - project::{CreateProject, CreateProjectFromGitHub, Project}, + project::{CreateProject, Project}, ApiResponse, }, services::{ @@ -20,6 +24,16 @@ use crate::{ }, }; +#[derive(Debug, Deserialize, TS)] +pub struct CreateProjectFromGitHub { + pub repository_id: i64, + pub name: String, + pub clone_url: String, + pub setup_script: Option, + pub dev_script: Option, + pub cleanup_script: Option, +} + #[derive(serde::Deserialize)] pub struct RepositoryQuery { pub page: Option, @@ -30,11 +44,6 @@ pub async fn list_repositories( State(app_state): State, Query(params): Query, ) -> Result>>, StatusCode> { - // Only available in cloud mode - if app_state.mode.is_local() { - return Err(StatusCode::NOT_FOUND); - } - let page = params.page.unwrap_or(1); // Get GitHub configuration @@ -88,11 +97,6 @@ pub async fn create_project_from_github( 
State(app_state): State, Json(payload): Json, ) -> Result>, StatusCode> { - // Only available in cloud mode - if app_state.mode.is_local() { - return Err(StatusCode::NOT_FOUND); - } - tracing::debug!("Creating project '{}' from GitHub repository", payload.name); // Get workspace path diff --git a/backend/src/routes/health.rs b/crates/server/src/routes/health.rs similarity index 80% rename from backend/src/routes/health.rs rename to crates/server/src/routes/health.rs index ee1d96a0..e66cd96c 100644 --- a/backend/src/routes/health.rs +++ b/crates/server/src/routes/health.rs @@ -1,6 +1,5 @@ use axum::response::Json; - -use crate::models::ApiResponse; +use utils::response::ApiResponse; pub async fn health_check() -> Json> { Json(ApiResponse::success("OK".to_string())) diff --git a/crates/server/src/routes/mod.rs b/crates/server/src/routes/mod.rs new file mode 100644 index 00000000..e15b2c7e --- /dev/null +++ b/crates/server/src/routes/mod.rs @@ -0,0 +1,41 @@ +use axum::{ + routing::{get, IntoMakeService}, + Router, +}; + +use crate::DeploymentImpl; + +pub mod auth; +pub mod config; +pub mod filesystem; +// pub mod github; +pub mod events; +pub mod execution_processes; +pub mod frontend; +pub mod health; +pub mod projects; +pub mod task_attempts; +pub mod task_templates; +pub mod tasks; + +pub fn router(deployment: DeploymentImpl) -> IntoMakeService { + // Create routers with different middleware layers + let base_routes = Router::new() + .route("/health", get(health::health_check)) + .merge(config::router()) + .merge(projects::router(&deployment)) + .merge(tasks::router(&deployment)) + .merge(task_attempts::router(&deployment)) + .merge(execution_processes::router(&deployment)) + .merge(task_templates::router(&deployment)) + .merge(auth::router(&deployment)) + .merge(filesystem::router()) + .merge(events::router(&deployment)) + .with_state(deployment); + + Router::new() + .route("/", get(frontend::serve_frontend_root)) + .route("/{*path}", get(frontend::serve_frontend)) + .nest("/api", base_routes) + .into_make_service() +} diff --git a/backend/src/routes/projects.rs b/crates/server/src/routes/projects.rs similarity index 61% rename from backend/src/routes/projects.rs rename to crates/server/src/routes/projects.rs index 364de39f..260f1e22 100644 --- a/backend/src/routes/projects.rs +++ b/crates/server/src/routes/projects.rs @@ -1,108 +1,54 @@ -use std::collections::HashMap; +use std::{collections::HashMap, path::Path}; use axum::{ extract::{Query, State}, http::StatusCode, + middleware::from_fn_with_state, response::Json as ResponseJson, - routing::get, + routing::{get, post}, Extension, Json, Router, }; +use db::models::project::{ + CreateProject, Project, ProjectError, SearchMatchType, SearchResult, UpdateProject, +}; +use deployment::Deployment; +use ignore::WalkBuilder; +use services::services::git::{GitBranch, GitService}; +use utils::response::ApiResponse; use uuid::Uuid; -use crate::{ - app_state::AppState, - models::{ - project::{ - CreateBranch, CreateProject, GitBranch, Project, ProjectWithBranch, SearchMatchType, - SearchResult, UpdateProject, - }, - ApiResponse, - }, -}; +use crate::{error::ApiError, middleware::load_project_middleware, DeploymentImpl}; pub async fn get_projects( - State(app_state): State, -) -> Result>>, StatusCode> { - match Project::find_all(&app_state.db_pool).await { - Ok(projects) => Ok(ResponseJson(ApiResponse::success(projects))), - Err(e) => { - tracing::error!("Failed to fetch projects: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } + 
State(deployment): State, +) -> Result>>, ApiError> { + let projects = Project::find_all(&deployment.db().pool).await?; + Ok(ResponseJson(ApiResponse::success(projects))) } pub async fn get_project( Extension(project): Extension, -) -> Result>, StatusCode> { +) -> Result>, ApiError> { Ok(ResponseJson(ApiResponse::success(project))) } -pub async fn get_project_with_branch( - Extension(project): Extension, -) -> Result>, StatusCode> { - Ok(ResponseJson(ApiResponse::success( - project.with_branch_info(), - ))) -} - pub async fn get_project_branches( Extension(project): Extension, -) -> Result>>, StatusCode> { - match project.get_all_branches() { - Ok(branches) => Ok(ResponseJson(ApiResponse::success(branches))), - Err(e) => { - tracing::error!("Failed to get branches for project {}: {}", project.id, e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } - } -} - -pub async fn create_project_branch( - Extension(project): Extension, - Json(payload): Json, -) -> Result>, StatusCode> { - // Validate branch name - if payload.name.trim().is_empty() { - return Ok(ResponseJson(ApiResponse::error( - "Branch name cannot be empty", - ))); - } - - // Check if branch name contains invalid characters - if payload.name.contains(' ') { - return Ok(ResponseJson(ApiResponse::error( - "Branch name cannot contain spaces", - ))); - } - - match project.create_branch(&payload.name, payload.base_branch.as_deref()) { - Ok(branch) => Ok(ResponseJson(ApiResponse::success(branch))), - Err(e) => { - tracing::error!( - "Failed to create branch '{}' for project {}: {}", - payload.name, - project.id, - e - ); - Ok(ResponseJson(ApiResponse::error(&format!( - "Failed to create branch: {}", - e - )))) - } - } +) -> Result>>, ApiError> { + let branches = GitService::new().get_all_branches(&project.git_repo_path)?; + Ok(ResponseJson(ApiResponse::success(branches))) } pub async fn create_project( - State(app_state): State, + State(deployment): State, Json(payload): Json, -) -> Result>, StatusCode> { +) -> Result>, ApiError> { let id = Uuid::new_v4(); tracing::debug!("Creating project '{}'", payload.name); // Check if git repo path is already used by another project - match Project::find_by_git_repo_path(&app_state.db_pool, &payload.git_repo_path).await { + match Project::find_by_git_repo_path(&deployment.db().pool, &payload.git_repo_path).await { Ok(Some(_)) => { return Ok(ResponseJson(ApiResponse::error( "A project with this git repository path already exists", @@ -112,8 +58,7 @@ pub async fn create_project( // Path is available, continue } Err(e) => { - tracing::error!("Failed to check for existing git repo path: {}", e); - return Err(StatusCode::INTERNAL_SERVER_ERROR); + return Err(ProjectError::GitRepoCheckFailed(e.to_string()).into()); } } @@ -181,40 +126,37 @@ pub async fn create_project( } } - match Project::create(&app_state.db_pool, &payload, id).await { + match Project::create(&deployment.db().pool, &payload, id).await { Ok(project) => { // Track project creation event - app_state - .track_analytics_event( + deployment + .track_if_analytics_allowed( "project_created", - Some(serde_json::json!({ + serde_json::json!({ "project_id": project.id.to_string(), "use_existing_repo": payload.use_existing_repo, "has_setup_script": payload.setup_script.is_some(), "has_dev_script": payload.dev_script.is_some(), - })), + }), ) .await; Ok(ResponseJson(ApiResponse::success(project))) } - Err(e) => { - tracing::error!("Failed to create project: {}", e); - Err(StatusCode::INTERNAL_SERVER_ERROR) - } + Err(e) => 
Err(ProjectError::CreateFailed(e.to_string()).into()), } } pub async fn update_project( Extension(existing_project): Extension, - State(app_state): State, + State(deployment): State, Json(payload): Json, ) -> Result>, StatusCode> { // If git_repo_path is being changed, check if the new path is already used by another project if let Some(new_git_repo_path) = &payload.git_repo_path { - if new_git_repo_path != &existing_project.git_repo_path { + if new_git_repo_path != &existing_project.git_repo_path.to_string_lossy() { match Project::find_by_git_repo_path_excluding_id( - &app_state.db_pool, + &deployment.db().pool, new_git_repo_path, existing_project.id, ) @@ -248,10 +190,11 @@ pub async fn update_project( } = payload; let name = name.unwrap_or(existing_project.name); - let git_repo_path = git_repo_path.unwrap_or(existing_project.git_repo_path); + let git_repo_path = + git_repo_path.unwrap_or(existing_project.git_repo_path.to_string_lossy().to_string()); match Project::update( - &app_state.db_pool, + &deployment.db().pool, existing_project.id, name, git_repo_path, @@ -271,9 +214,9 @@ pub async fn update_project( pub async fn delete_project( Extension(project): Extension, - State(app_state): State, + State(deployment): State, ) -> Result>, StatusCode> { - match Project::delete(&app_state.db_pool, project.id).await { + match Project::delete(&deployment.db().pool, project.id).await { Ok(rows_affected) => { if rows_affected == 0 { Err(StatusCode::NOT_FOUND) @@ -295,62 +238,24 @@ pub struct OpenEditorRequest { pub async fn open_project_in_editor( Extension(project): Extension, - State(app_state): State, + State(deployment): State, Json(payload): Json>, ) -> Result>, StatusCode> { - // Get editor command from config or override - let editor_command = { - let config_guard = app_state.get_config().read().await; - if let Some(ref request) = payload { - if let Some(ref editor_type) = request.editor_type { - // Create a temporary editor config with the override - use crate::models::config::{EditorConfig, EditorType}; - let override_editor_type = match editor_type.as_str() { - "vscode" => EditorType::VSCode, - "cursor" => EditorType::Cursor, - "windsurf" => EditorType::Windsurf, - "intellij" => EditorType::IntelliJ, - "zed" => EditorType::Zed, - "custom" => EditorType::Custom, - _ => config_guard.editor.editor_type.clone(), - }; - let temp_config = EditorConfig { - editor_type: override_editor_type, - custom_command: config_guard.editor.custom_command.clone(), - }; - temp_config.get_command() - } else { - config_guard.editor.get_command() - } - } else { - config_guard.editor.get_command() - } + let path = project.git_repo_path.to_string_lossy(); + + let editor_config = { + let config = deployment.config().read().await; + let editor_type_str = payload.as_ref().and_then(|req| req.editor_type.as_deref()); + config.editor.with_override(editor_type_str) }; - // Open editor in the project directory - let mut cmd = std::process::Command::new(&editor_command[0]); - for arg in &editor_command[1..] 
{ - cmd.arg(arg); - } - cmd.arg(&project.git_repo_path); - - match cmd.spawn() { + match editor_config.open_file(&path) { Ok(_) => { - tracing::info!( - "Opened editor ({}) for project {} at path: {}", - editor_command.join(" "), - project.id, - project.git_repo_path - ); + tracing::info!("Opened editor for project {} at path: {}", project.id, path); Ok(ResponseJson(ApiResponse::success(()))) } Err(e) => { - tracing::error!( - "Failed to open editor ({}) for project {}: {}", - editor_command.join(" "), - project.id, - e - ); + tracing::error!("Failed to open editor for project {}: {}", project.id, e); Err(StatusCode::INTERNAL_SERVER_ERROR) } } @@ -370,7 +275,7 @@ pub async fn search_project_files( }; // Search files in the project repository - match search_files_in_repo(&project.git_repo_path, query).await { + match search_files_in_repo(&project.git_repo_path.to_string_lossy(), query).await { Ok(results) => Ok(ResponseJson(ApiResponse::success(results))), Err(e) => { tracing::error!("Failed to search files: {}", e); @@ -383,10 +288,6 @@ async fn search_files_in_repo( repo_path: &str, query: &str, ) -> Result, Box> { - use std::path::Path; - - use ignore::WalkBuilder; - let repo_path = Path::new(repo_path); if !repo_path.exists() { @@ -460,11 +361,10 @@ async fn search_files_in_repo( // Sort results by priority: FileName > DirectoryName > FullPath results.sort_by(|a, b| { - use SearchMatchType::*; let priority = |match_type: &SearchMatchType| match match_type { - FileName => 0, - DirectoryName => 1, - FullPath => 2, + SearchMatchType::FileName => 0, + SearchMatchType::DirectoryName => 1, + SearchMatchType::FullPath => 2, }; priority(&a.match_type) @@ -478,23 +378,23 @@ async fn search_files_in_repo( Ok(results) } -pub fn projects_base_router() -> Router { - Router::new().route("/projects", get(get_projects).post(create_project)) -} - -pub fn projects_with_id_router() -> Router { - use axum::routing::post; - - Router::new() +pub fn router(deployment: &DeploymentImpl) -> Router { + let project_id_router = Router::new() .route( - "/projects/:id", + "/", get(get_project).put(update_project).delete(delete_project), ) - .route("/projects/:id/with-branch", get(get_project_with_branch)) - .route( - "/projects/:id/branches", - get(get_project_branches).post(create_project_branch), - ) - .route("/projects/:id/search", get(search_project_files)) - .route("/projects/:id/open-editor", post(open_project_in_editor)) + .route("/branches", get(get_project_branches)) + .route("/search", get(search_project_files)) + .route("/open-editor", post(open_project_in_editor)) + .layer(from_fn_with_state( + deployment.clone(), + load_project_middleware, + )); + + let projects_router = Router::new() + .route("/", get(get_projects).post(create_project)) + .nest("/{id}", project_id_router); + + Router::new().nest("/projects", projects_router) } diff --git a/crates/server/src/routes/task_attempts.rs b/crates/server/src/routes/task_attempts.rs new file mode 100644 index 00000000..86673f60 --- /dev/null +++ b/crates/server/src/routes/task_attempts.rs @@ -0,0 +1,1022 @@ +use axum::{ + extract::{Query, State}, + http::StatusCode, + middleware::from_fn_with_state, + response::{ + sse::{Event, KeepAlive}, + Json as ResponseJson, Sse, + }, + routing::{get, post}, + BoxError, Extension, Json, Router, +}; +use db::models::{ + execution_process::{ExecutionProcess, ExecutionProcessRunReason}, + executor_session::ExecutorSession, + task::{Task, TaskStatus}, + task_attempt::{CreateTaskAttempt, TaskAttempt, TaskAttemptError}, +}; +use 
deployment::Deployment; +use executors::actions::{ + coding_agent_follow_up::CodingAgentFollowUpRequest, + script::{ScriptContext, ScriptRequest, ScriptRequestLanguage}, + ExecutorAction, ExecutorActionKind, ExecutorActionType, +}; +use futures_util::TryStreamExt; +use serde::{Deserialize, Serialize}; +use services::services::{ + container::ContainerService, + git::{BranchStatus, GitService}, + github_service::{CreatePrRequest, GitHubRepoInfo, GitHubService, GitHubServiceError}, +}; +use sqlx::Error as SqlxError; +use ts_rs::TS; +use utils::response::ApiResponse; +use uuid::Uuid; + +use crate::{error::ApiError, middleware::load_task_attempt_middleware, DeploymentImpl}; + +#[derive(Debug, Deserialize, Serialize, TS)] +pub struct RebaseTaskAttemptRequest { + pub new_base_branch: Option, +} + +#[derive(Debug, Deserialize, Serialize, TS)] +pub struct CreateGitHubPrRequest { + pub title: String, + pub body: Option, + pub base_branch: Option, +} + +#[derive(Debug, Serialize)] +pub struct FollowUpResponse { + pub message: String, + pub actual_attempt_id: Uuid, + pub created_new_attempt: bool, +} +// #[derive(Debug, Serialize, TS)] +// // pub struct ProcessLogsResponse { +// pub id: Uuid, +// pub process_type: ExecutionProcessType, +// pub command: String, +// pub executor_type: Option, +// pub status: ExecutionProcessStatus, +// pub normalized_conversation: NormalizedConversation, +// } + +// // Helper to normalize logs for a process (extracted from get_execution_process_normalized_logs) +// async fn normalize_process_logs( +// db_pool: &SqlitePool, +// process: &ExecutionProcess, +// ) -> NormalizedConversation { +// use crate::models::{ +// execution_process::ExecutionProcessType, executor_session::ExecutorSession, +// }; +// let executor_session = ExecutorSession::find_by_execution_process_id(db_pool, process.id) +// .await +// .ok() +// .flatten(); + +// let has_stdout = process +// .stdout +// .as_ref() +// .map(|s| !s.trim().is_empty()) +// .unwrap_or(false); +// let has_stderr = process +// .stderr +// .as_ref() +// .map(|s| !s.trim().is_empty()) +// .unwrap_or(false); + +// if !has_stdout && !has_stderr { +// return NormalizedConversation { +// entries: vec![], +// session_id: None, +// executor_type: process +// .executor_type +// .clone() +// .unwrap_or("unknown".to_string()), +// prompt: executor_session.as_ref().and_then(|s| s.prompt.clone()), +// summary: executor_session.as_ref().and_then(|s| s.summary.clone()), +// }; +// } + +// // Parse stdout as JSONL using executor normalization +// let mut stdout_entries = Vec::new(); +// if let Some(stdout) = &process.stdout { +// if !stdout.trim().is_empty() { +// let executor_type = process.executor_type.as_deref().unwrap_or("unknown"); +// let executor_config = if process.process_type == ExecutionProcessType::SetupScript { +// ExecutorConfig::SetupScript { +// script: executor_session +// .as_ref() +// .and_then(|s| s.prompt.clone()) +// .unwrap_or_else(|| "setup script".to_string()), +// } +// } else { +// match executor_type.to_string().parse() { +// Ok(config) => config, +// Err(_) => { +// return NormalizedConversation { +// entries: vec![], +// session_id: None, +// executor_type: executor_type.to_string(), +// prompt: executor_session.as_ref().and_then(|s| s.prompt.clone()), +// summary: executor_session.as_ref().and_then(|s| s.summary.clone()), +// }; +// } +// } +// }; +// let executor = executor_config.create_executor(); +// let working_dir_path = match std::fs::canonicalize(&process.working_directory) { +// Ok(canonical_path) => 
canonical_path.to_string_lossy().to_string(), +// Err(_) => process.working_directory.clone(), +// }; +// if let Ok(normalized) = executor.normalize_logs(stdout, &working_dir_path) { +// stdout_entries = normalized.entries; +// } +// } +// } +// // Parse stderr chunks separated by boundary markers +// let mut stderr_entries = Vec::new(); +// if let Some(stderr) = &process.stderr { +// let trimmed = stderr.trim(); +// if !trimmed.is_empty() { +// let chunks: Vec<&str> = trimmed.split("---STDERR_CHUNK_BOUNDARY---").collect(); +// for chunk in chunks { +// let chunk_trimmed = chunk.trim(); +// if !chunk_trimmed.is_empty() { +// let filtered_content = chunk_trimmed.replace("---STDERR_CHUNK_BOUNDARY---", ""); +// if !filtered_content.trim().is_empty() { +// stderr_entries.push(NormalizedEntry { +// timestamp: Some(chrono::Utc::now().to_rfc3339()), +// entry_type: NormalizedEntryType::ErrorMessage, +// content: filtered_content.trim().to_string(), +// metadata: None, +// }); +// } +// } +// } +// } +// } +// let mut all_entries = Vec::new(); +// all_entries.extend(stdout_entries); +// all_entries.extend(stderr_entries); +// all_entries.sort_by(|a, b| match (&a.timestamp, &b.timestamp) { +// (Some(a_ts), Some(b_ts)) => a_ts.cmp(b_ts), +// (Some(_), None) => std::cmp::Ordering::Less, +// (None, Some(_)) => std::cmp::Ordering::Greater, +// (None, None) => std::cmp::Ordering::Equal, +// }); +// let executor_type = if process.process_type == ExecutionProcessType::SetupScript { +// "setup-script".to_string() +// } else { +// process +// .executor_type +// .clone() +// .unwrap_or("unknown".to_string()) +// }; +// NormalizedConversation { +// entries: all_entries, +// session_id: None, +// executor_type, +// prompt: executor_session.as_ref().and_then(|s| s.prompt.clone()), +// summary: executor_session.as_ref().and_then(|s| s.summary.clone()), +// } +// } + +// /// Get all normalized logs for all execution processes of a task attempt +// pub async fn get_task_attempt_all_logs( +// Extension(_project): Extension, +// Extension(_task): Extension, +// Extension(task_attempt): Extension, +// State(app_state): State, +// ) -> Result>>, StatusCode> { +// // Fetch all execution processes for this attempt +// let processes = match ExecutionProcess::find_by_task_attempt_id( +// &app_state.db_pool, +// task_attempt.id, +// ) +// .await +// { +// Ok(list) => list, +// Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR), +// }; +// // For each process, normalize logs +// let mut result = Vec::new(); +// for process in processes { +// let normalized_conversation = normalize_process_logs(&app_state.db_pool, &process).await; +// result.push(ProcessLogsResponse { +// id: process.id, +// process_type: process.process_type.clone(), +// command: process.command.clone(), +// executor_type: process.executor_type.clone(), +// status: process.status.clone(), +// normalized_conversation, +// }); +// } +// Ok(Json(ApiResponse::success(result))) +// } + +#[derive(Debug, Deserialize)] +pub struct TaskAttemptQuery { + pub task_id: Option, +} + +pub async fn get_task_attempts( + State(deployment): State, + Query(query): Query, +) -> Result>>, ApiError> { + let pool = &deployment.db().pool; + let attempts = TaskAttempt::fetch_all(pool, query.task_id).await?; + Ok(ResponseJson(ApiResponse::success(attempts))) +} + +pub async fn get_task_attempt( + Extension(task_attempt): Extension, + State(_deployment): State, +) -> Result>, ApiError> { + Ok(ResponseJson(ApiResponse::success(task_attempt))) +} + +#[derive(Debug, Deserialize, 
ts_rs::TS)] +pub struct CreateTaskAttemptBody { + pub task_id: Uuid, + pub profile: Option, + pub base_branch: String, +} + +#[axum::debug_handler] +pub async fn create_task_attempt( + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + let profile_label = payload + .profile + .unwrap_or(deployment.config().read().await.profile.to_string()); + + let profile = executors::command::AgentProfiles::get_cached() + .get_profile(&profile_label) + .ok_or_else(|| { + ApiError::TaskAttempt(TaskAttemptError::ValidationError(format!( + "Profile not found: {}", + profile_label + ))) + })?; + + let task_attempt = TaskAttempt::create( + &deployment.db().pool, + &CreateTaskAttempt { + base_coding_agent: profile.agent.to_string(), + base_branch: payload.base_branch, + }, + payload.task_id, + ) + .await?; + + let execution_process = deployment + .container() + .start_attempt(&task_attempt, profile_label.clone()) + .await?; + + deployment + .track_if_analytics_allowed( + "task_attempt_started", + serde_json::json!({ + "task_id": task_attempt.task_id.to_string(), + "profile": &profile_label, + "base_coding_agent": profile.agent.to_string(), + "attempt_id": task_attempt.id.to_string(), + }), + ) + .await; + + tracing::info!("Started execution process {}", execution_process.id); + + Ok(ResponseJson(ApiResponse::success(task_attempt))) +} + +#[derive(Debug, Deserialize, TS)] +pub struct CreateFollowUpAttempt { + pub prompt: String, +} + +pub async fn follow_up( + Extension(task_attempt): Extension, + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + tracing::info!("{:?}", task_attempt); + + // First, get the most recent execution process with executor action type = StandardCoding + let initial_execution_process = ExecutionProcess::find_latest_by_task_attempt_and_action_type( + &deployment.db().pool, + task_attempt.id, + &ExecutorActionKind::CodingAgentInitialRequest, + ) + .await? + .ok_or(ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "Couldn't find initial coding agent process, has it run yet?".to_string(), + )))?; + + // Get session_id + let session_id = ExecutorSession::find_by_execution_process_id( + &deployment.db().pool, + initial_execution_process.id, + ) + .await? + .ok_or(ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "Couldn't find related executor session for this execution process".to_string(), + )))? + .session_id + .ok_or(ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "This executor session doesn't have a session_id".to_string(), + )))?; + + let profile = match &initial_execution_process + .executor_action() + .map_err(|e| ApiError::TaskAttempt(TaskAttemptError::ValidationError(e.to_string())))? + .typ + { + ExecutorActionType::CodingAgentInitialRequest(request) => Ok(request.profile.clone()), + _ => Err(ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "Couldn't find profile from initial request".to_string(), + ))), + }?; + + // Get parent task + let task = task_attempt + .parent_task(&deployment.db().pool) + .await? + .ok_or(SqlxError::RowNotFound)?; + + // Get parent project + let project = task + .parent_project(&deployment.db().pool) + .await? 
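        // Note (illustrative summary, not part of this patch): this handler assembles a follow-up by
        //   1. locating the latest CodingAgentInitialRequest process for the attempt (above),
        //   2. reading its ExecutorSession to recover session_id and the original profile (above),
        //   3. wrapping the project's cleanup_script, if any, as a chained ScriptRequest (below),
        //   4. starting a CodingAgentFollowUpRequest through the container service (below).
        // Rough shape of the resulting action chain, using the constructors seen in this handler:
        //
        //   let follow_up = ExecutorAction::new(
        //       ExecutorActionType::CodingAgentFollowUpRequest(CodingAgentFollowUpRequest {
        //           prompt,        // from the request body
        //           session_id,    // recovered from the initial executor session
        //           profile,       // reused from the initial request
        //       }),
        //       cleanup_action,    // optional boxed ExecutorAction running the cleanup script afterwards
        //   );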
+ .ok_or(SqlxError::RowNotFound)?; + + let cleanup_action = project.cleanup_script.map(|script| { + Box::new(ExecutorAction::new( + ExecutorActionType::ScriptRequest(ScriptRequest { + script, + language: ScriptRequestLanguage::Bash, + context: ScriptContext::CleanupScript, + }), + None, + )) + }); + + let follow_up_action = ExecutorAction::new( + ExecutorActionType::CodingAgentFollowUpRequest(CodingAgentFollowUpRequest { + prompt: payload.prompt, + session_id, + profile, + }), + cleanup_action, + ); + + let execution_process = deployment + .container() + .start_execution( + &task_attempt, + &follow_up_action, + &ExecutionProcessRunReason::CodingAgent, + ) + .await?; + + Ok(ResponseJson(ApiResponse::success(execution_process))) +} + +pub async fn get_task_attempt_diff( + Extension(task_attempt): Extension, + State(deployment): State, + // ) -> Result>, ApiError> { +) -> Result>>, axum::http::StatusCode> +{ + let stream = deployment + .container() + .get_diff(&task_attempt) + .await + .map_err(|_e| StatusCode::INTERNAL_SERVER_ERROR)?; + + Ok(Sse::new(stream.map_err(|e| -> BoxError { e.into() })).keep_alive(KeepAlive::default())) +} + +#[axum::debug_handler] +pub async fn merge_task_attempt( + Extension(task_attempt): Extension, + State(deployment): State, +) -> Result>, ApiError> { + let pool = &deployment.db().pool; + + let task = task_attempt + .parent_task(pool) + .await? + .ok_or(ApiError::TaskAttempt(TaskAttemptError::TaskNotFound))?; + let ctx = TaskAttempt::load_context(pool, task_attempt.id, task.id, task.project_id).await?; + + let container_ref = deployment + .container() + .ensure_container_exists(&task_attempt) + .await?; + let worktree_path = std::path::Path::new(&container_ref); + + let task_uuid_str = task.id.to_string(); + let first_uuid_section = task_uuid_str.split('-').next().unwrap_or(&task_uuid_str); + + // Create commit message with task title and description + let mut commit_message = format!("{} (vibe-kanban {})", ctx.task.title, first_uuid_section); + + // Add description on next line if it exists + if let Some(description) = &ctx.task.description { + if !description.trim().is_empty() { + commit_message.push_str("\n\n"); + commit_message.push_str(description); + } + } + + // Get branch name from task attempt + let branch_name = ctx.task_attempt.branch.as_ref().ok_or_else(|| { + ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "No branch found for task attempt".to_string(), + )) + })?; + + let merge_commit_id = GitService::new().merge_changes( + &ctx.project.git_repo_path, + worktree_path, + branch_name, + &ctx.task_attempt.base_branch, + &commit_message, + )?; + + TaskAttempt::update_merge_commit(pool, task_attempt.id, &merge_commit_id).await?; + Task::update_status(pool, ctx.task.id, TaskStatus::Done).await?; + + deployment + .track_if_analytics_allowed( + "task_attempt_merged", + serde_json::json!({ + "task_id": ctx.task.id.to_string(), + "project_id": ctx.project.id.to_string(), + "attempt_id": task_attempt.id.to_string(), + }), + ) + .await; + + Ok(ResponseJson(ApiResponse::success(()))) +} + +pub async fn create_github_pr( + Extension(task_attempt): Extension, + State(deployment): State, + Json(request): Json, +) -> Result>, ApiError> { + let github_config = deployment.config().read().await.github.clone(); + let Some(github_token) = github_config.token() else { + return Ok(ResponseJson(ApiResponse::error_with_data( + GitHubServiceError::TokenInvalid, + ))); + }; + // Create GitHub service instance + let github_service = 
GitHubService::new(&github_token)?; + if let Err(e) = github_service.check_token().await { + if e.is_api_data() { + return Ok(ResponseJson(ApiResponse::error_with_data(e))); + } else { + return Err(ApiError::GitHubService(e)); + } + } + // Get the task attempt to access the stored base branch + let base_branch = request.base_branch.unwrap_or_else(|| { + // Use the stored base branch from the task attempt as the default + // Fall back to config default or "main" only if stored base branch is somehow invalid + if !task_attempt.base_branch.trim().is_empty() { + task_attempt.base_branch.clone() + } else { + github_config + .default_pr_base + .as_ref() + .map_or_else(|| "main".to_string(), |b| b.to_string()) + } + }); + + let pool = &deployment.db().pool; + let task = task_attempt + .parent_task(pool) + .await? + .ok_or(ApiError::TaskAttempt(TaskAttemptError::TaskNotFound))?; + let ctx = TaskAttempt::load_context(pool, task_attempt.id, task.id, task.project_id).await?; + + // Ensure worktree exists (recreate if needed for cold task support) + let container_ref = deployment + .container() + .ensure_container_exists(&task_attempt) + .await?; + let worktree_path = std::path::Path::new(&container_ref); + + // Use GitService to get the remote URL, then create GitHubRepoInfo + let (owner, repo_name) = GitService::new().get_github_repo_info(&ctx.project.git_repo_path)?; + let repo_info = GitHubRepoInfo { owner, repo_name }; + + // Get branch name from task attempt + let branch_name = ctx.task_attempt.branch.as_ref().ok_or_else(|| { + ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "No branch found for task attempt".to_string(), + )) + })?; + + // Push the branch to GitHub first + if let Err(e) = GitService::new().push_to_github(worktree_path, branch_name, &github_token) { + tracing::error!("Failed to push branch to GitHub: {}", e); + let gh_e = GitHubServiceError::from(e); + if gh_e.is_api_data() { + return Ok(ResponseJson(ApiResponse::error_with_data(gh_e))); + } else { + return Ok(ResponseJson(ApiResponse::error( + "Failed to push branch to GitHub", + ))); + } + } + // Create the PR using GitHub service + let pr_request = CreatePrRequest { + title: request.title.clone(), + body: request.body.clone(), + head_branch: branch_name.clone(), + base_branch: base_branch.clone(), + }; + + match github_service.create_pr(&repo_info, &pr_request).await { + Ok(pr_info) => { + // Update the task attempt with PR information + if let Err(e) = TaskAttempt::update_pr_status( + pool, + task_attempt.id, + pr_info.url.clone(), + pr_info.number, + pr_info.status.clone(), + ) + .await + { + tracing::error!("Failed to update task attempt PR status: {}", e); + } + + deployment + .track_if_analytics_allowed( + "github_pr_created", + serde_json::json!({ + "task_id": ctx.task.id.to_string(), + "project_id": ctx.project.id.to_string(), + "attempt_id": task_attempt.id.to_string(), + }), + ) + .await; + + Ok(ResponseJson(ApiResponse::success(pr_info.url))) + } + Err(e) => { + tracing::error!( + "Failed to create GitHub PR for attempt {}: {}", + task_attempt.id, + e + ); + if e.is_api_data() { + Ok(ResponseJson(ApiResponse::error_with_data(e))) + } else { + Ok(ResponseJson(ApiResponse::error("Failed to create PR"))) + } + } + } +} + +#[derive(serde::Deserialize)] +pub struct OpenEditorRequest { + editor_type: Option, +} + +pub async fn open_task_attempt_in_editor( + Extension(task_attempt): Extension, + State(deployment): State, + Json(payload): Json>, +) -> Result>, ApiError> { + // Get the task attempt to access the 
worktree path + let attempt = &task_attempt; + let path = attempt.container_ref.as_ref().ok_or_else(|| { + tracing::error!( + "No container ref found for task attempt {}", + task_attempt.id + ); + ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "No container ref found".to_string(), + )) + })?; + + let editor_config = { + let config = deployment.config().read().await; + let editor_type_str = payload.as_ref().and_then(|req| req.editor_type.as_deref()); + config.editor.with_override(editor_type_str) + }; + + match editor_config.open_file(path) { + Ok(_) => { + tracing::info!( + "Opened editor for task attempt {} at path: {}", + task_attempt.id, + path + ); + Ok(ResponseJson(ApiResponse::success(()))) + } + Err(e) => { + tracing::error!( + "Failed to open editor for attempt {}: {}", + task_attempt.id, + e + ); + Err(ApiError::TaskAttempt(TaskAttemptError::ValidationError( + format!("Failed to open editor: {}", e), + ))) + } + } +} + +pub async fn get_task_attempt_branch_status( + Extension(task_attempt): Extension, + State(deployment): State, +) -> Result>, ApiError> { + let pool = &deployment.db().pool; + + let task = task_attempt + .parent_task(pool) + .await? + .ok_or(ApiError::TaskAttempt(TaskAttemptError::TaskNotFound))?; + let ctx = TaskAttempt::load_context(pool, task_attempt.id, task.id, task.project_id).await?; + + let branch_status = GitService::new() + .get_branch_status( + &ctx.project.git_repo_path, + ctx.task_attempt.branch.as_ref().ok_or_else(|| { + ApiError::TaskAttempt(TaskAttemptError::ValidationError( + "No branch found for task attempt".to_string(), + )) + })?, + &ctx.task_attempt.base_branch, + ctx.task_attempt.merge_commit.is_some(), + ) + .map_err(|e| { + tracing::error!( + "Failed to get branch status for task attempt {}: {}", + task_attempt.id, + e + ); + ApiError::GitService(e) + })?; + + Ok(ResponseJson(ApiResponse::success(branch_status))) +} + +#[axum::debug_handler] +pub async fn rebase_task_attempt( + Extension(task_attempt): Extension, + State(deployment): State, + request_body: Option>, +) -> Result>, ApiError> { + // Extract new base branch from request body if provided + let new_base_branch = request_body.and_then(|body| body.new_base_branch.clone()); + + let pool = &deployment.db().pool; + + let task = task_attempt + .parent_task(pool) + .await? 
+ .ok_or(ApiError::TaskAttempt(TaskAttemptError::TaskNotFound))?; + let ctx = TaskAttempt::load_context(pool, task_attempt.id, task.id, task.project_id).await?; + + // Use the stored base branch if no new base branch is provided + let effective_base_branch = + new_base_branch.or_else(|| Some(ctx.task_attempt.base_branch.clone())); + + let container_ref = deployment + .container() + .ensure_container_exists(&task_attempt) + .await?; + let worktree_path = std::path::Path::new(&container_ref); + + let _new_base_commit = GitService::new().rebase_branch( + &ctx.project.git_repo_path, + worktree_path, + effective_base_branch.clone().as_deref(), + &ctx.task_attempt.base_branch.clone(), + )?; + + if let Some(new_base_branch) = &effective_base_branch { + if new_base_branch != &ctx.task_attempt.base_branch { + // for remote branches, store the local branch name in the database + let db_branch_name = if new_base_branch.starts_with("origin/") { + new_base_branch.strip_prefix("origin/").unwrap() + } else { + new_base_branch + }; + TaskAttempt::update_base_branch(&deployment.db().pool, task_attempt.id, db_branch_name) + .await?; + } + } + + Ok(ResponseJson(ApiResponse::success(()))) +} + +#[derive(serde::Deserialize)] +pub struct DeleteFileQuery { + file_path: String, +} + +#[axum::debug_handler] +pub async fn delete_task_attempt_file( + Extension(task_attempt): Extension, + Query(query): Query, + State(deployment): State, +) -> Result>, ApiError> { + let container_ref = deployment + .container() + .ensure_container_exists(&task_attempt) + .await?; + let worktree_path = std::path::Path::new(&container_ref); + + // Use GitService to delete file and commit + let _commit_id = GitService::new() + .delete_file_and_commit(worktree_path, &query.file_path) + .map_err(|e| { + tracing::error!( + "Failed to delete file '{}' from task attempt {}: {}", + query.file_path, + task_attempt.id, + e + ); + ApiError::GitService(e) + })?; + + Ok(ResponseJson(ApiResponse::success(()))) +} + +#[axum::debug_handler] +pub async fn start_dev_server( + Extension(task_attempt): Extension, + State(deployment): State, +) -> Result>, ApiError> { + let pool = &deployment.db().pool; + + // Get parent task + let task = task_attempt + .parent_task(&deployment.db().pool) + .await? + .ok_or(SqlxError::RowNotFound)?; + + // Get parent project + let project = task + .parent_project(&deployment.db().pool) + .await? 
+ .ok_or(SqlxError::RowNotFound)?; + + // Stop any existing dev servers for this project + let existing_dev_servers = + match ExecutionProcess::find_running_dev_servers_by_project(pool, project.id).await { + Ok(servers) => servers, + Err(e) => { + tracing::error!( + "Failed to find running dev servers for project {}: {}", + project.id, + e + ); + return Err(ApiError::TaskAttempt(TaskAttemptError::ValidationError( + e.to_string(), + ))); + } + }; + + for dev_server in existing_dev_servers { + tracing::info!( + "Stopping existing dev server {} for project {}", + dev_server.id, + project.id + ); + + if let Err(e) = deployment.container().stop_execution(&dev_server).await { + tracing::error!("Failed to stop dev server {}: {}", dev_server.id, e); + } + } + + if let Some(dev_server) = project.dev_script { + // TODO: Derive script language from system config + let executor_action = ExecutorAction::new( + ExecutorActionType::ScriptRequest(ScriptRequest { + script: dev_server, + language: ScriptRequestLanguage::Bash, + context: ScriptContext::DevServer, + }), + None, + ); + + deployment + .container() + .start_execution( + &task_attempt, + &executor_action, + &ExecutionProcessRunReason::DevServer, + ) + .await? + } else { + return Ok(ResponseJson(ApiResponse::error( + "No dev server script configured for this project", + ))); + }; + + Ok(ResponseJson(ApiResponse::success(()))) +} + +// /// Find plan content with context by searching through multiple processes in the same attempt +// async fn find_plan_content_with_context( +// pool: &SqlitePool, +// attempt_id: Uuid, +// ) -> Result { +// // Get all execution processes for this attempt +// let execution_processes = +// match ExecutionProcess::find_by_task_attempt_id(pool, attempt_id).await { +// Ok(processes) => processes, +// Err(e) => { +// tracing::error!( +// "Failed to fetch execution processes for attempt {}: {}", +// attempt_id, +// e +// ); +// return Err(StatusCode::INTERNAL_SERVER_ERROR); +// } +// }; + +// // Look for claudeplan processes (most recent first) +// for claudeplan_process in execution_processes +// .iter() +// .rev() +// .filter(|p| p.executor_type.as_deref() == Some("claude-plan")) +// { +// if let Some(stdout) = &claudeplan_process.stdout { +// if !stdout.trim().is_empty() { +// // Create executor and normalize logs +// let executor_config = ExecutorConfig::ClaudePlan; +// let executor = executor_config.create_executor(); + +// // Use working directory for normalization +// let working_dir_path = +// match std::fs::canonicalize(&claudeplan_process.working_directory) { +// Ok(canonical_path) => canonical_path.to_string_lossy().to_string(), +// Err(_) => claudeplan_process.working_directory.clone(), +// }; + +// // Normalize logs and extract plan content +// match executor.normalize_logs(stdout, &working_dir_path) { +// Ok(normalized_conversation) => { +// // Search for plan content in the normalized conversation +// if let Some(plan_content) = normalized_conversation +// .entries +// .iter() +// .rev() +// .find_map(|entry| { +// if let NormalizedEntryType::ToolUse { +// action_type: ActionType::PlanPresentation { plan }, +// .. 
+// } = &entry.entry_type +// { +// Some(plan.clone()) +// } else { +// None +// } +// }) +// { +// return Ok(plan_content); +// } +// } +// Err(_) => { +// continue; +// } +// } +// } +// } +// } + +// tracing::error!( +// "No claudeplan content found in any process in attempt {}", +// attempt_id +// ); +// Err(StatusCode::NOT_FOUND) +// } + +// pub async fn approve_plan( +// Extension(project): Extension, +// Extension(task): Extension, +// Extension(task_attempt): Extension, +// State(app_state): State, +// ) -> Result>, StatusCode> { +// let current_task = &task; + +// // Find plan content with context across the task hierarchy +// let plan_content = find_plan_content_with_context(&app_state.db_pool, task_attempt.id).await?; + +// use crate::models::task::CreateTask; +// let new_task_id = Uuid::new_v4(); +// let create_task_data = CreateTask { +// project_id: project.id, +// title: format!("Execute Plan: {}", current_task.title), +// description: Some(plan_content), +// parent_task_attempt: Some(task_attempt.id), +// }; + +// let new_task = match Task::create(&app_state.db_pool, &create_task_data, new_task_id).await { +// Ok(task) => task, +// Err(e) => { +// tracing::error!("Failed to create new task: {}", e); +// return Err(StatusCode::INTERNAL_SERVER_ERROR); +// } +// }; + +// // Mark original task as completed since it now has children +// if let Err(e) = +// Task::update_status(&app_state.db_pool, task.id, project.id, TaskStatus::Done).await +// { +// tracing::error!("Failed to update original task status to Done: {}", e); +// return Err(StatusCode::INTERNAL_SERVER_ERROR); +// } else { +// tracing::info!( +// "Original task {} marked as Done after plan approval (has children)", +// task.id +// ); +// } + +// Ok(ResponseJson(ApiResponse::success(FollowUpResponse { +// message: format!("Plan approved and new task created: {}", new_task.title), +// actual_attempt_id: new_task_id, // Return the new task ID +// created_new_attempt: true, +// }))) +// } + +pub async fn get_task_attempt_children( + Extension(task_attempt): Extension, + State(deployment): State, +) -> Result>>, StatusCode> { + match Task::find_related_tasks_by_attempt_id(&deployment.db().pool, task_attempt.id).await { + Ok(related_tasks) => Ok(ResponseJson(ApiResponse::success(related_tasks))), + Err(e) => { + tracing::error!( + "Failed to fetch children for task attempt {}: {}", + task_attempt.id, + e + ); + Err(StatusCode::INTERNAL_SERVER_ERROR) + } + } +} + +// pub fn task_attempts_with_id_router(_state: AppState) -> Router { +// use axum::routing::post; + +// Router::new() +// .route( +// "/projects/:project_id/tasks/:task_id/attempts/:attempt_id/approve-plan", +// post(approve_plan), +// ) +// .merge( +// Router::new() +// .route_layer(from_fn_with_state(_state.clone(), load_task_attempt_middleware)) +// ) +// } + +pub async fn stop_task_attempt_execution( + Extension(task_attempt): Extension, + State(deployment): State, +) -> Result>, ApiError> { + deployment.container().try_stop(&task_attempt).await; + Ok(ResponseJson(ApiResponse::success(()))) +} + +pub fn router(deployment: &DeploymentImpl) -> Router { + let task_attempt_id_router = Router::new() + .route("/", get(get_task_attempt)) + .route("/follow-up", post(follow_up)) + .route("/start-dev-server", post(start_dev_server)) + .route("/branch-status", get(get_task_attempt_branch_status)) + .route("/diff", get(get_task_attempt_diff)) + .route("/merge", post(merge_task_attempt)) + .route("/rebase", post(rebase_task_attempt)) + .route("/pr", post(create_github_pr)) 
+ .route("/open-editor", post(open_task_attempt_in_editor)) + .route("/delete-file", post(delete_task_attempt_file)) + .route("/children", get(get_task_attempt_children)) + .route("/stop", post(stop_task_attempt_execution)) + .layer(from_fn_with_state( + deployment.clone(), + load_task_attempt_middleware, + )); + + let task_attempts_router = Router::new() + .route("/", get(get_task_attempts).post(create_task_attempt)) + .nest("/{id}", task_attempt_id_router); + + Router::new().nest("/task-attempts", task_attempts_router) +} diff --git a/crates/server/src/routes/task_templates.rs b/crates/server/src/routes/task_templates.rs new file mode 100644 index 00000000..9a49c425 --- /dev/null +++ b/crates/server/src/routes/task_templates.rs @@ -0,0 +1,103 @@ +use axum::{ + extract::{Query, State}, + middleware::from_fn_with_state, + response::Json as ResponseJson, + routing::get, + Extension, Json, Router, +}; +use db::models::task_template::{CreateTaskTemplate, TaskTemplate, UpdateTaskTemplate}; +use deployment::Deployment; +use serde::Deserialize; +use sqlx::Error as SqlxError; +use utils::response::ApiResponse; +use uuid::Uuid; + +use crate::{error::ApiError, middleware::load_task_template_middleware, DeploymentImpl}; + +#[derive(Debug, Deserialize)] +pub struct TaskTemplateQuery { + global: Option, + project_id: Option, +} + +pub async fn get_templates( + State(deployment): State, + Query(query): Query, +) -> Result>>, ApiError> { + let templates = match (query.global, query.project_id) { + // All templates: Global and project-specific + (None, None) => TaskTemplate::find_all(&deployment.db().pool).await?, + // Only global templates + (Some(true), None) => TaskTemplate::find_by_project_id(&deployment.db().pool, None).await?, + // Only project-specific templates + (None | Some(false), Some(project_id)) => { + TaskTemplate::find_by_project_id(&deployment.db().pool, Some(project_id)).await? 
+ } + // No global templates, but project_id is None, return empty list + (Some(false), None) => vec![], + // Invalid combination: Cannot query both global and project-specific templates + (Some(_), Some(_)) => { + return Err(ApiError::Database(SqlxError::InvalidArgument( + "Cannot query both global and project-specific templates".to_string(), + ))); + } + }; + Ok(ResponseJson(ApiResponse::success(templates))) +} + +pub async fn get_template( + Extension(template): Extension, +) -> Result>, ApiError> { + Ok(Json(ApiResponse::success(template))) +} + +pub async fn create_template( + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + Ok(ResponseJson(ApiResponse::success( + TaskTemplate::create(&deployment.db().pool, &payload).await?, + ))) +} + +pub async fn update_template( + Extension(template): Extension, + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + Ok(ResponseJson(ApiResponse::success( + TaskTemplate::update(&deployment.db().pool, template.id, &payload).await?, + ))) +} + +pub async fn delete_template( + Extension(template): Extension, + State(deployment): State, +) -> Result>, ApiError> { + let rows_affected = TaskTemplate::delete(&deployment.db().pool, template.id).await?; + if rows_affected == 0 { + Err(ApiError::Database(SqlxError::RowNotFound)) + } else { + Ok(ResponseJson(ApiResponse::success(()))) + } +} + +pub fn router(deployment: &DeploymentImpl) -> Router { + let task_template_router = Router::new() + .route( + "/", + get(get_template) + .put(update_template) + .delete(delete_template), + ) + .layer(from_fn_with_state( + deployment.clone(), + load_task_template_middleware, + )); + + let inner = Router::new() + .route("/", get(get_templates).post(create_template)) + .nest("/{template_id}", task_template_router); + + Router::new().nest("/templates", inner) +} diff --git a/crates/server/src/routes/tasks.rs b/crates/server/src/routes/tasks.rs new file mode 100644 index 00000000..39127f8b --- /dev/null +++ b/crates/server/src/routes/tasks.rs @@ -0,0 +1,224 @@ +use axum::{ + extract::{Query, State}, + middleware::from_fn_with_state, + response::Json as ResponseJson, + routing::{get, post}, + Extension, Json, Router, +}; +use db::models::{ + project::Project, + task::{CreateTask, Task, TaskWithAttemptStatus, UpdateTask}, + task_attempt::{CreateTaskAttempt, TaskAttempt, TaskAttemptError}, +}; +use deployment::Deployment; +use serde::Deserialize; +use services::services::{container::ContainerService, git::GitService}; +use sqlx::Error as SqlxError; +use utils::response::ApiResponse; +use uuid::Uuid; + +use crate::{error::ApiError, middleware::load_task_middleware, DeploymentImpl}; + +#[derive(Debug, Deserialize)] +pub struct TaskQuery { + pub project_id: Uuid, +} + +pub async fn get_tasks( + State(deployment): State, + Query(query): Query, +) -> Result>>, ApiError> { + let tasks = + Task::find_by_project_id_with_attempt_status(&deployment.db().pool, query.project_id) + .await?; + + Ok(ResponseJson(ApiResponse::success(tasks))) +} + +pub async fn get_task( + Extension(task): Extension, + State(_deployment): State, +) -> Result>, ApiError> { + Ok(ResponseJson(ApiResponse::success(task))) +} + +pub async fn create_task( + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + let id = Uuid::new_v4(); + + tracing::debug!( + "Creating task '{}' in project {}", + payload.title, + payload.project_id + ); + + let task = Task::create(&deployment.db().pool, &payload, id).await?; + + // Track task creation event 
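Client-side, task creation goes through the routes registered at the bottom of this file: POST /api/tasks, plus POST /api/tasks/create-and-start to also spawn the first attempt. A minimal sketch, assuming reqwest with its json feature and the CreateTask fields used elsewhere in this patch (project_id, title, description, parent_task_attempt); host and port are placeholders:

use uuid::Uuid;
use serde_json::json;

async fn create_task_example(project_id: Uuid) -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();
    // POST /api/tasks with a CreateTask body; /api/tasks/create-and-start accepts the same payload.
    let resp = client
        .post("http://localhost:3000/api/tasks")
        .json(&json!({
            "project_id": project_id.to_string(),
            "title": "Example task",
            "description": "Created from the API sketch",
            "parent_task_attempt": null,
        }))
        .send()
        .await?;
    println!("status: {}", resp.status());
    Ok(())
}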
+ deployment + .track_if_analytics_allowed( + "task_created", + serde_json::json!({ + "task_id": task.id.to_string(), + "project_id": payload.project_id, + "has_description": task.description.is_some(), + }), + ) + .await; + + Ok(ResponseJson(ApiResponse::success(task))) +} + +pub async fn create_task_and_start( + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + // create the task first + let task_id = Uuid::new_v4(); + let task = Task::create(&deployment.db().pool, &payload, task_id).await?; + deployment + .track_if_analytics_allowed( + "task_created", + serde_json::json!({ + "task_id": task.id.to_string(), + "project_id": task.project_id, + "has_description": task.description.is_some(), + }), + ) + .await; + + // use the default executor profile and the current branch for the task attempt + let default_profile_label = deployment.config().read().await.profile.clone().to_string(); + let project = Project::find_by_id(&deployment.db().pool, payload.project_id) + .await? + .ok_or(ApiError::Database(SqlxError::RowNotFound))?; + let branch = GitService::new().get_current_branch(&project.git_repo_path)?; + let base_coding_agent = executors::command::AgentProfiles::get_cached() + .get_profile(&default_profile_label) + .map(|profile| profile.agent.to_string()) + .ok_or_else(|| { + ApiError::TaskAttempt(TaskAttemptError::ValidationError(format!( + "Profile not found: {}", + default_profile_label + ))) + })?; + + let task_attempt = TaskAttempt::create( + &deployment.db().pool, + &CreateTaskAttempt { + base_coding_agent: base_coding_agent.clone(), + base_branch: branch, + }, + task.id, + ) + .await?; + let execution_process = deployment + .container() + .start_attempt(&task_attempt, default_profile_label.clone()) + .await?; + deployment + .track_if_analytics_allowed( + "task_attempt_started", + serde_json::json!({ + "task_id": task.id.to_string(), + "base_coding_agent": &base_coding_agent, + "profile": &default_profile_label, + "attempt_id": task_attempt.id.to_string(), + }), + ) + .await; + + let task = Task::find_by_id(&deployment.db().pool, task.id) + .await? 
+ .ok_or(ApiError::Database(SqlxError::RowNotFound))?; + + tracing::info!("Started execution process {}", execution_process.id); + Ok(ResponseJson(ApiResponse::success(TaskWithAttemptStatus { + id: task.id, + title: task.title, + description: task.description, + project_id: task.project_id, + status: task.status, + parent_task_attempt: task.parent_task_attempt, + created_at: task.created_at, + updated_at: task.updated_at, + has_in_progress_attempt: true, + has_merged_attempt: false, + last_attempt_failed: false, + base_coding_agent: task_attempt.base_coding_agent, + }))) +} + +pub async fn update_task( + Extension(existing_task): Extension, + State(deployment): State, + Json(payload): Json, +) -> Result>, ApiError> { + // Use existing values if not provided in update + let title = payload.title.unwrap_or(existing_task.title); + let description = payload.description.or(existing_task.description); + let status = payload.status.unwrap_or(existing_task.status); + let parent_task_attempt = payload + .parent_task_attempt + .or(existing_task.parent_task_attempt); + + let task = Task::update( + &deployment.db().pool, + existing_task.id, + existing_task.project_id, + title, + description, + status, + parent_task_attempt, + ) + .await?; + + Ok(ResponseJson(ApiResponse::success(task))) +} + +pub async fn delete_task( + Extension(task): Extension, + State(deployment): State, +) -> Result>, ApiError> { + let attempts = TaskAttempt::fetch_all(&deployment.db().pool, Some(task.id)) + .await + .unwrap_or_default(); + // Delete all attempts including their containers + for attempt in attempts { + deployment + .container() + .delete(&attempt) + .await + .unwrap_or_else(|e| { + tracing::warn!( + "Failed to delete task attempt {} for task {}: {}", + attempt.id, + task.id, + e + ); + }); + } + let rows_affected = Task::delete(&deployment.db().pool, task.id).await?; + + if rows_affected == 0 { + Err(ApiError::Database(SqlxError::RowNotFound)) + } else { + Ok(ResponseJson(ApiResponse::success(()))) + } +} + +pub fn router(deployment: &DeploymentImpl) -> Router { + let task_id_router = Router::new() + .route("/", get(get_task).put(update_task).delete(delete_task)) + .layer(from_fn_with_state(deployment.clone(), load_task_middleware)); + + let inner = Router::new() + .route("/", get(get_tasks).post(create_task)) + .route("/create-and-start", post(create_task_and_start)) + .nest("/{task_id}", task_id_router); + + // mount under /projects/:project_id/tasks + Router::new().nest("/tasks", inner) +} diff --git a/crates/services/Cargo.toml b/crates/services/Cargo.toml new file mode 100644 index 00000000..5c259787 --- /dev/null +++ b/crates/services/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "services" +version = "0.0.56" +edition = "2024" + +[dependencies] +utils = { path = "../utils" } +executors = { path = "../executors" } +db = { path = "../db" } +tokio = { workspace = true } +tokio-util = { version = "0.7", features = ["io"] } +axum = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +anyhow = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] } +chrono = { version = "0.4", features = ["serde"] } +uuid = { version = "1.0", features = ["v4", "serde"] } +ts-rs = { workspace = true } +dirs = "5.0" +xdg = "3.0" +git2 = "0.18" +async-trait = "0.1" +libc = "0.2" +rust-embed = "8.2" +directories = "6.0.0" +open = "5.3.2" +pathdiff = "0.2.1" +ignore 
= "0.4" +command-group = { version = "5.0", features = ["with-tokio"] } +openssl-sys = { workspace = true } +regex = "1.11.1" +notify-rust = "4.11" +octocrab = "0.44" +os_info = "3.12.0" +sentry = { version = "0.41.0", features = ["anyhow", "backtrace", "panic", "debug-images"] } +sentry-tracing = { version = "0.41.0", features = ["backtrace"] } +reqwest = { version = "0.12", features = ["json"] } +lazy_static = "1.4" +futures-util = "0.3" +json-patch = "2.0" +backon = "1.5.1" +base64 = "0.22" +thiserror = { workspace = true } +futures = "0.3.31" +tokio-stream = "0.1.17" +secrecy = "0.10.3" +strum_macros = "0.27.2" +strum = "0.27.2" +notify = "8.2.0" +notify-debouncer-full = "0.5.0" +dunce = "1.0" diff --git a/crates/services/src/lib.rs b/crates/services/src/lib.rs new file mode 100644 index 00000000..4e379ae7 --- /dev/null +++ b/crates/services/src/lib.rs @@ -0,0 +1 @@ +pub mod services; diff --git a/backend/src/services/analytics.rs b/crates/services/src/services/analytics.rs similarity index 88% rename from backend/src/services/analytics.rs rename to crates/services/src/services/analytics.rs index e68caa62..03ec8604 100644 --- a/backend/src/services/analytics.rs +++ b/crates/services/src/services/analytics.rs @@ -5,31 +5,37 @@ use std::{ }; use os_info; -use serde_json::{json, Value}; +use serde_json::{Value, json}; + +#[derive(Debug, Clone)] +pub struct AnalyticsContext { + pub user_id: String, + pub analytics_service: AnalyticsService, +} #[derive(Debug, Clone)] pub struct AnalyticsConfig { pub posthog_api_key: String, pub posthog_api_endpoint: String, - pub enabled: bool, } impl AnalyticsConfig { - pub fn new(user_enabled: bool) -> Self { - let api_key = option_env!("POSTHOG_API_KEY").unwrap_or_default(); - let api_endpoint = option_env!("POSTHOG_API_ENDPOINT").unwrap_or_default(); + pub fn new() -> Option { + let api_key = option_env!("POSTHOG_API_KEY") + .map(|s| s.to_string()) + .or_else(|| std::env::var("POSTHOG_API_KEY").ok())?; + let api_endpoint = option_env!("POSTHOG_API_ENDPOINT") + .map(|s| s.to_string()) + .or_else(|| std::env::var("POSTHOG_API_ENDPOINT").ok())?; - let enabled = user_enabled && !api_key.is_empty() && !api_endpoint.is_empty(); - - Self { - posthog_api_key: api_key.to_string(), - posthog_api_endpoint: api_endpoint.to_string(), - enabled, - } + Some(Self { + posthog_api_key: api_key, + posthog_api_endpoint: api_endpoint, + }) } } -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct AnalyticsService { config: AnalyticsConfig, client: reqwest::Client, @@ -45,12 +51,6 @@ impl AnalyticsService { Self { config, client } } - pub fn is_enabled(&self) -> bool { - self.config.enabled - && !self.config.posthog_api_key.is_empty() - && !self.config.posthog_api_endpoint.is_empty() - } - pub fn track_event(&self, user_id: &str, event_name: &str, properties: Option) { let endpoint = format!( "{}/capture/", diff --git a/crates/services/src/services/auth.rs b/crates/services/src/services/auth.rs new file mode 100644 index 00000000..088fca4b --- /dev/null +++ b/crates/services/src/services/auth.rs @@ -0,0 +1,131 @@ +use std::sync::Arc; + +use anyhow::Error as AnyhowError; +use axum::http::{HeaderName, header::ACCEPT}; +use octocrab::{ + OctocrabBuilder, + auth::{Continue, DeviceCodes, OAuth}, +}; +use secrecy::{ExposeSecret, SecretString}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tokio::sync::RwLock; +use ts_rs::TS; + +#[derive(Clone)] +pub struct AuthService { + pub client_id: String, + pub device_codes: Arc>>, +} + +#[derive(Debug, Error)] +pub enum 
AuthError { + #[error(transparent)] + GitHubClient(#[from] octocrab::Error), + #[error(transparent)] + Parse(#[from] serde_json::Error), + #[error("Device flow not started")] + DeviceFlowNotStarted, + #[error("Device flow pending")] + Pending(Continue), + #[error(transparent)] + Other(#[from] AnyhowError), +} + +#[derive(Serialize, Deserialize, TS)] +pub struct DeviceFlowStartResponse { + pub user_code: String, + pub verification_uri: String, + pub expires_in: u32, + pub interval: u32, +} + +pub struct UserInfo { + pub username: String, + pub primary_email: Option, + pub token: String, +} + +#[derive(Deserialize)] +pub struct GitHubEmailEntry { + pub email: String, + pub primary: bool, +} + +impl Default for AuthService { + fn default() -> Self { + Self::new() + } +} + +impl AuthService { + pub fn new() -> Self { + let client_id_str = option_env!("GITHUB_CLIENT_ID").unwrap_or("Ov23li9bxz3kKfPOIsGm"); + AuthService { + client_id: client_id_str.to_string(), + device_codes: Arc::new(RwLock::new(None)), // Initially no device codes + } + } + + pub async fn device_start(&self) -> Result { + let client = OctocrabBuilder::new() + .base_uri("https://github.com")? + .add_header(ACCEPT, "application/json".to_string()) + .build()?; + let device_codes = client + .authenticate_as_device( + &SecretString::from(self.client_id.clone()), + ["user:email", "repo"], + ) + .await?; + self.device_codes + .write() + .await + .replace(device_codes.clone()); // Store the device codes for later polling + Ok(DeviceFlowStartResponse { + user_code: device_codes.user_code, + verification_uri: device_codes.verification_uri, + expires_in: device_codes.expires_in as u32, + interval: device_codes.interval as u32, + }) + } + + pub async fn device_poll(&self) -> Result { + let device_codes = { + let guard = self.device_codes.read().await; + guard + .as_ref() + .ok_or(AuthError::DeviceFlowNotStarted)? + .clone() + }; + let client = OctocrabBuilder::new() + .base_uri("https://github.com")? + .add_header(ACCEPT, "application/json".to_string()) + .build()?; + let poll_response = device_codes + .poll_once(&client, &SecretString::from(self.client_id.clone())) + .await?; + let access_token = poll_response.either( + |OAuth { access_token, .. 
}| Ok(access_token), + |c| Err(AuthError::Pending(c)), + )?; + let client = OctocrabBuilder::new() + .add_header( + HeaderName::try_from("User-Agent").unwrap(), + "vibe-kanban-app".to_string(), + ) + .personal_token(access_token.clone()) + .build()?; + let user = client.current().user().await?; + let emails: Vec = client.get("/user/emails", None::<&()>).await?; + let primary_email = emails + .iter() + .find(|entry| entry.primary) + .map(|entry| entry.email.clone()); + Ok(UserInfo { + username: user.login, + primary_email, + token: access_token.expose_secret().to_string(), + }) + } +} diff --git a/crates/services/src/services/config/mod.rs b/crates/services/src/services/config/mod.rs new file mode 100644 index 00000000..7d662eee --- /dev/null +++ b/crates/services/src/services/config/mod.rs @@ -0,0 +1,42 @@ +use std::path::PathBuf; + +use thiserror::Error; + +mod versions; + +#[derive(Debug, Error)] +pub enum ConfigError { + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + Json(#[from] serde_json::Error), +} + +pub type Config = versions::v2::Config; +pub type NotificationConfig = versions::v2::NotificationConfig; +pub type EditorConfig = versions::v2::EditorConfig; +pub type ThemeMode = versions::v2::ThemeMode; +pub type SoundFile = versions::v2::SoundFile; +pub type EditorType = versions::v2::EditorType; +pub type GitHubConfig = versions::v2::GitHubConfig; + +/// Will always return config, trying old schemas or eventually returning default +pub async fn load_config_from_file(config_path: &PathBuf) -> Config { + match std::fs::read_to_string(config_path) { + Ok(raw_config) => Config::from(raw_config), + Err(_) => { + tracing::info!("No config file found, creating one"); + Config::default() + } + } +} + +/// Saves the config to the given path +pub async fn save_config_to_file( + config: &Config, + config_path: &PathBuf, +) -> Result<(), ConfigError> { + let raw_config = serde_json::to_string_pretty(config)?; + std::fs::write(config_path, raw_config)?; + Ok(()) +} diff --git a/crates/services/src/services/config/versions/mod.rs b/crates/services/src/services/config/versions/mod.rs new file mode 100644 index 00000000..8e5a3fae --- /dev/null +++ b/crates/services/src/services/config/versions/mod.rs @@ -0,0 +1,2 @@ +pub(super) mod v1; +pub(super) mod v2; diff --git a/crates/services/src/services/config/versions/v1.rs b/crates/services/src/services/config/versions/v1.rs new file mode 100644 index 00000000..ede94697 --- /dev/null +++ b/crates/services/src/services/config/versions/v1.rs @@ -0,0 +1,87 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(super) struct Config { + pub(super) theme: ThemeMode, + pub(super) executor: ExecutorConfig, + pub(super) disclaimer_acknowledged: bool, + pub(super) onboarding_acknowledged: bool, + pub(super) github_login_acknowledged: bool, + pub(super) telemetry_acknowledged: bool, + pub(super) sound_alerts: bool, + pub(super) sound_file: SoundFile, + pub(super) push_notifications: bool, + pub(super) editor: EditorConfig, + pub(super) github: GitHubConfig, + pub(super) analytics_enabled: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "kebab-case")] +pub(super) enum ExecutorConfig { + Echo, + Claude, + ClaudePlan, + Amp, + Gemini, + #[serde(alias = "setup_script")] + SetupScript { + script: String, + }, + ClaudeCodeRouter, + #[serde(alias = "charmopencode")] + CharmOpencode, + #[serde(alias = "opencode")] + SstOpencode, +} + +#[derive(Debug, 
Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub(super) enum ThemeMode { + Light, + Dark, + System, + Purple, + Green, + Blue, + Orange, + Red, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(super) struct EditorConfig { + pub editor_type: EditorType, + pub custom_command: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(super) struct GitHubConfig { + pub pat: Option, + pub token: Option, + pub username: Option, + pub primary_email: Option, + pub default_pr_base: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub(super) enum EditorType { + VsCode, + Cursor, + Windsurf, + IntelliJ, + Zed, + Custom, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub(super) enum SoundFile { + AbstractSound1, + AbstractSound2, + AbstractSound3, + AbstractSound4, + CowMooing, + PhoneVibration, + Rooster, +} diff --git a/crates/services/src/services/config/versions/v2.rs b/crates/services/src/services/config/versions/v2.rs new file mode 100644 index 00000000..209724c3 --- /dev/null +++ b/crates/services/src/services/config/versions/v2.rs @@ -0,0 +1,389 @@ +use std::{path::PathBuf, str::FromStr}; + +use anyhow::Error; +use serde::{Deserialize, Serialize}; +use strum_macros::EnumString; +use ts_rs::TS; +use utils::{assets::SoundAssets, cache_dir}; + +use crate::services::config::versions::v1; + +#[derive(Clone, Debug, Serialize, Deserialize, TS)] +pub struct Config { + pub config_version: String, + pub theme: ThemeMode, + pub profile: String, + pub disclaimer_acknowledged: bool, + pub onboarding_acknowledged: bool, + pub github_login_acknowledged: bool, + pub telemetry_acknowledged: bool, + pub notifications: NotificationConfig, + pub editor: EditorConfig, + pub github: GitHubConfig, + pub analytics_enabled: Option, + pub workspace_dir: Option, +} + +impl Config { + pub fn from_previous_version(raw_config: &str) -> Result { + let old_config = match serde_json::from_str::(raw_config) { + Ok(cfg) => cfg, + Err(e) => { + tracing::error!("❌ Failed to parse config: {}", e); + tracing::error!(" at line {}, column {}", e.line(), e.column()); + return Err(e.into()); + } + }; + + let old_config_clone = old_config.clone(); + + let mut onboarding_acknowledged = old_config.onboarding_acknowledged; + + // Map old executors to new profiles + let profile: &str = match old_config.executor { + v1::ExecutorConfig::Claude => "claude-code", + v1::ExecutorConfig::ClaudeCodeRouter => "claude-code", + v1::ExecutorConfig::ClaudePlan => "claude-code-plan", + v1::ExecutorConfig::Amp => "amp", + v1::ExecutorConfig::Gemini => "gemini", + v1::ExecutorConfig::SstOpencode => "opencode", + _ => { + onboarding_acknowledged = false; // Reset the user's onboarding if executor is not supported + "claude-code" + } + }; + + Ok(Self { + config_version: "v2".to_string(), + theme: ThemeMode::from(old_config.theme), // Now SCREAMING_SNAKE_CASE + profile: profile.to_string(), + disclaimer_acknowledged: old_config.disclaimer_acknowledged, + onboarding_acknowledged, + github_login_acknowledged: old_config.github_login_acknowledged, + telemetry_acknowledged: old_config.telemetry_acknowledged, + notifications: NotificationConfig::from(old_config_clone), + editor: EditorConfig::from(old_config.editor), + github: GitHubConfig::from(old_config.github), + analytics_enabled: None, + workspace_dir: None, + }) + } +} + +impl From for Config { + fn from(raw_config: String) -> Self { + if let Ok(config) = 
serde_json::from_str(&raw_config) { + config + } else if let Ok(config) = Self::from_previous_version(&raw_config) { + tracing::info!("Config upgraded from previous version"); + config + } else { + tracing::warn!("Config reset to default"); + Self::default() + } + } +} + +impl Default for Config { + fn default() -> Self { + Self { + config_version: "v2".to_string(), + theme: ThemeMode::System, + profile: String::from("claude-code"), + disclaimer_acknowledged: false, + onboarding_acknowledged: false, + github_login_acknowledged: false, + telemetry_acknowledged: false, + notifications: NotificationConfig::default(), + editor: EditorConfig::default(), + github: GitHubConfig::default(), + analytics_enabled: None, + workspace_dir: None, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct GitHubConfig { + pub pat: Option, + pub oauth_token: Option, + pub username: Option, + pub primary_email: Option, + pub default_pr_base: Option, +} + +impl From for GitHubConfig { + fn from(old: v1::GitHubConfig) -> Self { + Self { + pat: old.pat, + oauth_token: old.token, // Map to new field name + username: old.username, + primary_email: old.primary_email, + default_pr_base: old.default_pr_base, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct NotificationConfig { + pub sound_enabled: bool, + pub push_enabled: bool, + pub sound_file: SoundFile, +} + +impl From for NotificationConfig { + fn from(old: v1::Config) -> Self { + Self { + sound_enabled: old.sound_alerts, + push_enabled: old.push_notifications, + sound_file: SoundFile::from(old.sound_file), // Now SCREAMING_SNAKE_CASE + } + } +} + +impl Default for NotificationConfig { + fn default() -> Self { + Self { + sound_enabled: true, + push_enabled: true, + sound_file: SoundFile::CowMooing, + } + } +} + +impl Default for GitHubConfig { + fn default() -> Self { + Self { + pat: None, + oauth_token: None, + username: None, + primary_email: None, + default_pr_base: Some("main".to_string()), + } + } +} + +impl GitHubConfig { + pub fn token(&self) -> Option { + self.pat + .as_deref() + .or(self.oauth_token.as_deref()) + .map(|s| s.to_string()) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS, EnumString)] +#[ts(use_ts_enum)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] +pub enum SoundFile { + AbstractSound1, + AbstractSound2, + AbstractSound3, + AbstractSound4, + CowMooing, + PhoneVibration, + Rooster, +} + +impl SoundFile { + pub fn to_filename(&self) -> &'static str { + match self { + SoundFile::AbstractSound1 => "abstract-sound1.wav", + SoundFile::AbstractSound2 => "abstract-sound2.wav", + SoundFile::AbstractSound3 => "abstract-sound3.wav", + SoundFile::AbstractSound4 => "abstract-sound4.wav", + SoundFile::CowMooing => "cow-mooing.wav", + SoundFile::PhoneVibration => "phone-vibration.wav", + SoundFile::Rooster => "rooster.wav", + } + } + + // load the sound file from the embedded assets or cache + pub async fn serve(&self) -> Result { + match SoundAssets::get(self.to_filename()) { + Some(content) => Ok(content), + None => { + tracing::error!("Sound file not found: {}", self.to_filename()); + Err(anyhow::anyhow!( + "Sound file not found: {}", + self.to_filename() + )) + } + } + } + /// Get or create a cached sound file with the embedded sound data + pub async fn get_path(&self) -> Result> { + use std::io::Write; + + let filename = self.to_filename(); + let cache_dir = cache_dir(); + let cached_path = cache_dir.join(format!("sound-{filename}")); + 
+ // Check if cached file already exists and is valid + if cached_path.exists() { + // Verify file has content (basic validation) + if let Ok(metadata) = std::fs::metadata(&cached_path) + && metadata.len() > 0 + { + return Ok(cached_path); + } + } + + // File doesn't exist or is invalid, create it + let sound_data = SoundAssets::get(filename) + .ok_or_else(|| format!("Embedded sound file not found: {filename}"))? + .data; + + // Ensure cache directory exists + std::fs::create_dir_all(&cache_dir) + .map_err(|e| format!("Failed to create cache directory: {e}"))?; + + let mut file = std::fs::File::create(&cached_path) + .map_err(|e| format!("Failed to create cached sound file: {e}"))?; + + file.write_all(&sound_data) + .map_err(|e| format!("Failed to write sound data to cached file: {e}"))?; + + drop(file); // Ensure file is closed + + Ok(cached_path) + } +} + +impl From for SoundFile { + fn from(old: v1::SoundFile) -> Self { + match old { + v1::SoundFile::AbstractSound1 => SoundFile::AbstractSound1, + v1::SoundFile::AbstractSound2 => SoundFile::AbstractSound2, + v1::SoundFile::AbstractSound3 => SoundFile::AbstractSound3, + v1::SoundFile::AbstractSound4 => SoundFile::AbstractSound4, + v1::SoundFile::CowMooing => SoundFile::CowMooing, + v1::SoundFile::PhoneVibration => SoundFile::PhoneVibration, + v1::SoundFile::Rooster => SoundFile::Rooster, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct EditorConfig { + editor_type: EditorType, + custom_command: Option, +} + +impl From for EditorConfig { + fn from(old: v1::EditorConfig) -> Self { + Self { + editor_type: EditorType::from(old.editor_type), // Now SCREAMING_SNAKE_CASE + custom_command: old.custom_command, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS, EnumString)] +#[ts(use_ts_enum)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] +pub enum EditorType { + VsCode, + Cursor, + Windsurf, + IntelliJ, + Zed, + Custom, +} + +impl From for EditorType { + fn from(old: v1::EditorType) -> Self { + match old { + v1::EditorType::VsCode => EditorType::VsCode, + v1::EditorType::Cursor => EditorType::Cursor, + v1::EditorType::Windsurf => EditorType::Windsurf, + v1::EditorType::IntelliJ => EditorType::IntelliJ, + v1::EditorType::Zed => EditorType::Zed, + v1::EditorType::Custom => EditorType::Custom, + } + } +} + +impl Default for EditorConfig { + fn default() -> Self { + Self { + editor_type: EditorType::VsCode, + custom_command: None, + } + } +} + +impl EditorConfig { + pub fn get_command(&self) -> Vec { + match &self.editor_type { + EditorType::VsCode => vec!["code".to_string()], + EditorType::Cursor => vec!["cursor".to_string()], + EditorType::Windsurf => vec!["windsurf".to_string()], + EditorType::IntelliJ => vec!["idea".to_string()], + EditorType::Zed => vec!["zed".to_string()], + EditorType::Custom => { + if let Some(custom) = &self.custom_command { + custom.split_whitespace().map(|s| s.to_string()).collect() + } else { + vec!["code".to_string()] // fallback to VSCode + } + } + } + } + + pub fn open_file(&self, path: &str) -> Result<(), std::io::Error> { + let command = self.get_command(); + let mut cmd = std::process::Command::new(&command[0]); + for arg in &command[1..] 
{ + cmd.arg(arg); + } + cmd.arg(path); + cmd.spawn()?; + Ok(()) + } + + pub fn with_override(&self, editor_type_str: Option<&str>) -> Self { + if let Some(editor_type_str) = editor_type_str { + let editor_type = + EditorType::from_str(editor_type_str).unwrap_or(self.editor_type.clone()); + EditorConfig { + editor_type, + custom_command: self.custom_command.clone(), + } + } else { + self.clone() + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS, EnumString)] +#[ts(use_ts_enum)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[strum(serialize_all = "SCREAMING_SNAKE_CASE")] +pub enum ThemeMode { + Light, + Dark, + System, + Purple, + Green, + Blue, + Orange, + Red, +} + +impl From for ThemeMode { + fn from(old: v1::ThemeMode) -> Self { + match old { + v1::ThemeMode::Light => ThemeMode::Light, + v1::ThemeMode::Dark => ThemeMode::Dark, + v1::ThemeMode::System => ThemeMode::System, + v1::ThemeMode::Purple => ThemeMode::Purple, + v1::ThemeMode::Green => ThemeMode::Green, + v1::ThemeMode::Blue => ThemeMode::Blue, + v1::ThemeMode::Orange => ThemeMode::Orange, + v1::ThemeMode::Red => ThemeMode::Red, + } + } +} diff --git a/crates/services/src/services/container.rs b/crates/services/src/services/container.rs new file mode 100644 index 00000000..0b242a8b --- /dev/null +++ b/crates/services/src/services/container.rs @@ -0,0 +1,616 @@ +use std::{ + collections::HashMap, + path::PathBuf, + sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }, +}; + +use anyhow::Error as AnyhowError; +use async_trait::async_trait; +use axum::response::sse::Event; +use db::{ + DBService, + models::{ + execution_process::{ + CreateExecutionProcess, ExecutionContext, ExecutionProcess, ExecutionProcessRunReason, + ExecutionProcessStatus, + }, + execution_process_logs::ExecutionProcessLogs, + executor_session::{CreateExecutorSession, ExecutorSession}, + task::{Task, TaskStatus}, + task_attempt::{TaskAttempt, TaskAttemptError}, + }, +}; +use executors::{ + actions::{ + ExecutorAction, ExecutorActionType, + coding_agent_initial::CodingAgentInitialRequest, + script::{ScriptContext, ScriptRequest, ScriptRequestLanguage}, + }, + executors::{CodingAgent, ExecutorError, StandardCodingAgentExecutor}, + logs::utils::patch::ConversationPatch, +}; +use futures::{StreamExt, TryStreamExt, future}; +use sqlx::Error as SqlxError; +use thiserror::Error; +use tokio::{sync::RwLock, task::JoinHandle}; +use utils::{log_msg::LogMsg, msg_store::MsgStore}; +use uuid::Uuid; + +use crate::services::{ + git::{GitService, GitServiceError}, + worktree_manager::WorktreeError, +}; +pub type ContainerRef = String; + +#[derive(Debug, Error)] +pub enum ContainerError { + #[error(transparent)] + GitServiceError(#[from] GitServiceError), + #[error(transparent)] + Sqlx(#[from] SqlxError), + #[error(transparent)] + ExecutorError(#[from] ExecutorError), + #[error(transparent)] + Worktree(#[from] WorktreeError), + #[error("Io error: {0}")] + Io(#[from] std::io::Error), + #[error("Failed to kill process: {0}")] + KillFailed(std::io::Error), + #[error(transparent)] + TaskAttemptError(#[from] TaskAttemptError), + #[error(transparent)] + Other(#[from] AnyhowError), // Catches any unclassified errors +} + +#[async_trait] +pub trait ContainerService { + fn msg_stores(&self) -> &Arc>>>; + + fn db(&self) -> &DBService; + + fn git(&self) -> &GitService; + + fn task_attempt_to_current_dir(&self, task_attempt: &TaskAttempt) -> PathBuf; + + async fn create(&self, task_attempt: &TaskAttempt) -> Result; + + async fn delete(&self, task_attempt: &TaskAttempt) -> 
Result<(), ContainerError> {
+        self.try_stop(task_attempt).await;
+        self.delete_inner(task_attempt).await
+    }
+
+    async fn try_stop(&self, task_attempt: &TaskAttempt) {
+        // stop all execution processes for this attempt
+        if let Ok(processes) =
+            ExecutionProcess::find_by_task_attempt_id(&self.db().pool, task_attempt.id).await
+        {
+            for process in processes {
+                if process.status == ExecutionProcessStatus::Running {
+                    self.stop_execution(&process).await.unwrap_or_else(|e| {
+                        tracing::debug!(
+                            "Failed to stop execution process {} for task attempt {}: {}",
+                            process.id,
+                            task_attempt.id,
+                            e
+                        );
+                    });
+                }
+            }
+        }
+    }
+
+    async fn delete_inner(&self, task_attempt: &TaskAttempt) -> Result<(), ContainerError>;
+
+    async fn ensure_container_exists(
+        &self,
+        task_attempt: &TaskAttempt,
+    ) -> Result;
+
+    async fn start_execution_inner(
+        &self,
+        task_attempt: &TaskAttempt,
+        execution_process: &ExecutionProcess,
+        executor_action: &ExecutorAction,
+    ) -> Result<(), ContainerError>;
+
+    async fn stop_execution(
+        &self,
+        execution_process: &ExecutionProcess,
+    ) -> Result<(), ContainerError>;
+
+    async fn try_commit_changes(&self, ctx: &ExecutionContext) -> Result<(), ContainerError>;
+
+    async fn get_diff(
+        &self,
+        task_attempt: &TaskAttempt,
+    ) -> Result>, ContainerError>;
+
+    /// Fetch the MsgStore for a given execution ID, returning None if missing.
+    async fn get_msg_store_by_id(&self, uuid: &Uuid) -> Option> {
+        let map = self.msg_stores().read().await;
+        map.get(uuid).cloned()
+    }
+
+    async fn stream_raw_logs(
+        &self,
+        id: &Uuid,
+    ) -> Option>> {
+        // First try the in-memory store
+        if let Some(store) = self.get_msg_store_by_id(id).await {
+            let counter = Arc::new(AtomicUsize::new(0));
+            return Some(
+                store
+                    .history_plus_stream()
+                    .filter(|msg| {
+                        future::ready(matches!(msg, Ok(LogMsg::Stdout(..)
| LogMsg::Stderr(..)))) + }) + .map_ok({ + let counter = counter.clone(); + move |m| { + let index = counter.fetch_add(1, Ordering::SeqCst); + match m { + LogMsg::Stdout(content) => { + let patch = ConversationPatch::add_stdout(index, content); + LogMsg::JsonPatch(patch).to_sse_event() + } + LogMsg::Stderr(content) => { + let patch = ConversationPatch::add_stderr(index, content); + LogMsg::JsonPatch(patch).to_sse_event() + } + _ => unreachable!("Filter should only pass Stdout/Stderr"), + } + } + }) + .boxed(), + ); + } else { + // Fallback: load from DB and create direct stream + let logs_record = + match ExecutionProcessLogs::find_by_execution_id(&self.db().pool, *id).await { + Ok(Some(record)) => record, + Ok(None) => return None, // No logs exist + Err(e) => { + tracing::error!("Failed to fetch logs for execution {}: {}", id, e); + return None; + } + }; + + let messages = match logs_record.parse_logs() { + Ok(msgs) => msgs, + Err(e) => { + tracing::error!("Failed to parse logs for execution {}: {}", id, e); + return None; + } + }; + + // Direct stream from parsed messages converted to JSON patches + let stream = futures::stream::iter( + messages + .into_iter() + .filter(|m| matches!(m, LogMsg::Stdout(_) | LogMsg::Stderr(_))) + .enumerate() + .map(|(index, m)| { + let event = match m { + LogMsg::Stdout(content) => { + let patch = ConversationPatch::add_stdout(index, content); + LogMsg::JsonPatch(patch).to_sse_event() + } + LogMsg::Stderr(content) => { + let patch = ConversationPatch::add_stderr(index, content); + LogMsg::JsonPatch(patch).to_sse_event() + } + _ => unreachable!("Filter should only pass Stdout/Stderr"), + }; + Ok::<_, std::io::Error>(event) + }), + ) + .boxed(); + + Some(stream) + } + } + + async fn stream_normalized_logs( + &self, + id: &Uuid, + ) -> Option>> { + // First try in-memory store (existing behavior) + if let Some(store) = self.get_msg_store_by_id(id).await { + Some( + store + .history_plus_stream() // BoxStream> + .filter(|msg| future::ready(matches!(msg, Ok(LogMsg::JsonPatch(..))))) + .map_ok(|m| m.to_sse_event()) // LogMsg -> Event + .boxed(), + ) + } else { + // Fallback: load from DB and normalize + let logs_record = + match ExecutionProcessLogs::find_by_execution_id(&self.db().pool, *id).await { + Ok(Some(record)) => record, + Ok(None) => return None, // No logs exist + Err(e) => { + tracing::error!("Failed to fetch logs for execution {}: {}", id, e); + return None; + } + }; + + let raw_messages = match logs_record.parse_logs() { + Ok(msgs) => msgs, + Err(e) => { + tracing::error!("Failed to parse logs for execution {}: {}", id, e); + return None; + } + }; + + // Create temporary store and populate + let temp_store = Arc::new(MsgStore::new()); + for msg in raw_messages { + if matches!(msg, LogMsg::Stdout(_) | LogMsg::Stderr(_)) { + temp_store.push(msg); + } + } + temp_store.push_finished(); + + let process = match ExecutionProcess::find_by_id(&self.db().pool, *id).await { + Ok(Some(process)) => process, + Ok(None) => { + tracing::error!("No execution process found for ID: {}", id); + return None; + } + Err(e) => { + tracing::error!("Failed to fetch execution process {}: {}", id, e); + return None; + } + }; + + // Get the task attempt to determine correct directory + let task_attempt = match process.parent_task_attempt(&self.db().pool).await { + Ok(Some(task_attempt)) => task_attempt, + Ok(None) => { + tracing::error!("No task attempt found for ID: {}", process.task_attempt_id); + return None; + } + Err(e) => { + tracing::error!( + "Failed to fetch task 
attempt {}: {}", + process.task_attempt_id, + e + ); + return None; + } + }; + + let current_dir = self.task_attempt_to_current_dir(&task_attempt); + + let executor_action = if let Ok(executor_action) = process.executor_action() { + executor_action + } else { + tracing::error!( + "Failed to parse executor action: {:?}", + process.executor_action() + ); + return None; + }; + + // Spawn normalizer on populated store + match executor_action.typ() { + ExecutorActionType::CodingAgentInitialRequest(request) => { + if let Ok(executor) = CodingAgent::from_profile_str(&request.profile) { + executor.normalize_logs(temp_store.clone(), ¤t_dir); + } else { + tracing::error!( + "Failed to resolve profile '{}' for normalization", + request.profile + ); + } + } + ExecutorActionType::CodingAgentFollowUpRequest(request) => { + if let Ok(executor) = CodingAgent::from_profile_str(&request.profile) { + executor.normalize_logs(temp_store.clone(), ¤t_dir); + } else { + tracing::error!( + "Failed to resolve profile '{}' for normalization", + request.profile + ); + } + } + _ => { + tracing::debug!( + "Executor action doesn't support log normalization: {:?}", + process.executor_action() + ); + return None; + } + } + Some( + temp_store + .history_plus_stream() + .filter(|msg| future::ready(matches!(msg, Ok(LogMsg::JsonPatch(..))))) + .map_ok(|m| m.to_sse_event()) + .boxed(), + ) + } + } + + fn spawn_stream_raw_logs_to_db(&self, execution_id: &Uuid) -> JoinHandle<()> { + let execution_id = *execution_id; + let msg_stores = self.msg_stores().clone(); + let db = self.db().clone(); + + tokio::spawn(async move { + // Get the message store for this execution + let store = { + let map = msg_stores.read().await; + map.get(&execution_id).cloned() + }; + + if let Some(store) = store { + let mut stream = store.history_plus_stream(); + + while let Some(Ok(msg)) = stream.next().await { + match &msg { + LogMsg::Stdout(_) | LogMsg::Stderr(_) => { + // Serialize this individual message as a JSONL line + match serde_json::to_string(&msg) { + Ok(jsonl_line) => { + let jsonl_line_with_newline = format!("{jsonl_line}\n"); + + // Append this line to the database + if let Err(e) = ExecutionProcessLogs::append_log_line( + &db.pool, + execution_id, + &jsonl_line_with_newline, + ) + .await + { + tracing::error!( + "Failed to append log line for execution {}: {}", + execution_id, + e + ); + } + } + Err(e) => { + tracing::error!( + "Failed to serialize log message for execution {}: {}", + execution_id, + e + ); + } + } + } + LogMsg::SessionId(session_id) => { + // Append this line to the database + if let Err(e) = ExecutorSession::update_session_id( + &db.pool, + execution_id, + session_id, + ) + .await + { + tracing::error!( + "Failed to update session_id {} for execution process {}: {}", + session_id, + execution_id, + e + ); + } + } + LogMsg::Finished => { + break; + } + LogMsg::JsonPatch(_) => continue, + } + } + } + }) + } + + async fn start_attempt( + &self, + task_attempt: &TaskAttempt, + profile_label: String, + ) -> Result { + // Create container + self.create(task_attempt).await?; + + // Get parent task + let task = task_attempt + .parent_task(&self.db().pool) + .await? + .ok_or(SqlxError::RowNotFound)?; + + // Get parent project + let project = task + .parent_project(&self.db().pool) + .await? + .ok_or(SqlxError::RowNotFound)?; + + // // Get latest version of task attempt + let task_attempt = TaskAttempt::find_by_id(&self.db().pool, task_attempt.id) + .await? 
+ .ok_or(SqlxError::RowNotFound)?; + + let cleanup_action = project.cleanup_script.map(|script| { + Box::new(ExecutorAction::new( + ExecutorActionType::ScriptRequest(ScriptRequest { + script, + language: ScriptRequestLanguage::Bash, + context: ScriptContext::CleanupScript, + }), + None, + )) + }); + + // Choose whether to execute the setup_script or coding agent first + let execution_process = if let Some(setup_script) = project.setup_script { + let executor_action = ExecutorAction::new( + ExecutorActionType::ScriptRequest(ScriptRequest { + script: setup_script, + language: ScriptRequestLanguage::Bash, + context: ScriptContext::SetupScript, + }), + // once the setup script is done, run the initial coding agent request + Some(Box::new(ExecutorAction::new( + ExecutorActionType::CodingAgentInitialRequest(CodingAgentInitialRequest { + prompt: task.to_prompt(), + profile: profile_label, + }), + cleanup_action, + ))), + ); + + self.start_execution( + &task_attempt, + &executor_action, + &ExecutionProcessRunReason::SetupScript, + ) + .await? + } else { + let executor_action = ExecutorAction::new( + ExecutorActionType::CodingAgentInitialRequest(CodingAgentInitialRequest { + prompt: task.to_prompt(), + profile: profile_label, + }), + cleanup_action, + ); + + self.start_execution( + &task_attempt, + &executor_action, + &ExecutionProcessRunReason::CodingAgent, + ) + .await? + }; + Ok(execution_process) + } + + async fn start_execution( + &self, + task_attempt: &TaskAttempt, + executor_action: &ExecutorAction, + run_reason: &ExecutionProcessRunReason, + ) -> Result { + // Update task status to InProgress when starting an attempt + let task = task_attempt + .parent_task(&self.db().pool) + .await? + .ok_or(SqlxError::RowNotFound)?; + if task.status != TaskStatus::InProgress + && run_reason != &ExecutionProcessRunReason::DevServer + { + Task::update_status(&self.db().pool, task.id, TaskStatus::InProgress).await?; + } + // Create new execution process record + let create_execution_process = CreateExecutionProcess { + task_attempt_id: task_attempt.id, + executor_action: executor_action.clone(), + run_reason: run_reason.clone(), + }; + + let execution_process = + ExecutionProcess::create(&self.db().pool, &create_execution_process, Uuid::new_v4()) + .await?; + + if let ExecutorActionType::CodingAgentInitialRequest(coding_agent_request) = + executor_action.typ() + { + let create_executor_data = CreateExecutorSession { + task_attempt_id: task_attempt.id, + execution_process_id: execution_process.id, + prompt: Some(coding_agent_request.prompt.clone()), + }; + + let executor_session_record_id = Uuid::new_v4(); + + ExecutorSession::create( + &self.db().pool, + &create_executor_data, + executor_session_record_id, + ) + .await?; + } + + let _ = self + .start_execution_inner(task_attempt, &execution_process, executor_action) + .await?; + + // Start processing normalised logs for executor requests and follow ups + match executor_action.typ() { + ExecutorActionType::CodingAgentInitialRequest(request) => { + if let Some(msg_store) = self.get_msg_store_by_id(&execution_process.id).await { + if let Ok(executor) = CodingAgent::from_profile_str(&request.profile) { + executor.normalize_logs( + msg_store, + &self.task_attempt_to_current_dir(task_attempt), + ); + } else { + tracing::error!( + "Failed to resolve profile '{}' for normalization", + request.profile + ); + } + } + } + ExecutorActionType::CodingAgentFollowUpRequest(request) => { + if let Some(msg_store) = self.get_msg_store_by_id(&execution_process.id).await { + if 
let Ok(executor) = CodingAgent::from_profile_str(&request.profile) { + executor.normalize_logs( + msg_store, + &self.task_attempt_to_current_dir(task_attempt), + ); + } else { + tracing::error!( + "Failed to resolve profile '{}' for normalization", + request.profile + ); + } + } + } + _ => {} + }; + + self.spawn_stream_raw_logs_to_db(&execution_process.id); + Ok(execution_process) + } + + async fn try_start_next_action(&self, ctx: &ExecutionContext) -> Result<(), ContainerError> { + let action = ctx.execution_process.executor_action()?; + let next_action = if let Some(next_action) = action.next_action() { + next_action + } else if matches!( + ctx.execution_process.run_reason, + ExecutionProcessRunReason::SetupScript + ) { + return Err(ContainerError::Other(anyhow::anyhow!( + "No next action configured for SetupScript" + ))); + } else { + tracing::debug!("No next action configured"); + return Ok(()); + }; + + // Determine the run reason of the next action + let next_run_reason = match ctx.execution_process.run_reason { + ExecutionProcessRunReason::SetupScript => ExecutionProcessRunReason::CodingAgent, + ExecutionProcessRunReason::CodingAgent => ExecutionProcessRunReason::CleanupScript, + _ => { + tracing::warn!( + "Unexpected run reason: {:?}, defaulting to current reason", + ctx.execution_process.run_reason + ); + ctx.execution_process.run_reason.clone() + } + }; + + self.start_execution(&ctx.task_attempt, next_action, &next_run_reason) + .await?; + + tracing::debug!("Started next action: {:?}", next_action); + Ok(()) + } +} diff --git a/crates/services/src/services/events.rs b/crates/services/src/services/events.rs new file mode 100644 index 00000000..2ca7a9a5 --- /dev/null +++ b/crates/services/src/services/events.rs @@ -0,0 +1,197 @@ +use std::{str::FromStr, sync::Arc}; + +use anyhow::Error as AnyhowError; +use db::{ + DBService, + models::{execution_process::ExecutionProcess, task::Task, task_attempt::TaskAttempt}, +}; +use serde::Serialize; +use serde_json::json; +use sqlx::{Error as SqlxError, sqlite::SqliteOperation}; +use strum_macros::{Display, EnumString}; +use thiserror::Error; +use tokio::sync::RwLock; +use ts_rs::TS; +use utils::msg_store::MsgStore; + +#[derive(Debug, Error)] +pub enum EventError { + #[error(transparent)] + Sqlx(#[from] SqlxError), + #[error(transparent)] + Parse(#[from] serde_json::Error), + #[error(transparent)] + Other(#[from] AnyhowError), // Catches any unclassified errors +} + +#[derive(Clone)] +pub struct EventService { + msg_store: Arc, + db: DBService, + entry_count: Arc>, +} + +#[derive(EnumString, Display)] +enum HookTables { + #[strum(to_string = "tasks")] + Tasks, + #[strum(to_string = "task_attempts")] + TaskAttempts, + #[strum(to_string = "execution_processes")] + ExecutionProcesses, +} + +#[derive(Serialize, TS)] +#[serde(tag = "type", content = "data", rename_all = "SCREAMING_SNAKE_CASE")] +pub enum RecordTypes { + Task(Task), + TaskAttempt(TaskAttempt), + ExecutionProcess(ExecutionProcess), + DeletedTask { rowid: i64 }, + DeletedTaskAttempt { rowid: i64 }, + DeletedExecutionProcess { rowid: i64 }, +} + +#[derive(Serialize, TS)] +pub struct EventPatchInner { + db_op: String, + record: RecordTypes, +} + +#[derive(Serialize, TS)] +pub struct EventPatch { + op: String, + path: String, + value: EventPatchInner, +} + +impl EventService { + /// Creates a new EventService that will work with a DBService configured with hooks + pub fn new(db: DBService, msg_store: Arc, entry_count: Arc>) -> Self { + Self { + msg_store, + db, + entry_count, + } + } + + 
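+    // How the pieces fit together (sketch, inferred from the hook body below): `create_hook`
+    // returns an `after_connect` callback for the SQLite pool. On every insert, update, or
+    // delete touching the hooked tables (tasks, task_attempts, execution_processes) it looks
+    // up the affected row by rowid (or records just the rowid for deletes), wraps the result
+    // in an `EventPatch` at path `/entries/{n}`, and pushes it into the shared `msg_store`;
+    // `msg_store()` exposes that same store so callers can stream the patches. `entry_count`
+    // is the shared counter that produces those `/entries/{n}` indices.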
/// Creates the hook function that should be used with DBService::new_with_after_connect + pub fn create_hook( + msg_store: Arc, + entry_count: Arc>, + db_service: DBService, + ) -> impl for<'a> Fn( + &'a mut sqlx::sqlite::SqliteConnection, + ) -> std::pin::Pin< + Box> + Send + 'a>, + > + Send + + Sync + + 'static { + move |conn: &mut sqlx::sqlite::SqliteConnection| { + let msg_store_for_hook = msg_store.clone(); + let entry_count_for_hook = entry_count.clone(); + let db_for_hook = db_service.clone(); + + Box::pin(async move { + let mut handle = conn.lock_handle().await?; + let runtime_handle = tokio::runtime::Handle::current(); + handle.set_update_hook(move |hook: sqlx::sqlite::UpdateHookResult<'_>| { + let runtime_handle = runtime_handle.clone(); + let entry_count_for_hook = entry_count_for_hook.clone(); + let msg_store_for_hook = msg_store_for_hook.clone(); + let db = db_for_hook.clone(); + + if let Ok(table) = HookTables::from_str(hook.table) { + let rowid = hook.rowid; + runtime_handle.spawn(async move { + let record_type: RecordTypes = match (table, hook.operation.clone()) { + (HookTables::Tasks, SqliteOperation::Delete) => { + RecordTypes::DeletedTask { rowid } + } + (HookTables::TaskAttempts, SqliteOperation::Delete) => { + RecordTypes::DeletedTaskAttempt { rowid } + } + (HookTables::ExecutionProcesses, SqliteOperation::Delete) => { + RecordTypes::DeletedExecutionProcess { rowid } + } + (HookTables::Tasks, _) => { + match Task::find_by_rowid(&db.pool, rowid).await { + Ok(Some(task)) => RecordTypes::Task(task), + Ok(None) => RecordTypes::DeletedTask { rowid }, + Err(e) => { + tracing::error!("Failed to fetch task: {:?}", e); + return; + } + } + } + (HookTables::TaskAttempts, _) => { + match TaskAttempt::find_by_rowid(&db.pool, rowid).await { + Ok(Some(attempt)) => RecordTypes::TaskAttempt(attempt), + Ok(None) => RecordTypes::DeletedTaskAttempt { rowid }, + Err(e) => { + tracing::error!( + "Failed to fetch task_attempt: {:?}", + e + ); + return; + } + } + } + (HookTables::ExecutionProcesses, _) => { + match ExecutionProcess::find_by_rowid(&db.pool, rowid).await { + Ok(Some(process)) => RecordTypes::ExecutionProcess(process), + Ok(None) => RecordTypes::DeletedExecutionProcess { rowid }, + Err(e) => { + tracing::error!( + "Failed to fetch execution_process: {:?}", + e + ); + return; + } + } + } + _ => unreachable!(), + }; + + let next_entry_count = { + let mut entry_count = entry_count_for_hook.write().await; + *entry_count += 1; + *entry_count + }; + + let db_op: &str = match hook.operation { + SqliteOperation::Insert => "insert", + SqliteOperation::Delete => "delete", + SqliteOperation::Update => "update", + SqliteOperation::Unknown(_) => "unknown", + }; + + let event_patch: EventPatch = EventPatch { + op: "add".to_string(), + path: format!("/entries/{next_entry_count}"), + value: EventPatchInner { + db_op: db_op.to_string(), + record: record_type, + }, + }; + + let patch = + serde_json::from_value(json!([ + serde_json::to_value(event_patch).unwrap() + ])) + .unwrap(); + + msg_store_for_hook.push_patch(patch); + }); + } + }); + + Ok(()) + }) + } + } + + pub fn msg_store(&self) -> &Arc { + &self.msg_store + } +} diff --git a/crates/services/src/services/filesystem.rs b/crates/services/src/services/filesystem.rs new file mode 100644 index 00000000..3740ff2a --- /dev/null +++ b/crates/services/src/services/filesystem.rs @@ -0,0 +1,163 @@ +use std::{ + fs, + path::{Path, PathBuf}, +}; + +use ignore::WalkBuilder; +use serde::Serialize; +use thiserror::Error; +use ts_rs::TS; 
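+// Usage sketch (assumes a Tokio/async context; the concrete argument and error types follow
+// the definitions below):
+//
+//     let fs = FilesystemService::new();
+//     // List the home directory (the default when no path is supplied).
+//     let listing = fs.list_directory(None).await?;
+//     // Recursively look for git repositories, capped at three directory levels.
+//     let repos = fs.list_git_repos(None, Some(3)).await?;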
+#[derive(Clone)] +pub struct FilesystemService {} + +#[derive(Debug, Error)] +pub enum FilesystemError { + #[error("Directory does not exist")] + DirectoryDoesNotExist, + #[error("Path is not a directory")] + PathIsNotDirectory, + #[error("Failed to read directory: {0}")] + Io(#[from] std::io::Error), +} +#[derive(Debug, Serialize, TS)] +pub struct DirectoryListResponse { + pub entries: Vec, + pub current_path: String, +} + +#[derive(Debug, Serialize, TS)] +pub struct DirectoryEntry { + pub name: String, + pub path: String, + pub is_directory: bool, + pub is_git_repo: bool, + pub last_modified: Option, +} + +impl Default for FilesystemService { + fn default() -> Self { + Self::new() + } +} + +impl FilesystemService { + pub fn new() -> Self { + FilesystemService {} + } + + pub async fn list_git_repos( + &self, + path: Option, + max_depth: Option, + ) -> Result, FilesystemError> { + let base_path = path + .map(PathBuf::from) + .unwrap_or_else(Self::get_home_directory); + Self::verify_directory(&base_path)?; + let mut git_repos: Vec = WalkBuilder::new(&base_path) + .follow_links(false) + .hidden(true) + .git_ignore(true) + .filter_entry(|entry| entry.path().is_dir()) + .max_depth(max_depth) + .git_exclude(true) + .build() + .filter_map(|entry| { + let entry = entry.ok()?; + let name = entry.file_name().to_str()?; + if !entry.path().join(".git").exists() { + return None; + } + let last_modified = entry + .metadata() + .ok() + .and_then(|m| m.modified().ok()) + .map(|t| t.elapsed().unwrap_or_default().as_secs()); + Some(DirectoryEntry { + name: name.to_string(), + path: entry.path().to_string_lossy().to_string(), + is_directory: true, + is_git_repo: true, + last_modified, + }) + }) + .collect(); + git_repos.sort_by_key(|entry| entry.last_modified.unwrap_or(0)); + Ok(git_repos) + } + + fn get_home_directory() -> PathBuf { + dirs::home_dir() + .or_else(dirs::desktop_dir) + .or_else(dirs::document_dir) + .unwrap_or_else(|| { + if cfg!(windows) { + std::env::var("USERPROFILE") + .map(PathBuf::from) + .unwrap_or_else(|_| PathBuf::from("C:\\")) + } else { + PathBuf::from("/") + } + }) + } + + fn verify_directory(path: &Path) -> Result<(), FilesystemError> { + if !path.exists() { + return Err(FilesystemError::DirectoryDoesNotExist); + } + if !path.is_dir() { + return Err(FilesystemError::PathIsNotDirectory); + } + Ok(()) + } + + pub async fn list_directory( + &self, + path: Option, + ) -> Result { + let path = path + .map(PathBuf::from) + .unwrap_or_else(Self::get_home_directory); + Self::verify_directory(&path)?; + + let entries = fs::read_dir(&path)?; + let mut directory_entries = Vec::new(); + + for entry in entries.flatten() { + let path = entry.path(); + let metadata = entry.metadata().ok(); + if let Some(name) = path.file_name().and_then(|n| n.to_str()) { + // Skip hidden files/directories + if name.starts_with('.') && name != ".." 
{ + continue; + } + + let is_directory = metadata.is_some_and(|m| m.is_dir()); + let is_git_repo = if is_directory { + path.join(".git").exists() + } else { + false + }; + + directory_entries.push(DirectoryEntry { + name: name.to_string(), + path: path.to_string_lossy().to_string(), + is_directory, + is_git_repo, + last_modified: None, + }); + } + } + // Sort: directories first, then files, both alphabetically + directory_entries.sort_by(|a, b| match (a.is_directory, b.is_directory) { + (true, false) => std::cmp::Ordering::Less, + (false, true) => std::cmp::Ordering::Greater, + _ => a.name.to_lowercase().cmp(&b.name.to_lowercase()), + }); + + Ok(DirectoryListResponse { + entries: directory_entries, + current_path: path.to_string_lossy().to_string(), + }) + } +} diff --git a/crates/services/src/services/filesystem_watcher.rs b/crates/services/src/services/filesystem_watcher.rs new file mode 100644 index 00000000..ce84f106 --- /dev/null +++ b/crates/services/src/services/filesystem_watcher.rs @@ -0,0 +1,168 @@ +use std::{ + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; + +use futures::{ + SinkExt, StreamExt, + channel::mpsc::{Receiver, channel}, +}; +use ignore::{ + WalkBuilder, + gitignore::{Gitignore, GitignoreBuilder}, +}; +use notify::{RecommendedWatcher, RecursiveMode}; +use notify_debouncer_full::{ + DebounceEventResult, DebouncedEvent, Debouncer, RecommendedCache, new_debouncer, +}; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum FilesystemWatcherError { + #[error(transparent)] + Notify(#[from] notify::Error), + #[error(transparent)] + Ignore(#[from] ignore::Error), + #[error(transparent)] + IoError(#[from] std::io::Error), + #[error("Failed to build gitignore: {0}")] + GitignoreBuilder(String), + #[error("Invalid path: {0}")] + InvalidPath(String), +} + +fn canonicalize_lossy(path: &Path) -> PathBuf { + dunce::canonicalize(path).unwrap_or_else(|_| path.to_path_buf()) +} + +fn build_gitignore_set(root: &Path) -> Result { + let mut builder = GitignoreBuilder::new(root); + + // Walk once to collect all .gitignore files under root + for result in WalkBuilder::new(root) + .follow_links(false) + .hidden(false) // we *want* to see .gitignore + .standard_filters(false) // do not apply default ignores while walking + .git_ignore(false) // we'll add them manually + .git_exclude(false) + .build() + { + let dir_entry = result?; + if dir_entry + .file_type() + .map(|ft| ft.is_file()) + .unwrap_or(false) + && dir_entry + .path() + .file_name() + .is_some_and(|name| name == ".gitignore") + { + builder.add(dir_entry.path()); + } + } + + // Optionally include repo-local excludes + let info_exclude = root.join(".git/info/exclude"); + if info_exclude.exists() { + builder.add(info_exclude); + } + + Ok(builder.build()?) 
+} + +fn path_allowed(path: &PathBuf, gi: &Gitignore, canonical_root: &Path) -> bool { + let canonical_path = canonicalize_lossy(path); + + // Convert absolute path to relative path from the gitignore root + let relative_path = match canonical_path.strip_prefix(canonical_root) { + Ok(rel_path) => rel_path, + Err(_) => { + // Path is outside the watched root, don't ignore it + return true; + } + }; + + // Heuristic: assume paths without extensions are directories + // This works for most cases and avoids filesystem syscalls + let is_dir = relative_path.extension().is_none(); + let matched = gi.matched_path_or_any_parents(relative_path, is_dir); + + !matched.is_ignore() +} + +fn debounced_should_forward(event: &DebouncedEvent, gi: &Gitignore, canonical_root: &Path) -> bool { + // DebouncedEvent is a struct that wraps the underlying notify::Event + // We can check its paths field to determine if the event should be forwarded + event + .paths + .iter() + .all(|path| path_allowed(path, gi, canonical_root)) +} + +pub fn async_watcher( + root: PathBuf, +) -> Result< + ( + Debouncer, + Receiver, + PathBuf, + ), + FilesystemWatcherError, +> { + let canonical_root = canonicalize_lossy(&root); + let gi_set = Arc::new(build_gitignore_set(&canonical_root)?); + let (mut tx, rx) = channel(64); // Increased capacity for error bursts + + let gi_clone = gi_set.clone(); + let root_clone = canonical_root.clone(); + + let mut debouncer = new_debouncer( + Duration::from_millis(200), + None, // Use default config + move |res: DebounceEventResult| { + match res { + Ok(events) => { + // Filter events and only send allowed ones + let filtered_events: Vec = events + .into_iter() + .filter(|ev| debounced_should_forward(ev, &gi_clone, &root_clone)) + .collect(); + + if !filtered_events.is_empty() { + let filtered_result = Ok(filtered_events); + futures::executor::block_on(async { + tx.send(filtered_result).await.ok(); + }); + } + } + Err(errors) => { + // Always forward errors + futures::executor::block_on(async { + tx.send(Err(errors)).await.ok(); + }); + } + } + }, + )?; + + // Start watching the root directory + debouncer.watch(&canonical_root, RecursiveMode::Recursive)?; + + Ok((debouncer, rx, canonical_root)) +} + +async fn async_watch>(path: P) -> Result<(), FilesystemWatcherError> { + let (_debouncer, mut rx, _canonical_path) = async_watcher(path.as_ref().to_path_buf())?; + + // The debouncer is already watching the path, no need to call watch() again + + while let Some(res) = rx.next().await { + match res { + Ok(event) => println!("changed: {event:?}"), + Err(e) => println!("watch error: {e:?}"), + } + } + + Ok(()) +} diff --git a/crates/services/src/services/git.rs b/crates/services/src/services/git.rs new file mode 100644 index 00000000..21adb7b0 --- /dev/null +++ b/crates/services/src/services/git.rs @@ -0,0 +1,1190 @@ +use std::path::{Path, PathBuf}; + +use chrono::{DateTime, Utc}; +use git2::{ + BranchType, CherrypickOptions, Cred, Error as GitError, FetchOptions, RemoteCallbacks, + Repository, Status, StatusOptions, build::CheckoutBuilder, +}; +use regex; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::debug; +use ts_rs::TS; +use utils::diff::{DiffChunk, DiffChunkType, FileDiff, WorktreeDiff}; + +// use crate::{ +// models::task_attempt::{DiffChunk, DiffChunkType, FileDiff, WorktreeDiff}, +// utils::worktree_manager::WorktreeManager, +// }; + +#[derive(Debug, Error)] +pub enum GitServiceError { + #[error(transparent)] + Git(#[from] GitError), + #[error(transparent)] + 
IoError(#[from] std::io::Error), + #[error("Invalid repository: {0}")] + InvalidRepository(String), + #[error("Branch not found: {0}")] + BranchNotFound(String), + #[error("Merge conflicts: {0}")] + MergeConflicts(String), + #[error("Invalid path: {0}")] + InvalidPath(String), + #[error("Worktree has uncommitted changes: {0}")] + WorktreeDirty(String), + #[error("Invalid file paths: {0}")] + InvalidFilePaths(String), +} + +/// Service for managing Git operations in task execution workflows +#[derive(Clone)] +pub struct GitService {} + +#[derive(Debug, Serialize, TS)] +pub struct GitBranch { + pub name: String, + pub is_current: bool, + pub is_remote: bool, + #[ts(type = "Date")] + pub last_commit_date: DateTime, +} + +#[derive(Debug, Clone, Serialize, Deserialize, TS)] +pub struct BranchStatus { + pub is_behind: bool, + pub commits_behind: usize, + pub commits_ahead: usize, + pub up_to_date: bool, + pub merged: bool, + pub has_uncommitted_changes: bool, + pub base_branch_name: String, +} + +/// Represents a snapshot for diff comparison +enum Snapshot<'a> { + /// Any git tree object + Tree(git2::Oid), + /// The work-dir / index as it is *now*, compared to the given base tree + WorkdirAgainst(git2::Oid, &'a Path), +} + +impl Default for GitService { + fn default() -> Self { + Self::new() + } +} + +impl GitService { + /// Create a new GitService for the given repository path + pub fn new() -> Self { + Self {} + } + + /// Normalize a path to be repo-relative and use POSIX separators + fn normalize_to_repo_relative( + repo: &Repository, + path: &Path, + ) -> Result { + // Get the repository's working directory + let repo_workdir = repo.workdir().ok_or_else(|| { + GitServiceError::InvalidRepository("Repository has no working directory".to_string()) + })?; + + // Try to strip the repo prefix if path is absolute + let relative_path = if path.is_absolute() { + path.strip_prefix(repo_workdir).map_err(|_| { + GitServiceError::InvalidFilePaths(format!( + "Path '{}' is outside repository root '{}'", + path.display(), + repo_workdir.display() + )) + })? 
+ } else { + path + }; + + // Convert to string and normalize separators to forward slashes + let path_str = relative_path.to_string_lossy(); + let normalized = path_str.replace('\\', "/"); + + // Remove leading "./" if present + let normalized = normalized.strip_prefix("./").unwrap_or(&normalized); + + // Security check: prevent path traversal attacks + if normalized.contains("../") || normalized.starts_with("../") { + return Err(GitServiceError::InvalidFilePaths(format!( + "Path traversal not allowed: '{normalized}'" + ))); + } + + Ok(normalized.to_string()) + } + + /// Validate and normalize file paths for use with git pathspec + fn validate_and_normalize_paths>( + repo: &Repository, + file_paths: Option<&[P]>, + ) -> Result>, GitServiceError> { + if let Some(paths) = file_paths { + let mut normalized_paths = Vec::with_capacity(paths.len()); + + for path in paths { + let normalized = Self::normalize_to_repo_relative(repo, path.as_ref())?; + normalized_paths.push(normalized); + } + + // Quick validation: check if any of the paths exist in the repo + if !normalized_paths.is_empty() { + let index = repo.index().map_err(GitServiceError::from)?; + let any_exists = normalized_paths + .iter() + .any(|path| index.get_path(Path::new(path), 0).is_some()); + + // Also check workdir for untracked files + let workdir_exists = if let Some(workdir) = repo.workdir() { + normalized_paths + .iter() + .any(|path| workdir.join(path).exists()) + } else { + false + }; + + if !any_exists && !workdir_exists { + debug!( + "None of the specified paths exist in repository or workdir: {:?}", + normalized_paths + ); + } + } + + Ok(Some(normalized_paths)) + } else { + Ok(None) + } + } + + /// Converts a Patch into our "render friendly" representation + fn patch_to_chunks(patch: &git2::Patch) -> Vec { + let mut chunks = Vec::new(); + for hunk_idx in 0..patch.num_hunks() { + let (_, hunk_lines) = patch.hunk(hunk_idx).unwrap(); + for line_idx in 0..hunk_lines { + let l = patch.line_in_hunk(hunk_idx, line_idx).unwrap(); + let kind = match l.origin() { + ' ' => DiffChunkType::Equal, + '+' => DiffChunkType::Insert, + '-' => DiffChunkType::Delete, + _ => continue, + }; + chunks.push(DiffChunk { + chunk_type: kind, + content: String::from_utf8_lossy(l.content()).into_owned(), + }); + } + } + chunks + } + + /// Builds FileDiffs from a generic git2::Diff + fn diff_to_file_diffs(diff: &git2::Diff) -> Result, GitServiceError> { + let mut files = Vec::new(); + + for idx in 0..diff.deltas().len() { + let delta = diff.get_delta(idx).unwrap(); + let path = delta + .new_file() + .path() + .or_else(|| delta.old_file().path()) + .and_then(|p| p.to_str()) + .unwrap_or("") + .to_owned(); + + // Build the in-memory patch that libgit2 has already computed + if let Some(patch) = git2::Patch::from_diff(diff, idx)? 
{ + // Special-case pure add/delete with no hunks + let chunks = if patch.num_hunks() == 0 { + vec![DiffChunk { + chunk_type: match delta.status() { + git2::Delta::Added => DiffChunkType::Insert, + git2::Delta::Deleted => DiffChunkType::Delete, + _ => DiffChunkType::Equal, + }, + content: format!( + "{} file", + if delta.status() == git2::Delta::Added { + "Added" + } else { + "Deleted" + } + ), + }] + } else { + Self::patch_to_chunks(&patch) + }; + + files.push(FileDiff { path, chunks }); + } + } + + Ok(files) + } + + /// Generic diff engine that handles all types of comparisons + fn run_diff>( + repo: &Repository, + left: Snapshot<'_>, + right: Snapshot<'_>, + file_paths: Option<&[P]>, + ) -> Result, GitServiceError> { + let mut opts = git2::DiffOptions::new(); + opts.context_lines(10); + opts.interhunk_lines(0); + + // Apply pathspec filtering if file paths are provided + if let Some(normalized_paths) = Self::validate_and_normalize_paths(repo, file_paths)? { + // Add each path as a pathspec entry + for path in &normalized_paths { + opts.pathspec(path); + } + } + + let diff = match (left, right) { + (Snapshot::Tree(a), Snapshot::Tree(b)) => repo.diff_tree_to_tree( + Some(&repo.find_tree(a)?), + Some(&repo.find_tree(b)?), + Some(&mut opts), + )?, + (Snapshot::Tree(base), Snapshot::WorkdirAgainst(_, _)) + | (Snapshot::WorkdirAgainst(_, _), Snapshot::Tree(base)) => { + opts.include_untracked(true); + repo.diff_tree_to_workdir_with_index(Some(&repo.find_tree(base)?), Some(&mut opts))? + } + (Snapshot::WorkdirAgainst(_, _), Snapshot::WorkdirAgainst(_, _)) => { + unreachable!("work-dir vs work-dir makes no sense here") + } + }; + + Self::diff_to_file_diffs(&diff) + } + + /// Diff for an already-merged squash commit + pub fn diff_for_merge_commit>( + &self, + repo_path: &Path, + merge_commit: git2::Oid, + file_paths: Option<&[P]>, + ) -> Result { + let repo = self.open_repo(repo_path)?; + let mc = repo.find_commit(merge_commit)?; + let base = mc + .parent(0) + .map(|p| p.tree().unwrap().id()) + .unwrap_or_else(|_| { + // For the initial commit, use an empty tree + repo.treebuilder(None).unwrap().write().unwrap() + }); + + let files = Self::run_diff( + &repo, + Snapshot::Tree(base), + Snapshot::Tree(mc.tree()?.id()), + file_paths, + )?; + Ok(WorktreeDiff { files }) + } + + /// Diff for a work-tree that has not been merged yet + pub fn diff_for_worktree>( + &self, + worktree_path: &Path, + base_branch_commit: git2::Oid, + file_paths: Option<&[P]>, + ) -> Result { + let repo = Repository::open(worktree_path)?; + let base_tree = repo.find_commit(base_branch_commit)?.tree()?.id(); + let files = Self::run_diff( + &repo, + Snapshot::Tree(base_tree), + Snapshot::WorkdirAgainst(base_branch_commit, worktree_path), + file_paths, + )?; + Ok(WorktreeDiff { files }) + } + + /// Open the repository + fn open_repo(&self, repo_path: &Path) -> Result { + Repository::open(repo_path).map_err(GitServiceError::from) + } + + pub fn create_initial_commit(&self, repo: &Repository) -> Result<(), GitServiceError> { + let signature = repo.signature().unwrap_or_else(|_| { + // Fallback if no Git config is set + git2::Signature::now("Vibe Kanban", "noreply@vibekanban.com") + .expect("Failed to create fallback signature") + }); + + let tree_id = { + let tree_builder = repo.treebuilder(None)?; + tree_builder.write()? 
+ }; + let tree = repo.find_tree(tree_id)?; + + // Create initial commit on main branch + let _commit_id = repo.commit( + Some("refs/heads/main"), + &signature, + &signature, + "Initial commit", + &tree, + &[], + )?; + + // Set HEAD to point to main branch + repo.set_head("refs/heads/main")?; + + Ok(()) + } + + pub fn commit(&self, path: &Path, message: &str) -> Result<(), GitServiceError> { + let repo = Repository::open(path)?; + + // Check if there are any changes to commit + let status = repo.statuses(None)?; + + let has_changes = status.iter().any(|entry| { + let flags = entry.status(); + flags.contains(git2::Status::INDEX_NEW) + || flags.contains(git2::Status::INDEX_MODIFIED) + || flags.contains(git2::Status::INDEX_DELETED) + || flags.contains(git2::Status::WT_NEW) + || flags.contains(git2::Status::WT_MODIFIED) + || flags.contains(git2::Status::WT_DELETED) + }); + + if !has_changes { + tracing::debug!("No changes to commit!"); + return Ok(()); + } + + // Get the current HEAD commit + let head = repo.head()?; + let parent_commit = head.peel_to_commit()?; + + // Stage all has_changes + let mut index = repo.index()?; + index.add_all(["*"].iter(), git2::IndexAddOption::DEFAULT, None)?; + index.write()?; + + let tree_id = index.write_tree()?; + let tree = repo.find_tree(tree_id)?; + + let signature = repo.signature()?; + repo.commit( + Some("HEAD"), + &signature, + &signature, + message, + &tree, + &[&parent_commit], + )?; + + Ok(()) + } + + /// Merge changes from a worktree branch back to the main repository + pub fn merge_changes( + &self, + repo_path: &Path, + worktree_path: &Path, + branch_name: &str, + base_branch_name: &str, + commit_message: &str, + ) -> Result { + // Open the worktree repository + let worktree_repo = Repository::open(worktree_path)?; + + // Check if worktree is dirty before proceeding + self.check_worktree_clean(&worktree_repo)?; + + // Verify the task branch exists in the worktree + let task_branch = worktree_repo + .find_branch(branch_name, BranchType::Local) + .map_err(|_| GitServiceError::BranchNotFound(branch_name.to_string()))?; + + // Get the base branch from the worktree + let base_branch = worktree_repo + .find_branch(base_branch_name, BranchType::Local) + .map_err(|_| GitServiceError::BranchNotFound(base_branch_name.to_string()))?; + + // Get commits + let base_commit = base_branch.get().peel_to_commit()?; + let task_commit = task_branch.get().peel_to_commit()?; + + // Get the signature for the merge commit + let signature = worktree_repo.signature()?; + + // Perform a squash merge - create a single commit with all changes + let squash_commit_id = self.perform_squash_merge( + &worktree_repo, + &base_commit, + &task_commit, + &signature, + commit_message, + base_branch_name, + )?; + + // Fix: Update main repo's HEAD if it's pointing to the base branch + let main_repo = self.open_repo(repo_path)?; + let refname = format!("refs/heads/{base_branch_name}"); + + if let Ok(main_head) = main_repo.head() + && let Some(branch_name) = main_head.shorthand() + && branch_name == base_branch_name + { + // Only update main repo's HEAD if it's currently on the base branch + main_repo.set_head(&refname)?; + let mut co = CheckoutBuilder::new(); + co.force(); + main_repo.checkout_head(Some(&mut co))?; + } + + Ok(squash_commit_id.to_string()) + } + + pub fn get_branch_status( + &self, + repo_path: &Path, + branch_name: &str, + base_branch_name: &str, + is_merged: bool, + ) -> Result { + let repo = Repository::open(repo_path)?; + + let branch_ref = repo + // try "refs/heads/" 
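// Side note (illustrative, not from the patch): the `treebuilder(None)?.write()?` fallback
// used for initial commits and parentless merge-commit diffs produces git's canonical empty
// tree, so diffing against it shows every file as added. In SHA-1 repositories that object
// always has the same well-known id.
fn empty_tree_oid(repo: &git2::Repository) -> Result<git2::Oid, git2::Error> {
    let oid = repo.treebuilder(None)?.write()?;
    debug_assert_eq!(oid.to_string(), "4b825dc642cb6eb9a060e54bf8d69288fbee4904");
    Ok(oid)
}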
first, then raw name + .find_reference(&format!("refs/heads/{branch_name}")) + .or_else(|_| repo.find_reference(branch_name))?; + let branch_oid = branch_ref.target().unwrap(); + + // 1. prefer the branch’s configured upstream, if any + if let Ok(local_branch) = repo.find_branch(branch_name, BranchType::Local) + && let Ok(upstream) = local_branch.upstream() + && let Some(_name) = upstream.name()? + && let Some(base_oid) = upstream.get().target() + { + let (_ahead, _behind) = repo.graph_ahead_behind(branch_oid, base_oid)?; + // Ignore upstream since we use stored base branch + } + // Calculate ahead/behind counts using the stored base branch + let (commits_ahead, commits_behind) = + if let Ok(base_branch) = repo.find_branch(base_branch_name, BranchType::Local) { + if let Some(base_oid) = base_branch.get().target() { + repo.graph_ahead_behind(branch_oid, base_oid)? + } else { + (0, 0) // Base branch has no commits + } + } else { + // Base branch doesn't exist, assume no relationship + (0, 0) + }; + + let mut status_opts = StatusOptions::new(); + status_opts + .include_untracked(true) + .recurse_untracked_dirs(true) + .include_ignored(false); + + let has_uncommitted_changes = repo + .statuses(Some(&mut status_opts))? + .iter() + .any(|e| e.status() != Status::CURRENT); + + Ok(BranchStatus { + is_behind: commits_behind > 0, + commits_behind, + commits_ahead, + up_to_date: commits_behind == 0 && commits_ahead == 0, + merged: is_merged, + has_uncommitted_changes, + base_branch_name: base_branch_name.to_string(), + }) + } + + /// Check if the worktree is clean (no uncommitted changes to tracked files) + fn check_worktree_clean(&self, repo: &Repository) -> Result<(), GitServiceError> { + let mut status_options = git2::StatusOptions::new(); + status_options + .include_untracked(false) // Don't include untracked files + .include_ignored(false); // Don't include ignored files + + let statuses = repo.statuses(Some(&mut status_options))?; + + if !statuses.is_empty() { + let mut dirty_files = Vec::new(); + for entry in statuses.iter() { + let status = entry.status(); + // Only consider files that are actually tracked and modified + if status.intersects( + git2::Status::INDEX_MODIFIED + | git2::Status::INDEX_NEW + | git2::Status::INDEX_DELETED + | git2::Status::INDEX_RENAMED + | git2::Status::INDEX_TYPECHANGE + | git2::Status::WT_MODIFIED + | git2::Status::WT_DELETED + | git2::Status::WT_RENAMED + | git2::Status::WT_TYPECHANGE, + ) && let Some(path) = entry.path() + { + dirty_files.push(path.to_string()); + } + } + + if !dirty_files.is_empty() { + return Err(GitServiceError::WorktreeDirty(dirty_files.join(", "))); + } + } + + Ok(()) + } + + pub fn get_current_branch(&self, repo_path: &Path) -> Result { + let repo = Repository::open(repo_path)?; + let head = repo.head()?; + if let Some(branch_name) = head.shorthand() { + Ok(branch_name.to_string()) + } else { + Ok("HEAD".to_string()) + } + } + + pub fn get_all_branches(&self, repo_path: &Path) -> Result, git2::Error> { + let repo = Repository::open(repo_path)?; + let current_branch = self.get_current_branch(repo_path).unwrap_or_default(); + let mut branches = Vec::new(); + + // Helper function to get last commit date for a branch + let get_last_commit_date = |branch: &git2::Branch| -> Result, git2::Error> { + if let Some(target) = branch.get().target() + && let Ok(commit) = repo.find_commit(target) + { + let timestamp = commit.time().seconds(); + return Ok(DateTime::from_timestamp(timestamp, 0).unwrap_or_else(Utc::now)); + } + Ok(Utc::now()) // Default to 
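// Sketch of the ahead/behind primitive used by get_branch_status: git2's graph_ahead_behind
// returns (ahead, behind) for the first commit relative to the second. Branch names here are
// placeholders.
fn ahead_behind_of_feature(repo: &git2::Repository) -> Result<(usize, usize), git2::Error> {
    let feature = repo.refname_to_id("refs/heads/feature")?;
    let base = repo.refname_to_id("refs/heads/main")?;
    // (commits only reachable from `feature`, commits only reachable from `main`)
    repo.graph_ahead_behind(feature, base)
}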
now if we can't get the commit date + }; + + // Get local branches + let local_branches = repo.branches(Some(BranchType::Local))?; + for branch_result in local_branches { + let (branch, _) = branch_result?; + if let Some(name) = branch.name()? { + let last_commit_date = get_last_commit_date(&branch)?; + branches.push(GitBranch { + name: name.to_string(), + is_current: name == current_branch, + is_remote: false, + last_commit_date, + }); + } + } + + // Get remote branches + let remote_branches = repo.branches(Some(BranchType::Remote))?; + for branch_result in remote_branches { + let (branch, _) = branch_result?; + if let Some(name) = branch.name()? { + // Skip remote HEAD references + if !name.ends_with("/HEAD") { + let last_commit_date = get_last_commit_date(&branch)?; + branches.push(GitBranch { + name: name.to_string(), + is_current: false, + is_remote: true, + last_commit_date, + }); + } + } + } + + // Sort branches: current first, then by most recent commit date + branches.sort_by(|a, b| { + if a.is_current && !b.is_current { + std::cmp::Ordering::Less + } else if !a.is_current && b.is_current { + std::cmp::Ordering::Greater + } else { + // Sort by most recent commit date (newest first) + b.last_commit_date.cmp(&a.last_commit_date) + } + }); + + Ok(branches) + } + + /// Perform a squash merge of task branch into base branch, but fail on conflicts + fn perform_squash_merge( + &self, + repo: &Repository, + base_commit: &git2::Commit, + task_commit: &git2::Commit, + signature: &git2::Signature, + commit_message: &str, + base_branch_name: &str, + ) -> Result { + // Attempt an in-memory merge to detect conflicts + let merge_opts = git2::MergeOptions::new(); + let mut index = repo.merge_commits(base_commit, task_commit, Some(&merge_opts))?; + + // If there are conflicts, return an error + if index.has_conflicts() { + return Err(GitServiceError::MergeConflicts( + "Merge failed due to conflicts. 
Please resolve conflicts manually.".to_string(), + )); + } + + // Write the merged tree back to the repository + let tree_id = index.write_tree_to(repo)?; + let tree = repo.find_tree(tree_id)?; + + // Create a squash commit: use merged tree with base_commit as sole parent + let squash_commit_id = repo.commit( + None, // Don't update any reference yet + signature, // Author + signature, // Committer + commit_message, // Custom message + &tree, // Merged tree content + &[base_commit], // Single parent: base branch commit + )?; + + // Update the base branch reference to point to the new commit + let refname = format!("refs/heads/{base_branch_name}"); + repo.reference(&refname, squash_commit_id, true, "Squash merge")?; + + Ok(squash_commit_id) + } + + /// Rebase a worktree branch onto a new base + pub fn rebase_branch( + &self, + repo_path: &Path, + worktree_path: &Path, + new_base_branch: Option<&str>, + old_base_branch: &str, + ) -> Result { + let worktree_repo = Repository::open(worktree_path)?; + let main_repo = self.open_repo(repo_path)?; + + // Check if there's an existing rebase in progress and abort it + let state = worktree_repo.state(); + if state == git2::RepositoryState::Rebase + || state == git2::RepositoryState::RebaseInteractive + || state == git2::RepositoryState::RebaseMerge + { + tracing::warn!("Existing rebase in progress, aborting it first"); + // Try to abort the existing rebase + if let Ok(mut existing_rebase) = worktree_repo.open_rebase(None) { + let _ = existing_rebase.abort(); + } + } + + // Get the target base branch reference + let base_branch_name = match new_base_branch { + Some(branch) => branch.to_string(), + None => main_repo + .head() + .ok() + .and_then(|head| head.shorthand().map(|s| s.to_string())) + .unwrap_or_else(|| "main".to_string()), + }; + let base_branch_name = base_branch_name.as_str(); + + // Handle remote branches by fetching them first and creating/updating local tracking branches + let local_branch_name = if base_branch_name.starts_with("origin/") { + // This is a remote branch, fetch it and create/update local tracking branch + let remote_branch_name = base_branch_name.strip_prefix("origin/").unwrap(); + + // First, fetch the latest changes from remote + self.fetch_from_remote(&main_repo)?; + + // Try to find the remote branch after fetch + let remote_branch = main_repo + .find_branch(base_branch_name, BranchType::Remote) + .map_err(|_| GitServiceError::BranchNotFound(base_branch_name.to_string()))?; + + // Check if local tracking branch exists + match main_repo.find_branch(remote_branch_name, BranchType::Local) { + Ok(mut local_branch) => { + // Local tracking branch exists, update it to match remote + let remote_commit = remote_branch.get().peel_to_commit()?; + local_branch + .get_mut() + .set_target(remote_commit.id(), "Update local branch to match remote")?; + } + Err(_) => { + // Local tracking branch doesn't exist, create it + let remote_commit = remote_branch.get().peel_to_commit()?; + main_repo.branch(remote_branch_name, &remote_commit, false)?; + } + } + + // Use the local branch name for rebase + remote_branch_name + } else { + // This is already a local branch + base_branch_name + }; + + // Get the local branch for rebase + let base_branch = main_repo + .find_branch(local_branch_name, BranchType::Local) + .map_err(|_| GitServiceError::BranchNotFound(local_branch_name.to_string()))?; + + let new_base_commit_id = base_branch.get().peel_to_commit()?.id(); + + // Get the HEAD commit of the worktree (the changes to rebase) + let head = 
worktree_repo.head()?; + let task_branch_commit_id = head.peel_to_commit()?.id(); + + let signature = worktree_repo.signature()?; + + // Find the old base branch + let old_base_branch_ref = if old_base_branch.starts_with("origin/") { + // Remote branch - get local tracking branch name + let remote_branch_name = old_base_branch.strip_prefix("origin/").unwrap(); + main_repo + .find_branch(remote_branch_name, BranchType::Local) + .map_err(|_| GitServiceError::BranchNotFound(remote_branch_name.to_string()))? + } else { + // Local branch + main_repo + .find_branch(old_base_branch, BranchType::Local) + .map_err(|_| GitServiceError::BranchNotFound(old_base_branch.to_string()))? + }; + + let old_base_commit_id = old_base_branch_ref.get().peel_to_commit()?.id(); + + // Find commits unique to the task branch + let unique_commits = Self::find_unique_commits( + &worktree_repo, + task_branch_commit_id, + old_base_commit_id, + new_base_commit_id, + )?; + + if !unique_commits.is_empty() { + // Reset HEAD to the new base branch + let new_base_commit = worktree_repo.find_commit(new_base_commit_id)?; + worktree_repo.reset(new_base_commit.as_object(), git2::ResetType::Hard, None)?; + + // Cherry-pick the unique commits + Self::cherry_pick_commits(&worktree_repo, &unique_commits, &signature)?; + } else { + // No unique commits to rebase, just reset to new base + let new_base_commit = worktree_repo.find_commit(new_base_commit_id)?; + worktree_repo.reset(new_base_commit.as_object(), git2::ResetType::Hard, None)?; + } + + // Get the final commit ID after rebase + let final_head = worktree_repo.head()?; + let final_commit = final_head.peel_to_commit()?; + + Ok(final_commit.id().to_string()) + } + + /// Get enhanced diff for task attempts (from merge commit or worktree) + pub fn get_enhanced_diff>( + &self, + repo_path: &Path, + worktree_path: &Path, + merge_commit_id: Option<&str>, + base_branch: &str, + file_paths: Option<&[P]>, + ) -> Result { + if let Some(merge_commit_id) = merge_commit_id { + // Task attempt has been merged - show the diff from the merge commit + let commit_oid = git2::Oid::from_str(merge_commit_id) + .map_err(|_| GitServiceError::InvalidRepository("Invalid commit ID".to_string()))?; + self.diff_for_merge_commit(repo_path, commit_oid, file_paths) + } else { + // Task attempt not yet merged - get worktree diff + let main_repo = self.open_repo(repo_path)?; + let base_branch_ref = main_repo + .find_branch(base_branch, BranchType::Local) + .map_err(|_| GitServiceError::BranchNotFound(base_branch.to_string()))?; + let base_branch_commit = base_branch_ref.get().peel_to_commit()?.id(); + + self.diff_for_worktree(worktree_path, base_branch_commit, file_paths) + } + } + + /// Delete a file from the repository and commit the change + pub fn delete_file_and_commit( + &self, + worktree_path: &Path, + file_path: &str, + ) -> Result { + let repo = Repository::open(worktree_path)?; + + // Get the absolute path to the file within the worktree + let file_full_path = worktree_path.join(file_path); + + // Check if file exists and delete it + if file_full_path.exists() { + std::fs::remove_file(&file_full_path).map_err(|e| { + GitServiceError::IoError(std::io::Error::other(format!( + "Failed to delete file {file_path}: {e}" + ))) + })?; + } + + // Stage the deletion + let mut index = repo.index()?; + index.remove_path(Path::new(file_path))?; + index.write()?; + + // Create a commit for the file deletion + let signature = repo.signature()?; + let tree_id = index.write_tree()?; + let tree = 
repo.find_tree(tree_id)?; + + // Get the current HEAD commit + let head = repo.head()?; + let parent_commit = head.peel_to_commit()?; + + let commit_message = format!("Delete file: {file_path}"); + let commit_id = repo.commit( + Some("HEAD"), + &signature, + &signature, + &commit_message, + &tree, + &[&parent_commit], + )?; + + Ok(commit_id.to_string()) + } + + /// Get the default branch name for the repository + pub fn get_default_branch_name(&self, repo_path: &PathBuf) -> Result { + let repo = self.open_repo(repo_path)?; + + match repo.head() { + Ok(head_ref) => Ok(head_ref.shorthand().unwrap_or("main").to_string()), + Err(e) + if e.class() == git2::ErrorClass::Reference + && e.code() == git2::ErrorCode::UnbornBranch => + { + Ok("main".to_string()) // Repository has no commits yet + } + Err(_) => Ok("main".to_string()), // Fallback + } + } + + /// Extract GitHub owner and repo name from git repo path + pub fn get_github_repo_info( + &self, + repo_path: &PathBuf, + ) -> Result<(String, String), GitServiceError> { + let repo = self.open_repo(repo_path)?; + let remote = repo.find_remote("origin").map_err(|_| { + GitServiceError::InvalidRepository("No 'origin' remote found".to_string()) + })?; + + let url = remote.url().ok_or_else(|| { + GitServiceError::InvalidRepository("Remote origin has no URL".to_string()) + })?; + + // Parse GitHub URL (supports both HTTPS and SSH formats) + let github_regex = regex::Regex::new(r"github\.com[:/]([^/]+)/(.+?)(?:\.git)?/?$") + .map_err(|e| GitServiceError::InvalidRepository(format!("Regex error: {e}")))?; + + if let Some(captures) = github_regex.captures(url) { + let owner = captures.get(1).unwrap().as_str().to_string(); + let repo_name = captures.get(2).unwrap().as_str().to_string(); + Ok((owner, repo_name)) + } else { + Err(GitServiceError::InvalidRepository(format!( + "Not a GitHub repository: {url}" + ))) + } + } + + /// Push the branch to GitHub remote + pub fn push_to_github( + &self, + worktree_path: &Path, + branch_name: &str, + github_token: &str, + ) -> Result<(), GitServiceError> { + let repo = Repository::open(worktree_path)?; + + // Get the remote + let remote = repo.find_remote("origin")?; + let remote_url = remote.url().ok_or_else(|| { + GitServiceError::InvalidRepository("Remote origin has no URL".to_string()) + })?; + + // Convert SSH URL to HTTPS URL if necessary + let https_url = if remote_url.starts_with("git@github.com:") { + // Convert git@github.com:owner/repo.git to https://github.com/owner/repo.git + remote_url.replace("git@github.com:", "https://github.com/") + } else if remote_url.starts_with("ssh://git@github.com/") { + // Convert ssh://git@github.com/owner/repo.git to https://github.com/owner/repo.git + remote_url.replace("ssh://git@github.com/", "https://github.com/") + } else { + remote_url.to_string() + }; + + // Create a temporary remote with HTTPS URL for pushing + let temp_remote_name = "temp_https_origin"; + + // Remove any existing temp remote + let _ = repo.remote_delete(temp_remote_name); + + // Create temporary HTTPS remote + let mut temp_remote = repo.remote(temp_remote_name, &https_url)?; + + // Create refspec for pushing the branch + let refspec = format!("refs/heads/{branch_name}:refs/heads/{branch_name}"); + + // Set up authentication callback using the GitHub token + let mut callbacks = git2::RemoteCallbacks::new(); + callbacks.credentials(|_url, username_from_url, _allowed_types| { + git2::Cred::userpass_plaintext(username_from_url.unwrap_or("git"), github_token) + }); + + // Configure push options + let mut 
push_options = git2::PushOptions::new(); + push_options.remote_callbacks(callbacks); + + // Push the branch + let push_result = temp_remote.push(&[&refspec], Some(&mut push_options)); + + // Clean up the temporary remote + let _ = repo.remote_delete(temp_remote_name); + + // Check push result + push_result?; + + Ok(()) + } + + /// Fetch from remote repository, with SSH authentication callbacks + fn fetch_from_remote(&self, repo: &Repository) -> Result<(), GitServiceError> { + // Find the “origin” remote + let mut remote = repo.find_remote("origin").map_err(|_| { + GitServiceError::Git(git2::Error::from_str("Remote 'origin' not found")) + })?; + + // Prepare callbacks for authentication + let mut callbacks = RemoteCallbacks::new(); + callbacks.credentials(|_url, username_from_url, _| { + // Try SSH agent first + if let Some(username) = username_from_url + && let Ok(cred) = Cred::ssh_key_from_agent(username) + { + return Ok(cred); + } + // Fallback to key file (~/.ssh/id_rsa) + let home = dirs::home_dir() + .ok_or_else(|| git2::Error::from_str("Could not find home directory"))?; + let key_path = home.join(".ssh").join("id_rsa"); + Cred::ssh_key(username_from_url.unwrap_or("git"), None, &key_path, None) + }); + + // Set up fetch options with our callbacks + let mut fetch_opts = FetchOptions::new(); + fetch_opts.remote_callbacks(callbacks); + + // Actually fetch (no specific refspecs = fetch all configured) + remote + .fetch(&[] as &[&str], Some(&mut fetch_opts), None) + .map_err(GitServiceError::Git)?; + Ok(()) + } + + /// Find the merge-base between two commits + fn get_merge_base( + repo: &Repository, + commit1: git2::Oid, + commit2: git2::Oid, + ) -> Result { + repo.merge_base(commit1, commit2) + .map_err(GitServiceError::Git) + } + + /// Find commits that are unique to the task branch (not in either base branch) + fn find_unique_commits( + repo: &Repository, + task_branch_commit: git2::Oid, + old_base_commit: git2::Oid, + new_base_commit: git2::Oid, + ) -> Result, GitServiceError> { + // Find merge-base between task branch and old base branch + let task_old_base_merge_base = + Self::get_merge_base(repo, task_branch_commit, old_base_commit)?; + + // Find merge-base between old base and new base + let old_new_base_merge_base = Self::get_merge_base(repo, old_base_commit, new_base_commit)?; + + // Get all commits from task branch back to the merge-base with old base + let mut walker = repo.revwalk()?; + walker.push(task_branch_commit)?; + walker.hide(task_old_base_merge_base)?; + + let mut task_commits = Vec::new(); + for commit_id in walker { + let commit_id = commit_id?; + + // Check if this commit is not in the old base branch lineage + // (i.e., it's not between old_new_base_merge_base and old_base_commit) + let is_in_old_base = repo + .graph_descendant_of(commit_id, old_new_base_merge_base) + .unwrap_or(false) + && repo + .graph_descendant_of(old_base_commit, commit_id) + .unwrap_or(false); + + if !is_in_old_base { + task_commits.push(commit_id); + } + } + + // Reverse to get chronological order for cherry-picking + task_commits.reverse(); + Ok(task_commits) + } + + /// Cherry-pick specific commits onto a new base + fn cherry_pick_commits( + repo: &Repository, + commits: &[git2::Oid], + signature: &git2::Signature, + ) -> Result<(), GitServiceError> { + for &commit_id in commits { + let commit = repo.find_commit(commit_id)?; + + // Cherry-pick the commit + let mut cherrypick_opts = CherrypickOptions::new(); + repo.cherrypick(&commit, Some(&mut cherrypick_opts))?; + + // Check for 
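// Sketch of the revwalk pattern behind find_unique_commits: push the branch tip, hide the
// merge-base, and the walk yields exactly the commits reachable from the tip but not from
// the base (newest first, which is why the caller reverses before cherry-picking). The Oids
// are placeholders.
fn commits_since(
    repo: &git2::Repository,
    tip: git2::Oid,
    base: git2::Oid,
) -> Result<Vec<git2::Oid>, git2::Error> {
    let mut walk = repo.revwalk()?;
    walk.push(tip)?; // start from the branch head
    walk.hide(base)?; // exclude everything reachable from the base
    walk.collect() // each item is a Result<Oid, Error>, collected into Result<Vec<_>, _>
}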
conflicts + let mut index = repo.index()?; + if index.has_conflicts() { + return Err(GitServiceError::MergeConflicts(format!( + "Cherry-pick failed due to conflicts on commit {commit_id}" + ))); + } + + // Commit the cherry-pick + let tree_id = index.write_tree()?; + let tree = repo.find_tree(tree_id)?; + let head_commit = repo.head()?.peel_to_commit()?; + + repo.commit( + Some("HEAD"), + signature, + signature, + commit.message().unwrap_or("Cherry-picked commit"), + &tree, + &[&head_commit], + )?; + } + + Ok(()) + } + + /// Clone a repository to the specified directory + #[cfg(feature = "cloud")] + pub fn clone_repository( + clone_url: &str, + target_path: &Path, + token: Option<&str>, + ) -> Result { + if let Some(parent) = target_path.parent() { + std::fs::create_dir_all(parent)?; + } + + // Set up callbacks for authentication if token is provided + let mut callbacks = RemoteCallbacks::new(); + if let Some(token) = token { + callbacks.credentials(|_url, username_from_url, _allowed_types| { + Cred::userpass_plaintext(username_from_url.unwrap_or("git"), token) + }); + } else { + // Fallback to SSH agent and key file authentication + callbacks.credentials(|_url, username_from_url, _| { + // Try SSH agent first + if let Some(username) = username_from_url { + if let Ok(cred) = Cred::ssh_key_from_agent(username) { + return Ok(cred); + } + } + // Fallback to key file (~/.ssh/id_rsa) + let home = dirs::home_dir() + .ok_or_else(|| git2::Error::from_str("Could not find home directory"))?; + let key_path = home.join(".ssh").join("id_rsa"); + Cred::ssh_key(username_from_url.unwrap_or("git"), None, &key_path, None) + }); + } + + // Set up fetch options with our callbacks + let mut fetch_opts = FetchOptions::new(); + fetch_opts.remote_callbacks(callbacks); + + // Create a repository builder with fetch options + let mut builder = git2::build::RepoBuilder::new(); + builder.fetch_options(fetch_opts); + + let repo = builder.clone(clone_url, target_path)?; + + tracing::info!( + "Successfully cloned repository from {} to {}", + clone_url, + target_path.display() + ); + + Ok(repo) + } +} + +// #[cfg(test)] +// mod tests { +// use tempfile::TempDir; + +// use super::*; + +// fn create_test_repo() -> (TempDir, Repository) { +// let temp_dir = TempDir::new().unwrap(); +// let repo = Repository::init(temp_dir.path()).unwrap(); + +// // Configure the repository +// let mut config = repo.config().unwrap(); +// config.set_str("user.name", "Test User").unwrap(); +// config.set_str("user.email", "test@example.com").unwrap(); + +// (temp_dir, repo) +// } + +// #[test] +// fn test_git_service_creation() { +// let (temp_dir, _repo) = create_test_repo(); +// let _git_service = GitService::new(temp_dir.path()).unwrap(); +// } + +// #[test] +// fn test_invalid_repository_path() { +// let result = GitService::new("/nonexistent/path"); +// assert!(result.is_err()); +// } + +// #[test] +// fn test_default_branch_name() { +// let (temp_dir, _repo) = create_test_repo(); +// let git_service = GitService::new(temp_dir.path()).unwrap(); +// let branch_name = git_service.get_default_branch_name().unwrap(); +// assert_eq!(branch_name, "main"); +// } +// } diff --git a/backend/src/services/github_service.rs b/crates/services/src/services/github_service.rs similarity index 83% rename from backend/src/services/github_service.rs rename to crates/services/src/services/github_service.rs index f98c0b24..532cd6a3 100644 --- a/backend/src/services/github_service.rs +++ b/crates/services/src/services/github_service.rs @@ -3,46 +3,55 @@ use 
std::time::Duration; use backon::{ExponentialBuilder, Retryable}; use octocrab::{Octocrab, OctocrabBuilder}; use serde::{Deserialize, Serialize}; +use thiserror::Error; use tracing::info; use ts_rs::TS; -#[derive(Debug)] +use crate::services::git::GitServiceError; + +#[derive(Debug, Error, Serialize, Deserialize, TS)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +#[ts(use_ts_enum)] pub enum GitHubServiceError { + #[ts(skip)] + #[serde(skip)] + #[error(transparent)] Client(octocrab::Error), + #[ts(skip)] + #[error("Authentication error: {0}")] Auth(String), + #[ts(skip)] + #[error("Repository error: {0}")] Repository(String), + #[ts(skip)] + #[error("Pull request error: {0}")] PullRequest(String), + #[ts(skip)] + #[error("Branch error: {0}")] Branch(String), + #[error("GitHub token is invalid or expired.")] TokenInvalid, + #[error("Insufficient permissions")] + InsufficientPermissions, + #[error("GitHub repository not found or no access")] + RepoNotFoundOrNoAccess, + #[ts(skip)] + #[serde(skip)] + #[error(transparent)] + GitService(GitServiceError), } -impl std::fmt::Display for GitHubServiceError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - GitHubServiceError::Client(e) => write!(f, "GitHub client error: {}", e), - GitHubServiceError::Auth(e) => write!(f, "Authentication error: {}", e), - GitHubServiceError::Repository(e) => write!(f, "Repository error: {}", e), - GitHubServiceError::PullRequest(e) => write!(f, "Pull request error: {}", e), - GitHubServiceError::Branch(e) => write!(f, "Branch error: {}", e), - GitHubServiceError::TokenInvalid => write!(f, "GitHub token is invalid or expired."), - } - } -} - -impl std::error::Error for GitHubServiceError {} - impl From for GitHubServiceError { fn from(err: octocrab::Error) -> Self { match &err { octocrab::Error::GitHub { source, .. 
} => { let status = source.status_code.as_u16(); let msg = source.message.to_ascii_lowercase(); - if status == 401 - || status == 403 - || msg.contains("bad credentials") - || msg.contains("token expired") + if status == 401 || msg.contains("bad credentials") || msg.contains("token expired") { GitHubServiceError::TokenInvalid + } else if status == 403 { + GitHubServiceError::InsufficientPermissions } else { GitHubServiceError::Client(err) } @@ -51,6 +60,37 @@ impl From for GitHubServiceError { } } } +impl From for GitHubServiceError { + fn from(error: GitServiceError) -> Self { + if let GitServiceError::Git(err) = error { + if err + .message() + .contains("too many redirects or authentication replays") + { + Self::TokenInvalid + } else if err.message().contains("status code: 403") { + Self::InsufficientPermissions + } else if err.message().contains("status code: 404") { + Self::RepoNotFoundOrNoAccess + } else { + Self::GitService(GitServiceError::Git(err)) + } + } else { + Self::GitService(error) + } + } +} + +impl GitHubServiceError { + pub fn is_api_data(&self) -> bool { + matches!( + self, + GitHubServiceError::TokenInvalid + | GitHubServiceError::InsufficientPermissions + | GitHubServiceError::RepoNotFoundOrNoAccess + ) + } +} #[derive(Debug, Clone)] pub struct GitHubRepoInfo { @@ -77,7 +117,6 @@ pub struct PullRequestInfo { } #[derive(Debug, Clone, Serialize, Deserialize, TS)] -#[ts(export)] pub struct RepositoryInfo { pub id: i64, pub name: String, @@ -100,14 +139,16 @@ impl GitHubService { pub fn new(github_token: &str) -> Result { let client = OctocrabBuilder::new() .personal_token(github_token.to_string()) - .build() - .map_err(|e| { - GitHubServiceError::Auth(format!("Failed to create GitHub client: {}", e)) - })?; + .build()?; Ok(Self { client }) } + pub async fn check_token(&self) -> Result<(), GitHubServiceError> { + self.client.current().user().await?; + Ok(()) + } + /// Create a pull request on GitHub pub async fn create_pr( &self, @@ -208,7 +249,7 @@ impl GitHubService { )) } } - _ => GitHubServiceError::PullRequest(format!("Failed to create PR: {}", e)), + _ => GitHubServiceError::PullRequest(format!("Failed to create PR: {e}")), })?; let pr_info = PullRequestInfo { @@ -264,7 +305,7 @@ impl GitHubService { .get(pr_number as u64) .await .map_err(|e| { - GitHubServiceError::PullRequest(format!("Failed to get PR #{}: {}", pr_number, e)) + GitHubServiceError::PullRequest(format!("Failed to get PR #{pr_number}: {e}")) })?; let status = match pr.state { @@ -293,6 +334,7 @@ impl GitHubService { } /// List repositories for the authenticated user with pagination + #[cfg(feature = "cloud")] pub async fn list_repositories( &self, page: u8, @@ -316,6 +358,7 @@ impl GitHubService { .await } + #[cfg(feature = "cloud")] async fn list_repositories_internal( &self, page: u8, diff --git a/crates/services/src/services/mod.rs b/crates/services/src/services/mod.rs new file mode 100644 index 00000000..ca77d572 --- /dev/null +++ b/crates/services/src/services/mod.rs @@ -0,0 +1,13 @@ +pub mod analytics; +pub mod auth; +pub mod config; +pub mod container; +pub mod events; +pub mod filesystem; +pub mod filesystem_watcher; +pub mod git; +pub mod github_service; +pub mod notification; +pub mod pr_monitor; +pub mod sentry; +pub mod worktree_manager; diff --git a/backend/src/services/notification_service.rs b/crates/services/src/services/notification.rs similarity index 70% rename from backend/src/services/notification_service.rs rename to crates/services/src/services/notification.rs index 
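// Illustrative sketch (not from the patch) of why is_api_data() exists: the user-actionable
// GitHub failures (expired token, missing permissions, repo not found) can be shown to the
// client as-is, while everything else stays an opaque server-side error. The handler shape
// is hypothetical.
fn user_facing_message(err: &GitHubServiceError) -> Option<String> {
    if err.is_api_data() {
        Some(err.to_string()) // Display text comes from the #[error(...)] attributes
    } else {
        None // log server-side instead of exposing internals
    }
}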
7bb6dd5e..3206f52f 100644 --- a/backend/src/services/notification_service.rs +++ b/crates/services/src/services/notification.rs @@ -1,59 +1,54 @@ use std::sync::OnceLock; -use crate::models::config::SoundFile; +use db::models::execution_process::{ExecutionContext, ExecutionProcessStatus}; +use utils; + +use crate::services::config::SoundFile; /// Service for handling cross-platform notifications including sound alerts and push notifications #[derive(Debug, Clone)] -pub struct NotificationService { - sound_enabled: bool, - push_enabled: bool, -} - -/// Configuration for notifications -#[derive(Debug, Clone)] -pub struct NotificationConfig { - pub sound_enabled: bool, - pub push_enabled: bool, -} - -impl Default for NotificationConfig { - fn default() -> Self { - Self { - sound_enabled: true, - push_enabled: true, - } - } -} +pub struct NotificationService {} +use crate::services::config::NotificationConfig; /// Cache for WSL root path from PowerShell static WSL_ROOT_PATH_CACHE: OnceLock> = OnceLock::new(); impl NotificationService { - /// Create a new NotificationService with the given configuration - pub fn new(config: NotificationConfig) -> Self { - Self { - sound_enabled: config.sound_enabled, - push_enabled: config.push_enabled, - } + pub async fn notify_execution_halted(config: NotificationConfig, ctx: &ExecutionContext) { + let title = format!("Task Complete: {}", ctx.task.title); + let message = match ctx.execution_process.status { + ExecutionProcessStatus::Completed => format!( + "✅ '{}' completed successfully\nBranch: {:?}\nExecutor: {}", + ctx.task.title, ctx.task_attempt.branch, ctx.task_attempt.base_coding_agent + ), + ExecutionProcessStatus::Failed | ExecutionProcessStatus::Killed => format!( + "❌ '{}' execution failed\nBranch: {:?}\nExecutor: {}", + ctx.task.title, ctx.task_attempt.branch, ctx.task_attempt.base_coding_agent + ), + _ => { + tracing::warn!( + "Tried to notify attempt completion for {} but process is still running!", + ctx.task_attempt.id + ); + return; + } + }; + Self::notify(config, &title, &message).await; } /// Send both sound and push notifications if enabled - pub async fn notify(&self, title: &str, message: &str, sound_file: &SoundFile) { - if self.sound_enabled { - self.play_sound_notification(sound_file).await; + pub async fn notify(config: NotificationConfig, title: &str, message: &str) { + if config.sound_enabled { + Self::play_sound_notification(&config.sound_file).await; } - if self.push_enabled { - self.send_push_notification(title, message).await; + if config.push_enabled { + Self::send_push_notification(title, message).await; } } /// Play a system sound notification across platforms - pub async fn play_sound_notification(&self, sound_file: &SoundFile) { - if !self.sound_enabled { - return; - } - + async fn play_sound_notification(sound_file: &SoundFile) { let file_path = match sound_file.get_path().await { Ok(path) => path, Err(e) => { @@ -68,7 +63,7 @@ impl NotificationService { let _ = tokio::process::Command::new("afplay") .arg(&file_path) .spawn(); - } else if cfg!(target_os = "linux") && !crate::utils::is_wsl2() { + } else if cfg!(target_os = "linux") && !utils::is_wsl2() { // Try different Linux audio players if tokio::process::Command::new("paplay") .arg(&file_path) @@ -89,11 +84,9 @@ impl NotificationService { .arg("\\a") .spawn(); } - } else if cfg!(target_os = "windows") - || (cfg!(target_os = "linux") && crate::utils::is_wsl2()) - { + } else if cfg!(target_os = "windows") || (cfg!(target_os = "linux") && utils::is_wsl2()) { // Convert 
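// Illustrative caller sketch (not from the patch): with NotificationService now stateless,
// callers pass a NotificationConfig per call instead of constructing the service. The config
// value and strings here are placeholders.
async fn announce_completion(config: NotificationConfig) {
    NotificationService::notify(config, "Task Complete", "Attempt finished successfully").await;
}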
WSL path to Windows path if in WSL2 - let file_path = if crate::utils::is_wsl2() { + let file_path = if utils::is_wsl2() { if let Some(windows_path) = Self::wsl_to_windows_path(&file_path).await { windows_path } else { @@ -106,32 +99,25 @@ impl NotificationService { let _ = tokio::process::Command::new("powershell.exe") .arg("-c") .arg(format!( - r#"(New-Object Media.SoundPlayer "{}").PlaySync()"#, - file_path + r#"(New-Object Media.SoundPlayer "{file_path}").PlaySync()"# )) .spawn(); } } /// Send a cross-platform push notification - pub async fn send_push_notification(&self, title: &str, message: &str) { - if !self.push_enabled { - return; - } - + async fn send_push_notification(title: &str, message: &str) { if cfg!(target_os = "macos") { - self.send_macos_notification(title, message).await; - } else if cfg!(target_os = "linux") && !crate::utils::is_wsl2() { - self.send_linux_notification(title, message).await; - } else if cfg!(target_os = "windows") - || (cfg!(target_os = "linux") && crate::utils::is_wsl2()) - { - self.send_windows_notification(title, message).await; + Self::send_macos_notification(title, message).await; + } else if cfg!(target_os = "linux") && !utils::is_wsl2() { + Self::send_linux_notification(title, message).await; + } else if cfg!(target_os = "windows") || (cfg!(target_os = "linux") && utils::is_wsl2()) { + Self::send_windows_notification(title, message).await; } } /// Send macOS notification using osascript - async fn send_macos_notification(&self, title: &str, message: &str) { + async fn send_macos_notification(title: &str, message: &str) { let script = format!( r#"display notification "{message}" with title "{title}" sound name "Glass""#, message = message.replace('"', r#"\""#), @@ -145,7 +131,7 @@ impl NotificationService { } /// Send Linux notification using notify-rust - async fn send_linux_notification(&self, title: &str, message: &str) { + async fn send_linux_notification(title: &str, message: &str) { use notify_rust::Notification; let title = title.to_string(); @@ -165,8 +151,8 @@ impl NotificationService { } /// Send Windows/WSL notification using PowerShell toast script - async fn send_windows_notification(&self, title: &str, message: &str) { - let script_path = match crate::utils::get_powershell_script().await { + async fn send_windows_notification(title: &str, message: &str) { + let script_path = match utils::get_powershell_script().await { Ok(path) => path, Err(e) => { tracing::error!("Failed to get PowerShell script: {}", e); @@ -175,7 +161,7 @@ impl NotificationService { }; // Convert WSL path to Windows path if in WSL2 - let script_path_str = if crate::utils::is_wsl2() { + let script_path_str = if utils::is_wsl2() { if let Some(windows_path) = Self::wsl_to_windows_path(&script_path).await { windows_path } else { @@ -249,7 +235,7 @@ impl NotificationService { // Get cached WSL root path from PowerShell if let Some(wsl_root) = Self::get_wsl_root_path().await { // Simply concatenate WSL root with the absolute path - PowerShell doesn't mind / - let windows_path = format!("{}{}", wsl_root, path_str); + let windows_path = format!("{wsl_root}{path_str}"); tracing::debug!("WSL path converted: {} -> {}", path_str, windows_path); Some(windows_path) } else { diff --git a/crates/services/src/services/pr_monitor.rs b/crates/services/src/services/pr_monitor.rs new file mode 100644 index 00000000..63fa2a31 --- /dev/null +++ b/crates/services/src/services/pr_monitor.rs @@ -0,0 +1,142 @@ +use std::{sync::Arc, time::Duration}; + +use db::{ + DBService, + models::{ + 
task::{Task, TaskStatus}, + task_attempt::{PrInfo, TaskAttempt, TaskAttemptError}, + }, +}; +use sqlx::error::Error as SqlxError; +use thiserror::Error; +use tokio::{sync::RwLock, time::interval}; +use tracing::{debug, error, info}; + +use crate::services::{ + config::Config, + github_service::{GitHubRepoInfo, GitHubService, GitHubServiceError}, +}; + +#[derive(Debug, Error)] +enum PrMonitorError { + #[error("No GitHub token configured")] + NoGitHubToken, + #[error(transparent)] + GitHubServiceError(#[from] GitHubServiceError), + #[error(transparent)] + TaskAttemptError(#[from] TaskAttemptError), + #[error(transparent)] + Sqlx(#[from] SqlxError), +} + +/// Service to monitor GitHub PRs and update task status when they are merged +pub struct PrMonitorService { + db: DBService, + config: Arc>, + poll_interval: Duration, +} + +impl PrMonitorService { + pub async fn spawn(db: DBService, config: Arc>) -> tokio::task::JoinHandle<()> { + let service = Self { + db, + config, + poll_interval: Duration::from_secs(60), // Check every minute + }; + tokio::spawn(async move { + service.start().await; + }) + } + + async fn start(&self) { + info!( + "Starting PR monitoring service with interval {:?}", + self.poll_interval + ); + + let mut interval = interval(self.poll_interval); + + loop { + interval.tick().await; + if let Err(e) = self.check_all_open_prs().await { + error!("Error checking open PRs: {}", e); + } + } + } + + /// Check all open PRs for updates with the provided GitHub token + async fn check_all_open_prs(&self) -> Result<(), PrMonitorError> { + let open_prs = TaskAttempt::get_open_prs(&self.db.pool).await?; + + if open_prs.is_empty() { + debug!("No open PRs to check"); + return Ok(()); + } + + info!("Checking {} open PRs", open_prs.len()); + + for pr_info in open_prs { + if let Err(e) = self.check_pr_status(&pr_info).await { + error!( + "Error checking PR #{} for attempt {}: {}", + pr_info.pr_number, pr_info.attempt_id, e + ); + } + } + + Ok(()) + } + + /// Check the status of a specific PR + async fn check_pr_status(&self, pr_info: &PrInfo) -> Result<(), PrMonitorError> { + let github_config = self.config.read().await.github.clone(); + let github_token = github_config.token().ok_or(PrMonitorError::NoGitHubToken)?; + + let github_service = GitHubService::new(&github_token)?; + + let repo_info = GitHubRepoInfo { + owner: pr_info.repo_owner.clone(), + repo_name: pr_info.repo_name.clone(), + }; + + let pr_status = github_service + .update_pr_status(&repo_info, pr_info.pr_number) + .await?; + + debug!( + "PR #{} status: {} (was open)", + pr_info.pr_number, pr_status.status + ); + + // Update the PR status in the database + if pr_status.status != "open" { + // Extract merge commit SHA if the PR was merged + TaskAttempt::update_pr_status( + &self.db.pool, + pr_info.attempt_id, + pr_status.url, + pr_status.number, + pr_status.status, + ) + .await?; + + // If the PR was merged, update the task status to done + if pr_status.merged { + info!( + "PR #{} was merged, updating task {} to done", + pr_info.pr_number, pr_info.task_id + ); + let merge_commit_sha = pr_status.merge_commit_sha.as_deref().unwrap_or("unknown"); + Task::update_status(&self.db.pool, pr_info.task_id, TaskStatus::Done).await?; + TaskAttempt::update_merge_commit( + &self.db.pool, + pr_info.attempt_id, + merge_commit_sha, + ) + .await?; + } + } + + Ok(()) + } +} diff --git a/crates/services/src/services/sentry.rs b/crates/services/src/services/sentry.rs new file mode 100644 index 00000000..f9260dab --- /dev/null +++ 
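// Sketch of the polling shape PrMonitorService uses: a fixed tokio interval drives one
// "check all open PRs" pass per tick, and failures are logged rather than breaking the loop.
// The interval and the check function are placeholders.
async fn poll_loop() {
    let mut ticker = tokio::time::interval(std::time::Duration::from_secs(60));
    loop {
        ticker.tick().await; // the first tick completes immediately
        if let Err(e) = check_once().await {
            tracing::error!("poll failed: {}", e);
        }
    }
}

async fn check_once() -> Result<(), Box<dyn std::error::Error>> {
    Ok(()) // placeholder for TaskAttempt::get_open_prs + per-PR status checks
}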
b/crates/services/src/services/sentry.rs @@ -0,0 +1,33 @@ +#[derive(Clone)] +pub struct SentryService {} + +impl Default for SentryService { + fn default() -> Self { + Self::new() + } +} + +impl SentryService { + pub fn new() -> Self { + SentryService {} + } + + pub async fn update_scope(&self, user_id: &str, username: Option<&str>, email: Option<&str>) { + let sentry_user = match (username, email) { + (Some(user), Some(email)) => sentry::User { + id: Some(user_id.to_string()), + username: Some(user.to_string()), + email: Some(email.to_string()), + ..Default::default() + }, + _ => sentry::User { + id: Some(user_id.to_string()), + ..Default::default() + }, + }; + + sentry::configure_scope(|scope| { + scope.set_user(Some(sentry_user)); + }); + } +} diff --git a/backend/src/utils/worktree_manager.rs b/crates/services/src/services/worktree_manager.rs similarity index 72% rename from backend/src/utils/worktree_manager.rs rename to crates/services/src/services/worktree_manager.rs index cc2d6827..5a2af20e 100644 --- a/backend/src/utils/worktree_manager.rs +++ b/crates/services/src/services/worktree_manager.rs @@ -4,8 +4,12 @@ use std::{ sync::{Arc, Mutex}, }; -use git2::{Error as GitError, Repository, WorktreeAddOptions}; +use git2::{BranchType, Error as GitError, Repository, WorktreeAddOptions}; +use thiserror::Error; use tracing::{debug, info, warn}; +use utils::{is_wsl2, shell::get_shell_command}; + +use super::git::{GitService, GitServiceError}; // Global synchronization for worktree creation to prevent race conditions lazy_static::lazy_static! { @@ -13,16 +17,84 @@ lazy_static::lazy_static! { Arc::new(Mutex::new(HashMap::new())); } +#[derive(Debug, Error)] +pub enum WorktreeError { + #[error(transparent)] + Git(#[from] GitError), + #[error(transparent)] + GitService(#[from] GitServiceError), + #[error("Task join error: {0}")] + TaskJoin(String), + #[error("Invalid path: {0}")] + InvalidPath(String), + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + #[error("Branch not found: {0}")] + BranchNotFound(String), + #[error("Repository error: {0}")] + Repository(String), +} + pub struct WorktreeManager; impl WorktreeManager { + /// Create a worktree with a new branch + pub async fn create_worktree( + repo_path: &Path, + branch_name: &str, + worktree_path: &Path, + base_branch: Option<&str>, + create_branch: bool, + ) -> Result<(), WorktreeError> { + if create_branch { + let repo_path_owned = repo_path.to_path_buf(); + let branch_name_owned = branch_name.to_string(); + let base_branch_owned = base_branch.map(|s| s.to_string()); + + tokio::task::spawn_blocking(move || { + let repo = Repository::open(&repo_path_owned)?; + + let base_reference = if let Some(base_branch) = base_branch_owned.as_deref() { + let branch = repo.find_branch(base_branch, BranchType::Local)?; + branch.into_reference() + } else { + // Handle new repositories without any commits + match repo.head() { + Ok(head_ref) => head_ref, + Err(e) + if e.class() == git2::ErrorClass::Reference + && e.code() == git2::ErrorCode::UnbornBranch => + { + // Repository has no commits yet, create an initial commit + GitService::new() + .create_initial_commit(&repo) + .map_err(|_| { + GitError::from_str("Failed to create initial commit") + })?; + repo.find_reference("refs/heads/main")? 
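// Sketch of the spawn_blocking pattern used throughout this module: libgit2 calls are
// synchronous, so they run on tokio's blocking pool and the JoinError is surfaced as
// WorktreeError::TaskJoin, while git2 errors convert via the #[from] impl. The closure body
// is a placeholder.
async fn run_git_blocking(repo_path: std::path::PathBuf) -> Result<(), WorktreeError> {
    tokio::task::spawn_blocking(move || -> Result<(), WorktreeError> {
        let _repo = git2::Repository::open(&repo_path)?; // From<git2::Error> for WorktreeError
        Ok(())
    })
    .await
    .map_err(|e| WorktreeError::TaskJoin(format!("{e}")))?
}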
+ } + Err(e) => return Err(e), + } + }; + + // Create branch + repo.branch(&branch_name_owned, &base_reference.peel_to_commit()?, false)?; + Ok::<(), GitError>(()) + }) + .await + .map_err(|e| WorktreeError::TaskJoin(format!("Task join error: {e}")))??; + } + + Self::ensure_worktree_exists(repo_path, branch_name, worktree_path).await + } + /// Ensure worktree exists, recreating if necessary with proper synchronization /// This is the main entry point for ensuring a worktree exists and prevents race conditions pub async fn ensure_worktree_exists( - repo_path: String, - branch_name: String, - worktree_path: PathBuf, - ) -> Result<(), GitError> { + repo_path: &Path, + branch_name: &str, + worktree_path: &Path, + ) -> Result<(), WorktreeError> { let path_str = worktree_path.to_string_lossy().to_string(); // Get or create a lock for this specific worktree path @@ -38,7 +110,7 @@ impl WorktreeManager { let _guard = lock.lock().await; // Check if worktree already exists and is properly set up - if Self::is_worktree_properly_set_up(&repo_path, &worktree_path).await? { + if Self::is_worktree_properly_set_up(repo_path, worktree_path).await? { debug!("Worktree already properly set up at path: {}", path_str); return Ok(()); } @@ -50,10 +122,10 @@ impl WorktreeManager { /// Internal worktree recreation function (always recreates) async fn recreate_worktree_internal( - repo_path: String, - branch_name: String, - worktree_path: PathBuf, - ) -> Result<(), GitError> { + repo_path: &Path, + branch_name: &str, + worktree_path: &Path, + ) -> Result<(), WorktreeError> { let path_str = worktree_path.to_string_lossy().to_string(); let branch_name_owned = branch_name.to_string(); let worktree_path_owned = worktree_path.to_path_buf(); @@ -65,7 +137,7 @@ impl WorktreeManager { let worktree_name = worktree_path .file_name() .and_then(|n| n.to_str()) - .ok_or_else(|| GitError::from_str("Invalid worktree path"))? + .ok_or_else(|| WorktreeError::InvalidPath("Invalid worktree path".to_string()))? .to_string(); info!( @@ -75,7 +147,7 @@ impl WorktreeManager { // Step 1: Comprehensive cleanup of existing worktree and metadata (non-blocking) Self::comprehensive_worktree_cleanup_async( - &git_repo_path, + git_repo_path, &worktree_path_owned, &worktree_name, ) @@ -86,15 +158,13 @@ impl WorktreeManager { let parent_path = parent.to_path_buf(); tokio::task::spawn_blocking(move || std::fs::create_dir_all(&parent_path)) .await - .map_err(|e| GitError::from_str(&format!("Task join error: {}", e)))? - .map_err(|e| { - GitError::from_str(&format!("Failed to create parent directory: {}", e)) - })?; + .map_err(|e| WorktreeError::TaskJoin(format!("Task join error: {e}")))? 
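// Sketch of the per-path serialization ensure_worktree_exists relies on: one async mutex per
// worktree path, looked up (or created) in a shared map, so concurrent callers for the same
// path run one at a time while unrelated paths stay parallel. The map wiring here is
// illustrative; the patch keeps its own lazy_static registry.
use std::{collections::HashMap, sync::Arc};

type PathLocks = tokio::sync::Mutex<HashMap<String, Arc<tokio::sync::Mutex<()>>>>;

async fn lock_for_path(locks: &PathLocks, path: &str) -> Arc<tokio::sync::Mutex<()>> {
    let mut map = locks.lock().await;
    map.entry(path.to_string())
        .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())))
        .clone()
}
// Callers clone the Arc and hold `let _guard = lock.lock().await;` across the critical section.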
+ .map_err(WorktreeError::Io)?; } // Step 3: Create the worktree with retry logic for metadata conflicts (non-blocking) Self::create_worktree_with_retry( - &git_repo_path, + git_repo_path, &branch_name_owned, &worktree_path_owned, &worktree_name, @@ -105,24 +175,24 @@ impl WorktreeManager { /// Check if a worktree is properly set up (filesystem + git metadata) async fn is_worktree_properly_set_up( - repo_path: &str, + repo_path: &Path, worktree_path: &Path, - ) -> Result { - let repo_path = repo_path.to_string(); + ) -> Result { + let repo_path = repo_path.to_path_buf(); let worktree_path = worktree_path.to_path_buf(); - tokio::task::spawn_blocking(move || { + tokio::task::spawn_blocking(move || -> Result { // Check 1: Filesystem path must exist if !worktree_path.exists() { return Ok(false); } // Check 2: Worktree must be registered in git metadata using find_worktree - let repo = Repository::open(&repo_path)?; + let repo = Repository::open(&repo_path).map_err(WorktreeError::Git)?; let worktree_name = worktree_path .file_name() .and_then(|n| n.to_str()) - .ok_or_else(|| GitError::from_str("Invalid worktree path"))?; + .ok_or_else(|| WorktreeError::InvalidPath("Invalid worktree path".to_string()))?; // Try to find the worktree - if it exists and is valid, we're good match repo.find_worktree(worktree_name) { @@ -131,7 +201,7 @@ impl WorktreeManager { } }) .await - .map_err(|e| GitError::from_str(&format!("Task join error: {}", e)))? + .map_err(|e| WorktreeError::TaskJoin(format!("{e}")))? } /// Try to remove a worktree registration from git @@ -156,7 +226,7 @@ impl WorktreeManager { repo: &Repository, worktree_path: &Path, worktree_name: &str, - ) -> Result<(), GitError> { + ) -> Result<(), WorktreeError> { debug!("Performing cleanup for worktree: {}", worktree_name); let git_repo_path = Self::get_git_repo_path(repo)?; @@ -180,13 +250,7 @@ impl WorktreeManager { "Removing existing worktree directory: {}", worktree_path.display() ); - std::fs::remove_dir_all(worktree_path).map_err(|e| { - GitError::from_str(&format!( - "Failed to remove existing directory {}: {}", - worktree_path.display(), - e - )) - })?; + std::fs::remove_dir_all(worktree_path).map_err(WorktreeError::Io)?; } debug!( @@ -198,11 +262,11 @@ impl WorktreeManager { /// Async version of comprehensive cleanup to avoid blocking the main runtime async fn comprehensive_worktree_cleanup_async( - git_repo_path: &str, + git_repo_path: &Path, worktree_path: &Path, worktree_name: &str, - ) -> Result<(), GitError> { - let git_repo_path_owned = git_repo_path.to_string(); + ) -> Result<(), WorktreeError> { + let git_repo_path_owned = git_repo_path.to_path_buf(); let worktree_path_owned = worktree_path.to_path_buf(); let worktree_name_owned = worktree_name.to_string(); @@ -224,46 +288,45 @@ impl WorktreeManager { ) }) .await - .map_err(|e| GitError::from_str(&format!("Task join error: {}", e)))? + .map_err(|e| WorktreeError::TaskJoin(format!("Task join error: {e}")))? } Ok(Err(e)) => { // Repository doesn't exist (likely deleted project), fall back to simple cleanup debug!( - "Failed to open repository at {}: {}. Falling back to simple cleanup for worktree at {}", - git_repo_path_owned, e, worktree_path_owned.display() + "Failed to open repository at {:?}: {}. 
Falling back to simple cleanup for worktree at {}", + git_repo_path_owned, + e, + worktree_path_owned.display() ); Self::simple_worktree_cleanup(&worktree_path_owned).await?; Ok(()) } - Err(e) => Err(GitError::from_str(&format!("Task join error: {}", e))), + Err(e) => Err(WorktreeError::TaskJoin(format!("{e}"))), } } /// Create worktree with retry logic in non-blocking manner async fn create_worktree_with_retry( - git_repo_path: &str, + git_repo_path: &Path, branch_name: &str, worktree_path: &Path, worktree_name: &str, path_str: &str, - ) -> Result<(), GitError> { - let git_repo_path = git_repo_path.to_string(); + ) -> Result<(), WorktreeError> { + let git_repo_path = git_repo_path.to_path_buf(); let branch_name = branch_name.to_string(); let worktree_path = worktree_path.to_path_buf(); let worktree_name = worktree_name.to_string(); let path_str = path_str.to_string(); - tokio::task::spawn_blocking(move || { + tokio::task::spawn_blocking(move || -> Result<(), WorktreeError> { // Open repository in blocking context - let repo = Repository::open(&git_repo_path) - .map_err(|e| GitError::from_str(&format!("Failed to open repository: {}", e)))?; + let repo = Repository::open(&git_repo_path).map_err(WorktreeError::Git)?; // Find the branch reference using the branch name let branch_ref = repo .find_branch(&branch_name, git2::BranchType::Local) - .map_err(|e| { - GitError::from_str(&format!("Branch '{}' not found: {}", branch_name, e)) - })? + .map_err(WorktreeError::Git)? .into_reference(); // Create worktree options @@ -274,9 +337,8 @@ impl WorktreeManager { Ok(_) => { // Verify the worktree was actually created if !worktree_path.exists() { - return Err(GitError::from_str(&format!( - "Worktree creation reported success but path {} does not exist", - path_str + return Err(WorktreeError::Repository(format!( + "Worktree creation reported success but path {path_str} does not exist" ))); } @@ -303,22 +365,15 @@ impl WorktreeManager { ); // Force cleanup metadata and try one more time - Self::force_cleanup_worktree_metadata(&git_repo_path, &worktree_name).map_err( - |e| { - GitError::from_str(&format!( - "Failed to cleanup worktree metadata: {}", - e - )) - }, - )?; + Self::force_cleanup_worktree_metadata(&git_repo_path, &worktree_name) + .map_err(WorktreeError::Io)?; // Try again after cleanup match repo.worktree(&branch_name, &worktree_path, Some(&worktree_opts)) { Ok(_) => { if !worktree_path.exists() { - return Err(GitError::from_str(&format!( - "Worktree creation reported success but path {} does not exist", - path_str + return Err(WorktreeError::Repository(format!( + "Worktree creation reported success but path {path_str} does not exist" ))); } @@ -342,32 +397,36 @@ impl WorktreeManager { "Worktree creation failed even after metadata cleanup: {}", retry_error ); - Err(retry_error) + Err(WorktreeError::Git(retry_error)) } } } - Err(e) => Err(e), + Err(e) => Err(WorktreeError::Git(e)), } }) .await - .map_err(|e| GitError::from_str(&format!("Task join error: {}", e)))? + .map_err(|e| WorktreeError::TaskJoin(format!("{e}")))? } /// Get the git repository path - fn get_git_repo_path(repo: &Repository) -> Result { + fn get_git_repo_path(repo: &Repository) -> Result { repo.workdir() - .ok_or_else(|| GitError::from_str("Repository has no working directory"))? + .ok_or_else(|| { + WorktreeError::Repository("Repository has no working directory".to_string()) + })? 
.to_str() - .ok_or_else(|| GitError::from_str("Repository path is not valid UTF-8")) - .map(|s| s.to_string()) + .ok_or_else(|| { + WorktreeError::InvalidPath("Repository path is not valid UTF-8".to_string()) + }) + .map(PathBuf::from) } /// Force cleanup worktree metadata directory fn force_cleanup_worktree_metadata( - git_repo_path: &str, + git_repo_path: &Path, worktree_name: &str, ) -> Result<(), std::io::Error> { - let git_worktree_metadata_path = Path::new(git_repo_path) + let git_worktree_metadata_path = git_repo_path .join(".git") .join("worktrees") .join(worktree_name); @@ -387,8 +446,8 @@ impl WorktreeManager { /// If git_repo_path is None, attempts to infer it from the worktree itself pub async fn cleanup_worktree( worktree_path: &Path, - git_repo_path: Option<&str>, - ) -> Result<(), GitError> { + git_repo_path: Option<&Path>, + ) -> Result<(), WorktreeError> { let path_str = worktree_path.to_string_lossy().to_string(); // Get the same lock to ensure we don't interfere with creation @@ -405,7 +464,7 @@ impl WorktreeManager { if let Some(worktree_name) = worktree_path.file_name().and_then(|n| n.to_str()) { // Try to determine the git repo path if not provided let resolved_repo_path = if let Some(repo_path) = git_repo_path { - Some(repo_path.to_string()) + Some(repo_path.to_path_buf()) } else { Self::infer_git_repo_path(worktree_path).await }; @@ -426,8 +485,8 @@ impl WorktreeManager { Self::simple_worktree_cleanup(worktree_path).await?; } } else { - return Err(GitError::from_str( - "Invalid worktree path, cannot determine name", + return Err(WorktreeError::InvalidPath( + "Invalid worktree path, cannot determine name".to_string(), )); } @@ -435,12 +494,12 @@ impl WorktreeManager { } /// Try to infer the git repository path from a worktree - async fn infer_git_repo_path(worktree_path: &Path) -> Option { + async fn infer_git_repo_path(worktree_path: &Path) -> Option { // Try using git rev-parse --git-common-dir from within the worktree let worktree_path_owned = worktree_path.to_path_buf(); tokio::task::spawn_blocking(move || { - let (shell_cmd, shell_arg) = crate::utils::shell::get_shell_command(); + let (shell_cmd, shell_arg) = get_shell_command(); let git_command = "git rev-parse --git-common-dir"; let output = std::process::Command::new(shell_cmd) @@ -454,12 +513,12 @@ impl WorktreeManager { // git-common-dir gives us the path to the .git directory // We need the working directory (parent of .git) - let git_dir_path = std::path::Path::new(&git_common_dir); + let git_dir_path = Path::new(&git_common_dir); if git_dir_path.file_name() == Some(std::ffi::OsStr::new(".git")) { - git_dir_path.parent()?.to_str().map(|s| s.to_string()) + git_dir_path.parent()?.to_str().map(PathBuf::from) } else { // In case of bare repo or unusual setup, use the git-common-dir as is - Some(git_common_dir) + Some(PathBuf::from(git_common_dir)) } } else { None @@ -471,18 +530,12 @@ impl WorktreeManager { } /// Simple worktree cleanup when we can't determine the main repo - async fn simple_worktree_cleanup(worktree_path: &Path) -> Result<(), GitError> { + async fn simple_worktree_cleanup(worktree_path: &Path) -> Result<(), WorktreeError> { let worktree_path_owned = worktree_path.to_path_buf(); - tokio::task::spawn_blocking(move || { + tokio::task::spawn_blocking(move || -> Result<(), WorktreeError> { if worktree_path_owned.exists() { - std::fs::remove_dir_all(&worktree_path_owned).map_err(|e| { - GitError::from_str(&format!( - "Failed to remove worktree directory {}: {}", - worktree_path_owned.display(), - 
e - )) - })?; + std::fs::remove_dir_all(&worktree_path_owned).map_err(WorktreeError::Io)?; info!( "Removed worktree directory: {}", worktree_path_owned.display() @@ -491,7 +544,7 @@ impl WorktreeManager { Ok(()) }) .await - .map_err(|e| GitError::from_str(&format!("Task join error: {}", e)))? + .map_err(|e| WorktreeError::TaskJoin(format!("{e}")))? } /// Rewrite worktree's commondir file to use relative paths for WSL compatibility @@ -507,7 +560,7 @@ impl WorktreeManager { git_repo_path: &Path, worktree_name: &str, ) -> Result<(), std::io::Error> { - if !cfg!(target_os = "linux") || !crate::utils::is_wsl2() { + if !cfg!(target_os = "linux") || !is_wsl2() { debug!("Skipping commondir fix for non-WSL2 environment"); return Ok(()); } @@ -551,7 +604,7 @@ impl WorktreeManager { { if resolved_canonical == target_canonical { // Write the relative path - std::fs::write(&commondir_path, format!("{}\n", relative_path_str))?; + std::fs::write(&commondir_path, format!("{relative_path_str}\n"))?; info!( "Rewrote commondir to relative path: {} -> {}", current_content, relative_path_str @@ -575,4 +628,9 @@ impl WorktreeManager { Ok(()) } + + /// Get the base directory for vibe-kanban worktrees + pub fn get_worktree_base_dir() -> std::path::PathBuf { + utils::path::get_vibe_kanban_temp_dir().join("worktrees") + } } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml new file mode 100644 index 00000000..5d9a5290 --- /dev/null +++ b/crates/utils/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "utils" +version = "0.0.56" +edition = "2024" + +[dependencies] +tokio-util = { version = "0.7", features = ["io", "codec"] } +bytes = "1.0" +axum = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +chrono = { version = "0.4", features = ["serde"] } +uuid = { version = "1.0", features = ["v4", "serde"] } +ts-rs = { workspace = true } +libc = "0.2" +rust-embed = "8.2" +directories = "6.0.0" +open = "5.3.2" +regex = "1.11.1" +sentry-tracing = { version = "0.41.0", features = ["backtrace"] } +lazy_static = "1.4" +futures-util = "0.3" +json-patch = "2.0" +base64 = "0.22" +tokio = { workspace = true } +futures = "0.3.31" +tokio-stream = { version = "0.1.17", features = ["sync"] } diff --git a/crates/utils/src/assets.rs b/crates/utils/src/assets.rs new file mode 100644 index 00000000..fd2e2413 --- /dev/null +++ b/crates/utils/src/assets.rs @@ -0,0 +1,41 @@ +use directories::ProjectDirs; +use rust_embed::RustEmbed; + +const PROJECT_ROOT: &str = env!("CARGO_MANIFEST_DIR"); + +pub fn asset_dir() -> std::path::PathBuf { + let path = if cfg!(debug_assertions) { + std::path::PathBuf::from(PROJECT_ROOT).join("../../dev_assets") + } else { + ProjectDirs::from("ai", "bloop", "vibe-kanban") + .expect("OS didn't give us a home directory") + .data_dir() + .to_path_buf() + }; + + // Ensure the directory exists + if !path.exists() { + std::fs::create_dir_all(&path).expect("Failed to create asset directory"); + } + + path + // ✔ macOS → ~/Library/Application Support/MyApp + // ✔ Linux → ~/.local/share/myapp (respects XDG_DATA_HOME) + // ✔ Windows → %APPDATA%\Example\MyApp +} + +pub fn config_path() -> std::path::PathBuf { + asset_dir().join("config.json") +} + +pub fn profiles_path() -> std::path::PathBuf { + asset_dir().join("profiles.json") +} + +#[derive(RustEmbed)] +#[folder = "../../assets/sounds"] +pub struct SoundAssets; + +#[derive(RustEmbed)] +#[folder = "../../assets/scripts"] +pub struct ScriptAssets; diff 
--git a/crates/utils/src/browser.rs b/crates/utils/src/browser.rs
new file mode 100644
index 00000000..8ff03996
--- /dev/null
+++ b/crates/utils/src/browser.rs
@@ -0,0 +1,16 @@
+use crate::is_wsl2;
+
+/// Open URL in browser with WSL2 support
+pub async fn open_browser(url: &str) -> Result<(), Box<dyn std::error::Error>> {
+    if is_wsl2() {
+        // In WSL2, use PowerShell to open the browser
+        tokio::process::Command::new("powershell.exe")
+            .arg("-Command")
+            .arg(format!("Start-Process '{url}'"))
+            .spawn()?;
+        Ok(())
+    } else {
+        // Use the standard open crate for other platforms
+        open::that(url).map_err(|e| e.into())
+    }
+}
diff --git a/crates/utils/src/diff.rs b/crates/utils/src/diff.rs
new file mode 100644
index 00000000..c90f808a
--- /dev/null
+++ b/crates/utils/src/diff.rs
@@ -0,0 +1,26 @@
+use serde::{Deserialize, Serialize};
+use ts_rs::TS;
+
+#[derive(Debug, Clone, Serialize, Deserialize, TS)]
+pub struct WorktreeDiff {
+    pub files: Vec<FileDiff>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, TS)]
+pub struct FileDiff {
+    pub path: String,
+    pub chunks: Vec<DiffChunk>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, TS)]
+pub struct DiffChunk {
+    pub chunk_type: DiffChunkType,
+    pub content: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, TS)]
+pub enum DiffChunkType {
+    Equal,
+    Insert,
+    Delete,
+}
diff --git a/backend/src/utils.rs b/crates/utils/src/lib.rs
similarity index 54%
rename from backend/src/utils.rs
rename to crates/utils/src/lib.rs
index a5db5457..58186294 100644
--- a/backend/src/utils.rs
+++ b/crates/utils/src/lib.rs
@@ -2,12 +2,17 @@ use std::{env, sync::OnceLock};
 
 use directories::ProjectDirs;
 
+pub mod assets;
+pub mod browser;
+pub mod diff;
+pub mod log_msg;
+pub mod msg_store;
 pub mod path;
+pub mod response;
+pub mod sentry;
 pub mod shell;
+pub mod stream_lines;
 pub mod text;
-pub mod worktree_manager;
-
-const PROJECT_ROOT: &str = env!("CARGO_MANIFEST_DIR");
 
 /// Cache for WSL2 detection result
 static WSL2_CACHE: OnceLock<bool> = OnceLock::new();
@@ -22,11 +27,11 @@ pub fn is_wsl2() -> bool {
         }
 
         // Check /proc/version for WSL2 signature
-        if let Ok(version) = std::fs::read_to_string("/proc/version") {
-            if version.contains("WSL2") || version.contains("microsoft") {
-                tracing::debug!("WSL2 detected via /proc/version");
-                return true;
-            }
+        if let Ok(version) = std::fs::read_to_string("/proc/version")
+            && (version.contains("WSL2") || version.contains("microsoft"))
+        {
+            tracing::debug!("WSL2 detected via /proc/version");
+            return true;
         }
 
         tracing::debug!("WSL2 not detected");
@@ -34,25 +39,6 @@ pub fn is_wsl2() -> bool {
     })
 }
 
-pub fn asset_dir() -> std::path::PathBuf {
-    if cfg!(debug_assertions) {
-        std::path::PathBuf::from(PROJECT_ROOT).join("../dev_assets")
-    } else {
-        ProjectDirs::from("ai", "bloop", env!("CARGO_PKG_NAME"))
-            .expect("OS didn't give us a home directory")
-            .data_dir()
-            .to_path_buf()
-    }
-
-    // ✔ macOS → ~/Library/Application Support/MyApp
-    // ✔ Linux → ~/.local/share/myapp (respects XDG_DATA_HOME)
-    // ✔ Windows → %APPDATA%\Example\MyApp
-}
-
-pub fn config_path() -> std::path::PathBuf {
-    asset_dir().join("config.json")
-}
-
 pub fn cache_dir() -> std::path::PathBuf {
     let proj = if cfg!(debug_assertions) {
         ProjectDirs::from("ai", "bloop-dev", env!("CARGO_PKG_NAME"))
@@ -68,9 +54,9 @@ pub fn cache_dir() -> std::path::PathBuf {
     proj.cache_dir().to_path_buf()
 }
 
-/// Get or create cached PowerShell script file
-pub async fn get_powershell_script(
-) -> Result<std::path::PathBuf, Box<dyn std::error::Error>> {
+// Get or create cached PowerShell script file
+pub async fn get_powershell_script()
+-> Result<std::path::PathBuf, Box<dyn std::error::Error>> {
     use
std::io::Write; let cache_dir = cache_dir(); @@ -79,44 +65,29 @@ pub async fn get_powershell_script( // Check if cached file already exists and is valid if script_path.exists() { // Verify file has content (basic validation) - if let Ok(metadata) = std::fs::metadata(&script_path) { - if metadata.len() > 0 { - return Ok(script_path); - } + if let Ok(metadata) = std::fs::metadata(&script_path) + && metadata.len() > 0 + { + return Ok(script_path); } } // File doesn't exist or is invalid, create it - let script_content = crate::ScriptAssets::get("toast-notification.ps1") + let script_content = assets::ScriptAssets::get("toast-notification.ps1") .ok_or("Embedded PowerShell script not found: toast-notification.ps1")? .data; // Ensure cache directory exists std::fs::create_dir_all(&cache_dir) - .map_err(|e| format!("Failed to create cache directory: {}", e))?; + .map_err(|e| format!("Failed to create cache directory: {e}"))?; let mut file = std::fs::File::create(&script_path) - .map_err(|e| format!("Failed to create PowerShell script file: {}", e))?; + .map_err(|e| format!("Failed to create PowerShell script file: {e}"))?; file.write_all(&script_content) - .map_err(|e| format!("Failed to write PowerShell script data: {}", e))?; + .map_err(|e| format!("Failed to write PowerShell script data: {e}"))?; drop(file); // Ensure file is closed Ok(script_path) } - -/// Open URL in browser with WSL2 support -pub async fn open_browser(url: &str) -> Result<(), Box> { - if is_wsl2() { - // In WSL2, use PowerShell to open the browser - tokio::process::Command::new("powershell.exe") - .arg("-Command") - .arg(format!("Start-Process '{}'", url)) - .spawn()?; - Ok(()) - } else { - // Use the standard open crate for other platforms - open::that(url).map_err(|e| e.into()) - } -} diff --git a/crates/utils/src/log_msg.rs b/crates/utils/src/log_msg.rs new file mode 100644 index 00000000..dec07406 --- /dev/null +++ b/crates/utils/src/log_msg.rs @@ -0,0 +1,58 @@ +use axum::response::sse::Event; +use json_patch::Patch; +use serde::{Deserialize, Serialize}; + +pub const EV_STDOUT: &str = "stdout"; +pub const EV_STDERR: &str = "stderr"; +pub const EV_JSON_PATCH: &str = "json_patch"; +pub const EV_SESSION_ID: &str = "session_id"; +pub const EV_FINISHED: &str = "finished"; + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum LogMsg { + Stdout(String), + Stderr(String), + JsonPatch(Patch), + SessionId(String), + Finished, +} + +impl LogMsg { + pub fn name(&self) -> &'static str { + match self { + LogMsg::Stdout(_) => EV_STDOUT, + LogMsg::Stderr(_) => EV_STDERR, + LogMsg::JsonPatch(_) => EV_JSON_PATCH, + LogMsg::SessionId(_) => EV_SESSION_ID, + LogMsg::Finished => EV_FINISHED, + } + } + + pub fn to_sse_event(&self) -> Event { + match self { + LogMsg::Stdout(s) => Event::default().event(EV_STDOUT).data(s.clone()), + LogMsg::Stderr(s) => Event::default().event(EV_STDERR).data(s.clone()), + LogMsg::JsonPatch(patch) => { + let data = serde_json::to_string(patch).unwrap_or_else(|_| "[]".to_string()); + Event::default().event(EV_JSON_PATCH).data(data) + } + LogMsg::SessionId(s) => Event::default().event(EV_SESSION_ID).data(s.clone()), + LogMsg::Finished => Event::default().event(EV_FINISHED).data(""), + } + } + + /// Rough size accounting for your byte‑budgeted history. 
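+    ///
+    /// A minimal sketch of the intended accounting (illustrative only; the byte
+    /// budget itself lives in `MsgStore`, and the values here are assumptions,
+    /// not part of the original patch):
+    ///
+    /// ```ignore
+    /// let msg = LogMsg::Stdout("hello\n".to_string());
+    /// // event name ("stdout") + payload length + a small fixed overhead
+    /// assert!(msg.approx_bytes() >= "stdout".len() + "hello\n".len());
+    /// ```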
+    pub fn approx_bytes(&self) -> usize {
+        const OVERHEAD: usize = 8;
+        match self {
+            LogMsg::Stdout(s) => EV_STDOUT.len() + s.len() + OVERHEAD,
+            LogMsg::Stderr(s) => EV_STDERR.len() + s.len() + OVERHEAD,
+            LogMsg::JsonPatch(patch) => {
+                let json_len = serde_json::to_string(patch).map(|s| s.len()).unwrap_or(2);
+                EV_JSON_PATCH.len() + json_len + OVERHEAD
+            }
+            LogMsg::SessionId(s) => EV_SESSION_ID.len() + s.len() + OVERHEAD,
+            LogMsg::Finished => EV_FINISHED.len() + OVERHEAD,
+        }
+    }
+}
diff --git a/crates/utils/src/msg_store.rs b/crates/utils/src/msg_store.rs
new file mode 100644
index 00000000..dcadeba6
--- /dev/null
+++ b/crates/utils/src/msg_store.rs
@@ -0,0 +1,175 @@
+use std::{
+    collections::VecDeque,
+    sync::{Arc, RwLock},
+};
+
+use axum::response::sse::Event;
+use futures::{StreamExt, TryStreamExt, future};
+use tokio::{sync::broadcast, task::JoinHandle};
+use tokio_stream::wrappers::BroadcastStream;
+
+use crate::{log_msg::LogMsg, stream_lines::LinesStreamExt};
+
+// 100 MB Limit
+const HISTORY_BYTES: usize = 100000 * 1024;
+
+#[derive(Clone)]
+struct StoredMsg {
+    msg: LogMsg,
+    bytes: usize,
+}
+
+struct Inner {
+    history: VecDeque<StoredMsg>,
+    total_bytes: usize,
+}
+
+pub struct MsgStore {
+    inner: RwLock<Inner>,
+    sender: broadcast::Sender<LogMsg>,
+}
+
+impl Default for MsgStore {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl MsgStore {
+    pub fn new() -> Self {
+        let (sender, _) = broadcast::channel(100);
+        Self {
+            inner: RwLock::new(Inner {
+                history: VecDeque::with_capacity(32),
+                total_bytes: 0,
+            }),
+            sender,
+        }
+    }
+
+    pub fn push(&self, msg: LogMsg) {
+        let _ = self.sender.send(msg.clone()); // live listeners
+        let bytes = msg.approx_bytes();
+
+        let mut inner = self.inner.write().unwrap();
+        while inner.total_bytes.saturating_add(bytes) > HISTORY_BYTES {
+            if let Some(front) = inner.history.pop_front() {
+                inner.total_bytes = inner.total_bytes.saturating_sub(front.bytes);
+            } else {
+                break;
+            }
+        }
+        inner.history.push_back(StoredMsg { msg, bytes });
+        inner.total_bytes = inner.total_bytes.saturating_add(bytes);
+    }
+
+    // Convenience
+    pub fn push_stdout<S: Into<String>>(&self, s: S) {
+        self.push(LogMsg::Stdout(s.into()));
+    }
+    pub fn push_stderr<S: Into<String>>(&self, s: S) {
+        self.push(LogMsg::Stderr(s.into()));
+    }
+    pub fn push_patch(&self, patch: json_patch::Patch) {
+        self.push(LogMsg::JsonPatch(patch));
+    }
+
+    pub fn push_session_id(&self, session_id: String) {
+        self.push(LogMsg::SessionId(session_id));
+    }
+
+    pub fn push_finished(&self) {
+        self.push(LogMsg::Finished);
+    }
+
+    pub fn get_receiver(&self) -> broadcast::Receiver<LogMsg> {
+        self.sender.subscribe()
+    }
+    pub fn get_history(&self) -> Vec<LogMsg> {
+        self.inner
+            .read()
+            .unwrap()
+            .history
+            .iter()
+            .map(|s| s.msg.clone())
+            .collect()
+    }
+
+    /// History then live, as `LogMsg`.
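+    ///
+    /// Rough usage sketch (illustrative only; the setup below is assumed and not
+    /// part of the original patch):
+    ///
+    /// ```ignore
+    /// use std::sync::Arc;
+    /// let store = Arc::new(MsgStore::new());
+    /// store.push_stdout("line 1\n");
+    /// // Replays "line 1\n" from history first, then yields any messages
+    /// // pushed after this call, for as long as the receiver keeps up.
+    /// let mut stream = store.history_plus_stream();
+    /// ```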
+ pub fn history_plus_stream( + &self, + ) -> futures::stream::BoxStream<'static, Result> { + let (history, rx) = (self.get_history(), self.get_receiver()); + + let hist = futures::stream::iter(history.into_iter().map(Ok::<_, std::io::Error>)); + let live = BroadcastStream::new(rx) + .filter_map(|res| async move { res.ok().map(Ok::<_, std::io::Error>) }); + + Box::pin(hist.chain(live)) + } + + pub fn stdout_chunked_stream( + &self, + ) -> futures::stream::BoxStream<'static, Result> { + self.history_plus_stream() + .take_while(|res| future::ready(!matches!(res, Ok(LogMsg::Finished)))) + .filter_map(|res| async move { + match res { + Ok(LogMsg::Stdout(s)) => Some(Ok(s)), + _ => None, + } + }) + .boxed() + } + + pub fn stdout_lines_stream( + &self, + ) -> futures::stream::BoxStream<'static, std::io::Result> { + self.stdout_chunked_stream().lines() + } + + pub fn stderr_chunked_stream( + &self, + ) -> futures::stream::BoxStream<'static, Result> { + self.history_plus_stream() + .take_while(|res| future::ready(!matches!(res, Ok(LogMsg::Finished)))) + .filter_map(|res| async move { + match res { + Ok(LogMsg::Stderr(s)) => Some(Ok(s)), + _ => None, + } + }) + .boxed() + } + + pub fn stderr_lines_stream( + &self, + ) -> futures::stream::BoxStream<'static, std::io::Result> { + self.stderr_chunked_stream().lines() + } + + /// Same stream but mapped to `Event` for SSE handlers. + pub fn sse_stream(&self) -> futures::stream::BoxStream<'static, Result> { + self.history_plus_stream() + .map_ok(|m| m.to_sse_event()) + .boxed() + } + + /// Forward a stream of typed log messages into this store. + pub fn spawn_forwarder(self: Arc, stream: S) -> JoinHandle<()> + where + S: futures::Stream> + Send + 'static, + E: std::fmt::Display + Send + 'static, + { + tokio::spawn(async move { + tokio::pin!(stream); + + while let Some(next) = stream.next().await { + match next { + Ok(msg) => self.push(msg), + Err(e) => self.push(LogMsg::Stderr(format!("stream error: {e}"))), + } + } + }) + } +} diff --git a/backend/src/utils/path.rs b/crates/utils/src/path.rs similarity index 85% rename from backend/src/utils/path.rs rename to crates/utils/src/path.rs index 088b77f0..9b9a447e 100644 --- a/backend/src/utils/path.rs +++ b/crates/utils/src/path.rs @@ -69,6 +69,25 @@ pub fn make_path_relative(path: &str, worktree_path: &str) -> String { } } +pub fn get_vibe_kanban_temp_dir() -> std::path::PathBuf { + let dir_name = if cfg!(debug_assertions) { + "vibe-kanban-dev" + } else { + "vibe-kanban" + }; + + if cfg!(target_os = "macos") { + // macOS already uses /var/folders/... which is persistent storage + std::env::temp_dir().join(dir_name) + } else if cfg!(target_os = "linux") { + // Linux: use /var/tmp instead of /tmp to avoid RAM usage + std::path::PathBuf::from("/var/tmp").join(dir_name) + } else { + // Windows and other platforms: use temp dir with vibe-kanban subdirectory + std::env::temp_dir().join(dir_name) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/utils/src/response.rs b/crates/utils/src/response.rs new file mode 100644 index 00000000..803a5833 --- /dev/null +++ b/crates/utils/src/response.rs @@ -0,0 +1,41 @@ +use serde::Serialize; +use ts_rs::TS; + +#[derive(Debug, Serialize, TS)] +pub struct ApiResponse { + success: bool, + data: Option, + error_data: Option, + message: Option, +} + +impl ApiResponse { + /// Creates a successful response, with `data` and no message. 
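+    ///
+    /// Illustrative sketch (assuming the struct is generic over a success type
+    /// `T` and an error-data type `E`, as the field layout above suggests; not
+    /// part of the original patch):
+    ///
+    /// ```ignore
+    /// let ok: ApiResponse<u32, ()> = ApiResponse::success(42);
+    /// // serializes with success = true, data = Some(42), and no message
+    /// ```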
+ pub fn success(data: T) -> Self { + ApiResponse { + success: true, + data: Some(data), + message: None, + error_data: None, + } + } + + /// Creates an error response, with `message` and no data. + pub fn error(message: &str) -> Self { + ApiResponse { + success: false, + data: None, + message: Some(message.to_string()), + error_data: None, + } + } + /// Creates an error response, with no `data`, no `message`, but with arbitrary `error_data`. + pub fn error_with_data(data: E) -> Self { + ApiResponse { + success: false, + data: None, + error_data: Some(data), + message: None, + } + } +} diff --git a/backend/src/lib.rs b/crates/utils/src/sentry.rs similarity index 59% rename from backend/src/lib.rs rename to crates/utils/src/sentry.rs index 4d008feb..e6e7e1be 100644 --- a/backend/src/lib.rs +++ b/crates/utils/src/sentry.rs @@ -1,31 +1,6 @@ -use rust_embed::RustEmbed; use sentry_tracing::{EventFilter, SentryLayer}; use tracing::Level; -pub mod app_state; -pub mod command_runner; -pub mod execution_monitor; -pub mod executor; -pub mod executors; -pub mod mcp; -pub mod middleware; -pub mod models; -pub mod routes; -pub mod services; -pub mod utils; - -#[derive(RustEmbed)] -#[folder = "../frontend/dist"] -pub struct Assets; - -#[derive(RustEmbed)] -#[folder = "sounds"] -pub struct SoundAssets; - -#[derive(RustEmbed)] -#[folder = "scripts"] -pub struct ScriptAssets; - pub fn sentry_layer() -> SentryLayer where S: tracing::Subscriber, diff --git a/backend/src/utils/shell.rs b/crates/utils/src/shell.rs similarity index 100% rename from backend/src/utils/shell.rs rename to crates/utils/src/shell.rs diff --git a/crates/utils/src/stream_lines.rs b/crates/utils/src/stream_lines.rs new file mode 100644 index 00000000..d7c59b63 --- /dev/null +++ b/crates/utils/src/stream_lines.rs @@ -0,0 +1,22 @@ +use bytes::Bytes; +use futures::{Stream, StreamExt, TryStreamExt}; +use tokio_util::{ + codec::{FramedRead, LinesCodec}, + io::StreamReader, +}; + +/// Extension trait for converting chunked string streams to line streams. +pub trait LinesStreamExt: Stream> + Sized { + /// Convert a chunked string stream to a line stream. 
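+    ///
+    /// Sketch of the intended behaviour (illustrative only; the chunk values are
+    /// assumptions, not part of the original patch):
+    ///
+    /// ```ignore
+    /// let chunks = futures::stream::iter(vec![
+    ///     Ok::<_, std::io::Error>("hel".to_string()),
+    ///     Ok("lo\nworld\n".to_string()),
+    /// ]);
+    /// // Re-frames the chunks on newlines: yields "hello", then "world".
+    /// let mut lines = chunks.lines();
+    /// ```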
+ fn lines(self) -> futures::stream::BoxStream<'static, std::io::Result> + where + Self: Send + 'static, + { + let reader = StreamReader::new(self.map(|result| result.map(Bytes::from))); + FramedRead::new(reader, LinesCodec::new()) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) + .boxed() + } +} + +impl LinesStreamExt for S where S: Stream> {} diff --git a/backend/src/utils/text.rs b/crates/utils/src/text.rs similarity index 100% rename from backend/src/utils/text.rs rename to crates/utils/src/text.rs diff --git a/dev_assets_seed/config.json b/dev_assets_seed/config.json index 87dfb145..0daa6477 100644 --- a/dev_assets_seed/config.json +++ b/dev_assets_seed/config.json @@ -9,11 +9,11 @@ "sound_file": "abstract-sound4", "push_notifications": true, "editor": { - "editor_type": "vscode", + "editor_type": "VS_CODE", "custom_command": null }, "github": { "token": "", "default_pr_base": "main" } -} +} \ No newline at end of file diff --git a/frontend/package-lock.json b/frontend/package-lock.json index f6ded6b3..a5df1fbc 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -22,15 +22,18 @@ "@sentry/react": "^9.34.0", "@sentry/vite-plugin": "^3.5.0", "@tailwindcss/typography": "^0.5.16", + "@types/react-window": "^1.8.8", "class-variance-authority": "^0.7.0", "click-to-react-component": "^1.1.2", "clsx": "^2.0.0", - "fast-json-patch": "^3.1.1", "lucide-react": "^0.303.0", "react": "^18.2.0", "react-dom": "^18.2.0", "react-markdown": "^10.1.0", "react-router-dom": "^6.8.1", + "react-use-measure": "^2.1.7", + "react-window": "^1.8.11", + "rfc6902": "^5.1.2", "tailwind-merge": "^2.2.0", "tailwindcss-animate": "^1.0.7" }, @@ -308,6 +311,15 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/runtime": { + "version": "7.28.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.2.tgz", + "integrity": "sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/template": { "version": "7.27.2", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", @@ -2742,6 +2754,15 @@ "@types/react": "^18.0.0" } }, + "node_modules/@types/react-window": { + "version": "1.8.8", + "resolved": "https://registry.npmjs.org/@types/react-window/-/react-window-1.8.8.tgz", + "integrity": "sha512-8Ls660bHR1AUA2kuRvVG9D/4XpRC6wjAaPT9dil7Ckc76eP9TKWZwwmgfq8Q1LANX3QNDnoU4Zp48A3w+zK69Q==", + "license": "MIT", + "dependencies": { + "@types/react": "*" + } + }, "node_modules/@types/semver": { "version": "7.7.0", "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.7.0.tgz", @@ -4000,12 +4021,6 @@ "node": ">= 6" } }, - "node_modules/fast-json-patch": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/fast-json-patch/-/fast-json-patch-3.1.1.tgz", - "integrity": "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==", - "license": "MIT" - }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", @@ -4932,6 +4947,12 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/memoize-one": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/memoize-one/-/memoize-one-5.2.1.tgz", + "integrity": "sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q==", + "license": "MIT" + }, 
"node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", @@ -6152,6 +6173,38 @@ } } }, + "node_modules/react-use-measure": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/react-use-measure/-/react-use-measure-2.1.7.tgz", + "integrity": "sha512-KrvcAo13I/60HpwGO5jpW7E9DfusKyLPLvuHlUyP5zqnmAPhNc6qTRjUQrdTADl0lpPpDVU2/Gg51UlOGHXbdg==", + "license": "MIT", + "peerDependencies": { + "react": ">=16.13", + "react-dom": ">=16.13" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } + } + }, + "node_modules/react-window": { + "version": "1.8.11", + "resolved": "https://registry.npmjs.org/react-window/-/react-window-1.8.11.tgz", + "integrity": "sha512-+SRbUVT2scadgFSWx+R1P754xHPEqvcfSfVX10QYg6POOz+WNgkN48pS+BtZNIMGiL1HYrSEiCkwsMS15QogEQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.0.0", + "memoize-one": ">=3.1.1 <6" + }, + "engines": { + "node": ">8.0.0" + }, + "peerDependencies": { + "react": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, "node_modules/read-cache": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", @@ -6246,6 +6299,12 @@ "node": ">=0.10.0" } }, + "node_modules/rfc6902": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/rfc6902/-/rfc6902-5.1.2.tgz", + "integrity": "sha512-zxcb+PWlE8PwX0tiKE6zP97THQ8/lHmeiwucRrJ3YFupWEmp25RmFSlB1dNTqjkovwqG4iq+u1gzJMBS3um8mA==", + "license": "MIT" + }, "node_modules/rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", diff --git a/frontend/package.json b/frontend/package.json index 94e31403..857d804f 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,7 +1,7 @@ { "name": "vibe-kanban", "private": true, - "version": "0.1.0", + "version": "0.0.55", "type": "module", "scripts": { "dev": "vite", @@ -28,15 +28,18 @@ "@sentry/react": "^9.34.0", "@sentry/vite-plugin": "^3.5.0", "@tailwindcss/typography": "^0.5.16", + "@types/react-window": "^1.8.8", "class-variance-authority": "^0.7.0", "click-to-react-component": "^1.1.2", "clsx": "^2.0.0", - "fast-json-patch": "^3.1.1", "lucide-react": "^0.303.0", "react": "^18.2.0", "react-dom": "^18.2.0", "react-markdown": "^10.1.0", "react-router-dom": "^6.8.1", + "react-use-measure": "^2.1.7", + "react-window": "^1.8.11", + "rfc6902": "^5.1.2", "tailwind-merge": "^2.2.0", "tailwindcss-animate": "^1.0.7" }, @@ -58,4 +61,4 @@ "typescript": "^5.2.2", "vite": "^5.0.8" } -} +} \ No newline at end of file diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index 2650b3de..3d642d18 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -11,7 +11,8 @@ import { OnboardingDialog } from '@/components/OnboardingDialog'; import { PrivacyOptInDialog } from '@/components/PrivacyOptInDialog'; import { ConfigProvider, useConfig } from '@/components/config-provider'; import { ThemeProvider } from '@/components/theme-provider'; -import type { EditorType, ExecutorConfig } from 'shared/types'; +import type { EditorType } from 'shared/types'; +import { ThemeMode } from 'shared/types'; import { configApi } from '@/lib/api'; import * as Sentry from '@sentry/react'; import { Loader } from '@/components/ui/loader'; @@ -58,7 +59,7 @@ function AppContent() { }; const handleOnboardingComplete = async (onboardingConfig: { - executor: ExecutorConfig; + profile: string; editor: { editor_type: EditorType; 
custom_command: string | null }; }) => { if (!config) return; @@ -66,7 +67,7 @@ function AppContent() { const updatedConfig = { ...config, onboarding_acknowledged: true, - executor: onboardingConfig.executor, + profile: onboardingConfig.profile, editor: onboardingConfig.editor, }; @@ -102,14 +103,14 @@ function AppContent() { const handleGitHubLoginComplete = async () => { try { // Refresh the config to get the latest GitHub authentication state - const latestConfig = await configApi.getConfig(); - updateConfig(latestConfig); + const latestUserSystem = await configApi.getConfig(); + updateConfig(latestUserSystem.config); setShowGitHubLogin(false); // If user skipped (no GitHub token), we need to manually set the acknowledgment const updatedConfig = { - ...latestConfig, + ...latestUserSystem.config, github_login_acknowledged: true, }; updateConfig(updatedConfig); @@ -132,7 +133,7 @@ function AppContent() { } return ( - +
(null); - const [deviceState, setDeviceState] = useState( - null - ); + const [deviceState, setDeviceState] = + useState(null); const [polling, setPolling] = useState(false); const [copied, setCopied] = useState(false); const isAuthenticated = - !!(config?.github?.username && config?.github?.token) && + !!(config?.github?.username && config?.github?.oauth_token) && !githubTokenInvalid; const handleLogin = async () => { @@ -53,21 +52,26 @@ export function GitHubLoginDialog({ // Poll for completion useEffect(() => { - let timer: number; + let timer: ReturnType | null = null; if (polling && deviceState) { const poll = async () => { try { - await githubAuthApi.poll(deviceState.device_code); - setPolling(false); - setDeviceState(null); - setError(null); - onOpenChange(false); + const poll_status = await githubAuthApi.poll(); + switch (poll_status) { + case DevicePollStatus.SUCCESS: + setPolling(false); + setDeviceState(null); + setError(null); + onOpenChange(false); + break; + case DevicePollStatus.AUTHORIZATION_PENDING: + timer = setTimeout(poll, deviceState.interval * 1000); + break; + case DevicePollStatus.SLOW_DOWN: + timer = setTimeout(poll, (deviceState.interval + 5) * 1000); + } } catch (e: any) { - if (e?.message === 'authorization_pending') { - timer = setTimeout(poll, (deviceState.interval || 5) * 1000); - } else if (e?.message === 'slow_down') { - timer = setTimeout(poll, (deviceState.interval + 5) * 1000); - } else if (e?.message === 'expired_token') { + if (e?.message === 'expired_token') { setPolling(false); setError('Device code expired. Please try again.'); setDeviceState(null); diff --git a/frontend/src/components/tasks/TaskDetails/DiffCard.tsx b/frontend/src/components/NormalizedConversation/DiffCard.tsx similarity index 100% rename from frontend/src/components/tasks/TaskDetails/DiffCard.tsx rename to frontend/src/components/NormalizedConversation/DiffCard.tsx diff --git a/frontend/src/components/tasks/TaskDetails/DisplayConversationEntry.tsx b/frontend/src/components/NormalizedConversation/DisplayConversationEntry.tsx similarity index 66% rename from frontend/src/components/tasks/TaskDetails/DisplayConversationEntry.tsx rename to frontend/src/components/NormalizedConversation/DisplayConversationEntry.tsx index 96e7af2c..29ff03ff 100644 --- a/frontend/src/components/tasks/TaskDetails/DisplayConversationEntry.tsx +++ b/frontend/src/components/NormalizedConversation/DisplayConversationEntry.tsx @@ -1,5 +1,4 @@ -import { useContext, useMemo, useState } from 'react'; -import { DiffCard } from './DiffCard'; +import { useState } from 'react'; import MarkdownRenderer from '@/components/ui/markdown-renderer.tsx'; import { AlertCircle, @@ -17,12 +16,7 @@ import { Terminal, User, } from 'lucide-react'; -import { - NormalizedEntry, - type NormalizedEntryType, - type WorktreeDiff, -} from 'shared/types.ts'; -import { TaskDiffContext } from '@/components/context/taskDetailsContext.ts'; +import { NormalizedEntry, type NormalizedEntryType } from 'shared/types.ts'; type Props = { entry: NormalizedEntry; @@ -123,116 +117,6 @@ const getContentClassName = (entryType: NormalizedEntryType) => { return baseClasses; }; -// Parse file path from content (handles various formats) -const parseFilePathFromContent = (content: string): string | null => { - // Try to extract path from backticks: `path/to/file.ext` - const backtickMatch = content.match(/`([^`]+)`/); - if (backtickMatch) { - return backtickMatch[1]; - } - - // Try to extract from common patterns like "Edit file: path" or "Write file: path" - 
const actionMatch = content.match( - /(?:Edit|Write|Create)\s+file:\s*([^\s\n]+)/i - ); - if (actionMatch) { - return actionMatch[1]; - } - - return null; -}; - -// Helper function to determine if a tool call modifies files -const isFileModificationToolCall = ( - entryType: NormalizedEntryType -): boolean => { - if (entryType.type !== 'tool_use') { - return false; - } - - // Check for direct file write action - if (entryType.action_type.action === 'file_write') { - return true; - } - - // Check for "other" actions that are file modification tools - if (entryType.action_type.action === 'other') { - const fileModificationTools = [ - 'edit', - 'write', - 'create_file', - 'multiedit', - 'edit_file', - ]; - return fileModificationTools.includes( - entryType.tool_name?.toLowerCase() || '' - ); - } - - return false; -}; - -// Extract file path from tool call -const extractFilePathFromToolCall = (entry: NormalizedEntry): string | null => { - if (entry.entry_type.type !== 'tool_use') { - return null; - } - - const { action_type, tool_name } = entry.entry_type; - - // Direct path extraction from action_type - if (action_type.action === 'file_write') { - return action_type.path || null; - } - - // For "other" actions, check if it's a known file modification tool - if (action_type.action === 'other') { - const fileModificationTools = [ - 'edit', - 'write', - 'create_file', - 'multiedit', - 'edit_file', - ]; - - if (fileModificationTools.includes(tool_name.toLowerCase())) { - // Parse file path from content field - return parseFilePathFromContent(entry.content); - } - } - - return null; -}; - -// Create filtered diff showing only specific files -const createIncrementalDiff = ( - fullDiff: WorktreeDiff | null, - targetFilePaths: string[] -): WorktreeDiff | null => { - if (!fullDiff || targetFilePaths.length === 0) { - return null; - } - - // Filter files to only include the target file paths - const filteredFiles = fullDiff.files.filter((file) => - targetFilePaths.some( - (targetPath) => - file.path === targetPath || - file.path.endsWith('/' + targetPath) || - targetPath.endsWith('/' + file.path) - ) - ); - - if (filteredFiles.length === 0) { - return null; - } - - return { - ...fullDiff, - files: filteredFiles, - }; -}; - // Helper function to determine if content should be rendered as markdown const shouldRenderMarkdown = (entryType: NormalizedEntryType) => { // Render markdown for assistant messages, plan presentations, and tool outputs that contain backticks @@ -266,8 +150,7 @@ const shouldRenderMarkdown = (entryType: NormalizedEntryType) => { ); }; -function DisplayConversationEntry({ entry, index, diffDeletable }: Props) { - const { diff } = useContext(TaskDiffContext); +function DisplayConversationEntry({ entry, index }: Props) { const [expandedErrors, setExpandedErrors] = useState>(new Set()); const toggleErrorExpansion = (index: number) => { @@ -285,32 +168,9 @@ function DisplayConversationEntry({ entry, index, diffDeletable }: Props) { const isErrorMessage = entry.entry_type.type === 'error_message'; const isExpanded = expandedErrors.has(index); const hasMultipleLines = isErrorMessage && entry.content.includes('\n'); - const isFileModification = useMemo( - () => isFileModificationToolCall(entry.entry_type), - [entry.entry_type] - ); - - // Extract file path from this specific tool call - const modifiedFilePath = useMemo( - () => (isFileModification ? 
extractFilePathFromToolCall(entry) : null), - [isFileModification, entry] - ); - - // Create incremental diff showing only the files modified by this specific tool call - const incrementalDiff = useMemo( - () => - modifiedFilePath && diff - ? createIncrementalDiff(diff, [modifiedFilePath]) - : null, - [modifiedFilePath, diff] - ); - - // Show incremental diff for this specific file modification - const shouldShowDiff = - isFileModification && incrementalDiff && incrementalDiff.files.length > 0; return ( -
+
{isErrorMessage && hasMultipleLines ? ( @@ -374,17 +234,6 @@ function DisplayConversationEntry({ entry, index, diffDeletable }: Props) { )}
- - {/* Render incremental diff card inline after file modification entries */} - {shouldShowDiff && incrementalDiff && ( -
- -
- )}
); } diff --git a/frontend/src/components/OnboardingDialog.tsx b/frontend/src/components/OnboardingDialog.tsx index 2089030a..10affc1b 100644 --- a/frontend/src/components/OnboardingDialog.tsx +++ b/frontend/src/components/OnboardingDialog.tsx @@ -19,40 +19,40 @@ import { Label } from '@/components/ui/label'; import { Input } from '@/components/ui/input'; import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; import { Sparkles, Code } from 'lucide-react'; -import type { EditorType, ExecutorConfig } from 'shared/types'; -import { - EXECUTOR_TYPES, - EDITOR_TYPES, - EXECUTOR_LABELS, - EDITOR_LABELS, -} from 'shared/types'; +import { EditorType } from 'shared/types'; +import { useUserSystem } from '@/components/config-provider'; + +import { toPrettyCase } from '@/utils/string'; interface OnboardingDialogProps { open: boolean; onComplete: (config: { - executor: ExecutorConfig; + profile: string; editor: { editor_type: EditorType; custom_command: string | null }; }) => void; } export function OnboardingDialog({ open, onComplete }: OnboardingDialogProps) { - const [executor, setExecutor] = useState({ type: 'claude' }); - const [editorType, setEditorType] = useState('vscode'); + const [profile, setProfile] = useState('claude-code'); + const [editorType, setEditorType] = useState(EditorType.VS_CODE); const [customCommand, setCustomCommand] = useState(''); + const { profiles } = useUserSystem(); + const handleComplete = () => { onComplete({ - executor, + profile, editor: { editor_type: editorType, - custom_command: editorType === 'custom' ? customCommand || null : null, + custom_command: + editorType === EditorType.CUSTOM ? customCommand || null : null, }, }); }; const isValid = - editorType !== 'custom' || - (editorType === 'custom' && customCommand.trim() !== ''); + editorType !== EditorType.CUSTOM || + (editorType === EditorType.CUSTOM && customCommand.trim() !== ''); return ( {}}> @@ -78,33 +78,22 @@ export function OnboardingDialog({ open, onComplete }: OnboardingDialogProps) {
- + -

- {executor.type === 'claude' && 'Claude Code from Anthropic'} - {executor.type === 'amp' && 'From Sourcegraph'} - {executor.type === 'gemini' && 'Google Gemini from Bloop'} - {executor.type === 'charm-opencode' && - 'Charm/Opencode AI assistant'} - {executor.type === 'claude-code-router' && - 'Claude Code Router'} - {executor.type === 'echo' && - 'This is just for debugging vibe-kanban itself'} -

@@ -127,9 +116,9 @@ export function OnboardingDialog({ open, onComplete }: OnboardingDialogProps) { - {EDITOR_TYPES.map((type) => ( + {Object.values(EditorType).map((type) => ( - {EDITOR_LABELS[type]} + {toPrettyCase(type)} ))} @@ -140,7 +129,7 @@ export function OnboardingDialog({ open, onComplete }: OnboardingDialogProps) {

- {editorType === 'custom' && ( + {editorType === EditorType.CUSTOM && (
{ onComplete(true); diff --git a/frontend/src/components/config-provider.tsx b/frontend/src/components/config-provider.tsx index 72a9f627..c91d9e24 100644 --- a/frontend/src/components/config-provider.tsx +++ b/frontend/src/components/config-provider.tsx @@ -4,44 +4,76 @@ import { useCallback, useContext, useEffect, + useMemo, useState, } from 'react'; -import type { Config } from 'shared/types'; +import { + type Config, + type Environment, + type AgentProfile, + type UserSystemInfo, + CheckTokenResponse, +} from 'shared/types'; import { configApi, githubAuthApi } from '../lib/api'; -interface ConfigContextType { +interface UserSystemState { + config: Config | null; + environment: Environment | null; + profiles: AgentProfile[] | null; +} + +interface UserSystemContextType { + // Full system state + system: UserSystemState; + + // Hot path - config helpers (most frequently used) config: Config | null; updateConfig: (updates: Partial) => void; - updateAndSaveConfig: (updates: Partial) => void; + updateAndSaveConfig: (updates: Partial) => Promise; saveConfig: () => Promise; + + // System data access + environment: Environment | null; + profiles: AgentProfile[] | null; + setEnvironment: (env: Environment | null) => void; + setProfiles: (profiles: AgentProfile[] | null) => void; + + // State loading: boolean; githubTokenInvalid: boolean; } -const ConfigContext = createContext(undefined); +const UserSystemContext = createContext( + undefined +); -interface ConfigProviderProps { +interface UserSystemProviderProps { children: ReactNode; } -export function ConfigProvider({ children }: ConfigProviderProps) { +export function UserSystemProvider({ children }: UserSystemProviderProps) { + // Split state for performance - independent re-renders const [config, setConfig] = useState(null); + const [environment, setEnvironment] = useState(null); + const [profiles, setProfiles] = useState(null); const [loading, setLoading] = useState(true); const [githubTokenInvalid, setGithubTokenInvalid] = useState(false); useEffect(() => { - const loadConfig = async () => { + const loadUserSystem = async () => { try { - const config = await configApi.getConfig(); - setConfig(config); + const userSystemInfo: UserSystemInfo = await configApi.getConfig(); + setConfig(userSystemInfo.config); + setEnvironment(userSystemInfo.environment); + setProfiles(userSystemInfo.profiles); } catch (err) { - console.error('Error loading config:', err); + console.error('Error loading user system:', err); } finally { setLoading(false); } }; - loadConfig(); + loadUserSystem(); }, []); // Check GitHub token validity after config loads @@ -53,7 +85,14 @@ export function ConfigProvider({ children }: ConfigProviderProps) { // Network/server error: do not update githubTokenInvalid return; } - setGithubTokenInvalid(!valid); + switch (valid) { + case CheckTokenResponse.VALID: + setGithubTokenInvalid(false); + break; + case CheckTokenResponse.INVALID: + setGithubTokenInvalid(true); + break; + } }; checkToken(); }, [loading]); @@ -74,7 +113,7 @@ export function ConfigProvider({ children }: ConfigProviderProps) { }, [config]); const updateAndSaveConfig = useCallback( - async (updates: Partial) => { + async (updates: Partial): Promise => { setLoading(true); const newConfig: Config | null = config ? 
{ ...config, ...updates } @@ -94,26 +133,69 @@ export function ConfigProvider({ children }: ConfigProviderProps) { [config] ); + // Memoize context value to prevent unnecessary re-renders + const value = useMemo( + () => ({ + system: { config, environment, profiles }, + config, + environment, + profiles, + updateConfig, + saveConfig, + updateAndSaveConfig, + setEnvironment, + setProfiles, + loading, + githubTokenInvalid, + }), + [ + config, + environment, + profiles, + updateConfig, + saveConfig, + updateAndSaveConfig, + loading, + githubTokenInvalid, + ] + ); + return ( - + {children} - + ); } -export function useConfig() { - const context = useContext(ConfigContext); +export function useUserSystem() { + const context = useContext(UserSystemContext); if (context === undefined) { - throw new Error('useConfig must be used within a ConfigProvider'); + throw new Error('useUserSystem must be used within a UserSystemProvider'); } return context; } + +// TODO: delete +// Backward compatibility hook - maintains existing API +export function useConfig() { + const { + config, + updateConfig, + saveConfig, + updateAndSaveConfig, + loading, + githubTokenInvalid, + } = useUserSystem(); + return { + config, + updateConfig, + saveConfig, + updateAndSaveConfig, + loading, + githubTokenInvalid, + }; +} + +// TODO: delete +// Backward compatibility export - allows gradual migration +export const ConfigProvider = UserSystemProvider; diff --git a/frontend/src/components/context/TaskDetailsContextProvider.tsx b/frontend/src/components/context/TaskDetailsContextProvider.tsx index f8f2b8b3..be584b85 100644 --- a/frontend/src/components/context/TaskDetailsContextProvider.tsx +++ b/frontend/src/components/context/TaskDetailsContextProvider.tsx @@ -6,34 +6,23 @@ import { useCallback, useEffect, useMemo, - useRef, useState, } from 'react'; +import type { ExecutionProcess, ExecutionProcessSummary } from 'shared/types'; import type { EditorType, - ExecutionProcess, - ExecutionProcessSummary, - Task, TaskAttempt, - TaskAttemptState, TaskWithAttemptStatus, - WorktreeDiff, -} from 'shared/types.ts'; -import { attemptsApi, executionProcessesApi, tasksApi } from '@/lib/api.ts'; +} from 'shared/types'; +import { attemptsApi, executionProcessesApi } from '@/lib/api.ts'; import { TaskAttemptDataContext, TaskAttemptLoadingContext, TaskAttemptStoppingContext, - TaskBackgroundRefreshContext, TaskDeletingFilesContext, TaskDetailsContext, - TaskDiffContext, - TaskExecutionStateContext, - TaskRelatedTasksContext, TaskSelectedAttemptContext, } from './taskDetailsContext.ts'; -import { TaskPlanContext } from './TaskPlanContext.ts'; -import { is_planning_executor_type } from '@/lib/utils.ts'; import type { AttemptData } from '@/lib/types.ts'; const TaskDetailsProvider: FC<{ @@ -57,145 +46,17 @@ const TaskDetailsProvider: FC<{ const [deletingFiles, setDeletingFiles] = useState>(new Set()); const [fileToDelete, setFileToDelete] = useState(null); - // Diff-related state - const [diff, setDiff] = useState(null); - const [diffLoading, setDiffLoading] = useState(true); - const [diffError, setDiffError] = useState(null); - const [isBackgroundRefreshing, setIsBackgroundRefreshing] = useState(false); - - // Related tasks state - const [relatedTasks, setRelatedTasks] = useState(null); - const [relatedTasksLoading, setRelatedTasksLoading] = useState(true); - const [relatedTasksError, setRelatedTasksError] = useState( - null - ); - - const [executionState, setExecutionState] = useState( - null - ); - const [attemptData, setAttemptData] = 
useState({ processes: [], runningProcessDetails: {}, - allLogs: [], // new field for all logs }); - const relatedTasksLoadingRef = useRef(false); - - const fetchRelatedTasks = useCallback(async () => { - if (!projectId || !task?.id || !selectedAttempt?.id) { - setRelatedTasks(null); - setRelatedTasksLoading(false); - return; - } - - // Prevent multiple concurrent requests - if (relatedTasksLoadingRef.current) { - return; - } - - relatedTasksLoadingRef.current = true; - setRelatedTasksLoading(true); - setRelatedTasksError(null); - - try { - const children = await tasksApi.getChildren( - projectId, - task.id, - selectedAttempt.id - ); - setRelatedTasks(children); - } catch (err) { - console.error('Failed to load related tasks:', err); - setRelatedTasksError('Failed to load related tasks'); - } finally { - relatedTasksLoadingRef.current = false; - setRelatedTasksLoading(false); - } - }, [projectId, task?.id, selectedAttempt?.id]); - - const fetchDiff = useCallback( - async (isBackgroundRefresh = false) => { - if (!projectId || !selectedAttempt?.id || !selectedAttempt?.task_id) { - setDiff(null); - setDiffLoading(false); - return; - } - - if (isBackgroundRefresh) { - setIsBackgroundRefreshing(true); - } else { - setDiffLoading(true); - } - setDiffError(null); - - try { - const result = await attemptsApi.getDiff( - projectId, - selectedAttempt.task_id, - selectedAttempt.id - ); - - if (result !== undefined) { - setDiff(result); - } - } catch (err) { - console.error('Failed to load diff:', err); - setDiffError('Failed to load diff'); - } finally { - if (isBackgroundRefresh) { - setIsBackgroundRefreshing(false); - } else { - setDiffLoading(false); - } - } - }, - [projectId, selectedAttempt?.id, selectedAttempt?.task_id] - ); - - useEffect(() => { - if (selectedAttempt && task) { - fetchRelatedTasks(); - } else if (task && !selectedAttempt) { - // If we have a task but no selectedAttempt, wait a bit then clear loading state - // This happens when a task has no attempts yet - const timeout = setTimeout(() => { - setRelatedTasks(null); - setRelatedTasksLoading(false); - }, 1000); // Wait 1 second for attempts to load - - return () => clearTimeout(timeout); - } - }, [selectedAttempt, task, fetchRelatedTasks]); - - const fetchExecutionState = useCallback( - async (attemptId: string, taskId: string) => { - if (!task) return; - - try { - const result = await attemptsApi.getState(projectId, taskId, attemptId); - - if (result !== undefined) { - setExecutionState((prev) => { - if (JSON.stringify(prev) === JSON.stringify(result)) return prev; - return result; - }); - } - } catch (err) { - console.error('Failed to fetch execution state:', err); - } - }, - [task, projectId] - ); - const handleOpenInEditor = useCallback( async (editorType?: EditorType) => { if (!task || !selectedAttempt) return; try { const result = await attemptsApi.openEditor( - projectId, - selectedAttempt.task_id, selectedAttempt.id, editorType ); @@ -214,16 +75,14 @@ const TaskDetailsProvider: FC<{ ); const fetchAttemptData = useCallback( - async (attemptId: string, taskId: string) => { + async (attemptId: string) => { if (!task) return; try { - const [processesResult, allLogsResult] = await Promise.all([ - attemptsApi.getExecutionProcesses(projectId, taskId, attemptId), - attemptsApi.getAllLogs(projectId, taskId, attemptId), - ]); + const processesResult = + await executionProcessesApi.getExecutionProcesses(attemptId); - if (processesResult !== undefined && allLogsResult !== undefined) { + if (processesResult !== undefined) { const 
runningProcesses = processesResult.filter( (process) => process.status === 'running' ); @@ -241,7 +100,7 @@ const TaskDetailsProvider: FC<{ // Also fetch setup script process details if it exists in the processes const setupProcess = processesResult.find( - (process) => process.process_type === 'setupscript' + (process) => process.run_reason === 'setupscript' ); if (setupProcess && !runningProcessDetails[setupProcess.id]) { const result = await executionProcessesApi.getDetails( @@ -257,7 +116,6 @@ const TaskDetailsProvider: FC<{ const newData = { processes: processesResult, runningProcessDetails, - allLogs: allLogsResult, }; if (JSON.stringify(prev) === JSON.stringify(newData)) return prev; return newData; @@ -272,10 +130,9 @@ const TaskDetailsProvider: FC<{ useEffect(() => { if (selectedAttempt && task) { - fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); - fetchExecutionState(selectedAttempt.id, selectedAttempt.task_id); + fetchAttemptData(selectedAttempt.id); } - }, [selectedAttempt, task, fetchAttemptData, fetchExecutionState]); + }, [selectedAttempt, task, fetchAttemptData]); const isAttemptRunning = useMemo(() => { if (!selectedAttempt || isStopping) { @@ -284,9 +141,9 @@ const TaskDetailsProvider: FC<{ return attemptData.processes.some( (process: ExecutionProcessSummary) => - (process.process_type === 'codingagent' || - process.process_type === 'setupscript' || - process.process_type === 'cleanupscript') && + (process.run_reason === 'codingagent' || + process.run_reason === 'setupscript' || + process.run_reason === 'cleanupscript') && process.status === 'running' ); }, [selectedAttempt, attemptData.processes, isStopping]); @@ -296,53 +153,12 @@ const TaskDetailsProvider: FC<{ const interval = setInterval(() => { if (selectedAttempt) { - fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); - fetchExecutionState(selectedAttempt.id, selectedAttempt.task_id); + fetchAttemptData(selectedAttempt.id); } }, 5000); return () => clearInterval(interval); - }, [ - isAttemptRunning, - task, - selectedAttempt, - fetchAttemptData, - fetchExecutionState, - ]); - - // Refresh diff when coding agent is running and making changes - useEffect(() => { - if (!executionState || !selectedAttempt) return; - - const isCodingAgentRunning = - executionState.execution_state === 'CodingAgentRunning'; - - if (isCodingAgentRunning) { - // Immediately refresh diff when coding agent starts running - fetchDiff(true); - - // Then refresh diff every 2 seconds while coding agent is active - const interval = setInterval(() => { - fetchDiff(true); - }, 2000); - - return () => { - clearInterval(interval); - }; - } - }, [executionState, selectedAttempt, fetchDiff]); - - // Refresh diff when coding agent completes or changes state - useEffect(() => { - if (!executionState?.execution_state || !selectedAttempt) return; - - fetchDiff(); - }, [ - executionState?.execution_state, - executionState?.has_changes, - selectedAttempt, - fetchDiff, - ]); + }, [isAttemptRunning, task, selectedAttempt, fetchAttemptData]); const value = useMemo( () => ({ @@ -379,26 +195,6 @@ const TaskDetailsProvider: FC<{ [deletingFiles, fileToDelete] ); - const diffValue = useMemo( - () => ({ - setDiffError, - fetchDiff, - diff, - diffError, - diffLoading, - setDiff, - setDiffLoading, - }), - [fetchDiff, diff, diffError, diffLoading] - ); - - const backgroundRefreshingValue = useMemo( - () => ({ - isBackgroundRefreshing, - }), - [isBackgroundRefreshing] - ); - const attemptDataValue = useMemo( () => ({ attemptData, @@ -409,110 
+205,15 @@ const TaskDetailsProvider: FC<{ [attemptData, fetchAttemptData, isAttemptRunning] ); - const executionStateValue = useMemo( - () => ({ - executionState, - fetchExecutionState, - }), - [executionState, fetchExecutionState] - ); - - const relatedTasksValue = useMemo( - () => ({ - relatedTasks, - setRelatedTasks, - relatedTasksLoading, - setRelatedTasksLoading, - relatedTasksError, - setRelatedTasksError, - fetchRelatedTasks, - totalRelatedCount: - (task?.parent_task_attempt ? 1 : 0) + (relatedTasks?.length || 0), - }), - [ - relatedTasks, - relatedTasksLoading, - relatedTasksError, - fetchRelatedTasks, - task?.parent_task_attempt, - ] - ); - - // Plan context value - const planValue = useMemo(() => { - const isPlanningMode = - attemptData.processes?.some( - (process) => - process.executor_type && - is_planning_executor_type(process.executor_type) - ) ?? false; - - const planCount = - attemptData.allLogs?.reduce((count, processLog) => { - const planEntries = - processLog.normalized_conversation?.entries.filter( - (entry) => - entry.entry_type.type === 'tool_use' && - entry.entry_type.action_type.action === 'plan_presentation' - ) ?? []; - return count + planEntries.length; - }, 0) ?? 0; - - const hasPlans = planCount > 0; - - const latestProcessHasNoPlan = (() => { - if (!attemptData.allLogs || attemptData.allLogs.length === 0) - return false; - const latestProcessLog = - attemptData.allLogs[attemptData.allLogs.length - 1]; - if (!latestProcessLog.normalized_conversation?.entries) return true; - - return !latestProcessLog.normalized_conversation.entries.some( - (entry) => - entry.entry_type.type === 'tool_use' && - entry.entry_type.action_type.action === 'plan_presentation' - ); - })(); - - // Can create task if not in planning mode, or if in planning mode and has plans - const canCreateTask = - !isPlanningMode || - (isPlanningMode && hasPlans && !latestProcessHasNoPlan); - - return { - isPlanningMode, - hasPlans, - planCount, - latestProcessHasNoPlan, - canCreateTask, - }; - }, [attemptData.processes, attemptData.allLogs]); - return ( - - - - - - - {children} - - - - - - + + {children} + diff --git a/frontend/src/components/context/TaskPlanContext.ts b/frontend/src/components/context/TaskPlanContext.ts deleted file mode 100644 index 3d6b37af..00000000 --- a/frontend/src/components/context/TaskPlanContext.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { createContext, useContext } from 'react'; - -interface TaskPlanContextValue { - isPlanningMode: boolean; - hasPlans: boolean; - planCount: number; - latestProcessHasNoPlan: boolean; - canCreateTask: boolean; -} - -export const TaskPlanContext = createContext({ - isPlanningMode: false, - hasPlans: false, - planCount: 0, - latestProcessHasNoPlan: false, - canCreateTask: true, -}); - -export const useTaskPlan = () => { - const context = useContext(TaskPlanContext); - if (!context) { - // Return defaults when used outside of TaskPlanProvider (e.g., on project-tasks page) - // In this case, we assume not in planning mode, so task creation should be allowed - return { - isPlanningMode: false, - hasPlans: false, - planCount: 0, - latestProcessHasNoPlan: false, - canCreateTask: true, - }; - } - return context; -}; diff --git a/frontend/src/components/context/taskDetailsContext.ts b/frontend/src/components/context/taskDetailsContext.ts index 6400ae53..38f05bc0 100644 --- a/frontend/src/components/context/taskDetailsContext.ts +++ b/frontend/src/components/context/taskDetailsContext.ts @@ -1,12 +1,9 @@ import { createContext, Dispatch, 
SetStateAction } from 'react'; import type { EditorType, - Task, TaskAttempt, - TaskAttemptState, TaskWithAttemptStatus, - WorktreeDiff, -} from 'shared/types.ts'; +} from 'shared/types'; import { AttemptData } from '@/lib/types.ts'; export interface TaskDetailsContextValue { @@ -72,20 +69,6 @@ export const TaskDeletingFilesContext = {} as TaskDeletingFilesContextValue ); -interface TaskDiffContextValue { - setDiffError: Dispatch>; - fetchDiff: (isBackgroundRefresh?: boolean) => Promise; - diff: WorktreeDiff | null; - diffError: string | null; - diffLoading: boolean; - setDiff: Dispatch>; - setDiffLoading: Dispatch>; -} - -export const TaskDiffContext = createContext( - {} as TaskDiffContextValue -); - interface TaskBackgroundRefreshContextValue { isBackgroundRefreshing: boolean; } @@ -94,32 +77,3 @@ export const TaskBackgroundRefreshContext = createContext( {} as TaskBackgroundRefreshContextValue ); - -interface TaskExecutionStateContextValue { - executionState: TaskAttemptState | null; - fetchExecutionState: ( - attemptId: string, - taskId: string - ) => Promise | void; -} - -export const TaskExecutionStateContext = - createContext( - {} as TaskExecutionStateContextValue - ); - -interface TaskRelatedTasksContextValue { - relatedTasks: Task[] | null; - setRelatedTasks: Dispatch>; - relatedTasksLoading: boolean; - setRelatedTasksLoading: Dispatch>; - relatedTasksError: string | null; - setRelatedTasksError: Dispatch>; - fetchRelatedTasks: () => Promise; - totalRelatedCount: number; -} - -export const TaskRelatedTasksContext = - createContext( - {} as TaskRelatedTasksContextValue - ); diff --git a/frontend/src/components/logo.tsx b/frontend/src/components/logo.tsx index 6aae3af0..6e630512 100644 --- a/frontend/src/components/logo.tsx +++ b/frontend/src/components/logo.tsx @@ -7,9 +7,9 @@ export function Logo({ className = '' }: { className?: string }) { useEffect(() => { const updateTheme = () => { - if (theme === 'light') { + if (theme === 'LIGHT') { setIsDark(false); - } else if (theme === 'system') { + } else if (theme === 'SYSTEM') { // System theme setIsDark(window.matchMedia('(prefers-color-scheme: dark)').matches); } else { @@ -21,7 +21,7 @@ export function Logo({ className = '' }: { className?: string }) { updateTheme(); // Listen for system theme changes when using system theme - if (theme === 'system') { + if (theme === 'SYSTEM') { const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)'); mediaQuery.addEventListener('change', updateTheme); return () => mediaQuery.removeEventListener('change', updateTheme); diff --git a/frontend/src/components/logs/LogEntryRow.tsx b/frontend/src/components/logs/LogEntryRow.tsx new file mode 100644 index 00000000..a2403a57 --- /dev/null +++ b/frontend/src/components/logs/LogEntryRow.tsx @@ -0,0 +1,62 @@ +import { memo, useEffect, useRef } from 'react'; +import type { UnifiedLogEntry, ProcessStartPayload } from '@/types/logs'; +import type { NormalizedEntry } from 'shared/types'; +import StdoutEntry from './StdoutEntry'; +import StderrEntry from './StderrEntry'; +import ProcessStartCard from './ProcessStartCard'; +import DisplayConversationEntry from '@/components/NormalizedConversation/DisplayConversationEntry'; + +interface LogEntryRowProps { + entry: UnifiedLogEntry; + index: number; + style?: React.CSSProperties; + setRowHeight: (index: number, height: number) => void; +} + +function LogEntryRow({ entry, index, style, setRowHeight }: LogEntryRowProps) { + const rowRef = useRef(null); + + useEffect(() => { + if (rowRef.current) { + 
setRowHeight(index, rowRef.current.clientHeight); + } + }, [rowRef]); + + const content = ( + <div ref={rowRef}>
+ {(() => { + switch (entry.channel) { + case 'stdout': + return ; + case 'stderr': + return ; + case 'normalized': + return ( + + ); + case 'process_start': + return ( + + ); + default: + return ( +
+ Unknown log type: {entry.channel} +
+ ); + } + })()} +
</div> + ); + + return style ?
<div style={style}>{content}</div>
: content; +} + +// Memoize to optimize react-window performance +export default memo(LogEntryRow); diff --git a/frontend/src/components/logs/ProcessStartCard.tsx b/frontend/src/components/logs/ProcessStartCard.tsx new file mode 100644 index 00000000..c0e02d65 --- /dev/null +++ b/frontend/src/components/logs/ProcessStartCard.tsx @@ -0,0 +1,76 @@ +import { Clock, Cog, Play, Terminal, Code } from 'lucide-react'; +import type { ProcessStartPayload } from '@/types/logs'; + +interface ProcessStartCardProps { + payload: ProcessStartPayload; +} + +function ProcessStartCard({ payload }: ProcessStartCardProps) { + const getProcessIcon = (runReason: string) => { + switch (runReason) { + case 'setupscript': + return ; + case 'cleanupscript': + return ; + case 'codingagent': + return ; + case 'devserver': + return ; + default: + return ; + } + }; + + const getProcessLabel = (runReason: string) => { + switch (runReason) { + case 'setupscript': + return 'Setup Script'; + case 'cleanupscript': + return 'Cleanup Script'; + case 'codingagent': + return 'Coding Agent'; + case 'devserver': + return 'Dev Server'; + default: + return runReason; + } + }; + + const formatTime = (dateString: string) => { + return new Date(dateString).toLocaleTimeString(); + }; + + return ( +
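// Illustrative sketch (not part of this patch): LogEntryRow above and ProcessStartCard in
// this hunk consume types from '@/types/logs' that the diff does not show. The shapes below
// are inferred from how the components use them; apart from `channel`, `payload.runReason`,
// `payload.startedAt` and `payload.status`, every field name here is an assumption.

import type { NormalizedEntry } from 'shared/types';

type LogChannel = 'stdout' | 'stderr' | 'normalized' | 'process_start';

interface ProcessStartPayload {
  runReason: string; // observed values: 'setupscript' | 'cleanupscript' | 'codingagent' | 'devserver'
  startedAt: string; // ISO timestamp, rendered with toLocaleTimeString()
  status: string;
}

interface UnifiedLogEntry {
  id: string; // assumed: stable key for the virtualized row
  channel: LogChannel;
  // stdout/stderr carry raw text, 'normalized' carries a NormalizedEntry,
  // 'process_start' carries a ProcessStartPayload.
  payload: string | NormalizedEntry | ProcessStartPayload;
}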
+
+
+
+ {getProcessIcon(payload.runReason)} + + {getProcessLabel(payload.runReason)} + +
+
+ + {formatTime(payload.startedAt)} +
+
+ {payload.status} +
+
+
+
+ ); +} + +export default ProcessStartCard; diff --git a/frontend/src/components/logs/StderrEntry.tsx b/frontend/src/components/logs/StderrEntry.tsx new file mode 100644 index 00000000..aebb81c2 --- /dev/null +++ b/frontend/src/components/logs/StderrEntry.tsx @@ -0,0 +1,13 @@ +interface StderrEntryProps { + content: string; +} + +function StderrEntry({ content }: StderrEntryProps) { + return ( +
+ {content} +
+ ); +} + +export default StderrEntry; diff --git a/frontend/src/components/logs/StdoutEntry.tsx b/frontend/src/components/logs/StdoutEntry.tsx new file mode 100644 index 00000000..c934ff5d --- /dev/null +++ b/frontend/src/components/logs/StdoutEntry.tsx @@ -0,0 +1,13 @@ +interface StdoutEntryProps { + content: string; +} + +function StdoutEntry({ content }: StdoutEntryProps) { + return ( +
+ {content} +
+ ); +} + +export default StdoutEntry; diff --git a/frontend/src/components/projects/ProjectCard.tsx b/frontend/src/components/projects/ProjectCard.tsx index 9d37c816..9cfa61d4 100644 --- a/frontend/src/components/projects/ProjectCard.tsx +++ b/frontend/src/components/projects/ProjectCard.tsx @@ -22,7 +22,7 @@ import { } from 'lucide-react'; import { useNavigate } from 'react-router-dom'; import { projectsApi } from '@/lib/api.ts'; -import { Project } from 'shared/types.ts'; +import { Project } from 'shared/types'; import { useEffect, useRef } from 'react'; type Props = { diff --git a/frontend/src/components/projects/project-detail.tsx b/frontend/src/components/projects/project-detail.tsx index 20ff9f07..77a6e6e9 100644 --- a/frontend/src/components/projects/project-detail.tsx +++ b/frontend/src/components/projects/project-detail.tsx @@ -10,7 +10,7 @@ import { } from '@/components/ui/card'; import { Badge } from '@/components/ui/badge'; import { Alert, AlertDescription } from '@/components/ui/alert'; -import { ProjectWithBranch } from 'shared/types'; +import { Project } from 'shared/types'; import { ProjectForm } from './project-form'; import { projectsApi } from '@/lib/api'; import { @@ -32,7 +32,7 @@ interface ProjectDetailProps { export function ProjectDetail({ projectId, onBack }: ProjectDetailProps) { const navigate = useNavigate(); - const [project, setProject] = useState(null); + const [project, setProject] = useState(null); const [loading, setLoading] = useState(false); const [showEditForm, setShowEditForm] = useState(false); const [error, setError] = useState(''); @@ -47,7 +47,7 @@ export function ProjectDetail({ projectId, onBack }: ProjectDetailProps) { setError(''); try { - const result = await projectsApi.getWithBranch(projectId); + const result = await projectsApi.getById(projectId); setProject(result); } catch (error) { console.error('Failed to fetch project:', error); @@ -132,11 +132,6 @@ export function ProjectDetail({ projectId, onBack }: ProjectDetailProps) {

{project.name}

- {project.current_branch && ( - - {project.current_branch} - - )}

Project details and settings diff --git a/frontend/src/components/projects/project-form-fields.tsx b/frontend/src/components/projects/project-form-fields.tsx index 2d00db27..db5d7ea4 100644 --- a/frontend/src/components/projects/project-form-fields.tsx +++ b/frontend/src/components/projects/project-form-fields.tsx @@ -3,11 +3,11 @@ import { Input } from '@/components/ui/input'; import { Button } from '@/components/ui/button'; import { Alert, AlertDescription } from '@/components/ui/alert'; import { AlertCircle, Folder } from 'lucide-react'; -import { useSystemInfo } from '@/hooks/use-system-info'; import { createScriptPlaceholderStrategy, ScriptPlaceholderContext, } from '@/utils/script-placeholders'; +import { useUserSystem } from '@/components/config-provider'; interface ProjectFormFieldsProps { isEditing: boolean; @@ -52,12 +52,12 @@ export function ProjectFormFields({ setCleanupScript, error, }: ProjectFormFieldsProps) { - const { systemInfo } = useSystemInfo(); + const { system } = useUserSystem(); // Create strategy-based placeholders - const placeholders = systemInfo + const placeholders = system.environment ? new ScriptPlaceholderContext( - createScriptPlaceholderStrategy(systemInfo.os_type) + createScriptPlaceholderStrategy(system.environment.os_type) ).getPlaceholders() : { setup: '#!/bin/bash\nnpm install\n# Add any setup commands here...', diff --git a/frontend/src/components/projects/project-form.tsx b/frontend/src/components/projects/project-form.tsx index 7f377c89..ae298714 100644 --- a/frontend/src/components/projects/project-form.tsx +++ b/frontend/src/components/projects/project-form.tsx @@ -1,7 +1,5 @@ import { useEffect, useState } from 'react'; import { Button } from '@/components/ui/button'; -import { Label } from '@/components/ui/label'; -import { Loader2 } from 'lucide-react'; import { Dialog, DialogContent, @@ -14,15 +12,8 @@ import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; import { FolderPicker } from '@/components/ui/folder-picker'; import { TaskTemplateManager } from '@/components/TaskTemplateManager'; import { ProjectFormFields } from './project-form-fields'; -import { GitHubRepositoryPicker } from './github-repository-picker'; -import { - CreateProject, - CreateProjectFromGitHub, - Project, - UpdateProject, - Environment, -} from 'shared/types'; -import { projectsApi, configApi, githubApi, RepositoryInfo } from '@/lib/api'; +import { CreateProject, Project, UpdateProject } from 'shared/types'; +import { projectsApi } from '@/lib/api'; interface ProjectFormProps { open: boolean; @@ -51,34 +42,8 @@ export function ProjectForm({ const [parentPath, setParentPath] = useState(''); const [folderName, setFolderName] = useState(''); - // Environment and GitHub repository state - const [environment, setEnvironment] = useState('local'); - const [selectedRepository, setSelectedRepository] = - useState(null); - const [modeLoading, setModeLoading] = useState(true); - const isEditing = !!project; - // Load cloud mode configuration - useEffect(() => { - const loadMode = async () => { - try { - const constants = await configApi.getConstants(); - setEnvironment(constants.mode); - } catch (err) { - console.error('Failed to load config constants:', err); - } finally { - setModeLoading(false); - } - }; - - if (!isEditing) { - loadMode(); - } else { - setModeLoading(false); - } - }, [isEditing]); - // Update form fields when project prop changes useEffect(() => { if (project) { @@ -93,7 +58,6 @@ export function ProjectForm({ setSetupScript(''); 
setDevScript(''); setCleanupScript(''); - setSelectedRepository(null); } }, [project]); @@ -139,44 +103,42 @@ export function ProjectForm({ await projectsApi.update(project.id, updateData); } else { // Creating new project - if (environment === 'cloud') { - // Cloud mode: Create project from GitHub repository - if (!selectedRepository) { - setError('Please select a GitHub repository'); - return; - } + // TODO: Compile time check for cloud + // if (environment === 'cloud') { + // // Cloud mode: Create project from GitHub repository + // if (!selectedRepository) { + // setError('Please select a GitHub repository'); + // return; + // } - const githubData: CreateProjectFromGitHub = { - repository_id: BigInt(selectedRepository.id), - name, - clone_url: selectedRepository.clone_url, - setup_script: setupScript.trim() || null, - dev_script: devScript.trim() || null, - cleanup_script: cleanupScript.trim() || null, - }; + // const githubData: CreateProjectFromGitHub = { + // repository_id: BigInt(selectedRepository.id), + // name, + // clone_url: selectedRepository.clone_url, + // setup_script: setupScript.trim() || null, + // dev_script: devScript.trim() || null, + // cleanup_script: cleanupScript.trim() || null, + // }; - await githubApi.createProjectFromRepository(githubData); - } else { - // Local mode: Create local project - let finalGitRepoPath = gitRepoPath; - if (repoMode === 'new') { - finalGitRepoPath = `${parentPath}/${folderName}`.replace( - /\/+/g, - '/' - ); - } - - const createData: CreateProject = { - name, - git_repo_path: finalGitRepoPath, - use_existing_repo: repoMode === 'existing', - setup_script: setupScript.trim() || null, - dev_script: devScript.trim() || null, - cleanup_script: cleanupScript.trim() || null, - }; - - await projectsApi.create(createData); + // await githubApi.createProjectFromRepository(githubData); + // } else { + // Local mode: Create local project + let finalGitRepoPath = gitRepoPath; + if (repoMode === 'new') { + finalGitRepoPath = `${parentPath}/${folderName}`.replace(/\/+/g, '/'); } + + const createData: CreateProject = { + name, + git_repo_path: finalGitRepoPath, + use_existing_repo: repoMode === 'existing', + setup_script: setupScript.trim() || null, + dev_script: devScript.trim() || null, + cleanup_script: cleanupScript.trim() || null, + }; + + await projectsApi.create(createData); + // } } onSuccess(); @@ -188,7 +150,6 @@ export function ProjectForm({ setCleanupScript(''); setParentPath(''); setFolderName(''); - setSelectedRepository(null); } catch (error) { setError(error instanceof Error ? error.message : 'An error occurred'); } finally { @@ -283,7 +244,9 @@ export function ProjectForm({ ) : (

- {modeLoading ? ( + {/* + TODO: compile time cloud check + modeLoading ? (
Loading... @@ -299,7 +262,7 @@ export function ProjectForm({ error={error} /> - {/* Show script fields for GitHub source */} +
- ) : ( - // Local mode: Show existing form - - )} + ) : (*/} + + + {/* )} */} )}
diff --git a/frontend/src/components/tasks/TaskDetails/DiffTab.tsx b/frontend/src/components/tasks/TaskDetails/DiffTab.tsx index 82dc99b1..fa721f6b 100644 --- a/frontend/src/components/tasks/TaskDetails/DiffTab.tsx +++ b/frontend/src/components/tasks/TaskDetails/DiffTab.tsx @@ -1,30 +1,60 @@ -import { DiffCard } from '@/components/tasks/TaskDetails/DiffCard.tsx'; -import { useContext } from 'react'; -import { TaskDiffContext } from '@/components/context/taskDetailsContext.ts'; -import { Loader } from '@/components/ui/loader'; +import { DiffCard } from '@/components/NormalizedConversation/DiffCard.tsx'; +import { useDiffStream } from '@/hooks/useDiffStream'; +import type { WorktreeDiff, FileDiff } from 'shared/types'; +import { useMemo, useContext } from 'react'; +import { TaskSelectedAttemptContext } from '@/components/context/taskDetailsContext.ts'; function DiffTab() { - const { diff, diffLoading, diffError } = useContext(TaskDiffContext); + const { selectedAttempt } = useContext(TaskSelectedAttemptContext); + const { diff, isConnected, error } = useDiffStream( + selectedAttempt?.id || null, + true + ); - if (diffLoading) { - return ( -
- -
- ); - } + const worktreeDiff = useMemo((): WorktreeDiff | null => { + if (!diff) return null; - if (diffError) { + return { + files: Object.values(diff.entries).map((entry: any) => { + // Handle PatchType wrapper properly + if (entry && typeof entry === 'object' && entry.type === 'FILE_DIFF') { + return entry.content as FileDiff; + } + // In case it's already unwrapped or a different format + return entry as FileDiff; + }), + }; + }, [diff]); + + if (error) { return ( -
-

{diffError}

+
+
Failed to load diff: {error}
); } return ( -
- +
+ {/* Connection status indicator */} + {selectedAttempt && ( +
+
+ {isConnected ? 'Live' : 'Disconnected'} +
+ )} + + {/* Diff content */} +
+ +
); } diff --git a/frontend/src/components/tasks/TaskDetails/LogsTab.tsx b/frontend/src/components/tasks/TaskDetails/LogsTab.tsx index e8ed9a92..e4594ca8 100644 --- a/frontend/src/components/tasks/TaskDetails/LogsTab.tsx +++ b/frontend/src/components/tasks/TaskDetails/LogsTab.tsx @@ -1,184 +1,102 @@ -import { useContext } from 'react'; -import { MessageSquare } from 'lucide-react'; -import { NormalizedConversationViewer } from '@/components/tasks/TaskDetails/LogsTab/NormalizedConversationViewer.tsx'; -import { - TaskAttemptDataContext, - TaskAttemptLoadingContext, - TaskExecutionStateContext, - TaskSelectedAttemptContext, -} from '@/components/context/taskDetailsContext.ts'; -import Conversation from '@/components/tasks/TaskDetails/LogsTab/Conversation.tsx'; -import { Loader } from '@/components/ui/loader'; -import SetupScriptRunning from '@/components/tasks/TaskDetails/LogsTab/SetupScriptRunning.tsx'; +import { useContext, useState, useRef, useEffect, useCallback } from 'react'; +import { VariableSizeList } from 'react-window'; +import { Cog } from 'lucide-react'; +import useMeasure from 'react-use-measure'; +import { TaskAttemptDataContext } from '@/components/context/taskDetailsContext.ts'; +import { useProcessesLogs } from '@/hooks/useProcessesLogs'; +import LogEntryRow from '@/components/logs/LogEntryRow'; +import type { UnifiedLogEntry } from '@/types/logs'; function LogsTab() { - const { loading } = useContext(TaskAttemptLoadingContext); - const { executionState } = useContext(TaskExecutionStateContext); - const { selectedAttempt } = useContext(TaskSelectedAttemptContext); const { attemptData } = useContext(TaskAttemptDataContext); + const [autoScroll, setAutoScroll] = useState(true); + const listRef = useRef(null); + const innerRef = useRef(null); + const [containerRef, bounds] = useMeasure(); - if (loading) { - return ( -
- -
- ); - } + const { entries } = useProcessesLogs(attemptData.processes || [], true); - // If no attempt is selected, show message - if (!selectedAttempt) { - return ( -
- -

No attempt selected

-

Select an attempt to view its logs

-
- ); - } + const rowHeights = useRef>({}); - // If no execution state, execution hasn't started yet - if (!executionState) { - return ( -
- -

- Task execution not started yet -

-

- Logs will appear here once the task execution begins -

-
- ); - } + const getRowHeight = useCallback((index: number): number => { + const h = rowHeights.current[index]; + return h !== undefined ? h : 100; + }, []); - const isSetupRunning = executionState.execution_state === 'SetupRunning'; - const isSetupComplete = executionState.execution_state === 'SetupComplete'; - const isSetupFailed = executionState.execution_state === 'SetupFailed'; - const isSetupStopped = executionState.execution_state === 'SetupStopped'; - const isCodingAgentRunning = - executionState.execution_state === 'CodingAgentRunning'; - const isCodingAgentComplete = - executionState.execution_state === 'CodingAgentComplete'; - const isCodingAgentFailed = - executionState.execution_state === 'CodingAgentFailed'; - const isCodingAgentStopped = - executionState.execution_state === 'CodingAgentStopped'; - const isComplete = executionState.execution_state === 'Complete'; - const hasChanges = executionState.has_changes; + const setRowHeight = useCallback((index: number, size: number) => { + listRef.current?.resetAfterIndex(0); + rowHeights.current = { ...rowHeights.current, [index]: size }; + }, []); - // When setup script is running, show setup execution stdio - if (isSetupRunning) { - return ( - - ); - } - - // When setup failed or was stopped - if (isSetupFailed || isSetupStopped) { - let setupProcess = executionState.setup_process_id - ? attemptData.runningProcessDetails[executionState.setup_process_id] - : Object.values(attemptData.runningProcessDetails).find( - (process) => process.process_type === 'setupscript' - ); - - // If not found in runningProcessDetails, try to find in processes array - if (!setupProcess) { - const setupSummary = attemptData.processes.find( - (process) => process.process_type === 'setupscript' - ); - - if (setupSummary) { - setupProcess = Object.values(attemptData.runningProcessDetails).find( - (process) => process.id === setupSummary.id - ); - - if (!setupProcess) { - setupProcess = { - ...setupSummary, - stdout: null, - stderr: null, - } as any; - } - } + // Auto-scroll to bottom when new entries arrive + useEffect(() => { + if (autoScroll && entries.length > 0 && listRef.current) { + listRef.current.scrollToItem(entries.length - 1, 'end'); } + }, [entries.length, autoScroll]); + // Handle scroll events to detect user scrolling + const onScroll = useCallback( + ({ scrollOffset, scrollUpdateWasRequested }: any) => { + if (!scrollUpdateWasRequested && bounds.height) { + const atBottom = innerRef.current + ? innerRef.current.offsetHeight - scrollOffset - bounds.height < 20 + : false; + setAutoScroll(atBottom); + } + }, + [bounds.height] + ); + + if (!attemptData.processes || attemptData.processes.length === 0) { return ( -
-
-

- {isSetupFailed ? 'Setup Script Failed' : 'Setup Script Stopped'} -

- {isSetupFailed && ( -

- The setup script encountered an error. Error details below: -

- )} +
+
+ +

No execution processes found for this attempt.

- - {setupProcess && ( - - )}
); } - // When coding agent is in any state (running, complete, failed, stopped) - if ( - isCodingAgentRunning || - isCodingAgentComplete || - isCodingAgentFailed || - isCodingAgentStopped || - hasChanges - ) { - return ; - } - - // When setup is complete but coding agent hasn't started, show waiting state - if ( - isSetupComplete && - !isCodingAgentRunning && - !isCodingAgentComplete && - !isCodingAgentFailed && - !isCodingAgentStopped && - !hasChanges - ) { - return ( -
- -

Setup Complete

-

Waiting for coding agent to start...

-
- ); - } - - // When task is complete, show completion message - if (isComplete) { - return ( -
- -

Task Complete

-

- The task has been completed successfully. -

-
- ); - } - - // When coding agent is running or complete, show conversation - if (isCodingAgentRunning || isCodingAgentComplete || hasChanges) { - return ; - } - - // Default case - unexpected state return ( -
- -

Unknown execution state

+
+ {bounds.height && bounds.width && ( + + {({ + index, + style, + data, + }: { + index: number; + style: React.CSSProperties; + data: UnifiedLogEntry[]; + }) => { + const style_with_padding = { ...style }; + if (index === entries.length - 1) { + style_with_padding.paddingBottom = '50px'; + } + + return ( + + ); + }} + + )}
); } diff --git a/frontend/src/components/tasks/TaskDetails/LogsTab/Conversation.tsx b/frontend/src/components/tasks/TaskDetails/LogsTab/Conversation.tsx deleted file mode 100644 index 264a14d0..00000000 --- a/frontend/src/components/tasks/TaskDetails/LogsTab/Conversation.tsx +++ /dev/null @@ -1,276 +0,0 @@ -import { NormalizedConversationViewer } from '@/components/tasks/TaskDetails/LogsTab/NormalizedConversationViewer.tsx'; -import { - useCallback, - useContext, - useEffect, - useMemo, - useRef, - useState, -} from 'react'; -import { TaskAttemptDataContext } from '@/components/context/taskDetailsContext.ts'; -import { useTaskPlan } from '@/components/context/TaskPlanContext.ts'; -import { Loader } from '@/components/ui/loader.tsx'; -import { Button } from '@/components/ui/button'; -import { AlertTriangle } from 'lucide-react'; -import Prompt from './Prompt'; -import ConversationEntry from './ConversationEntry'; -import { ConversationEntryDisplayType } from '@/lib/types'; - -function Conversation() { - const { attemptData, isAttemptRunning } = useContext(TaskAttemptDataContext); - const { isPlanningMode, latestProcessHasNoPlan } = useTaskPlan(); - const [shouldAutoScrollLogs, setShouldAutoScrollLogs] = useState(true); - const [conversationUpdateTrigger, setConversationUpdateTrigger] = useState(0); - const [visibleCount, setVisibleCount] = useState(100); - const [visibleRunningEntriesCount, setVisibleRunningEntriesCount] = - useState(0); - - const scrollContainerRef = useRef(null); - - // Callback to trigger auto-scroll when conversation updates - const handleConversationUpdate = useCallback(() => { - setConversationUpdateTrigger((prev) => prev + 1); - }, []); - - useEffect(() => { - if (shouldAutoScrollLogs && scrollContainerRef.current) { - scrollContainerRef.current.scrollTop = - scrollContainerRef.current.scrollHeight; - } - }, [attemptData.allLogs, conversationUpdateTrigger, shouldAutoScrollLogs]); - - const handleLogsScroll = useCallback(() => { - if (scrollContainerRef.current) { - const { scrollTop, scrollHeight, clientHeight } = - scrollContainerRef.current; - const isAtBottom = scrollTop + clientHeight >= scrollHeight - 5; - - if (isAtBottom && !shouldAutoScrollLogs) { - setShouldAutoScrollLogs(true); - } else if (!isAtBottom && shouldAutoScrollLogs) { - setShouldAutoScrollLogs(false); - } - } - }, [shouldAutoScrollLogs]); - - // Find main and follow-up processes from allLogs - const mainCodingAgentLog = useMemo( - () => - attemptData.allLogs.find( - (log) => - log.process_type.toLowerCase() === 'codingagent' && - log.command === 'executor' - ), - [attemptData.allLogs] - ); - const followUpLogs = useMemo( - () => - attemptData.allLogs.filter( - (log) => - log.process_type.toLowerCase() === 'codingagent' && - log.command === 'followup_executor' - ), - [attemptData.allLogs] - ); - - // Combine all logs in order (main first, then follow-ups) - const allProcessLogs = useMemo( - () => - [mainCodingAgentLog, ...followUpLogs].filter(Boolean) as Array< - NonNullable - >, - [mainCodingAgentLog, followUpLogs] - ); - - // Flatten all entries, keeping process info for each entry - const allEntries = useMemo(() => { - const entries: Array = []; - allProcessLogs.forEach((log, processIndex) => { - if (!log) return; - if (log.status === 'running') return; // Skip static entries for running processes - const processId = String(log.id); // Ensure string - const processPrompt = log.normalized_conversation.prompt || undefined; // Ensure undefined, not null - const entriesArr = 
log.normalized_conversation.entries || []; - entriesArr.forEach((entry, entryIndex) => { - entries.push({ - entry, - processId, - processPrompt, - processStatus: log.status, - processIsRunning: false, // Only completed processes here - process: log, - isFirstInProcess: entryIndex === 0, - processIndex, - entryIndex, - }); - }); - }); - // Sort by timestamp (entries without timestamp go last) - entries.sort((a, b) => { - if (a.entry.timestamp && b.entry.timestamp) { - return a.entry.timestamp.localeCompare(b.entry.timestamp); - } - if (a.entry.timestamp) return -1; - if (b.entry.timestamp) return 1; - return 0; - }); - return entries; - }, [allProcessLogs]); - - // Identify running processes (main + follow-ups) - const runningProcessLogs = useMemo( - () => allProcessLogs.filter((log) => log.status === 'running'), - [allProcessLogs] - ); - - // Paginate: show only the last visibleCount entries - const visibleEntries = useMemo( - () => allEntries.slice(-(visibleCount - visibleRunningEntriesCount)), - [allEntries, visibleCount, visibleRunningEntriesCount] - ); - - const renderedVisibleEntries = useMemo( - () => - visibleEntries.map((entry, index) => ( - - )), - [ - visibleEntries, - handleConversationUpdate, - attemptData.runningProcessDetails, - ] - ); - - const renderedRunningProcessLogs = useMemo(() => { - return runningProcessLogs.map((log, i) => { - const runningProcess = attemptData.runningProcessDetails[String(log.id)]; - if (!runningProcess) return null; - // Show prompt only if this is the first entry in the process (i.e., no completed entries for this process) - const showPrompt = - log.normalized_conversation.prompt && - !allEntries.some((e) => e.processId === String(log.id)); - return ( -
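// Illustrative sketch (not part of this patch): the Conversation component being deleted here
// is superseded by the react-window based LogsTab/LogEntryRow added earlier in the patch.
// Below is a minimal sketch of the height-measurement round trip those components rely on
// (rows report their rendered height, the list invalidates its size cache); the hook and
// variable names are illustrative only.

import { useCallback, useRef } from 'react';
import { VariableSizeList } from 'react-window';

function useMeasuredRows(defaultHeight = 100) {
  const listRef = useRef<VariableSizeList>(null);
  const heights = useRef<Record<number, number>>({});

  // Called by each row (LogEntryRow) once it has rendered and measured itself.
  const setRowHeight = useCallback((index: number, height: number) => {
    heights.current = { ...heights.current, [index]: height };
    // Force react-window to re-read row sizes from the updated cache.
    listRef.current?.resetAfterIndex(0);
  }, []);

  // Passed to <VariableSizeList itemSize={getRowHeight}>.
  const getRowHeight = useCallback(
    (index: number) => heights.current[index] ?? defaultHeight,
    [defaultHeight]
  );

  return { listRef, getRowHeight, setRowHeight };
}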
0 ? 'mt-8' : ''}> - {showPrompt && ( - - )} - -
- ); - }); - }, [ - runningProcessLogs, - attemptData.runningProcessDetails, - handleConversationUpdate, - allEntries, - visibleCount, - ]); - - // Check if we should show the status banner - only if the most recent process failed/stopped - const getMostRecentProcess = () => { - if (followUpLogs.length > 0) { - // Sort by creation time or use last in array as most recent - return followUpLogs[followUpLogs.length - 1]; - } - return mainCodingAgentLog; - }; - - const mostRecentProcess = getMostRecentProcess(); - const showStatusBanner = - mostRecentProcess && - (mostRecentProcess.status === 'failed' || - mostRecentProcess.status === 'killed'); - - return ( -
- {visibleCount - visibleRunningEntriesCount < allEntries.length && ( -
- -
- )} - {visibleEntries.length > 0 && ( -
{renderedVisibleEntries}
- )} - {/* Render live viewers for running processes (after paginated list) */} - {renderedRunningProcessLogs} - {/* If nothing to show at all, show loader */} - {visibleEntries.length === 0 && runningProcessLogs.length === 0 && ( - - Coding Agent Starting -
- Initializing conversation... - - } - size={48} - className="py-8" - /> - )} - - {/* Status banner for failed/stopped states - shown at bottom */} - {showStatusBanner && mostRecentProcess && ( -
-

- {mostRecentProcess.status === 'failed' - ? 'Coding Agent Failed' - : 'Coding Agent Stopped'} -

-

- {mostRecentProcess.status === 'failed' - ? 'The coding agent encountered an error.' - : 'The coding agent was stopped.'} -

-
- )} - - {/* Warning banner for planning mode without plan */} - {isPlanningMode && latestProcessHasNoPlan && !isAttemptRunning && ( -
-
- -

- No Plan Generated -

-
-

- The last execution attempt did not produce a plan. Task creation is - disabled until a plan is available. Try providing more specific - instructions or check the conversation for any errors. -

-
- )} -
- ); -} - -export default Conversation; diff --git a/frontend/src/components/tasks/TaskDetails/LogsTab/ConversationEntry.tsx b/frontend/src/components/tasks/TaskDetails/LogsTab/ConversationEntry.tsx deleted file mode 100644 index 89e7746d..00000000 --- a/frontend/src/components/tasks/TaskDetails/LogsTab/ConversationEntry.tsx +++ /dev/null @@ -1,56 +0,0 @@ -import { ConversationEntryDisplayType } from '@/lib/types'; -import DisplayConversationEntry from '../DisplayConversationEntry'; -import { NormalizedConversationViewer } from './NormalizedConversationViewer'; -import Prompt from './Prompt'; -import { Loader } from '@/components/ui/loader.tsx'; -import { ExecutionProcess } from 'shared/types'; - -type Props = { - item: ConversationEntryDisplayType; - idx: number; - handleConversationUpdate: () => void; - visibleEntriesLength: number; - runningProcessDetails: Record; -}; - -const ConversationEntry = ({ - item, - idx, - handleConversationUpdate, - visibleEntriesLength, - runningProcessDetails, -}: Props) => { - const showPrompt = item.isFirstInProcess && item.processPrompt; - // For running processes, render the live viewer below the static entries - if (item.processIsRunning && idx === visibleEntriesLength - 1) { - // Only render the live viewer for the last entry of a running process - const runningProcess = runningProcessDetails[item.processId]; - if (runningProcess) { - return ( -
- {showPrompt && } - -
- ); - } - // Fallback: show loading if not found - return ; - } else { - return ( -
- {showPrompt && } - -
- ); - } -}; - -export default ConversationEntry; diff --git a/frontend/src/components/tasks/TaskDetails/LogsTab/NormalizedConversationViewer.tsx b/frontend/src/components/tasks/TaskDetails/LogsTab/NormalizedConversationViewer.tsx deleted file mode 100644 index 1ffcebaf..00000000 --- a/frontend/src/components/tasks/TaskDetails/LogsTab/NormalizedConversationViewer.tsx +++ /dev/null @@ -1,92 +0,0 @@ -import { Hammer } from 'lucide-react'; -import { Loader } from '@/components/ui/loader.tsx'; -import MarkdownRenderer from '@/components/ui/markdown-renderer.tsx'; -import type { ExecutionProcess, WorktreeDiff } from 'shared/types.ts'; -import DisplayConversationEntry from '@/components/tasks/TaskDetails/DisplayConversationEntry.tsx'; -import useNormalizedConversation from '@/hooks/useNormalizedConversation'; - -interface NormalizedConversationViewerProps { - executionProcess: ExecutionProcess; - onConversationUpdate?: () => void; - onDisplayEntriesChange?: (num: number) => void; - visibleEntriesNum?: number; - diff?: WorktreeDiff | null; - isBackgroundRefreshing?: boolean; - diffDeletable?: boolean; -} - -export function NormalizedConversationViewer({ - executionProcess, - diffDeletable, - onConversationUpdate, - visibleEntriesNum, - onDisplayEntriesChange, -}: NormalizedConversationViewerProps) { - const { loading, error, conversation, displayEntries } = - useNormalizedConversation({ - executionProcess, - onConversationUpdate, - onDisplayEntriesChange, - visibleEntriesNum, - }); - - if (loading) { - return ( - - ); - } - - if (error) { - return
{error}
; - } - - if (!conversation || conversation.entries.length === 0) { - // If the execution process is still running, show loading instead of "no data" - if (executionProcess.status === 'running') { - return ( -
- Waiting for logs... -
- ); - } - - return ( -
- No conversation data available -
- ); - } - - return ( -
- {/* Display prompt if available */} - {conversation.prompt && ( -
-
- -
-
-
- -
-
-
- )} - - {/* Display conversation entries */} -
- {displayEntries.map((entry, index) => ( - - ))} -
-
- ); -} diff --git a/frontend/src/components/tasks/TaskDetails/LogsTab/Prompt.tsx b/frontend/src/components/tasks/TaskDetails/LogsTab/Prompt.tsx deleted file mode 100644 index af988912..00000000 --- a/frontend/src/components/tasks/TaskDetails/LogsTab/Prompt.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import MarkdownRenderer from '@/components/ui/markdown-renderer'; -import { Hammer } from 'lucide-react'; - -const Prompt = ({ prompt }: { prompt: string }) => { - return ( -
-
- -
-
-
- -
-
-
- ); -}; - -export default Prompt; diff --git a/frontend/src/components/tasks/TaskDetails/LogsTab/SetupScriptRunning.tsx b/frontend/src/components/tasks/TaskDetails/LogsTab/SetupScriptRunning.tsx deleted file mode 100644 index 4c473b76..00000000 --- a/frontend/src/components/tasks/TaskDetails/LogsTab/SetupScriptRunning.tsx +++ /dev/null @@ -1,49 +0,0 @@ -import { useEffect, useMemo, useRef } from 'react'; -import { ExecutionProcess } from 'shared/types.ts'; - -type Props = { - setupProcessId: string | null; - runningProcessDetails: Record; -}; - -function SetupScriptRunning({ setupProcessId, runningProcessDetails }: Props) { - const setupScrollRef = useRef(null); - - // Auto-scroll setup script logs to bottom - useEffect(() => { - if (setupScrollRef.current) { - setupScrollRef.current.scrollTop = setupScrollRef.current.scrollHeight; - } - }, [runningProcessDetails]); - - const setupProcess = useMemo( - () => - setupProcessId - ? runningProcessDetails[setupProcessId] - : Object.values(runningProcessDetails).find( - (process) => process.process_type === 'setupscript' - ), - [setupProcessId, runningProcessDetails] - ); - - return ( -
-
-

Setup Script Running

-

- Preparing the environment for the coding agent... -

-
- - {setupProcess && ( -
- {[setupProcess.stdout || '', setupProcess.stderr || ''] - .filter(Boolean) - .join('\n') || 'Waiting for setup script output...'} -
- )} -
- ); -} - -export default SetupScriptRunning; diff --git a/frontend/src/components/tasks/TaskDetails/PlanTab.tsx b/frontend/src/components/tasks/TaskDetails/PlanTab.tsx deleted file mode 100644 index 9017dd8a..00000000 --- a/frontend/src/components/tasks/TaskDetails/PlanTab.tsx +++ /dev/null @@ -1,258 +0,0 @@ -import { useContext, useMemo, useState } from 'react'; -import { - FileText, - Copy, - AlertTriangle, - CheckCircle, - ChevronDown, - ChevronRight, -} from 'lucide-react'; -import { - TaskAttemptDataContext, - TaskAttemptLoadingContext, -} from '@/components/context/taskDetailsContext.ts'; -import { useTaskPlan } from '@/components/context/TaskPlanContext.ts'; -import { Loader } from '@/components/ui/loader'; -import MarkdownRenderer from '@/components/ui/markdown-renderer.tsx'; -import { NormalizedEntry } from 'shared/types.ts'; - -interface PlanEntry { - entry: NormalizedEntry; - processId: string; - processIndex: number; - planIndex: number; - isCurrent: boolean; -} - -function PlanTab() { - const { loading } = useContext(TaskAttemptLoadingContext); - const { attemptData } = useContext(TaskAttemptDataContext); - const { isPlanningMode, hasPlans, latestProcessHasNoPlan } = useTaskPlan(); - const [copiedPlan, setCopiedPlan] = useState(null); - const [expandedPlans, setExpandedPlans] = useState>(new Set()); - - // Extract all plans from all processes - const plans = useMemo(() => { - if (!attemptData.allLogs) return []; - - const planEntries: PlanEntry[] = []; - let globalPlanIndex = 1; - - attemptData.allLogs.forEach((processLog, processIndex) => { - if (!processLog.normalized_conversation?.entries) return; - - let localPlanIndex = 1; - processLog.normalized_conversation.entries.forEach((entry) => { - if ( - entry.entry_type.type === 'tool_use' && - entry.entry_type.action_type.action === 'plan_presentation' - ) { - planEntries.push({ - entry, - processId: processLog.id, - processIndex, - planIndex: localPlanIndex, - isCurrent: globalPlanIndex === planEntries.length + 1, // Last plan is current - }); - localPlanIndex++; - globalPlanIndex++; - } - }); - }); - - // Mark the last plan as current - if (planEntries.length > 0) { - planEntries.forEach((plan) => { - plan.isCurrent = false; - }); - planEntries[planEntries.length - 1].isCurrent = true; - } - - return planEntries; - }, [attemptData.allLogs]); - - const handleCopyPlan = async (planContent: string, planId: string) => { - try { - await navigator.clipboard.writeText(planContent); - setCopiedPlan(planId); - setTimeout(() => setCopiedPlan(null), 2000); - } catch (error) { - console.error('Failed to copy plan:', error); - } - }; - - const togglePlanExpansion = (planId: string) => { - setExpandedPlans((prev) => { - const newSet = new Set(prev); - if (newSet.has(planId)) { - newSet.delete(planId); - } else { - newSet.add(planId); - } - return newSet; - }); - }; - - if (loading) { - return ( -
- -
- ); - } - - if (!isPlanningMode) { - return ( -
- -

Not in planning mode

-

- This tab is only available when using a planning executor -

-
- ); - } - - if (!hasPlans && latestProcessHasNoPlan) { - return ( -
-
- -

- No plan generated -

-

- The last execution attempt did not produce a plan. Task creation is - disabled until a plan is available. -

-
-
- ); - } - - if (!hasPlans) { - return ( -
- -

No plans available

-

- Plans will appear here once they are generated -

-
- ); - } - - return ( -
-
-

Plans ({plans.length})

- {latestProcessHasNoPlan && ( -
- - Last attempt produced no plan -
- )} -
- -
- {plans.map((planEntry, index) => { - const planId = `${planEntry.processId}-${planEntry.planIndex}`; - const planContent = - planEntry.entry.entry_type.type === 'tool_use' && - planEntry.entry.entry_type.action_type.action === - 'plan_presentation' - ? planEntry.entry.entry_type.action_type.plan - : planEntry.entry.content; - const isExpanded = expandedPlans.has(planId); - - return ( -
-
togglePlanExpansion(planId)} - > -
- - - Plan {index + 1} - - {planEntry.isCurrent && ( -
- - Current -
- )} - {planEntry.entry.timestamp && ( - - {new Date(planEntry.entry.timestamp).toLocaleString()} - - )} -
- -
- - {isExpanded && ( -
-
- -
-
- )} -
- ); - })} -
- - {plans.length > 1 && ( -
- Previous plans are shown with reduced emphasis. Click to - expand/collapse plans. -
- )} -
- ); -} - -export default PlanTab; diff --git a/frontend/src/components/tasks/TaskDetails/ProcessCard.tsx b/frontend/src/components/tasks/TaskDetails/ProcessCard.tsx new file mode 100644 index 00000000..8bd118df --- /dev/null +++ b/frontend/src/components/tasks/TaskDetails/ProcessCard.tsx @@ -0,0 +1,204 @@ +import { useState, useEffect, useRef } from 'react'; +import { + Play, + Square, + AlertCircle, + CheckCircle, + Clock, + ChevronDown, + ChevronRight, +} from 'lucide-react'; +import type { + ExecutionProcessStatus, + ExecutionProcessSummary, +} from 'shared/types'; +import { useLogStream } from '@/hooks/useLogStream'; +import { useProcessConversation } from '@/hooks/useProcessConversation'; +import DisplayConversationEntry from '@/components/NormalizedConversation/DisplayConversationEntry'; + +interface ProcessCardProps { + process: ExecutionProcessSummary; +} + +function ProcessCard({ process }: ProcessCardProps) { + const [showLogs, setShowLogs] = useState(false); + const isCodingAgent = process.run_reason === 'codingagent'; + + // Use appropriate hook based on process type + const { + logs, + isConnected: rawConnected, + error: rawError, + } = useLogStream(process.id, showLogs && !isCodingAgent); + const { + entries, + isConnected: normalizedConnected, + error: normalizedError, + } = useProcessConversation(process.id, showLogs && isCodingAgent); + + const logEndRef = useRef(null); + const isConnected = isCodingAgent ? normalizedConnected : rawConnected; + const error = isCodingAgent ? normalizedError : rawError; + + // Auto-scroll to bottom when new logs/entries arrive + useEffect(() => { + if (logEndRef.current) { + logEndRef.current.scrollIntoView({ behavior: 'smooth' }); + } + }, [logs, entries]); + const getStatusIcon = (status: ExecutionProcessStatus) => { + switch (status) { + case 'running': + return ; + case 'completed': + return ; + case 'failed': + return ; + case 'killed': + return ; + default: + return ; + } + }; + + const getStatusColor = (status: ExecutionProcessStatus) => { + switch (status) { + case 'running': + return 'bg-blue-50 border-blue-200 text-blue-800'; + case 'completed': + return 'bg-green-50 border-green-200 text-green-800'; + case 'failed': + return 'bg-red-50 border-red-200 text-red-800'; + case 'killed': + return 'bg-gray-50 border-gray-200 text-gray-800'; + default: + return 'bg-gray-50 border-gray-200 text-gray-800'; + } + }; + + const formatDate = (dateString: string) => { + const date = new Date(dateString); + return date.toLocaleString(); + }; + + const getDuration = () => { + const startTime = new Date(process.started_at).getTime(); + const endTime = process.completed_at + ? new Date(process.completed_at).getTime() + : Date.now(); + const durationMs = endTime - startTime; + const durationSeconds = Math.floor(durationMs / 1000); + + if (durationSeconds < 60) { + return `${durationSeconds}s`; + } + const minutes = Math.floor(durationSeconds / 60); + const seconds = durationSeconds % 60; + return `${minutes}m ${seconds}s`; + }; + + return ( +
+
+
+ {getStatusIcon(process.status)} +
+

{process.run_reason}

+

+ Duration: {getDuration()} +

+
+
+
+ + {process.status} + + {process.exit_code !== null && ( +

+ Exit: {process.exit_code.toString()} +

+ )} +
+
+ +
+
+ Started:{' '} + {formatDate(process.started_at)} +
+ {process.completed_at && ( +
+ Completed:{' '} + {formatDate(process.completed_at)} +
+ )} +
+ Process ID: {process.id} +
+
+ + {/* Log section */} +
+ + + {showLogs && ( +
+ {error &&
{error}
} + + {isCodingAgent ? ( + // Normalized conversation display for coding agents +
+ {entries.length === 0 ? ( +
+ No conversation entries available... +
+ ) : ( + entries.map((entry, index) => ( + + )) + )} +
+
+ ) : ( + // Raw logs display for other processes +
+ {logs.length === 0 ? ( +
No logs available...
+ ) : ( + logs.map((log, index) => ( +
+ {log} +
+ )) + )} +
+
+ )} +
+ )} +
+
+ ); +} + +export default ProcessCard; diff --git a/frontend/src/components/tasks/TaskDetails/ProcessesTab.tsx b/frontend/src/components/tasks/TaskDetails/ProcessesTab.tsx index 829893f9..3950528f 100644 --- a/frontend/src/components/tasks/TaskDetails/ProcessesTab.tsx +++ b/frontend/src/components/tasks/TaskDetails/ProcessesTab.tsx @@ -13,7 +13,7 @@ import { executionProcessesApi } from '@/lib/api.ts'; import type { ExecutionProcessStatus, ExecutionProcessSummary, -} from 'shared/types.ts'; +} from 'shared/types'; function ProcessesTab() { const { attemptData, setAttemptData } = useContext(TaskAttemptDataContext); @@ -105,7 +105,7 @@ function ProcessesTab() { return (
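// Illustrative sketch (not part of this patch): ProcessCard above derives a human-readable
// duration from started_at/completed_at. The same logic as a standalone helper, with worked
// examples; the function name is illustrative and not part of the codebase.

function formatDuration(startedAt: string, completedAt: string | null): string {
  const end = completedAt ? new Date(completedAt).getTime() : Date.now();
  const totalSeconds = Math.floor((end - new Date(startedAt).getTime()) / 1000);
  if (totalSeconds < 60) {
    return `${totalSeconds}s`; // e.g. a 45-second run renders as "45s"
  }
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = totalSeconds % 60;
  return `${minutes}m ${seconds}s`; // e.g. 125 seconds renders as "2m 5s"
}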
{!selectedProcessId ? ( -
+
{attemptData.processes.map((process) => (

- {process.process_type} - {process.executor_type && ( - - {' '} - ({process.executor_type}) - - )} + {process.run_reason}

- {process.command} + Process ID: {process.id}

- {process.args && ( -

- Args: {process.args} -

- )}
@@ -162,9 +151,7 @@ function ProcessesTab() { Completed: {formatDate(process.completed_at)} )}
-
- Working directory: {process.working_directory} -
+
Process ID: {process.id}
))} @@ -191,18 +178,13 @@ function ProcessesTab() {

Type:{' '} - {selectedProcess.process_type} + {selectedProcess.run_reason}

Status:{' '} {selectedProcess.status}

- {selectedProcess.executor_type && ( -

- Executor:{' '} - {selectedProcess.executor_type} -

- )} + {/* Executor type field not available in new type */}

Exit Code:{' '} {selectedProcess.exit_code?.toString() ?? 'N/A'} @@ -226,48 +208,25 @@ function ProcessesTab() {

+ {/* Command, working directory, stdout, stderr fields not available in new ExecutionProcess type */}
-

Command

+

+ Process Information +

- {selectedProcess.command} - {selectedProcess.args && ( -
- Args: {selectedProcess.args} +
Process ID: {selectedProcess.id}
+
+ Task Attempt ID: {selectedProcess.task_attempt_id} +
+
Run Reason: {selectedProcess.run_reason}
+
Status: {selectedProcess.status}
+ {selectedProcess.exit_code !== null && ( +
+ Exit Code: {selectedProcess.exit_code.toString()}
)}
- -
-

- Working Directory -

-
- {selectedProcess.working_directory} -
-
- - {selectedProcess.stdout && ( -
-

Stdout

-
-
-                        {selectedProcess.stdout}
-                      
-
-
- )} - - {selectedProcess.stderr && ( -
-

Stderr

-
-
-                        {selectedProcess.stderr}
-                      
-
-
- )}
) : loadingProcessId === selectedProcessId ? (
diff --git a/frontend/src/components/tasks/TaskDetails/RelatedTasksTab.tsx b/frontend/src/components/tasks/TaskDetails/RelatedTasksTab.tsx deleted file mode 100644 index 3f1fc45a..00000000 --- a/frontend/src/components/tasks/TaskDetails/RelatedTasksTab.tsx +++ /dev/null @@ -1,216 +0,0 @@ -import { useContext, useEffect, useState } from 'react'; -import { useNavigate } from 'react-router-dom'; -import { - TaskDetailsContext, - TaskRelatedTasksContext, -} from '@/components/context/taskDetailsContext.ts'; -import { attemptsApi, tasksApi } from '@/lib/api.ts'; -import type { Task, TaskAttempt } from 'shared/types.ts'; -import { - AlertCircle, - CheckCircle, - Clock, - XCircle, - ArrowUp, - ArrowDown, -} from 'lucide-react'; - -function RelatedTasksTab() { - const { task, projectId } = useContext(TaskDetailsContext); - const { relatedTasks, relatedTasksLoading, relatedTasksError } = useContext( - TaskRelatedTasksContext - ); - const navigate = useNavigate(); - - // State for parent task details - const [parentTaskDetails, setParentTaskDetails] = useState<{ - task: Task; - attempt: TaskAttempt; - } | null>(null); - const [parentTaskLoading, setParentTaskLoading] = useState(false); - - const handleTaskClick = (relatedTask: any) => { - navigate(`/projects/${projectId}/tasks/${relatedTask.id}`); - }; - - const hasParent = task?.parent_task_attempt; - const children = relatedTasks || []; - - // Fetch parent task details when component mounts - useEffect(() => { - const fetchParentTaskDetails = async () => { - if (!task?.parent_task_attempt) { - setParentTaskDetails(null); - return; - } - - setParentTaskLoading(true); - try { - const attemptData = await attemptsApi.getDetails( - task.parent_task_attempt - ); - const parentTask = await tasksApi.getById( - projectId, - attemptData.task_id - ); - setParentTaskDetails({ - task: parentTask, - attempt: attemptData, - }); - } catch (error) { - console.error('Error fetching parent task details:', error); - setParentTaskDetails(null); - } finally { - setParentTaskLoading(false); - } - }; - - fetchParentTaskDetails(); - }, [task?.parent_task_attempt, projectId]); - - const handleParentClick = async () => { - if (task?.parent_task_attempt) { - try { - const attemptData = await attemptsApi.getDetails( - task.parent_task_attempt - ); - navigate( - `/projects/${projectId}/tasks/${attemptData.task_id}?attempt=${task.parent_task_attempt}` - ); - } catch (error) { - console.error('Error navigating to parent task:', error); - } - } - }; - - const getStatusIcon = (status: string) => { - switch (status) { - case 'done': - return ; - case 'inprogress': - return ; - case 'cancelled': - return ; - case 'inreview': - return ; - default: - return ; - } - }; - - if (relatedTasksLoading) { - return ( -
-
-
- ); - } - - if (relatedTasksError) { - return ( -
-
- -

{relatedTasksError}

-
-
- ); - } - - const totalRelatedTasks = (hasParent ? 1 : 0) + children.length; - - if (totalRelatedTasks === 0) { - return ( -
-
-
-

No related tasks found.

-

- This task doesn't have any parent task or subtasks. -

-
-
-
- ); - } - - return ( -
- {/* Parent Task */} - {hasParent && ( -
-

- - Parent Task -

- -
- )} - - {/* Child Tasks */} - {children.length > 0 && ( -
-

- - Child Tasks ({children.length}) -

-
- {children.map((childTask) => ( - - ))} -
-
- )} -
- ); -} - -export default RelatedTasksTab; diff --git a/frontend/src/components/tasks/TaskDetails/TabNavigation.tsx b/frontend/src/components/tasks/TaskDetails/TabNavigation.tsx index 1b6295ea..d899095e 100644 --- a/frontend/src/components/tasks/TaskDetails/TabNavigation.tsx +++ b/frontend/src/components/tasks/TaskDetails/TabNavigation.tsx @@ -1,30 +1,15 @@ -import { - GitCompare, - MessageSquare, - Network, - Cog, - FileText, -} from 'lucide-react'; +import { GitCompare, MessageSquare, Cog } from 'lucide-react'; import { useContext } from 'react'; -import { - TaskAttemptDataContext, - TaskDiffContext, - TaskRelatedTasksContext, -} from '@/components/context/taskDetailsContext.ts'; -import { useTaskPlan } from '@/components/context/TaskPlanContext.ts'; +import { TaskAttemptDataContext } from '@/components/context/taskDetailsContext.ts'; +import type { TabType } from '@/types/tabs'; type Props = { - activeTab: 'logs' | 'diffs' | 'related' | 'processes' | 'plan'; - setActiveTab: ( - tab: 'logs' | 'diffs' | 'related' | 'processes' | 'plan' - ) => void; + activeTab: TabType; + setActiveTab: (tab: TabType) => void; }; function TabNavigation({ activeTab, setActiveTab }: Props) { - const { diff } = useContext(TaskDiffContext); - const { totalRelatedCount } = useContext(TaskRelatedTasksContext); const { attemptData } = useContext(TaskAttemptDataContext); - const { isPlanningMode, planCount } = useTaskPlan(); return (
@@ -41,24 +26,7 @@ function TabNavigation({ activeTab, setActiveTab }: Props) { Logs - {isPlanningMode && ( - - )} + - {onCreateAndStartTask && (
- {/* Plan warning when in planning mode without plan */} - {isPlanningMode && !canCreateTask && ( -
-
- -

- Plan Required -

-
-

- Cannot start attempt - no plan was generated in the last - execution. Please generate a plan first. -

-
- )} -
{/* Step 1: Choose Base Branch */}
- {/* Step 2: Choose Coding Agent */} + {/* Step 2: Choose Profile */}
- - - - - - {availableExecutors.map((executor) => ( - setCreateAttemptExecutor(executor.id)} - className={ - createAttemptExecutor === executor.id ? 'bg-accent' : '' - } + {availableProfiles && ( + + + + + + {availableProfiles.map((profile) => ( + setSelectedProfile(profile.label)} + className={ + selectedProfile === profile.label ? 'bg-accent' : '' + } + > + {profile.label} + + ))} + + + )}
{/* Step 3: Start Attempt */} @@ -236,28 +221,18 @@ function CreateAttempt({
diff --git a/frontend/src/components/tasks/Toolbar/CreatePRDialog.tsx b/frontend/src/components/tasks/Toolbar/CreatePRDialog.tsx index cbf8c02f..f70d90d3 100644 --- a/frontend/src/components/tasks/Toolbar/CreatePRDialog.tsx +++ b/frontend/src/components/tasks/Toolbar/CreatePRDialog.tsx @@ -8,24 +8,24 @@ import { } from '@/components/ui/dialog'; import { Label } from '@radix-ui/react-label'; import { Textarea } from '@/components/ui/textarea.tsx'; +import { Button } from '@/components/ui/button'; +import { Input } from '@/components/ui/input'; import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue, -} from '@radix-ui/react-select'; -import { Button } from '@/components/ui/button'; -import { Input } from '@/components/ui/input'; +} from '@/components/ui/select'; import { useCallback, useContext, useEffect, useState } from 'react'; import { TaskDetailsContext, TaskSelectedAttemptContext, } from '@/components/context/taskDetailsContext.ts'; -import { ApiError, attemptsApi } from '@/lib/api.ts'; +import { attemptsApi } from '@/lib/api.ts'; import { ProvidePatDialog } from '@/components/ProvidePatDialog'; import { GitHubLoginDialog } from '@/components/GitHubLoginDialog'; -import { GitBranch } from 'shared/types.ts'; +import { GitBranch, GitHubServiceError } from 'shared/types'; type Props = { showCreatePRDialog: boolean; @@ -75,58 +75,45 @@ function CreatePrDialog({ setCreatingPR(true); - try { - const prUrl = await attemptsApi.createPR( - projectId!, - selectedAttempt.task_id, - selectedAttempt.id, - { - title: prTitle, - body: prBody || null, - base_branch: prBaseBranch || null, - } - ); - // Open the PR URL in a new tab - window.open(prUrl, '_blank'); + const result = await attemptsApi.createPR(selectedAttempt.id, { + title: prTitle, + body: prBody || null, + base_branch: prBaseBranch || null, + }); + + if (result.success) { + window.open(result.data, '_blank'); setShowCreatePRDialog(false); // Reset form setPrTitle(''); setPrBody(''); setPrBaseBranch(selectedAttempt?.base_branch || 'main'); - } catch (err) { - const error = err as ApiError; - if ( - error.message === - 'GitHub authentication not configured. Please sign in with GitHub.' - ) { + } else { + if (result.error) { setShowCreatePRDialog(false); - setShowGitHubLoginDialog(true); - } else if (error.message === 'insufficient_github_permissions') { - setShowCreatePRDialog(false); - setPatDialogError(null); - setShowPatDialog(true); - } else if (error.message === 'github_repo_not_found_or_no_access') { - setShowCreatePRDialog(false); - setPatDialogError( - 'Your token does not have access to this repository, or the repository does not exist. Please check the repository URL and/or provide a Personal Access Token with access.' - ); - setShowPatDialog(true); - } else if (error.status === 403) { - setShowCreatePRDialog(false); - setPatDialogError(null); - setShowPatDialog(true); - } else if (error.status === 404) { - setShowCreatePRDialog(false); - setPatDialogError( - 'Your token does not have access to this repository, or the repository does not exist. Please check the repository URL and/or provide a Personal Access Token with access.' 
- ); - setShowPatDialog(true); + switch (result.error) { + case GitHubServiceError.TOKEN_INVALID: + setShowGitHubLoginDialog(true); + break; + case GitHubServiceError.INSUFFICIENT_PERMISSIONS: + setPatDialogError(null); + setShowPatDialog(true); + break; + case GitHubServiceError.REPO_NOT_FOUND_OR_NO_ACCESS: + setPatDialogError( + 'Your token does not have access to this repository, or the repository does not exist. Please check the repository URL and/or provide a Personal Access Token with access.' + ); + setShowPatDialog(true); + break; + } + } else if (result.message) { + setError(result.message); } else { - setError(error.message || 'Failed to create GitHub PR'); + setError('Failed to create GitHub PR'); } - } finally { - setCreatingPR(false); } + + setCreatingPR(false); }, [ projectId, selectedAttempt, diff --git a/frontend/src/components/tasks/Toolbar/CurrentAttempt.tsx b/frontend/src/components/tasks/Toolbar/CurrentAttempt.tsx index 5387d938..e2f5c9d4 100644 --- a/frontend/src/components/tasks/Toolbar/CurrentAttempt.tsx +++ b/frontend/src/components/tasks/Toolbar/CurrentAttempt.tsx @@ -10,7 +10,6 @@ import { Settings, StopCircle, } from 'lucide-react'; -import { is_planning_executor_type } from '@/lib/utils'; import { Tooltip, TooltipContent, @@ -33,13 +32,7 @@ import { DialogTitle, } from '@/components/ui/dialog.tsx'; import BranchSelector from '@/components/tasks/BranchSelector.tsx'; -import { - attemptsApi, - executionProcessesApi, - makeRequest, - FollowUpResponse, - ApiResponse, -} from '@/lib/api.ts'; +import { attemptsApi, executionProcessesApi } from '@/lib/api.ts'; import { Dispatch, SetStateAction, @@ -49,24 +42,16 @@ import { useMemo, useState, } from 'react'; -import type { - BranchStatus, - ExecutionProcess, - GitBranch, - TaskAttempt, -} from 'shared/types.ts'; +import type { ExecutionProcess } from 'shared/types'; +import type { BranchStatus, GitBranch, TaskAttempt } from 'shared/types'; import { TaskAttemptDataContext, TaskAttemptStoppingContext, TaskDetailsContext, - TaskExecutionStateContext, - TaskRelatedTasksContext, TaskSelectedAttemptContext, } from '@/components/context/taskDetailsContext.ts'; -import { useTaskPlan } from '@/components/context/TaskPlanContext.ts'; import { useConfig } from '@/components/config-provider.tsx'; import { useKeyboardShortcuts } from '@/lib/keyboard-shortcuts.ts'; -import { useNavigate } from 'react-router-dom'; // Helper function to get the display name for different editor types function getEditorDisplayName(editorType: string): string { @@ -96,10 +81,6 @@ type Props = { taskAttempts: TaskAttempt[]; creatingPR: boolean; handleEnterCreateAttemptMode: () => void; - availableExecutors: { - id: string; - name: string; - }[]; branches: GitBranch[]; }; @@ -111,23 +92,16 @@ function CurrentAttempt({ taskAttempts, creatingPR, handleEnterCreateAttemptMode, - availableExecutors, branches, }: Props) { const { task, projectId, handleOpenInEditor, projectHasDevScript } = useContext(TaskDetailsContext); const { config } = useConfig(); const { setSelectedAttempt } = useContext(TaskSelectedAttemptContext); - const navigate = useNavigate(); const { isStopping, setIsStopping } = useContext(TaskAttemptStoppingContext); const { attemptData, fetchAttemptData, isAttemptRunning } = useContext( TaskAttemptDataContext ); - const { relatedTasks } = useContext(TaskRelatedTasksContext); - const { executionState, fetchExecutionState } = useContext( - TaskExecutionStateContext - ); - const { isPlanningMode, canCreateTask } = useTaskPlan(); const 
[isStartingDevServer, setIsStartingDevServer] = useState(false); const [merging, setMerging] = useState(false); @@ -140,36 +114,24 @@ function CurrentAttempt({ const [showRebaseDialog, setShowRebaseDialog] = useState(false); const [selectedRebaseBranch, setSelectedRebaseBranch] = useState(''); const [showStopConfirmation, setShowStopConfirmation] = useState(false); - const [isApprovingPlan, setIsApprovingPlan] = useState(false); const [copied, setCopied] = useState(false); const processedDevServerLogs = useMemo(() => { if (!devServerDetails) return 'No output yet...'; - const stdout = devServerDetails.stdout || ''; - const stderr = devServerDetails.stderr || ''; - const allOutput = stdout + (stderr ? '\n' + stderr : ''); - const lines = allOutput.split('\n').filter((line) => line.trim()); - const lastLines = lines.slice(-10); - return lastLines.length > 0 ? lastLines.join('\n') : 'No output yet...'; + // TODO: stdout/stderr fields need to be restored to ExecutionProcess type + // For now, show basic status information + return `Status: ${devServerDetails.status}\nStarted: ${devServerDetails.started_at}`; }, [devServerDetails]); // Find running dev server in current project const runningDevServer = useMemo(() => { return attemptData.processes.find( (process) => - process.process_type === 'devserver' && process.status === 'running' + process.run_reason === 'devserver' && process.status === 'running' ); }, [attemptData.processes]); - // Check if plan approval is needed - const isPlanTask = useMemo(() => { - return !!( - selectedAttempt.executor && - is_planning_executor_type(selectedAttempt.executor) - ); - }, [selectedAttempt.executor]); - const fetchDevServerDetails = useCallback(async () => { if (!runningDevServer || !task || !selectedAttempt) return; @@ -200,11 +162,7 @@ function CurrentAttempt({ setIsStartingDevServer(true); try { - await attemptsApi.startDevServer( - projectId, - selectedAttempt.task_id, - selectedAttempt.id - ); + await attemptsApi.startDevServer(selectedAttempt.id); fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); } catch (err) { console.error('Failed to start dev server:', err); @@ -219,12 +177,7 @@ function CurrentAttempt({ setIsStartingDevServer(true); try { - await attemptsApi.stopExecutionProcess( - projectId, - selectedAttempt.task_id, - selectedAttempt.id, - runningDevServer.id - ); + await executionProcessesApi.stopExecutionProcess(runningDevServer.id); fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); } catch (err) { console.error('Failed to stop dev server:', err); @@ -238,11 +191,7 @@ function CurrentAttempt({ try { setIsStopping(true); - await attemptsApi.stop( - projectId, - selectedAttempt.task_id, - selectedAttempt.id - ); + await attemptsApi.stop(selectedAttempt.id); await fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); setTimeout(() => { fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); @@ -276,9 +225,8 @@ function CurrentAttempt({ (attempt: TaskAttempt) => { setSelectedAttempt(attempt); fetchAttemptData(attempt.id, attempt.task_id); - fetchExecutionState(attempt.id, attempt.task_id); }, - [fetchAttemptData, fetchExecutionState, setSelectedAttempt] + [fetchAttemptData, setSelectedAttempt] ); const handleMergeClick = async () => { @@ -289,15 +237,11 @@ function CurrentAttempt({ }; const fetchBranchStatus = useCallback(async () => { - if (!projectId || !selectedAttempt?.id || !selectedAttempt?.task_id) return; + if (!selectedAttempt?.id) return; try { setBranchStatusLoading(true); - const result = 
await attemptsApi.getBranchStatus( - projectId, - selectedAttempt.task_id, - selectedAttempt.id - ); + const result = await attemptsApi.getBranchStatus(selectedAttempt.id); setBranchStatus((prev) => { if (JSON.stringify(prev) === JSON.stringify(result)) return prev; return result; @@ -321,11 +265,7 @@ function CurrentAttempt({ try { setMerging(true); - await attemptsApi.merge( - projectId, - selectedAttempt.task_id, - selectedAttempt.id - ); + await attemptsApi.merge(selectedAttempt.id); // Refetch branch status to show updated state fetchBranchStatus(); } catch (error) { @@ -342,11 +282,7 @@ function CurrentAttempt({ try { setRebasing(true); - await attemptsApi.rebase( - projectId, - selectedAttempt.task_id, - selectedAttempt.id - ); + await attemptsApi.rebase(selectedAttempt.id, { new_base_branch: null }); // Refresh branch status after rebase fetchBranchStatus(); } catch (err) { @@ -361,12 +297,9 @@ function CurrentAttempt({ try { setRebasing(true); - await attemptsApi.rebase( - projectId, - selectedAttempt.task_id, - selectedAttempt.id, - newBaseBranch - ); + await attemptsApi.rebase(selectedAttempt.id, { + new_base_branch: newBaseBranch, + }); // Refresh branch status after rebase fetchBranchStatus(); setShowRebaseDialog(false); @@ -400,48 +333,6 @@ function CurrentAttempt({ setShowCreatePRDialog(true); }; - const handlePlanApproval = async () => { - if (!task || !selectedAttempt || !isPlanTask) return; - - setIsApprovingPlan(true); - try { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${task.id}/attempts/${selectedAttempt.id}/approve-plan`, - { - method: 'POST', - // No body needed - endpoint only handles approval now - } - ); - - if (response.ok) { - const result: ApiResponse = await response.json(); - if (result.success && result.data) { - console.log('Plan approved successfully:', result.message); - - // If a new task was created, navigate to it - if (result.data.created_new_attempt) { - const newTaskId = result.data.actual_attempt_id; - console.log('Navigating to new task:', newTaskId); - navigate(`/projects/${projectId}/tasks/${newTaskId}`); - } else { - // Otherwise, just refresh the current task data - fetchAttemptData(selectedAttempt.id, selectedAttempt.task_id); - } - } else { - setError(`Failed to approve plan: ${result.message}`); - } - } else { - setError('Failed to approve plan'); - } - } catch (error) { - setError( - `Error approving plan: ${error instanceof Error ? error.message : 'Unknown error'}` - ); - } finally { - setIsApprovingPlan(false); - } - }; - // Get display name for selected branch const selectedBranchDisplayName = useMemo(() => { if (!selectedBranch) return 'current'; @@ -462,13 +353,13 @@ function CurrentAttempt({ const handleCopyWorktreePath = useCallback(async () => { try { - await navigator.clipboard.writeText(selectedAttempt.worktree_path); + await navigator.clipboard.writeText(selectedAttempt.container_ref || ''); setCopied(true); setTimeout(() => setCopied(false), 2000); } catch (err) { console.error('Failed to copy worktree path:', err); } - }, [selectedAttempt.worktree_path]); + }, [selectedAttempt.container_ref]); return (
@@ -488,13 +379,10 @@ function CurrentAttempt({
- Agent + Base Agent
- {availableExecutors.find((e) => e.id === selectedAttempt.executor) - ?.name || - selectedAttempt.executor || - 'Unknown'} + {selectedAttempt.base_coding_agent}
@@ -509,10 +397,7 @@ function CurrentAttempt({ size="sm" onClick={handleRebaseDialogOpen} disabled={ - rebasing || - branchStatusLoading || - isAttemptRunning || - isPlanTask + rebasing || branchStatusLoading || isAttemptRunning } className="h-4 w-4 p-0 hover:bg-muted" > @@ -535,28 +420,10 @@ function CurrentAttempt({
- {isPlanTask ? 'Plan Status' : 'Merge Status'} + Merge Status
- {isPlanTask ? ( - // Plan status for planning tasks - relatedTasks && relatedTasks.length > 0 ? ( -
-
- - Task Created - -
- ) : ( -
-
- - Draft - -
- ) - ) : // Merge status for regular tasks - selectedAttempt.merge_commit ? ( + {selectedAttempt.merge_commit ? (
@@ -604,7 +471,7 @@ function CurrentAttempt({ > {copied && } - {selectedAttempt.worktree_path} + {selectedAttempt.container_ref} {copied && ( Copied! @@ -706,7 +573,7 @@ function CurrentAttempt({ {new Date(attempt.created_at).toLocaleTimeString()} - {attempt.executor || 'executor'} + {attempt.base_coding_agent || 'Base Agent'}
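// Illustrative sketch (not part of this changeset). It assumes the attempt-scoped
// attemptsApi surface introduced later in this diff (frontend/src/lib/api.ts), where
// stop/merge/getBranchStatus take only an attemptId and rebase takes an attemptId plus
// a RebaseTaskAttemptRequest body. The helper name below is hypothetical.
import { attemptsApi } from '@/lib/api';
import type { BranchStatus } from 'shared/types';

// Hypothetical helper: rebase an attempt (null keeps the current base branch,
// mirroring handleRebaseClick above) and then re-read its branch status.
async function rebaseAndRefresh(
  attemptId: string,
  newBaseBranch: string | null
): Promise<BranchStatus> {
  await attemptsApi.rebase(attemptId, { new_base_branch: newBaseBranch });
  return attemptsApi.getBranchStatus(attemptId);
}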
@@ -718,41 +585,21 @@ function CurrentAttempt({ {/* Git Operations */} {selectedAttempt && branchStatus && ( <> - {branchStatus.is_behind && - !branchStatus.merged && - !isPlanTask && ( - - )} - {isPlanTask ? ( - // Plan tasks: show approval button + {branchStatus.is_behind && !branchStatus.merged && ( - ) : ( + )} + { // Normal merge and PR buttons for regular tasks !branchStatus.merged && ( <> @@ -789,7 +636,7 @@ function CurrentAttempt({ ) - )} + } )} diff --git a/frontend/src/components/theme-provider.tsx b/frontend/src/components/theme-provider.tsx index 2e3f903a..9b159a7b 100644 --- a/frontend/src/components/theme-provider.tsx +++ b/frontend/src/components/theme-provider.tsx @@ -1,5 +1,5 @@ import React, { createContext, useContext, useEffect, useState } from 'react'; -import type { ThemeMode } from 'shared/types'; +import { ThemeMode } from 'shared/types'; type ThemeProviderProps = { children: React.ReactNode; @@ -12,7 +12,7 @@ type ThemeProviderState = { }; const initialState: ThemeProviderState = { - theme: 'system', + theme: ThemeMode.SYSTEM, setTheme: () => null, }; @@ -20,7 +20,7 @@ const ThemeProviderContext = createContext(initialState); export function ThemeProvider({ children, - initialTheme = 'system', + initialTheme = ThemeMode.SYSTEM, ...props }: ThemeProviderProps) { const [theme, setThemeState] = useState(initialTheme); @@ -43,7 +43,7 @@ export function ThemeProvider({ 'red' ); - if (theme === 'system') { + if (theme === ThemeMode.SYSTEM) { const systemTheme = window.matchMedia('(prefers-color-scheme: dark)') .matches ? 'dark' @@ -53,7 +53,7 @@ export function ThemeProvider({ return; } - root.classList.add(theme); + root.classList.add(theme.toLowerCase()); }, [theme]); const setTheme = (newTheme: ThemeMode) => { diff --git a/frontend/src/components/theme-toggle.tsx b/frontend/src/components/theme-toggle.tsx index 1698ad52..04b785cd 100644 --- a/frontend/src/components/theme-toggle.tsx +++ b/frontend/src/components/theme-toggle.tsx @@ -7,6 +7,7 @@ import { DropdownMenuTrigger, } from '@/components/ui/dropdown-menu'; import { useTheme } from '@/components/theme-provider'; +import { ThemeMode } from 'shared/types'; export function ThemeToggle() { const { setTheme } = useTheme(); @@ -21,13 +22,13 @@ export function ThemeToggle() { - setTheme('light')}> + setTheme(ThemeMode.LIGHT)}> Light - setTheme('dark')}> + setTheme(ThemeMode.DARK)}> Dark - setTheme('system')}> + setTheme(ThemeMode.SYSTEM)}> System diff --git a/frontend/src/components/ui/file-search-textarea.tsx b/frontend/src/components/ui/file-search-textarea.tsx index 35965298..d84e6108 100644 --- a/frontend/src/components/ui/file-search-textarea.tsx +++ b/frontend/src/components/ui/file-search-textarea.tsx @@ -3,8 +3,9 @@ import { createPortal } from 'react-dom'; import { AutoExpandingTextarea } from '@/components/ui/auto-expanding-textarea'; import { projectsApi } from '@/lib/api'; -interface FileSearchResult { - path: string; +import type { SearchResult } from 'shared/types'; + +interface FileSearchResult extends SearchResult { name: string; } @@ -55,7 +56,12 @@ export function FileSearchTextarea({ try { const result = await projectsApi.searchFiles(projectId, searchQuery); - setSearchResults(result); + // Transform SearchResult to FileSearchResult by adding name field + const fileResults: FileSearchResult[] = result.map((item) => ({ + ...item, + name: item.path.split('/').pop() || item.path, + })); + setSearchResults(fileResults); setShowDropdown(true); setSelectedIndex(-1); } catch (error) { diff --git 
a/frontend/src/components/ui/folder-picker.tsx b/frontend/src/components/ui/folder-picker.tsx index 0a7989d3..74510311 100644 --- a/frontend/src/components/ui/folder-picker.tsx +++ b/frontend/src/components/ui/folder-picker.tsx @@ -20,7 +20,7 @@ import { Search, } from 'lucide-react'; import { fileSystemApi } from '@/lib/api'; -import { DirectoryEntry } from 'shared/types'; +import { DirectoryEntry, DirectoryListResponse } from 'shared/types'; interface FolderPickerProps { open: boolean; @@ -65,7 +65,7 @@ export function FolderPicker({ setError(''); try { - const result = await fileSystemApi.list(path); + const result: DirectoryListResponse = await fileSystemApi.list(path); // Ensure result exists and has the expected structure if (!result || typeof result !== 'object') { diff --git a/frontend/src/hooks/use-system-info.ts b/frontend/src/hooks/use-system-info.ts deleted file mode 100644 index 995e84ee..00000000 --- a/frontend/src/hooks/use-system-info.ts +++ /dev/null @@ -1,40 +0,0 @@ -import { useState, useEffect } from 'react'; - -interface SystemInfo { - os_type: string; - os_version: string; - architecture: string; - bitness: string; -} - -export function useSystemInfo() { - const [systemInfo, setSystemInfo] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setError] = useState(null); - - useEffect(() => { - const fetchSystemInfo = async () => { - try { - const response = await fetch('/api/config'); - if (!response.ok) { - throw new Error('Failed to fetch system info'); - } - const data = await response.json(); - - if (data.success && data.data?.environment) { - setSystemInfo(data.data.environment); - } else { - throw new Error('Invalid response format'); - } - } catch (err) { - setError(err instanceof Error ? err.message : 'Unknown error'); - } finally { - setLoading(false); - } - }; - - fetchSystemInfo(); - }, []); - - return { systemInfo, loading, error }; -} diff --git a/frontend/src/hooks/useDiffStream.ts b/frontend/src/hooks/useDiffStream.ts new file mode 100644 index 00000000..796e07b9 --- /dev/null +++ b/frontend/src/hooks/useDiffStream.ts @@ -0,0 +1,38 @@ +import { useCallback } from 'react'; +import type { FileDiff } from 'shared/types'; +import { useJsonPatchStream } from './useJsonPatchStream'; + +interface DiffState { + entries: Record; +} + +interface UseDiffStreamResult { + diff: DiffState | undefined; + isConnected: boolean; + error: string | null; +} + +export const useDiffStream = ( + attemptId: string | null, + enabled: boolean +): UseDiffStreamResult => { + const endpoint = attemptId + ? 
`/api/task-attempts/${attemptId}/diff`
+    : undefined;
+
+  const initialData = useCallback(
+    (): DiffState => ({
+      entries: {},
+    }),
+    []
+  );
+
+  const { data, isConnected, error } = useJsonPatchStream(
+    endpoint,
+    enabled && !!attemptId,
+    initialData
+    // No need for injectInitialEntry or deduplicatePatches for diffs
+  );
+
+  return { diff: data, isConnected, error };
+};
diff --git a/frontend/src/hooks/useEventSourceManager.ts b/frontend/src/hooks/useEventSourceManager.ts
new file mode 100644
index 00000000..f7d01e16
--- /dev/null
+++ b/frontend/src/hooks/useEventSourceManager.ts
@@ -0,0 +1,159 @@
+import { useEffect, useState, useRef } from 'react';
+import { applyPatch } from 'rfc6902';
+import type { ExecutionProcessSummary } from 'shared/types';
+import type { ProcessStartPayload } from '@/types/logs';
+
+interface ProcessData {
+  [processId: string]: any;
+}
+
+interface UseEventSourceManagerParams {
+  processes: ExecutionProcessSummary[];
+  enabled: boolean;
+  getEndpoint: (process: ExecutionProcessSummary) => string;
+  initialData?: any;
+}
+
+interface UseEventSourceManagerResult {
+  processData: ProcessData;
+  isConnected: boolean;
+  error: string | null;
+}
+
+export const useEventSourceManager = ({
+  processes,
+  enabled,
+  getEndpoint,
+  initialData = null,
+}: UseEventSourceManagerParams): UseEventSourceManagerResult => {
+  const [processData, setProcessData] = useState<ProcessData>({});
+  const [isConnected, setIsConnected] = useState(false);
+  const [error, setError] = useState<string | null>(null);
+  const eventSourcesRef = useRef<Map<string, EventSource>>(new Map());
+  const processDataRef = useRef<ProcessData>({});
+  const processedEntriesRef = useRef<Map<string, Set<number>>>(new Map());
+
+  useEffect(() => {
+    if (!enabled || !processes.length) {
+      // Close all connections and reset state
+      eventSourcesRef.current.forEach((es) => es.close());
+      eventSourcesRef.current.clear();
+      setProcessData({});
+      setIsConnected(false);
+      setError(null);
+      processDataRef.current = {};
+      processedEntriesRef.current.clear();
+      return;
+    }
+
+    const currentIds = new Set(processes.map((p) => p.id));
+
+    // Remove old connections
+    eventSourcesRef.current.forEach((es, id) => {
+      if (!currentIds.has(id)) {
+        es.close();
+        eventSourcesRef.current.delete(id);
+        delete processDataRef.current[id];
+        processedEntriesRef.current.delete(id);
+      }
+    });
+
+    // Add new connections
+    processes.forEach((process) => {
+      if (eventSourcesRef.current.has(process.id)) return;
+
+      const endpoint = getEndpoint(process);
+
+      // Initialize process data
+      if (!processDataRef.current[process.id]) {
+        processDataRef.current[process.id] = initialData
+          ?
structuredClone(initialData) + : { entries: [] }; + + // Inject process start marker as the first entry + const processStartPayload: ProcessStartPayload = { + processId: process.id, + runReason: process.run_reason, + startedAt: process.started_at, + status: process.status, + }; + + const processStartEntry = { + type: 'PROCESS_START' as const, + content: processStartPayload, + }; + + processDataRef.current[process.id].entries.push(processStartEntry); + } + + const eventSource = new EventSource(endpoint); + + eventSource.onopen = () => { + setError(null); + }; + + eventSource.addEventListener('json_patch', (event) => { + try { + const patches = JSON.parse(event.data); + + // Initialize tracking for this process if needed + if (!processedEntriesRef.current.has(process.id)) { + processedEntriesRef.current.set(process.id, new Set()); + } + + const processedSet = processedEntriesRef.current.get(process.id)!; + + // Filter out patches we've already processed + const newPatches = patches.filter((patch: any) => { + // Extract entry index from path like "/entries/123" + const match = patch.path?.match(/^\/entries\/(\d+)$/); + if (match && patch.op === 'add') { + const entryIndex = parseInt(match[1], 10); + if (processedSet.has(entryIndex)) { + return false; // Already processed + } + processedSet.add(entryIndex); + } + // Always allow replace operations and non-entry patches + return true; + }); + + // Only apply new patches + if (newPatches.length > 0) { + applyPatch(processDataRef.current[process.id], newPatches); + + // Trigger re-render with updated data + setProcessData({ ...processDataRef.current }); + } + } catch (err) { + console.error('Failed to apply JSON patch:', err); + setError('Failed to process log update'); + } + }); + + eventSource.addEventListener('finished', () => { + eventSource.close(); + eventSourcesRef.current.delete(process.id); + setIsConnected(eventSourcesRef.current.size > 0); + }); + + eventSource.onerror = () => { + setError('Connection failed'); + eventSource.close(); + eventSourcesRef.current.delete(process.id); + setIsConnected(eventSourcesRef.current.size > 0); + }; + + eventSourcesRef.current.set(process.id, eventSource); + }); + + setIsConnected(eventSourcesRef.current.size > 0); + + return () => { + eventSourcesRef.current.forEach((es) => es.close()); + eventSourcesRef.current.clear(); + }; + }, [processes, enabled, getEndpoint, initialData]); + + return { processData, isConnected, error }; +}; diff --git a/frontend/src/hooks/useJsonPatchStream.ts b/frontend/src/hooks/useJsonPatchStream.ts new file mode 100644 index 00000000..4055245f --- /dev/null +++ b/frontend/src/hooks/useJsonPatchStream.ts @@ -0,0 +1,127 @@ +import { useEffect, useState, useRef } from 'react'; +import { applyPatch } from 'rfc6902'; +import type { Operation } from 'rfc6902'; + +interface UseJsonPatchStreamOptions { + /** + * Called once when the stream starts to inject initial data + */ + injectInitialEntry?: (data: T) => void; + /** + * Filter/deduplicate patches before applying them + */ + deduplicatePatches?: (patches: Operation[]) => Operation[]; +} + +interface UseJsonPatchStreamResult { + data: T | undefined; + isConnected: boolean; + error: string | null; +} + +/** + * Generic hook for consuming SSE streams that send JSON patches + */ +export const useJsonPatchStream = ( + endpoint: string | undefined, + enabled: boolean, + initialData: () => T, + options: UseJsonPatchStreamOptions = {} +): UseJsonPatchStreamResult => { + const [data, setData] = useState(undefined); + const 
[isConnected, setIsConnected] = useState(false); + const [error, setError] = useState(null); + const eventSourceRef = useRef(null); + const dataRef = useRef(undefined); + + useEffect(() => { + if (!enabled || !endpoint) { + // Close connection and reset state + if (eventSourceRef.current) { + eventSourceRef.current.close(); + eventSourceRef.current = null; + } + setData(undefined); + setIsConnected(false); + setError(null); + dataRef.current = undefined; + return; + } + + // Initialize data + if (!dataRef.current) { + dataRef.current = initialData(); + + // Inject initial entry if provided + if (options.injectInitialEntry) { + options.injectInitialEntry(dataRef.current); + } + + setData({ ...dataRef.current }); + } + + // Create EventSource if it doesn't exist + if (!eventSourceRef.current) { + const eventSource = new EventSource(endpoint); + + eventSource.onopen = () => { + setError(null); + setIsConnected(true); + }; + + eventSource.addEventListener('json_patch', (event) => { + try { + const patches: Operation[] = JSON.parse(event.data); + + // Apply deduplication if provided + const filteredPatches = options.deduplicatePatches + ? options.deduplicatePatches(patches) + : patches; + + // Only apply patches if there are any left after filtering + if (filteredPatches.length > 0 && dataRef.current) { + applyPatch(dataRef.current, filteredPatches); + + // Trigger re-render with updated data + setData({ ...dataRef.current }); + } + } catch (err) { + console.error('Failed to apply JSON patch:', err); + setError('Failed to process stream update'); + } + }); + + eventSource.addEventListener('finished', () => { + eventSource.close(); + eventSourceRef.current = null; + setIsConnected(false); + }); + + eventSource.onerror = () => { + setError('Connection failed'); + eventSource.close(); + eventSourceRef.current = null; + setIsConnected(false); + }; + + eventSourceRef.current = eventSource; + } + + return () => { + if (eventSourceRef.current) { + eventSourceRef.current.close(); + eventSourceRef.current = null; + } + dataRef.current = undefined; + setData(undefined); + }; + }, [ + endpoint, + enabled, + initialData, + options.injectInitialEntry, + options.deduplicatePatches, + ]); + + return { data, isConnected, error }; +}; diff --git a/frontend/src/hooks/useLogStream.ts b/frontend/src/hooks/useLogStream.ts new file mode 100644 index 00000000..e1691fae --- /dev/null +++ b/frontend/src/hooks/useLogStream.ts @@ -0,0 +1,74 @@ +import { useEffect, useState, useRef } from 'react'; + +interface UseLogStreamResult { + logs: string[]; + isConnected: boolean; + error: string | null; +} + +export const useLogStream = ( + processId: string, + enabled: boolean +): UseLogStreamResult => { + const [logs, setLogs] = useState([]); + const [isConnected, setIsConnected] = useState(false); + const [error, setError] = useState(null); + const eventSourceRef = useRef(null); + + useEffect(() => { + if (!enabled || !processId) { + return; + } + + const eventSource = new EventSource( + `/api/execution-processes/${processId}/raw-logs` + ); + eventSourceRef.current = eventSource; + + eventSource.onopen = () => { + setIsConnected(true); + setError(null); + }; + + eventSource.onmessage = (event) => { + // Handle default messages + setLogs((prev) => [...prev, event.data]); + }; + + eventSource.addEventListener('stdout', (event) => { + setLogs((prev) => [...prev, `stdout: ${event.data}`]); + }); + + eventSource.addEventListener('stderr', (event) => { + setLogs((prev) => [...prev, `stderr: ${event.data}`]); + }); + + 
eventSource.addEventListener('finished', () => { + setLogs((prev) => [...prev, '--- Stream finished ---']); + eventSource.close(); + setIsConnected(false); + }); + + eventSource.onerror = () => { + setError('Connection failed'); + setIsConnected(false); + eventSource.close(); + }; + + return () => { + eventSource.close(); + setIsConnected(false); + }; + }, [processId, enabled]); + + // Reset logs when disabled + useEffect(() => { + if (!enabled) { + setLogs([]); + setError(null); + setIsConnected(false); + } + }, [enabled]); + + return { logs, isConnected, error }; +}; diff --git a/frontend/src/hooks/useNormalizedConversation.ts b/frontend/src/hooks/useNormalizedConversation.ts deleted file mode 100644 index 4506d1e1..00000000 --- a/frontend/src/hooks/useNormalizedConversation.ts +++ /dev/null @@ -1,440 +0,0 @@ -import { - TaskAttemptDataContext, - TaskDetailsContext, -} from '@/components/context/taskDetailsContext'; -import { fetchEventSource } from '@microsoft/fetch-event-source'; -import { applyPatch } from 'fast-json-patch'; -import { - useCallback, - useContext, - useEffect, - useMemo, - useRef, - useState, -} from 'react'; -import { - ExecutionProcess, - NormalizedConversation, - NormalizedEntry, -} from 'shared/types'; - -const useNormalizedConversation = ({ - executionProcess, - onConversationUpdate, - onDisplayEntriesChange, - visibleEntriesNum, -}: { - executionProcess?: ExecutionProcess; - onConversationUpdate?: () => void; - onDisplayEntriesChange?: (num: number) => void; - visibleEntriesNum?: number; -}) => { - const { projectId } = useContext(TaskDetailsContext); - const { attemptData } = useContext(TaskAttemptDataContext); - - // Development-only logging helper - const debugLog = useCallback((message: string, ...args: any[]) => { - if (import.meta.env.DEV) { - console.log(message, ...args); - } - }, []); - - const [conversation, setConversation] = - useState(null); - const [loading, setLoading] = useState(true); - const [error, setError] = useState(null); - - // Track fetched processes to prevent redundant database calls - const fetchedProcesses = useRef(new Set()); - - // SSE Connection Manager - production-ready with reconnection and resilience - const sseManagerRef = useRef<{ - abortController: AbortController | null; - isActive: boolean; - highestBatchId: number; - reconnectAttempts: number; - reconnectTimeout: number | null; - processId: string; - processStatus: string; - patchFailureCount: number; - onopenCalled: boolean; - }>({ - abortController: null, - isActive: false, - highestBatchId: 0, - reconnectAttempts: 0, - reconnectTimeout: null, - processId: executionProcess?.id || '', - processStatus: executionProcess?.status || '', - patchFailureCount: 0, - onopenCalled: false, - }); - - // SSE Connection Manager with Production-Ready Resilience using fetch-event-source - const createSSEConnection = useCallback( - (processId: string, projectId: string): AbortController => { - const manager = sseManagerRef.current; - // Build URL with resume cursor if we have processed batches - const baseUrl = `/api/projects/${projectId}/execution-processes/${processId}/normalized-logs/stream`; - const url = - manager.highestBatchId > 0 - ? 
`${baseUrl}?since_batch_id=${manager.highestBatchId}` - : baseUrl; - debugLog( - `🚀 SSE: Creating connection for process ${processId} (cursor: ${manager.highestBatchId})` - ); - - const abortController = new AbortController(); - - fetchEventSource(url, { - signal: abortController.signal, - onopen: async (response) => { - const manager = sseManagerRef.current; - if (manager.onopenCalled) { - // This is a "phantom" reconnect, so abort and re-create - debugLog( - '⚠️ SSE: onopen called again for same connection, forcing reconnect' - ); - abortController.abort(); - manager.abortController = null; - manager.isActive = false; - manager.onopenCalled = false; - // Re-establish with latest cursor - scheduleReconnect(processId, projectId); - return; - } - manager.onopenCalled = true; - if (response.ok) { - debugLog(`✅ SSE: Connected to ${processId}`); - manager.isActive = true; - manager.reconnectAttempts = 0; // Reset on successful connection - manager.patchFailureCount = 0; // Reset patch failure count - - if (manager.reconnectTimeout) { - clearTimeout(manager.reconnectTimeout); - manager.reconnectTimeout = null; - } - } else { - throw new Error(`SSE connection failed: ${response.status}`); - } - }, - onmessage: (event) => { - if (event.event === 'patch') { - try { - const batchData = JSON.parse(event.data); - const { batch_id, patches } = batchData; - - // Skip duplicates - use manager's batch tracking - if (batch_id && batch_id <= manager.highestBatchId) { - debugLog( - `⏭️ SSE: Skipping duplicate batch_id=${batch_id} (current=${manager.highestBatchId})` - ); - return; - } - - // Update cursor BEFORE processing - if (batch_id) { - manager.highestBatchId = batch_id; - debugLog(`📍 SSE: Processing batch_id=${batch_id}`); - } - - setConversation((prev) => { - // Create empty conversation if none exists - const baseConversation = prev || { - entries: [], - session_id: null, - executor_type: 'unknown', - prompt: null, - summary: null, - }; - - try { - const updated = applyPatch( - JSON.parse(JSON.stringify(baseConversation)), - patches - ).newDocument as NormalizedConversation; - - updated.entries = updated.entries.filter(Boolean); - - debugLog( - `🔧 SSE: Applied batch_id=${batch_id}, entries: ${updated.entries.length}` - ); - - // Reset patch failure count on successful application - manager.patchFailureCount = 0; - - // Clear loading state on first successful patch - if (!prev) { - setLoading(false); - setError(null); - } - - if (onConversationUpdate) { - setTimeout(onConversationUpdate, 0); - } - - return updated; - } catch (patchError) { - console.warn('❌ SSE: Patch failed:', patchError); - // Reset cursor on failure for potential retry - if (batch_id && batch_id > 0) { - manager.highestBatchId = batch_id - 1; - } - // Track patch failures for monitoring - manager.patchFailureCount++; - debugLog( - `⚠️ SSE: Patch failure #${manager.patchFailureCount} for batch_id=${batch_id}` - ); - return prev || baseConversation; - } - }); - } catch (e) { - console.warn('❌ SSE: Parse failed:', e); - } - } - }, - onerror: (err) => { - console.warn(`🔌 SSE: Connection error for ${processId}:`, err); - manager.isActive = false; - - // Only attempt reconnection if process is still running - if (manager.processStatus === 'running') { - scheduleReconnect(processId, projectId); - } - }, - onclose: () => { - debugLog(`🔌 SSE: Connection closed for ${processId}`); - manager.isActive = false; - }, - }).catch((error) => { - if (error.name !== 'AbortError') { - console.warn(`❌ SSE: Fetch error for ${processId}:`, error); - 
manager.isActive = false; - - // Only attempt reconnection if process is still running - if (manager.processStatus === 'running') { - scheduleReconnect(processId, projectId); - } - } - }); - - return abortController; - }, - [onConversationUpdate, debugLog] - ); - - const scheduleReconnect = useCallback( - (processId: string, projectId: string) => { - const manager = sseManagerRef.current; - - // Clear any existing reconnection timeout - if (manager.reconnectTimeout) { - clearTimeout(manager.reconnectTimeout); - } - - // Exponential backoff: 1s, 2s, 4s, 8s, max 30s - const delay = Math.min( - 1000 * Math.pow(2, manager.reconnectAttempts), - 30000 - ); - manager.reconnectAttempts++; - - debugLog( - `🔄 SSE: Scheduling reconnect attempt ${manager.reconnectAttempts} in ${delay}ms` - ); - - manager.reconnectTimeout = window.setTimeout(() => { - if (manager.processStatus === 'running') { - debugLog(`🔄 SSE: Attempting reconnect for ${processId}`); - establishSSEConnection(processId, projectId); - } - }, delay); - }, - [debugLog] - ); - - const establishSSEConnection = useCallback( - (processId: string, projectId: string) => { - const manager = sseManagerRef.current; - - // Close existing connection if any - if (manager.abortController) { - manager.abortController.abort(); - manager.abortController = null; - manager.isActive = false; - } - - const abortController = createSSEConnection(processId, projectId); - manager.abortController = abortController; - - return abortController; - }, - [createSSEConnection] - ); - - // Helper functions for SSE manager - const setProcessId = (id: string) => { - sseManagerRef.current.processId = id; - }; - const setProcessStatus = (status: string) => { - sseManagerRef.current.processStatus = status; - }; - - // Consolidated cleanup function to avoid duplication - const cleanupSSEConnection = useCallback(() => { - const manager = sseManagerRef.current; - - if (manager.abortController) { - manager.abortController.abort(); - manager.abortController = null; - manager.isActive = false; - } - - if (manager.reconnectTimeout) { - clearTimeout(manager.reconnectTimeout); - manager.reconnectTimeout = null; - } - manager.onopenCalled = false; - }, []); - - // Process-based data fetching - fetch once from appropriate source - useEffect(() => { - if (!executionProcess?.id || !executionProcess?.status) { - return; - } - const processId = executionProcess.id; - const processStatus = executionProcess.status; - - debugLog(`🎯 Data: Process ${processId} is ${processStatus}`); - - // Reset conversation state when switching processes - const manager = sseManagerRef.current; - if (manager.processId !== processId) { - setConversation(null); - setLoading(true); - setError(null); - - // Clear fetch tracking for old processes (keep memory bounded) - if (fetchedProcesses.current.size > 10) { - fetchedProcesses.current.clear(); - } - } - - if (processStatus === 'running') { - // Running processes: SSE will handle data (including initial state) - debugLog(`🚀 Data: Using SSE for running process ${processId}`); - // SSE connection will be established by the SSE management effect - } else { - // Completed processes: Single database fetch - debugLog(`📋 Data: Using database for completed process ${processId}`); - const logs = attemptData.allLogs.find( - (entry) => entry.id === executionProcess.id - )?.normalized_conversation; - if (logs) { - setConversation((prev) => { - // Only update if content actually changed - use lightweight comparison - if ( - !prev || - prev.entries.length !== 
logs.entries.length || - prev.prompt !== logs.prompt - ) { - // Notify parent component of conversation update - if (onConversationUpdate) { - // Use setTimeout to ensure state update happens first - setTimeout(onConversationUpdate, 0); - } - return logs; - } - return prev; - }); - } - setLoading(false); - } - }, [ - executionProcess?.id, - executionProcess?.status, - attemptData.allLogs, - debugLog, - onConversationUpdate, - ]); - - // SSE connection management for running processes only - useEffect(() => { - if (!executionProcess?.id || !executionProcess?.status) { - return; - } - const processId = executionProcess.id; - const processStatus = executionProcess.status; - const manager = sseManagerRef.current; - - // Update manager state - setProcessId(processId); - setProcessStatus(processStatus); - - // Only establish SSE for running processes - if (processStatus !== 'running') { - debugLog( - `🚫 SSE: Process ${processStatus}, cleaning up any existing connection` - ); - cleanupSSEConnection(); - return; - } - - // Check if connection already exists for same process ID - if (manager.abortController && manager.processId === processId) { - debugLog(`⚠️ SSE: Connection already exists for ${processId}, reusing`); - return; - } - - // Process changed - close existing and reset state - if (manager.abortController && manager.processId !== processId) { - debugLog(`🔄 SSE: Switching from ${manager.processId} to ${processId}`); - cleanupSSEConnection(); - manager.highestBatchId = 0; // Reset cursor for new process - manager.reconnectAttempts = 0; - manager.patchFailureCount = 0; // Reset failure count for new process - } - - // Update manager state - manager.processId = processId; - manager.processStatus = processStatus; - - // Establish new connection - establishSSEConnection(processId, projectId); - - return () => { - debugLog(`🔌 SSE: Cleanup connection for ${processId}`); - - // Close connection if it belongs to this effect - if (manager.abortController && manager.processId === processId) { - cleanupSSEConnection(); - } - }; - }, [executionProcess?.id, executionProcess?.status]); - - // Memoize display entries to avoid unnecessary re-renders - const displayEntries = useMemo(() => { - if (!conversation?.entries) return []; - - // Filter out any null entries that may have been created by duplicate patch application - const displayEntries = conversation.entries.filter( - (entry): entry is NormalizedEntry => - Boolean(entry && (entry as NormalizedEntry).entry_type) - ); - onDisplayEntriesChange?.(displayEntries.length); - if (visibleEntriesNum && displayEntries.length > visibleEntriesNum) { - return displayEntries.slice(-visibleEntriesNum); - } - - return displayEntries; - }, [conversation?.entries, onDisplayEntriesChange, visibleEntriesNum]); - - return { - displayEntries, - conversation, - loading, - error, - }; -}; - -export default useNormalizedConversation; diff --git a/frontend/src/hooks/useProcessConversation.ts b/frontend/src/hooks/useProcessConversation.ts new file mode 100644 index 00000000..a6495069 --- /dev/null +++ b/frontend/src/hooks/useProcessConversation.ts @@ -0,0 +1,92 @@ +import { useCallback } from 'react'; +import type { ProcessStartPayload } from '@/types/logs'; +import type { Operation } from 'rfc6902'; +import { useJsonPatchStream } from './useJsonPatchStream'; + +interface ProcessConversationData { + entries: any[]; // Mixed types: NormalizedEntry | ProcessStartPayload | PatchType + session_id: string | null; + executor_type: string; + prompt: string | null; + summary: string 
| null; +} + +interface UseProcessConversationResult { + entries: any[]; // Mixed types like the original + isConnected: boolean; + error: string | null; +} + +export const useProcessConversation = ( + processId: string, + enabled: boolean +): UseProcessConversationResult => { + const endpoint = processId + ? `/api/execution-processes/${processId}/normalized-logs` + : undefined; + + const initialData = useCallback( + (): ProcessConversationData => ({ + entries: [], + session_id: null, + executor_type: '', + prompt: null, + summary: null, + }), + [] + ); + + const injectInitialEntry = useCallback( + (data: ProcessConversationData) => { + if (processId) { + // Inject process start marker as the first entry + const processStartPayload: ProcessStartPayload = { + processId: processId, + runReason: 'Manual', // Default value since we don't have process details here + startedAt: new Date().toISOString(), + status: 'running', + }; + + const processStartEntry = { + type: 'PROCESS_START' as const, + content: processStartPayload, + }; + + data.entries.push(processStartEntry); + } + }, + [processId] + ); + + const deduplicatePatches = useCallback((patches: Operation[]) => { + const processedEntries = new Set(); + + return patches.filter((patch: any) => { + // Extract entry index from path like "/entries/123" + const match = patch.path?.match(/^\/entries\/(\d+)$/); + if (match && patch.op === 'add') { + const entryIndex = parseInt(match[1], 10); + if (processedEntries.has(entryIndex)) { + return false; // Already processed + } + processedEntries.add(entryIndex); + } + // Always allow replace operations and non-entry patches + return true; + }); + }, []); + + const { data, isConnected, error } = useJsonPatchStream( + endpoint, + enabled && !!processId, + initialData, + { + injectInitialEntry, + deduplicatePatches, + } + ); + + const entries = data?.entries || []; + + return { entries, isConnected, error }; +}; diff --git a/frontend/src/hooks/useProcessesLogs.ts b/frontend/src/hooks/useProcessesLogs.ts new file mode 100644 index 00000000..ebfe81e3 --- /dev/null +++ b/frontend/src/hooks/useProcessesLogs.ts @@ -0,0 +1,115 @@ +import { useMemo, useCallback } from 'react'; +import type { + ExecutionProcessSummary, + NormalizedEntry, + PatchType, +} from 'shared/types'; +import type { UnifiedLogEntry, ProcessStartPayload } from '@/types/logs'; +import { useEventSourceManager } from './useEventSourceManager'; + +interface UseProcessesLogsResult { + entries: UnifiedLogEntry[]; + isConnected: boolean; + error: string | null; +} + +const MAX_ENTRIES = 5000; + +export const useProcessesLogs = ( + processes: ExecutionProcessSummary[], + enabled: boolean +): UseProcessesLogsResult => { + const getEndpoint = useCallback((process: ExecutionProcessSummary) => { + // Coding agents use normalized logs endpoint, scripts use raw logs endpoint + // Both endpoints now return PatchType objects via JSON patches + const isCodingAgent = process.run_reason === 'codingagent'; + return isCodingAgent + ? 
`/api/execution-processes/${process.id}/normalized-logs` + : `/api/execution-processes/${process.id}/raw-logs`; + }, []); + + const initialData = useMemo(() => ({ entries: [] }), []); + + const { processData, isConnected, error } = useEventSourceManager({ + processes, + enabled, + getEndpoint, + initialData, + }); + + const entries = useMemo(() => { + const allEntries: UnifiedLogEntry[] = []; + let entryCounter = 0; + + // Iterate through processes in order, adding process marker followed by logs + processes.forEach((process) => { + const data = processData[process.id]; + if (!data?.entries) return; + + // Add process start marker first + const processStartPayload: ProcessStartPayload = { + processId: process.id, + runReason: process.run_reason, + startedAt: process.started_at, + status: process.status, + }; + + allEntries.push({ + id: `${process.id}-start`, + ts: entryCounter++, + processId: process.id, + processName: process.run_reason, + channel: 'process_start', + payload: processStartPayload, + }); + + // Then add all logs for this process (skip the injected PROCESS_START entry) + data.entries.forEach( + ( + patchEntry: + | PatchType + | { type: 'PROCESS_START'; content: ProcessStartPayload }, + index: number + ) => { + // Skip the injected PROCESS_START entry since we handle it above + if (patchEntry.type === 'PROCESS_START') return; + + let channel: UnifiedLogEntry['channel']; + let payload: string | NormalizedEntry; + + switch (patchEntry.type) { + case 'STDOUT': + channel = 'stdout'; + payload = patchEntry.content; + break; + case 'STDERR': + channel = 'stderr'; + payload = patchEntry.content; + break; + case 'NORMALIZED_ENTRY': + channel = 'normalized'; + payload = patchEntry.content; + break; + default: + // Skip unknown patch types + return; + } + + allEntries.push({ + id: `${process.id}-${index}`, + ts: entryCounter++, + processId: process.id, + processName: process.run_reason, + channel, + payload, + }); + } + ); + }); + + // Limit entries (no sorting needed since we build in order) + return allEntries.slice(-MAX_ENTRIES); + }, [processData, processes]); + + return { entries, isConnected, error }; +}; diff --git a/frontend/src/lib/api.ts b/frontend/src/lib/api.ts index 56bb39d3..4e0f3161 100644 --- a/frontend/src/lib/api.ts +++ b/frontend/src/lib/api.ts @@ -1,35 +1,59 @@ // Import all necessary types from shared types + import { + ApiResponse, BranchStatus, + CheckTokenResponse, Config, - ConfigConstants, CreateFollowUpAttempt, - CreateProject, - CreateProjectFromGitHub, + CreateGitHubPrRequest, CreateTask, - CreateTaskAndStart, - CreateTaskAttempt, + CreateTaskAttemptBody, CreateTaskTemplate, - DeviceStartResponse, - DirectoryEntry, - type EditorType, + DeviceFlowStartResponse, + DevicePollStatus, + DirectoryListResponse, + EditorType, ExecutionProcess, ExecutionProcessSummary, GitBranch, - ProcessLogsResponse, Project, - ProjectWithBranch, + CreateProject, + RebaseTaskAttemptRequest, + RepositoryInfo, + SearchResult, Task, TaskAttempt, - TaskAttemptState, TaskTemplate, TaskWithAttemptStatus, UpdateProject, UpdateTask, UpdateTaskTemplate, + UserSystemInfo, WorktreeDiff, + GitHubServiceError, } from 'shared/types'; +// Re-export types for convenience +export type { RepositoryInfo } from 'shared/types'; + +export class ApiError extends Error { + public status?: number; + public error_data?: E; + + constructor( + message: string, + public statusCode?: number, + public response?: Response, + error_data?: E + ) { + super(message); + this.name = 'ApiError'; + this.status = 
statusCode; + this.error_data = error_data; + } +} + export const makeRequest = async (url: string, options: RequestInit = {}) => { const headers = { 'Content-Type': 'application/json', @@ -42,55 +66,55 @@ export const makeRequest = async (url: string, options: RequestInit = {}) => { }); }; -export interface ApiResponse { - success: boolean; - data?: T; - message?: string; -} - export interface FollowUpResponse { message: string; actual_attempt_id: string; created_new_attempt: boolean; } -// Additional interface for file search results -export interface FileSearchResult { - path: string; - name: string; -} +// Result type for endpoints that need typed errors +export type Result = + | { success: true; data: T } + | { success: false; error: E | undefined; message?: string }; -// Directory listing response -export interface DirectoryListResponse { - entries: DirectoryEntry[]; - current_path: string; -} +// Special handler for Result-returning endpoints +const handleApiResponseAsResult = async ( + response: Response +): Promise> => { + if (!response.ok) { + // HTTP error - no structured error data + let errorMessage = `Request failed with status ${response.status}`; -// GitHub Repository Info (manually defined since not exported from Rust yet) -export interface RepositoryInfo { - id: number; - name: string; - full_name: string; - owner: string; - description: string | null; - clone_url: string; - ssh_url: string; - default_branch: string; - private: boolean; -} + try { + const errorData = await response.json(); + if (errorData.message) { + errorMessage = errorData.message; + } + } catch { + errorMessage = response.statusText || errorMessage; + } -export class ApiError extends Error { - constructor( - message: string, - public status?: number, - public response?: Response - ) { - super(message); - this.name = 'ApiError'; + return { + success: false, + error: undefined, + message: errorMessage, + }; } -} -const handleApiResponse = async (response: Response): Promise => { + const result: ApiResponse = await response.json(); + + if (!result.success) { + return { + success: false, + error: result.error_data || undefined, + message: result.message || undefined, + }; + } + + return { success: true, data: result.data as T }; +}; + +const handleApiResponse = async (response: Response): Promise => { if (!response.ok) { let errorMessage = `Request failed with status ${response.status}`; @@ -111,12 +135,31 @@ const handleApiResponse = async (response: Response): Promise => { endpoint: response.url, timestamp: new Date().toISOString(), }); - throw new ApiError(errorMessage, response.status, response); + throw new ApiError(errorMessage, response.status, response); } - const result: ApiResponse = await response.json(); + const result: ApiResponse = await response.json(); if (!result.success) { + // Check for error_data first (structured errors), then fall back to message + if (result.error_data) { + console.error('[API Error with data]', { + error_data: result.error_data, + message: result.message, + status: response.status, + response, + endpoint: response.url, + timestamp: new Date().toISOString(), + }); + // Throw a properly typed error with the error data + throw new ApiError( + result.message || 'API request failed', + response.status, + response, + result.error_data + ); + } + console.error('[API Error]', { message: result.message || 'API request failed', status: response.status, @@ -124,7 +167,11 @@ const handleApiResponse = async (response: Response): Promise => { endpoint: response.url, timestamp: 
new Date().toISOString(), }); - throw new ApiError(result.message || 'API request failed'); + throw new ApiError( + result.message || 'API request failed', + response.status, + response + ); } return result.data as T; @@ -142,11 +189,6 @@ export const projectsApi = { return handleApiResponse(response); }, - getWithBranch: async (id: string): Promise => { - const response = await makeRequest(`/api/projects/${id}/with-branch`); - return handleApiResponse(response); - }, - create: async (data: CreateProject): Promise => { const response = await makeRequest('/api/projects', { method: 'POST', @@ -183,147 +225,93 @@ export const projectsApi = { return handleApiResponse(response); }, - searchFiles: async ( - id: string, - query: string - ): Promise => { + searchFiles: async (id: string, query: string): Promise => { const response = await makeRequest( `/api/projects/${id}/search?q=${encodeURIComponent(query)}` ); - return handleApiResponse(response); + return handleApiResponse(response); }, }; // Task Management APIs export const tasksApi = { getAll: async (projectId: string): Promise => { - const response = await makeRequest(`/api/projects/${projectId}/tasks`); + const response = await makeRequest(`/api/tasks?project_id=${projectId}`); return handleApiResponse(response); }, - getById: async (projectId: string, taskId: string): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}` - ); + getById: async (taskId: string): Promise => { + const response = await makeRequest(`/api/tasks/${taskId}`); return handleApiResponse(response); }, - create: async (projectId: string, data: CreateTask): Promise => { - const response = await makeRequest(`/api/projects/${projectId}/tasks`, { + create: async (data: CreateTask): Promise => { + const response = await makeRequest(`/api/tasks`, { method: 'POST', body: JSON.stringify(data), }); return handleApiResponse(response); }, - createAndStart: async ( - projectId: string, - data: CreateTaskAndStart - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/create-and-start`, - { - method: 'POST', - body: JSON.stringify(data), - } - ); + createAndStart: async (data: CreateTask): Promise => { + const response = await makeRequest(`/api/tasks/create-and-start`, { + method: 'POST', + body: JSON.stringify(data), + }); return handleApiResponse(response); }, - update: async ( - projectId: string, - taskId: string, - data: UpdateTask - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}`, - { - method: 'PUT', - body: JSON.stringify(data), - } - ); + update: async (taskId: string, data: UpdateTask): Promise => { + const response = await makeRequest(`/api/tasks/${taskId}`, { + method: 'PUT', + body: JSON.stringify(data), + }); return handleApiResponse(response); }, - delete: async (projectId: string, taskId: string): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}`, - { - method: 'DELETE', - } - ); + delete: async (taskId: string): Promise => { + const response = await makeRequest(`/api/tasks/${taskId}`, { + method: 'DELETE', + }); return handleApiResponse(response); }, - - getChildren: async ( - projectId: string, - taskId: string, - attemptId: string - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/children` - ); - return handleApiResponse(response); - }, }; // Task Attempts APIs export const attemptsApi = { - getAll: async 
(projectId: string, taskId: string): Promise => { + getChildren: async (attemptId: string): Promise => { const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts` + `/api/task-attempts/${attemptId}/children` ); + return handleApiResponse(response); + }, + + getAll: async (taskId: string): Promise => { + const response = await makeRequest(`/api/task-attempts?task_id=${taskId}`); return handleApiResponse(response); }, - create: async ( - projectId: string, - taskId: string, - data: CreateTaskAttempt - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts`, - { - method: 'POST', - body: JSON.stringify(data), - } - ); + create: async (data: CreateTaskAttemptBody): Promise => { + const response = await makeRequest(`/api/task-attempts`, { + method: 'POST', + body: JSON.stringify(data), + }); return handleApiResponse(response); }, - getState: async ( - projectId: string, - taskId: string, - attemptId: string - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}` - ); - return handleApiResponse(response); - }, - - stop: async ( - projectId: string, - taskId: string, - attemptId: string - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/stop`, - { - method: 'POST', - } - ); + stop: async (attemptId: string): Promise => { + const response = await makeRequest(`/api/task-attempts/${attemptId}/stop`, { + method: 'POST', + }); return handleApiResponse(response); }, followUp: async ( - projectId: string, - taskId: string, attemptId: string, data: CreateFollowUpAttempt ): Promise => { const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/follow-up`, + `/api/task-attempts/${attemptId}/follow-up`, { method: 'POST', body: JSON.stringify(data), @@ -332,25 +320,17 @@ export const attemptsApi = { return handleApiResponse(response); }, - getDiff: async ( - projectId: string, - taskId: string, - attemptId: string - ): Promise => { - const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/diff` - ); + getDiff: async (attemptId: string): Promise => { + const response = await makeRequest(`/api/task-attempts/${attemptId}/diff`); return handleApiResponse(response); }, deleteFile: async ( - projectId: string, - taskId: string, attemptId: string, fileToDelete: string ): Promise => { const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/delete-filefile_path=${encodeURIComponent( + `/api/task-attempts/${attemptId}/delete-file?file_path=${encodeURIComponent( fileToDelete )}`, { @@ -361,13 +341,11 @@ export const attemptsApi = { }, openEditor: async ( - projectId: string, - taskId: string, attemptId: string, editorType?: EditorType ): Promise => { const response = await makeRequest( - `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/open-editor`, + `/api/task-attempts/${attemptId}/open-editor`, { method: 'POST', body: JSON.stringify(editorType ? 
         { editor_type: editorType } : null),
@@ -376,24 +354,16 @@ export const attemptsApi = {
     return handleApiResponse(response);
   },
 
-  getBranchStatus: async (
-    projectId: string,
-    taskId: string,
-    attemptId: string
-  ): Promise => {
+  getBranchStatus: async (attemptId: string): Promise => {
     const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/branch-status`
+      `/api/task-attempts/${attemptId}/branch-status`
     );
     return handleApiResponse(response);
   },
 
-  merge: async (
-    projectId: string,
-    taskId: string,
-    attemptId: string
-  ): Promise => {
+  merge: async (attemptId: string): Promise => {
     const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/merge`,
+      `/api/task-attempts/${attemptId}/merge`,
       {
         method: 'POST',
       }
@@ -402,167 +372,111 @@ export const attemptsApi = {
   },
 
   rebase: async (
-    projectId: string,
-    taskId: string,
     attemptId: string,
-    newBaseBranch?: string
+    data: RebaseTaskAttemptRequest
   ): Promise => {
     const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/rebase`,
+      `/api/task-attempts/${attemptId}/rebase`,
       {
         method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-        },
-        body: JSON.stringify({
-          new_base_branch: newBaseBranch || null,
-        }),
+        body: JSON.stringify(data),
       }
     );
     return handleApiResponse(response);
   },
 
   createPR: async (
-    projectId: string,
-    taskId: string,
     attemptId: string,
-    data: {
-      title: string;
-      body: string | null;
-      base_branch: string | null;
-    }
-  ): Promise => {
-    const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/create-pr`,
-      {
-        method: 'POST',
-        body: JSON.stringify(data),
-      }
-    );
-    return handleApiResponse(response);
+    data: CreateGitHubPrRequest
+  ): Promise> => {
+    const response = await makeRequest(`/api/task-attempts/${attemptId}/pr`, {
+      method: 'POST',
+      body: JSON.stringify(data),
+    });
+    return handleApiResponseAsResult(response);
   },
 
-  startDevServer: async (
-    projectId: string,
-    taskId: string,
-    attemptId: string
-  ): Promise => {
+  startDevServer: async (attemptId: string): Promise => {
     const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/start-dev-server`,
+      `/api/task-attempts/${attemptId}/start-dev-server`,
       {
         method: 'POST',
       }
     );
     return handleApiResponse(response);
   },
-
-  getExecutionProcesses: async (
-    projectId: string,
-    taskId: string,
-    attemptId: string
-  ): Promise => {
-    const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/execution-processes`
-    );
-    return handleApiResponse(response);
-  },
-
-  stopExecutionProcess: async (
-    projectId: string,
-    taskId: string,
-    attemptId: string,
-    processId: string
-  ): Promise => {
-    const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/execution-processes/${processId}/stop`,
-      {
-        method: 'POST',
-      }
-    );
-    return handleApiResponse(response);
-  },
-
-  getDetails: async (attemptId: string): Promise => {
-    const response = await makeRequest(`/api/attempts/${attemptId}/details`);
-    return handleApiResponse(response);
-  },
-
-  getAllLogs: async (
-    projectId: string,
-    taskId: string,
-    attemptId: string
-  ): Promise => {
-    const response = await makeRequest(
-      `/api/projects/${projectId}/tasks/${taskId}/attempts/${attemptId}/logs`
-    );
-    return handleApiResponse(response);
-  },
 };
 
 // Execution Process APIs
 export const executionProcessesApi = {
+  getExecutionProcesses: async (
+    attemptId: string
+  ): Promise => {
+    const response = await makeRequest(
+      `/api/execution-processes?task_attempt_id=${attemptId}`
+    );
+    return handleApiResponse(response);
+  },
+
   getDetails: async (processId: string): Promise => {
     const response = await makeRequest(`/api/execution-processes/${processId}`);
     return handleApiResponse(response);
   },
+
+  stopExecutionProcess: async (processId: string): Promise => {
+    const response = await makeRequest(
+      `/api/execution-processes/${processId}/stop`,
+      {
+        method: 'POST',
+      }
+    );
+    return handleApiResponse(response);
+  },
 };
 
 // File System APIs
 export const fileSystemApi = {
   list: async (path?: string): Promise => {
     const queryParam = path ? `?path=${encodeURIComponent(path)}` : '';
-    const response = await makeRequest(`/api/filesystem/list${queryParam}`);
+    const response = await makeRequest(
+      `/api/filesystem/directory${queryParam}`
+    );
     return handleApiResponse(response);
   },
 };
 
-// Config APIs
+// Config APIs (backwards compatible)
 export const configApi = {
-  getConfig: async (): Promise => {
-    const response = await makeRequest('/api/config');
-    return handleApiResponse(response);
+  getConfig: async (): Promise => {
+    const response = await makeRequest('/api/info');
+    return handleApiResponse(response);
   },
 
   saveConfig: async (config: Config): Promise => {
     const response = await makeRequest('/api/config', {
-      method: 'POST',
+      method: 'PUT',
       body: JSON.stringify(config),
     });
     return handleApiResponse(response);
   },
-
-  getConstants: async (): Promise => {
-    const response = await makeRequest('/api/config/constants');
-    return handleApiResponse(response);
-  },
 };
 
 // GitHub Device Auth APIs
 export const githubAuthApi = {
-  checkGithubToken: async (): Promise => {
-    try {
-      const response = await makeRequest('/api/auth/github/check');
-      const result: ApiResponse = await response.json();
-      if (!result.success && result.message === 'github_token_invalid') {
-        return false;
-      }
-      return result.success;
-    } catch (err) {
-      // On network/server error, return undefined (unknown)
-      return undefined;
-    }
+  checkGithubToken: async (): Promise => {
+    const response = await makeRequest('/api/auth/github/check');
+    return handleApiResponse(response);
  },
 
-  start: async (): Promise => {
+  start: async (): Promise => {
     const response = await makeRequest('/api/auth/github/device/start', {
       method: 'POST',
     });
-    return handleApiResponse(response);
+    return handleApiResponse(response);
   },
 
-  poll: async (device_code: string): Promise => {
+  poll: async (): Promise => {
     const response = await makeRequest('/api/auth/github/device/poll', {
       method: 'POST',
-      body: JSON.stringify({ device_code }),
-      headers: { 'Content-Type': 'application/json' },
     });
-    return handleApiResponse(response);
+    return handleApiResponse(response);
   },
 };
 
@@ -572,17 +486,17 @@ export const githubApi = {
     const response = await makeRequest(`/api/github/repositories?page=${page}`);
     return handleApiResponse(response);
   },
-  createProjectFromRepository: async (
-    data: CreateProjectFromGitHub
-  ): Promise => {
-    const response = await makeRequest('/api/projects/from-github', {
-      method: 'POST',
-      body: JSON.stringify(data, (_key, value) =>
-        typeof value === 'bigint' ? Number(value) : value
-      ),
-    });
-    return handleApiResponse(response);
-  },
+  // createProjectFromRepository: async (
+  //   data: CreateProjectFromGitHub
+  // ): Promise => {
+  //   const response = await makeRequest('/api/projects/from-github', {
+  //     method: 'POST',
+  //     body: JSON.stringify(data, (_key, value) =>
+  //       typeof value === 'bigint' ? Number(value) : value
+  //     ),
+  //   });
+  //   return handleApiResponse(response);
+  // },
 };
 
 // Task Templates APIs
@@ -593,12 +507,14 @@ export const templatesApi = {
   },
 
   listGlobal: async (): Promise => {
-    const response = await makeRequest('/api/templates/global');
+    const response = await makeRequest('/api/templates?global=true');
     return handleApiResponse(response);
   },
 
   listByProject: async (projectId: string): Promise => {
-    const response = await makeRequest(`/api/projects/${projectId}/templates`);
+    const response = await makeRequest(
+      `/api/templates?project_id=${projectId}`
+    );
     return handleApiResponse(response);
   },
 
@@ -638,13 +554,13 @@ export const templatesApi = {
 export const mcpServersApi = {
   load: async (executor: string): Promise => {
     const response = await makeRequest(
-      `/api/mcp-servers?executor=${encodeURIComponent(executor)}`
+      `/api/mcp-config?base_coding_agent=${encodeURIComponent(executor)}`
     );
     return handleApiResponse(response);
   },
 
   save: async (executor: string, serversConfig: any): Promise => {
     const response = await makeRequest(
-      `/api/mcp-servers?executor=${encodeURIComponent(executor)}`,
+      `/api/mcp-config?base_coding_agent=${encodeURIComponent(executor)}`,
       {
         method: 'POST',
         body: JSON.stringify(serversConfig),
diff --git a/frontend/src/lib/types.ts b/frontend/src/lib/types.ts
index 860a2241..7a29879b 100644
--- a/frontend/src/lib/types.ts
+++ b/frontend/src/lib/types.ts
@@ -1,14 +1,9 @@
-import {
-  DiffChunkType,
-  ExecutionProcess,
-  ExecutionProcessSummary,
-  ProcessLogsResponse,
-} from 'shared/types.ts';
+import { DiffChunkType } from 'shared/types';
+import { ExecutionProcess, ExecutionProcessSummary } from 'shared/types';
 
 export type AttemptData = {
   processes: ExecutionProcessSummary[];
   runningProcessDetails: Record;
-  allLogs: ProcessLogsResponse[];
 };
 
 export interface ProcessedLine {
diff --git a/frontend/src/lib/utils.ts b/frontend/src/lib/utils.ts
index 016122c5..9ad0df42 100644
--- a/frontend/src/lib/utils.ts
+++ b/frontend/src/lib/utils.ts
@@ -4,7 +4,3 @@ import { twMerge } from 'tailwind-merge';
 export function cn(...inputs: ClassValue[]) {
   return twMerge(clsx(inputs));
 }
-
-export function is_planning_executor_type(executorType: string): boolean {
-  return executorType === 'claude-plan';
-}
diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx
index ac0dff3c..e6ecb961 100644
--- a/frontend/src/main.tsx
+++ b/frontend/src/main.tsx
@@ -4,6 +4,7 @@ import App from './App.tsx';
 import './index.css';
 import { ClickToComponent } from 'click-to-react-component';
 import * as Sentry from '@sentry/react';
+
 import {
   useLocation,
   useNavigationType,
diff --git a/frontend/src/pages/McpServers.tsx b/frontend/src/pages/McpServers.tsx
index f352efda..e255991f 100644
--- a/frontend/src/pages/McpServers.tsx
+++ b/frontend/src/pages/McpServers.tsx
@@ -18,67 +18,67 @@ import { Label } from '@/components/ui/label';
 import { Alert, AlertDescription } from '@/components/ui/alert';
 import { Textarea } from '@/components/ui/textarea';
 import { Loader2 } from 'lucide-react';
-import {
-  EXECUTOR_TYPES,
-  EXECUTOR_LABELS,
-  MCP_SUPPORTED_EXECUTORS,
-} from 'shared/types';
-import { useConfig } from '@/components/config-provider';
+import { BaseCodingAgent, AgentProfile } from 'shared/types';
+import { useUserSystem } from '@/components/config-provider';
 import { mcpServersApi } from '../lib/api';
 
 export function McpServers() {
-  const { config } = useConfig();
+  const { config, profiles } = useUserSystem();
   const [mcpServers, setMcpServers] = useState('{}');
   const [mcpError, setMcpError] = useState(null);
   const [mcpLoading, setMcpLoading] = useState(true);
-  const [selectedMcpExecutor, setSelectedMcpExecutor] = useState('');
+  const [selectedProfile, setSelectedProfile] = useState(
+    null
+  );
   const [mcpApplying, setMcpApplying] = useState(false);
   const [mcpConfigPath, setMcpConfigPath] = useState('');
   const [success, setSuccess] = useState(false);
 
-  // Initialize selected MCP executor when config loads
+  // Initialize selected profile when config loads
   useEffect(() => {
-    if (config?.executor?.type && !selectedMcpExecutor) {
-      // If current executor supports MCP, use it; otherwise use first available MCP executor
-      if (MCP_SUPPORTED_EXECUTORS.includes(config.executor.type)) {
-        setSelectedMcpExecutor(config.executor.type);
-      } else {
-        setSelectedMcpExecutor(MCP_SUPPORTED_EXECUTORS[0] || 'claude');
+    if (config?.profile && profiles && !selectedProfile) {
+      // Find the current profile
+      const currentProfile = profiles.find((p) => p.label === config.profile);
+      if (currentProfile) {
+        setSelectedProfile(currentProfile);
+      } else if (profiles.length > 0) {
+        // Default to first profile if current profile not found
+        setSelectedProfile(profiles[0]);
       }
     }
-  }, [config?.executor?.type, selectedMcpExecutor]);
+  }, [config?.profile, profiles, selectedProfile]);
 
-  // Load existing MCP configuration when selected executor changes
+  // Load existing MCP configuration when selected profile changes
   useEffect(() => {
-    const loadMcpServersForExecutor = async (executorType: string) => {
+    const loadMcpServersForProfile = async (profile: AgentProfile) => {
       // Reset state when loading
       setMcpLoading(true);
       setMcpError(null);
 
-      // Set default empty config based on executor type
+      // Set default empty config based on agent type
       const defaultConfig =
-        executorType === 'amp'
+        profile.agent === BaseCodingAgent.AMP
          ? '{\n "amp.mcpServers": {\n }\n}'
-          : executorType === 'sst-opencode'
+          : profile.agent === BaseCodingAgent.OPENCODE
            ? '{\n "mcp": {\n }, "$schema": "https://opencode.ai/config.json"\n}'
            : '{\n "mcpServers": {\n }\n}';
       setMcpServers(defaultConfig);
       setMcpConfigPath('');
 
       try {
-        // Load MCP servers for the selected executor
-        const result = await mcpServersApi.load(executorType);
+        // Load MCP servers for the selected profile's base agent
+        const result = await mcpServersApi.load(profile.agent);
 
         // Handle new response format with servers and config_path
         const data = result || {};
         const servers = data.servers || {};
         const configPath = data.config_path || '';
 
-        // Create the full configuration structure based on executor type
+        // Create the full configuration structure based on agent type
         let fullConfig;
-        if (executorType === 'amp') {
+        if (profile.agent === BaseCodingAgent.AMP) {
           // For AMP, use the amp.mcpServers structure
           fullConfig = { 'amp.mcpServers': servers };
-        } else if (executorType === 'sst-opencode') {
+        } else if (profile.agent === BaseCodingAgent.OPENCODE) {
           fullConfig = {
             mcp: servers,
             $schema: 'https://opencode.ai/config.json',
@@ -102,22 +102,22 @@ export function McpServers() {
       }
     };
 
-    // Load MCP servers for the selected MCP executor
-    if (selectedMcpExecutor) {
-      loadMcpServersForExecutor(selectedMcpExecutor);
+    // Load MCP servers for the selected profile
+    if (selectedProfile) {
+      loadMcpServersForProfile(selectedProfile);
     }
-  }, [selectedMcpExecutor]);
+  }, [selectedProfile]);
 
   const handleMcpServersChange = (value: string) => {
     setMcpServers(value);
     setMcpError(null);
 
     // Validate JSON on change
-    if (value.trim()) {
+    if (value.trim() && selectedProfile) {
       try {
         const config = JSON.parse(value);
-        // Validate that the config has the expected structure based on executor type
-        if (selectedMcpExecutor === 'amp') {
+        // Validate that the config has the expected structure based on agent type
+        if (selectedProfile.agent === BaseCodingAgent.AMP) {
           if (
             !config['amp.mcpServers'] ||
             typeof config['amp.mcpServers'] !== 'object'
@@ -126,7 +126,7 @@ export function McpServers() {
               'AMP configuration must contain an "amp.mcpServers" object'
             );
           }
-        } else if (selectedMcpExecutor === 'sst-opencode') {
+        } else if (selectedProfile.agent === BaseCodingAgent.OPENCODE) {
           if (!config.mcp || typeof config.mcp !== 'object') {
             setMcpError('Configuration must contain an "mcp" object');
           }
@@ -142,7 +142,7 @@ export function McpServers() {
   };
 
   const handleConfigureVibeKanban = async () => {
-    if (!selectedMcpExecutor) return;
+    if (!selectedProfile) return;
 
     try {
       // Parse existing configuration
@@ -150,7 +150,7 @@ export function McpServers() {
 
       // Always use production MCP installation instructions
       const vibeKanbanConfig =
-        selectedMcpExecutor === 'sst-opencode'
+        selectedProfile.agent === BaseCodingAgent.OPENCODE
           ? {
               type: 'local',
               command: ['npx', '-y', 'vibe-kanban', '--mcp'],
@@ -163,7 +163,7 @@ export function McpServers() {
 
       // Add vibe_kanban to the existing configuration
       let updatedConfig;
-      if (selectedMcpExecutor === 'amp') {
+      if (selectedProfile.agent === BaseCodingAgent.AMP) {
         updatedConfig = {
           ...existingConfig,
           'amp.mcpServers': {
@@ -171,7 +171,7 @@ export function McpServers() {
             vibe_kanban: vibeKanbanConfig,
           },
         };
-      } else if (selectedMcpExecutor === 'sst-opencode') {
+      } else if (selectedProfile.agent === BaseCodingAgent.OPENCODE) {
         updatedConfig = {
           ...existingConfig,
           mcp: {
@@ -200,7 +200,7 @@ export function McpServers() {
   };
 
   const handleApplyMcpServers = async () => {
-    if (!selectedMcpExecutor) return;
+    if (!selectedProfile) return;
 
     setMcpApplying(true);
     setMcpError(null);
@@ -211,9 +211,9 @@ export function McpServers() {
     try {
       const fullConfig = JSON.parse(mcpServers);
 
-      // Validate that the config has the expected structure based on executor type
+      // Validate that the config has the expected structure based on agent type
       let mcpServersConfig;
-      if (selectedMcpExecutor === 'amp') {
+      if (selectedProfile.agent === BaseCodingAgent.AMP) {
         if (
           !fullConfig['amp.mcpServers'] ||
           typeof fullConfig['amp.mcpServers'] !== 'object'
@@ -224,7 +224,7 @@ export function McpServers() {
         }
         // Extract just the inner servers object for the API - backend will handle nesting
         mcpServersConfig = fullConfig['amp.mcpServers'];
-      } else if (selectedMcpExecutor === 'sst-opencode') {
+      } else if (selectedProfile.agent === BaseCodingAgent.OPENCODE) {
         if (!fullConfig.mcp || typeof fullConfig.mcp !== 'object') {
           throw new Error('Configuration must contain an "mcp" object');
         }
@@ -243,7 +243,7 @@ export function McpServers() {
         mcpServersConfig = fullConfig.mcpServers;
       }
 
-      await mcpServersApi.save(selectedMcpExecutor, mcpServersConfig);
+      await mcpServersApi.save(selectedProfile.agent, mcpServersConfig);
 
       // Show success feedback
       setSuccess(true);
@@ -284,7 +284,7 @@ export function McpServers() {

MCP Servers

-          Configure MCP servers to extend executor capabilities.
+          Configure MCP servers to extend coding agent capabilities.

@@ -308,32 +308,33 @@ export function McpServers() {
             Configuration
-            Configure MCP servers for different executors to extend their
+            Configure MCP servers for different coding agents to extend their
             capabilities with custom tools and resources.
- +

-              Choose which executor to configure MCP servers for.
+              Choose which profile to configure MCP servers for.

@@ -347,7 +348,7 @@ export function McpServers() {

{mcpError}

-                To use MCP servers, please select a different executor
+                To use MCP servers, please select a different profile
                 (Claude, Amp, or Gemini) above.

@@ -392,7 +393,7 @@ export function McpServers() {
-            {config.editor.editor_type === 'custom' && (
+            {config.editor.editor_type === EditorType.CUSTOM && (
-                  updateConfig({ sound_alerts: checked })
+                  updateConfig({
+                    notifications: {
+                      ...config.notifications,
+                      sound_enabled: checked,
+                    },
+                  })
                 }
               />
@@ -400,23 +405,28 @@ export function Settings() {
-            {config.sound_alerts && (
+            {config.notifications.sound_enabled && (