Checkpoint restore feature (#607)

This commit is contained in:
Solomon
2025-09-04 15:11:41 +01:00
committed by GitHub
parent 6c9d098216
commit 18a9ff770e
34 changed files with 1879 additions and 195 deletions

View File

@@ -0,0 +1,20 @@
{
"db_name": "SQLite",
"query": "SELECT COUNT(1) as \"count!:_\" FROM execution_processes\n WHERE task_attempt_id = $1\n AND created_at > (SELECT created_at FROM execution_processes WHERE id = $2)",
"describe": {
"columns": [
{
"name": "count!:_",
"ordinal": 0,
"type_info": "Integer"
}
],
"parameters": {
"Right": 2
},
"nullable": [
false
]
},
"hash": "38d187eeb3ffd442fdf69ae2f1c7e26e7b97622dcfb91fddaff53df62541149d"
}

View File

@@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "UPDATE execution_processes\n SET dropped = 1\n WHERE task_attempt_id = $1\n AND created_at > (SELECT created_at FROM execution_processes WHERE id = $2)\n AND dropped = 0\n ",
"describe": {
"columns": [],
"parameters": {
"Right": 2
},
"nullable": []
},
"hash": "453c1691f4f8109931a4b0dc95e2153645bbc2d315d76f371612a7b6262a1c21"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT es.session_id\n FROM execution_processes ep\n JOIN executor_sessions es ON ep.id = es.execution_process_id \n WHERE ep.task_attempt_id = $1\n AND ep.run_reason = 'codingagent'\n AND es.session_id IS NOT NULL\n ORDER BY ep.created_at DESC\n LIMIT 1",
"query": "SELECT es.session_id\n FROM execution_processes ep\n JOIN executor_sessions es ON ep.id = es.execution_process_id \n WHERE ep.task_attempt_id = $1\n AND ep.run_reason = 'codingagent'\n AND ep.dropped = 0\n AND es.session_id IS NOT NULL\n ORDER BY ep.created_at DESC\n LIMIT 1",
"describe": {
"columns": [
{
@@ -16,5 +16,5 @@
true
]
},
"hash": "9bf0917027dfddc081df78bc530435b80e53f403dc0073067c546f24f24c9226"
"hash": "4b84758f0eef3e0abc3ec8b26528987d3482554da67e2aa944d0099d9e1c6f64"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE task_attempt_id = $1 \n ORDER BY created_at ASC",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n after_head_commit,\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n dropped as \"dropped!: bool\",\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE id = $1",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "ecc6c9458bffcc70af47c1f55e97efcf02f105564e7d97247dac1fd704312871"
"hash": "7169af87f37347d39630b7b5c1a5a723303fbeacadb3c974261133012bb9cbbf"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE task_attempt_id = ?1 \n AND run_reason = ?2\n ORDER BY created_at DESC \n LIMIT 1",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n after_head_commit,\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n dropped as \"dropped!: bool\",\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE task_attempt_id = ?1 \n AND run_reason = ?2\n AND dropped = 0\n ORDER BY created_at DESC \n LIMIT 1",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "2fdfe5d83223a8e63a55dd28a3971fb4c9fbfe6c00010e75974c09cec1ebe933"
"hash": "857187db52bb9dcb008c4188f72207ab773f66ae06a054e10c4f628b7d13c9b7"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE status = 'running' \n ORDER BY created_at ASC",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n after_head_commit,\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n dropped as \"dropped!: bool\",\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE status = 'running' \n ORDER BY created_at ASC",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "b8828d250bd93c1d77c97e3954b0e26db4e65e28bba23ec26e77a1faa4dcc974"
"hash": "b3b7913f80b7e5622ab53fa59029e4bb34dcc4abb12cfef854f0c85a4c666b53"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE rowid = $1",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n after_head_commit,\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n dropped as \"dropped!: bool\",\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE task_attempt_id = $1 \n ORDER BY created_at ASC",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "c1b07b345d6cef9413e4dc19f139aad7fea3afb72c5104b2e2d1533825e81293"
"hash": "d03849da109e7b50ff4dfa6a034c610f9d1b21529558d4dd9da298b34ca69680"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "INSERT INTO execution_processes (\n id, task_attempt_id, run_reason, executor_action, status, \n exit_code, started_at, \n completed_at, created_at, updated_at\n ) \n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \n RETURNING \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"",
"query": "INSERT INTO execution_processes (\n id, task_attempt_id, run_reason, executor_action, after_head_commit, status, \n exit_code, started_at, completed_at, created_at, updated_at\n ) \n VALUES ($1, $2, $3, $4, NULL, $5, $6, $7, $8, $9, $10) \n RETURNING \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n after_head_commit,\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n dropped as \"dropped!: bool\",\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "ca6acd3a57fc44e8e29e057700cee4442c0ab8b37aca0abf29fe5464c8539c6d"
"hash": "d28f29e23bba74b5e0b61b71191cb93121d39743fd562523571f2f6c61b7777e"
}

View File

@@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "UPDATE execution_processes \n SET after_head_commit = $1 \n WHERE id = $2",
"describe": {
"columns": [],
"parameters": {
"Right": 2
},
"nullable": []
},
"hash": "d5bb6b9584940367852c3ea74613da570956307d063f4d432ab4e9127e863091"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT \n ep.id as \"id!: Uuid\", \n ep.task_attempt_id as \"task_attempt_id!: Uuid\", \n ep.run_reason as \"run_reason!: ExecutionProcessRunReason\",\n ep.executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n ep.status as \"status!: ExecutionProcessStatus\",\n ep.exit_code,\n ep.started_at as \"started_at!: DateTime<Utc>\",\n ep.completed_at as \"completed_at?: DateTime<Utc>\",\n ep.created_at as \"created_at!: DateTime<Utc>\", \n ep.updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes ep\n JOIN task_attempts ta ON ep.task_attempt_id = ta.id\n JOIN tasks t ON ta.task_id = t.id\n WHERE ep.status = 'running' \n AND ep.run_reason = 'devserver'\n AND t.project_id = $1\n ORDER BY ep.created_at ASC",
"query": "SELECT \n ep.id as \"id!: Uuid\", \n ep.task_attempt_id as \"task_attempt_id!: Uuid\", \n ep.run_reason as \"run_reason!: ExecutionProcessRunReason\",\n ep.executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n ep.after_head_commit,\n ep.status as \"status!: ExecutionProcessStatus\",\n ep.exit_code,\n ep.dropped as \"dropped!: bool\",\n ep.started_at as \"started_at!: DateTime<Utc>\",\n ep.completed_at as \"completed_at?: DateTime<Utc>\",\n ep.created_at as \"created_at!: DateTime<Utc>\", \n ep.updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes ep\n JOIN task_attempts ta ON ep.task_attempt_id = ta.id\n JOIN tasks t ON ta.task_id = t.id\n WHERE ep.status = 'running' \n AND ep.run_reason = 'devserver'\n AND t.project_id = $1\n ORDER BY ep.created_at ASC",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "3baa595eadaa8c720da7c185c5fce08f973355fd7809e2caaf966d207bcb7b4b"
"hash": "e37b848808a382edf3d45dfe5e8f7524e046ee2ffd35cbbfeafbf9d1e1abc575"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "SQLite",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE id = $1",
"query": "SELECT \n id as \"id!: Uuid\", \n task_attempt_id as \"task_attempt_id!: Uuid\", \n run_reason as \"run_reason!: ExecutionProcessRunReason\",\n executor_action as \"executor_action!: sqlx::types::Json<ExecutorActionField>\",\n after_head_commit,\n status as \"status!: ExecutionProcessStatus\",\n exit_code,\n dropped as \"dropped!: bool\",\n started_at as \"started_at!: DateTime<Utc>\",\n completed_at as \"completed_at?: DateTime<Utc>\",\n created_at as \"created_at!: DateTime<Utc>\", \n updated_at as \"updated_at!: DateTime<Utc>\"\n FROM execution_processes \n WHERE rowid = $1",
"describe": {
"columns": [
{
@@ -24,34 +24,44 @@
"type_info": "Text"
},
{
"name": "status!: ExecutionProcessStatus",
"name": "after_head_commit",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "exit_code",
"name": "status!: ExecutionProcessStatus",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "exit_code",
"ordinal": 6,
"type_info": "Integer"
},
{
"name": "started_at!: DateTime<Utc>",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "completed_at?: DateTime<Utc>",
"name": "dropped!: bool",
"ordinal": 7,
"type_info": "Text"
"type_info": "Bool"
},
{
"name": "created_at!: DateTime<Utc>",
"name": "started_at!: DateTime<Utc>",
"ordinal": 8,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"name": "completed_at?: DateTime<Utc>",
"ordinal": 9,
"type_info": "Text"
},
{
"name": "created_at!: DateTime<Utc>",
"ordinal": 10,
"type_info": "Text"
},
{
"name": "updated_at!: DateTime<Utc>",
"ordinal": 11,
"type_info": "Text"
}
],
"parameters": {
@@ -62,13 +72,15 @@
false,
false,
false,
true,
false,
true,
false,
false,
true,
false,
false
]
},
"hash": "cd9d629c4040d6766307998dde9926463b9e7bf03a73cf31cafe73d046579d54"
"hash": "f8fd346c052d6ca3f5628bfc300086559ff280913ef8dd70e427f48d230ef713"
}

View File

@@ -0,0 +1,3 @@
-- Add a boolean flag to mark processes as dropped (excluded from timeline/logs)
-- Dropped processes remain in the table and stay queryable, but are filtered
-- out of the timeline/log views. Existing rows default to not-dropped (0).
ALTER TABLE execution_processes
ADD COLUMN dropped BOOLEAN NOT NULL DEFAULT 0;

View File

@@ -0,0 +1,4 @@
-- Add after_head_commit column to store commit OID after a process ends
-- Nullable TEXT: NULL while a process is running; populated best-effort once
-- the process has finished, so a restore can map a process to a commit.
ALTER TABLE execution_processes
ADD COLUMN after_head_commit TEXT;

View File

@@ -35,8 +35,14 @@ pub struct ExecutionProcess {
pub run_reason: ExecutionProcessRunReason,
#[ts(type = "ExecutorAction")]
pub executor_action: sqlx::types::Json<ExecutorActionField>,
/// Git HEAD commit OID captured after the process ends
pub after_head_commit: Option<String>,
pub status: ExecutionProcessStatus,
pub exit_code: Option<i64>,
/// dropped: true if this process is excluded from the current
/// history view (due to restore/trimming). Hidden from logs/timeline;
/// still listed in the Processes tab.
pub dropped: bool,
pub started_at: DateTime<Utc>,
pub completed_at: Option<DateTime<Utc>>,
pub created_at: DateTime<Utc>,
@@ -82,8 +88,10 @@ impl ExecutionProcess {
task_attempt_id as "task_attempt_id!: Uuid",
run_reason as "run_reason!: ExecutionProcessRunReason",
executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
after_head_commit,
status as "status!: ExecutionProcessStatus",
exit_code,
dropped as "dropped!: bool",
started_at as "started_at!: DateTime<Utc>",
completed_at as "completed_at?: DateTime<Utc>",
created_at as "created_at!: DateTime<Utc>",
@@ -96,6 +104,25 @@ impl ExecutionProcess {
.await
}
/// Count processes created after the given boundary process.
///
/// Returns `Ok(0)` when the boundary process does not exist: the
/// `created_at` subquery yields NULL, the comparison matches no rows,
/// and COUNT(1) over zero rows is 0.
pub async fn count_later_than(
    pool: &SqlitePool,
    task_attempt_id: Uuid,
    boundary_process_id: Uuid,
) -> Result<i64, sqlx::Error> {
    // Propagate database errors with `?` instead of `.unwrap_or(0)`:
    // collapsing an Err into 0 made a failed query indistinguishable
    // from "no newer processes" even though we return a Result.
    let cnt = sqlx::query_scalar!(
        r#"SELECT COUNT(1) as "count!:_" FROM execution_processes
            WHERE task_attempt_id = $1
              AND created_at > (SELECT created_at FROM execution_processes WHERE id = $2)"#,
        task_attempt_id,
        boundary_process_id
    )
    .fetch_one(pool)
    .await?;
    Ok(cnt)
}
/// Find execution process by rowid
pub async fn find_by_rowid(pool: &SqlitePool, rowid: i64) -> Result<Option<Self>, sqlx::Error> {
sqlx::query_as!(
@@ -105,8 +132,10 @@ impl ExecutionProcess {
task_attempt_id as "task_attempt_id!: Uuid",
run_reason as "run_reason!: ExecutionProcessRunReason",
executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
after_head_commit,
status as "status!: ExecutionProcessStatus",
exit_code,
dropped as "dropped!: bool",
started_at as "started_at!: DateTime<Utc>",
completed_at as "completed_at?: DateTime<Utc>",
created_at as "created_at!: DateTime<Utc>",
@@ -131,8 +160,10 @@ impl ExecutionProcess {
task_attempt_id as "task_attempt_id!: Uuid",
run_reason as "run_reason!: ExecutionProcessRunReason",
executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
after_head_commit,
status as "status!: ExecutionProcessStatus",
exit_code,
dropped as "dropped!: bool",
started_at as "started_at!: DateTime<Utc>",
completed_at as "completed_at?: DateTime<Utc>",
created_at as "created_at!: DateTime<Utc>",
@@ -155,8 +186,10 @@ impl ExecutionProcess {
task_attempt_id as "task_attempt_id!: Uuid",
run_reason as "run_reason!: ExecutionProcessRunReason",
executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
after_head_commit,
status as "status!: ExecutionProcessStatus",
exit_code,
dropped as "dropped!: bool",
started_at as "started_at!: DateTime<Utc>",
completed_at as "completed_at?: DateTime<Utc>",
created_at as "created_at!: DateTime<Utc>",
@@ -181,8 +214,10 @@ impl ExecutionProcess {
ep.task_attempt_id as "task_attempt_id!: Uuid",
ep.run_reason as "run_reason!: ExecutionProcessRunReason",
ep.executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
ep.after_head_commit,
ep.status as "status!: ExecutionProcessStatus",
ep.exit_code,
ep.dropped as "dropped!: bool",
ep.started_at as "started_at!: DateTime<Utc>",
ep.completed_at as "completed_at?: DateTime<Utc>",
ep.created_at as "created_at!: DateTime<Utc>",
@@ -215,6 +250,7 @@ impl ExecutionProcess {
JOIN executor_sessions es ON ep.id = es.execution_process_id
WHERE ep.task_attempt_id = $1
AND ep.run_reason = 'codingagent'
AND ep.dropped = 0
AND es.session_id IS NOT NULL
ORDER BY ep.created_at DESC
LIMIT 1"#,
@@ -241,8 +277,10 @@ impl ExecutionProcess {
task_attempt_id as "task_attempt_id!: Uuid",
run_reason as "run_reason!: ExecutionProcessRunReason",
executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
after_head_commit,
status as "status!: ExecutionProcessStatus",
exit_code,
dropped as "dropped!: bool",
started_at as "started_at!: DateTime<Utc>",
completed_at as "completed_at?: DateTime<Utc>",
created_at as "created_at!: DateTime<Utc>",
@@ -250,6 +288,7 @@ impl ExecutionProcess {
FROM execution_processes
WHERE task_attempt_id = ?1
AND run_reason = ?2
AND dropped = 0
ORDER BY created_at DESC
LIMIT 1"#,
task_attempt_id,
@@ -271,18 +310,19 @@ impl ExecutionProcess {
sqlx::query_as!(
ExecutionProcess,
r#"INSERT INTO execution_processes (
id, task_attempt_id, run_reason, executor_action, status,
exit_code, started_at,
completed_at, created_at, updated_at
id, task_attempt_id, run_reason, executor_action, after_head_commit, status,
exit_code, started_at, completed_at, created_at, updated_at
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
VALUES ($1, $2, $3, $4, NULL, $5, $6, $7, $8, $9, $10)
RETURNING
id as "id!: Uuid",
task_attempt_id as "task_attempt_id!: Uuid",
run_reason as "run_reason!: ExecutionProcessRunReason",
executor_action as "executor_action!: sqlx::types::Json<ExecutorActionField>",
after_head_commit,
status as "status!: ExecutionProcessStatus",
exit_code,
dropped as "dropped!: bool",
started_at as "started_at!: DateTime<Utc>",
completed_at as "completed_at?: DateTime<Utc>",
created_at as "created_at!: DateTime<Utc>",
@@ -338,6 +378,24 @@ impl ExecutionProcess {
Ok(())
}
/// Update the "after" commit oid for the process
///
/// Records the Git HEAD OID captured once the process has ended so a later
/// restore can map this process to a concrete worktree state. A plain UPDATE:
/// it overwrites any previous value, and succeeds (matching zero rows) when
/// `id` does not exist.
pub async fn update_after_head_commit(
pool: &SqlitePool,
id: Uuid,
after_head_commit: &str,
) -> Result<(), sqlx::Error> {
sqlx::query!(
r#"UPDATE execution_processes
SET after_head_commit = $1
WHERE id = $2"#,
after_head_commit,
id
)
.execute(pool)
.await?;
Ok(())
}
pub async fn delete_by_task_attempt_id(
pool: &SqlitePool,
task_attempt_id: Uuid,
@@ -360,6 +418,28 @@ impl ExecutionProcess {
}
}
/// Set restore boundary: mark every process in this attempt created *after*
/// the boundary process as dropped. Monotonic — it never clears an existing
/// `dropped` flag on older/equal processes (no "undrop").
pub async fn set_restore_boundary(
pool: &SqlitePool,
task_attempt_id: Uuid,
boundary_process_id: Uuid,
) -> Result<(), sqlx::Error> {
// Monotonic drop: only mark newer records as dropped; never undrop.
// If `boundary_process_id` does not exist, the subquery yields NULL and
// the `created_at >` comparison matches no rows — a harmless no-op.
sqlx::query!(
r#"UPDATE execution_processes
SET dropped = 1
WHERE task_attempt_id = $1
AND created_at > (SELECT created_at FROM execution_processes WHERE id = $2)
AND dropped = 0
"#,
task_attempt_id,
boundary_process_id
)
.execute(pool)
.await?;
Ok(())
}
/// Get the parent TaskAttempt for this execution process
pub async fn parent_task_attempt(
&self,

View File

@@ -150,6 +150,21 @@ pub trait Deployment: Clone + Send + Sync + 'static {
);
continue;
}
// Capture after-head commit OID (best-effort)
if let Ok(Some(task_attempt)) =
TaskAttempt::find_by_id(&self.db().pool, process.task_attempt_id).await
&& let Some(container_ref) = task_attempt.container_ref
{
let wt = std::path::PathBuf::from(container_ref);
if let Ok(head) = self.git().get_head_info(&wt) {
let _ = ExecutionProcess::update_after_head_commit(
&self.db().pool,
process.id,
&head.oid,
)
.await;
}
}
// Process marked as failed
tracing::info!("Marked orphaned execution process {} as failed", process.id);
// Update task status to InReview for coding agent and setup script failures

View File

@@ -79,10 +79,43 @@ impl StandardCodingAgentExecutor for Amp {
) -> Result<AsyncGroupChild, ExecutorError> {
// Use shell command for cross-platform compatibility
let (shell_cmd, shell_arg) = get_shell_command();
let amp_command = self.build_command_builder().build_follow_up(&[
// 1) Fork the thread synchronously to obtain new thread id
let fork_cmd = self.build_command_builder().build_follow_up(&[
"threads".to_string(),
"fork".to_string(),
session_id.to_string(),
]);
let fork_output = Command::new(shell_cmd)
.kill_on_drop(true)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.current_dir(current_dir)
.arg(shell_arg)
.arg(&fork_cmd)
.output()
.await?;
let stdout_str = String::from_utf8_lossy(&fork_output.stdout);
let new_thread_id = stdout_str
.lines()
.rev()
.find(|l| !l.trim().is_empty())
.unwrap_or("")
.trim()
.to_string();
if new_thread_id.is_empty() {
return Err(ExecutorError::Io(std::io::Error::other(
"AMP threads fork did not return a thread id",
)));
}
tracing::debug!("AMP threads fork -> new thread id: {}", new_thread_id);
// 2) Continue using the new thread id
let continue_cmd = self.build_command_builder().build_follow_up(&[
"threads".to_string(),
"continue".to_string(),
session_id.to_string(),
new_thread_id.clone(),
]);
let combined_prompt = utils::text::combine_prompt(&self.append_prompt, prompt);
@@ -95,7 +128,7 @@ impl StandardCodingAgentExecutor for Amp {
.stderr(Stdio::piped())
.current_dir(current_dir)
.arg(shell_arg)
.arg(&amp_command);
.arg(&continue_cmd);
let mut child = command.group_spawn()?;

View File

@@ -117,6 +117,77 @@ impl SessionHandler {
"Could not find rollout file for session_id: {session_id}"
))
}
/// Fork a Codex rollout file by copying it alongside the original and assigning a new session id.
/// Returns (new_rollout_path, new_session_id).
///
/// The fork is a line-level copy of the original rollout in which only the
/// `id` field of the first-line JSON metadata object is rewritten to a fresh
/// UUID, so the forked session replays the same history under a new identity.
pub fn fork_rollout_file(session_id: &str) -> Result<(PathBuf, String), String> {
    use std::io::{BufRead, BufReader, BufWriter, Write};

    let original = Self::find_rollout_file_path(session_id)?;
    let file = std::fs::File::open(&original)
        .map_err(|e| format!("Failed to open rollout file {}: {e}", original.display()))?;
    let mut reader = BufReader::new(file);

    // First line of a rollout is a JSON metadata object carrying the session id.
    let mut first_line = String::new();
    reader
        .read_line(&mut first_line)
        .map_err(|e| format!("Failed to read first line from {}: {e}", original.display()))?;
    let mut meta: serde_json::Value = serde_json::from_str(first_line.trim()).map_err(|e| {
        format!(
            "Failed to parse first line JSON in {}: {e}",
            original.display()
        )
    })?;

    // Generate new UUID for forked session
    let new_id = uuid::Uuid::new_v4().to_string();
    if let serde_json::Value::Object(ref mut map) = meta {
        map.insert("id".to_string(), serde_json::Value::String(new_id.clone()));
    } else {
        return Err("First line of rollout file is not a JSON object".to_string());
    }

    // Prepare destination path in the same directory, following Codex rollout naming convention:
    // rollout-<YYYY>-<MM>-<DD>T<HH>-<mm>-<ss>-<session_id>.jsonl
    let parent_dir = original
        .parent()
        .ok_or_else(|| format!("Unexpected path with no parent: {}", original.display()))?;
    let filename = original
        .file_name()
        .and_then(|s| s.to_str())
        .unwrap_or("rollout.jsonl");
    let new_filename = if filename.starts_with("rollout-") && filename.ends_with(".jsonl") {
        let stem = &filename[..filename.len() - ".jsonl".len()];
        // Replace the trailing session id with the new id, keeping the
        // timestamp prefix intact. BUGFIX: UUIDs themselves contain '-',
        // so splitting at the last '-' (rfind) cut inside the old id and
        // left most of it in the new filename; strip the known session-id
        // suffix instead.
        if let Some(prefix) = stem.strip_suffix(session_id) {
            format!("{prefix}{new_id}.jsonl")
        } else {
            format!("rollout-{new_id}.jsonl")
        }
    } else {
        format!("rollout-{new_id}.jsonl")
    };
    let dest = parent_dir.join(new_filename);

    // Write new file with modified first line and copy the rest as-is.
    // Buffered writer: the body is copied line-by-line.
    let mut writer = BufWriter::new(
        std::fs::File::create(&dest)
            .map_err(|e| format!("Failed to create forked rollout {}: {e}", dest.display()))?,
    );
    let meta_line = serde_json::to_string(&meta)
        .map_err(|e| format!("Failed to serialize modified meta: {e}"))?;
    writeln!(writer, "{meta_line}")
        .map_err(|e| format!("Failed to write meta to {}: {e}", dest.display()))?;
    for line in reader.lines() {
        let line =
            line.map_err(|e| format!("I/O error reading {}: {e}", original.display()))?;
        writeln!(writer, "{line}")
            .map_err(|e| format!("Failed to write to {}: {e}", dest.display()))?;
    }
    // Flush explicitly: Drop would flush but silently swallow any error.
    writer
        .flush()
        .map_err(|e| format!("Failed to flush {}: {e}", dest.display()))?;

    Ok((dest, new_id))
}
}
/// An executor that uses Codex CLI to process tasks
@@ -196,11 +267,9 @@ impl StandardCodingAgentExecutor for Codex {
prompt: &str,
session_id: &str,
) -> Result<AsyncGroupChild, ExecutorError> {
// Find the rollout file for the given session_id using SessionHandler
let rollout_file_path =
SessionHandler::find_rollout_file_path(session_id).map_err(|e| {
ExecutorError::SpawnError(std::io::Error::new(std::io::ErrorKind::NotFound, e))
})?;
// Fork rollout: copy and assign a new session id so each execution has a unique session
let (rollout_file_path, _new_session_id) = SessionHandler::fork_rollout_file(session_id)
.map_err(|e| ExecutorError::SpawnError(std::io::Error::other(e)))?;
let (shell_cmd, shell_arg) = get_shell_command();
let codex_command = self.build_command_builder().build_follow_up(&[

View File

@@ -27,6 +27,12 @@ pub mod gemini;
pub mod opencode;
pub mod qwen;
/// A discrete optional feature a base coding agent may support; advertised to
/// the frontend (serialized in SCREAMING_SNAKE_CASE) so it can gate UI.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, TS)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum BaseAgentCapability {
    /// Agent supports restoring to an earlier checkpoint ("RESTORE_CHECKPOINT").
    RestoreCheckpoint,
}
#[derive(Debug, Error)]
pub enum ExecutorError {
#[error("Follow-up is not supported: {0}")]
@@ -125,6 +131,15 @@ impl CodingAgent {
/// True when this agent can be configured via MCP, i.e. it declares a
/// default MCP config path.
pub fn supports_mcp(&self) -> bool {
    self.default_mcp_config_path().is_some()
}
/// List the optional capabilities supported by this coding agent.
///
/// Checkpoint restore is currently available for Claude Code, Amp and Codex;
/// the remaining agents expose no extra capabilities.
pub fn capabilities(&self) -> Vec<BaseAgentCapability> {
    // Exhaustive match (no catch-all): adding a new agent variant forces an
    // explicit decision here.
    match self {
        Self::ClaudeCode(_) | Self::Amp(_) | Self::Codex(_) => {
            vec![BaseAgentCapability::RestoreCheckpoint]
        }
        Self::Gemini(_) | Self::Opencode(_) | Self::Cursor(_) | Self::QwenCode(_) => Vec::new(),
    }
}
}
#[async_trait]

View File

@@ -344,6 +344,8 @@ impl LocalContainerService {
tracing::warn!("Failed to update executor session summary: {}", e);
}
// (moved) capture after-head commit occurs later, after commit/next-action handling
if matches!(
ctx.execution_process.status,
ExecutionProcessStatus::Completed
@@ -415,6 +417,24 @@ impl LocalContainerService {
}
}
// Now that commit/next-action/finalization steps for this process are complete,
// capture the HEAD OID as the definitive "after" state (best-effort).
if let Ok(ctx) = ExecutionProcess::load_context(&db.pool, exec_id).await {
let worktree_dir = container.task_attempt_to_current_dir(&ctx.task_attempt);
if let Ok(head) = container.git().get_head_info(&worktree_dir)
&& let Err(e) = ExecutionProcess::update_after_head_commit(
&db.pool, exec_id, &head.oid,
)
.await
{
tracing::warn!(
"Failed to update after_head_commit for {}: {}",
exec_id,
e
);
}
}
// Cleanup msg store
if let Some(msg_arc) = msg_stores.write().await.remove(&exec_id) {
msg_arc.push_finished();
@@ -912,6 +932,19 @@ impl ContainerService for LocalContainerService {
execution_process.id
);
// Record after-head commit OID (best-effort)
if let Ok(ctx) = ExecutionProcess::load_context(&self.db.pool, execution_process.id).await {
let worktree = self.task_attempt_to_current_dir(&ctx.task_attempt);
if let Ok(head) = self.git().get_head_info(&worktree) {
let _ = ExecutionProcess::update_after_head_commit(
&self.db.pool,
execution_process.id,
&head.oid,
)
.await;
}
}
Ok(())
}

View File

@@ -63,6 +63,7 @@ fn generate_types_content() -> String {
executors::command::CommandBuilder::decl(),
executors::profile::ExecutorProfileId::decl(),
executors::profile::ExecutorConfig::decl(),
executors::executors::BaseAgentCapability::decl(),
executors::executors::claude::ClaudeCode::decl(),
executors::executors::gemini::Gemini::decl(),
executors::executors::gemini::GeminiModel::decl(),
@@ -76,6 +77,10 @@ fn generate_types_content() -> String {
executors::actions::coding_agent_follow_up::CodingAgentFollowUpRequest::decl(),
server::routes::task_attempts::CreateTaskAttemptBody::decl(),
server::routes::task_attempts::RebaseTaskAttemptRequest::decl(),
server::routes::task_attempts::RestoreAttemptRequest::decl(),
server::routes::task_attempts::RestoreAttemptResult::decl(),
server::routes::task_attempts::CommitInfo::decl(),
server::routes::task_attempts::CommitCompareResult::decl(),
server::routes::task_attempts::BranchStatus::decl(),
db::models::task_attempt::TaskAttempt::decl(),
db::models::execution_process::ExecutionProcess::decl(),

View File

@@ -10,7 +10,7 @@ use axum::{
};
use deployment::{Deployment, DeploymentError};
use executors::{
executors::{BaseCodingAgent, StandardCodingAgentExecutor},
executors::{BaseAgentCapability, BaseCodingAgent, StandardCodingAgentExecutor},
mcp_config::{McpConfig, read_agent_config, write_agent_config},
profile::{ExecutorConfigs, ExecutorProfileId},
};
@@ -64,6 +64,8 @@ pub struct UserSystemInfo {
#[serde(flatten)]
pub profiles: ExecutorConfigs,
pub environment: Environment,
/// Capabilities supported per executor (e.g., { "CLAUDE_CODE": ["RESTORE_CHECKPOINT"] })
pub capabilities: HashMap<String, Vec<BaseAgentCapability>>,
}
// TODO: update frontend, BE schema has changed, this replaces GET /config and /config/constants
@@ -77,6 +79,16 @@ async fn get_user_system_info(
config: config.clone(),
profiles: ExecutorConfigs::get_cached(),
environment: Environment::new(),
capabilities: {
let mut caps: HashMap<String, Vec<BaseAgentCapability>> = HashMap::new();
let profs = ExecutorConfigs::get_cached();
for key in profs.executors.keys() {
if let Some(agent) = profs.get_coding_agent(&ExecutorProfileId::new(*key)) {
caps.insert(key.to_string(), agent.capabilities());
}
}
caps
},
};
ResponseJson(ApiResponse::success(user_system_info))

View File

@@ -48,6 +48,24 @@ pub struct RebaseTaskAttemptRequest {
pub new_base_branch: Option<String>,
}
/// Request body for restoring a task attempt to an earlier execution process.
#[derive(Debug, Deserialize, Serialize, TS)]
pub struct RestoreAttemptRequest {
    /// Process to restore to (target = its after_head_commit)
    pub process_id: Uuid,
    /// If true, allow resetting Git even when uncommitted changes exist
    pub force_when_dirty: Option<bool>,
    /// If false, skip performing the Git reset step (history drop still applies)
    pub perform_git_reset: Option<bool>,
}
/// Outcome summary returned by the restore endpoint.
#[derive(Debug, Serialize, TS)]
pub struct RestoreAttemptResult {
    /// True if processes newer than the restore target existed (they get dropped).
    pub had_later_processes: bool,
    /// True if worktree HEAD differed from the target commit or was dirty.
    pub git_reset_needed: bool,
    /// True if a hard reset to the target commit was actually performed.
    pub git_reset_applied: bool,
    /// The `after_head_commit` OID of the target process, if recorded.
    pub target_after_oid: Option<String>,
}
#[derive(Debug, Deserialize, Serialize, TS)]
pub struct CreateGitHubPrRequest {
pub title: String,
@@ -157,7 +175,7 @@ pub async fn follow_up(
.ensure_container_exists(&task_attempt)
.await?;
// Get session_id with simple query
// Get latest session id (ignoring dropped)
let session_id = ExecutionProcess::find_latest_session_id_by_task_attempt(
&deployment.db().pool,
task_attempt.id,
@@ -261,6 +279,103 @@ pub async fn follow_up(
Ok(ResponseJson(ApiResponse::success(execution_process)))
}
/// Restore a task attempt to the state captured after a given execution process.
///
/// All execution processes created after the target are marked as dropped and,
/// unless `perform_git_reset` is false, the worktree is hard-reset to the
/// target process's recorded `after_head_commit`.
///
/// Returns a [`RestoreAttemptResult`] describing what was (or would be) done.
#[axum::debug_handler]
pub async fn restore_task_attempt(
    Extension(task_attempt): Extension<TaskAttempt>,
    State(deployment): State<DeploymentImpl>,
    Json(payload): Json<RestoreAttemptRequest>,
) -> Result<ResponseJson<ApiResponse<RestoreAttemptResult>>, ApiError> {
    let pool = &deployment.db().pool;
    let proc_id = payload.process_id;
    let force_when_dirty = payload.force_when_dirty.unwrap_or(false);
    let perform_git_reset = payload.perform_git_reset.unwrap_or(true);
    // Validate that the target process exists and belongs to this attempt.
    let process =
        ExecutionProcess::find_by_id(pool, proc_id)
            .await?
            .ok_or(ApiError::TaskAttempt(TaskAttemptError::ValidationError(
                "Process not found".to_string(),
            )))?;
    if process.task_attempt_id != task_attempt.id {
        return Err(ApiError::TaskAttempt(TaskAttemptError::ValidationError(
            "Process does not belong to this attempt".to_string(),
        )));
    }
    // Determine if there are later processes and drop them (history boundary).
    let later = ExecutionProcess::count_later_than(pool, task_attempt.id, proc_id).await?;
    let had_later_processes = later > 0;
    if had_later_processes {
        ExecutionProcess::set_restore_boundary(pool, task_attempt.id, proc_id).await?;
    }
    // Inspect the worktree once; both the "reset" and "skip reset" paths need
    // the same HEAD/dirtiness information to compute `git_reset_needed`.
    let mut git_reset_needed = false;
    let mut git_reset_applied = false;
    let target_after_oid = process.after_head_commit.clone();
    if let Some(target_oid) = &target_after_oid {
        let container_ref = deployment
            .container()
            .ensure_container_exists(&task_attempt)
            .await?;
        let wt = std::path::Path::new(&container_ref);
        let head_oid = deployment.git().get_head_info(wt).ok().map(|h| h.oid);
        let is_dirty = deployment
            .container()
            .is_container_clean(&task_attempt)
            .await
            .map(|is_clean| !is_clean)
            .unwrap_or(false);
        if head_oid.as_deref() != Some(target_oid.as_str()) || is_dirty {
            git_reset_needed = true;
            if !perform_git_reset {
                // Caller asked to skip the Git step; report only.
            } else if is_dirty && !force_when_dirty {
                // Refuse to clobber uncommitted changes without an explicit force.
                git_reset_applied = false;
            } else if let Err(e) =
                deployment
                    .git()
                    .reset_worktree_to_commit(wt, target_oid, force_when_dirty)
            {
                tracing::error!("Failed to reset worktree: {}", e);
                git_reset_applied = false;
            } else {
                git_reset_applied = true;
            }
        }
    }
    Ok(ResponseJson(ApiResponse::success(RestoreAttemptResult {
        had_later_processes,
        git_reset_needed,
        git_reset_applied,
        target_after_oid,
    })))
}
pub async fn get_task_attempt_diff(
Extension(task_attempt): Extension<TaskAttempt>,
State(deployment): State<DeploymentImpl>,
@@ -271,6 +386,73 @@ pub async fn get_task_attempt_diff(
Ok(Sse::new(stream.map_err(|e| -> BoxError { e.into() })).keep_alive(KeepAlive::default()))
}
/// Commit metadata returned by the commit-info endpoint.
#[derive(Debug, Serialize, TS)]
pub struct CommitInfo {
    /// Commit SHA as supplied by the caller.
    pub sha: String,
    /// First (summary) line of the commit message.
    pub subject: String,
}
/// Look up the subject line of a commit inside the attempt's worktree.
///
/// Expects a `sha` query parameter; responds with the commit's summary line.
pub async fn get_commit_info(
    Extension(task_attempt): Extension<TaskAttempt>,
    State(deployment): State<DeploymentImpl>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Result<ResponseJson<ApiResponse<CommitInfo>>, ApiError> {
    let sha = params.get("sha").cloned().ok_or_else(|| {
        ApiError::TaskAttempt(TaskAttemptError::ValidationError(
            "Missing sha param".to_string(),
        ))
    })?;
    let container_ref = deployment
        .container()
        .ensure_container_exists(&task_attempt)
        .await?;
    let worktree = std::path::Path::new(&container_ref);
    let subject = deployment.git().get_commit_subject(worktree, &sha)?;
    Ok(ResponseJson(ApiResponse::success(CommitInfo {
        sha,
        subject,
    })))
}
/// Result of comparing a target commit against the worktree HEAD.
#[derive(Debug, Serialize, TS)]
pub struct CommitCompareResult {
    /// Current HEAD OID of the attempt worktree.
    pub head_oid: String,
    /// The commit OID supplied for comparison.
    pub target_oid: String,
    /// Commits reachable from HEAD but not from the target.
    pub ahead_from_head: usize,
    /// Commits reachable from the target but not from HEAD.
    pub behind_from_head: usize,
    /// True when `behind_from_head == 0`, i.e. the target is an ancestor of HEAD.
    pub is_linear: bool,
}
/// Compare a commit (query param `sha`) against the worktree HEAD, reporting
/// ahead/behind counts plus whether the target is an ancestor of HEAD.
pub async fn compare_commit_to_head(
    Extension(task_attempt): Extension<TaskAttempt>,
    State(deployment): State<DeploymentImpl>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Result<ResponseJson<ApiResponse<CommitCompareResult>>, ApiError> {
    let target_oid = params.get("sha").cloned().ok_or_else(|| {
        ApiError::TaskAttempt(TaskAttemptError::ValidationError(
            "Missing sha param".to_string(),
        ))
    })?;
    let container_ref = deployment
        .container()
        .ensure_container_exists(&task_attempt)
        .await?;
    let worktree = std::path::Path::new(&container_ref);
    let head_info = deployment.git().get_head_info(worktree)?;
    let (ahead_from_head, behind_from_head) = deployment.git().ahead_behind_commits_by_oid(
        worktree,
        &head_info.oid,
        &target_oid,
    )?;
    Ok(ResponseJson(ApiResponse::success(CommitCompareResult {
        head_oid: head_info.oid,
        target_oid,
        ahead_from_head,
        behind_from_head,
        is_linear: behind_from_head == 0,
    })))
}
#[axum::debug_handler]
pub async fn merge_task_attempt(
Extension(task_attempt): Extension<TaskAttempt>,
@@ -584,6 +766,9 @@ pub struct BranchStatus {
pub commits_behind: Option<usize>,
pub commits_ahead: Option<usize>,
pub has_uncommitted_changes: Option<bool>,
pub head_oid: Option<String>,
pub uncommitted_count: Option<usize>,
pub untracked_count: Option<usize>,
pub base_branch_name: String,
pub remote_commits_behind: Option<usize>,
pub remote_commits_ahead: Option<usize>,
@@ -607,6 +792,25 @@ pub async fn get_task_attempt_branch_status(
.await
.ok()
.map(|is_clean| !is_clean);
let head_oid = {
let container_ref = deployment
.container()
.ensure_container_exists(&task_attempt)
.await?;
let wt = std::path::Path::new(&container_ref);
deployment.git().get_head_info(wt).ok().map(|h| h.oid)
};
let (uncommitted_count, untracked_count) = {
let container_ref = deployment
.container()
.ensure_container_exists(&task_attempt)
.await?;
let wt = std::path::Path::new(&container_ref);
match deployment.git().get_worktree_change_counts(wt) {
Ok((a, b)) => (Some(a), Some(b)),
Err(_) => (None, None),
}
};
let task_branch =
task_attempt
@@ -634,6 +838,9 @@ pub async fn get_task_attempt_branch_status(
commits_ahead,
commits_behind,
has_uncommitted_changes,
head_oid,
uncommitted_count,
untracked_count,
remote_commits_ahead: None,
remote_commits_behind: None,
merges,
@@ -864,6 +1071,9 @@ pub fn router(deployment: &DeploymentImpl) -> Router<DeploymentImpl> {
let task_attempt_id_router = Router::new()
.route("/", get(get_task_attempt))
.route("/follow-up", post(follow_up))
.route("/restore", post(restore_task_attempt))
.route("/commit-info", get(get_commit_info))
.route("/commit-compare", get(compare_commit_to_head))
.route("/start-dev-server", post(start_dev_server))
.route("/branch-status", get(get_task_attempt_branch_status))
.route("/diff", get(get_task_attempt_diff))

View File

@@ -830,6 +830,81 @@ impl GitService {
))
}
/// Get the subject/summary line for a given commit OID.
///
/// Falls back to "(no subject)" when the commit message is empty or not
/// valid UTF-8.
pub fn get_commit_subject(
    &self,
    repo_path: &Path,
    commit_sha: &str,
) -> Result<String, GitServiceError> {
    let repo = self.open_repo(repo_path)?;
    let oid = git2::Oid::from_str(commit_sha)
        .map_err(|_| GitServiceError::InvalidRepository("Invalid commit SHA".into()))?;
    Ok(repo
        .find_commit(oid)?
        .summary()
        .unwrap_or("(no subject)")
        .to_string())
}
/// Compare two OIDs and return (ahead, behind) counts: how many commits
/// `from_oid` is ahead of and behind `to_oid`.
pub fn ahead_behind_commits_by_oid(
    &self,
    repo_path: &Path,
    from_oid: &str,
    to_oid: &str,
) -> Result<(usize, usize), GitServiceError> {
    let repo = self.open_repo(repo_path)?;
    // Map bad input to a clear validation error rather than a git2 failure.
    let parse = |oid: &str, label: &str| {
        git2::Oid::from_str(oid)
            .map_err(|_| GitServiceError::InvalidRepository(format!("Invalid {label} OID")))
    };
    let from = parse(from_oid, "from")?;
    let to = parse(to_oid, "to")?;
    Ok(repo.graph_ahead_behind(from, to)?)
}
/// Return (uncommitted_tracked_changes, untracked_files) counts in worktree.
pub fn get_worktree_change_counts(
    &self,
    worktree_path: &Path,
) -> Result<(usize, usize), GitServiceError> {
    super::git_cli::GitCli::new()
        .get_worktree_status(worktree_path)
        .map(|status| (status.uncommitted_tracked, status.untracked))
        .map_err(|e| GitServiceError::InvalidRepository(format!("git status failed: {e}")))
}
/// Expose full worktree status details (CLI porcelain parsing).
pub fn get_worktree_status(
    &self,
    worktree_path: &Path,
) -> Result<super::git_cli::WorktreeStatus, GitServiceError> {
    super::git_cli::GitCli::new()
        .get_worktree_status(worktree_path)
        .map_err(|e| GitServiceError::InvalidRepository(format!("git status failed: {e}")))
}
/// Reset the given worktree to the specified commit SHA.
/// If `force` is false and the worktree is dirty, returns WorktreeDirty error.
pub fn reset_worktree_to_commit(
    &self,
    worktree_path: &Path,
    commit_sha: &str,
    force: bool,
) -> Result<(), GitServiceError> {
    let repo = self.open_repo(worktree_path)?;
    // Refuse to clobber uncommitted changes unless the caller explicitly forces.
    if !force {
        self.check_worktree_clean(&repo)?;
    }
    let cli = super::git_cli::GitCli::new();
    if let Err(e) = cli.git(worktree_path, ["reset", "--hard", commit_sha]) {
        return Err(GitServiceError::InvalidRepository(format!(
            "git reset --hard failed: {e}"
        )));
    }
    // Best-effort: re-apply sparse-checkout rules after the hard reset (non-fatal).
    let _ = cli.git(worktree_path, ["sparse-checkout", "reapply"]);
    Ok(())
}
/// Convenience: Get author of HEAD commit
pub fn get_head_author(
&self,

View File

@@ -173,6 +173,62 @@ impl GitCli {
Ok(Self::parse_name_status(&out))
}
/// Return `git status --porcelain` parsed into a structured summary
pub fn get_worktree_status(&self, worktree_path: &Path) -> Result<WorktreeStatus, GitCliError> {
let out = self.git(worktree_path, ["status", "--porcelain"])?;
let mut entries: Vec<StatusEntry> = Vec::new();
let mut uncommitted_tracked = 0usize;
let mut untracked = 0usize;
for line in out.lines() {
let l = line.trim_end();
if l.is_empty() {
continue;
}
// Two columns (XY) + space + path(s), or '?? path' for untracked
if let Some(rest) = l.strip_prefix("?? ") {
untracked += 1;
entries.push(StatusEntry {
staged: '?',
unstaged: '?',
path: rest.to_string(),
orig_path: None,
is_untracked: true,
});
continue;
}
// At least 3 chars (X, Y, space)
let (xy, tail) = l.split_at(2);
let (_, pathspec) = tail.split_at(1); // skip the space
let staged = xy.chars().nth(0).unwrap_or(' ');
let unstaged = xy.chars().nth(1).unwrap_or(' ');
// Rename shows as 'R ' with `old -> new`
let (path, orig_path) = if pathspec.contains(" -> ") {
let mut parts = pathspec.splitn(2, " -> ");
let oldp = parts.next().unwrap_or("").to_string();
let newp = parts.next().unwrap_or("").to_string();
(newp, Some(oldp))
} else {
(pathspec.to_string(), None)
};
// Count as tracked change if either column indicates a change
if staged != ' ' || unstaged != ' ' {
uncommitted_tracked += 1;
}
entries.push(StatusEntry {
staged,
unstaged,
path,
orig_path,
is_untracked: false,
});
}
Ok(WorktreeStatus {
uncommitted_tracked,
untracked,
entries,
})
}
/// Stage all changes in the working tree (respects sparse-checkout semantics).
pub fn add_all(&self, worktree_path: &Path) -> Result<(), GitCliError> {
self.git(worktree_path, ["add", "-A"])?;
@@ -329,7 +385,11 @@ impl GitCli {
}
/// Run `git -C <repo_path> <args...>` and return stdout on success.
/// Caller may ignore the output; errors surface via Result.
/// Prefer adding specific helpers (e.g. `get_worktree_status`, `diff_status`)
/// instead of calling this directly, so all parsing and command choices are
/// centralized here. This makes it easier to change the underlying commands
/// without adjusting callers. Use this low-level method directly only in
/// tests or when no dedicated helper exists yet.
///
/// About `OsStr`/`OsString` usage:
/// - `Command` and `Path` operate on `OsStr` to support nonUTF8 paths and
@@ -390,3 +450,25 @@ impl GitCli {
Ok(String::from_utf8_lossy(&out.stdout).to_string())
}
}
/// Parsed entry from `git status --porcelain`
///
/// `staged`/`unstaged` hold the raw X/Y status columns of porcelain (v1)
/// output; both are '?' for untracked files.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct StatusEntry {
    /// Single-letter staged status (column X) or '?' for untracked
    pub staged: char,
    /// Single-letter unstaged status (column Y) or '?' for untracked
    pub unstaged: char,
    /// Current path
    pub path: String,
    /// Original path (for renames)
    pub orig_path: Option<String>,
    /// True if this entry is untracked ("??")
    pub is_untracked: bool,
}
/// Summary + entries for a working tree status
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WorktreeStatus {
    /// Number of tracked files with staged and/or unstaged changes.
    pub uncommitted_tracked: usize,
    /// Number of untracked ("??") files.
    pub untracked: usize,
    /// One parsed entry per non-empty porcelain status line.
    pub entries: Vec<StatusEntry>,
}