Use session IDs for scratches and queued messages (Vibe Kanban) (#1613)

* All changes have been completed. Here's a summary of what was done:

## Summary

Changed both scratches and queued messages to be keyed by session IDs instead of workspace IDs (a condensed before/after sketch follows the file list):

### Files Modified

1. **`frontend/src/components/tasks/TaskFollowUpSection.tsx`**
   - Changed `useScratch(ScratchType.DRAFT_FOLLOW_UP, workspaceId ?? '')` → `useScratch(ScratchType.DRAFT_FOLLOW_UP, sessionId ?? '')`

2. **`crates/server/src/routes/sessions/queue.rs`**
   - Changed `session.workspace_id` → `session.id` in all three queue operations (queue, cancel, get status)

3. **`crates/services/src/services/queued_message.rs`**
   - Renamed `task_attempt_id` → `session_id` in `QueuedMessage` struct
   - Updated all method parameters and comments

4. **`crates/local-deployment/src/container.rs`**
   - Changed `ctx.workspace.id` → `ctx.session.id` for queue lookups and scratch deletion
   - Updated log messages to reference "session" instead of "workspace"

5. **`shared/types.ts`** (auto-generated)
   - `QueuedMessage.task_attempt_id` → `QueuedMessage.session_id`
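
In short, every call site that used to pass the workspace ID now passes the session ID. Condensed from the `queue.rs` hunk below (not standalone code; `deployment`, `session`, and `data` come from the route handler):

```rust
// Before: the queue was keyed by the workspace backing the session.
let queued = deployment
    .queued_message_service()
    .queue_message(session.workspace_id, data);

// After: the queue is keyed by the session itself.
let queued = deployment
    .queued_message_service()
    .queue_message(session.id, data);
```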

* Cleanup script changes for task attempt 4df90bcd-f261-41ca-bac3-8f7c2fc576c5

* Build passes. Also found and fixed one missed instance in `sessions/mod.rs`, where `Scratch::delete` was still being called with `workspace.id` instead of `session.id`.
Author: Alex Netsch
Date: 2025-12-21 21:19:06 +00:00 (committed by GitHub)
Commit: 3c35b92a97 (parent 033ae2f1c6)
6 changed files with 39 additions and 42 deletions

**`crates/local-deployment/src/container.rs`**

@@ -473,20 +473,19 @@ impl LocalContainerService {
                 ExecutionProcessStatus::Failed | ExecutionProcessStatus::Killed
             );
-            if let Some(queued_msg) = container
-                .queued_message_service
-                .take_queued(ctx.workspace.id)
+            if let Some(queued_msg) =
+                container.queued_message_service.take_queued(ctx.session.id)
             {
                 if should_execute_queued {
                     tracing::info!(
-                        "Found queued message for workspace {}, starting follow-up execution",
-                        ctx.workspace.id
+                        "Found queued message for session {}, starting follow-up execution",
+                        ctx.session.id
                     );
                     // Delete the scratch since we're consuming the queued message
                     if let Err(e) = Scratch::delete(
                         &db.pool,
-                        ctx.workspace.id,
+                        ctx.session.id,
                         &ScratchType::DraftFollowUp,
                     )
                     .await
@@ -509,8 +508,8 @@ impl LocalContainerService {
                 } else {
                     // Execution failed or was killed - discard the queued message and finalize
                     tracing::info!(
-                        "Discarding queued message for workspace {} due to execution status {:?}",
-                        ctx.workspace.id,
+                        "Discarding queued message for session {} due to execution status {:?}",
+                        ctx.session.id,
                         ctx.execution_process.status
                     );
                     container.finalize_task(publisher.as_ref().ok(), &ctx).await;

**`crates/server/src/routes/sessions/mod.rs`**

@@ -218,11 +218,11 @@ pub async fn follow_up(
     // Clear the draft follow-up scratch on successful spawn
     // This ensures the scratch is wiped even if the user navigates away quickly
-    if let Err(e) = Scratch::delete(pool, workspace.id, &ScratchType::DraftFollowUp).await {
+    if let Err(e) = Scratch::delete(pool, session.id, &ScratchType::DraftFollowUp).await {
         // Log but don't fail the request - scratch deletion is best-effort
         tracing::debug!(
-            "Failed to delete draft follow-up scratch for attempt {}: {}",
-            workspace.id,
+            "Failed to delete draft follow-up scratch for session {}: {}",
+            session.id,
             e
         );
     }

**`crates/server/src/routes/sessions/queue.rs`**

@@ -31,7 +31,7 @@ pub async fn queue_message(
     let queued = deployment
         .queued_message_service()
-        .queue_message(session.workspace_id, data);
+        .queue_message(session.id, data);
 
     deployment
         .track_if_analytics_allowed(
@@ -55,7 +55,7 @@ pub async fn cancel_queued_message(
 ) -> Result<ResponseJson<ApiResponse<QueueStatus>>, ApiError> {
     deployment
         .queued_message_service()
-        .cancel_queued(session.workspace_id);
+        .cancel_queued(session.id);
 
     deployment
         .track_if_analytics_allowed(
@@ -75,9 +75,7 @@ pub async fn get_queue_status(
     Extension(session): Extension<Session>,
     State(deployment): State<DeploymentImpl>,
 ) -> Result<ResponseJson<ApiResponse<QueueStatus>>, ApiError> {
-    let status = deployment
-        .queued_message_service()
-        .get_status(session.workspace_id);
+    let status = deployment.queued_message_service().get_status(session.id);
 
     Ok(ResponseJson(ApiResponse::success(status)))
 }

**`crates/services/src/services/queued_message.rs`**

@@ -7,19 +7,19 @@ use serde::{Deserialize, Serialize};
 use ts_rs::TS;
 use uuid::Uuid;
 
-/// Represents a queued follow-up message for a task attempt
+/// Represents a queued follow-up message for a session
 #[derive(Debug, Clone, Serialize, Deserialize, TS)]
 #[ts(export)]
 pub struct QueuedMessage {
-    /// The task attempt this message is queued for
-    pub task_attempt_id: Uuid,
+    /// The session this message is queued for
+    pub session_id: Uuid,
     /// The follow-up data (message + variant)
     pub data: DraftFollowUpData,
     /// Timestamp when the message was queued
     pub queued_at: DateTime<Utc>,
 }
 
-/// Status of the queue for a task attempt (for frontend display)
+/// Status of the queue for a session (for frontend display)
 #[derive(Debug, Clone, Serialize, Deserialize, TS)]
 #[serde(tag = "status", rename_all = "snake_case")]
 #[ts(export)]
@@ -31,7 +31,7 @@ pub enum QueueStatus {
 }
 
 /// In-memory service for managing queued follow-up messages.
-/// One queued message per task attempt.
+/// One queued message per session.
 #[derive(Clone)]
 pub struct QueuedMessageService {
     queue: Arc<DashMap<Uuid, QueuedMessage>>,
@@ -44,41 +44,41 @@ impl QueuedMessageService {
         }
     }
 
-    /// Queue a message for a task attempt. Replaces any existing queued message.
-    pub fn queue_message(&self, task_attempt_id: Uuid, data: DraftFollowUpData) -> QueuedMessage {
+    /// Queue a message for a session. Replaces any existing queued message.
+    pub fn queue_message(&self, session_id: Uuid, data: DraftFollowUpData) -> QueuedMessage {
         let queued = QueuedMessage {
-            task_attempt_id,
+            session_id,
             data,
             queued_at: Utc::now(),
         };
-        self.queue.insert(task_attempt_id, queued.clone());
+        self.queue.insert(session_id, queued.clone());
         queued
     }
 
-    /// Cancel/remove a queued message for a task attempt
-    pub fn cancel_queued(&self, task_attempt_id: Uuid) -> Option<QueuedMessage> {
-        self.queue.remove(&task_attempt_id).map(|(_, v)| v)
+    /// Cancel/remove a queued message for a session
+    pub fn cancel_queued(&self, session_id: Uuid) -> Option<QueuedMessage> {
+        self.queue.remove(&session_id).map(|(_, v)| v)
     }
 
-    /// Get the queued message for a task attempt (if any)
-    pub fn get_queued(&self, task_attempt_id: Uuid) -> Option<QueuedMessage> {
-        self.queue.get(&task_attempt_id).map(|r| r.clone())
+    /// Get the queued message for a session (if any)
+    pub fn get_queued(&self, session_id: Uuid) -> Option<QueuedMessage> {
+        self.queue.get(&session_id).map(|r| r.clone())
    }
 
-    /// Take (remove and return) the queued message for a task attempt.
+    /// Take (remove and return) the queued message for a session.
     /// Used by finalization flow to consume the queued message.
-    pub fn take_queued(&self, task_attempt_id: Uuid) -> Option<QueuedMessage> {
-        self.queue.remove(&task_attempt_id).map(|(_, v)| v)
+    pub fn take_queued(&self, session_id: Uuid) -> Option<QueuedMessage> {
+        self.queue.remove(&session_id).map(|(_, v)| v)
     }
 
-    /// Check if a task attempt has a queued message
-    pub fn has_queued(&self, task_attempt_id: Uuid) -> bool {
-        self.queue.contains_key(&task_attempt_id)
+    /// Check if a session has a queued message
+    pub fn has_queued(&self, session_id: Uuid) -> bool {
+        self.queue.contains_key(&session_id)
     }
 
     /// Get queue status for frontend display
-    pub fn get_status(&self, task_attempt_id: Uuid) -> QueueStatus {
-        match self.get_queued(task_attempt_id) {
+    pub fn get_status(&self, session_id: Uuid) -> QueueStatus {
+        match self.get_queued(session_id) {
             Some(msg) => QueueStatus::Queued { message: msg },
             None => QueueStatus::Empty,
         }
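
To make the per-session semantics above concrete, here is a minimal, test-style usage sketch. It is illustrative only: it assumes `QueuedMessageService`, `QueueStatus`, and `DraftFollowUpData` are imported from this crate, and it takes an already-constructed `data` value rather than building one.

```rust
use uuid::Uuid;

// Sketch only: exercises the session-keyed queue shown in the diff above.
fn queue_lifecycle_sketch(
    service: &QueuedMessageService,
    session_id: Uuid,
    data: DraftFollowUpData,
) {
    // Queueing is keyed by the session and replaces any earlier message.
    let queued = service.queue_message(session_id, data);
    assert_eq!(queued.session_id, session_id);
    assert!(service.has_queued(session_id));

    // Finalization consumes the message exactly once via take_queued.
    assert!(service.take_queued(session_id).is_some());
    assert!(service.get_queued(session_id).is_none());
    assert!(matches!(service.get_status(session_id), QueueStatus::Empty));
}
```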

**`frontend/src/components/tasks/TaskFollowUpSection.tsx`**

@@ -133,7 +133,7 @@ export function TaskFollowUpSection({
     scratch,
     updateScratch,
     isLoading: isScratchLoading,
-  } = useScratch(ScratchType.DRAFT_FOLLOW_UP, workspaceId ?? '');
+  } = useScratch(ScratchType.DRAFT_FOLLOW_UP, sessionId ?? '');
 
   // Derive the message and variant from scratch
   const scratchData: DraftFollowUpData | undefined =

**`shared/types.ts`**

@@ -330,9 +330,9 @@ export type SharedTaskDetails = { id: string, project_id: string, title: string,
 export type QueuedMessage = {
   /**
-   * The task attempt this message is queued for
+   * The session this message is queued for
    */
-  task_attempt_id: string,
+  session_id: string,
   /**
    * The follow-up data (message + variant)
    */