feat: manual approvals (#748)

* manual user approvals

* refactor implementation

* cleanup

* fix lint errors

* i18n

* remove isLastEntry frontend check

* address fe feedback

* always run claude plan with approvals

* add watchkill script back to plan mode

* update timeout

* tooltip hover

* use response type

* put back watchkill append hack
Author:       Gabriel Gordon-Hall
Date:         2025-09-22 16:02:42 +01:00
Committed by: GitHub
Parent:       eaff3dee9e
Commit:       798bcb80a3

51 changed files with 1808 additions and 198 deletions


@@ -33,8 +33,11 @@ use deployment::DeploymentError;
 use executors::{
     actions::{Executable, ExecutorAction},
     logs::{
-        NormalizedEntry, NormalizedEntryType,
-        utils::{ConversationPatch, patch::escape_json_pointer_segment},
+        NormalizedEntryType,
+        utils::{
+            ConversationPatch,
+            patch::{escape_json_pointer_segment, extract_normalized_entry_from_patch},
+        },
     },
 };
 use futures::{StreamExt, TryStreamExt, stream::select};
@@ -1298,7 +1301,7 @@ impl LocalContainerService {
         for msg in history.iter().rev() {
             if let LogMsg::JsonPatch(patch) = msg {
                 // Try to extract a NormalizedEntry from the patch
-                if let Some(entry) = self.extract_normalized_entry_from_patch(patch)
+                if let Some((_, entry)) = extract_normalized_entry_from_patch(patch)
                     && matches!(entry.entry_type, NormalizedEntryType::AssistantMessage)
                 {
                     let content = entry.content.trim();
@@ -1317,32 +1320,6 @@ impl LocalContainerService {
         None
     }
 
-    /// Extract a NormalizedEntry from a JsonPatch if it contains one
-    fn extract_normalized_entry_from_patch(
-        &self,
-        patch: &json_patch::Patch,
-    ) -> Option<NormalizedEntry> {
-        // Convert the patch to JSON to examine its structure
-        if let Ok(patch_json) = serde_json::to_value(patch)
-            && let Some(operations) = patch_json.as_array()
-        {
-            for operation in operations {
-                if let Some(value) = operation.get("value") {
-                    // Try to extract a NormalizedEntry from the value
-                    if let Some(patch_type) = value.get("type").and_then(|t| t.as_str())
-                        && patch_type == "NORMALIZED_ENTRY"
-                        && let Some(content) = value.get("content")
-                        && let Ok(entry) =
-                            serde_json::from_value::<NormalizedEntry>(content.clone())
-                    {
-                        return Some(entry);
-                    }
-                }
-            }
-        }
-        None
-    }
-
     /// Update the executor session summary with the final assistant message
     async fn update_executor_session_summary(&self, exec_id: &Uuid) -> Result<(), anyhow::Error> {
         // Check if there's an executor session for this execution process
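
For orientation, here is a minimal sketch of what the relocated helper in executors::logs::utils::patch might look like. Only the call site above and the removed method are visible in this diff, so the free-function form, the (index, entry) return shape, and the error handling below are assumptions inferred from the Some((_, entry)) destructuring.

// Hypothetical sketch only; the real helper is defined elsewhere in this PR.
// NormalizedEntry and json_patch are the crate types used in the hunks above.
pub fn extract_normalized_entry_from_patch(
    patch: &json_patch::Patch,
) -> Option<(usize, NormalizedEntry)> {
    // Convert the patch to JSON to examine its structure.
    let patch_json = serde_json::to_value(patch).ok()?;
    let operations = patch_json.as_array()?;
    for (index, operation) in operations.iter().enumerate() {
        // Add/replace operations carry the serialized entry under "value".
        if let Some(value) = operation.get("value")
            && value.get("type").and_then(|t| t.as_str()) == Some("NORMALIZED_ENTRY")
            && let Some(content) = value.get("content")
            && let Ok(entry) = serde_json::from_value::<NormalizedEntry>(content.clone())
        {
            return Some((index, entry));
        }
    }
    None
}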


@@ -6,6 +6,7 @@ use deployment::{Deployment, DeploymentError};
 use executors::profile::ExecutorConfigs;
 use services::services::{
     analytics::{AnalyticsConfig, AnalyticsContext, AnalyticsService, generate_user_id},
+    approvals::Approvals,
     auth::AuthService,
     config::{Config, load_config_from_file, save_config_to_file},
     container::ContainerService,
@@ -40,6 +41,7 @@ pub struct LocalDeployment {
     filesystem: FilesystemService,
     events: EventService,
     file_search_cache: Arc<FileSearchCache>,
+    approvals: Approvals,
 }
 
 #[async_trait]
@@ -103,6 +105,8 @@ impl Deployment for LocalDeployment {
             });
         }
 
+        let approvals = Approvals::new(db.pool.clone(), msg_stores.clone());
+
         // We need to make analytics accessible to the ContainerService
         // TODO: Handle this more gracefully
         let analytics_ctx = analytics.as_ref().map(|s| AnalyticsContext {
@@ -136,6 +140,7 @@ impl Deployment for LocalDeployment {
             filesystem,
             events,
             file_search_cache,
+            approvals,
         })
     }
 
@@ -193,4 +198,8 @@ impl Deployment for LocalDeployment {
     fn file_search_cache(&self) -> &Arc<FileSearchCache> {
         &self.file_search_cache
     }
+
+    fn approvals(&self) -> &Approvals {
+        &self.approvals
+    }
 }
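
Taken together, the wiring above amounts to a small service handle built from the database pool and the per-execution message stores, then exposed through the Deployment trait. The skeleton below is an assumption-based sketch for readability only: the concrete field types, and everything inside the approvals module itself, are not part of the hunks shown here.

// Hypothetical skeleton consistent with the calls above; field types are guesses.
use std::{collections::HashMap, sync::Arc};

use sqlx::SqlitePool; // assumed: `db.pool` is a sqlx pool
use tokio::sync::RwLock;
use uuid::Uuid;

pub struct MsgStore; // stand-in for the real per-execution message store

#[derive(Clone)]
pub struct Approvals {
    pool: SqlitePool,
    msg_stores: Arc<RwLock<HashMap<Uuid, Arc<MsgStore>>>>,
}

impl Approvals {
    pub fn new(
        pool: SqlitePool,
        msg_stores: Arc<RwLock<HashMap<Uuid, Arc<MsgStore>>>>,
    ) -> Self {
        Self { pool, msg_stores }
    }
}

// Consumers reach the service through the Deployment trait accessor added above:
//     let approvals = deployment.approvals();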