From 9507836e6b33b42bd7a826dc7dd19bd9001b2b51 Mon Sep 17 00:00:00 2001
From: Solomon
Date: Thu, 17 Jul 2025 18:36:14 +0100
Subject: [PATCH] sst Opencode (#239)

---
 backend/Cargo.toml                           |   1 +
 backend/src/bin/generate_types.rs            |   6 +-
 backend/src/executor.rs                      |  11 +-
 backend/src/executors/mod.rs                 |   2 +
 backend/src/executors/sst_opencode.rs        | 780 +++++++++++++++++++
 backend/src/executors/sst_opencode/filter.rs | 184 +++++
 backend/src/executors/sst_opencode/tools.rs  | 139 ++++
 backend/src/services/process_service.rs      |  13 +-
 backend/src/utils.rs                         |   1 +
 backend/src/utils/path.rs                    |  96 +++
 frontend/src/pages/McpServers.tsx            |  42 +-
 shared/types.ts                              |   8 +-
 12 files changed, 1271 insertions(+), 12 deletions(-)
 create mode 100644 backend/src/executors/sst_opencode.rs
 create mode 100644 backend/src/executors/sst_opencode/filter.rs
 create mode 100644 backend/src/executors/sst_opencode/tools.rs
 create mode 100644 backend/src/utils/path.rs

diff --git a/backend/Cargo.toml b/backend/Cargo.toml
index a23d7630..2e3c27ac 100644
--- a/backend/Cargo.toml
+++ b/backend/Cargo.toml
@@ -26,6 +26,7 @@ chrono = { version = "0.4", features = ["serde"] }
 uuid = { version = "1.0", features = ["v4", "serde"] }
 ts-rs = { version = "9.0", features = ["uuid-impl", "chrono-impl", "no-serde-warnings"] }
 dirs = "5.0"
+xdg = "3.0"
 git2 = "0.18"
 async-trait = "0.1"
 libc = "0.2"
diff --git a/backend/src/bin/generate_types.rs b/backend/src/bin/generate_types.rs
index 0b69fea8..f3e855df 100644
--- a/backend/src/bin/generate_types.rs
+++ b/backend/src/bin/generate_types.rs
@@ -12,7 +12,8 @@ export const EXECUTOR_TYPES: string[] = [
   "amp",
   "gemini",
   "charm-opencode",
-  "claude-code-router"
+  "claude-code-router",
+  "sst-opencode"
 ];
 
 export const EDITOR_TYPES: EditorType[] = [
@@ -31,7 +32,8 @@ export const EXECUTOR_LABELS: Record<string, string> = {
   "amp": "Amp",
   "gemini": "Gemini",
   "charm-opencode": "Charm Opencode",
-  "claude-code-router": "Claude Code Router"
+  "claude-code-router": "Claude Code Router",
+  "sst-opencode": "SST Opencode"
 };
 
 export const EDITOR_LABELS: Record<string, string> = {
diff --git a/backend/src/executor.rs b/backend/src/executor.rs
index f53cf95b..771e366e 100644
--- a/backend/src/executor.rs
+++ b/backend/src/executor.rs
@@ -8,7 +8,7 @@ use uuid::Uuid;
 
 use crate::executors::{
     AmpExecutor, CCRExecutor, CharmOpencodeExecutor, ClaudeExecutor, EchoExecutor, GeminiExecutor,
-    SetupScriptExecutor,
+    SetupScriptExecutor, SstOpencodeExecutor,
 };
 
 // Constants for database streaming - fast for near-real-time updates
@@ -358,6 +358,7 @@ pub enum ExecutorConfig {
     ClaudeCodeRouter,
     #[serde(alias = "charmopencode")]
     CharmOpencode,
+    SstOpencode,
     // Future executors can be added here
     // Shell { command: String },
     // Docker { image: String, command: String },
@@ -383,6 +384,7 @@ impl FromStr for ExecutorConfig {
             "gemini" => Ok(ExecutorConfig::Gemini),
             "charm-opencode" => Ok(ExecutorConfig::CharmOpencode),
             "claude-code-router" => Ok(ExecutorConfig::ClaudeCodeRouter),
+            "sst-opencode" => Ok(ExecutorConfig::SstOpencode),
             "setup-script" => Ok(ExecutorConfig::SetupScript {
                 script: "setup script".to_string(),
             }),
@@ -401,6 +403,7 @@ impl ExecutorConfig {
             ExecutorConfig::Gemini => Box::new(GeminiExecutor),
             ExecutorConfig::ClaudeCodeRouter => Box::new(CCRExecutor::new()),
             ExecutorConfig::CharmOpencode => Box::new(CharmOpencodeExecutor),
+            ExecutorConfig::SstOpencode => Box::new(SstOpencodeExecutor::new()),
             ExecutorConfig::SetupScript { script } => {
                 Box::new(SetupScriptExecutor::new(script.clone()))
             }
@@ -424,6 +427,9 @@ impl ExecutorConfig {
             ExecutorConfig::Gemini => {
                 dirs::home_dir().map(|home| home.join(".gemini").join("settings.json"))
             }
+            ExecutorConfig::SstOpencode => {
+                xdg::BaseDirectories::with_prefix("opencode").get_config_file("opencode.json")
+            }
             ExecutorConfig::SetupScript { .. } => None,
         }
     }
@@ -433,6 +439,7 @@ impl ExecutorConfig {
         match self {
             ExecutorConfig::Echo => None, // Echo doesn't support MCP
             ExecutorConfig::CharmOpencode => Some(vec!["mcpServers"]),
+            ExecutorConfig::SstOpencode => Some(vec!["mcp"]),
             ExecutorConfig::Claude => Some(vec!["mcpServers"]),
             ExecutorConfig::ClaudePlan => Some(vec!["mcpServers"]),
             ExecutorConfig::Amp => Some(vec!["amp", "mcpServers"]), // Nested path for Amp
@@ -455,6 +462,7 @@ impl ExecutorConfig {
         match self {
             ExecutorConfig::Echo => "Echo (Test Mode)",
             ExecutorConfig::CharmOpencode => "Charm Opencode",
+            ExecutorConfig::SstOpencode => "SST Opencode",
             ExecutorConfig::Claude => "Claude",
             ExecutorConfig::ClaudePlan => "Claude Plan",
             ExecutorConfig::Amp => "Amp",
@@ -473,6 +481,7 @@ impl std::fmt::Display for ExecutorConfig {
             ExecutorConfig::ClaudePlan => "claude-plan",
             ExecutorConfig::Amp => "amp",
             ExecutorConfig::Gemini => "gemini",
+            ExecutorConfig::SstOpencode => "sst-opencode",
             ExecutorConfig::CharmOpencode => "charm-opencode",
             ExecutorConfig::ClaudeCodeRouter => "claude-code-router",
             ExecutorConfig::SetupScript { .. } => "setup-script",
diff --git a/backend/src/executors/mod.rs b/backend/src/executors/mod.rs
index ff082321..ea1651e5 100644
--- a/backend/src/executors/mod.rs
+++ b/backend/src/executors/mod.rs
@@ -6,6 +6,7 @@ pub mod dev_server;
 pub mod echo;
 pub mod gemini;
 pub mod setup_script;
+pub mod sst_opencode;
 
 pub use amp::{AmpExecutor, AmpFollowupExecutor};
 pub use ccr::{CCRExecutor, CCRFollowupExecutor};
@@ -15,3 +16,4 @@ pub use dev_server::DevServerExecutor;
 pub use echo::EchoExecutor;
 pub use gemini::{GeminiExecutor, GeminiFollowupExecutor};
 pub use setup_script::SetupScriptExecutor;
+pub use sst_opencode::{SstOpencodeExecutor, SstOpencodeFollowupExecutor};
diff --git a/backend/src/executors/sst_opencode.rs b/backend/src/executors/sst_opencode.rs
new file mode 100644
index 00000000..f9af0d7a
--- /dev/null
+++ b/backend/src/executors/sst_opencode.rs
@@ -0,0 +1,780 @@
+use async_trait::async_trait;
+use command_group::{AsyncCommandGroup, AsyncGroupChild};
+use serde_json::{json, Value};
+use tokio::{
+    io::{AsyncBufReadExt, BufReader},
+    process::Command,
+};
+use uuid::Uuid;
+
+use crate::{
+    executor::{Executor, ExecutorError, NormalizedConversation, NormalizedEntry},
+    models::{execution_process::ExecutionProcess, executor_session::ExecutorSession, task::Task},
+    utils::shell::get_shell_command,
+};
+
+// Sub-modules for utilities
+pub mod filter;
+pub mod tools;
+
+use self::{
+    filter::{parse_session_id_from_line, tool_usage_regex, OpenCodeFilter},
+    tools::{determine_action_type, generate_tool_content, normalize_tool_name},
+};
+
+struct Content {
+    pub stdout: Option<String>,
+    pub stderr: Option<String>,
+}
+
+/// Process a single line for session extraction and content formatting
+async fn process_line_for_content(
+    line: &str,
+    session_extracted: &mut bool,
+    worktree_path: &str,
+    pool: &sqlx::SqlitePool,
+    execution_process_id: uuid::Uuid,
+) -> Option<Content> {
+    if !*session_extracted {
+        if let Some(session_id) = parse_session_id_from_line(line) {
+            if let Err(e) =
+                ExecutorSession::update_session_id(pool, execution_process_id, &session_id).await
+            {
+                tracing::error!(
+                    "Failed to update session ID for execution process {}: {}",
+                    execution_process_id,
+                    e
+                );
+            } else {
+                tracing::info!(
+                    "Updated session ID {} for execution process {}",
+                    session_id,
+                    execution_process_id
+                );
+                *session_extracted = true;
+            }
+
+            // Don't return any content for session lines
+            return None;
+        }
+    }
+
+    // Check if line is noise - if so, discard it
+    if OpenCodeFilter::is_noise(line) {
+        return None;
+    }
+
+    if OpenCodeFilter::is_stderr(line) {
+        // If it's stderr, we don't need to process it further
+        return Some(Content {
+            stdout: None,
+            stderr: Some(line.to_string()),
+        });
+    }
+
+    // Format clean content as normalized JSON
+    let formatted = format_opencode_content_as_normalized_json(line, worktree_path);
+    Some(Content {
+        stdout: Some(formatted),
+        stderr: None,
+    })
+}
+
+/// Stream stderr from OpenCode process with filtering to separate clean output from noise
+pub async fn stream_opencode_stderr_to_db(
+    output: impl tokio::io::AsyncRead + Unpin,
+    pool: sqlx::SqlitePool,
+    attempt_id: Uuid,
+    execution_process_id: Uuid,
+    worktree_path: String,
+) {
+    let mut reader = BufReader::new(output);
+    let mut line = String::new();
+    let mut session_extracted = false;
+
+    loop {
+        line.clear();
+
+        match reader.read_line(&mut line).await {
+            Ok(0) => break, // EOF
+            Ok(_) => {
+                line = line.trim_end_matches(['\r', '\n']).to_string();
+
+                let content = process_line_for_content(
+                    &line,
+                    &mut session_extracted,
+                    &worktree_path,
+                    &pool,
+                    execution_process_id,
+                )
+                .await;
+
+                if let Some(Content { stdout, stderr }) = content {
+                    tracing::debug!(
+                        "Processed OpenCode content for attempt {}: stdout={:?} stderr={:?}",
+                        attempt_id,
+                        stdout,
+                        stderr,
+                    );
+                    if let Err(e) = ExecutionProcess::append_output(
+                        &pool,
+                        execution_process_id,
+                        stdout.as_deref(),
+                        stderr.as_deref(),
+                    )
+                    .await
+                    {
+                        tracing::error!(
+                            "Failed to write OpenCode line for attempt {}: {}",
+                            attempt_id,
+                            e
+                        );
+                    }
+                }
+            }
+            Err(e) => {
+                tracing::error!("Error reading stderr for attempt {}: {}", attempt_id, e);
+                break;
+            }
+        }
+    }
+}
+
+/// Format OpenCode clean content as normalized JSON entries for direct database storage
+fn format_opencode_content_as_normalized_json(content: &str, worktree_path: &str) -> String {
+    let mut results = Vec::new();
+    let base_timestamp = chrono::Utc::now();
+    let mut entry_counter = 0u32;
+
+    for line in content.lines() {
+        let trimmed = line.trim();
+        if trimmed.is_empty() {
+            continue;
+        }
+
+        // Generate unique timestamp for each entry by adding microseconds
+        let unique_timestamp =
+            base_timestamp + chrono::Duration::microseconds(entry_counter as i64);
+        let timestamp_str = unique_timestamp.to_rfc3339_opts(chrono::SecondsFormat::Micros, true);
+        entry_counter += 1;
+
+        // Try to parse as existing JSON first
+        if let Ok(parsed_json) = serde_json::from_str::<Value>(trimmed) {
+            results.push(parsed_json.to_string());
+            continue;
+        }
+
+        // Strip ANSI codes before processing
+        let cleaned = OpenCodeFilter::strip_ansi_codes(trimmed);
+        let cleaned_trim = cleaned.trim();
+
+        if cleaned_trim.is_empty() {
+            continue;
+        }
+
+        // Check for tool usage patterns after ANSI stripping: | ToolName {...}
+        if let Some(captures) = tool_usage_regex().captures(cleaned_trim) {
+            if let (Some(tool_name), Some(tool_input)) = (captures.get(1), captures.get(2)) {
+                // Parse tool input
+                let input: serde_json::Value =
+                    serde_json::from_str(tool_input.as_str()).unwrap_or(serde_json::Value::Null);
+
+                // Normalize tool name for frontend compatibility (e.g., "Todo" → "todowrite")
+                let normalized_tool_name = normalize_tool_name(tool_name.as_str());
+
+                let normalized_entry = json!({
+                    "timestamp": timestamp_str,
+                    "entry_type": {
+                        "type": "tool_use",
+                        "tool_name": normalized_tool_name,
+                        "action_type": determine_action_type(&normalized_tool_name, &input, worktree_path)
+                    },
+                    "content": generate_tool_content(&normalized_tool_name, &input, worktree_path),
+                    "metadata": input
+                });
+                results.push(normalized_entry.to_string());
+                continue;
+            }
+        }
+
+        // Regular assistant message
+        let normalized_entry = json!({
+            "timestamp": timestamp_str,
+            "entry_type": {
+                "type": "assistant_message"
+            },
+            "content": cleaned_trim,
+            "metadata": null
+        });
+        results.push(normalized_entry.to_string());
+    }
+
+    // Ensure each JSON entry is on its own line
+    results.join("\n") + "\n"
+}
+
+/// An executor that uses SST Opencode CLI to process tasks
+pub struct SstOpencodeExecutor {
+    executor_type: String,
+    command: String,
+}
+
+impl Default for SstOpencodeExecutor {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl SstOpencodeExecutor {
+    /// Create a new SstOpencodeExecutor with default settings
+    pub fn new() -> Self {
+        Self {
+            executor_type: "SST Opencode".to_string(),
+            command: "npx -y opencode-ai@latest run --print-logs".to_string(),
+        }
+    }
+}
+
+/// An executor that resumes an SST Opencode session
+pub struct SstOpencodeFollowupExecutor {
+    pub session_id: String,
+    pub prompt: String,
+    executor_type: String,
+    command_base: String,
+}
+
+impl SstOpencodeFollowupExecutor {
+    /// Create a new SstOpencodeFollowupExecutor with default settings
+    pub fn new(session_id: String, prompt: String) -> Self {
+        Self {
+            session_id,
+            prompt,
+            executor_type: "SST Opencode".to_string(),
+            command_base: "npx -y opencode-ai@latest run --print-logs".to_string(),
+        }
+    }
+}
+
+#[async_trait]
+impl Executor for SstOpencodeExecutor {
+    async fn spawn(
+        &self,
+        pool: &sqlx::SqlitePool,
+        task_id: Uuid,
+        worktree_path: &str,
+    ) -> Result<AsyncGroupChild, ExecutorError> {
+        // Get the task to fetch its description
+        let task = Task::find_by_id(pool, task_id)
+            .await?
+            .ok_or(ExecutorError::TaskNotFound)?;
+
+        let prompt = if let Some(task_description) = task.description {
+            format!(
+                r#"project_id: {}
+
+Task title: {}
+Task description: {}"#,
+                task.project_id, task.title, task_description
+            )
+        } else {
+            format!(
+                r#"project_id: {}
+
+Task title: {}"#,
+                task.project_id, task.title
+            )
+        };
+
+        // Use shell command for cross-platform compatibility
+        let (shell_cmd, shell_arg) = get_shell_command();
+        let opencode_command = &self.command;
+
+        let mut command = Command::new(shell_cmd);
+        command
+            .kill_on_drop(true)
+            .stdin(std::process::Stdio::piped())
+            .stdout(std::process::Stdio::null()) // Ignore stdout for OpenCode
+            .stderr(std::process::Stdio::piped())
+            .current_dir(worktree_path)
+            .arg(shell_arg)
+            .arg(opencode_command)
+            .env("NODE_NO_WARNINGS", "1");
+
+        let mut child = command
+            .group_spawn() // Create new process group so we can kill entire tree
+            .map_err(|e| {
+                crate::executor::SpawnContext::from_command(&command, &self.executor_type)
+                    .with_task(task_id, Some(task.title.clone()))
+                    .with_context(format!("{} CLI execution for new task", self.executor_type))
+                    .spawn_error(e)
+            })?;
+
+        // Write prompt to stdin safely
+        if let Some(mut stdin) = child.inner().stdin.take() {
+            use tokio::io::AsyncWriteExt;
+            tracing::debug!(
+                "Writing prompt to OpenCode stdin for task {}: {:?}",
+                task_id,
+                prompt
+            );
+            stdin.write_all(prompt.as_bytes()).await.map_err(|e| {
+                let context =
+                    crate::executor::SpawnContext::from_command(&command, &self.executor_type)
+                        .with_task(task_id, Some(task.title.clone()))
+                        .with_context(format!(
+                            "Failed to write prompt to {} CLI stdin",
+                            self.executor_type
+                        ));
+                ExecutorError::spawn_failed(e, context)
+            })?;
+            stdin.shutdown().await.map_err(|e| {
+                let context =
+                    crate::executor::SpawnContext::from_command(&command, &self.executor_type)
+                        .with_task(task_id, Some(task.title.clone()))
+                        .with_context(format!("Failed to close {} CLI stdin", self.executor_type));
+                ExecutorError::spawn_failed(e, context)
+            })?;
+        }
+
+        Ok(child)
+    }
+
+    /// Execute with OpenCode filtering for stderr
+    async fn execute_streaming(
+        &self,
+        pool: &sqlx::SqlitePool,
+        task_id: Uuid,
+        attempt_id: Uuid,
+        execution_process_id: Uuid,
+        worktree_path: &str,
+    ) -> Result<AsyncGroupChild, ExecutorError> {
+        let mut child = self.spawn(pool, task_id, worktree_path).await?;
+
+        // Take stderr pipe for OpenCode filtering
+        let stderr = child
+            .inner()
+            .stderr
+            .take()
+            .expect("Failed to take stderr from child process");
+
+        // Start OpenCode stderr filtering task
+        let pool_clone = pool.clone();
+        let worktree_path_clone = worktree_path.to_string();
+        tokio::spawn(stream_opencode_stderr_to_db(
+            stderr,
+            pool_clone,
+            attempt_id,
+            execution_process_id,
+            worktree_path_clone,
+        ));
+
+        Ok(child)
+    }
+
+    fn normalize_logs(
+        &self,
+        logs: &str,
+        _worktree_path: &str,
+    ) -> Result<NormalizedConversation, String> {
+        let mut entries = Vec::new();
+
+        for line in logs.lines() {
+            let trimmed = line.trim();
+            if trimmed.is_empty() {
+                continue;
+            }
+
+            // Simple passthrough: directly deserialize normalized JSON entries
+            if let Ok(entry) = serde_json::from_str::<NormalizedEntry>(trimmed) {
+                entries.push(entry);
+            }
+        }
+
+        Ok(NormalizedConversation {
+            entries,
+            session_id: None, // Session ID is stored directly in the database
+            executor_type: "sst-opencode".to_string(),
+            prompt: None,
+            summary: None,
+        })
+    }
+}
+
+#[async_trait]
+impl Executor for SstOpencodeFollowupExecutor {
+    async fn spawn(
+        &self,
+        _pool: &sqlx::SqlitePool,
+        _task_id: Uuid,
+        worktree_path: &str,
+    ) -> Result<AsyncGroupChild, ExecutorError> {
+        // Use shell command for cross-platform compatibility
+        let (shell_cmd, shell_arg) = get_shell_command();
+        let opencode_command = format!("{} --session {}", self.command_base, self.session_id);
+
+        let mut command = Command::new(shell_cmd);
+        command
+            .kill_on_drop(true)
+            .stdin(std::process::Stdio::piped())
+            .stdout(std::process::Stdio::null()) // Ignore stdout for OpenCode
+            .stderr(std::process::Stdio::piped())
+            .current_dir(worktree_path)
+            .arg(shell_arg)
+            .arg(&opencode_command)
+            .env("NODE_NO_WARNINGS", "1");
+
+        let mut child = command
+            .group_spawn() // Create new process group so we can kill entire tree
+            .map_err(|e| {
+                crate::executor::SpawnContext::from_command(&command, &self.executor_type)
+                    .with_context(format!(
+                        "{} CLI followup execution for session {}",
+                        self.executor_type, self.session_id
+                    ))
+                    .spawn_error(e)
+            })?;
+
+        // Write prompt to stdin safely
+        if let Some(mut stdin) = child.inner().stdin.take() {
+            use tokio::io::AsyncWriteExt;
+            tracing::debug!(
+                "Writing prompt to {} stdin for session {}: {:?}",
+                self.executor_type,
+                self.session_id,
+                self.prompt
+            );
+            stdin.write_all(self.prompt.as_bytes()).await.map_err(|e| {
+                let context =
+                    crate::executor::SpawnContext::from_command(&command, &self.executor_type)
+                        .with_context(format!(
+                            "Failed to write prompt to {} CLI stdin for session {}",
+                            self.executor_type, self.session_id
+                        ));
+                ExecutorError::spawn_failed(e, context)
+            })?;
+            stdin.shutdown().await.map_err(|e| {
+                let context =
+                    crate::executor::SpawnContext::from_command(&command, &self.executor_type)
+                        .with_context(format!(
+                            "Failed to close {} CLI stdin for session {}",
+                            self.executor_type, self.session_id
+                        ));
+                ExecutorError::spawn_failed(e, context)
+            })?;
+        }
+
+        Ok(child)
+    }
+
+    /// Execute with OpenCode filtering for stderr
+    async fn execute_streaming(
+        &self,
+        pool: &sqlx::SqlitePool,
+        task_id: Uuid,
+        attempt_id: Uuid,
+        execution_process_id: Uuid,
+        worktree_path: &str,
+    ) -> Result<AsyncGroupChild, ExecutorError> {
+        let mut child = self.spawn(pool, task_id, worktree_path).await?;
+
+        // Take stderr pipe for OpenCode filtering
+        let stderr = child
+            .inner()
+            .stderr
+            .take()
+            .expect("Failed to take stderr from child process");
+
+        // Start OpenCode stderr filtering task
+        let pool_clone = pool.clone();
+        let worktree_path_clone = worktree_path.to_string();
+        tokio::spawn(stream_opencode_stderr_to_db(
+            stderr,
+            pool_clone,
+            attempt_id,
+            execution_process_id,
+            worktree_path_clone,
+        ));
+
+        Ok(child)
+    }
+
+    fn normalize_logs(
+        &self,
+        logs: &str,
+        worktree_path: &str,
+    ) -> Result<NormalizedConversation, String> {
+        // Reuse the same logic as the main SstOpencodeExecutor
+        let main_executor = SstOpencodeExecutor::new();
+        main_executor.normalize_logs(logs, worktree_path)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        executor::ActionType,
+        executors::sst_opencode::{
+            format_opencode_content_as_normalized_json, SstOpencodeExecutor,
+        },
+    };
+
+    // Test the actual format that comes from the database (normalized JSON entries)
+    #[test]
+    fn test_normalize_logs_with_database_format() {
+        let executor = SstOpencodeExecutor::new();
+
+        // This is what the database should contain after our streaming function processes it
+        let logs = r#"{"timestamp":"2025-07-16T18:04:00Z","entry_type":{"type":"tool_use","tool_name":"Read","action_type":{"action":"file_read","path":"hello.js"}},"content":"`hello.js`","metadata":{"filePath":"/path/to/repo/hello.js"}}
+{"timestamp":"2025-07-16T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"I'll read the hello.js file to see its current contents.","metadata":null}
+{"timestamp":"2025-07-16T18:04:02Z","entry_type":{"type":"tool_use","tool_name":"bash","action_type":{"action":"command_run","command":"ls -la"}},"content":"`ls -la`","metadata":{"command":"ls -la"}}
+{"timestamp":"2025-07-16T18:04:03Z","entry_type":{"type":"assistant_message"},"content":"The file exists and contains a hello world function.","metadata":null}"#;
+
+        let result = executor.normalize_logs(logs, "/path/to/repo").unwrap();
+
+        assert_eq!(result.entries.len(), 4);
+
+        // First entry: file read tool use
+        assert!(matches!(
+            result.entries[0].entry_type,
+            crate::executor::NormalizedEntryType::ToolUse { .. }
+        ));
+        if let crate::executor::NormalizedEntryType::ToolUse {
+            tool_name,
+            action_type,
+        } = &result.entries[0].entry_type
+        {
+            assert_eq!(tool_name, "Read");
+            assert!(matches!(action_type, ActionType::FileRead { .. }));
+        }
+        assert_eq!(result.entries[0].content, "`hello.js`");
+        assert!(result.entries[0].timestamp.is_some());
+
+        // Second entry: assistant message
+        assert!(matches!(
+            result.entries[1].entry_type,
+            crate::executor::NormalizedEntryType::AssistantMessage
+        ));
+        assert!(result.entries[1].content.contains("read the hello.js file"));
+
+        // Third entry: bash tool use
+        assert!(matches!(
+            result.entries[2].entry_type,
+            crate::executor::NormalizedEntryType::ToolUse { .. }
+        ));
+        if let crate::executor::NormalizedEntryType::ToolUse {
+            tool_name,
+            action_type,
+        } = &result.entries[2].entry_type
+        {
+            assert_eq!(tool_name, "bash");
+            assert!(matches!(action_type, ActionType::CommandRun { .. }));
+        }
+
+        // Fourth entry: assistant message
+        assert!(matches!(
+            result.entries[3].entry_type,
+            crate::executor::NormalizedEntryType::AssistantMessage
+        ));
+        assert!(result.entries[3].content.contains("The file exists"));
+    }
+
+    #[test]
+    fn test_normalize_logs_with_session_id() {
+        let executor = SstOpencodeExecutor::new();
+
+        // Test session ID in JSON metadata - current implementation always returns None for session_id
+        let logs = r#"{"timestamp":"2025-07-16T18:04:00Z","entry_type":{"type":"assistant_message"},"content":"Session started","metadata":null,"session_id":"ses_abc123"}
+{"timestamp":"2025-07-16T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"Hello world","metadata":null}"#;
+
+        let result = executor.normalize_logs(logs, "/tmp").unwrap();
+        assert_eq!(result.session_id, None); // Session ID is stored directly in the database
+        assert_eq!(result.entries.len(), 2);
+    }
+
+    #[test]
+    fn test_normalize_logs_legacy_fallback() {
+        let executor = SstOpencodeExecutor::new();
+
+        // Current implementation doesn't handle legacy format - it only parses JSON entries
+        let logs = r#"INFO session=ses_legacy123 starting
+| Read {"filePath":"/path/to/file.js"}
+This is a plain assistant message"#;
+
+        let result = executor.normalize_logs(logs, "/tmp").unwrap();
+
+        // Session ID is always None in current implementation
+        assert_eq!(result.session_id, None);
+
+        // Current implementation skips non-JSON lines, so no entries will be parsed
+        assert_eq!(result.entries.len(), 0);
+    }
+
+    #[test]
+    fn test_format_opencode_content_as_normalized_json() {
+        let content = r#"| Read {"filePath":"/path/to/repo/hello.js"}
+I'll read this file to understand its contents.
+| bash {"command":"ls -la"}
+The file listing shows several items."#;
+
+        let result = format_opencode_content_as_normalized_json(content, "/path/to/repo");
+        let lines: Vec<&str> = result
+            .split('\n')
+            .filter(|line| !line.trim().is_empty())
+            .collect();
+
+        // Should have 4 entries (2 tool uses + 2 assistant messages)
+        assert_eq!(lines.len(), 4);
+
+        // Parse all entries and verify unique timestamps
+        let mut timestamps = Vec::new();
+        for line in &lines {
+            let json: serde_json::Value = serde_json::from_str(line).unwrap();
+            let timestamp = json["timestamp"].as_str().unwrap().to_string();
+            timestamps.push(timestamp);
+        }
+
+        // Verify all timestamps are unique (no duplicates)
+        let mut unique_timestamps = timestamps.clone();
+        unique_timestamps.sort();
+        unique_timestamps.dedup();
+        assert_eq!(
+            timestamps.len(),
+            unique_timestamps.len(),
+            "All timestamps should be unique"
+        );
+
+        // Parse the first line (should be Read tool use)
+        let first_json: serde_json::Value = serde_json::from_str(lines[0]).unwrap();
+        assert_eq!(first_json["entry_type"]["type"], "tool_use");
+        assert_eq!(first_json["entry_type"]["tool_name"], "Read");
+        assert_eq!(first_json["content"], "`hello.js`");
+
+        // Parse the second line (should be assistant message)
+        let second_json: serde_json::Value = serde_json::from_str(lines[1]).unwrap();
+        assert_eq!(second_json["entry_type"]["type"], "assistant_message");
+        assert!(second_json["content"]
+            .as_str()
+            .unwrap()
+            .contains("read this file"));
+
+        // Parse the third line (should be bash tool use)
+        let third_json: serde_json::Value = serde_json::from_str(lines[2]).unwrap();
+        assert_eq!(third_json["entry_type"]["type"], "tool_use");
+        assert_eq!(third_json["entry_type"]["tool_name"], "bash");
+        assert_eq!(third_json["content"], "`ls -la`");
+
+        // Verify timestamps include microseconds for uniqueness
+        for timestamp in timestamps {
+            assert!(
+                timestamp.contains('.'),
+                "Timestamp should include microseconds: {}",
+                timestamp
+            );
+        }
+    }
+
+    #[test]
+    fn test_format_opencode_content_todo_operations() {
+        let content = r#"| TodoWrite {"todos":[{"id":"1","content":"Fix bug","status":"completed","priority":"high"},{"id":"2","content":"Add feature","status":"in_progress","priority":"medium"}]}"#;
+
+        let result = format_opencode_content_as_normalized_json(content, "/tmp");
+        let json: serde_json::Value = serde_json::from_str(&result).unwrap();
+
+        assert_eq!(json["entry_type"]["type"], "tool_use");
+        assert_eq!(json["entry_type"]["tool_name"], "todowrite"); // Normalized from "TodoWrite"
+        assert_eq!(json["entry_type"]["action_type"]["action"], "other"); // Changed from task_create to other
+
+        // Should contain formatted todo list
+        let content_str = json["content"].as_str().unwrap();
+        assert!(content_str.contains("TODO List:"));
+        assert!(content_str.contains("✅ Fix bug (high)"));
+        assert!(content_str.contains("🔄 Add feature (medium)"));
+    }
+
+    #[test]
+    fn test_format_opencode_content_todo_tool() {
+        // Test the "Todo" tool (case-sensitive, different from todowrite/todoread)
+        let content = r#"| Todo {"todos":[{"id":"1","content":"Review code","status":"pending","priority":"high"},{"id":"2","content":"Write tests","status":"in_progress","priority":"low"}]}"#;
+
+        let result = format_opencode_content_as_normalized_json(content, "/tmp");
+        let json: serde_json::Value = serde_json::from_str(&result).unwrap();
+
+        assert_eq!(json["entry_type"]["type"], "tool_use");
+        assert_eq!(json["entry_type"]["tool_name"], "todowrite"); // Normalized from "Todo"
+        assert_eq!(json["entry_type"]["action_type"]["action"], "other"); // Changed from task_create to other
+
+        // Should contain formatted todo list with proper emojis
+        let content_str = json["content"].as_str().unwrap();
+        assert!(content_str.contains("TODO List:"));
+        assert!(content_str.contains("⏳ Review code (high)"));
+        assert!(content_str.contains("🔄 Write tests (low)"));
+    }
+
+    #[test]
+    fn test_opencode_filter_noise_detection() {
+        use crate::executors::sst_opencode::filter::OpenCodeFilter;
+
+        // Test noise detection
+        assert!(OpenCodeFilter::is_noise(""));
+        assert!(OpenCodeFilter::is_noise(" "));
+        assert!(OpenCodeFilter::is_noise("█▀▀█ █▀▀█ Banner"));
+        assert!(OpenCodeFilter::is_noise("@ anthropic/claude-sonnet-4"));
+        assert!(OpenCodeFilter::is_noise("~ https://opencode.ai/s/abc123"));
+        assert!(OpenCodeFilter::is_noise("DEBUG some debug info"));
+        assert!(OpenCodeFilter::is_noise("INFO session info"));
+        assert!(OpenCodeFilter::is_noise("┌─────────────────┐"));
+
+        // Test clean content detection (not noise)
+        assert!(!OpenCodeFilter::is_noise("| Read {\"file\":\"test.js\"}"));
+        assert!(!OpenCodeFilter::is_noise("Assistant response text"));
+        assert!(!OpenCodeFilter::is_noise("{\"type\":\"content\"}"));
+        assert!(!OpenCodeFilter::is_noise("session=abc123 started"));
+        assert!(!OpenCodeFilter::is_noise("Normal conversation text"));
+    }
+
+    #[test]
+    fn test_normalize_logs_edge_cases() {
+        let executor = SstOpencodeExecutor::new();
+
+        // Empty content
+        let result = executor.normalize_logs("", "/tmp").unwrap();
+        assert_eq!(result.entries.len(), 0);
+
+        // Only whitespace
+        let result = executor.normalize_logs(" \n\t\n ", "/tmp").unwrap();
+        assert_eq!(result.entries.len(), 0);
+
+        // Malformed JSON (current implementation skips invalid JSON)
+        let malformed = r#"{"timestamp":"2025-01-16T18:04:00Z","content":"incomplete"#;
+        let result = executor.normalize_logs(malformed, "/tmp").unwrap();
+        assert_eq!(result.entries.len(), 0); // Current implementation skips invalid JSON
+
+        // Mixed valid and invalid JSON
+        let mixed = r#"{"timestamp":"2025-01-16T18:04:00Z","entry_type":{"type":"assistant_message"},"content":"Valid entry","metadata":null}
+Invalid line that's not JSON
+{"timestamp":"2025-01-16T18:04:01Z","entry_type":{"type":"assistant_message"},"content":"Another valid entry","metadata":null}"#;
+        let result = executor.normalize_logs(mixed, "/tmp").unwrap();
+        assert_eq!(result.entries.len(), 2); // Only valid JSON entries are parsed
+    }
+
+    #[test]
+    fn test_ansi_code_stripping() {
+        use crate::executors::sst_opencode::filter::OpenCodeFilter;
+
+        // Test ANSI escape sequence removal
+        let ansi_text = "\x1b[31mRed text\x1b[0m normal text";
+        let cleaned = OpenCodeFilter::strip_ansi_codes(ansi_text);
+        assert_eq!(cleaned, "Red text normal text");
+
+        // Test unicode escape sequences
+        let unicode_ansi = "Text with \\u001b[32mgreen\\u001b[0m color";
+        let cleaned = OpenCodeFilter::strip_ansi_codes(unicode_ansi);
+        assert_eq!(cleaned, "Text with green color");
+
+        // Test text without ANSI codes (unchanged)
+        let plain_text = "Regular text without codes";
+        let cleaned = OpenCodeFilter::strip_ansi_codes(plain_text);
+        assert_eq!(cleaned, plain_text);
+    }
+}
diff --git a/backend/src/executors/sst_opencode/filter.rs b/backend/src/executors/sst_opencode/filter.rs
new file mode 100644
index 00000000..bf900cf1
--- /dev/null
+++ b/backend/src/executors/sst_opencode/filter.rs
@@ -0,0 +1,184 @@
+use lazy_static::lazy_static;
+use regex::Regex;
+
+lazy_static! {
+    static ref OPENCODE_LOG_REGEX: Regex = Regex::new(r"^(INFO|DEBUG|WARN|ERROR)\s+.*").unwrap();
+    static ref SESSION_ID_REGEX: Regex = Regex::new(r".*\b(id|session|sessionID)=([^ ]+)").unwrap();
+    static ref TOOL_USAGE_REGEX: Regex = Regex::new(r"^\|\s*([a-zA-Z]+)\s*(.*)").unwrap();
+    static ref NPM_WARN_REGEX: Regex = Regex::new(r"^npm warn .*").unwrap();
+}
+
+/// Filter for OpenCode stderr output
+pub struct OpenCodeFilter;
+
+impl OpenCodeFilter {
+    /// Check if a line should be skipped as noise
+    pub fn is_noise(line: &str) -> bool {
+        let trimmed = line.trim();
+
+        // Empty lines are noise
+        if trimmed.is_empty() {
+            return true;
+        }
+
+        // Strip ANSI escape codes for analysis
+        let cleaned = Self::strip_ansi_codes(trimmed);
+        let cleaned_trim = cleaned.trim();
+
+        // Skip tool calls - they are NOT noise
+        if TOOL_USAGE_REGEX.is_match(cleaned_trim) {
+            return false;
+        }
+
+        // OpenCode log lines are noise (includes session logs)
+        if is_opencode_log_line(cleaned_trim) {
+            return true;
+        }
+
+        if NPM_WARN_REGEX.is_match(cleaned_trim) {
+            return true;
+        }
+
+        // Spinner glyphs
+        if cleaned_trim.len() == 1 && "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏".contains(cleaned_trim) {
+            return true;
+        }
+
+        // Banner lines containing block glyphs (Unicode Block Elements range)
+        if cleaned_trim
+            .chars()
+            .any(|c| ('\u{2580}'..='\u{259F}').contains(&c))
+        {
+            return true;
+        }
+
+        // UI/stats frames using Box Drawing glyphs (U+2500-257F)
+        if cleaned_trim
+            .chars()
+            .any(|c| ('\u{2500}'..='\u{257F}').contains(&c))
+        {
+            return true;
+        }
+
+        // Model banner (@ with spaces)
+        if cleaned_trim.starts_with("@ ") {
+            return true;
+        }
+
+        // Share link
+        if cleaned_trim.starts_with("~") && cleaned_trim.contains("https://opencode.ai/s/") {
+            return true;
+        }
+
+        // Everything else (assistant messages) is NOT noise
+        false
+    }
+
+    pub fn is_stderr(_line: &str) -> bool {
+        false
+    }
+
+    /// Strip ANSI escape codes from text (conservative)
+    pub fn strip_ansi_codes(text: &str) -> String {
+        // Handle both unicode escape sequences and raw ANSI codes
+        let result = text.replace("\\u001b", "\x1b");
+
+        let mut cleaned = String::new();
+        let mut chars = result.chars().peekable();
+
+        while let Some(ch) = chars.next() {
+            if ch == '\x1b' {
+                // Skip ANSI escape sequence
+                if chars.peek() == Some(&'[') {
+                    chars.next(); // consume '['
+                    // Skip until we find a letter (end of ANSI sequence)
+                    for next_ch in chars.by_ref() {
+                        if next_ch.is_ascii_alphabetic() {
+                            break;
+                        }
+                    }
+                }
+            } else {
+                cleaned.push(ch);
+            }
+        }
+
+        cleaned
+    }
+}
+
+/// Detect if a line is an OpenCode log line format using regex
+pub fn is_opencode_log_line(line: &str) -> bool {
+    OPENCODE_LOG_REGEX.is_match(line)
+}
+
+/// Parse session_id from OpenCode log lines
+pub fn parse_session_id_from_line(line: &str) -> Option<String> {
+    // Only apply to OpenCode log lines
+    if !is_opencode_log_line(line) {
+        return None;
+    }
+
+    // Try regex for session ID extraction from service=session logs
+    if let Some(captures) = SESSION_ID_REGEX.captures(line) {
+        if let Some(id) = captures.get(2) {
+            return Some(id.as_str().to_string());
+        }
+    }
+
+    None
+}
+
+/// Get the tool usage regex for parsing tool patterns
+pub fn tool_usage_regex() -> &'static Regex {
+    &TOOL_USAGE_REGEX
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn test_session_id_extraction() {
+        use crate::executors::sst_opencode::filter::parse_session_id_from_line;
+
+        // Test session ID extraction from session= format (only works on OpenCode log lines)
+        assert_eq!(
+            parse_session_id_from_line("INFO session=ses_abc123 starting"),
+            Some("ses_abc123".to_string())
+        );
+
+        assert_eq!(
+            parse_session_id_from_line("DEBUG id=debug_id process"),
+            Some("debug_id".to_string())
+        );
+
+        // Test lines without log prefix (should return None)
+        assert_eq!(
+            parse_session_id_from_line("session=simple_id chatting"),
+            None
+        );
+
+        // Test no session ID
+        assert_eq!(parse_session_id_from_line("No session here"), None);
+        assert_eq!(parse_session_id_from_line(""), None);
+    }
+
+    #[test]
+    fn test_ansi_code_stripping() {
+        use crate::executors::sst_opencode::filter::OpenCodeFilter;
+
+        // Test ANSI escape sequence removal
+        let ansi_text = "\x1b[31mRed text\x1b[0m normal text";
+        let cleaned = OpenCodeFilter::strip_ansi_codes(ansi_text);
+        assert_eq!(cleaned, "Red text normal text");
+
+        // Test unicode escape sequences
+        let unicode_ansi = "Text with \\u001b[32mgreen\\u001b[0m color";
+        let cleaned = OpenCodeFilter::strip_ansi_codes(unicode_ansi);
+        assert_eq!(cleaned, "Text with green color");
+
+        // Test text without ANSI codes (unchanged)
+        let plain_text = "Regular text without codes";
+        let cleaned = OpenCodeFilter::strip_ansi_codes(plain_text);
+        assert_eq!(cleaned, plain_text);
+    }
+}
diff --git a/backend/src/executors/sst_opencode/tools.rs b/backend/src/executors/sst_opencode/tools.rs
new file mode 100644
index 00000000..cbc9bfc3
--- /dev/null
+++ b/backend/src/executors/sst_opencode/tools.rs
@@ -0,0 +1,139 @@
+use serde_json::{json, Value};
+
+use crate::utils::path::make_path_relative;
+
+/// Normalize tool names to match frontend expectations for purple box styling
+pub fn normalize_tool_name(tool_name: &str) -> String {
+    match tool_name {
+        "Todo" => "todowrite".to_string(), // Generic TODO tool → todowrite
+        "TodoWrite" => "todowrite".to_string(),
+        "TodoRead" => "todoread".to_string(),
+        _ => tool_name.to_string(),
+    }
+}
+
+/// Helper function to determine action type for tool usage
+pub fn determine_action_type(tool_name: &str, input: &Value, worktree_path: &str) -> Value {
+    match tool_name.to_lowercase().as_str() {
+        "read" => {
+            if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) {
+                json!({
+                    "action": "file_read",
+                    "path": make_path_relative(file_path, worktree_path)
+                })
+            } else {
+                json!({"action": "other", "description": "File read operation"})
+            }
+        }
+        "write" | "edit" => {
+            if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) {
+                json!({
+                    "action": "file_write",
+                    "path": make_path_relative(file_path, worktree_path)
+                })
+            } else {
+                json!({"action": "other", "description": "File write operation"})
+            }
+        }
+        "bash" => {
+            if let Some(command) = input.get("command").and_then(|c| c.as_str()) {
+                json!({"action": "command_run", "command": command})
+            } else {
+                json!({"action": "other", "description": "Command execution"})
+            }
+        }
+        "grep" => {
+            if let Some(pattern) = input.get("pattern").and_then(|p| p.as_str()) {
+                json!({"action": "search", "query": pattern})
+            } else {
+                json!({"action": "other", "description": "Search operation"})
+            }
+        }
+        "todowrite" | "todoread" => {
+            json!({"action": "other", "description": "TODO list management"})
+        }
+        _ => json!({"action": "other", "description": format!("Tool: {}", tool_name)}),
+    }
+}
+
+/// Helper function to generate concise content for tool usage
+pub fn generate_tool_content(tool_name: &str, input: &Value, worktree_path: &str) -> String {
+    match tool_name.to_lowercase().as_str() {
+        "read" => {
+            if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) {
+                format!("`{}`", make_path_relative(file_path, worktree_path))
+            } else {
+                "Read file".to_string()
+            }
+        }
+        "write" | "edit" => {
+            if let Some(file_path) = input.get("filePath").and_then(|p| p.as_str()) {
+                format!("`{}`", make_path_relative(file_path, worktree_path))
+            } else {
+                "Write file".to_string()
+            }
+        }
+        "bash" => {
+            if let Some(command) = input.get("command").and_then(|c| c.as_str()) {
+                format!("`{}`", command)
+            } else {
+                "Execute command".to_string()
+            }
+        }
+        "todowrite" | "todoread" => generate_todo_content(input),
+        _ => format!("`{}`", tool_name),
+    }
+}
+
+/// Generate formatted content for TODO tools
+fn generate_todo_content(input: &Value) -> String {
+    // Extract todo list from input to show actual todos
+    if let Some(todos) = input.get("todos").and_then(|t| t.as_array()) {
+        let mut todo_items = Vec::new();
+        for todo in todos {
+            if let Some(content) = todo.get("content").and_then(|c| c.as_str()) {
+                let status = todo
+                    .get("status")
+                    .and_then(|s| s.as_str())
+                    .unwrap_or("pending");
+                let status_emoji = match status {
+                    "completed" => "✅",
+                    "in_progress" => "🔄",
+                    "pending" | "todo" => "⏳",
+                    _ => "📝",
+                };
+                let priority = todo
+                    .get("priority")
+                    .and_then(|p| p.as_str())
+                    .unwrap_or("medium");
+                todo_items.push(format!("{} {} ({})", status_emoji, content, priority));
+            }
+        }
+        if !todo_items.is_empty() {
+            format!("TODO List:\n{}", todo_items.join("\n"))
+        } else {
+            "Managing TODO list".to_string()
+        }
+    } else {
+        "Managing TODO list".to_string()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn test_normalize_tool_name() {
+        use crate::executors::sst_opencode::tools::normalize_tool_name;
+
+        // Test TODO tool normalization
+        assert_eq!(normalize_tool_name("Todo"), "todowrite");
+        assert_eq!(normalize_tool_name("TodoWrite"), "todowrite");
+        assert_eq!(normalize_tool_name("TodoRead"), "todoread");
+
+        // Test other tools remain unchanged
+        assert_eq!(normalize_tool_name("Read"), "Read");
+        assert_eq!(normalize_tool_name("Write"), "Write");
+        assert_eq!(normalize_tool_name("bash"), "bash");
+        assert_eq!(normalize_tool_name("SomeOtherTool"), "SomeOtherTool");
+    }
+}
diff --git a/backend/src/services/process_service.rs b/backend/src/services/process_service.rs
index a11512f4..f17fb116 100644
--- a/backend/src/services/process_service.rs
+++ b/backend/src/services/process_service.rs
@@ -623,6 +623,7 @@ impl ProcessService {
             Some("amp") => crate::executor::ExecutorConfig::Amp,
             Some("gemini") => crate::executor::ExecutorConfig::Gemini,
             Some("charm-opencode") => crate::executor::ExecutorConfig::CharmOpencode,
+            Some("sst-opencode") => crate::executor::ExecutorConfig::SstOpencode,
             _ => crate::executor::ExecutorConfig::Echo, // Default for "echo" or None
         }
     }
@@ -744,7 +745,7 @@ impl ProcessService {
             } => {
                 use crate::executors::{
                     AmpFollowupExecutor, CCRFollowupExecutor, CharmOpencodeFollowupExecutor,
-                    ClaudeFollowupExecutor, GeminiFollowupExecutor,
+                    ClaudeFollowupExecutor, GeminiFollowupExecutor, SstOpencodeFollowupExecutor,
                 };
 
                 let executor: Box<dyn Executor> = match config {
@@ -803,6 +804,16 @@ impl ProcessService {
                             return Err(TaskAttemptError::TaskNotFound); // No session ID for followup
                         }
                     }
+                    crate::executor::ExecutorConfig::SstOpencode => {
+                        if let Some(sid) = session_id {
+                            Box::new(SstOpencodeFollowupExecutor::new(
+                                sid.clone(),
+                                prompt.clone(),
+                            ))
+                        } else {
+                            return Err(TaskAttemptError::TaskNotFound); // No session ID for followup
+                        }
+                    }
                     crate::executor::ExecutorConfig::SetupScript { .. } => {
                         // Setup scripts don't support followup, use regular setup script
                         config.create_executor()
diff --git a/backend/src/utils.rs b/backend/src/utils.rs
index 3ca4e784..a5db5457 100644
--- a/backend/src/utils.rs
+++ b/backend/src/utils.rs
@@ -2,6 +2,7 @@ use std::{env, sync::OnceLock};
 
 use directories::ProjectDirs;
 
+pub mod path;
 pub mod shell;
 pub mod text;
 pub mod worktree_manager;
diff --git a/backend/src/utils/path.rs b/backend/src/utils/path.rs
new file mode 100644
index 00000000..088b77f0
--- /dev/null
+++ b/backend/src/utils/path.rs
@@ -0,0 +1,96 @@
+use std::path::Path;
+
+/// Convert absolute paths to relative paths based on worktree path
+/// This is a robust implementation that handles symlinks and edge cases
+pub fn make_path_relative(path: &str, worktree_path: &str) -> String {
+    let path_obj = Path::new(path);
+    let worktree_path_obj = Path::new(worktree_path);
+
+    tracing::debug!("Making path relative: {} -> {}", path, worktree_path);
+
+    // If path is already relative, return as is
+    if path_obj.is_relative() {
+        return path.to_string();
+    }
+
+    // Try to make path relative to the worktree path
+    match path_obj.strip_prefix(worktree_path_obj) {
+        Ok(relative_path) => {
+            let result = relative_path.to_string_lossy().to_string();
+            tracing::debug!("Successfully made relative: '{}' -> '{}'", path, result);
+            result
+        }
+        Err(_) => {
+            // Handle symlinks by resolving canonical paths
+            let canonical_path = std::fs::canonicalize(path);
+            let canonical_worktree = std::fs::canonicalize(worktree_path);
+
+            match (canonical_path, canonical_worktree) {
+                (Ok(canon_path), Ok(canon_worktree)) => {
+                    tracing::debug!(
+                        "Trying canonical path resolution: '{}' -> '{}', '{}' -> '{}'",
+                        path,
+                        canon_path.display(),
+                        worktree_path,
+                        canon_worktree.display()
+                    );
+
+                    match canon_path.strip_prefix(&canon_worktree) {
+                        Ok(relative_path) => {
+                            let result = relative_path.to_string_lossy().to_string();
+                            tracing::debug!(
+                                "Successfully made relative with canonical paths: '{}' -> '{}'",
+                                path,
+                                result
+                            );
+                            result
+                        }
+                        Err(e) => {
+                            tracing::warn!(
+                                "Failed to make canonical path relative: '{}' relative to '{}', error: {}, returning original",
+                                canon_path.display(),
+                                canon_worktree.display(),
+                                e
+                            );
+                            path.to_string()
+                        }
+                    }
+                }
+                _ => {
+                    tracing::debug!(
+                        "Could not canonicalize paths (paths may not exist): '{}', '{}', returning original",
+                        path,
+                        worktree_path
+                    );
+                    path.to_string()
+                }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_make_path_relative() {
+        // Test with relative path (should remain unchanged)
+        assert_eq!(
+            make_path_relative("src/main.rs", "/tmp/test-worktree"),
+            "src/main.rs"
+        );
+
+        // Test with absolute path (should become relative if possible)
+        let test_worktree = "/tmp/test-worktree";
+        let absolute_path = format!("{}/src/main.rs", test_worktree);
+        let result = make_path_relative(&absolute_path, test_worktree);
+        assert_eq!(result, "src/main.rs");
+
+        // Test with path outside worktree (should return original)
+        assert_eq!(
+            make_path_relative("/other/path/file.js", "/tmp/test-worktree"),
+            "/other/path/file.js"
+        );
+    }
+}
diff --git a/frontend/src/pages/McpServers.tsx b/frontend/src/pages/McpServers.tsx
index 34d9a240..630de189 100644
--- a/frontend/src/pages/McpServers.tsx
+++ b/frontend/src/pages/McpServers.tsx
@@ -50,7 +50,9 @@ export function McpServers() {
     const defaultConfig =
       executorType === 'amp'
         ? '{\n "amp.mcpServers": {\n }\n}'
-        : '{\n "mcpServers": {\n }\n}';
+        : executorType === 'sst-opencode'
+          ? '{\n "mcp": {\n }, "$schema": "https://opencode.ai/config.json"\n}'
+          : '{\n "mcpServers": {\n }\n}';
 
     setMcpServers(defaultConfig);
     setMcpConfigPath('');
@@ -67,6 +69,11 @@
       if (executorType === 'amp') {
         // For AMP, use the amp.mcpServers structure
         fullConfig = { 'amp.mcpServers': servers };
+      } else if (executorType === 'sst-opencode') {
+        fullConfig = {
+          mcp: servers,
+          $schema: 'https://opencode.ai/config.json',
+        };
       } else {
         // For other executors, use the standard mcpServers structure
         fullConfig = { mcpServers: servers };
@@ -110,6 +117,10 @@
           'AMP configuration must contain an "amp.mcpServers" object'
         );
       }
+    } else if (selectedMcpExecutor === 'sst-opencode') {
+      if (!config.mcp || typeof config.mcp !== 'object') {
+        setMcpError('Configuration must contain an "mcp" object');
+      }
     } else {
       if (!config.mcpServers || typeof config.mcpServers !== 'object') {
         setMcpError('Configuration must contain an "mcpServers" object');
@@ -129,10 +140,17 @@
       const existingConfig = mcpServers.trim() ? JSON.parse(mcpServers) : {};
 
       // Always use production MCP installation instructions
-      const vibeKanbanConfig = {
-        command: 'npx',
-        args: ['-y', 'vibe-kanban', '--mcp'],
-      };
+      const vibeKanbanConfig =
+        selectedMcpExecutor === 'sst-opencode'
+          ? {
+              type: 'local',
+              command: ['npx', '-y', 'vibe-kanban', '--mcp'],
+              enabled: true,
+            }
+          : {
+              command: 'npx',
+              args: ['-y', 'vibe-kanban', '--mcp'],
+            };
 
       // Add vibe_kanban to the existing configuration
       let updatedConfig;
@@ -144,6 +162,14 @@
           vibe_kanban: vibeKanbanConfig,
         },
       };
+    } else if (selectedMcpExecutor === 'sst-opencode') {
+      updatedConfig = {
+        ...existingConfig,
+        mcp: {
+          ...(existingConfig.mcp || {}),
+          vibe_kanban: vibeKanbanConfig,
+        },
+      };
     } else {
       updatedConfig = {
         ...existingConfig,
@@ -189,6 +215,12 @@
         }
         // Extract just the inner servers object for the API - backend will handle nesting
         mcpServersConfig = fullConfig['amp.mcpServers'];
+      } else if (selectedMcpExecutor === 'sst-opencode') {
+        if (!fullConfig.mcp || typeof fullConfig.mcp !== 'object') {
+          throw new Error('Configuration must contain an "mcp" object');
+        }
+        // Extract just the mcp part for the API
+        mcpServersConfig = fullConfig.mcp;
       } else {
         if (
           !fullConfig.mcpServers ||
diff --git a/shared/types.ts b/shared/types.ts
index 29467352..582ad103 100644
--- a/shared/types.ts
+++ b/shared/types.ts
@@ -22,7 +22,7 @@ export type SoundConstants = { sound_files: Array, sound_labels: Arra
 
 export type ConfigConstants = { editor: EditorConstants, sound: SoundConstants, };
 
-export type ExecutorConfig = { "type": "echo" } | { "type": "claude" } | { "type": "claude-plan" } | { "type": "amp" } | { "type": "gemini" } | { "type": "setup-script", script: string, } | { "type": "claude-code-router" } | { "type": "charm-opencode" };
+export type ExecutorConfig = { "type": "echo" } | { "type": "claude" } | { "type": "claude-plan" } | { "type": "amp" } | { "type": "gemini" } | { "type": "setup-script", script: string, } | { "type": "claude-code-router" } | { "type": "charm-opencode" } | { "type": "sst-opencode" };
 
 export type ExecutorConstants = { executor_types: Array<string>, executor_labels: Array<string>, };
 
@@ -126,7 +126,8 @@ export const EXECUTOR_TYPES: string[] = [
   "amp",
   "gemini",
   "charm-opencode",
-  "claude-code-router"
+  "claude-code-router",
+  "sst-opencode"
 ];
 
 export const EDITOR_TYPES: EditorType[] = [
@@ -145,7 +146,8 @@ export const EXECUTOR_LABELS: Record<string, string> = {
   "amp": "Amp",
   "gemini": "Gemini",
   "charm-opencode": "Charm Opencode",
-  "claude-code-router": "Claude Code Router"
+  "claude-code-router": "Claude Code Router",
+  "sst-opencode": "SST Opencode"
 };
 
 export const EDITOR_LABELS: Record<string, string> = {