Migrate task sharing to ElectricSQL + TanStack DB (#1379)
* WIP - Migrate task sharing to ElectricSQL + TanStack DB
* WIP auth proxy
* Simplify electric host
* Electric token: only set in cloud; acts like a DB password
* Add org membership validation
* fix Electric auth param
* trigger dev deployment
* Validate where clause
* Simplify check macro
* Cleanup
* Reduce Electric Postgres privileges: implement "Manual Mode (Least Privilege)", giving Electric access to specific tables through sqlx migrations. https://electric-sql.com/docs/guides/postgres-permissions#%F0%9F%94%B4-manual-mode-least-privilege
* Fetch task assignee user name
* Create a local task to link with a shared task assigned to the current user
* chore: code cleanup
* chore: unify task status serialization format (use the lowercase format)
* lint fix
* chore: remove backend ws client
* chore: remove unused deps
* Disable editing shared tasks when the user is logged out; migrate UserSystemProvider to TanStack Query, because a browser caching bug prevented the login state from updating without a page reload
* auto-unlink non-existing shared tasks
* Invalidate the useLiveQuery cache on sign-in change; also display local shared tasks when the user is signed out
* set VITE_VK_SHARED_API_BASE in CI
* rebase cleanup
* re-order migration
* increase node build memory in CI
* Set up CORS properly
* Prevent linking non-existing shared tasks
* Fix login dialog in background (#1413)
* Unlink already linked projects when linking again (vibe-kanban) (#1414)

  Fixes the bug where re-linking a project to a different remote project left orphaned `shared_task_id` references. File modified: `crates/server/src/routes/projects.rs:167-208`. The `apply_remote_project_link` function now:

  1. Fetches the existing project to check whether it is already linked
  2. If it is linked to a *different* remote project, uses a transaction to clear all `shared_task_id` associations for the old remote project and set the new `remote_project_id`
  3. If there is no existing link, sets the new link directly
  4. If linking to the *same* remote project, does nothing (already linked)

  This mirrors the cleanup logic already present in `unlink_project`, so tasks don't retain references to non-existent remote task IDs when projects are re-linked.

  On transactions: the re-linking path needs one because clearing the old task associations and setting the new ID must succeed or fail together. The fresh-link path is a single, inherently atomic statement, so it doesn't strictly need a transaction, although running it inside one would also close the theoretical race between `find_by_id` and `set_remote_project_id` via row-level locking.

  Follow-ups: the handler now takes `Extension(project)` like the other handlers in the file, avoiding a redundant database lookup; `apply_remote_project_link` now returns a conflict error if the project is already linked, requiring an explicit unlink before linking to a different remote project; and the frontend now only shows unlinked local projects in the selection dropdown, matching that backend behavior. Both checks pass.
* Cleanup script changes for task attempt ce9a0ae5-bedc-4b45-ac96-22d2c013b5bd
* prevent modification of shared tasks while offline
* reset oauth modal on login/logout events
* darken success alert font colour (#1416)

---------

Co-authored-by: Alex Netsch <alex@bloop.ai>
Co-authored-by: Louis Knight-Webb <louis@bloop.ai>
Co-authored-by: Gabriel Gordon-Hall <gabriel@bloop.ai>
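A rough sketch of the transactional re-link flow described above (illustrative only: the SQL is simplified from the queries in this diff, the handler surface is invented for the sketch, and the merged code ultimately returns a conflict error instead of silently re-linking):

// Sketch, not the shipped handler. Shows "clear stale shared_task_id links,
// then set the new remote_project_id" as a single transaction, with the
// existence check done inside the transaction so the find-then-set race
// discussed above cannot interleave.
use sqlx::SqlitePool;
use uuid::Uuid;

pub async fn relink_project(
    pool: &SqlitePool,
    project_id: Uuid,
    new_remote_project_id: Uuid,
) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;

    // Current link, if any (a NULL column means the project is unlinked).
    let existing: Option<Option<Uuid>> =
        sqlx::query_scalar("SELECT remote_project_id FROM projects WHERE id = $1")
            .bind(project_id)
            .fetch_optional(&mut *tx)
            .await?;

    match existing.flatten() {
        // Already linked to this remote project: nothing to do.
        Some(current) if current == new_remote_project_id => {}
        // Linked to a different remote project: clear the old task
        // associations and set the new link atomically.
        Some(_) => {
            sqlx::query("UPDATE tasks SET shared_task_id = NULL WHERE project_id = $1")
                .bind(project_id)
                .execute(&mut *tx)
                .await?;
            sqlx::query("UPDATE projects SET remote_project_id = $2 WHERE id = $1")
                .bind(project_id)
                .bind(new_remote_project_id)
                .execute(&mut *tx)
                .await?;
        }
        // No existing link: set it directly.
        None => {
            sqlx::query("UPDATE projects SET remote_project_id = $2 WHERE id = $1")
                .bind(project_id)
                .bind(new_remote_project_id)
                .execute(&mut *tx)
                .await?;
        }
    }

    tx.commit().await?;
    Ok(())
}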
.github/workflows/pre-release.yml (vendored, 1 change)
@@ -122,6 +122,7 @@ jobs:
     runs-on: ubuntu-latest
     env:
       VITE_PUBLIC_REACT_VIRTUOSO_LICENSE_KEY: ${{ secrets.PUBLIC_REACT_VIRTUOSO_LICENSE_KEY }}
+      VITE_VK_SHARED_API_BASE: ${{ secrets.VK_SHARED_API_BASE }}
     steps:
       - uses: actions/checkout@v4
         with:
.github/workflows/remote-deploy-dev.yml (vendored, 2 changes)
@@ -3,7 +3,7 @@ name: Remote Deploy Dev
 on:
   push:
     branches:
-      - gabriel/share
+      - electric
       - main
     paths:
       - crates/remote/**
.github/workflows/test.yml (vendored, 2 changes)
@@ -62,6 +62,8 @@ jobs:

       - name: Build frontend
         run: cd frontend && npm run build
+        env:
+          NODE_OPTIONS: --max-old-space-size=8192

       - name: Checks
         run: |
Cargo.lock (generated, 43 changes)
@@ -429,28 +429,6 @@ dependencies = [
  "windows-sys 0.61.2",
 ]
 
-[[package]]
-name = "async-stream"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
-dependencies = [
- "async-stream-impl",
- "futures-core",
- "pin-project-lite",
-]
-
-[[package]]
-name = "async-stream-impl"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.108",
-]
-
 [[package]]
 name = "async-task"
 version = "4.7.1"
@@ -2925,7 +2903,6 @@ name = "local-deployment"
 version = "0.0.126"
 dependencies = [
  "anyhow",
- "async-stream",
  "async-trait",
  "bytes",
  "command-group",
@@ -4056,6 +4033,7 @@ dependencies = [
  "tracing",
  "tracing-error",
  "tracing-subscriber",
+ "ts-rs 11.0.1",
  "url",
  "utils",
  "uuid",
@@ -4097,12 +4075,14 @@ dependencies = [
  "tokio",
  "tokio-native-tls",
  "tokio-rustls",
+ "tokio-util",
  "tower",
  "tower-http 0.6.6",
  "tower-service",
  "url",
  "wasm-bindgen",
  "wasm-bindgen-futures",
+ "wasm-streams",
  "web-sys",
  "webpki-roots 1.0.4",
 ]
@@ -4736,6 +4716,7 @@ dependencies = [
  "openssl-sys",
  "os_info",
  "rand 0.8.5",
+ "remote",
  "reqwest",
  "rmcp",
  "rust-embed",
@@ -5978,12 +5959,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
 name = "utils"
 version = "0.0.126"
 dependencies = [
- "async-stream",
- "async-trait",
  "axum 0.8.6",
  "bytes",
  "chrono",
- "dashmap",
  "directories",
  "dirs 5.0.1",
  "futures",
@@ -6155,6 +6133,19 @@ dependencies = [
  "unicode-ident",
 ]
 
+[[package]]
+name = "wasm-streams"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65"
+dependencies = [
+ "futures-util",
+ "js-sys",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+]
+
 [[package]]
 name = "web-sys"
 version = "0.3.82"
crates/db/.sqlx/query-00e71b6e31b432be788fe5c8a1b8954560a3bc52910da2b93a6a816032d8d0fd.json (generated, deleted)
@@ -1,92 +0,0 @@
Deleted SQLite query cache entry (13 typed columns, 1 parameter) for:
    SELECT
        id AS "id!: Uuid",
        remote_project_id AS "remote_project_id!: Uuid",
        title AS title,
        description AS description,
        status AS "status!: TaskStatus",
        assignee_user_id AS "assignee_user_id: Uuid",
        assignee_first_name AS "assignee_first_name: String",
        assignee_last_name AS "assignee_last_name: String",
        assignee_username AS "assignee_username: String",
        version AS "version!: i64",
        last_event_seq AS "last_event_seq: i64",
        created_at AS "created_at!: DateTime<Utc>",
        updated_at AS "updated_at!: DateTime<Utc>"
    FROM shared_tasks
    WHERE rowid = $1
crates/db/.sqlx/query-167422f3d3b74e0f8c9773aabe62b27092c44ec88df58bd3bb4c887351c6cb93.json (generated, new file, 68 lines)
@@ -0,0 +1,68 @@
New SQLite query cache entry (9 typed columns, no parameters) for:
    SELECT id as "id!: Uuid", project_id as "project_id!: Uuid", title, description,
           status as "status!: TaskStatus", parent_task_attempt as "parent_task_attempt: Uuid",
           shared_task_id as "shared_task_id: Uuid", created_at as "created_at!: DateTime<Utc>",
           updated_at as "updated_at!: DateTime<Utc>"
    FROM tasks
    WHERE shared_task_id IS NOT NULL
crates/db/.sqlx/query-1c6b836c28f8068506f3582bc56fcf2c7e6e784c73fac5fc174fe299902ca4cb.json (generated, deleted)
@@ -1,12 +0,0 @@
Deleted SQLite query cache entry for:
    UPDATE tasks SET shared_task_id = NULL
    WHERE shared_task_id IN (SELECT id FROM shared_tasks WHERE remote_project_id = $1)
crates/db/.sqlx/query-253a2292b461b964c792ff97adc6e01646a888e221290d312e2773609f97a6c4.json (generated, deleted)
@@ -1,12 +0,0 @@
Deleted SQLite query cache entry for:
    DELETE FROM shared_tasks WHERE id = $1
crates/db/.sqlx/query-2a49be016c5999f4069823fc7aa1cd0eeed1b1b1743f50e89ceb2d310c5f18bb.json (generated, deleted)
@@ -1,32 +0,0 @@
Deleted SQLite query cache entry (3 typed columns, 1 parameter) for:
    SELECT remote_project_id AS "remote_project_id!: Uuid",
           last_seq AS "last_seq!: i64",
           updated_at AS "updated_at!: DateTime<Utc>"
    FROM shared_activity_cursors
    WHERE remote_project_id = $1
crates/db/.sqlx/query-31cbff397a2c2c116f9f7558d04ad2de76b4b3aaa072253172f40ef378998451.json (generated, new file, 12 lines)
@@ -0,0 +1,12 @@
New SQLite query cache entry for:
    UPDATE tasks SET shared_task_id = NULL
    WHERE project_id IN (SELECT id FROM projects WHERE remote_project_id = $1)
crates/db/.sqlx/query-3cbd8fd4383a9f0899a12783be95972dec2ff6b9d0f3e3ed05bb5a07ea8c6ef0.json (generated, deleted)
@@ -1,92 +0,0 @@
Deleted SQLite query cache entry: the same 13-column typed SELECT from shared_tasks as in query-00e71b6e… above, but with
    WHERE remote_project_id = $1
    ORDER BY updated_at DESC
crates/db/.sqlx/query-ae8e284c805801a381ba6b700717884e6701e6e18db4bf019684ace8d8941edc.json → query-4ecc6054f64e2e4adeefc0ab5e769a77a96d1211447b426606253f50fd3e4e6d.json (generated)
The cached query text for the task lookup by shared_task_id ("SELECT … FROM tasks WHERE shared_task_id = $1 LIMIT 1") loses a trailing space after "FROM tasks"; the stored hash is updated to match. Columns, parameters, and nullability are unchanged.
crates/db/.sqlx/query-5393ad53affc4e19668d3b522f038fe0dd01993e236c5964ea7671ff22f697c8.json (generated, deleted)
@@ -1,12 +0,0 @@
Deleted SQLite query cache entry (7 parameters) for the conditional task sync upsert:
    INSERT INTO tasks (id, project_id, title, description, status, shared_task_id)
    SELECT $1, $2, $3, $4, $5, $6
    WHERE $7 OR EXISTS (SELECT 1 FROM tasks WHERE shared_task_id = $6)
    ON CONFLICT(shared_task_id) WHERE shared_task_id IS NOT NULL DO UPDATE SET
        project_id = excluded.project_id, title = excluded.title,
        description = excluded.description, status = excluded.status,
        updated_at = datetime('now', 'subsec')
crates/db/.sqlx/query-6a4e4fd60ae727839029a4d00c0626d0f8d0d78edb1d76af3be11dcb788f34aa.json (generated, deleted)
@@ -1,32 +0,0 @@
Deleted SQLite query cache entry for the shared_activity_cursors upsert:
    INSERT INTO shared_activity_cursors (remote_project_id, last_seq, updated_at)
    VALUES ($1, $2, datetime('now', 'subsec'))
    ON CONFLICT(remote_project_id) DO UPDATE SET
        last_seq = excluded.last_seq, updated_at = excluded.updated_at
    RETURNING remote_project_id, last_seq, updated_at
crates/db/.sqlx/query-6d3443d4f96369fa72df0ddd2f06d1fbb36b22a46ed421865d699907e5e71451.json (generated, deleted)
@@ -1,92 +0,0 @@
Deleted SQLite query cache entry (13 parameters) for the shared_tasks upsert:
    INSERT INTO shared_tasks (id, remote_project_id, title, description, status,
        assignee_user_id, assignee_first_name, assignee_last_name, assignee_username,
        version, last_event_seq, created_at, updated_at)
    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
    ON CONFLICT(id) DO UPDATE SET
        remote_project_id = excluded.remote_project_id, title = excluded.title,
        description = excluded.description, status = excluded.status,
        assignee_user_id = excluded.assignee_user_id,
        assignee_first_name = excluded.assignee_first_name,
        assignee_last_name = excluded.assignee_last_name,
        assignee_username = excluded.assignee_username, version = excluded.version,
        last_event_seq = excluded.last_event_seq, created_at = excluded.created_at,
        updated_at = excluded.updated_at
    RETURNING … (the same 13-column typed list as in query-00e71b6e… above)
crates/db/.sqlx/query-b742031d1362f7fd7c63ab183af04be8fa79f8f6340d3e27c703a9c58b7c7805.json (generated, deleted)
@@ -1,92 +0,0 @@
Deleted SQLite query cache entry: the same 13-column typed SELECT from shared_tasks as in query-00e71b6e… above, but with
    WHERE id = $1
crates/db/migrations/20251202000000_migrate_to_electric.sql (new file, 23 lines)
@@ -0,0 +1,23 @@
DROP TABLE IF EXISTS shared_activity_cursors;

-- Drop the index on the old column if it exists
DROP INDEX IF EXISTS idx_tasks_shared_task_unique;

-- Add new column to hold the data
ALTER TABLE tasks ADD COLUMN shared_task_id_new BLOB;

-- Migrate data
UPDATE tasks SET shared_task_id_new = shared_task_id;

-- Drop the old column (removing the foreign key constraint)
ALTER TABLE tasks DROP COLUMN shared_task_id;

-- Rename the new column to the old name
ALTER TABLE tasks RENAME COLUMN shared_task_id_new TO shared_task_id;

-- Recreate the index
CREATE UNIQUE INDEX IF NOT EXISTS idx_tasks_shared_task_unique
ON tasks(shared_task_id)
WHERE shared_task_id IS NOT NULL;

DROP TABLE IF EXISTS shared_tasks;
crates/db/src/models/mod.rs (path inferred)
@@ -5,7 +5,6 @@ pub mod image;
 pub mod merge;
 pub mod project;
 pub mod scratch;
-pub mod shared_task;
 pub mod tag;
 pub mod task;
 pub mod task_attempt;
crates/db/src/models/shared_task.rs (path inferred; deleted)
@@ -1,297 +0,0 @@
Deleted file; previous contents:

use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use sqlx::{Executor, FromRow, QueryBuilder, Sqlite, SqlitePool};
use ts_rs::TS;
use uuid::Uuid;

use super::task::TaskStatus;

#[derive(Debug, Clone, FromRow, Serialize, Deserialize, TS)]
pub struct SharedTask {
    pub id: Uuid,
    pub remote_project_id: Uuid,
    pub title: String,
    pub description: Option<String>,
    pub status: TaskStatus,
    pub assignee_user_id: Option<Uuid>,
    pub assignee_first_name: Option<String>,
    pub assignee_last_name: Option<String>,
    pub assignee_username: Option<String>,
    pub version: i64,
    pub last_event_seq: Option<i64>,
    #[ts(type = "Date")]
    pub created_at: DateTime<Utc>,
    #[ts(type = "Date")]
    pub updated_at: DateTime<Utc>,
}

#[derive(Debug, Clone)]
pub struct SharedTaskInput {
    pub id: Uuid,
    pub remote_project_id: Uuid,
    pub title: String,
    pub description: Option<String>,
    pub status: TaskStatus,
    pub assignee_user_id: Option<Uuid>,
    pub assignee_first_name: Option<String>,
    pub assignee_last_name: Option<String>,
    pub assignee_username: Option<String>,
    pub version: i64,
    pub last_event_seq: Option<i64>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

impl SharedTask {
    pub async fn list_by_remote_project_id(
        pool: &SqlitePool,
        remote_project_id: Uuid,
    ) -> Result<Vec<Self>, sqlx::Error> {
        sqlx::query_as!(
            SharedTask,
            r#"
            SELECT
                id AS "id!: Uuid",
                remote_project_id AS "remote_project_id!: Uuid",
                title AS title,
                description AS description,
                status AS "status!: TaskStatus",
                assignee_user_id AS "assignee_user_id: Uuid",
                assignee_first_name AS "assignee_first_name: String",
                assignee_last_name AS "assignee_last_name: String",
                assignee_username AS "assignee_username: String",
                version AS "version!: i64",
                last_event_seq AS "last_event_seq: i64",
                created_at AS "created_at!: DateTime<Utc>",
                updated_at AS "updated_at!: DateTime<Utc>"
            FROM shared_tasks
            WHERE remote_project_id = $1
            ORDER BY updated_at DESC
            "#,
            remote_project_id
        )
        .fetch_all(pool)
        .await
    }

    pub async fn upsert<'e, E>(executor: E, data: SharedTaskInput) -> Result<Self, sqlx::Error>
    where
        E: Executor<'e, Database = Sqlite>,
    {
        let status = data.status.clone();
        sqlx::query_as!(
            SharedTask,
            r#"
            INSERT INTO shared_tasks (
                id,
                remote_project_id,
                title,
                description,
                status,
                assignee_user_id,
                assignee_first_name,
                assignee_last_name,
                assignee_username,
                version,
                last_event_seq,
                created_at,
                updated_at
            )
            VALUES (
                $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13
            )
            ON CONFLICT(id) DO UPDATE SET
                remote_project_id = excluded.remote_project_id,
                title = excluded.title,
                description = excluded.description,
                status = excluded.status,
                assignee_user_id = excluded.assignee_user_id,
                assignee_first_name = excluded.assignee_first_name,
                assignee_last_name = excluded.assignee_last_name,
                assignee_username = excluded.assignee_username,
                version = excluded.version,
                last_event_seq = excluded.last_event_seq,
                created_at = excluded.created_at,
                updated_at = excluded.updated_at
            RETURNING
                id AS "id!: Uuid",
                remote_project_id AS "remote_project_id!: Uuid",
                title AS title,
                description AS description,
                status AS "status!: TaskStatus",
                assignee_user_id AS "assignee_user_id: Uuid",
                assignee_first_name AS "assignee_first_name: String",
                assignee_last_name AS "assignee_last_name: String",
                assignee_username AS "assignee_username: String",
                version AS "version!: i64",
                last_event_seq AS "last_event_seq: i64",
                created_at AS "created_at!: DateTime<Utc>",
                updated_at AS "updated_at!: DateTime<Utc>"
            "#,
            data.id,
            data.remote_project_id,
            data.title,
            data.description,
            status,
            data.assignee_user_id,
            data.assignee_first_name,
            data.assignee_last_name,
            data.assignee_username,
            data.version,
            data.last_event_seq,
            data.created_at,
            data.updated_at
        )
        .fetch_one(executor)
        .await
    }

    pub async fn find_by_id(pool: &SqlitePool, id: Uuid) -> Result<Option<Self>, sqlx::Error> {
        sqlx::query_as!(
            SharedTask,
            r#"
            SELECT
                id AS "id!: Uuid",
                remote_project_id AS "remote_project_id!: Uuid",
                title AS title,
                description AS description,
                status AS "status!: TaskStatus",
                assignee_user_id AS "assignee_user_id: Uuid",
                assignee_first_name AS "assignee_first_name: String",
                assignee_last_name AS "assignee_last_name: String",
                assignee_username AS "assignee_username: String",
                version AS "version!: i64",
                last_event_seq AS "last_event_seq: i64",
                created_at AS "created_at!: DateTime<Utc>",
                updated_at AS "updated_at!: DateTime<Utc>"
            FROM shared_tasks
            WHERE id = $1
            "#,
            id
        )
        .fetch_optional(pool)
        .await
    }

    pub async fn remove<'e, E>(executor: E, id: Uuid) -> Result<(), sqlx::Error>
    where
        E: Executor<'e, Database = Sqlite>,
    {
        sqlx::query!("DELETE FROM shared_tasks WHERE id = $1", id)
            .execute(executor)
            .await?;
        Ok(())
    }

    pub async fn remove_many<'e, E>(executor: E, ids: &[Uuid]) -> Result<(), sqlx::Error>
    where
        E: Executor<'e, Database = Sqlite>,
    {
        if ids.is_empty() {
            return Ok(());
        }

        let mut builder = QueryBuilder::<Sqlite>::new("DELETE FROM shared_tasks WHERE id IN (");
        {
            let mut separated = builder.separated(", ");
            for id in ids {
                separated.push_bind(id);
            }
        }
        builder.push(")");
        builder.build().execute(executor).await?;
        Ok(())
    }

    pub async fn find_by_rowid(pool: &SqlitePool, rowid: i64) -> Result<Option<Self>, sqlx::Error> {
        sqlx::query_as!(
            SharedTask,
            r#"
            SELECT
                id AS "id!: Uuid",
                remote_project_id AS "remote_project_id!: Uuid",
                title AS title,
                description AS description,
                status AS "status!: TaskStatus",
                assignee_user_id AS "assignee_user_id: Uuid",
                assignee_first_name AS "assignee_first_name: String",
                assignee_last_name AS "assignee_last_name: String",
                assignee_username AS "assignee_username: String",
                version AS "version!: i64",
                last_event_seq AS "last_event_seq: i64",
                created_at AS "created_at!: DateTime<Utc>",
                updated_at AS "updated_at!: DateTime<Utc>"
            FROM shared_tasks
            WHERE rowid = $1
            "#,
            rowid
        )
        .fetch_optional(pool)
        .await
    }
}

#[derive(Debug, Clone, FromRow)]
pub struct SharedActivityCursor {
    pub remote_project_id: Uuid,
    pub last_seq: i64,
    pub updated_at: DateTime<Utc>,
}

impl SharedActivityCursor {
    pub async fn get(
        pool: &SqlitePool,
        remote_project_id: Uuid,
    ) -> Result<Option<Self>, sqlx::Error> {
        sqlx::query_as!(
            SharedActivityCursor,
            r#"
            SELECT
                remote_project_id AS "remote_project_id!: Uuid",
                last_seq AS "last_seq!: i64",
                updated_at AS "updated_at!: DateTime<Utc>"
            FROM shared_activity_cursors
            WHERE remote_project_id = $1
            "#,
            remote_project_id
        )
        .fetch_optional(pool)
        .await
    }

    pub async fn upsert<'e, E>(
        executor: E,
        remote_project_id: Uuid,
        last_seq: i64,
    ) -> Result<Self, sqlx::Error>
    where
        E: Executor<'e, Database = Sqlite>,
    {
        sqlx::query_as!(
            SharedActivityCursor,
            r#"
            INSERT INTO shared_activity_cursors (
                remote_project_id,
                last_seq,
                updated_at
            )
            VALUES (
                $1,
                $2,
                datetime('now', 'subsec')
            )
            ON CONFLICT(remote_project_id) DO UPDATE SET
                last_seq = excluded.last_seq,
                updated_at = excluded.updated_at
            RETURNING
                remote_project_id AS "remote_project_id!: Uuid",
                last_seq AS "last_seq!: i64",
                updated_at AS "updated_at!: DateTime<Utc>"
            "#,
            remote_project_id,
            last_seq
        )
        .fetch_one(executor)
        .await
    }
}
crates/db/src/models/task.rs (path inferred)
@@ -12,7 +12,7 @@ use super::{project::Project, task_attempt::TaskAttempt};
 )]
 #[sqlx(type_name = "task_status", rename_all = "lowercase")]
 #[serde(rename_all = "lowercase")]
-#[strum(serialize_all = "kebab_case")]
+#[strum(serialize_all = "lowercase")]
 pub enum TaskStatus {
     #[default]
     Todo,
@@ -113,15 +113,6 @@ impl CreateTask {
     }
 }
 
-#[derive(Debug, Clone)]
-pub struct SyncTask {
-    pub shared_task_id: Uuid,
-    pub project_id: Uuid,
-    pub title: String,
-    pub description: Option<String>,
-    pub status: TaskStatus,
-}
-
 #[derive(Debug, Serialize, Deserialize, TS)]
 pub struct UpdateTask {
     pub title: Option<String>,
@@ -273,7 +264,7 @@ ORDER BY t.created_at DESC"#,
         sqlx::query_as!(
             Task,
             r#"SELECT id as "id!: Uuid", project_id as "project_id!: Uuid", title, description, status as "status!: TaskStatus", parent_task_attempt as "parent_task_attempt: Uuid", shared_task_id as "shared_task_id: Uuid", created_at as "created_at!: DateTime<Utc>", updated_at as "updated_at!: DateTime<Utc>"
-FROM tasks
+FROM tasks
 WHERE shared_task_id = $1
 LIMIT 1"#,
             shared_task_id
(The only change is a removed trailing space after "FROM tasks", which is what re-hashed the corresponding .sqlx entry above.)
@@ -282,6 +273,17 @@ ORDER BY t.created_at DESC"#,
         .await
     }
 
+    pub async fn find_all_shared(pool: &SqlitePool) -> Result<Vec<Self>, sqlx::Error> {
+        sqlx::query_as!(
+            Task,
+            r#"SELECT id as "id!: Uuid", project_id as "project_id!: Uuid", title, description, status as "status!: TaskStatus", parent_task_attempt as "parent_task_attempt: Uuid", shared_task_id as "shared_task_id: Uuid", created_at as "created_at!: DateTime<Utc>", updated_at as "updated_at!: DateTime<Utc>"
+FROM tasks
+WHERE shared_task_id IS NOT NULL"#
+        )
+        .fetch_all(pool)
+        .await
+    }
+
     pub async fn create(
         pool: &SqlitePool,
         data: &CreateTask,
@@ -331,58 +333,6 @@ ORDER BY t.created_at DESC"#,
         .await
     }
 
-    pub async fn sync_from_shared_task<'e, E>(
-        executor: E,
-        data: SyncTask,
-        create_if_not_exists: bool,
-    ) -> Result<bool, sqlx::Error>
-    where
-        E: Executor<'e, Database = Sqlite>,
-    {
-        let new_task_id = Uuid::new_v4();
-
-        let result = sqlx::query!(
-            r#"
-            INSERT INTO tasks (
-                id,
-                project_id,
-                title,
-                description,
-                status,
-                shared_task_id
-            )
-            SELECT
-                $1,
-                $2,
-                $3,
-                $4,
-                $5,
-                $6
-            WHERE $7
-               OR EXISTS (
-                   SELECT 1 FROM tasks WHERE shared_task_id = $6
-               )
-            ON CONFLICT(shared_task_id) WHERE shared_task_id IS NOT NULL DO UPDATE SET
-                project_id = excluded.project_id,
-                title = excluded.title,
-                description = excluded.description,
-                status = excluded.status,
-                updated_at = datetime('now', 'subsec')
-            "#,
-            new_task_id,
-            data.project_id,
-            data.title,
-            data.description,
-            data.status,
-            data.shared_task_id,
-            create_if_not_exists
-        )
-        .execute(executor)
-        .await?;
-
-        Ok(result.rows_affected() > 0)
-    }
-
     pub async fn update_status(
         pool: &SqlitePool,
         id: Uuid,
@@ -428,8 +378,8 @@ ORDER BY t.created_at DESC"#,
         let result = sqlx::query!(
             r#"UPDATE tasks
 SET shared_task_id = NULL
-WHERE shared_task_id IN (
-    SELECT id FROM shared_tasks WHERE remote_project_id = $1
+WHERE project_id IN (
+    SELECT id FROM projects WHERE remote_project_id = $1
 )"#,
             remote_project_id
         )
@@ -466,6 +416,31 @@ ORDER BY t.created_at DESC"#,
         Ok(())
     }
 
+    pub async fn batch_unlink_shared_tasks<'e, E>(
+        executor: E,
+        shared_task_ids: &[Uuid],
+    ) -> Result<u64, sqlx::Error>
+    where
+        E: Executor<'e, Database = Sqlite>,
+    {
+        if shared_task_ids.is_empty() {
+            return Ok(0);
+        }
+
+        let mut query_builder = sqlx::QueryBuilder::new(
+            "UPDATE tasks SET shared_task_id = NULL, updated_at = CURRENT_TIMESTAMP WHERE shared_task_id IN (",
+        );
+
+        let mut separated = query_builder.separated(", ");
+        for id in shared_task_ids {
+            separated.push_bind(id);
+        }
+        separated.push_unseparated(")");
+
+        let result = query_builder.build().execute(executor).await?;
+        Ok(result.rows_affected())
+    }
+
     pub async fn exists(
         pool: &SqlitePool,
         id: Uuid,
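The #[strum(serialize_all)] switch in the task model diff above is the "unify task status serialization format" change from the commit message. A minimal standalone sketch of its effect (assumes strum with the derive feature; the variant list mirrors the enum values implied by this diff):

// Before: serialize_all = "kebab_case" rendered InProgress as "in-progress".
// After: "lowercase" renders it as "inprogress", matching the serde/sqlx
// lowercase formats and the renamed Postgres enum values ("inprogress",
// "inreview") in the .sqlx entries further down.
use strum::Display;

#[derive(Display)]
#[strum(serialize_all = "lowercase")]
enum TaskStatus {
    Todo,
    InProgress,
    InReview,
    Done,
    Cancelled,
}

fn main() {
    assert_eq!(TaskStatus::InProgress.to_string(), "inprogress");
}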
crates/deployment/src/lib.rs (path inferred)
@@ -28,12 +28,12 @@ use services::services::{
     image::{ImageError, ImageService},
     pr_monitor::PrMonitorService,
     queued_message::QueuedMessageService,
-    share::{RemoteSync, RemoteSyncHandle, ShareConfig, SharePublisher},
+    share::SharePublisher,
     worktree_manager::WorktreeError,
 };
 use sqlx::{Error as SqlxError, types::Uuid};
 use thiserror::Error;
-use tokio::sync::{Mutex, RwLock};
+use tokio::sync::RwLock;
 use utils::sentry as sentry_utils;
 
 #[derive(Debug, Clone, Copy, Error)]
@@ -106,26 +106,6 @@ pub trait Deployment: Clone + Send + Sync + 'static {
 
     fn share_publisher(&self) -> Result<SharePublisher, RemoteClientNotConfigured>;
 
-    fn share_sync_handle(&self) -> &Arc<Mutex<Option<RemoteSyncHandle>>>;
-
-    fn spawn_remote_sync(&self, config: ShareConfig) {
-        let deployment = self.clone();
-        let handle_slot = self.share_sync_handle().clone();
-        tokio::spawn(async move {
-            tracing::info!("Starting shared task sync");
-
-            let remote_sync_handle = RemoteSync::spawn(
-                deployment.db().clone(),
-                config,
-                deployment.auth_context().clone(),
-            );
-            {
-                let mut guard = handle_slot.lock().await;
-                *guard = Some(remote_sync_handle);
-            }
-        });
-    }
-
     async fn update_sentry_scope(&self) -> Result<(), DeploymentError> {
         let user_id = self.user_id();
         let config = self.config().read().await;
@@ -26,6 +26,5 @@ notify-debouncer-full = "0.5.0"
 reqwest = { version = "0.12", features = ["json"] }
 sentry = { version = "0.41.0", features = ["anyhow", "backtrace", "panic", "debug-images"] }
 futures = "0.3"
-async-stream = "0.3"
 json-patch = "2.0"
 tokio = { workspace = true }
crates/local-deployment/src/lib.rs (path inferred)
@@ -18,9 +18,9 @@ use services::services::{
     oauth_credentials::OAuthCredentials,
     queued_message::QueuedMessageService,
     remote_client::{RemoteClient, RemoteClientError},
-    share::{RemoteSyncHandle, ShareConfig, SharePublisher},
+    share::{ShareConfig, SharePublisher},
 };
-use tokio::sync::{Mutex, RwLock};
+use tokio::sync::RwLock;
 use utils::{
     api::oauth::LoginStatus,
     assets::{config_path, credentials_path},
@@ -47,7 +47,6 @@ pub struct LocalDeployment {
     approvals: Approvals,
     queued_message_service: QueuedMessageService,
     share_publisher: Result<SharePublisher, RemoteClientNotConfigured>,
-    share_sync_handle: Arc<Mutex<Option<RemoteSyncHandle>>>,
     share_config: Option<ShareConfig>,
     remote_client: Result<RemoteClient, RemoteClientNotConfigured>,
     auth_context: AuthContext,
@@ -159,14 +158,6 @@ impl Deployment for LocalDeployment {
             .map_err(|e| *e);
 
         let oauth_handoffs = Arc::new(RwLock::new(HashMap::new()));
-        let share_sync_handle = Arc::new(Mutex::new(None));
-
-        let mut share_sync_config: Option<ShareConfig> = None;
-        if let (Some(sc_ref), Ok(_)) = (share_config.as_ref(), &share_publisher)
-            && oauth_credentials.get().await.is_some()
-        {
-            share_sync_config = Some(sc_ref.clone());
-        }
 
         // We need to make analytics accessible to the ContainerService
         // TODO: Handle this more gracefully
@@ -205,17 +196,12 @@ impl Deployment for LocalDeployment {
             approvals,
             queued_message_service,
             share_publisher,
-            share_sync_handle: share_sync_handle.clone(),
             share_config: share_config.clone(),
             remote_client,
             auth_context,
             oauth_handoffs,
         };
 
-        if let Some(sc) = share_sync_config {
-            deployment.spawn_remote_sync(sc);
-        }
-
         Ok(deployment)
     }
 
@@ -271,10 +257,6 @@ impl Deployment for LocalDeployment {
         self.share_publisher.clone()
     }
 
-    fn share_sync_handle(&self) -> &Arc<Mutex<Option<RemoteSyncHandle>>> {
-        &self.share_sync_handle
-    }
-
     fn auth_context(&self) -> &AuthContext {
         &self.auth_context
     }
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n UPDATE shared_tasks AS t\n SET deleted_at = NOW(),\n deleted_by_user_id = $3,\n version = t.version + 1\n WHERE t.id = $1\n AND t.version = COALESCE($2, t.version)\n AND t.assignee_user_id = $3\n AND t.deleted_at IS NULL\n RETURNING\n t.id AS \"id!\",\n t.organization_id AS \"organization_id!: Uuid\",\n t.project_id AS \"project_id!\",\n t.creator_user_id AS \"creator_user_id?: Uuid\",\n t.assignee_user_id AS \"assignee_user_id?: Uuid\",\n t.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n t.title AS \"title!\",\n t.description AS \"description?\",\n t.status AS \"status!: TaskStatus\",\n t.version AS \"version!\",\n t.deleted_at AS \"deleted_at?\",\n t.shared_at AS \"shared_at?\",\n t.created_at AS \"created_at!\",\n t.updated_at AS \"updated_at!\"\n ",
+  "query": "\n UPDATE shared_tasks AS t\n SET deleted_at = NOW(),\n deleted_by_user_id = $2\n WHERE t.id = $1\n AND t.assignee_user_id = $2\n AND t.deleted_at IS NULL\n RETURNING\n t.id AS \"id!\",\n t.organization_id AS \"organization_id!: Uuid\",\n t.project_id AS \"project_id!\",\n t.creator_user_id AS \"creator_user_id?: Uuid\",\n t.assignee_user_id AS \"assignee_user_id?: Uuid\",\n t.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n t.title AS \"title!\",\n t.description AS \"description?\",\n t.status AS \"status!: TaskStatus\",\n t.deleted_at AS \"deleted_at?\",\n t.shared_at AS \"shared_at?\",\n t.created_at AS \"created_at!\",\n t.updated_at AS \"updated_at!\"\n ",
   "describe": {
     "columns": [
       {
@@ -52,8 +52,8 @@
           "kind": {
             "Enum": [
               "todo",
-              "in-progress",
-              "in-review",
+              "inprogress",
+              "inreview",
               "done",
               "cancelled"
             ]
@@ -63,26 +63,21 @@
       },
       {
         "ordinal": 9,
-        "name": "version!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 10,
         "name": "deleted_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 11,
+        "ordinal": 10,
         "name": "shared_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 12,
+        "ordinal": 11,
         "name": "created_at!",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 13,
+        "ordinal": 12,
         "name": "updated_at!",
         "type_info": "Timestamptz"
       }
@@ -90,7 +85,6 @@
     "parameters": {
       "Left": [
         "Uuid",
-        "Int8",
         "Uuid"
       ]
     },
@@ -104,12 +98,11 @@
       false,
       true,
       false,
-      false,
      true,
      true,
      false,
      false
    ]
  },
-  "hash": "e185c68e4809dddb5dd1e59f1cb123c4e02499d42d97df65fc7a625568d4d234"
+  "hash": "1a8fb6c222b7eb3077fba6a7722faa1af89e268a644e7e7237ae21b03221dc9b"
 }
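The regenerated `.sqlx` entry above drops the `version` column and its `Int8` bind parameter: soft-deletes no longer take an optimistic-locking guard. A minimal sketch of the new semantics; the function name, bind order, and call shape are illustrative, not the crate's actual call site:

use sqlx::PgPool;
use uuid::Uuid;

// Sketch: the old query also required `t.version = COALESCE($2, t.version)`,
// so a stale client could not delete over a newer write. The new query trusts
// the assignee check alone; Electric streams the resulting row state back.
async fn soft_delete(pool: &PgPool, task_id: Uuid, user_id: Uuid) -> sqlx::Result<u64> {
    let result = sqlx::query(
        "UPDATE shared_tasks AS t
         SET deleted_at = NOW(), deleted_by_user_id = $2
         WHERE t.id = $1 AND t.assignee_user_id = $2 AND t.deleted_at IS NULL",
    )
    .bind(task_id)
    .bind(user_id)
    .execute(pool)
    .await?;
    Ok(result.rows_affected())
}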
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n UPDATE shared_tasks AS t\n SET title = COALESCE($2, t.title),\n description = COALESCE($3, t.description),\n status = COALESCE($4, t.status),\n version = t.version + 1,\n updated_at = NOW()\n WHERE t.id = $1\n AND t.version = COALESCE($5, t.version)\n AND t.assignee_user_id = $6\n AND t.deleted_at IS NULL\n RETURNING\n t.id AS \"id!\",\n t.organization_id AS \"organization_id!: Uuid\",\n t.project_id AS \"project_id!\",\n t.creator_user_id AS \"creator_user_id?: Uuid\",\n t.assignee_user_id AS \"assignee_user_id?: Uuid\",\n t.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n t.title AS \"title!\",\n t.description AS \"description?\",\n t.status AS \"status!: TaskStatus\",\n t.version AS \"version!\",\n t.deleted_at AS \"deleted_at?\",\n t.shared_at AS \"shared_at?\",\n t.created_at AS \"created_at!\",\n t.updated_at AS \"updated_at!\"\n ",
+  "query": "\n UPDATE shared_tasks AS t\n SET title = COALESCE($2, t.title),\n description = COALESCE($3, t.description),\n status = COALESCE($4, t.status),\n updated_at = NOW()\n WHERE t.id = $1\n AND t.assignee_user_id = $5\n AND t.deleted_at IS NULL\n RETURNING\n t.id AS \"id!\",\n t.organization_id AS \"organization_id!: Uuid\",\n t.project_id AS \"project_id!\",\n t.creator_user_id AS \"creator_user_id?: Uuid\",\n t.assignee_user_id AS \"assignee_user_id?: Uuid\",\n t.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n t.title AS \"title!\",\n t.description AS \"description?\",\n t.status AS \"status!: TaskStatus\",\n t.deleted_at AS \"deleted_at?\",\n t.shared_at AS \"shared_at?\",\n t.created_at AS \"created_at!\",\n t.updated_at AS \"updated_at!\"\n ",
   "describe": {
     "columns": [
       {
@@ -52,8 +52,8 @@
           "kind": {
             "Enum": [
               "todo",
-              "in-progress",
-              "in-review",
+              "inprogress",
+              "inreview",
               "done",
               "cancelled"
             ]
@@ -63,26 +63,21 @@
       },
       {
         "ordinal": 9,
-        "name": "version!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 10,
         "name": "deleted_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 11,
+        "ordinal": 10,
         "name": "shared_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 12,
+        "ordinal": 11,
         "name": "created_at!",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 13,
+        "ordinal": 12,
         "name": "updated_at!",
         "type_info": "Timestamptz"
       }
@@ -98,15 +93,14 @@
             "kind": {
               "Enum": [
                 "todo",
-                "in-progress",
-                "in-review",
+                "inprogress",
+                "inreview",
                 "done",
                 "cancelled"
               ]
             }
           }
         },
-        "Int8",
         "Uuid"
       ]
     },
@@ -120,12 +114,11 @@
       false,
       true,
       false,
-      false,
      true,
      true,
      false,
      false
    ]
  },
-  "hash": "1d691b943af2d90feaace911403fbb158839b4359f91fd5c05166ecee82b13a8"
+  "hash": "338507619ddbadce5d40bc58a7d9eb95bbeee3ade4d5abb9140aefe5673ea071"
 }
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT\n id AS \"id!\",\n organization_id AS \"organization_id!: Uuid\",\n project_id AS \"project_id!\",\n creator_user_id AS \"creator_user_id?: Uuid\",\n assignee_user_id AS \"assignee_user_id?: Uuid\",\n deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n title AS \"title!\",\n description AS \"description?\",\n status AS \"status!: TaskStatus\",\n version AS \"version!\",\n deleted_at AS \"deleted_at?\",\n shared_at AS \"shared_at?\",\n created_at AS \"created_at!\",\n updated_at AS \"updated_at!\"\n FROM shared_tasks\n WHERE id = $1\n AND deleted_at IS NULL\n ",
+  "query": "\n SELECT\n id AS \"id!\",\n organization_id AS \"organization_id!: Uuid\",\n project_id AS \"project_id!\",\n creator_user_id AS \"creator_user_id?: Uuid\",\n assignee_user_id AS \"assignee_user_id?: Uuid\",\n deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n title AS \"title!\",\n description AS \"description?\",\n status AS \"status!: TaskStatus\",\n deleted_at AS \"deleted_at?\",\n shared_at AS \"shared_at?\",\n created_at AS \"created_at!\",\n updated_at AS \"updated_at!\"\n FROM shared_tasks\n WHERE id = $1\n AND deleted_at IS NULL\n ",
   "describe": {
     "columns": [
       {
@@ -52,8 +52,8 @@
           "kind": {
             "Enum": [
               "todo",
-              "in-progress",
-              "in-review",
+              "inprogress",
+              "inreview",
               "done",
               "cancelled"
             ]
@@ -63,26 +63,21 @@
       },
       {
         "ordinal": 9,
-        "name": "version!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 10,
         "name": "deleted_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 11,
+        "ordinal": 10,
         "name": "shared_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 12,
+        "ordinal": 11,
         "name": "created_at!",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 13,
+        "ordinal": 12,
         "name": "updated_at!",
         "type_info": "Timestamptz"
       }
@@ -102,12 +97,11 @@
       false,
       true,
       false,
-      false,
      true,
      true,
      false,
      false
    ]
  },
-  "hash": "2a9a7c649ededf8772f750bb42c5144f4ab5e74dc905fb8a63340f09fd55a3d7"
+  "hash": "3ba7efc786500c8a72dec5fb0f76b66da861b8ca8905080ef70a16943e97f004"
 }
@@ -1,22 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT st.id AS \"id!: Uuid\"\n FROM shared_tasks st\n WHERE st.project_id = $1\n AND st.deleted_at IS NOT NULL\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "id!: Uuid",
-        "type_info": "Uuid"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Uuid"
-      ]
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "4153afb5c59d76df7c880d2f427cdba11d2eaf2fe26193043947a45bcda46f45"
-}
crates/remote/.sqlx/query-4aaf14d8e25078fff3ceca2b2b1e2888403f398fba3048fbc582ec24c4c5dbf7.json (generated, new file)
@@ -0,0 +1,40 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT DISTINCT\n u.id as \"user_id\",\n u.first_name as \"first_name\",\n u.last_name as \"last_name\",\n u.username as \"username\"\n FROM shared_tasks st\n INNER JOIN users u ON u.id = st.assignee_user_id\n WHERE st.project_id = $1\n AND st.assignee_user_id IS NOT NULL\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "user_id",
+        "type_info": "Uuid"
+      },
+      {
+        "ordinal": 1,
+        "name": "first_name",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 2,
+        "name": "last_name",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 3,
+        "name": "username",
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Uuid"
+      ]
+    },
+    "nullable": [
+      false,
+      true,
+      true,
+      true
+    ]
+  },
+  "hash": "4aaf14d8e25078fff3ceca2b2b1e2888403f398fba3048fbc582ec24c4c5dbf7"
+}
@@ -1,17 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n WITH next AS (\n INSERT INTO project_activity_counters AS counters (project_id, last_seq)\n VALUES ($1, 1)\n ON CONFLICT (project_id)\n DO UPDATE SET last_seq = counters.last_seq + 1\n RETURNING last_seq\n )\n INSERT INTO activity (\n project_id,\n seq,\n assignee_user_id,\n event_type,\n payload\n )\n SELECT $1, next.last_seq, $2, $3, $4\n FROM next\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": [
-        "Uuid",
-        "Uuid",
-        "Text",
-        "Jsonb"
-      ]
-    },
-    "nullable": []
-  },
-  "hash": "814e3c0507a86c04008e08104176c3c552833f518b2e880e649ad7fc10c0721c"
-}
crates/remote/.sqlx/query-872d77e34d06bc036a07e9b2330166a2e0bedf34db5bceb3e6e576f1e07f6414.json (generated, new file)
@@ -0,0 +1,23 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT t.id\n FROM shared_tasks t\n INNER JOIN organization_member_metadata om ON t.organization_id = om.organization_id\n WHERE t.id = ANY($1)\n AND t.deleted_at IS NULL\n AND om.user_id = $2\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "id",
+        "type_info": "Uuid"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "UuidArray",
+        "Uuid"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "872d77e34d06bc036a07e9b2330166a2e0bedf34db5bceb3e6e576f1e07f6414"
+}
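This new query backs the membership check for linking: given a batch of candidate shared-task ids, it returns only the ones visible to the user through an organization membership. A sketch of how it can be called; the function name and signature are assumed, not taken from the crate:

use sqlx::PgPool;
use uuid::Uuid;

// Keep only the shared-task ids the user can actually see before linking.
async fn visible_task_ids(
    pool: &PgPool,
    candidate_ids: &[Uuid],
    user_id: Uuid,
) -> sqlx::Result<Vec<Uuid>> {
    sqlx::query_scalar(
        "SELECT t.id
         FROM shared_tasks t
         INNER JOIN organization_member_metadata om
             ON t.organization_id = om.organization_id
         WHERE t.id = ANY($1) AND t.deleted_at IS NULL AND om.user_id = $2",
    )
    .bind(candidate_ids)
    .bind(user_id)
    .fetch_all(pool)
    .await
}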
crates/remote/.sqlx/query-a0fef73e10f2f7bba67f740aef62e43fb8e4678833be58e361d7b90912fa9883.json (generated, new file)
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT 1 AS v FROM shared_tasks WHERE \"organization_id\" = ANY($1)",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "v",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "UuidArray"
+      ]
+    },
+    "nullable": [
+      null
+    ]
+  },
+  "hash": "a0fef73e10f2f7bba67f740aef62e43fb8e4678833be58e361d7b90912fa9883"
+}
@@ -1,22 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT pg_try_advisory_lock(hashtextextended($1, 0))\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "pg_try_advisory_lock",
-        "type_info": "Bool"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Text"
-      ]
-    },
-    "nullable": [
-      null
-    ]
-  },
-  "hash": "ae5afb54ca4316801148a697d31965c714f87b84840d93195443fa1df9375543"
-}
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n UPDATE shared_tasks AS t\n SET assignee_user_id = $2,\n version = t.version + 1\n WHERE t.id = $1\n AND t.version = COALESCE($4, t.version)\n AND ($3::uuid IS NULL OR t.assignee_user_id = $3::uuid)\n AND t.deleted_at IS NULL\n RETURNING\n t.id AS \"id!\",\n t.organization_id AS \"organization_id!: Uuid\",\n t.project_id AS \"project_id!\",\n t.creator_user_id AS \"creator_user_id?: Uuid\",\n t.assignee_user_id AS \"assignee_user_id?: Uuid\",\n t.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n t.title AS \"title!\",\n t.description AS \"description?\",\n t.status AS \"status!: TaskStatus\",\n t.version AS \"version!\",\n t.deleted_at AS \"deleted_at?\",\n t.shared_at AS \"shared_at?\",\n t.created_at AS \"created_at!\",\n t.updated_at AS \"updated_at!\"\n ",
+  "query": "\n UPDATE shared_tasks AS t\n SET assignee_user_id = $2\n WHERE t.id = $1\n AND ($3::uuid IS NULL OR t.assignee_user_id = $3::uuid)\n AND t.deleted_at IS NULL\n RETURNING\n t.id AS \"id!\",\n t.organization_id AS \"organization_id!: Uuid\",\n t.project_id AS \"project_id!\",\n t.creator_user_id AS \"creator_user_id?: Uuid\",\n t.assignee_user_id AS \"assignee_user_id?: Uuid\",\n t.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n t.title AS \"title!\",\n t.description AS \"description?\",\n t.status AS \"status!: TaskStatus\",\n t.deleted_at AS \"deleted_at?\",\n t.shared_at AS \"shared_at?\",\n t.created_at AS \"created_at!\",\n t.updated_at AS \"updated_at!\"\n ",
   "describe": {
     "columns": [
       {
@@ -52,8 +52,8 @@
           "kind": {
             "Enum": [
               "todo",
-              "in-progress",
-              "in-review",
+              "inprogress",
+              "inreview",
               "done",
               "cancelled"
             ]
@@ -63,26 +63,21 @@
       },
       {
         "ordinal": 9,
-        "name": "version!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 10,
         "name": "deleted_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 11,
+        "ordinal": 10,
         "name": "shared_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 12,
+        "ordinal": 11,
         "name": "created_at!",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 13,
+        "ordinal": 12,
         "name": "updated_at!",
         "type_info": "Timestamptz"
       }
@@ -91,8 +86,7 @@
     "Left": [
       "Uuid",
       "Uuid",
-      "Uuid",
-      "Int8"
+      "Uuid"
     ]
   },
   "nullable": [
@@ -105,12 +99,11 @@
       false,
       true,
       false,
-      false,
      true,
      true,
      false,
      false
    ]
  },
-  "hash": "97132a5a3f0c0f9ca404d8517dd77a3e55a6933d8b7afad5296d9a63ec43d1e0"
+  "hash": "af1c9ee18bd6dffa6e2b46959690ba0a1d1d545fea0b643e591b250a7160aa47"
 }
@@ -1,22 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT MAX(seq)\n FROM activity\n WHERE project_id = $1\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "max",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Uuid"
-      ]
-    },
-    "nullable": [
-      null
-    ]
-  },
-  "hash": "ba222a6989447b36de700fa211af240fcf59603cf2bf50eb8c2be8a37fcfc565"
-}
@@ -1,22 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT pg_advisory_unlock(hashtextextended($1, 0))\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "pg_advisory_unlock",
-        "type_info": "Bool"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Text"
-      ]
-    },
-    "nullable": [
-      null
-    ]
-  },
-  "hash": "c8aa60c6bfbdc7c471fec520a958d6718bc60876a28b92b49fe11169b23c2966"
-}
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n INSERT INTO shared_tasks (\n organization_id,\n project_id,\n creator_user_id,\n assignee_user_id,\n title,\n description,\n shared_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, NOW())\n RETURNING id AS \"id!\",\n organization_id AS \"organization_id!: Uuid\",\n project_id AS \"project_id!\",\n creator_user_id AS \"creator_user_id?: Uuid\",\n assignee_user_id AS \"assignee_user_id?: Uuid\",\n deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n title AS \"title!\",\n description AS \"description?\",\n status AS \"status!: TaskStatus\",\n version AS \"version!\",\n deleted_at AS \"deleted_at?\",\n shared_at AS \"shared_at?\",\n created_at AS \"created_at!\",\n updated_at AS \"updated_at!\"\n ",
+  "query": "\n INSERT INTO shared_tasks (\n organization_id,\n project_id,\n creator_user_id,\n assignee_user_id,\n title,\n description,\n shared_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, NOW())\n RETURNING id AS \"id!\",\n organization_id AS \"organization_id!: Uuid\",\n project_id AS \"project_id!\",\n creator_user_id AS \"creator_user_id?: Uuid\",\n assignee_user_id AS \"assignee_user_id?: Uuid\",\n deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n title AS \"title!\",\n description AS \"description?\",\n status AS \"status!: TaskStatus\",\n deleted_at AS \"deleted_at?\",\n shared_at AS \"shared_at?\",\n created_at AS \"created_at!\",\n updated_at AS \"updated_at!\"\n ",
   "describe": {
     "columns": [
       {
@@ -52,8 +52,8 @@
           "kind": {
             "Enum": [
               "todo",
-              "in-progress",
-              "in-review",
+              "inprogress",
+              "inreview",
               "done",
               "cancelled"
             ]
@@ -63,26 +63,21 @@
       },
       {
         "ordinal": 9,
-        "name": "version!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 10,
         "name": "deleted_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 11,
+        "ordinal": 10,
         "name": "shared_at?",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 12,
+        "ordinal": 11,
         "name": "created_at!",
         "type_info": "Timestamptz"
       },
       {
-        "ordinal": 13,
+        "ordinal": 12,
         "name": "updated_at!",
         "type_info": "Timestamptz"
       }
@@ -107,12 +102,11 @@
       false,
       true,
       false,
-      false,
      true,
      true,
      false,
      false
    ]
  },
-  "hash": "13b1cf3d350af65f983aeab1e8c43faf3edc10c6403279f8450f2f9ae835cc18"
+  "hash": "daa9b8b4b2d30296fc3c46fd25ba9e067577216bb58d6f75c6329ac7bcbb2fc8"
 }
@@ -1,26 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT format('%I.%I', n.nspname, c.relname) AS qualified_name,\n split_part(\n split_part(pg_get_expr(c.relpartbound, c.oid), ' TO (''', 2),\n ''')', 1\n )::timestamptz AS upper_bound\n FROM pg_partition_tree('activity') pt\n JOIN pg_class c ON c.oid = pt.relid\n JOIN pg_namespace n ON n.oid = c.relnamespace\n WHERE pt.isleaf\n AND c.relname ~ '^activity_p_\\d{8}$'\n AND split_part(\n split_part(pg_get_expr(c.relpartbound, c.oid), ' TO (''', 2),\n ''')', 1\n )::timestamptz <= NOW() - INTERVAL '2 days'\n ORDER BY upper_bound\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "qualified_name",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 1,
-        "name": "upper_bound",
-        "type_info": "Timestamptz"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      null,
-      null
-    ]
-  },
-  "hash": "fe740e5984676e9bdbdd36e9f090b00b952a31f89ae649046f3d97a9fa4913bf"
-}
@@ -1,137 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT\n st.id AS \"id!: Uuid\",\n st.organization_id AS \"organization_id!: Uuid\",\n st.project_id AS \"project_id!: Uuid\",\n st.creator_user_id AS \"creator_user_id?: Uuid\",\n st.assignee_user_id AS \"assignee_user_id?: Uuid\",\n st.deleted_by_user_id AS \"deleted_by_user_id?: Uuid\",\n st.title AS \"title!\",\n st.description AS \"description?\",\n st.status AS \"status!: TaskStatus\",\n st.version AS \"version!\",\n st.deleted_at AS \"deleted_at?\",\n st.shared_at AS \"shared_at?\",\n st.created_at AS \"created_at!\",\n st.updated_at AS \"updated_at!\",\n u.id AS \"user_id?: Uuid\",\n u.first_name AS \"user_first_name?\",\n u.last_name AS \"user_last_name?\",\n u.username AS \"user_username?\"\n FROM shared_tasks st\n LEFT JOIN users u ON st.assignee_user_id = u.id\n WHERE st.project_id = $1\n AND st.deleted_at IS NULL\n ORDER BY st.updated_at DESC\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "id!: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 1,
-        "name": "organization_id!: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 2,
-        "name": "project_id!: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 3,
-        "name": "creator_user_id?: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 4,
-        "name": "assignee_user_id?: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 5,
-        "name": "deleted_by_user_id?: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 6,
-        "name": "title!",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 7,
-        "name": "description?",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 8,
-        "name": "status!: TaskStatus",
-        "type_info": {
-          "Custom": {
-            "name": "task_status",
-            "kind": {
-              "Enum": [
-                "todo",
-                "in-progress",
-                "in-review",
-                "done",
-                "cancelled"
-              ]
-            }
-          }
-        }
-      },
-      {
-        "ordinal": 9,
-        "name": "version!",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 10,
-        "name": "deleted_at?",
-        "type_info": "Timestamptz"
-      },
-      {
-        "ordinal": 11,
-        "name": "shared_at?",
-        "type_info": "Timestamptz"
-      },
-      {
-        "ordinal": 12,
-        "name": "created_at!",
-        "type_info": "Timestamptz"
-      },
-      {
-        "ordinal": 13,
-        "name": "updated_at!",
-        "type_info": "Timestamptz"
-      },
-      {
-        "ordinal": 14,
-        "name": "user_id?: Uuid",
-        "type_info": "Uuid"
-      },
-      {
-        "ordinal": 15,
-        "name": "user_first_name?",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 16,
-        "name": "user_last_name?",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 17,
-        "name": "user_username?",
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Uuid"
-      ]
-    },
-    "nullable": [
-      false,
-      false,
-      false,
-      true,
-      true,
-      true,
-      false,
-      true,
-      false,
-      false,
-      true,
-      true,
-      false,
-      false,
-      false,
-      true,
-      true,
-      true
-    ]
-  },
-  "hash": "ff9b35a31210dbddd237f4234bec1411b5aa1b0be986fbe5a8ee21e6771222f2"
-}
@@ -12,7 +12,7 @@ aes-gcm = "0.10"
 chrono = { version = "0.4", features = ["serde"] }
 futures = "0.3"
 async-trait = "0.1"
-reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
+reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "stream"] }
 secrecy = "0.10.3"
 sentry = { version = "0.41.0", features = ["anyhow", "backtrace", "panic", "debug-images"] }
 sentry-tracing = { version = "0.41.0", features = ["backtrace"] }
@@ -26,6 +26,7 @@ tracing = { workspace = true }
 tracing-subscriber = { workspace = true }
 tracing-error = "0.2"
 thiserror = { workspace = true }
+ts-rs = { workspace = true }
 utils = { path = "../utils" }
 uuid = { version = "1", features = ["serde", "v4"] }
 jsonwebtoken = "9"
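The new `stream` feature on reqwest suggests the server now forwards Electric's HTTP responses without buffering them. A minimal sketch of that pattern, assuming an axum handler and Electric's `/v1/shape` endpoint; the handler name, URL shape, and wiring are assumptions, not taken from this diff:

use axum::{body::Body, response::Response};

// Forward an Electric shape response to the client as a byte stream.
// `bytes_stream()` is what requires reqwest's "stream" feature.
async fn proxy_shape(client: &reqwest::Client, electric_url: &str) -> anyhow::Result<Response> {
    let upstream = client
        .get(format!("{electric_url}/v1/shape?table=shared_tasks&offset=-1"))
        .send()
        .await?;
    let status = upstream.status();
    let body = Body::from_stream(upstream.bytes_stream());
    Ok(Response::builder().status(status).body(body)?)
}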
@@ -1,6 +1,7 @@
 services:
   remote-db:
     image: postgres:16-alpine
+    command: ["postgres", "-c", "wal_level=logical"]
     environment:
       POSTGRES_DB: remote
       POSTGRES_USER: remote
@@ -16,6 +17,23 @@ services:
     ports:
       - "5432:5432"
 
+  electric:
+    image: electricsql/electric:latest
+    working_dir: /app
+    environment:
+      DATABASE_URL: postgresql://electric_sync:${ELECTRIC_ROLE_PASSWORD:?set in .env.remote}@remote-db:5432/remote?sslmode=disable
+      PG_PROXY_PORT: 65432
+      LOGICAL_PUBLISHER_HOST: electric
+      AUTH_MODE: insecure
+      ELECTRIC_INSECURE: true
+      ELECTRIC_MANUAL_TABLE_PUBLISHING: true
+      ELECTRIC_USAGE_REPORTING: false
+    volumes:
+      - electric-data:/app/persistent
+    depends_on:
+      remote-db:
+        condition: service_healthy
+
   remote-server:
     build:
       context: ../..
@@ -23,10 +41,12 @@ services:
     depends_on:
       remote-db:
         condition: service_healthy
+      electric:
+        condition: service_started
     environment:
       SERVER_DATABASE_URL: postgres://remote:remote@remote-db:5432/remote
       SERVER_LISTEN_ADDR: 0.0.0.0:8081
-      SERVER_ACTIVITY_CHANNEL: activity
+      ELECTRIC_URL: http://electric:3000
       GITHUB_OAUTH_CLIENT_ID: ${GITHUB_OAUTH_CLIENT_ID:?set in .env.remote}
       GITHUB_OAUTH_CLIENT_SECRET: ${GITHUB_OAUTH_CLIENT_SECRET:?set in .env.remote}
       GOOGLE_OAUTH_CLIENT_ID: ${GOOGLE_OAUTH_CLIENT_ID:?set in .env.remote}
@@ -36,9 +56,11 @@ services:
       SERVER_PUBLIC_BASE_URL: http://localhost:3000
       VITE_APP_BASE_URL: http://localhost:3000
       VITE_API_BASE_URL: http://localhost:3000
+      ELECTRIC_ROLE_PASSWORD: ${ELECTRIC_ROLE_PASSWORD:?set in .env.remote}
     ports:
       - "127.0.0.1:3000:8081"
     restart: unless-stopped
 
 volumes:
   remote-db-data:
+  electric-data:
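Electric consumes Postgres logical replication, which is why the `remote-db` service now starts with `wal_level=logical`. A startup assertion like the following fails fast when the database is misconfigured; this check is illustrative and does not appear in the diff:

// Verify the database is configured for logical replication before Electric
// tries to create its replication slot.
async fn assert_logical_wal(pool: &sqlx::PgPool) -> anyhow::Result<()> {
    let level: String = sqlx::query_scalar("SHOW wal_level").fetch_one(pool).await?;
    anyhow::ensure!(level == "logical", "wal_level must be 'logical', got {level}");
    Ok(())
}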
crates/remote/migrations/20251127000000_electric_support.sql (new file)
@@ -0,0 +1,21 @@
+CREATE ROLE electric_sync WITH LOGIN REPLICATION;
+
+GRANT CONNECT ON DATABASE remote TO electric_sync;
+GRANT USAGE ON SCHEMA public TO electric_sync;
+
+CREATE PUBLICATION electric_publication_default;
+
+CREATE OR REPLACE FUNCTION electric_sync_table(p_schema text, p_table text)
+RETURNS void
+LANGUAGE plpgsql
+AS $$
+DECLARE
+    qualified text := format('%I.%I', p_schema, p_table);
+BEGIN
+    EXECUTE format('ALTER TABLE %s REPLICA IDENTITY FULL', qualified);
+    EXECUTE format('GRANT SELECT ON TABLE %s TO electric_sync', qualified);
+    EXECUTE format('ALTER PUBLICATION %I ADD TABLE %s', 'electric_publication_default', qualified);
+END;
+$$;
+
+SELECT electric_sync_table('public', 'shared_tasks');
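The `electric_sync_table` helper bundles the three statements Electric's manual table publishing needs: full replica identity, a `SELECT` grant for the sync role, and publication membership. A later migration or setup task can opt a new table in with one call; a sketch, with the table name hypothetical:

use sqlx::PgPool;

// Opt a (hypothetical) new table into Electric sync, reusing the SQL helper
// defined in the migration above.
async fn sync_new_table(pool: &PgPool) -> sqlx::Result<()> {
    sqlx::query("SELECT electric_sync_table('public', 'some_new_table')")
        .execute(pool)
        .await?;
    Ok(())
}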
@@ -0,0 +1,9 @@
+-- Drop activity feed tables and functions
+DROP TABLE IF EXISTS activity CASCADE;
+DROP TABLE IF EXISTS project_activity_counters;
+DROP FUNCTION IF EXISTS ensure_activity_partition;
+DROP FUNCTION IF EXISTS activity_notify;
+
+-- Drop unused columns from shared_tasks
+ALTER TABLE shared_tasks DROP COLUMN IF EXISTS version;
+ALTER TABLE shared_tasks DROP COLUMN IF EXISTS last_event_seq;
@@ -0,0 +1,2 @@
+ALTER TYPE task_status RENAME VALUE 'in-progress' TO 'inprogress';
+ALTER TYPE task_status RENAME VALUE 'in-review' TO 'inreview';
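These renames keep the Postgres enum aligned with the serde change from `kebab-case` to `lowercase` in `tasks.rs` (last hunk of this diff), so a status serializes identically in the database and on the wire. A self-contained check of the serde side:

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "lowercase")]
enum TaskStatus {
    Todo,
    InProgress,
    InReview,
    Done,
    Cancelled,
}

fn main() {
    // serde's "lowercase" lowercases the variant name with no separator,
    // matching the renamed Postgres values 'inprogress' and 'inreview'.
    assert_eq!(
        serde_json::to_string(&TaskStatus::InProgress).unwrap(),
        "\"inprogress\""
    );
    assert_eq!(
        serde_json::to_string(&TaskStatus::InReview).unwrap(),
        "\"inreview\""
    );
}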
@@ -1,106 +0,0 @@
-use std::{
-    hash::{Hash, Hasher},
-    pin::Pin,
-    sync::Arc,
-};
-
-use chrono::{DateTime, Utc};
-use futures::{Stream, StreamExt, future};
-use serde::{Deserialize, Serialize};
-use tokio::sync::broadcast;
-use tokio_stream::wrappers::{BroadcastStream, errors::BroadcastStreamRecvError};
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct ActivityResponse {
-    pub data: Vec<ActivityEvent>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ActivityEvent {
-    pub seq: i64,
-    pub event_id: uuid::Uuid,
-    pub project_id: uuid::Uuid,
-    pub event_type: String,
-    pub created_at: DateTime<Utc>,
-    pub payload: Option<serde_json::Value>,
-}
-
-impl ActivityEvent {
-    pub fn new(
-        seq: i64,
-        event_id: uuid::Uuid,
-        project_id: uuid::Uuid,
-        event_type: String,
-        created_at: DateTime<Utc>,
-        payload: Option<serde_json::Value>,
-    ) -> Self {
-        Self {
-            seq,
-            event_id,
-            project_id,
-            event_type,
-            created_at,
-            payload,
-        }
-    }
-}
-
-#[derive(Clone)]
-pub struct ActivityBroker {
-    shards: Arc<Vec<broadcast::Sender<ActivityEvent>>>,
-}
-
-pub type ActivityStream =
-    Pin<Box<dyn Stream<Item = Result<ActivityEvent, BroadcastStreamRecvError>> + Send + 'static>>;
-
-impl ActivityBroker {
-    /// Shard broadcast senders to keep busy organisations from evicting everyone else's events.
-    pub fn new(shard_count: usize, shard_capacity: usize) -> Self {
-        let shard_count = shard_count.max(1);
-        let shard_capacity = shard_capacity.max(1);
-        let shards = (0..shard_count)
-            .map(|_| {
-                let (sender, _receiver) = broadcast::channel(shard_capacity);
-                sender
-            })
-            .collect();
-
-        Self {
-            shards: Arc::new(shards),
-        }
-    }
-
-    pub fn subscribe(&self, project_id: uuid::Uuid) -> ActivityStream {
-        let index = self.shard_index(&project_id);
-        let receiver = self.shards[index].subscribe();
-
-        let stream = BroadcastStream::new(receiver).filter_map(move |item| {
-            future::ready(match item {
-                Ok(event) if event.project_id == project_id => Some(Ok(event)),
-                Ok(_) => None,
-                Err(err) => Some(Err(err)),
-            })
-        });
-
-        Box::pin(stream)
-    }
-
-    pub fn publish(&self, event: ActivityEvent) {
-        let index = self.shard_index(&event.project_id);
-        if let Err(error) = self.shards[index].send(event) {
-            tracing::debug!(?error, "no subscribers for activity event");
-        }
-    }
-
-    fn shard_index(&self, project_id: &uuid::Uuid) -> usize {
-        let mut hasher = std::collections::hash_map::DefaultHasher::new();
-        project_id.hash(&mut hasher);
-        (hasher.finish() as usize) % self.shards.len()
-    }
-}
-
-impl Default for ActivityBroker {
-    fn default() -> Self {
-        Self::new(16, 512)
-    }
-}
@@ -1,3 +0,0 @@
-mod broker;
-
-pub use broker::{ActivityBroker, ActivityEvent, ActivityResponse, ActivityStream};
@@ -1,11 +1,11 @@
 use std::{net::SocketAddr, sync::Arc};
 
 use anyhow::{Context, bail};
+use secrecy::ExposeSecret;
 use tracing::instrument;
 
 use crate::{
     AppState,
-    activity::ActivityBroker,
     auth::{
         GitHubOAuthProvider, GoogleOAuthProvider, JwtService, OAuthHandoffService,
         OAuthTokenValidator, ProviderRegistry,
@@ -22,7 +22,7 @@ impl Server {
     #[instrument(
         name = "remote_server",
         skip(config),
-        fields(listen_addr = %config.listen_addr, activity_channel = %config.activity_channel)
+        fields(listen_addr = %config.listen_addr)
     )]
     pub async fn run(config: RemoteServerConfig) -> anyhow::Result<()> {
         let pool = db::create_pool(&config.database_url)
@@ -33,12 +33,12 @@ impl Server {
             .await
             .context("failed to run database migrations")?;
 
-        db::maintenance::spawn_activity_partition_maintenance(pool.clone());
+        if let Some(password) = config.electric_role_password.as_ref() {
+            db::ensure_electric_role_password(&pool, password.expose_secret())
+                .await
+                .context("failed to set electric role password")?;
+        }
 
-        let broker = ActivityBroker::new(
-            config.activity_broadcast_shards,
-            config.activity_broadcast_capacity,
-        );
         let auth_config = config.auth.clone();
         let jwt = Arc::new(JwtService::new(auth_config.jwt_secret().clone()));
 
@@ -84,21 +84,18 @@ impl Server {
             )
         })?;
 
+        let http_client = reqwest::Client::new();
         let state = AppState::new(
             pool.clone(),
-            broker.clone(),
             config.clone(),
             jwt,
             handoff_service,
             oauth_token_validator,
             mailer,
             server_public_base_url,
+            http_client,
         );
 
-        let listener =
-            db::ActivityListener::new(pool.clone(), broker, config.activity_channel.clone());
-        tokio::spawn(listener.run());
-
         let router = routes::router(state);
         let addr: SocketAddr = config
             .listen_addr
@@ -4,26 +4,15 @@ use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STANDARD};
 use secrecy::SecretString;
 use thiserror::Error;
 
-// Default activity items returned in a single query
-const DEFAULT_ACTIVITY_DEFAULT_LIMIT: i64 = 200;
-// Max activity items that can be requested in a single query
-const DEFAULT_ACTIVITY_MAX_LIMIT: i64 = 500;
-const DEFAULT_ACTIVITY_BROADCAST_SHARDS: usize = 16;
-const DEFAULT_ACTIVITY_BROADCAST_CAPACITY: usize = 512;
-const DEFAULT_ACTIVITY_CATCHUP_BATCH_SIZE: i64 = 100;
-
 #[derive(Debug, Clone)]
 pub struct RemoteServerConfig {
     pub database_url: String,
     pub listen_addr: String,
     pub server_public_base_url: Option<String>,
-    pub activity_channel: String,
-    pub activity_default_limit: i64,
-    pub activity_max_limit: i64,
-    pub activity_broadcast_shards: usize,
-    pub activity_broadcast_capacity: usize,
-    pub activity_catchup_batch_size: i64,
     pub auth: AuthConfig,
+    pub electric_url: String,
+    pub electric_secret: Option<SecretString>,
+    pub electric_role_password: Option<SecretString>,
 }
 
 #[derive(Debug, Error)]
@@ -47,59 +36,31 @@ impl RemoteServerConfig {
 
         let server_public_base_url = env::var("SERVER_PUBLIC_BASE_URL").ok();
 
-        let activity_channel =
-            env::var("SERVER_ACTIVITY_CHANNEL").unwrap_or_else(|_| "activity".to_string());
-
-        let activity_default_limit = DEFAULT_ACTIVITY_DEFAULT_LIMIT;
-        let activity_max_limit = DEFAULT_ACTIVITY_MAX_LIMIT;
-
-        let activity_broadcast_shards = get_numeric_env_var(
-            "SERVER_ACTIVITY_BROADCAST_SHARDS",
-            DEFAULT_ACTIVITY_BROADCAST_SHARDS,
-        )?
-        .max(1);
-
-        let activity_broadcast_capacity = get_numeric_env_var(
-            "SERVER_ACTIVITY_BROADCAST_CAPACITY",
-            DEFAULT_ACTIVITY_BROADCAST_CAPACITY,
-        )?
-        .max(1);
-
-        let activity_catchup_batch_size = get_numeric_env_var(
-            "SERVER_ACTIVITY_CATCHUP_BATCH_SIZE",
-            DEFAULT_ACTIVITY_CATCHUP_BATCH_SIZE,
-        )?
-        .max(1);
-
         let auth = AuthConfig::from_env()?;
 
+        let electric_url =
+            env::var("ELECTRIC_URL").map_err(|_| ConfigError::MissingVar("ELECTRIC_URL"))?;
+
+        let electric_secret = env::var("ELECTRIC_SECRET")
+            .map(|s| SecretString::new(s.into()))
+            .ok();
+
+        let electric_role_password = env::var("ELECTRIC_ROLE_PASSWORD")
+            .ok()
+            .map(|s| SecretString::new(s.into()));
+
         Ok(Self {
             database_url,
             listen_addr,
             server_public_base_url,
-            activity_channel,
-            activity_default_limit,
-            activity_max_limit,
-            activity_broadcast_shards,
-            activity_broadcast_capacity,
-            activity_catchup_batch_size,
             auth,
+            electric_url,
+            electric_secret,
+            electric_role_password,
         })
     }
 }
 
-fn get_numeric_env_var<T: std::str::FromStr>(
-    var_name: &'static str,
-    default: T,
-) -> Result<T, ConfigError> {
-    match env::var(var_name) {
-        Ok(value) => value
-            .parse::<T>()
-            .map_err(|_| ConfigError::InvalidVar(var_name)),
-        Err(_) => Ok(default),
-    }
-}
-
 #[derive(Debug, Clone)]
 pub struct OAuthProviderConfig {
     client_id: String,
@@ -1,95 +0,0 @@
-use chrono::{DateTime, Utc};
-use sqlx::PgPool;
-use uuid::Uuid;
-
-use crate::activity::ActivityEvent;
-
-pub struct ActivityRepository<'a> {
-    pool: &'a PgPool,
-}
-
-impl<'a> ActivityRepository<'a> {
-    pub fn new(pool: &'a PgPool) -> Self {
-        Self { pool }
-    }
-
-    pub async fn fetch_since(
-        &self,
-        project_id: Uuid,
-        after_seq: Option<i64>,
-        limit: i64,
-    ) -> Result<Vec<ActivityEvent>, sqlx::Error> {
-        let rows = sqlx::query_as::<_, ActivityRow>(
-            r#"
-            SELECT seq,
-                   event_id,
-                   project_id,
-                   event_type,
-                   created_at,
-                   payload
-            FROM activity
-            WHERE project_id = $1
-              AND ($2::bigint IS NULL OR seq > $2)
-            ORDER BY seq ASC
-            LIMIT $3
-            "#,
-        )
-        .bind(project_id)
-        .bind(after_seq)
-        .bind(limit)
-        .fetch_all(self.pool)
-        .await?;
-
-        Ok(rows.into_iter().map(ActivityRow::into_event).collect())
-    }
-
-    pub async fn fetch_by_seq(
-        &self,
-        project_id: Uuid,
-        seq: i64,
-    ) -> Result<Option<ActivityEvent>, sqlx::Error> {
-        let row = sqlx::query_as::<_, ActivityRow>(
-            r#"
-            SELECT seq,
-                   event_id,
-                   project_id,
-                   event_type,
-                   created_at,
-                   payload
-            FROM activity
-            WHERE project_id = $1
-              AND seq = $2
-            LIMIT 1
-            "#,
-        )
-        .bind(project_id)
-        .bind(seq)
-        .fetch_optional(self.pool)
-        .await?;
-
-        Ok(row.map(ActivityRow::into_event))
-    }
-}
-
-#[derive(sqlx::FromRow)]
-struct ActivityRow {
-    seq: i64,
-    event_id: Uuid,
-    project_id: Uuid,
-    event_type: String,
-    created_at: DateTime<Utc>,
-    payload: serde_json::Value,
-}
-
-impl ActivityRow {
-    fn into_event(self) -> ActivityEvent {
-        ActivityEvent::new(
-            self.seq,
-            self.event_id,
-            self.project_id,
-            self.event_type,
-            self.created_at,
-            Some(self.payload),
-        )
-    }
-}
@@ -1,108 +0,0 @@
-use std::time::Duration;
-
-use anyhow::Context;
-use serde::Deserialize;
-use sqlx::{PgPool, postgres::PgListener};
-use tokio::time::sleep;
-use tracing::instrument;
-use uuid::Uuid;
-
-use crate::{activity::ActivityBroker, db::activity::ActivityRepository};
-
-pub struct ActivityListener {
-    pool: PgPool,
-    broker: ActivityBroker,
-    channel: String,
-}
-
-impl ActivityListener {
-    pub fn new(pool: PgPool, broker: ActivityBroker, channel: String) -> Self {
-        Self {
-            pool,
-            broker,
-            channel,
-        }
-    }
-
-    #[instrument(
-        name = "activity.listener",
-        skip(self),
-        fields(channel = %self.channel)
-    )]
-    pub async fn run(self) {
-        let mut backoff = Duration::from_secs(1);
-        let max_backoff = Duration::from_secs(30);
-
-        let pool = self.pool;
-        let broker = self.broker;
-        let channel = self.channel;
-
-        loop {
-            match listen_loop(&pool, &broker, &channel).await {
-                Ok(_) => {
-                    backoff = Duration::from_secs(1);
-                }
-                Err(error) => {
-                    tracing::error!(?error, ?backoff, "activity listener error; retrying");
-                    sleep(backoff).await;
-                    backoff = (backoff * 2).min(max_backoff);
-                }
-            }
-        }
-    }
-}
-
-#[instrument(
-    name = "activity.listen_loop",
-    skip(pool, broker),
-    fields(channel = %channel)
-)]
-async fn listen_loop(pool: &PgPool, broker: &ActivityBroker, channel: &str) -> anyhow::Result<()> {
-    let mut listener = PgListener::connect_with(pool)
-        .await
-        .context("failed to create LISTEN connection")?;
-    listener
-        .listen(channel)
-        .await
-        .with_context(|| format!("failed to LISTEN on channel {channel}"))?;
-
-    loop {
-        let notification = listener
-            .recv()
-            .await
-            .context("failed to receive LISTEN notification")?;
-
-        let payload: NotificationEnvelope = serde_json::from_str(notification.payload())
-            .with_context(|| format!("invalid notification payload: {}", notification.payload()))?;
-
-        tracing::trace!(%payload.seq, project_id = %payload.project_id, "received activity notification");
-
-        let project_uuid = payload
-            .project_id
-            .parse::<Uuid>()
-            .with_context(|| format!("invalid project_id UUID: {}", payload.project_id))?;
-
-        let event = match ActivityRepository::new(pool)
-            .fetch_by_seq(project_uuid, payload.seq)
-            .await
-        {
-            Ok(Some(event)) => event,
-            Ok(None) => {
-                tracing::warn!(seq = payload.seq, project_id = %payload.project_id, "activity row missing for notification");
-                continue;
-            }
-            Err(error) => {
-                tracing::error!(?error, seq = payload.seq, project_id = %payload.project_id, "failed to fetch activity payload");
-                continue;
-            }
-        };
-
-        broker.publish(event);
-    }
-}
-
-#[derive(Debug, Deserialize)]
-struct NotificationEnvelope {
-    seq: i64,
-    project_id: String,
-}
@@ -1,159 +0,0 @@
-use std::{sync::OnceLock, time::Duration};
-
-use chrono::{Duration as ChronoDuration, NaiveTime, TimeZone, Utc};
-use sqlx::{PgPool, error::DatabaseError};
-use tokio::time::sleep;
-use tracing::{error, info, warn};
-
-const PRUNE_LOCK_KEY: &str = "vibe_kanban_activity_retention_v1";
-static PROVISION_TIME: OnceLock<NaiveTime> = OnceLock::new();
-static PRUNE_TIME: OnceLock<NaiveTime> = OnceLock::new();
-
-fn provision_time() -> NaiveTime {
-    *PROVISION_TIME.get_or_init(|| NaiveTime::from_hms_opt(0, 10, 0).expect("valid time"))
-}
-
-fn prune_time() -> NaiveTime {
-    *PRUNE_TIME.get_or_init(|| NaiveTime::from_hms_opt(1, 30, 0).expect("valid time"))
-}
-
-pub fn spawn_activity_partition_maintenance(pool: PgPool) {
-    let creation_pool = pool.clone();
-    tokio::spawn(async move {
-        if let Err(err) = ensure_future_partitions_with_pool(&creation_pool).await {
-            error!(error = ?err, "initial activity partition provisioning failed");
-        }
-
-        loop {
-            sleep(duration_until(provision_time())).await;
-            if let Err(err) = ensure_future_partitions_with_pool(&creation_pool).await {
-                error!(error = ?err, "scheduled partition provisioning failed");
-            }
-        }
-    });
-
-    tokio::spawn(async move {
-        if let Err(err) = prune_old_partitions(&pool).await {
-            error!(error = ?err, "initial activity partition pruning failed");
-        }
-
-        loop {
-            sleep(duration_until(prune_time())).await;
-            if let Err(err) = prune_old_partitions(&pool).await {
-                error!(error = ?err, "scheduled partition pruning failed");
-            }
-        }
-    });
-}
-
-fn duration_until(target_time: NaiveTime) -> Duration {
-    let now = Utc::now();
-
-    let today = now.date_naive();
-    let mut next = today.and_time(target_time);
-
-    if now.time() >= target_time {
-        next = (today + ChronoDuration::days(1)).and_time(target_time);
-    }
-
-    let next_dt = Utc.from_utc_datetime(&next);
-    (next_dt - now)
-        .to_std()
-        .unwrap_or_else(|_| Duration::from_secs(0))
-}
-
-async fn prune_old_partitions(pool: &PgPool) -> Result<(), sqlx::Error> {
-    let mut conn = pool.acquire().await?;
-
-    let lock_acquired = sqlx::query_scalar!(
-        r#"
-        SELECT pg_try_advisory_lock(hashtextextended($1, 0))
-        "#,
-        PRUNE_LOCK_KEY
-    )
-    .fetch_one(&mut *conn)
-    .await?
-    .unwrap_or(false);
-
-    if !lock_acquired {
-        warn!("skipping partition pruning because another worker holds the lock");
-        return Ok(());
-    }
-
-    let result = async {
-        let partitions = sqlx::query!(
-            r#"
-            SELECT format('%I.%I', n.nspname, c.relname) AS qualified_name,
-                   split_part(
-                       split_part(pg_get_expr(c.relpartbound, c.oid), ' TO (''', 2),
-                       ''')', 1
-                   )::timestamptz AS upper_bound
-            FROM pg_partition_tree('activity') pt
-            JOIN pg_class c ON c.oid = pt.relid
-            JOIN pg_namespace n ON n.oid = c.relnamespace
-            WHERE pt.isleaf
-              AND c.relname ~ '^activity_p_\d{8}$'
-              AND split_part(
-                      split_part(pg_get_expr(c.relpartbound, c.oid), ' TO (''', 2),
-                      ''')', 1
-                  )::timestamptz <= NOW() - INTERVAL '2 days'
-            ORDER BY upper_bound
-            "#
-        )
-        .fetch_all(&mut *conn)
-        .await?;
-
-        for partition in partitions {
-            if let Some(name) = partition.qualified_name {
-                let detach = format!("ALTER TABLE activity DETACH PARTITION {name} CONCURRENTLY");
-                sqlx::query(&detach).execute(&mut *conn).await?;
-
-                let drop = format!("DROP TABLE {name}");
-                sqlx::query(&drop).execute(&mut *conn).await?;
-
-                info!(partition = %name, "dropped activity partition");
-            }
-        }
-
-        Ok(())
-    }
-    .await;
-
-    let _ = sqlx::query_scalar!(
-        r#"
-        SELECT pg_advisory_unlock(hashtextextended($1, 0))
-        "#,
-        PRUNE_LOCK_KEY
-    )
-    .fetch_one(&mut *conn)
-    .await;
-
-    result
-}
-
-pub async fn ensure_future_partitions_with_pool(pool: &PgPool) -> Result<(), sqlx::Error> {
-    let mut conn = pool.acquire().await?;
-    ensure_future_partitions(&mut conn).await
-}
-
-pub async fn ensure_future_partitions(
-    executor: &mut sqlx::PgConnection,
-) -> Result<(), sqlx::Error> {
-    sqlx::query("SELECT ensure_activity_partition(NOW())")
-        .execute(&mut *executor)
-        .await?;
-    sqlx::query("SELECT ensure_activity_partition(NOW() + INTERVAL '24 hours')")
-        .execute(&mut *executor)
-        .await?;
-    sqlx::query("SELECT ensure_activity_partition(NOW() + INTERVAL '48 hours')")
-        .execute(&mut *executor)
-        .await?;
-    Ok(())
-}
-
-pub fn is_partition_missing_error(err: &(dyn DatabaseError + Send + Sync + 'static)) -> bool {
-    err.code()
-        .as_deref()
-        .is_some_and(|code| code.starts_with("23"))
-        && err.message().contains("no partition of relation")
-}
@@ -1,9 +1,6 @@
-pub mod activity;
 pub mod auth;
 pub mod identity_errors;
 pub mod invitations;
-pub mod listener;
-pub mod maintenance;
 pub mod oauth;
 pub mod oauth_accounts;
 pub mod organization_members;
@@ -12,7 +9,6 @@ pub mod projects;
 pub mod tasks;
 pub mod users;
 
-pub use listener::ActivityListener;
 use sqlx::{PgPool, Postgres, Transaction, migrate::MigrateError, postgres::PgPoolOptions};
 
 pub(crate) type Tx<'a> = Transaction<'a, Postgres>;
@@ -27,3 +23,21 @@ pub(crate) async fn create_pool(database_url: &str) -> Result<PgPool, sqlx::Erro
         .connect(database_url)
         .await
 }
+
+pub(crate) async fn ensure_electric_role_password(
+    pool: &PgPool,
+    password: &str,
+) -> Result<(), sqlx::Error> {
+    if password.is_empty() {
+        return Ok(());
+    }
+
+    // PostgreSQL doesn't support parameter binding for ALTER ROLE PASSWORD
+    // We need to escape the password properly and embed it directly in the SQL
+    let escaped_password = password.replace("'", "''");
+    let sql = format!("ALTER ROLE electric_sync WITH PASSWORD '{escaped_password}'");
+
+    sqlx::query(&sql).execute(pool).await?;
+
+    Ok(())
+}
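The escaping in ensure_electric_role_password leans on standard SQL quoting: inside a single-quoted literal the only character that needs care is the quote itself, which is doubled. A quick sanity check of that rule (hypothetical test, not part of the diff):

    #[test]
    fn electric_password_is_escaped_for_alter_role() {
        let password = "s3cr'et";
        // Same escaping as ensure_electric_role_password: double any single quote.
        let escaped = password.replace("'", "''");
        let sql = format!("ALTER ROLE electric_sync WITH PASSWORD '{escaped}'");
        assert_eq!(sql, "ALTER ROLE electric_sync WITH PASSWORD 's3cr''et'");
    }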
@@ -2,27 +2,21 @@ use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 use sqlx::PgPool;
 use thiserror::Error;
+use ts_rs::TS;
 use uuid::Uuid;
 
 use super::{
-    Tx,
     identity_errors::IdentityError,
     projects::{ProjectError, ProjectRepository},
     users::{UserData, fetch_user},
 };
-use crate::db::maintenance;
-
-pub struct BulkFetchResult {
-    pub tasks: Vec<SharedTaskActivityPayload>,
-    pub deleted_task_ids: Vec<Uuid>,
-    pub latest_seq: Option<i64>,
-}
 
 pub const MAX_SHARED_TASK_TEXT_BYTES: usize = 50 * 1024;
 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type)]
-#[serde(rename_all = "kebab-case")]
-#[sqlx(type_name = "task_status", rename_all = "kebab-case")]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type, TS)]
+#[serde(rename_all = "lowercase")]
+#[sqlx(type_name = "task_status", rename_all = "lowercase")]
+#[ts(export)]
 pub enum TaskStatus {
     Todo,
     InProgress,
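The rename_all switch above changes the wire format: with serde's "lowercase", variant names are lowercased with no separator, so InProgress serializes as "inprogress" rather than the previous kebab-case "in-progress". A small illustration (assumes serde_json; enum trimmed to two variants):

    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(rename_all = "lowercase")]
    enum TaskStatus {
        Todo,
        InProgress,
    }

    fn main() {
        // Prints the JSON string "inprogress", the format the sqlx
        // task_status mapping now matches as well.
        println!("{}", serde_json::to_string(&TaskStatus::InProgress).unwrap());
    }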
@@ -43,7 +37,8 @@ impl SharedTaskWithUser {
     }
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
+#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow, TS)]
+#[ts(export)]
 pub struct SharedTask {
     pub id: Uuid,
     pub organization_id: Uuid,
@@ -54,19 +49,12 @@ pub struct SharedTask {
     pub title: String,
     pub description: Option<String>,
     pub status: TaskStatus,
-    pub version: i64,
     pub deleted_at: Option<DateTime<Utc>>,
     pub shared_at: Option<DateTime<Utc>>,
     pub created_at: DateTime<Utc>,
     pub updated_at: DateTime<Utc>,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SharedTaskActivityPayload {
-    pub task: SharedTask,
-    pub user: Option<UserData>,
-}
-
 #[derive(Debug, Clone, Deserialize)]
 pub struct CreateSharedTaskData {
     pub project_id: Uuid,
@@ -81,7 +69,6 @@ pub struct UpdateSharedTaskData {
     pub title: Option<String>,
     pub description: Option<String>,
     pub status: Option<TaskStatus>,
-    pub version: Option<i64>,
     pub acting_user_id: Uuid,
 }
 
@@ -89,13 +76,11 @@ pub struct UpdateSharedTaskData {
 pub struct AssignTaskData {
     pub new_assignee_user_id: Option<Uuid>,
     pub previous_assignee_user_id: Option<Uuid>,
-    pub version: Option<i64>,
 }
 
 #[derive(Debug, Clone, Deserialize)]
 pub struct DeleteTaskData {
     pub acting_user_id: Uuid,
-    pub version: Option<i64>,
 }
 
 #[derive(Debug, Error)]
@@ -141,7 +126,6 @@ impl<'a> SharedTaskRepository<'a> {
                 title AS "title!",
                 description AS "description?",
                 status AS "status!: TaskStatus",
-                version AS "version!",
                 deleted_at AS "deleted_at?",
                 shared_at AS "shared_at?",
                 created_at AS "created_at!",
@@ -205,7 +189,6 @@ impl<'a> SharedTaskRepository<'a> {
                 title AS "title!",
                 description AS "description?",
                 status AS "status!: TaskStatus",
-                version AS "version!",
                 deleted_at AS "deleted_at?",
                 shared_at AS "shared_at?",
                 created_at AS "created_at!",
@@ -226,114 +209,10 @@ impl<'a> SharedTaskRepository<'a> {
             None => None,
         };
 
-        insert_activity(&mut tx, &task, user.as_ref(), "task.created").await?;
         tx.commit().await.map_err(SharedTaskError::from)?;
         Ok(SharedTaskWithUser::new(task, user))
     }
 
-    pub async fn bulk_fetch(&self, project_id: Uuid) -> Result<BulkFetchResult, SharedTaskError> {
-        let mut tx = self.pool.begin().await?;
-        sqlx::query("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ")
-            .execute(&mut *tx)
-            .await?;
-
-        let rows = sqlx::query!(
-            r#"
-            SELECT
-                st.id AS "id!: Uuid",
-                st.organization_id AS "organization_id!: Uuid",
-                st.project_id AS "project_id!: Uuid",
-                st.creator_user_id AS "creator_user_id?: Uuid",
-                st.assignee_user_id AS "assignee_user_id?: Uuid",
-                st.deleted_by_user_id AS "deleted_by_user_id?: Uuid",
-                st.title AS "title!",
-                st.description AS "description?",
-                st.status AS "status!: TaskStatus",
-                st.version AS "version!",
-                st.deleted_at AS "deleted_at?",
-                st.shared_at AS "shared_at?",
-                st.created_at AS "created_at!",
-                st.updated_at AS "updated_at!",
-                u.id AS "user_id?: Uuid",
-                u.first_name AS "user_first_name?",
-                u.last_name AS "user_last_name?",
-                u.username AS "user_username?"
-            FROM shared_tasks st
-            LEFT JOIN users u ON st.assignee_user_id = u.id
-            WHERE st.project_id = $1
-              AND st.deleted_at IS NULL
-            ORDER BY st.updated_at DESC
-            "#,
-            project_id
-        )
-        .fetch_all(&mut *tx)
-        .await?;
-
-        let tasks = rows
-            .into_iter()
-            .map(|row| {
-                let task = SharedTask {
-                    id: row.id,
-                    organization_id: row.organization_id,
-                    project_id: row.project_id,
-                    creator_user_id: row.creator_user_id,
-                    assignee_user_id: row.assignee_user_id,
-                    deleted_by_user_id: row.deleted_by_user_id,
-                    title: row.title,
-                    description: row.description,
-                    status: row.status,
-                    version: row.version,
-                    deleted_at: row.deleted_at,
-                    shared_at: row.shared_at,
-                    created_at: row.created_at,
-                    updated_at: row.updated_at,
-                };
-
-                let user = row.user_id.map(|id| UserData {
-                    id,
-                    first_name: row.user_first_name,
-                    last_name: row.user_last_name,
-                    username: row.user_username,
-                });
-
-                SharedTaskActivityPayload { task, user }
-            })
-            .collect();
-
-        let deleted_rows = sqlx::query!(
-            r#"
-            SELECT st.id AS "id!: Uuid"
-            FROM shared_tasks st
-            WHERE st.project_id = $1
-              AND st.deleted_at IS NOT NULL
-            "#,
-            project_id
-        )
-        .fetch_all(&mut *tx)
-        .await?;
-
-        let deleted_task_ids = deleted_rows.into_iter().map(|row| row.id).collect();
-
-        let latest_seq = sqlx::query_scalar!(
-            r#"
-            SELECT MAX(seq)
-            FROM activity
-            WHERE project_id = $1
-            "#,
-            project_id
-        )
-        .fetch_one(&mut *tx)
-        .await?;
-
-        tx.commit().await?;
-
-        Ok(BulkFetchResult {
-            tasks,
-            deleted_task_ids,
-            latest_seq,
-        })
-    }
-
     pub async fn update(
         &self,
         task_id: Uuid,
@@ -348,11 +227,9 @@ impl<'a> SharedTaskRepository<'a> {
             SET title = COALESCE($2, t.title),
                 description = COALESCE($3, t.description),
                 status = COALESCE($4, t.status),
-                version = t.version + 1,
                 updated_at = NOW()
             WHERE t.id = $1
-              AND t.version = COALESCE($5, t.version)
-              AND t.assignee_user_id = $6
+              AND t.assignee_user_id = $5
               AND t.deleted_at IS NULL
             RETURNING
                 t.id AS "id!",
@@ -364,7 +241,6 @@ impl<'a> SharedTaskRepository<'a> {
                 t.title AS "title!",
                 t.description AS "description?",
                 t.status AS "status!: TaskStatus",
-                t.version AS "version!",
                 t.deleted_at AS "deleted_at?",
                 t.shared_at AS "shared_at?",
                 t.created_at AS "created_at!",
@@ -374,12 +250,11 @@ impl<'a> SharedTaskRepository<'a> {
             data.title,
             data.description,
             data.status as Option<TaskStatus>,
-            data.version,
             data.acting_user_id
         )
         .fetch_optional(&mut *tx)
         .await?
-        .ok_or_else(|| SharedTaskError::Conflict("task version mismatch".to_string()))?;
+        .ok_or_else(|| SharedTaskError::NotFound)?;
 
         ensure_text_size(&task.title, task.description.as_deref())?;
 
@@ -388,7 +263,6 @@ impl<'a> SharedTaskRepository<'a> {
             None => None,
         };
 
-        insert_activity(&mut tx, &task, user.as_ref(), "task.updated").await?;
         tx.commit().await.map_err(SharedTaskError::from)?;
         Ok(SharedTaskWithUser::new(task, user))
     }
@@ -404,10 +278,8 @@ impl<'a> SharedTaskRepository<'a> {
             SharedTask,
             r#"
             UPDATE shared_tasks AS t
-            SET assignee_user_id = $2,
-                version = t.version + 1
+            SET assignee_user_id = $2
             WHERE t.id = $1
-              AND t.version = COALESCE($4, t.version)
               AND ($3::uuid IS NULL OR t.assignee_user_id = $3::uuid)
               AND t.deleted_at IS NULL
             RETURNING
@@ -420,7 +292,6 @@ impl<'a> SharedTaskRepository<'a> {
                 t.title AS "title!",
                 t.description AS "description?",
                 t.status AS "status!: TaskStatus",
-                t.version AS "version!",
                 t.deleted_at AS "deleted_at?",
                 t.shared_at AS "shared_at?",
                 t.created_at AS "created_at!",
@@ -428,21 +299,17 @@ impl<'a> SharedTaskRepository<'a> {
             "#,
             task_id,
             data.new_assignee_user_id,
-            data.previous_assignee_user_id,
-            data.version
+            data.previous_assignee_user_id
         )
         .fetch_optional(&mut *tx)
         .await?
-        .ok_or_else(|| {
-            SharedTaskError::Conflict("task version or previous assignee mismatch".to_string())
-        })?;
+        .ok_or_else(|| SharedTaskError::Conflict("previous assignee mismatch".to_string()))?;
 
         let user = match data.new_assignee_user_id {
             Some(user_id) => fetch_user(&mut tx, user_id).await?,
             None => None,
         };
 
-        insert_activity(&mut tx, &task, user.as_ref(), "task.reassigned").await?;
         tx.commit().await.map_err(SharedTaskError::from)?;
         Ok(SharedTaskWithUser::new(task, user))
     }
@@ -459,11 +326,9 @@ impl<'a> SharedTaskRepository<'a> {
             r#"
             UPDATE shared_tasks AS t
             SET deleted_at = NOW(),
-                deleted_by_user_id = $3,
-                version = t.version + 1
+                deleted_by_user_id = $2
             WHERE t.id = $1
-              AND t.version = COALESCE($2, t.version)
-              AND t.assignee_user_id = $3
+              AND t.assignee_user_id = $2
               AND t.deleted_at IS NULL
             RETURNING
                 t.id AS "id!",
@@ -475,26 +340,44 @@ impl<'a> SharedTaskRepository<'a> {
                 t.title AS "title!",
                 t.description AS "description?",
                 t.status AS "status!: TaskStatus",
-                t.version AS "version!",
                 t.deleted_at AS "deleted_at?",
                 t.shared_at AS "shared_at?",
                 t.created_at AS "created_at!",
                 t.updated_at AS "updated_at!"
             "#,
             task_id,
-            data.version,
             data.acting_user_id
         )
         .fetch_optional(&mut *tx)
         .await?
-        .ok_or_else(|| {
-            SharedTaskError::Conflict("task version mismatch or user not authorized".to_string())
-        })?;
+        .ok_or_else(|| SharedTaskError::Conflict("user not authorized".to_string()))?;
 
-        insert_activity(&mut tx, &task, None, "task.deleted").await?;
         tx.commit().await.map_err(SharedTaskError::from)?;
         Ok(SharedTaskWithUser::new(task, None))
     }
 
+    pub async fn check_existence(
+        &self,
+        task_ids: &[Uuid],
+        user_id: Uuid,
+    ) -> Result<Vec<Uuid>, SharedTaskError> {
+        let tasks = sqlx::query!(
+            r#"
+            SELECT t.id
+            FROM shared_tasks t
+            INNER JOIN organization_member_metadata om ON t.organization_id = om.organization_id
+            WHERE t.id = ANY($1)
+              AND t.deleted_at IS NULL
+              AND om.user_id = $2
+            "#,
+            task_ids,
+            user_id
+        )
+        .fetch_all(self.pool)
+        .await?;
+
+        Ok(tasks.into_iter().map(|r| r.id).collect())
+    }
 }
 
 pub(crate) fn ensure_text_size(
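check_existence deliberately returns only the ids that still exist and that the caller can see (non-deleted, same organization), so a client can diff the ids it holds against the result. A hedged sketch of that diffing step (the helper is illustrative, not from this diff):

    use std::collections::HashSet;
    use uuid::Uuid;

    // Sketch: find which locally linked shared-task ids no longer resolve.
    fn missing_ids(requested: &[Uuid], existing: Vec<Uuid>) -> Vec<Uuid> {
        let existing: HashSet<Uuid> = existing.into_iter().collect();
        requested
            .iter()
            .copied()
            .filter(|id| !existing.contains(id))
            .collect()
    }

Anything returned by such a helper is a candidate for the auto-unlink of non-existing shared tasks on the local side.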
@@ -510,81 +393,6 @@ pub(crate) fn ensure_text_size(
     Ok(())
 }
 
-async fn insert_activity(
-    tx: &mut Tx<'_>,
-    task: &SharedTask,
-    user: Option<&UserData>,
-    event_type: &str,
-) -> Result<(), SharedTaskError> {
-    let payload = SharedTaskActivityPayload {
-        task: task.clone(),
-        user: user.cloned(),
-    };
-    let payload = serde_json::to_value(payload).map_err(SharedTaskError::Serialization)?;
-
-    // First attempt at inserting - if partitions are missing we retry after provisioning.
-    match do_insert_activity(tx, task, event_type, payload.clone()).await {
-        Ok(_) => Ok(()),
-        Err(err) => {
-            if let sqlx::Error::Database(db_err) = &err
-                && maintenance::is_partition_missing_error(db_err.as_ref())
-            {
-                let code_owned = db_err.code().map(|c| c.to_string());
-                let code = code_owned.as_deref().unwrap_or_default();
-                tracing::warn!(
-                    "Activity partition missing ({}), creating current and next partitions",
-                    code
-                );
-
-                maintenance::ensure_future_partitions(tx.as_mut())
-                    .await
-                    .map_err(SharedTaskError::from)?;
-
-                return do_insert_activity(tx, task, event_type, payload)
-                    .await
-                    .map_err(SharedTaskError::from);
-            }
-
-            Err(SharedTaskError::from(err))
-        }
-    }
-}
-
-async fn do_insert_activity(
-    tx: &mut Tx<'_>,
-    task: &SharedTask,
-    event_type: &str,
-    payload: serde_json::Value,
-) -> Result<(), sqlx::Error> {
-    sqlx::query!(
-        r#"
-        WITH next AS (
-            INSERT INTO project_activity_counters AS counters (project_id, last_seq)
-            VALUES ($1, 1)
-            ON CONFLICT (project_id)
-            DO UPDATE SET last_seq = counters.last_seq + 1
-            RETURNING last_seq
-        )
-        INSERT INTO activity (
-            project_id,
-            seq,
-            assignee_user_id,
-            event_type,
-            payload
-        )
-        SELECT $1, next.last_seq, $2, $3, $4
-        FROM next
-        "#,
-        task.project_id,
-        task.assignee_user_id,
-        event_type,
-        payload
-    )
-    .execute(&mut **tx)
-    .await
-    .map(|_| ())
-}
-
 impl SharedTaskRepository<'_> {
     pub async fn organization_id(
         pool: &PgPool,
@@ -1,6 +1,7 @@
 use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 use sqlx::{PgPool, query_as};
+use ts_rs::TS;
 use uuid::Uuid;
 
 use super::{Tx, identity_errors::IdentityError};
@@ -16,9 +17,10 @@ pub struct User {
     pub updated_at: DateTime<Utc>,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow, TS)]
+#[ts(export)]
 pub struct UserData {
-    pub id: Uuid,
+    pub user_id: Uuid,
     pub first_name: Option<String>,
     pub last_name: Option<String>,
     pub username: Option<String>,
@@ -91,6 +93,34 @@ impl<'a> UserRepository<'a> {
         .await
         .map_err(IdentityError::from)
     }
 
+    /// Fetch all assignees for a given project id.
+    /// Returns Vec<UserData> containing all unique users assigned to tasks in the project.
+    pub async fn fetch_assignees_by_project(
+        &self,
+        project_id: Uuid,
+    ) -> Result<Vec<UserData>, IdentityError> {
+        let rows = sqlx::query_as!(
+            UserData,
+            r#"
+            SELECT DISTINCT
+                u.id as "user_id",
+                u.first_name as "first_name",
+                u.last_name as "last_name",
+                u.username as "username"
+            FROM shared_tasks st
+            INNER JOIN users u ON u.id = st.assignee_user_id
+            WHERE st.project_id = $1
+              AND st.assignee_user_id IS NOT NULL
+            "#,
+            project_id
+        )
+        .fetch_all(self.pool)
+        .await
+        .map_err(IdentityError::from)?;
+
+        Ok(rows)
+    }
 }
 
 async fn upsert_user(pool: &PgPool, user: &UpsertUser<'_>) -> Result<User, sqlx::Error> {
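Call-side shape for the new repository method, as a sketch (assumes the crate's UserRepository and IdentityError are in scope; error handling is minimal):

    use uuid::Uuid;

    // Sketch: list the distinct assignees across a project's shared tasks.
    async fn print_assignees(pool: &sqlx::PgPool, project_id: Uuid) -> Result<(), IdentityError> {
        let assignees = UserRepository::new(pool)
            .fetch_assignees_by_project(project_id)
            .await?;
        for a in &assignees {
            println!("{} {:?}", a.user_id, a.username);
        }
        Ok(())
    }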
@@ -141,7 +171,7 @@ pub async fn fetch_user(tx: &mut Tx<'_>, user_id: Uuid) -> Result<Option<UserDat
     .map_err(IdentityError::from)
     .map(|row_opt| {
         row_opt.map(|row| UserData {
-            id: row.id,
+            user_id: row.id,
             first_name: row.first_name,
             last_name: row.last_name,
             username: row.username,
@@ -1,4 +1,3 @@
-pub mod activity;
 mod app;
 mod auth;
 pub mod config;
@@ -6,7 +5,7 @@ pub mod db;
 pub mod mail;
 pub mod routes;
 mod state;
-pub mod ws;
+pub mod validated_where;
 
 use std::{env, sync::OnceLock};
 
@@ -20,7 +19,6 @@ use tracing_subscriber::{
     layer::{Layer as _, SubscriberExt},
     util::SubscriberInitExt,
 };
-pub use ws::message::{ClientMessage, ServerMessage};
 
 static INIT_GUARD: OnceLock<sentry::ClientInitGuard> = OnceLock::new();
 
@@ -1,67 +0,0 @@
-use axum::{
-    Json, Router,
-    extract::{Extension, Query, State},
-    http::StatusCode,
-    response::{IntoResponse, Response},
-    routing::get,
-};
-use serde::Deserialize;
-use tracing::instrument;
-use uuid::Uuid;
-
-use super::{error::ErrorResponse, organization_members::ensure_project_access};
-use crate::{
-    AppState, activity::ActivityResponse, auth::RequestContext, db::activity::ActivityRepository,
-};
-
-pub fn router() -> Router<AppState> {
-    Router::new().route("/activity", get(get_activity_stream))
-}
-
-#[derive(Debug, Deserialize)]
-pub struct ActivityQuery {
-    /// Remote project to stream activity for
-    pub project_id: Uuid,
-    /// Fetch events after this ID (exclusive)
-    pub after: Option<i64>,
-    /// Maximum number of events to return
-    pub limit: Option<i64>,
-}
-
-#[instrument(
-    name = "activity.get_activity_stream",
-    skip(state, ctx, params),
-    fields(user_id = %ctx.user.id, project_id = %params.project_id)
-)]
-async fn get_activity_stream(
-    State(state): State<AppState>,
-    Extension(ctx): Extension<RequestContext>,
-    Query(params): Query<ActivityQuery>,
-) -> Response {
-    let config = state.config();
-    let limit = params
-        .limit
-        .unwrap_or(config.activity_default_limit)
-        .clamp(1, config.activity_max_limit);
-    let after = params.after;
-    let project_id = params.project_id;
-
-    let _organization_id = match ensure_project_access(state.pool(), ctx.user.id, project_id).await
-    {
-        Ok(org_id) => org_id,
-        Err(error) => return error.into_response(),
-    };
-
-    let repo = ActivityRepository::new(state.pool());
-    match repo.fetch_since(project_id, after, limit).await {
-        Ok(events) => (StatusCode::OK, Json(ActivityResponse { data: events })).into_response(),
-        Err(error) => {
-            tracing::error!(?error, "failed to load activity stream");
-            ErrorResponse::new(
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "failed to load activity stream",
-            )
-            .into_response()
-        }
-    }
-}
crates/remote/src/routes/electric_proxy.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
+use std::collections::HashMap;
+
+use axum::{
+    Router,
+    body::Body,
+    extract::{Query, State},
+    http::{HeaderMap, HeaderValue, StatusCode, header},
+    response::{IntoResponse, Response},
+    routing::get,
+};
+use futures::TryStreamExt;
+use secrecy::ExposeSecret;
+use tracing::error;
+use uuid::Uuid;
+
+use crate::{
+    AppState, auth::RequestContext, db::organizations::OrganizationRepository, validated_where,
+    validated_where::ValidatedWhere,
+};
+
+pub fn router() -> Router<AppState> {
+    Router::new().route("/shape/shared_tasks", get(proxy_shared_tasks))
+}
+
+/// Electric protocol query parameters that are safe to forward.
+/// Based on https://electric-sql.com/docs/guides/auth#proxy-auth
+/// Note: "where" is NOT included because it's controlled server-side for security.
+const ELECTRIC_PARAMS: &[&str] = &["offset", "handle", "live", "cursor", "columns"];
+
+/// Returns an empty shape response for users with no organization memberships.
+fn empty_shape_response() -> Response {
+    let mut headers = HeaderMap::new();
+    headers.insert(
+        header::CONTENT_TYPE,
+        HeaderValue::from_static("application/json"),
+    );
+    (StatusCode::OK, headers, "[]").into_response()
+}
+
+/// Proxy Shape requests for the `shared_tasks` table.
+///
+/// Route: GET /v1/shape/shared_tasks?offset=-1
+///
+/// The `require_session` middleware has already validated the Bearer token
+/// before this handler is called.
+pub async fn proxy_shared_tasks(
+    State(state): State<AppState>,
+    axum::extract::Extension(ctx): axum::extract::Extension<RequestContext>,
+    Query(params): Query<HashMap<String, String>>,
+) -> Result<Response, ProxyError> {
+    // Get user's organization memberships
+    let org_repo = OrganizationRepository::new(state.pool());
+    let orgs = org_repo
+        .list_user_organizations(ctx.user.id)
+        .await
+        .map_err(|e| ProxyError::Authorization(format!("failed to fetch organizations: {e}")))?;
+
+    if orgs.is_empty() {
+        // User has no org memberships - return empty result
+        return Ok(empty_shape_response());
+    }
+
+    // Build org_id filter using compile-time validated WHERE clause
+    let org_uuids: Vec<Uuid> = orgs.iter().map(|o| o.id).collect();
+    let query = validated_where!("shared_tasks", r#""organization_id" = ANY($1)"#, &org_uuids);
+    let query_params = &[format!(
+        "{{{}}}",
+        org_uuids
+            .iter()
+            .map(|u| u.to_string())
+            .collect::<Vec<_>>()
+            .join(",")
+    )];
+    tracing::debug!("Proxying Electric Shape request for shared_tasks table{query:?}");
+    proxy_table(&state, &query, &params, query_params).await
+}
+
+/// Proxy a Shape request to Electric for a specific table.
+///
+/// The table and where clause are set server-side (not from client params)
+/// to prevent unauthorized access to other tables or data.
+async fn proxy_table(
+    state: &AppState,
+    query: &ValidatedWhere,
+    client_params: &HashMap<String, String>,
+    electric_params: &[String],
+) -> Result<Response, ProxyError> {
+    // Build the Electric URL
+    let mut origin_url = url::Url::parse(&state.config.electric_url)
+        .map_err(|e| ProxyError::InvalidConfig(format!("invalid electric_url: {e}")))?;
+
+    origin_url.set_path("/v1/shape");
+
+    // Set table server-side (security: client can't override)
+    origin_url
+        .query_pairs_mut()
+        .append_pair("table", query.table);
+
+    // Set WHERE clause with parameterized values
+    origin_url
+        .query_pairs_mut()
+        .append_pair("where", query.where_clause);
+
+    // Pass params for $1, $2, etc. placeholders
+    for (i, param) in electric_params.iter().enumerate() {
+        origin_url
+            .query_pairs_mut()
+            .append_pair(&format!("params[{}]", i + 1), param);
+    }
+
+    // Forward safe client params
+    for (key, value) in client_params {
+        if ELECTRIC_PARAMS.contains(&key.as_str()) {
+            origin_url.query_pairs_mut().append_pair(key, value);
+        }
+    }
+
+    if let Some(secret) = &state.config.electric_secret {
+        origin_url
+            .query_pairs_mut()
+            .append_pair("secret", secret.expose_secret());
+    }
+
+    let response = state
+        .http_client
+        .get(origin_url.as_str())
+        .send()
+        .await
+        .map_err(ProxyError::Connection)?;
+
+    let status = response.status();
+
+    let mut headers = HeaderMap::new();
+
+    // Copy headers from Electric response, but remove problematic ones
+    for (key, value) in response.headers() {
+        // Skip headers that interfere with browser handling
+        if key == header::CONTENT_ENCODING || key == header::CONTENT_LENGTH {
+            continue;
+        }
+        headers.insert(key.clone(), value.clone());
+    }
+
+    // Add Vary header for proper caching with auth
+    headers.insert(header::VARY, HeaderValue::from_static("Authorization"));
+
+    // Stream the response body directly without buffering
+    let body_stream = response.bytes_stream().map_err(std::io::Error::other);
+    let body = Body::from_stream(body_stream);
+
+    Ok((status, headers, body).into_response())
+}
+
+#[derive(Debug)]
+pub enum ProxyError {
+    Connection(reqwest::Error),
+    InvalidConfig(String),
+    Authorization(String),
+}
+
+impl IntoResponse for ProxyError {
+    fn into_response(self) -> Response {
+        match self {
+            ProxyError::Connection(err) => {
+                error!(?err, "failed to connect to Electric service");
+                (
+                    StatusCode::BAD_GATEWAY,
+                    "failed to connect to Electric service",
+                )
+                    .into_response()
+            }
+            ProxyError::InvalidConfig(msg) => {
+                error!(%msg, "invalid Electric proxy configuration");
+                (StatusCode::INTERNAL_SERVER_ERROR, "internal server error").into_response()
+            }
+            ProxyError::Authorization(msg) => {
+                error!(%msg, "authorization failed for Electric proxy");
+                (StatusCode::FORBIDDEN, "forbidden").into_response()
+            }
+        }
+    }
+}
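To make the proxying concrete, here is roughly the upstream URL proxy_table assembles for a user in one organization. A sketch using the url crate; the host and UUID are made up:

    fn main() {
        let mut u = url::Url::parse("http://electric:3000").unwrap();
        u.set_path("/v1/shape");
        u.query_pairs_mut()
            .append_pair("table", "shared_tasks")
            .append_pair("where", r#""organization_id" = ANY($1)"#)
            // Electric expects array params in Postgres array-literal syntax.
            .append_pair("params[1]", "{6a1f0f8e-0000-0000-0000-000000000000}")
            .append_pair("offset", "-1"); // a forwarded, whitelisted client param
        // Prints the fully percent-encoded shape URL the proxy sends upstream.
        println!("{u}");
    }

Because "table" and "where" are appended server-side before any client params are considered, a client cannot widen the shape beyond its own organizations.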
@@ -5,7 +5,7 @@ use axum::{
     routing::get,
 };
 use tower_http::{
-    cors::CorsLayer,
+    cors::{AllowHeaders, AllowMethods, AllowOrigin, CorsLayer},
     request_id::{MakeRequestUuid, PropagateRequestIdLayer, RequestId, SetRequestIdLayer},
     services::{ServeDir, ServeFile},
     trace::{DefaultOnFailure, DefaultOnResponse, TraceLayer},
@@ -14,7 +14,7 @@ use tracing::{Level, field};
 
 use crate::{AppState, auth::require_session};
 
-pub mod activity;
+mod electric_proxy;
 mod error;
 mod identity;
 mod oauth;
@@ -53,13 +53,12 @@ pub fn router(state: AppState) -> Router {
 
     let v1_protected = Router::<AppState>::new()
         .merge(identity::router())
-        .merge(activity::router())
        .merge(projects::router())
         .merge(tasks::router())
         .merge(organizations::router())
         .merge(organization_members::protected_router())
         .merge(oauth::protected_router())
-        .merge(crate::ws::router())
+        .merge(electric_proxy::router())
         .layer(middleware::from_fn_with_state(
             state.clone(),
             require_session,
@@ -73,7 +72,13 @@ pub fn router(state: AppState) -> Router {
         .nest("/v1", v1_public)
         .nest("/v1", v1_protected)
         .fallback_service(spa)
-        .layer(CorsLayer::permissive())
+        .layer(
+            CorsLayer::new()
+                .allow_origin(AllowOrigin::mirror_request())
+                .allow_methods(AllowMethods::mirror_request())
+                .allow_headers(AllowHeaders::mirror_request())
+                .allow_credentials(true),
+        )
         .layer(trace_layer)
         .layer(PropagateRequestIdLayer::new(HeaderName::from_static(
             "x-request-id",
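One note on the CORS change above: CorsLayer::permissive() replies with a wildcard Access-Control-Allow-Origin, and browsers reject the wildcard on credentialed requests, which the authenticated Electric proxy now needs. Mirroring the request is the most permissive credentials-compatible setup; if the set of frontends is known up front, pinning origins is the stricter alternative. A sketch with a hypothetical origin list:

    use tower_http::cors::{AllowOrigin, CorsLayer};

    // Stricter alternative: echo only whitelisted origins instead of mirroring all.
    fn pinned_cors() -> CorsLayer {
        CorsLayer::new()
            .allow_origin(AllowOrigin::list([
                "https://app.example.com".parse().unwrap(), // hypothetical frontend
            ]))
            .allow_credentials(true)
    }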
@@ -8,6 +8,7 @@ use axum::{
 use serde::{Deserialize, Serialize};
 use serde_json::json;
 use tracing::{Span, instrument};
+use ts_rs::TS;
 use uuid::Uuid;
 
 use super::{
@@ -30,60 +31,54 @@ use crate::{
 
 pub fn router() -> Router<AppState> {
     Router::new()
-        .route("/tasks/bulk", get(bulk_shared_tasks))
         .route("/tasks", post(create_shared_task))
+        .route("/tasks/check", post(check_tasks_existence))
         .route("/tasks/{task_id}", patch(update_shared_task))
         .route("/tasks/{task_id}", delete(delete_shared_task))
         .route("/tasks/{task_id}/assign", post(assign_task))
+        .route("/tasks/assignees", get(get_task_assignees_by_project))
 }
 
-#[derive(Debug, Deserialize)]
-pub struct BulkTasksQuery {
+#[derive(Debug, Deserialize, TS)]
+#[ts(export)]
+pub struct AssigneesQuery {
     pub project_id: Uuid,
 }
 
 #[instrument(
-    name = "tasks.bulk_shared_tasks",
+    name = "tasks.get_task_assignees_by_project",
     skip(state, ctx, query),
     fields(user_id = %ctx.user.id, project_id = %query.project_id, org_id = tracing::field::Empty)
 )]
-pub async fn bulk_shared_tasks(
+pub async fn get_task_assignees_by_project(
     State(state): State<AppState>,
     Extension(ctx): Extension<RequestContext>,
-    Query(query): Query<BulkTasksQuery>,
+    Query(query): Query<AssigneesQuery>,
 ) -> Response {
     let pool = state.pool();
-    let _organization_id = match ensure_project_access(pool, ctx.user.id, query.project_id).await {
-        Ok(org_id) => {
-            Span::current().record("org_id", format_args!("{org_id}"));
-            org_id
+    let _org_id = match ensure_project_access(pool, ctx.user.id, query.project_id).await {
+        Ok(org) => {
+            Span::current().record("org_id", format_args!("{org}"));
+            org
         }
         Err(error) => return error.into_response(),
     };
 
-    let repo = SharedTaskRepository::new(pool);
-    match repo.bulk_fetch(query.project_id).await {
-        Ok(snapshot) => (
-            StatusCode::OK,
-            Json(BulkSharedTasksResponse {
-                tasks: snapshot.tasks,
-                deleted_task_ids: snapshot.deleted_task_ids,
-                latest_seq: snapshot.latest_seq,
-            }),
-        )
-            .into_response(),
-        Err(error) => match error {
-            SharedTaskError::Database(err) => {
-                tracing::error!(?err, "failed to load shared task snapshot");
-                (
-                    StatusCode::INTERNAL_SERVER_ERROR,
-                    Json(json!({ "error": "failed to load shared tasks" })),
-                )
-                    .into_response()
-            }
-            other => task_error_response(other, "failed to load shared tasks"),
-        },
-    }
+    let user_repo = UserRepository::new(pool);
+    let assignees = match user_repo.fetch_assignees_by_project(query.project_id).await {
+        Ok(names) => names,
+        Err(e) => {
+            tracing::error!(?e, "failed to load assignees");
+            return (
+                StatusCode::INTERNAL_SERVER_ERROR,
+                Json(json!({"error": "failed to load assignees"})),
+            )
+                .into_response();
+        }
+    };
+
+    (StatusCode::OK, Json(assignees)).into_response()
 }
 
 #[instrument(
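Client-side, the renamed endpoint is a plain GET. A hedged reqwest sketch of the call (base URL and token are placeholders; assumes reqwest's json feature and the TS-exported UserData shape above):

    use uuid::Uuid;

    #[derive(serde::Deserialize, Debug)]
    struct Assignee {
        user_id: Uuid,
        first_name: Option<String>,
        last_name: Option<String>,
        username: Option<String>,
    }

    async fn fetch_assignees(project_id: Uuid, token: &str) -> reqwest::Result<Vec<Assignee>> {
        reqwest::Client::new()
            .get("https://remote.example.com/v1/tasks/assignees") // placeholder host
            .query(&[("project_id", project_id.to_string())])
            .bearer_auth(token)
            .send()
            .await?
            .json()
            .await
    }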
@@ -185,7 +180,6 @@ pub async fn update_shared_task(
         title,
         description,
         status,
-        version,
     } = payload;
 
     let next_title = title.as_deref().unwrap_or(existing.title.as_str());
@@ -199,7 +193,6 @@ pub async fn update_shared_task(
         title,
         description,
         status,
-        version,
         acting_user_id: ctx.user.id,
     };
 
@@ -263,7 +256,6 @@ pub async fn assign_task(
     let data = AssignTaskData {
         new_assignee_user_id: payload.new_assignee_user_id,
         previous_assignee_user_id: Some(ctx.user.id),
-        version: payload.version,
     };
 
     match repo.assign_task(task_id, data).await {
@@ -274,14 +266,13 @@ pub async fn assign_task(
 
 #[instrument(
     name = "tasks.delete_shared_task",
-    skip(state, ctx, payload),
+    skip(state, ctx),
     fields(user_id = %ctx.user.id, task_id = %task_id, org_id = tracing::field::Empty)
 )]
 pub async fn delete_shared_task(
     State(state): State<AppState>,
     Extension(ctx): Extension<RequestContext>,
     Path(task_id): Path<Uuid>,
-    payload: Option<Json<DeleteSharedTaskRequest>>,
 ) -> Response {
     let pool = state.pool();
     let _organization_id = match ensure_task_access(pool, ctx.user.id, task_id).await {
@@ -311,11 +302,8 @@ pub async fn delete_shared_task(
         );
     }
 
-    let version = payload.as_ref().and_then(|body| body.0.version);
-
     let data = DeleteTaskData {
         acting_user_id: ctx.user.id,
-        version,
     };
 
     match repo.delete_task(task_id, data).await {
@@ -324,11 +312,28 @@ pub async fn delete_shared_task(
         }
     }
 }
 
+#[instrument(
+    name = "tasks.check_existence",
+    skip(state, ctx, payload),
+    fields(user_id = %ctx.user.id)
+)]
+pub async fn check_tasks_existence(
+    State(state): State<AppState>,
+    Extension(ctx): Extension<RequestContext>,
+    Json(payload): Json<CheckTasksRequest>,
+) -> Response {
+    let pool = state.pool();
+    let repo = SharedTaskRepository::new(pool);
+
+    match repo.check_existence(&payload.task_ids, ctx.user.id).await {
+        Ok(existing_ids) => (StatusCode::OK, Json(existing_ids)).into_response(),
+        Err(error) => task_error_response(error, "failed to check tasks existence"),
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct BulkSharedTasksResponse {
-    pub tasks: Vec<crate::db::tasks::SharedTaskActivityPayload>,
-    pub deleted_task_ids: Vec<Uuid>,
-    pub latest_seq: Option<i64>,
+pub struct CheckTasksRequest {
+    pub task_ids: Vec<Uuid>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -344,21 +349,15 @@ pub struct UpdateSharedTaskRequest {
     pub title: Option<String>,
     pub description: Option<String>,
     pub status: Option<TaskStatus>,
-    pub version: Option<i64>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct AssignSharedTaskRequest {
     pub new_assignee_user_id: Option<Uuid>,
-    pub version: Option<i64>,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct DeleteSharedTaskRequest {
-    pub version: Option<i64>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, TS)]
+#[ts(export)]
 pub struct SharedTaskResponse {
     pub task: SharedTask,
     pub user: Option<UserData>,
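The new check endpoint is symmetric: POST a CheckTasksRequest with the ids you hold, and get back the subset that still exists and is visible to you. A sketch of the round trip (placeholder host; the response body is just a Vec<Uuid>, per the handler above):

    use uuid::Uuid;

    async fn check_tasks(ids: Vec<Uuid>, token: &str) -> reqwest::Result<Vec<Uuid>> {
        reqwest::Client::new()
            .post("https://remote.example.com/v1/tasks/check") // placeholder host
            .bearer_auth(token)
            .json(&serde_json::json!({ "task_ids": ids }))
            .send()
            .await?
            .json()
            .await
    }

Ids present in the request but absent from the response are either deleted or outside the caller's organizations, which is exactly what the local unlinking logic needs to know.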
@@ -3,7 +3,6 @@ use std::sync::Arc;
 use sqlx::PgPool;
 
 use crate::{
-    activity::ActivityBroker,
     auth::{JwtService, OAuthHandoffService, OAuthTokenValidator, ProviderRegistry},
     config::RemoteServerConfig,
     mail::Mailer,
@@ -12,34 +11,34 @@ use crate::{
 #[derive(Clone)]
 pub struct AppState {
     pub pool: PgPool,
-    pub broker: ActivityBroker,
     pub config: RemoteServerConfig,
     pub jwt: Arc<JwtService>,
     pub mailer: Arc<dyn Mailer>,
     pub server_public_base_url: String,
-    pub handoff: Arc<OAuthHandoffService>,
-    pub oauth_token_validator: Arc<OAuthTokenValidator>,
+    pub http_client: reqwest::Client,
+    handoff: Arc<OAuthHandoffService>,
+    oauth_token_validator: Arc<OAuthTokenValidator>,
 }
 
 impl AppState {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         pool: PgPool,
-        broker: ActivityBroker,
         config: RemoteServerConfig,
         jwt: Arc<JwtService>,
         handoff: Arc<OAuthHandoffService>,
         oauth_token_validator: Arc<OAuthTokenValidator>,
         mailer: Arc<dyn Mailer>,
         server_public_base_url: String,
+        http_client: reqwest::Client,
     ) -> Self {
         Self {
             pool,
-            broker,
             config,
             jwt,
             mailer,
             server_public_base_url,
+            http_client,
             handoff,
             oauth_token_validator,
         }
@@ -49,10 +48,6 @@ impl AppState {
         &self.pool
     }
 
-    pub fn broker(&self) -> &ActivityBroker {
-        &self.broker
-    }
-
     pub fn config(&self) -> &RemoteServerConfig {
         &self.config
     }
crates/remote/src/validated_where.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
+#[derive(Debug)]
+pub struct ValidatedWhere {
+    pub table: &'static str,
+    pub where_clause: &'static str,
+}
+#[macro_export]
+macro_rules! validated_where {
+    ($table:literal, $where:literal $(, $arg:expr)* $(,)?) => {{
+        // Compile-time validation via SQLx using + concatenation
+        // This checks: table exists, columns exist, arg types are correct
+        let _ = sqlx::query!(
+            "SELECT 1 AS v FROM " + $table + " WHERE " + $where
+            $(, $arg)*
+        );
+        $crate::validated_where::ValidatedWhere {
+            table: $table,
+            where_clause: $where,
+        }
+    }};
+}
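The macro is worth a second look: sqlx's query! macro accepts adjacent string literals joined with +, and that is what gives compile-time checking of a WHERE clause that is ultimately forwarded to Electric as a plain string. A sketch of the call site and what the validation arm roughly expands to (fragment; org_uuids as in the proxy handler):

    // As used by the proxy handler:
    let query = validated_where!("shared_tasks", r#""organization_id" = ANY($1)"#, &org_uuids);
    assert_eq!(query.table, "shared_tasks");

    // At compile time the macro also emits, roughly:
    //     let _ = sqlx::query!(
    //         "SELECT 1 AS v FROM " + "shared_tasks"
    //             + " WHERE " + r#""organization_id" = ANY($1)"#,
    //         &org_uuids
    //     );
    // so a typo in the table or column name, or a mis-typed argument, fails
    // the build, while only the two validated literals travel on at runtime.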
@@ -1,21 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use crate::activity::ActivityEvent;
-
-#[derive(Debug, Serialize, Deserialize)]
-#[serde(tag = "type", content = "data")]
-pub enum ClientMessage {
-    #[serde(rename = "ack")]
-    Ack { cursor: i64 },
-    #[serde(rename = "auth-token")]
-    AuthToken { token: String },
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-#[serde(tag = "type", content = "data")]
-pub enum ServerMessage {
-    #[serde(rename = "activity")]
-    Activity(ActivityEvent),
-    #[serde(rename = "error")]
-    Error { message: String },
-}
@@ -1,41 +0,0 @@
-use axum::{
-    Router,
-    extract::{Extension, Query, State, ws::WebSocketUpgrade},
-    response::IntoResponse,
-    routing::get,
-};
-use serde::Deserialize;
-use uuid::Uuid;
-
-use crate::{AppState, auth::RequestContext};
-
-pub mod message;
-mod session;
-
-#[derive(Debug, Deserialize, Clone)]
-pub struct WsQueryParams {
-    pub project_id: Uuid,
-    pub cursor: Option<i64>,
-}
-
-pub fn router() -> Router<AppState> {
-    Router::new().route("/ws", get(upgrade))
-}
-
-async fn upgrade(
-    ws: WebSocketUpgrade,
-    State(state): State<AppState>,
-    Extension(ctx): Extension<RequestContext>,
-    Query(params): Query<WsQueryParams>,
-) -> impl IntoResponse {
-    match crate::routes::organization_members::ensure_project_access(
-        state.pool(),
-        ctx.user.id,
-        params.project_id,
-    )
-    .await
-    {
-        Ok(_) => ws.on_upgrade(move |socket| session::handle(socket, state, ctx, params)),
-        Err(error) => error.into_response(),
-    }
-}
@@ -1,512 +0,0 @@
-use std::sync::Arc;
-
-use axum::extract::ws::{Message, WebSocket};
-use chrono::{DateTime, Duration as ChronoDuration, Utc};
-use futures::{SinkExt, StreamExt};
-use sqlx::PgPool;
-use thiserror::Error;
-use tokio::time::{self, MissedTickBehavior};
-use tokio_stream::wrappers::errors::BroadcastStreamRecvError;
-use tracing::{Span, instrument};
-use utils::ws::{WS_AUTH_REFRESH_INTERVAL, WS_BULK_SYNC_THRESHOLD, WS_TOKEN_EXPIRY_GRACE};
-use uuid::Uuid;
-
-use super::{
-    WsQueryParams,
-    message::{ClientMessage, ServerMessage},
-};
-use crate::{
-    AppState,
-    activity::{ActivityBroker, ActivityEvent, ActivityStream},
-    auth::{JwtError, JwtService, RequestContext},
-    db::{
-        activity::ActivityRepository,
-        auth::{AuthSessionError, AuthSessionRepository},
-    },
-};
-
-#[instrument(
-    name = "ws.session",
-    skip(socket, state, ctx, params),
-    fields(
-        user_id = %ctx.user.id,
-        project_id = %params.project_id,
-        org_id = tracing::field::Empty,
-        session_id = %ctx.session_id
-    )
-)]
-pub async fn handle(
-    socket: WebSocket,
-    state: AppState,
-    ctx: RequestContext,
-    params: WsQueryParams,
-) {
-    let config = state.config();
-    let pool_ref = state.pool();
-    let project_id = params.project_id;
-    let organization_id = match crate::routes::organization_members::ensure_project_access(
-        pool_ref,
-        ctx.user.id,
-        project_id,
-    )
-    .await
-    {
-        Ok(org_id) => org_id,
-        Err(error) => {
-            tracing::info!(
-                ?error,
-                user_id = %ctx.user.id,
-                %project_id,
-                "websocket project access denied"
-            );
-            return;
-        }
-    };
-    Span::current().record("org_id", format_args!("{organization_id}"));
-
-    let pool = pool_ref.clone();
-    let mut last_sent_seq = params.cursor;
-    let mut auth_state = WsAuthState::new(
-        state.jwt(),
-        pool.clone(),
-        ctx.session_id,
-        ctx.user.id,
-        project_id,
-        ctx.access_token_expires_at,
-    );
-    let mut auth_check_interval = time::interval(WS_AUTH_REFRESH_INTERVAL);
-    auth_check_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
-
-    let (mut sender, mut inbound) = socket.split();
-    let mut activity_stream = state.broker().subscribe(project_id);
-
-    if let Ok(history) = ActivityRepository::new(&pool)
-        .fetch_since(project_id, params.cursor, config.activity_default_limit)
-        .await
-    {
-        for event in history {
-            if send_activity(&mut sender, &event).await.is_err() {
-                return;
-            }
-            last_sent_seq = Some(event.seq);
-        }
-    }
-
-    tracing::debug!(org_id = %organization_id, project_id = %project_id, "starting websocket session");
-
-    loop {
-        tokio::select! {
-            maybe_activity = activity_stream.next() => {
-                match maybe_activity {
-                    Some(Ok(event)) => {
-                        tracing::trace!(?event, "received activity event");
-                        assert_eq!(event.project_id, project_id, "activity stream emitted cross-project event");
-                        if let Some(prev_seq) = last_sent_seq {
-                            if prev_seq >= event.seq {
-                                continue;
-                            }
-                            if event.seq > prev_seq + 1 {
-                                tracing::warn!(
-                                    expected_next = prev_seq + 1,
-                                    actual = event.seq,
-                                    org_id = %organization_id,
-                                    project_id = %project_id,
-                                    "activity stream skipped sequence; running catch-up"
-                                );
-                                match activity_stream_catch_up(
-                                    &mut sender,
-                                    &pool,
-                                    project_id,
-                                    organization_id,
-                                    prev_seq,
-                                    state.broker(),
-                                    config.activity_catchup_batch_size,
-                                    WS_BULK_SYNC_THRESHOLD as i64,
-                                    "gap",
-                                ).await {
-                                    Ok((seq, stream)) => {
-                                        last_sent_seq = Some(seq);
-                                        activity_stream = stream;
-                                    }
-                                    Err(()) => break,
-                                }
-                                continue;
-                            }
-                        }
-                        if send_activity(&mut sender, &event).await.is_err() {
-                            break;
-                        }
-                        last_sent_seq = Some(event.seq);
-                    }
-                    Some(Err(BroadcastStreamRecvError::Lagged(skipped))) => {
-                        tracing::warn!(skipped, org_id = %organization_id, project_id = %project_id, "activity stream lagged");
-                        let Some(prev_seq) = last_sent_seq else {
-                            tracing::info!(
-                                org_id = %organization_id,
-                                project_id = %project_id,
-                                "activity stream lagged without baseline; forcing bulk sync"
-                            );
-                            let _ = send_error(&mut sender, "activity backlog dropped").await;
-                            break;
-                        };
-
-                        match activity_stream_catch_up(
-                            &mut sender,
-                            &pool,
-                            project_id,
-                            organization_id,
-                            prev_seq,
-                            state.broker(),
-                            config.activity_catchup_batch_size,
-                            WS_BULK_SYNC_THRESHOLD as i64,
-                            "lag",
-                        ).await {
-                            Ok((seq, stream)) => {
-                                last_sent_seq = Some(seq);
-                                activity_stream = stream;
-                            }
-                            Err(()) => break,
-                        }
-                    }
-                    None => break,
-                }
-            }
-
-            maybe_message = inbound.next() => {
-                match maybe_message {
-                    Some(Ok(msg)) => {
-                        if matches!(msg, Message::Close(_)) {
-                            break;
-                        }
-                        if let Message::Text(text) = msg {
-                            match serde_json::from_str::<ClientMessage>(&text) {
-                                Ok(ClientMessage::Ack { .. }) => {}
-                                Ok(ClientMessage::AuthToken { token }) => {
-                                    auth_state.store_token(token);
-                                }
-                                Err(error) => {
-                                    tracing::debug!(?error, "invalid inbound message");
-                                }
-                            }
-                        }
-                    }
-                    Some(Err(error)) => {
-                        tracing::debug!(?error, "websocket receive error");
-                        break;
-                    }
-                    None => break,
-                }
-            }
-
-            _ = auth_check_interval.tick() => {
-                match auth_state.verify().await {
-                    Ok(()) => {}
-                    Err(error) => {
-                        tracing::info!(?error, "closing websocket due to auth verification error");
-                        let message = match error {
-                            AuthVerifyError::Revoked => "authorization revoked",
-                            AuthVerifyError::MembershipRevoked => "project access revoked",
-                            AuthVerifyError::Expired => "authorization expired",
-                            AuthVerifyError::UserMismatch { .. }
-                            | AuthVerifyError::Decode(_)
-                            | AuthVerifyError::Session(_) => "authorization error",
-                        };
-                        let _ = send_error(&mut sender, message).await;
-                        let _ = sender.send(Message::Close(None)).await;
-                        break;
-                    }
-                }
-            }
-        }
-    }
-}
-
-async fn send_activity(
-    sender: &mut futures::stream::SplitSink<WebSocket, Message>,
-    event: &ActivityEvent,
-) -> Result<(), ()> {
-    tracing::trace!(
-        event_type = %event.event_type.as_str(),
-        project_id = %event.project_id,
-        "sending activity event"
-    );
-
-    match serde_json::to_string(&ServerMessage::Activity(event.clone())) {
-        Ok(json) => sender
-            .send(Message::Text(json.into()))
-            .await
-            .map_err(|error| {
-                tracing::debug!(?error, "failed to send activity message");
-            }),
-        Err(error) => {
-            tracing::error!(?error, "failed to serialise activity event");
-            Err(())
-        }
-    }
-}
-
-async fn send_error(
-    sender: &mut futures::stream::SplitSink<WebSocket, Message>,
-    message: &str,
-) -> Result<(), ()> {
-    match serde_json::to_string(&ServerMessage::Error {
-        message: message.to_string(),
-    }) {
-        Ok(json) => sender
-            .send(Message::Text(json.into()))
-            .await
-            .map_err(|error| {
-                tracing::debug!(?error, "failed to send websocket error message");
-            }),
-        Err(error) => {
-            tracing::error!(?error, "failed to serialise websocket error message");
-            Err(())
-        }
-    }
-}
-
-struct WsAuthState {
-    jwt: Arc<JwtService>,
-    pool: PgPool,
|
|
||||||
session_id: Uuid,
|
|
||||||
expected_user_id: Uuid,
|
|
||||||
project_id: Uuid,
|
|
||||||
token_expires_at: DateTime<Utc>,
|
|
||||||
new_access_token: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WsAuthState {
|
|
||||||
fn new(
|
|
||||||
jwt: Arc<JwtService>,
|
|
||||||
pool: PgPool,
|
|
||||||
session_id: Uuid,
|
|
||||||
expected_user_id: Uuid,
|
|
||||||
project_id: Uuid,
|
|
||||||
token_expires_at: DateTime<Utc>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
jwt,
|
|
||||||
pool,
|
|
||||||
session_id,
|
|
||||||
expected_user_id,
|
|
||||||
project_id,
|
|
||||||
new_access_token: None,
|
|
||||||
token_expires_at,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn store_token(&mut self, token: String) {
|
|
||||||
self.new_access_token = Some(token);
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn verify(&mut self) -> Result<(), AuthVerifyError> {
|
|
||||||
if let Some(token) = self.new_access_token.take() {
|
|
||||||
let token_details = self
|
|
||||||
.jwt
|
|
||||||
.decode_access_token_with_leeway(&token, WS_TOKEN_EXPIRY_GRACE.as_secs())
|
|
||||||
.map_err(AuthVerifyError::Decode)?;
|
|
||||||
self.apply_identity(token_details.user_id, token_details.session_id)
|
|
||||||
.await?;
|
|
||||||
self.token_expires_at = token_details.expires_at;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.validate_token_expiry()?;
|
|
||||||
self.validate_session().await?;
|
|
||||||
self.validate_membership().await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn apply_identity(
|
|
||||||
&mut self,
|
|
||||||
user_id: Uuid,
|
|
||||||
session_id: Uuid,
|
|
||||||
) -> Result<(), AuthVerifyError> {
|
|
||||||
if user_id != self.expected_user_id {
|
|
||||||
return Err(AuthVerifyError::UserMismatch {
|
|
||||||
expected: self.expected_user_id,
|
|
||||||
received: user_id,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
self.session_id = session_id;
|
|
||||||
self.validate_session().await
|
|
||||||
}
|
|
||||||
|
|
||||||
fn validate_token_expiry(&self) -> Result<(), AuthVerifyError> {
|
|
||||||
if self.token_expires_at + ws_leeway_duration() > Utc::now() {
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
Err(AuthVerifyError::Expired)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn validate_session(&self) -> Result<(), AuthVerifyError> {
|
|
||||||
let repo = AuthSessionRepository::new(&self.pool);
|
|
||||||
let session = repo
|
|
||||||
.get(self.session_id)
|
|
||||||
.await
|
|
||||||
.map_err(AuthVerifyError::Session)?;
|
|
||||||
|
|
||||||
if session.revoked_at.is_some() {
|
|
||||||
return Err(AuthVerifyError::Revoked);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn validate_membership(&self) -> Result<(), AuthVerifyError> {
|
|
||||||
crate::routes::organization_members::ensure_project_access(
|
|
||||||
&self.pool,
|
|
||||||
self.expected_user_id,
|
|
||||||
self.project_id,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map(|_| ())
|
|
||||||
.map_err(|error| {
|
|
||||||
tracing::warn!(
|
|
||||||
?error,
|
|
||||||
user_id = %self.expected_user_id,
|
|
||||||
project_id = %self.project_id,
|
|
||||||
"websocket membership validation failed"
|
|
||||||
);
|
|
||||||
AuthVerifyError::MembershipRevoked
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ws_leeway_duration() -> ChronoDuration {
|
|
||||||
ChronoDuration::from_std(WS_TOKEN_EXPIRY_GRACE).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Error)]
|
|
||||||
enum AuthVerifyError {
|
|
||||||
#[error(transparent)]
|
|
||||||
Decode(#[from] JwtError),
|
|
||||||
#[error("received token for unexpected user: expected {expected}, received {received}")]
|
|
||||||
UserMismatch { expected: Uuid, received: Uuid },
|
|
||||||
#[error(transparent)]
|
|
||||||
Session(#[from] AuthSessionError),
|
|
||||||
#[error("session revoked")]
|
|
||||||
Revoked,
|
|
||||||
#[error("organization membership revoked")]
|
|
||||||
MembershipRevoked,
|
|
||||||
#[error("access token expired")]
|
|
||||||
Expired,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
|
||||||
async fn activity_stream_catch_up(
|
|
||||||
sender: &mut futures::stream::SplitSink<WebSocket, Message>,
|
|
||||||
pool: &PgPool,
|
|
||||||
project_id: Uuid,
|
|
||||||
organization_id: Uuid,
|
|
||||||
last_seq: i64,
|
|
||||||
broker: &ActivityBroker,
|
|
||||||
batch_size: i64,
|
|
||||||
bulk_limit: i64,
|
|
||||||
reason: &'static str,
|
|
||||||
) -> Result<(i64, ActivityStream), ()> {
|
|
||||||
let mut activity_stream = broker.subscribe(project_id);
|
|
||||||
|
|
||||||
let event = match activity_stream.next().await {
|
|
||||||
Some(Ok(event)) => event,
|
|
||||||
Some(Err(_)) | None => {
|
|
||||||
let _ = send_error(sender, "activity backlog dropped").await;
|
|
||||||
return Err(());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let target_seq = event.seq;
|
|
||||||
|
|
||||||
if target_seq <= last_seq {
|
|
||||||
return Ok((last_seq, activity_stream));
|
|
||||||
}
|
|
||||||
|
|
||||||
let bulk_limit = bulk_limit.max(1);
|
|
||||||
let diff = target_seq - last_seq;
|
|
||||||
if diff > bulk_limit {
|
|
||||||
tracing::info!(
|
|
||||||
org_id = %organization_id,
|
|
||||||
project_id = %project_id,
|
|
||||||
threshold = bulk_limit,
|
|
||||||
reason,
|
|
||||||
"activity catch up exceeded threshold; forcing bulk sync"
|
|
||||||
);
|
|
||||||
let _ = send_error(sender, "activity backlog dropped").await;
|
|
||||||
return Err(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let catch_up_result = catch_up_from_db(
|
|
||||||
sender,
|
|
||||||
pool,
|
|
||||||
project_id,
|
|
||||||
organization_id,
|
|
||||||
last_seq,
|
|
||||||
target_seq,
|
|
||||||
batch_size.max(1),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
match catch_up_result {
|
|
||||||
Ok(seq) => Ok((seq, activity_stream)),
|
|
||||||
Err(CatchUpError::Stale) => {
|
|
||||||
let _ = send_error(sender, "activity backlog dropped").await;
|
|
||||||
Err(())
|
|
||||||
}
|
|
||||||
Err(CatchUpError::Send) => Err(()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Error)]
|
|
||||||
enum CatchUpError {
|
|
||||||
#[error("activity stream went stale during catch up")]
|
|
||||||
Stale,
|
|
||||||
#[error("failed to send activity event")]
|
|
||||||
Send,
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn catch_up_from_db(
|
|
||||||
sender: &mut futures::stream::SplitSink<WebSocket, Message>,
|
|
||||||
pool: &PgPool,
|
|
||||||
project_id: Uuid,
|
|
||||||
organization_id: Uuid,
|
|
||||||
last_seq: i64,
|
|
||||||
target_seq: i64,
|
|
||||||
batch_size: i64,
|
|
||||||
) -> Result<i64, CatchUpError> {
|
|
||||||
let repository = ActivityRepository::new(pool);
|
|
||||||
let mut current_seq = last_seq;
|
|
||||||
let mut cursor = last_seq;
|
|
||||||
|
|
||||||
loop {
|
|
||||||
let events = repository
|
|
||||||
.fetch_since(project_id, Some(cursor), batch_size)
|
|
||||||
.await
|
|
||||||
.map_err(|error| {
|
|
||||||
tracing::error!(?error, org_id = %organization_id, project_id = %project_id, "failed to fetch activity catch up");
|
|
||||||
CatchUpError::Stale
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if events.is_empty() {
|
|
||||||
tracing::warn!(org_id = %organization_id, project_id = %project_id, "activity catch up returned no events");
|
|
||||||
return Err(CatchUpError::Stale);
|
|
||||||
}
|
|
||||||
|
|
||||||
for event in events {
|
|
||||||
if event.seq <= current_seq {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if event.seq > target_seq {
|
|
||||||
return Ok(current_seq);
|
|
||||||
}
|
|
||||||
if send_activity(sender, &event).await.is_err() {
|
|
||||||
return Err(CatchUpError::Send);
|
|
||||||
}
|
|
||||||
current_seq = event.seq;
|
|
||||||
cursor = event.seq;
|
|
||||||
}
|
|
||||||
|
|
||||||
if current_seq >= target_seq {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(current_seq)
|
|
||||||
}
|
|
||||||
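Aside, not part of the diff: the select loop above interleaves auth refresh, inbound messages, and the activity stream, but the per-event sequencing rule it applies is small enough to state as a pure function. A minimal sketch under illustrative names (`CatchUpDecision` and `decide` are not from this codebase; thresholds and I/O are omitted):

#[derive(Debug, PartialEq)]
enum CatchUpDecision {
    Skip,                            // duplicate or stale event: drop it
    Forward,                         // exactly the next sequence: send it
    CatchUp { from_exclusive: i64 }, // a gap: replay the missing range from the DB
}

fn decide(last_sent_seq: Option<i64>, incoming_seq: i64) -> CatchUpDecision {
    match last_sent_seq {
        None => CatchUpDecision::Forward, // no baseline yet: forward and record it
        Some(prev) if prev >= incoming_seq => CatchUpDecision::Skip,
        Some(prev) if incoming_seq > prev + 1 => CatchUpDecision::CatchUp { from_exclusive: prev },
        Some(_) => CatchUpDecision::Forward,
    }
}

fn main() {
    assert_eq!(decide(Some(5), 5), CatchUpDecision::Skip);    // already sent
    assert_eq!(decide(Some(5), 6), CatchUpDecision::Forward); // contiguous
    assert_eq!(decide(Some(5), 9), CatchUpDecision::CatchUp { from_exclusive: 5 }); // events 6..=8 missed
}

In the real handler the CatchUp branch additionally gives up and forces a bulk sync when the gap exceeds WS_BULK_SYNC_THRESHOLD.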
@@ -11,6 +11,7 @@ uninlined-format-args = "allow"
 deployment = { path = "../deployment" }
 executors = { path = "../executors" }
 local-deployment = { path = "../local-deployment" }
+remote = { path = "../remote" }
 utils = { path = "../utils" }
 db = { path = "../db" }
 services = { path = "../services" }
@@ -10,44 +10,46 @@ fn generate_types_content() -> String {
 // If you are an AI, and you absolutely have to edit this file, please confirm with the user first.";

     let decls: Vec<String> = vec![
-        services::services::filesystem::DirectoryEntry::decl(),
-        services::services::filesystem::DirectoryListResponse::decl(),
+        remote::routes::tasks::SharedTaskResponse::decl(),
+        remote::routes::tasks::AssigneesQuery::decl(),
+        remote::db::tasks::SharedTask::decl(),
+        remote::db::users::UserData::decl(),
         db::models::project::Project::decl(),
         db::models::project::CreateProject::decl(),
         db::models::project::UpdateProject::decl(),
         db::models::project::SearchResult::decl(),
         db::models::project::SearchMatchType::decl(),
-        server::routes::projects::CreateRemoteProjectRequest::decl(),
-        server::routes::projects::LinkToExistingRequest::decl(),
-        executors::actions::ExecutorAction::decl(),
-        executors::mcp_config::McpConfig::decl(),
-        executors::actions::ExecutorActionType::decl(),
-        executors::actions::script::ScriptContext::decl(),
-        executors::actions::script::ScriptRequest::decl(),
-        executors::actions::script::ScriptRequestLanguage::decl(),
-        executors::executors::BaseCodingAgent::decl(),
-        executors::executors::CodingAgent::decl(),
         db::models::tag::Tag::decl(),
         db::models::tag::CreateTag::decl(),
         db::models::tag::UpdateTag::decl(),
-        server::routes::tags::TagSearchParams::decl(),
         db::models::task::TaskStatus::decl(),
         db::models::task::Task::decl(),
         db::models::task::TaskWithAttemptStatus::decl(),
         db::models::task::TaskRelationships::decl(),
         db::models::task::CreateTask::decl(),
         db::models::task::UpdateTask::decl(),
-        db::models::shared_task::SharedTask::decl(),
         db::models::scratch::DraftFollowUpData::decl(),
         db::models::scratch::ScratchPayload::decl(),
         db::models::scratch::ScratchType::decl(),
         db::models::scratch::Scratch::decl(),
         db::models::scratch::CreateScratch::decl(),
         db::models::scratch::UpdateScratch::decl(),
-        services::services::queued_message::QueuedMessage::decl(),
-        services::services::queued_message::QueueStatus::decl(),
         db::models::image::Image::decl(),
         db::models::image::CreateImage::decl(),
+        db::models::task_attempt::TaskAttempt::decl(),
+        db::models::execution_process::ExecutionProcess::decl(),
+        db::models::execution_process::ExecutionProcessStatus::decl(),
+        db::models::execution_process::ExecutionProcessRunReason::decl(),
+        db::models::merge::Merge::decl(),
+        db::models::merge::DirectMerge::decl(),
+        db::models::merge::PrMerge::decl(),
+        db::models::merge::MergeStatus::decl(),
+        db::models::merge::PullRequestInfo::decl(),
+        utils::approvals::ApprovalStatus::decl(),
+        utils::approvals::CreateApprovalRequest::decl(),
+        utils::approvals::ApprovalResponse::decl(),
+        utils::diff::Diff::decl(),
+        utils::diff::DiffChangeKind::decl(),
         utils::response::ApiResponse::<()>::decl(),
         utils::api::oauth::LoginStatus::decl(),
         utils::api::oauth::ProfileResponse::decl(),
@@ -77,6 +79,10 @@ fn generate_types_content() -> String {
         utils::api::projects::RemoteProject::decl(),
         utils::api::projects::ListProjectsResponse::decl(),
         utils::api::projects::RemoteProjectMembersResponse::decl(),
+        server::routes::projects::CreateRemoteProjectRequest::decl(),
+        server::routes::projects::LinkToExistingRequest::decl(),
+        server::routes::tags::TagSearchParams::decl(),
+        server::routes::oauth::TokenResponse::decl(),
         server::routes::config::UserSystemInfo::decl(),
         server::routes::config::Environment::decl(),
         server::routes::config::McpServerQuery::decl(),
@@ -85,7 +91,7 @@ fn generate_types_content() -> String {
         server::routes::config::CheckEditorAvailabilityQuery::decl(),
         server::routes::config::CheckEditorAvailabilityResponse::decl(),
         server::routes::config::CheckAgentAvailabilityQuery::decl(),
-        executors::executors::AvailabilityInfo::decl(),
+        server::routes::oauth::CurrentUserResponse::decl(),
         server::routes::task_attempts::CreateFollowUpAttempt::decl(),
         server::routes::task_attempts::ChangeTargetBranchRequest::decl(),
         server::routes::task_attempts::ChangeTargetBranchResponse::decl(),
@@ -95,12 +101,22 @@ fn generate_types_content() -> String {
         server::routes::task_attempts::OpenEditorRequest::decl(),
         server::routes::task_attempts::OpenEditorResponse::decl(),
         server::routes::shared_tasks::AssignSharedTaskRequest::decl(),
-        server::routes::shared_tasks::AssignSharedTaskResponse::decl(),
         server::routes::tasks::ShareTaskResponse::decl(),
         server::routes::tasks::CreateAndStartTaskRequest::decl(),
         server::routes::task_attempts::CreateGitHubPrRequest::decl(),
         server::routes::images::ImageResponse::decl(),
         server::routes::images::ImageMetadata::decl(),
+        server::routes::task_attempts::CreateTaskAttemptBody::decl(),
+        server::routes::task_attempts::RunAgentSetupRequest::decl(),
+        server::routes::task_attempts::RunAgentSetupResponse::decl(),
+        server::routes::task_attempts::gh_cli_setup::GhCliSetupError::decl(),
+        server::routes::task_attempts::RebaseTaskAttemptRequest::decl(),
+        server::routes::task_attempts::GitOperationError::decl(),
+        server::routes::task_attempts::PushError::decl(),
+        server::routes::task_attempts::CreatePrError::decl(),
+        server::routes::task_attempts::BranchStatus::decl(),
+        services::services::filesystem::DirectoryEntry::decl(),
+        services::services::filesystem::DirectoryListResponse::decl(),
         services::services::config::Config::decl(),
         services::services::config::NotificationConfig::decl(),
         services::services::config::ThemeMode::decl(),
@@ -112,8 +128,19 @@ fn generate_types_content() -> String {
         services::services::config::UiLanguage::decl(),
         services::services::config::ShowcaseState::decl(),
         services::services::git::GitBranch::decl(),
-        utils::diff::Diff::decl(),
-        utils::diff::DiffChangeKind::decl(),
+        services::services::share::SharedTaskDetails::decl(),
+        services::services::queued_message::QueuedMessage::decl(),
+        services::services::queued_message::QueueStatus::decl(),
+        services::services::git::ConflictOp::decl(),
+        executors::actions::ExecutorAction::decl(),
+        executors::mcp_config::McpConfig::decl(),
+        executors::actions::ExecutorActionType::decl(),
+        executors::actions::script::ScriptContext::decl(),
+        executors::actions::script::ScriptRequest::decl(),
+        executors::actions::script::ScriptRequestLanguage::decl(),
+        executors::executors::BaseCodingAgent::decl(),
+        executors::executors::CodingAgent::decl(),
+        executors::executors::AvailabilityInfo::decl(),
         executors::command::CommandBuilder::decl(),
         executors::profile::ExecutorProfileId::decl(),
         executors::profile::ExecutorConfig::decl(),
@@ -138,25 +165,6 @@ fn generate_types_content() -> String {
         executors::executors::AppendPrompt::decl(),
         executors::actions::coding_agent_initial::CodingAgentInitialRequest::decl(),
         executors::actions::coding_agent_follow_up::CodingAgentFollowUpRequest::decl(),
-        server::routes::task_attempts::CreateTaskAttemptBody::decl(),
-        server::routes::task_attempts::RunAgentSetupRequest::decl(),
-        server::routes::task_attempts::RunAgentSetupResponse::decl(),
-        server::routes::task_attempts::gh_cli_setup::GhCliSetupError::decl(),
-        server::routes::task_attempts::RebaseTaskAttemptRequest::decl(),
-        server::routes::task_attempts::GitOperationError::decl(),
-        server::routes::task_attempts::PushError::decl(),
-        server::routes::task_attempts::CreatePrError::decl(),
-        server::routes::task_attempts::BranchStatus::decl(),
-        services::services::git::ConflictOp::decl(),
-        db::models::task_attempt::TaskAttempt::decl(),
-        db::models::execution_process::ExecutionProcess::decl(),
-        db::models::execution_process::ExecutionProcessStatus::decl(),
-        db::models::execution_process::ExecutionProcessRunReason::decl(),
-        db::models::merge::Merge::decl(),
-        db::models::merge::DirectMerge::decl(),
-        db::models::merge::PrMerge::decl(),
-        db::models::merge::MergeStatus::decl(),
-        db::models::merge::PullRequestInfo::decl(),
         executors::logs::CommandExitStatus::decl(),
         executors::logs::CommandRunResult::decl(),
         executors::logs::NormalizedEntry::decl(),
@@ -169,9 +177,6 @@ fn generate_types_content() -> String {
         executors::logs::ToolResultValueType::decl(),
         executors::logs::ToolStatus::decl(),
         executors::logs::utils::patch::PatchType::decl(),
-        utils::approvals::ApprovalStatus::decl(),
-        utils::approvals::CreateApprovalRequest::decl(),
-        utils::approvals::ApprovalResponse::decl(),
         serde_json::Value::decl(),
     ];

@@ -286,10 +286,6 @@ impl From<ShareError> for ApiError {
                 tracing::error!(?err, "share task URL error");
                 ApiError::Conflict("Share service URL is invalid".to_string())
             }
-            ShareError::WebSocket(err) => {
-                tracing::error!(?err, "share task websocket error");
-                ApiError::Conflict("Unexpected websocket error during sharing".to_string())
-            }
             ShareError::InvalidResponse => ApiError::Conflict(
                 "Remote share service returned an unexpected response".to_string(),
             ),
@@ -73,6 +73,16 @@ async fn main() -> Result<(), VibeKanbanError> {
         }
     });

+    // Verify shared tasks in background
+    let deployment_for_verification = deployment.clone();
+    tokio::spawn(async move {
+        if let Some(publisher) = deployment_for_verification.container().share_publisher()
+            && let Err(e) = publisher.cleanup_shared_tasks().await
+        {
+            tracing::warn!("Failed to verify shared tasks: {}", e);
+        }
+    });
+
     let app_router = routes::router(deployment.clone());

     let port = std::env::var("BACKEND_PORT")
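For context, the addition above is a fire-and-forget startup task: clone what the task needs, spawn it detached, and downgrade failures to a warning so startup is never blocked. A self-contained sketch of the pattern (assumes the `tokio` crate with `rt-multi-thread`, `macros`, and `time` features; `cleanup_shared_tasks` here is a stand-in, not the real publisher method):

use std::time::Duration;

async fn cleanup_shared_tasks() -> Result<(), String> {
    tokio::time::sleep(Duration::from_millis(10)).await; // stand-in for real work
    Ok(())
}

#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async move {
        if let Err(e) = cleanup_shared_tasks().await {
            // Log and move on; a failed verification must not take the server down.
            eprintln!("Failed to verify shared tasks: {e}");
        }
    });
    // Startup continues immediately; the join here is only so the demo exits cleanly.
    let _ = handle.await;
}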
@@ -468,7 +468,7 @@ impl TaskServer {
             Ok(s) => Some(s),
             Err(_) => {
                 return Self::err(
-                    "Invalid status filter. Valid values: 'todo', 'in-progress', 'in-review', 'done', 'cancelled'".to_string(),
+                    "Invalid status filter. Valid values: 'todo', 'inprogress', 'inreview', 'done', 'cancelled'".to_string(),
                     Some(status_str.to_string()),
                 );
             }
@@ -595,7 +595,7 @@ impl TaskServer {
             Ok(s) => Some(s),
             Err(_) => {
                 return Self::err(
-                    "Invalid status filter. Valid values: 'todo', 'in-progress', 'in-review', 'done', 'cancelled'".to_string(),
+                    "Invalid status filter. Valid values: 'todo', 'inprogress', 'inreview', 'done', 'cancelled'".to_string(),
                     Some(status_str.to_string()),
                 );
             }
@@ -5,11 +5,14 @@ use axum::{
     response::Json as ResponseJson,
     routing::{get, post},
 };
+use chrono::{DateTime, Utc};
 use deployment::Deployment;
 use rand::{Rng, distributions::Alphanumeric};
 use serde::{Deserialize, Serialize};
 use services::services::{config::save_config_to_file, oauth_credentials::Credentials};
 use sha2::{Digest, Sha256};
+use tokio;
+use ts_rs::TS;
 use utils::{
     api::oauth::{HandoffInitRequest, HandoffRedeemRequest, StatusResponse},
     assets::config_path,
@@ -20,12 +23,29 @@ use uuid::Uuid;

 use crate::{DeploymentImpl, error::ApiError};

+/// Response from GET /api/auth/token - returns the current access token
+#[derive(Debug, Serialize, TS)]
+#[ts(export)]
+pub struct TokenResponse {
+    pub access_token: String,
+    pub expires_at: Option<DateTime<Utc>>,
+}
+
+/// Response from GET /api/auth/user - returns the current user ID
+#[derive(Debug, Serialize, TS)]
+#[ts(export)]
+pub struct CurrentUserResponse {
+    pub user_id: String,
+}
+
 pub fn router() -> Router<DeploymentImpl> {
     Router::new()
         .route("/auth/handoff/init", post(handoff_init))
         .route("/auth/handoff/complete", get(handoff_complete))
         .route("/auth/logout", post(logout))
         .route("/auth/status", get(status))
+        .route("/auth/token", get(get_token))
+        .route("/auth/user", get(get_current_user))
 }

 #[derive(Debug, Deserialize)]
@@ -188,22 +208,13 @@ async fn handoff_complete(
         );
     }

-    // Start remote sync if not already running
-    {
-        let handle_guard = deployment.share_sync_handle().lock().await;
-        let should_start = handle_guard.is_none();
-        drop(handle_guard);
-
-        if should_start {
-            if let Some(share_config) = deployment.share_config() {
-                tracing::info!("Starting remote sync after login");
-                deployment.spawn_remote_sync(share_config.clone());
-            } else {
-                tracing::debug!(
-                    "Share config not available; skipping remote sync spawn after login"
-                );
-            }
-        }
-    }
+    // Trigger shared task cleanup in background
+    if let Ok(publisher) = deployment.share_publisher() {
+        tokio::spawn(async move {
+            if let Err(e) = publisher.cleanup_shared_tasks().await {
+                tracing::error!("Failed to cleanup shared tasks on login: {}", e);
+            }
+        });
+    }

     Ok(close_window_response(format!(
@@ -212,12 +223,6 @@ async fn handoff_complete(
 }

 async fn logout(State(deployment): State<DeploymentImpl>) -> Result<StatusCode, ApiError> {
-    // Stop remote sync if running
-    if let Some(handle) = deployment.share_sync_handle().lock().await.take() {
-        tracing::info!("Stopping remote sync due to logout");
-        handle.shutdown().await;
-    }
-
     let auth_context = deployment.auth_context();

     if let Ok(client) = deployment.remote_client() {
@@ -255,6 +260,51 @@ async fn status(
     }
 }

+/// Returns the current access token (auto-refreshes if needed)
+async fn get_token(
+    State(deployment): State<DeploymentImpl>,
+) -> Result<ResponseJson<ApiResponse<TokenResponse>>, ApiError> {
+    let remote_client = deployment.remote_client()?;
+
+    // This will auto-refresh the token if expired
+    let access_token = remote_client
+        .access_token()
+        .await
+        .map_err(|_| ApiError::Unauthorized)?;
+
+    let creds = deployment.auth_context().get_credentials().await;
+    let expires_at = creds.and_then(|c| c.expires_at);
+
+    Ok(ResponseJson(ApiResponse::success(TokenResponse {
+        access_token,
+        expires_at,
+    })))
+}
+
+async fn get_current_user(
+    State(deployment): State<DeploymentImpl>,
+) -> Result<ResponseJson<ApiResponse<CurrentUserResponse>>, ApiError> {
+    let remote_client = deployment.remote_client()?;
+
+    // Get the access token from remote client
+    let access_token = remote_client
+        .access_token()
+        .await
+        .map_err(|_| ApiError::Unauthorized)?;
+
+    // Extract user ID from the JWT token's 'sub' claim
+    let user_id = utils::jwt::extract_subject(&access_token)
+        .map_err(|e| {
+            tracing::error!("Failed to extract user ID from token: {}", e);
+            ApiError::Unauthorized
+        })?
+        .to_string();
+
+    Ok(ResponseJson(ApiResponse::success(CurrentUserResponse {
+        user_id,
+    })))
+}
+
 fn generate_secret() -> String {
     rand::thread_rng()
         .sample_iter(&Alphanumeric)
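For illustration only: `utils::jwt::extract_subject` is not shown in this diff, but extracting a JWT's `sub` claim amounts to decoding the middle base64url segment. A dependency-light sketch (assumes the `base64` and `serde_json` crates; it skips signature verification, so it must never be used to make auth decisions):

use base64::Engine;

fn extract_subject_unverified(token: &str) -> Option<String> {
    // A JWT is three dot-separated base64url segments: header.payload.signature.
    let payload_b64 = token.split('.').nth(1)?;
    let payload = base64::engine::general_purpose::URL_SAFE_NO_PAD
        .decode(payload_b64)
        .ok()?;
    let claims: serde_json::Value = serde_json::from_slice(&payload).ok()?;
    claims.get("sub")?.as_str().map(str::to_owned)
}

fn main() {
    // Payload {"sub":"user-123"} encoded as base64url, with a dummy header and signature.
    let token = "eyJhbGciOiJub25lIn0.eyJzdWIiOiJ1c2VyLTEyMyJ9.sig";
    assert_eq!(extract_subject_unverified(token).as_deref(), Some("user-123"));
}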
@@ -20,7 +20,6 @@ use services::services::{
     file_search_cache::{CacheError, SearchMode, SearchQuery},
     git::GitBranch,
     remote_client::CreateRemoteProjectPayload,
-    share::link_shared_tasks_to_project,
 };
 use ts_rs::TS;
 use utils::{
@@ -65,7 +64,7 @@ pub async fn get_project_branches(
 }

 pub async fn link_project_to_existing_remote(
-    Path(project_id): Path<Uuid>,
+    Extension(project): Extension<Project>,
     State(deployment): State<DeploymentImpl>,
     Json(payload): Json<LinkToExistingRequest>,
 ) -> Result<ResponseJson<ApiResponse<Project>>, ApiError> {
@@ -73,14 +72,13 @@ pub async fn link_project_to_existing_remote(

     let remote_project = client.get_project(payload.remote_project_id).await?;

-    let updated_project =
-        apply_remote_project_link(&deployment, project_id, remote_project).await?;
+    let updated_project = apply_remote_project_link(&deployment, project, remote_project).await?;

     Ok(ResponseJson(ApiResponse::success(updated_project)))
 }

 pub async fn create_and_link_remote_project(
-    Path(project_id): Path<Uuid>,
+    Extension(project): Extension<Project>,
     State(deployment): State<DeploymentImpl>,
     Json(payload): Json<CreateRemoteProjectRequest>,
 ) -> Result<ResponseJson<ApiResponse<Project>>, ApiError> {
@@ -101,8 +99,7 @@ pub async fn create_and_link_remote_project(
         })
         .await?;

-    let updated_project =
-        apply_remote_project_link(&deployment, project_id, remote_project).await?;
+    let updated_project = apply_remote_project_link(&deployment, project, remote_project).await?;

     Ok(ResponseJson(ApiResponse::success(updated_project)))
 }
@@ -167,26 +164,28 @@ pub async fn get_project_remote_members(

 async fn apply_remote_project_link(
     deployment: &DeploymentImpl,
-    project_id: Uuid,
+    project: Project,
     remote_project: RemoteProject,
 ) -> Result<Project, ApiError> {
     let pool = &deployment.db().pool;

-    Project::set_remote_project_id(pool, project_id, Some(remote_project.id)).await?;
+    if project.remote_project_id.is_some() {
+        return Err(ApiError::Conflict(
+            "Project is already linked to a remote project. Unlink it first.".to_string(),
+        ));
+    }
+
+    Project::set_remote_project_id(pool, project.id, Some(remote_project.id)).await?;

-    let updated_project = Project::find_by_id(pool, project_id)
+    let updated_project = Project::find_by_id(pool, project.id)
         .await?
         .ok_or(ProjectError::ProjectNotFound)?;

-    let current_profile = deployment.auth_context().cached_profile().await;
-    let current_user_id = current_profile.as_ref().map(|p| p.user_id);
-    link_shared_tasks_to_project(pool, current_user_id, project_id, remote_project.id).await?;
-
     deployment
         .track_if_analytics_allowed(
             "project_linked_to_remote",
             serde_json::json!({
-                "project_id": project_id.to_string(),
+                "project_id": project.id.to_string(),
             }),
         )
         .await;
@@ -4,10 +4,11 @@ use axum::{
     response::Json as ResponseJson,
     routing::{delete, post},
 };
-use db::models::shared_task::SharedTask;
+use db::models::task::Task;
 use deployment::Deployment;
-use serde::{Deserialize, Serialize};
-use services::services::share::ShareError;
+use remote::routes::tasks::SharedTaskResponse;
+use serde::Deserialize;
+use services::services::share::{ShareError, SharedTaskDetails};
 use ts_rs::TS;
 use utils::response::ApiResponse;
 use uuid::Uuid;
@@ -18,13 +19,6 @@ use crate::{DeploymentImpl, error::ApiError};
 #[ts(export)]
 pub struct AssignSharedTaskRequest {
     pub new_assignee_user_id: Option<String>,
-    pub version: Option<i64>,
-}
-
-#[derive(Debug, Clone, Serialize, TS)]
-#[ts(export)]
-pub struct AssignSharedTaskResponse {
-    pub shared_task: SharedTask,
 }

 pub fn router() -> Router<DeploymentImpl> {
@@ -34,27 +28,23 @@ pub fn router() -> Router<DeploymentImpl> {
             post(assign_shared_task),
         )
         .route("/shared-tasks/{shared_task_id}", delete(delete_shared_task))
+        .route(
+            "/shared-tasks/link-to-local",
+            post(link_shared_task_to_local),
+        )
 }

 pub async fn assign_shared_task(
     Path(shared_task_id): Path<Uuid>,
     State(deployment): State<DeploymentImpl>,
     Json(payload): Json<AssignSharedTaskRequest>,
-) -> Result<ResponseJson<ApiResponse<AssignSharedTaskResponse>>, ApiError> {
+) -> Result<ResponseJson<ApiResponse<SharedTaskResponse>>, ApiError> {
     let Ok(publisher) = deployment.share_publisher() else {
         return Err(ShareError::MissingConfig("share publisher unavailable").into());
     };

-    let shared_task = SharedTask::find_by_id(&deployment.db().pool, shared_task_id)
-        .await?
-        .ok_or_else(|| ApiError::Conflict("shared task not found".into()))?;
-
     let updated_shared_task = publisher
-        .assign_shared_task(
-            &shared_task,
-            payload.new_assignee_user_id.clone(),
-            payload.version,
-        )
+        .assign_shared_task(shared_task_id, payload.new_assignee_user_id.clone())
         .await?;

     let props = serde_json::json!({
@@ -65,11 +55,7 @@ pub async fn assign_shared_task(
         .track_if_analytics_allowed("reassign_shared_task", props)
         .await;

-    Ok(ResponseJson(ApiResponse::success(
-        AssignSharedTaskResponse {
-            shared_task: updated_shared_task,
-        },
-    )))
+    Ok(ResponseJson(ApiResponse::success(updated_shared_task)))
 }

 pub async fn delete_shared_task(
@@ -91,3 +77,27 @@ pub async fn delete_shared_task(

     Ok(ResponseJson(ApiResponse::success(())))
 }
+
+pub async fn link_shared_task_to_local(
+    State(deployment): State<DeploymentImpl>,
+    Json(shared_task_details): Json<SharedTaskDetails>,
+) -> Result<ResponseJson<ApiResponse<Option<Task>>>, ApiError> {
+    let Ok(publisher) = deployment.share_publisher() else {
+        return Err(ShareError::MissingConfig("share publisher unavailable").into());
+    };
+
+    let task = publisher.link_shared_task(shared_task_details).await?;
+
+    if let Some(ref task) = task {
+        let props = serde_json::json!({
+            "shared_task_id": task.shared_task_id,
+            "task_id": task.id,
+            "project_id": task.project_id,
+        });
+        deployment
+            .track_if_analytics_allowed("link_shared_task_to_local", props)
+            .await;
+    }
+
+    Ok(ResponseJson(ApiResponse::success(task)))
+}
@@ -28,7 +28,7 @@ use services::services::{
 };
 use sqlx::Error as SqlxError;
 use ts_rs::TS;
-use utils::response::ApiResponse;
+use utils::{api::oauth::LoginStatus, response::ApiResponse};
 use uuid::Uuid;

 use crate::{DeploymentImpl, error::ApiError, middleware::load_task_middleware};
@@ -220,6 +220,8 @@ pub async fn update_task(

     Json(payload): Json<UpdateTask>,
 ) -> Result<ResponseJson<ApiResponse<Task>>, ApiError> {
+    ensure_shared_task_auth(&existing_task, &deployment).await?;
+
     // Use existing values if not provided in update
     let title = payload.title.unwrap_or(existing_task.title);
     let description = match payload.description {
@@ -259,10 +261,27 @@ pub async fn update_task(
     Ok(ResponseJson(ApiResponse::success(task)))
 }

+async fn ensure_shared_task_auth(
+    existing_task: &Task,
+    deployment: &local_deployment::LocalDeployment,
+) -> Result<(), ApiError> {
+    if existing_task.shared_task_id.is_some() {
+        match deployment.get_login_status().await {
+            LoginStatus::LoggedIn { .. } => return Ok(()),
+            LoginStatus::LoggedOut => {
+                return Err(ShareError::MissingAuth.into());
+            }
+        }
+    }
+    Ok(())
+}
+
 pub async fn delete_task(
     Extension(task): Extension<Task>,
     State(deployment): State<DeploymentImpl>,
 ) -> Result<(StatusCode, ResponseJson<ApiResponse<()>>), ApiError> {
+    ensure_shared_task_auth(&task, &deployment).await?;
+
     // Validate no running execution processes
     if deployment
         .container()
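The guard added above reduces to a small policy, sketched here as a pure function with illustrative names (`may_mutate` and `Login` are not from the codebase): a task linked to a shared task may only be mutated while logged in, matching the "prevent modification of shared task offline" change.

#[derive(Clone, Copy)]
enum Login {
    LoggedIn,
    LoggedOut,
}

fn may_mutate(shared_task_id: Option<u64>, login: Login) -> bool {
    match (shared_task_id, login) {
        (Some(_), Login::LoggedOut) => false, // shared task while offline: reject
        _ => true,                            // purely local task, or logged in
    }
}

fn main() {
    assert!(may_mutate(None, Login::LoggedOut));     // local tasks stay editable offline
    assert!(may_mutate(Some(7), Login::LoggedIn));   // shared task, logged in: allowed
    assert!(!may_mutate(Some(7), Login::LoggedOut)); // shared task, logged out: blocked
}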
@@ -3,8 +3,8 @@ use std::{str::FromStr, sync::Arc};
|
|||||||
use db::{
|
use db::{
|
||||||
DBService,
|
DBService,
|
||||||
models::{
|
models::{
|
||||||
execution_process::ExecutionProcess, scratch::Scratch,
|
execution_process::ExecutionProcess, scratch::Scratch, task::Task,
|
||||||
shared_task::SharedTask as SharedDbTask, task::Task, task_attempt::TaskAttempt,
|
task_attempt::TaskAttempt,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
@@ -20,9 +20,7 @@ mod streams;
|
|||||||
#[path = "events/types.rs"]
|
#[path = "events/types.rs"]
|
||||||
pub mod types;
|
pub mod types;
|
||||||
|
|
||||||
pub use patches::{
|
pub use patches::{execution_process_patch, scratch_patch, task_attempt_patch, task_patch};
|
||||||
execution_process_patch, scratch_patch, shared_task_patch, task_attempt_patch, task_patch,
|
|
||||||
};
|
|
||||||
pub use types::{EventError, EventPatch, EventPatchInner, HookTables, RecordTypes};
|
pub use types::{EventError, EventPatch, EventPatchInner, HookTables, RecordTypes};
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
@@ -125,14 +123,6 @@ impl EventService {
|
|||||||
msg_store_for_preupdate.push_patch(patch);
|
msg_store_for_preupdate.push_patch(patch);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
"shared_tasks" => {
|
|
||||||
if let Ok(value) = preupdate.get_old_column_value(0)
|
|
||||||
&& let Ok(task_id) = <Uuid as Decode<Sqlite>>::decode(value)
|
|
||||||
{
|
|
||||||
let patch = shared_task_patch::remove(task_id);
|
|
||||||
msg_store_for_preupdate.push_patch(patch);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"scratch" => {
|
"scratch" => {
|
||||||
// Composite key: need both id (column 0) and scratch_type (column 1)
|
// Composite key: need both id (column 0) and scratch_type (column 1)
|
||||||
if let Ok(id_val) = preupdate.get_old_column_value(0)
|
if let Ok(id_val) = preupdate.get_old_column_value(0)
|
||||||
@@ -163,27 +153,10 @@ impl EventService {
|
|||||||
(HookTables::Tasks, SqliteOperation::Delete)
|
(HookTables::Tasks, SqliteOperation::Delete)
|
||||||
| (HookTables::TaskAttempts, SqliteOperation::Delete)
|
| (HookTables::TaskAttempts, SqliteOperation::Delete)
|
||||||
| (HookTables::ExecutionProcesses, SqliteOperation::Delete)
|
| (HookTables::ExecutionProcesses, SqliteOperation::Delete)
|
||||||
| (HookTables::SharedTasks, SqliteOperation::Delete)
|
|
||||||
| (HookTables::Scratch, SqliteOperation::Delete) => {
|
| (HookTables::Scratch, SqliteOperation::Delete) => {
|
||||||
// Deletions handled in preupdate hook for reliable data capture
|
// Deletions handled in preupdate hook for reliable data capture
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
(HookTables::SharedTasks, _) => {
|
|
||||||
match SharedDbTask::find_by_rowid(&db.pool, rowid).await {
|
|
||||||
Ok(Some(task)) => RecordTypes::SharedTask(task),
|
|
||||||
Ok(None) => RecordTypes::DeletedSharedTask {
|
|
||||||
rowid,
|
|
||||||
task_id: None,
|
|
||||||
},
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!(
|
|
||||||
"Failed to fetch shared_task: {:?}",
|
|
||||||
e
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
(HookTables::Tasks, _) => {
|
(HookTables::Tasks, _) => {
|
||||||
match Task::find_by_rowid(&db.pool, rowid).await {
|
match Task::find_by_rowid(&db.pool, rowid).await {
|
||||||
Ok(Some(task)) => RecordTypes::Task(task),
|
Ok(Some(task)) => RecordTypes::Task(task),
|
||||||
@@ -280,15 +253,6 @@ impl EventService {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
RecordTypes::SharedTask(task) => {
|
|
||||||
let patch = match hook.operation {
|
|
||||||
SqliteOperation::Insert => shared_task_patch::add(task),
|
|
||||||
SqliteOperation::Update => shared_task_patch::replace(task),
|
|
||||||
_ => shared_task_patch::replace(task),
|
|
||||||
};
|
|
||||||
msg_store_for_hook.push_patch(patch);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
RecordTypes::DeletedTask {
|
RecordTypes::DeletedTask {
|
||||||
task_id: Some(task_id),
|
task_id: Some(task_id),
|
||||||
..
|
..
|
||||||
@@ -297,14 +261,6 @@ impl EventService {
|
|||||||
msg_store_for_hook.push_patch(patch);
|
msg_store_for_hook.push_patch(patch);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
RecordTypes::DeletedSharedTask {
|
|
||||||
task_id: Some(task_id),
|
|
||||||
..
|
|
||||||
} => {
|
|
||||||
let patch = shared_task_patch::remove(*task_id);
|
|
||||||
msg_store_for_hook.push_patch(patch);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
RecordTypes::Scratch(scratch) => {
|
RecordTypes::Scratch(scratch) => {
|
||||||
let patch = match hook.operation {
|
let patch = match hook.operation {
|
||||||
SqliteOperation::Insert => scratch_patch::add(scratch),
|
SqliteOperation::Insert => scratch_patch::add(scratch),
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use db::models::{
|
use db::models::{
|
||||||
execution_process::ExecutionProcess, scratch::Scratch, shared_task::SharedTask as DbSharedTask,
|
execution_process::ExecutionProcess, scratch::Scratch, task::TaskWithAttemptStatus,
|
||||||
task::TaskWithAttemptStatus, task_attempt::TaskAttempt,
|
task_attempt::TaskAttempt,
|
||||||
};
|
};
|
||||||
use json_patch::{AddOperation, Patch, PatchOperation, RemoveOperation, ReplaceOperation};
|
use json_patch::{AddOperation, Patch, PatchOperation, RemoveOperation, ReplaceOperation};
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
@@ -48,44 +48,6 @@ pub mod task_patch {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Helper functions for creating shared task-specific patches
|
|
||||||
pub mod shared_task_patch {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
fn shared_task_path(task_id: Uuid) -> String {
|
|
||||||
format!(
|
|
||||||
"/shared_tasks/{}",
|
|
||||||
escape_pointer_segment(&task_id.to_string())
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add(task: &DbSharedTask) -> Patch {
|
|
||||||
Patch(vec![PatchOperation::Add(AddOperation {
|
|
||||||
path: shared_task_path(task.id)
|
|
||||||
.try_into()
|
|
||||||
.expect("Shared task path should be valid"),
|
|
||||||
value: serde_json::to_value(task).expect("Shared task serialization should not fail"),
|
|
||||||
})])
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn replace(task: &DbSharedTask) -> Patch {
|
|
||||||
Patch(vec![PatchOperation::Replace(ReplaceOperation {
|
|
||||||
path: shared_task_path(task.id)
|
|
||||||
.try_into()
|
|
||||||
.expect("Shared task path should be valid"),
|
|
||||||
value: serde_json::to_value(task).expect("Shared task serialization should not fail"),
|
|
||||||
})])
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn remove(task_id: Uuid) -> Patch {
|
|
||||||
Patch(vec![PatchOperation::Remove(RemoveOperation {
|
|
||||||
path: shared_task_path(task_id)
|
|
||||||
.try_into()
|
|
||||||
.expect("Shared task path should be valid"),
|
|
||||||
})])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Helper functions for creating execution process-specific patches
|
/// Helper functions for creating execution process-specific patches
|
||||||
pub mod execution_process_patch {
|
pub mod execution_process_patch {
|
||||||
 use super::*;
@@ -1,8 +1,6 @@
 use db::models::{
     execution_process::ExecutionProcess,
-    project::Project,
     scratch::Scratch,
-    shared_task::SharedTask,
     task::{Task, TaskWithAttemptStatus},
 };
 use futures::StreamExt;
@@ -33,37 +31,17 @@ impl EventService {
             .map(|task| (task.id.to_string(), serde_json::to_value(task).unwrap()))
             .collect();
 
-        let remote_project_id = Project::find_by_id(&self.db.pool, project_id)
-            .await?
-            .and_then(|project| project.remote_project_id);
-
-        let shared_tasks = if let Some(remote_project_id) = remote_project_id {
-            SharedTask::list_by_remote_project_id(&self.db.pool, remote_project_id).await?
-        } else {
-            Vec::new()
-        };
-        let shared_tasks_map: serde_json::Map<String, serde_json::Value> = shared_tasks
-            .into_iter()
-            .map(|task| (task.id.to_string(), serde_json::to_value(task).unwrap()))
-            .collect();
-
         let initial_patch = json!([
             {
                 "op": "replace",
                 "path": "/tasks",
                 "value": tasks_map
-            },
-            {
-                "op": "replace",
-                "path": "/shared_tasks",
-                "value": shared_tasks_map
             }
         ]);
         let initial_msg = LogMsg::JsonPatch(serde_json::from_value(initial_patch).unwrap());
 
         // Clone necessary data for the async filter
         let db_pool = self.db.pool.clone();
-        let remote_project_id_filter = remote_project_id;
 
         // Get filtered event stream
         let filtered_stream =
@@ -74,44 +52,6 @@ impl EventService {
                     Ok(LogMsg::JsonPatch(patch)) => {
                         // Filter events based on project_id
                         if let Some(patch_op) = patch.0.first() {
-                            if patch_op.path().starts_with("/shared_tasks/") {
-                                match patch_op {
-                                    json_patch::PatchOperation::Add(op) => {
-                                        if let Ok(shared_task) =
-                                            serde_json::from_value::<SharedTask>(
-                                                op.value.clone(),
-                                            )
-                                            && remote_project_id_filter
-                                                .map(|expected| {
-                                                    shared_task.remote_project_id == expected
-                                                })
-                                                .unwrap_or(false)
-                                        {
-                                            return Some(Ok(LogMsg::JsonPatch(patch)));
-                                        }
-                                    }
-                                    json_patch::PatchOperation::Replace(op) => {
-                                        if let Ok(shared_task) =
-                                            serde_json::from_value::<SharedTask>(
-                                                op.value.clone(),
-                                            )
-                                            && remote_project_id_filter
-                                                .map(|expected| {
-                                                    shared_task.remote_project_id == expected
-                                                })
-                                                .unwrap_or(false)
-                                        {
-                                            return Some(Ok(LogMsg::JsonPatch(patch)));
-                                        }
-                                    }
-                                    json_patch::PatchOperation::Remove(_) => {
-                                        // Forward removals; clients will ignore missing tasks
-                                        return Some(Ok(LogMsg::JsonPatch(patch)));
-                                    }
-                                    _ => {}
-                                }
-                                return None;
-                            }
                             // Check if this is a direct task patch (new format)
                             if patch_op.path().starts_with("/tasks/") {
                                 match patch_op {
@@ -165,19 +105,6 @@ impl EventService {
                                         return Some(Ok(LogMsg::JsonPatch(patch)));
                                     }
                                 }
-                                RecordTypes::SharedTask(shared_task) => {
-                                    if remote_project_id_filter
-                                        .map(|expected| {
-                                            shared_task.remote_project_id == expected
-                                        })
-                                        .unwrap_or(false)
-                                    {
-                                        return Some(Ok(LogMsg::JsonPatch(patch)));
-                                    }
-                                }
-                                RecordTypes::DeletedSharedTask { .. } => {
-                                    return Some(Ok(LogMsg::JsonPatch(patch)));
-                                }
                                 RecordTypes::TaskAttempt(attempt) => {
                                     // Check if this task_attempt belongs to a task in our project
                                     if let Ok(Some(task)) =
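The surviving event filter above boils down to a prefix test on the first JSON Patch operation's path. A minimal, self-contained sketch of that test using plain `serde_json` values instead of the `json_patch` types (the `is_task_patch` helper is illustrative only, not part of the codebase):

```rust
use serde_json::{json, Value};

// Accept only patches whose first operation targets the /tasks/ subtree,
// mirroring the `patch_op.path().starts_with("/tasks/")` check in the diff.
fn is_task_patch(patch: &Value) -> bool {
    patch
        .get(0)
        .and_then(|op| op.get("path"))
        .and_then(Value::as_str)
        .is_some_and(|path| path.starts_with("/tasks/"))
}

fn main() {
    let task_patch = json!([{ "op": "replace", "path": "/tasks/42", "value": {} }]);
    let other_patch = json!([{ "op": "replace", "path": "/config", "value": {} }]);
    assert!(is_task_patch(&task_patch));
    assert!(!is_task_patch(&other_patch));
}
```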
@@ -1,7 +1,6 @@
 use anyhow::Error as AnyhowError;
 use db::models::{
-    execution_process::ExecutionProcess, scratch::Scratch, shared_task::SharedTask, task::Task,
-    task_attempt::TaskAttempt,
+    execution_process::ExecutionProcess, scratch::Scratch, task::Task, task_attempt::TaskAttempt,
 };
 use serde::{Deserialize, Serialize};
 use sqlx::Error as SqlxError;
@@ -28,8 +27,6 @@ pub enum HookTables {
     TaskAttempts,
     #[strum(to_string = "execution_processes")]
     ExecutionProcesses,
-    #[strum(to_string = "shared_tasks")]
-    SharedTasks,
     #[strum(to_string = "scratch")]
     Scratch,
 }
@@ -40,7 +37,6 @@ pub enum RecordTypes {
     Task(Task),
     TaskAttempt(TaskAttempt),
     ExecutionProcess(ExecutionProcess),
-    SharedTask(SharedTask),
     Scratch(Scratch),
     DeletedTask {
         rowid: i64,
@@ -56,10 +52,6 @@ pub enum RecordTypes {
         task_attempt_id: Option<Uuid>,
         process_id: Option<Uuid>,
     },
-    DeletedSharedTask {
-        rowid: i64,
-        task_id: Option<Uuid>,
-    },
     DeletedScratch {
         rowid: i64,
         scratch_id: Option<Uuid>,
@@ -4,12 +4,9 @@ use std::time::Duration;
 
 use backon::{ExponentialBuilder, Retryable};
 use chrono::Duration as ChronoDuration;
-use remote::{
-    activity::ActivityResponse,
-    routes::tasks::{
-        AssignSharedTaskRequest, BulkSharedTasksResponse, CreateSharedTaskRequest,
-        DeleteSharedTaskRequest, SharedTaskResponse, UpdateSharedTaskRequest,
-    },
+use remote::routes::tasks::{
+    AssignSharedTaskRequest, CheckTasksRequest, CreateSharedTaskRequest, SharedTaskResponse,
+    UpdateSharedTaskRequest,
 };
 use reqwest::{Client, StatusCode};
 use serde::{Deserialize, Serialize};
@@ -580,14 +577,13 @@ impl RemoteClient {
     pub async fn delete_shared_task(
         &self,
         task_id: Uuid,
-        request: &DeleteSharedTaskRequest,
     ) -> Result<SharedTaskResponse, RemoteClientError> {
         let res = self
             .send(
                 reqwest::Method::DELETE,
                 &format!("/v1/tasks/{task_id}"),
                 true,
-                Some(request),
+                None::<&()>,
             )
             .await?;
         res.json::<SharedTaskResponse>()
@@ -595,27 +591,10 @@ impl RemoteClient {
             .map_err(|e| RemoteClientError::Serde(e.to_string()))
     }
 
-    /// Fetches activity events for a project.
-    pub async fn fetch_activity(
-        &self,
-        project_id: Uuid,
-        after: Option<i64>,
-        limit: u32,
-    ) -> Result<ActivityResponse, RemoteClientError> {
-        let mut path = format!("/v1/activity?project_id={project_id}&limit={limit}");
-        if let Some(seq) = after {
-            path.push_str(&format!("&after={seq}"));
-        }
-        self.get_authed(&path).await
-    }
-
-    /// Fetches bulk snapshot of shared tasks for a project.
-    pub async fn fetch_bulk_snapshot(
-        &self,
-        project_id: Uuid,
-    ) -> Result<BulkSharedTasksResponse, RemoteClientError> {
-        self.get_authed(&format!("/v1/tasks/bulk?project_id={project_id}"))
-            .await
-    }
+    /// Checks if shared tasks exist.
+    pub async fn check_tasks(&self, task_ids: Vec<Uuid>) -> Result<Vec<Uuid>, RemoteClientError> {
+        let request = CheckTasksRequest { task_ids };
+        self.post_authed("/v1/tasks/check", Some(&request)).await
+    }
     }
 }
 
@@ -1,51 +1,15 @@
 mod config;
-mod processor;
 mod publisher;
 mod status;
 
-use std::{
-    collections::{HashMap, HashSet},
-    io,
-    sync::{Arc, Mutex as StdMutex},
-    time::Duration,
-};
-
-use async_trait::async_trait;
-use axum::http::{HeaderName, HeaderValue, header::AUTHORIZATION};
 pub use config::ShareConfig;
-use db::{
-    DBService,
-    models::{
-        shared_task::{SharedActivityCursor, SharedTask, SharedTaskInput},
-        task::{SyncTask, Task},
-    },
-};
-use processor::ActivityProcessor;
-pub use publisher::SharePublisher;
-use remote::{
-    ClientMessage, ServerMessage,
-    db::{tasks::SharedTask as RemoteSharedTask, users::UserData as RemoteUserData},
-};
-use sqlx::{Executor, Sqlite, SqlitePool};
+pub use publisher::{SharePublisher, SharedTaskDetails};
 use thiserror::Error;
-use tokio::{
-    sync::{mpsc, oneshot},
-    task::JoinHandle,
-    time::{MissedTickBehavior, interval, sleep},
-};
-use tokio_tungstenite::tungstenite::Message as WsMessage;
-use url::Url;
-use utils::ws::{
-    WS_AUTH_REFRESH_INTERVAL, WsClient, WsConfig, WsError, WsHandler, WsResult, run_ws_client,
-};
 use uuid::Uuid;
 
 use crate::{
     RemoteClientError,
-    services::{
-        auth::AuthContext, git::GitServiceError, github::GitHubServiceError,
-        remote_client::RemoteClient,
-    },
+    services::{git::GitServiceError, github::GitHubServiceError},
 };
 
 #[derive(Debug, Error)]
@@ -58,8 +22,6 @@ pub enum ShareError {
     Serialization(#[from] serde_json::Error),
     #[error(transparent)]
     Url(#[from] url::ParseError),
-    #[error(transparent)]
-    WebSocket(#[from] WsError),
     #[error("share configuration missing: {0}")]
     MissingConfig(&'static str),
     #[error("task {0} not found")]
@@ -87,620 +49,3 @@ pub enum ShareError {
     #[error(transparent)]
     RemoteClientError(#[from] RemoteClientError),
 }
-
-const WS_BACKOFF_BASE_DELAY: Duration = Duration::from_secs(1);
-const WS_BACKOFF_MAX_DELAY: Duration = Duration::from_secs(30);
-
-struct Backoff {
-    current: Duration,
-}
-
-impl Backoff {
-    fn new() -> Self {
-        Self {
-            current: WS_BACKOFF_BASE_DELAY,
-        }
-    }
-
-    fn reset(&mut self) {
-        self.current = WS_BACKOFF_BASE_DELAY;
-    }
-
-    async fn wait(&mut self) {
-        let wait = self.current;
-        sleep(wait).await;
-        let doubled = wait.checked_mul(2).unwrap_or(WS_BACKOFF_MAX_DELAY);
-        self.current = std::cmp::min(doubled, WS_BACKOFF_MAX_DELAY);
-    }
-}
-
-struct ProjectWatcher {
-    shutdown: oneshot::Sender<()>,
-    join: JoinHandle<()>,
-}
-
-struct ProjectWatcherEvent {
-    project_id: Uuid,
-    result: Result<(), ShareError>,
-}
-
-pub struct RemoteSync {
-    db: DBService,
-    processor: ActivityProcessor,
-    config: ShareConfig,
-    auth_ctx: AuthContext,
-}
-
-impl RemoteSync {
-    pub fn spawn(db: DBService, config: ShareConfig, auth_ctx: AuthContext) -> RemoteSyncHandle {
-        tracing::info!(api = %config.api_base, "starting shared task synchronizer");
-        let remote_client = RemoteClient::new(config.api_base.as_str(), auth_ctx.clone())
-            .expect("failed to create remote client");
-        let processor =
-            ActivityProcessor::new(db.clone(), config.clone(), remote_client, auth_ctx.clone());
-        let sync = Self {
-            db,
-            processor,
-            config,
-            auth_ctx,
-        };
-        let (shutdown_tx, shutdown_rx) = oneshot::channel();
-        let join = tokio::spawn(async move {
-            if let Err(e) = sync.run(shutdown_rx).await {
-                tracing::error!(?e, "remote sync terminated unexpectedly");
-            }
-        });
-
-        RemoteSyncHandle::new(shutdown_tx, join)
-    }
-
-    pub async fn run(self, mut shutdown_rx: oneshot::Receiver<()>) -> Result<(), ShareError> {
-        let mut watchers: HashMap<Uuid, ProjectWatcher> = HashMap::new();
-        let (event_tx, mut event_rx) = mpsc::unbounded_channel();
-        let mut refresh_interval = interval(Duration::from_secs(5));
-        refresh_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
-
-        self.reconcile_watchers(&mut watchers, &event_tx).await?;
-
-        loop {
-            tokio::select! {
-                _ = &mut shutdown_rx => {
-                    tracing::info!("remote sync shutdown requested");
-                    for (project_id, watcher) in watchers.drain() {
-                        tracing::info!(%project_id, "stopping watcher due to shutdown");
-                        let _ = watcher.shutdown.send(());
-                        tokio::spawn(async move {
-                            if let Err(err) = watcher.join.await {
-                                tracing::debug!(?err, %project_id, "project watcher join failed during shutdown");
-                            }
-                        });
-                    }
-                    return Ok(());
-                }
-                Some(event) = event_rx.recv() => {
-                    match event.result {
-                        Ok(()) => {
-                            tracing::debug!(project_id = %event.project_id, "project watcher exited cleanly");
-                        }
-                        Err(err) => {
-                            tracing::warn!(project_id = %event.project_id, ?err, "project watcher terminated with error");
-                        }
-                    }
-                    watchers.remove(&event.project_id);
-                }
-                _ = refresh_interval.tick() => {
-                    self.reconcile_watchers(&mut watchers, &event_tx).await?;
-                }
-            }
-        }
-    }
-
-    async fn reconcile_watchers(
-        &self,
-        watchers: &mut HashMap<Uuid, ProjectWatcher>,
-        events_tx: &mpsc::UnboundedSender<ProjectWatcherEvent>,
-    ) -> Result<(), ShareError> {
-        let linked_projects = self.linked_remote_projects().await?;
-        let desired: HashSet<Uuid> = linked_projects.iter().copied().collect();
-
-        for project_id in linked_projects {
-            if let std::collections::hash_map::Entry::Vacant(e) = watchers.entry(project_id) {
-                tracing::info!(%project_id, "starting watcher for linked remote project");
-                let watcher = self
-                    .spawn_project_watcher(project_id, events_tx.clone())
-                    .await?;
-                e.insert(watcher);
-            }
-        }
-
-        let to_remove: Vec<Uuid> = watchers
-            .keys()
-            .copied()
-            .filter(|id| !desired.contains(id))
-            .collect();
-
-        for project_id in to_remove {
-            if let Some(watcher) = watchers.remove(&project_id) {
-                tracing::info!(%project_id, "remote project unlinked; shutting down watcher");
-                let _ = watcher.shutdown.send(());
-                tokio::spawn(async move {
-                    if let Err(err) = watcher.join.await {
-                        tracing::debug!(?err, %project_id, "project watcher join failed during teardown");
-                    }
-                });
-            }
-        }
-
-        Ok(())
-    }
-
-    async fn linked_remote_projects(&self) -> Result<Vec<Uuid>, ShareError> {
-        let rows = sqlx::query_scalar::<_, Uuid>(
-            r#"
-            SELECT remote_project_id
-            FROM projects
-            WHERE remote_project_id IS NOT NULL
-            "#,
-        )
-        .fetch_all(&self.db.pool)
-        .await?;
-
-        Ok(rows)
-    }
-
-    async fn spawn_project_watcher(
-        &self,
-        project_id: Uuid,
-        events_tx: mpsc::UnboundedSender<ProjectWatcherEvent>,
-    ) -> Result<ProjectWatcher, ShareError> {
-        let processor = self.processor.clone();
-        let config = self.config.clone();
-        let auth_ctx = self.auth_ctx.clone();
-        let remote_client = processor.remote_client();
-        let db = self.db.clone();
-        let (shutdown_tx, shutdown_rx) = oneshot::channel();
-
-        let join = tokio::spawn(async move {
-            let result = project_watcher_task(
-                db,
-                processor,
-                config,
-                auth_ctx,
-                remote_client,
-                project_id,
-                shutdown_rx,
-            )
-            .await;
-
-            let _ = events_tx.send(ProjectWatcherEvent { project_id, result });
-        });
-
-        Ok(ProjectWatcher {
-            shutdown: shutdown_tx,
-            join,
-        })
-    }
-}
-
-struct SharedWsHandler {
-    processor: ActivityProcessor,
-    close_tx: Option<oneshot::Sender<()>>,
-    remote_project_id: Uuid,
-}
-
-#[async_trait]
-impl WsHandler for SharedWsHandler {
-    async fn handle_message(&mut self, msg: WsMessage) -> Result<(), WsError> {
-        if let WsMessage::Text(txt) = msg {
-            match serde_json::from_str::<ServerMessage>(&txt) {
-                Ok(ServerMessage::Activity(event)) => {
-                    let seq = event.seq;
-                    if event.project_id != self.remote_project_id {
-                        tracing::warn!(
-                            expected = %self.remote_project_id,
-                            received = %event.project_id,
-                            "received activity for unexpected project via websocket"
-                        );
-                        return Ok(());
-                    }
-                    self.processor
-                        .process_event(event)
-                        .await
-                        .map_err(|err| WsError::Handler(Box::new(err)))?;
-
-                    tracing::debug!(seq, "processed remote activity");
-                }
-                Ok(ServerMessage::Error { message }) => {
-                    tracing::warn!(?message, "received WS error message");
-                    // Remote sends this error when client has lagged too far behind.
-                    // Return Err will trigger the `on_close` handler.
-                    return Err(WsError::Handler(Box::new(io::Error::other(format!(
-                        "remote websocket error: {message}"
-                    )))));
-                }
-                Err(err) => {
-                    tracing::error!(raw = %txt, ?err, "unable to parse WS message");
-                }
-            }
-        }
-        Ok(())
-    }
-
-    async fn on_close(&mut self) -> Result<(), WsError> {
-        tracing::info!("WebSocket closed, handler cleanup if needed");
-        if let Some(tx) = self.close_tx.take() {
-            let _ = tx.send(());
-        }
-        Ok(())
-    }
-}
-
-async fn spawn_shared_remote(
-    processor: ActivityProcessor,
-    remote_client: RemoteClient,
-    url: Url,
-    close_tx: oneshot::Sender<()>,
-    remote_project_id: Uuid,
-) -> Result<WsClient, ShareError> {
-    let remote_client_clone = remote_client.clone();
-    let ws_config = WsConfig {
-        url,
-        ping_interval: Some(std::time::Duration::from_secs(30)),
-        header_factory: Some(Arc::new(move || {
-            let remote_client_clone = remote_client_clone.clone();
-            Box::pin(async move {
-                match remote_client_clone.access_token().await {
-                    Ok(token) => build_ws_headers(&token),
-                    Err(error) => {
-                        tracing::warn!(
-                            ?error,
-                            "failed to obtain access token for websocket connection"
-                        );
-                        Err(WsError::MissingAuth)
-                    }
-                }
-            })
-        })),
-    };
-
-    let handler = SharedWsHandler {
-        processor,
-        close_tx: Some(close_tx),
-        remote_project_id,
-    };
-    let client = run_ws_client(handler, ws_config)
-        .await
-        .map_err(ShareError::from)?;
-    spawn_ws_auth_refresh_task(client.clone(), remote_client);
-
-    Ok(client)
-}
-
-async fn project_watcher_task(
-    db: DBService,
-    processor: ActivityProcessor,
-    config: ShareConfig,
-    auth_ctx: AuthContext,
-    remote_client: RemoteClient,
-    remote_project_id: Uuid,
-    mut shutdown_rx: oneshot::Receiver<()>,
-) -> Result<(), ShareError> {
-    let mut backoff = Backoff::new();
-
-    loop {
-        if auth_ctx.cached_profile().await.is_none() {
-            tracing::debug!(%remote_project_id, "waiting for authentication before syncing project");
-            tokio::select! {
-                _ = &mut shutdown_rx => return Ok(()),
-                _ = backoff.wait() => {}
-            }
-            continue;
-        }
-
-        let mut last_seq = SharedActivityCursor::get(&db.pool, remote_project_id)
-            .await?
-            .map(|cursor| cursor.last_seq);
-
-        match processor
-            .catch_up_project(remote_project_id, last_seq)
-            .await
-        {
-            Ok(seq) => {
-                last_seq = seq;
-            }
-            Err(ShareError::MissingAuth) => {
-                tracing::debug!(%remote_project_id, "missing auth during catch-up; retrying after backoff");
-                tokio::select! {
-                    _ = &mut shutdown_rx => return Ok(()),
-                    _ = backoff.wait() => {}
-                }
-                continue;
-            }
-            Err(err) => return Err(err),
-        }
-
-        let ws_url = match config.websocket_endpoint(remote_project_id, last_seq) {
-            Ok(url) => url,
-            Err(err) => return Err(ShareError::Url(err)),
-        };
-
-        let (close_tx, close_rx) = oneshot::channel();
-        let ws_connection = match spawn_shared_remote(
-            processor.clone(),
-            remote_client.clone(),
-            ws_url,
-            close_tx,
-            remote_project_id,
-        )
-        .await
-        {
-            Ok(conn) => {
-                backoff.reset();
-                conn
-            }
-            Err(ShareError::MissingAuth) => {
-                tracing::debug!(%remote_project_id, "missing auth during websocket connect; retrying");
-                tokio::select! {
-                    _ = &mut shutdown_rx => return Ok(()),
-                    _ = backoff.wait() => {}
-                }
-                continue;
-            }
-            Err(err) => {
-                tracing::error!(%remote_project_id, ?err, "failed to establish websocket; retrying");
-                tokio::select! {
-                    _ = &mut shutdown_rx => return Ok(()),
-                    _ = backoff.wait() => {}
-                }
-                continue;
-            }
-        };
-
-        tokio::select! {
-            _ = &mut shutdown_rx => {
-                tracing::info!(%remote_project_id, "shutdown signal received for project watcher");
-                if let Err(err) = ws_connection.close() {
-                    tracing::debug!(?err, %remote_project_id, "failed to close websocket during shutdown");
-                }
-                return Ok(());
-            }
-            res = close_rx => {
-                match res {
-                    Ok(()) => {
-                        tracing::info!(%remote_project_id, "project websocket closed; scheduling reconnect");
-                    }
-                    Err(_) => {
-                        tracing::warn!(%remote_project_id, "project websocket close signal dropped");
-                    }
-                }
-                if let Err(err) = ws_connection.close() {
-                    tracing::debug!(?err, %remote_project_id, "project websocket already closed when reconnecting");
-                }
-                tokio::select! {
-                    _ = &mut shutdown_rx => {
-                        tracing::info!(%remote_project_id, "shutdown received during reconnect wait");
-                        return Ok(());
-                    }
-                    _ = backoff.wait() => {}
-                }
-            }
-        }
-    }
-}
-
-fn build_ws_headers(access_token: &str) -> WsResult<Vec<(HeaderName, HeaderValue)>> {
-    let mut headers = Vec::new();
-    let value = format!("Bearer {access_token}");
-    let header = HeaderValue::from_str(&value).map_err(|err| WsError::Header(err.to_string()))?;
-    headers.push((AUTHORIZATION, header));
-    Ok(headers)
-}
-
-fn spawn_ws_auth_refresh_task(client: WsClient, remote_client: RemoteClient) {
-    tokio::spawn(async move {
-        let mut close_rx = client.subscribe_close();
-        loop {
-            match remote_client.access_token().await {
-                Ok(token) => {
-                    if let Err(err) = send_ws_auth_token(&client, token).await {
-                        tracing::warn!(
-                            ?err,
-                            "failed to send websocket auth token; stopping auth refresh"
-                        );
-                        break;
-                    }
-                }
-                Err(err) => {
-                    tracing::warn!(
-                        ?err,
-                        "failed to obtain access token for websocket auth refresh; stopping auth refresh"
-                    );
-                    break;
-                }
-            }
-
-            tokio::select! {
-                _ = close_rx.changed() => break,
-                _ = sleep(WS_AUTH_REFRESH_INTERVAL) => {}
-            }
-        }
-    });
-}
-
-async fn send_ws_auth_token(client: &WsClient, token: String) -> Result<(), ShareError> {
-    let payload = serde_json::to_string(&ClientMessage::AuthToken { token })?;
-    client
-        .send(WsMessage::Text(payload.into()))
-        .map_err(ShareError::from)
-}
-
-#[derive(Clone)]
-pub struct RemoteSyncHandle {
-    inner: Arc<RemoteSyncHandleInner>,
-}
-
-struct RemoteSyncHandleInner {
-    shutdown: StdMutex<Option<oneshot::Sender<()>>>,
-    join: StdMutex<Option<JoinHandle<()>>>,
-}
-
-impl RemoteSyncHandle {
-    fn new(shutdown: oneshot::Sender<()>, join: JoinHandle<()>) -> Self {
-        Self {
-            inner: Arc::new(RemoteSyncHandleInner {
-                shutdown: StdMutex::new(Some(shutdown)),
-                join: StdMutex::new(Some(join)),
-            }),
-        }
-    }
-
-    pub fn request_shutdown(&self) {
-        if let Some(tx) = self.inner.shutdown.lock().unwrap().take() {
-            let _ = tx.send(());
-        }
-    }
-
-    pub async fn shutdown(&self) {
-        self.request_shutdown();
-        let join = {
-            let mut guard = self.inner.join.lock().unwrap();
-            guard.take()
-        };
-
-        if let Some(join) = join
-            && let Err(err) = join.await
-        {
-            tracing::warn!(?err, "remote sync task join failed");
-        }
-    }
-}
-
-impl Drop for RemoteSyncHandleInner {
-    fn drop(&mut self) {
-        if let Some(tx) = self.shutdown.lock().unwrap().take() {
-            let _ = tx.send(());
-        }
-        if let Some(join) = self.join.lock().unwrap().take() {
-            join.abort();
-        }
-    }
-}
-
-pub(super) fn convert_remote_task(
-    task: &RemoteSharedTask,
-    user: Option<&RemoteUserData>,
-    last_event_seq: Option<i64>,
-) -> SharedTaskInput {
-    SharedTaskInput {
-        id: task.id,
-        remote_project_id: task.project_id,
-        title: task.title.clone(),
-        description: task.description.clone(),
-        status: status::from_remote(&task.status),
-        assignee_user_id: task.assignee_user_id,
-        assignee_first_name: user.and_then(|u| u.first_name.clone()),
-        assignee_last_name: user.and_then(|u| u.last_name.clone()),
-        assignee_username: user.and_then(|u| u.username.clone()),
-        version: task.version,
-        last_event_seq,
-        created_at: task.created_at,
-        updated_at: task.updated_at,
-    }
-}
-
-pub(super) async fn sync_local_task_for_shared_task<'e, E>(
-    executor: E,
-    shared_task: &SharedTask,
-    current_user_id: Option<uuid::Uuid>,
-    creator_user_id: Option<uuid::Uuid>,
-    project_id: Option<Uuid>,
-) -> Result<(), ShareError>
-where
-    E: Executor<'e, Database = Sqlite>,
-{
-    let Some(project_id) = project_id else {
-        return Ok(());
-    };
-
-    let create_task_if_not_exists = {
-        let assignee_is_current_user = matches!(
-            (shared_task.assignee_user_id.as_ref(), current_user_id.as_ref()),
-            (Some(assignee), Some(current)) if assignee == current
-        );
-        let creator_is_current_user = matches!((creator_user_id.as_ref(), current_user_id.as_ref()), (Some(creator), Some(current)) if creator == current);
-
-        assignee_is_current_user
-            && !(creator_is_current_user && SHARED_TASK_LINKING_LOCK.lock().unwrap().is_locked())
-    };
-
-    Task::sync_from_shared_task(
-        executor,
-        SyncTask {
-            shared_task_id: shared_task.id,
-            project_id,
-            title: shared_task.title.clone(),
-            description: shared_task.description.clone(),
-            status: shared_task.status.clone(),
-        },
-        create_task_if_not_exists,
-    )
-    .await?;
-
-    Ok(())
-}
-
-pub async fn link_shared_tasks_to_project(
-    pool: &SqlitePool,
-    current_user_id: Option<uuid::Uuid>,
-    project_id: Uuid,
-    remote_project_id: Uuid,
-) -> Result<(), ShareError> {
-    let tasks = SharedTask::list_by_remote_project_id(pool, remote_project_id).await?;
-
-    if tasks.is_empty() {
-        return Ok(());
-    }
-
-    for task in tasks {
-        sync_local_task_for_shared_task(pool, &task, current_user_id, None, Some(project_id))
-            .await?;
-    }
-
-    Ok(())
-}
-
-// Prevent duplicate local tasks from being created during task sharing.
-// The activity event handler can create a duplicate local task when it receives a shared task assigned to the current user.
-lazy_static::lazy_static! {
-    pub(super) static ref SHARED_TASK_LINKING_LOCK: StdMutex<SharedTaskLinkingLock> = StdMutex::new(SharedTaskLinkingLock::new());
-}
-
-#[derive(Debug)]
-pub(super) struct SharedTaskLinkingLock {
-    count: usize,
-}
-
-impl SharedTaskLinkingLock {
-    fn new() -> Self {
-        Self { count: 0 }
-    }
-
-    pub(super) fn is_locked(&self) -> bool {
-        self.count > 0
-    }
-
-    #[allow(dead_code)]
-    pub(super) fn guard(&mut self) -> SharedTaskLinkingGuard {
-        self.count += 1;
-        SharedTaskLinkingGuard
-    }
-}
-
-#[allow(dead_code)]
-pub(super) struct SharedTaskLinkingGuard;
-
-impl Drop for SharedTaskLinkingGuard {
-    fn drop(&mut self) {
-        SHARED_TASK_LINKING_LOCK.lock().unwrap().count -= 1;
-    }
-}
@@ -1,15 +1,8 @@
 use url::Url;
-use utils::ws::{WS_BULK_SYNC_THRESHOLD, derive_ws_url};
-use uuid::Uuid;
-
-const DEFAULT_ACTIVITY_LIMIT: u32 = 200;
 
 #[derive(Clone)]
 pub struct ShareConfig {
     pub api_base: Url,
-    pub websocket_base: Url,
-    pub activity_page_limit: u32,
-    pub bulk_sync_threshold: u32,
 }
 
 impl ShareConfig {
@@ -18,37 +11,7 @@ impl ShareConfig {
             .ok()
             .or_else(|| option_env!("VK_SHARED_API_BASE").map(|s| s.to_string()))?;
         let api_base = Url::parse(raw_base.trim()).ok()?;
-        let websocket_base = derive_ws_url(api_base.clone()).ok()?;
 
-        Some(Self {
-            api_base,
-            websocket_base,
-            activity_page_limit: DEFAULT_ACTIVITY_LIMIT,
-            bulk_sync_threshold: WS_BULK_SYNC_THRESHOLD,
-        })
-    }
-
-    pub fn activity_endpoint(&self) -> Result<Url, url::ParseError> {
-        self.api_base.join("/v1/activity")
-    }
-
-    pub fn bulk_tasks_endpoint(&self) -> Result<Url, url::ParseError> {
-        self.api_base.join("/v1/tasks/bulk")
-    }
-
-    pub fn websocket_endpoint(
-        &self,
-        project_id: Uuid,
-        cursor: Option<i64>,
-    ) -> Result<Url, url::ParseError> {
-        let mut url = self.websocket_base.join("/v1/ws")?;
-        {
-            let mut qp = url.query_pairs_mut();
-            qp.append_pair("project_id", &project_id.to_string());
-            if let Some(c) = cursor {
-                qp.append_pair("cursor", &c.to_string());
-            }
-        }
-        Ok(url)
+        Some(Self { api_base })
     }
 }
@@ -1,340 +0,0 @@
-use std::collections::HashSet;
-
-use db::{
-    DBService,
-    models::{
-        project::Project,
-        shared_task::{SharedActivityCursor, SharedTask, SharedTaskInput},
-        task::Task,
-    },
-};
-use remote::{
-    activity::ActivityEvent, db::tasks::SharedTaskActivityPayload,
-    routes::tasks::BulkSharedTasksResponse,
-};
-use sqlx::{Sqlite, Transaction};
-use uuid::Uuid;
-
-use super::{ShareConfig, ShareError, convert_remote_task, sync_local_task_for_shared_task};
-use crate::services::{auth::AuthContext, remote_client::RemoteClient};
-
-struct PreparedBulkTask {
-    input: SharedTaskInput,
-    creator_user_id: Option<uuid::Uuid>,
-    project_id: Option<Uuid>,
-}
-
-/// Processor for handling activity events and synchronizing shared tasks.
-#[derive(Clone)]
-pub struct ActivityProcessor {
-    db: DBService,
-    config: ShareConfig,
-    remote_client: RemoteClient,
-    auth_ctx: AuthContext,
-}
-
-impl ActivityProcessor {
-    pub fn new(
-        db: DBService,
-        config: ShareConfig,
-        remote_client: RemoteClient,
-        auth_ctx: AuthContext,
-    ) -> Self {
-        Self {
-            db,
-            config,
-            remote_client,
-            auth_ctx,
-        }
-    }
-
-    pub fn remote_client(&self) -> RemoteClient {
-        self.remote_client.clone()
-    }
-
-    pub async fn process_event(&self, event: ActivityEvent) -> Result<(), ShareError> {
-        let mut tx = self.db.pool.begin().await?;
-        match event.event_type.as_str() {
-            "task.deleted" => self.process_deleted_task_event(&mut tx, &event).await?,
-            _ => self.process_upsert_event(&mut tx, &event).await?,
-        }
-
-        SharedActivityCursor::upsert(tx.as_mut(), event.project_id, event.seq).await?;
-        tx.commit().await?;
-        Ok(())
-    }
-
-    /// Fetch and process activity events until caught up, falling back to bulk syncs when needed.
-    pub async fn catch_up_project(
-        &self,
-        remote_project_id: Uuid,
-        mut last_seq: Option<i64>,
-    ) -> Result<Option<i64>, ShareError> {
-        if last_seq.is_none() {
-            last_seq = self.bulk_sync(remote_project_id).await?;
-        }
-
-        loop {
-            let events = self.fetch_activity(remote_project_id, last_seq).await?;
-            if events.is_empty() {
-                break;
-            }
-
-            // Perform a bulk sync if we've fallen too far behind
-            if let Some(prev_seq) = last_seq
-                && let Some(newest) = events.last()
-                && newest.seq.saturating_sub(prev_seq) > self.config.bulk_sync_threshold as i64
-            {
-                last_seq = self.bulk_sync(remote_project_id).await?;
-                continue;
-            }
-
-            let page_len = events.len();
-            for ev in events {
-                if ev.project_id != remote_project_id {
-                    tracing::warn!(
-                        expected = %remote_project_id,
-                        received = %ev.project_id,
-                        "received activity for unexpected project; ignoring"
-                    );
-                    continue;
-                }
-                self.process_event(ev.clone()).await?;
-                last_seq = Some(ev.seq);
-            }
-
-            if page_len < (self.config.activity_page_limit as usize) {
-                break;
-            }
-        }
-
-        Ok(last_seq)
-    }
-
-    /// Fetch a page of activity events from the remote service.
-    async fn fetch_activity(
-        &self,
-        remote_project_id: Uuid,
-        after: Option<i64>,
-    ) -> Result<Vec<ActivityEvent>, ShareError> {
-        let resp = self
-            .remote_client
-            .fetch_activity(remote_project_id, after, self.config.activity_page_limit)
-            .await?;
-        Ok(resp.data)
-    }
-
-    async fn resolve_project(
-        &self,
-        task_id: Uuid,
-        remote_project_id: Uuid,
-    ) -> Result<Option<Project>, ShareError> {
-        if let Some(existing) = SharedTask::find_by_id(&self.db.pool, task_id).await?
-            && let Some(project) =
-                Project::find_by_remote_project_id(&self.db.pool, existing.remote_project_id)
-                    .await?
-        {
-            return Ok(Some(project));
-        }
-
-        if let Some(project) =
-            Project::find_by_remote_project_id(&self.db.pool, remote_project_id).await?
-        {
-            return Ok(Some(project));
-        }
-
-        Ok(None)
-    }
-
-    async fn process_upsert_event(
-        &self,
-        tx: &mut Transaction<'_, Sqlite>,
-        event: &ActivityEvent,
-    ) -> Result<(), ShareError> {
-        let Some(payload) = &event.payload else {
-            tracing::warn!(event_id = %event.event_id, "received activity event with empty payload");
-            return Ok(());
-        };
-
-        match serde_json::from_value::<SharedTaskActivityPayload>(payload.clone()) {
-            Ok(SharedTaskActivityPayload { task, user }) => {
-                let project = self.resolve_project(task.id, event.project_id).await?;
-                if project.is_none() {
-                    tracing::debug!(
-                        task_id = %task.id,
-                        remote_project_id = %task.project_id,
-                        "stored shared task without local project; awaiting link"
-                    );
-                }
-
-                let project_id = project.as_ref().map(|p| p.id);
-                let input = convert_remote_task(&task, user.as_ref(), Some(event.seq));
-                let shared_task = SharedTask::upsert(tx.as_mut(), input).await?;
-
-                let current_profile = self.auth_ctx.cached_profile().await;
-                let current_user_id = current_profile.as_ref().map(|p| p.user_id);
-                sync_local_task_for_shared_task(
-                    tx.as_mut(),
-                    &shared_task,
-                    current_user_id,
-                    task.creator_user_id,
-                    project_id,
-                )
-                .await?;
-            }
-            Err(error) => {
-                tracing::warn!(
-                    ?error,
-                    event_id = %event.event_id,
-                    "unrecognized shared task payload; skipping"
-                );
-            }
-        }
-
-        Ok(())
-    }
-
-    async fn process_deleted_task_event(
-        &self,
-        tx: &mut Transaction<'_, Sqlite>,
-        event: &ActivityEvent,
-    ) -> Result<(), ShareError> {
-        let Some(payload) = &event.payload else {
-            tracing::warn!(
-                event_id = %event.event_id,
-                "received delete event without payload; skipping"
-            );
-            return Ok(());
-        };
-
-        let SharedTaskActivityPayload { task, .. } =
-            match serde_json::from_value::<SharedTaskActivityPayload>(payload.clone()) {
-                Ok(payload) => payload,
-                Err(error) => {
-                    tracing::warn!(
-                        ?error,
-                        event_id = %event.event_id,
-                        "failed to parse deleted task payload; skipping"
-                    );
-                    return Ok(());
-                }
-            };
-
-        if let Some(local_task) = Task::find_by_shared_task_id(tx.as_mut(), task.id).await? {
-            Task::set_shared_task_id(tx.as_mut(), local_task.id, None).await?;
-        }
-
-        SharedTask::remove(tx.as_mut(), task.id).await?;
-        Ok(())
-    }
-
-    async fn bulk_sync(&self, remote_project_id: Uuid) -> Result<Option<i64>, ShareError> {
-        let bulk_resp = self.fetch_bulk_snapshot(remote_project_id).await?;
-        let latest_seq = bulk_resp.latest_seq;
-
-        let mut keep_ids = HashSet::new();
-        let mut replacements = Vec::new();
-
-        for payload in bulk_resp.tasks {
-            let project = self
-                .resolve_project(payload.task.id, remote_project_id)
-                .await?;
-
-            if project.is_none() {
-                tracing::debug!(
-                    task_id = %payload.task.id,
-                    remote_project_id = %payload.task.project_id,
-                    "storing shared task during bulk sync without local project"
-                );
-            }
-
-            let project_id = project.as_ref().map(|p| p.id);
-            keep_ids.insert(payload.task.id);
-            let input = convert_remote_task(&payload.task, payload.user.as_ref(), latest_seq);
-            replacements.push(PreparedBulkTask {
-                input,
-                creator_user_id: payload.task.creator_user_id,
-                project_id,
-            });
-        }
-
-        let mut stale: HashSet<Uuid> =
-            SharedTask::list_by_remote_project_id(&self.db.pool, remote_project_id)
-                .await?
-                .into_iter()
-                .filter_map(|task| {
-                    if keep_ids.contains(&task.id) {
-                        None
-                    } else {
-                        Some(task.id)
-                    }
-                })
-                .collect();
-
-        for deleted in bulk_resp.deleted_task_ids {
-            if !keep_ids.contains(&deleted) {
-                stale.insert(deleted);
-            }
-        }
-
-        let stale_vec: Vec<Uuid> = stale.into_iter().collect();
-        let current_profile = self.auth_ctx.cached_profile().await;
-        let current_user_id = current_profile.as_ref().map(|p| p.user_id);
-
-        let mut tx = self.db.pool.begin().await?;
-        self.remove_stale_tasks(&mut tx, &stale_vec).await?;
-
-        for PreparedBulkTask {
-            input,
-            creator_user_id,
-            project_id,
-        } in replacements
-        {
-            let shared_task = SharedTask::upsert(tx.as_mut(), input).await?;
-            sync_local_task_for_shared_task(
-                tx.as_mut(),
-                &shared_task,
-                current_user_id,
-                creator_user_id,
-                project_id,
-            )
-            .await?;
-        }
-
-        if let Some(seq) = latest_seq {
-            SharedActivityCursor::upsert(tx.as_mut(), remote_project_id, seq).await?;
-        }
-
-        tx.commit().await?;
-        Ok(latest_seq)
-    }
-
-    async fn remove_stale_tasks(
-        &self,
-        tx: &mut Transaction<'_, Sqlite>,
-        ids: &[Uuid],
-    ) -> Result<(), ShareError> {
-        if ids.is_empty() {
-            return Ok(());
-        }
-
-        for id in ids {
-            if let Some(local_task) = Task::find_by_shared_task_id(tx.as_mut(), *id).await? {
-                Task::set_shared_task_id(tx.as_mut(), local_task.id, None).await?;
-            }
-        }
-
-        SharedTask::remove_many(tx.as_mut(), ids).await?;
-        Ok(())
-    }
-
-    async fn fetch_bulk_snapshot(
-        &self,
-        remote_project_id: Uuid,
-    ) -> Result<BulkSharedTasksResponse, ShareError> {
-        Ok(self
-            .remote_client
-            .fetch_bulk_snapshot(remote_project_id)
-            .await?)
-    }
-}
@@ -1,14 +1,16 @@
 use db::{
     DBService,
-    models::{project::Project, shared_task::SharedTask, task::Task},
+    models::{
+        project::Project,
+        task::{CreateTask, Task, TaskStatus},
+    },
 };
 use remote::routes::tasks::{
-    AssignSharedTaskRequest, CreateSharedTaskRequest, DeleteSharedTaskRequest, SharedTaskResponse,
-    UpdateSharedTaskRequest,
+    AssignSharedTaskRequest, CreateSharedTaskRequest, SharedTaskResponse, UpdateSharedTaskRequest,
 };
 use uuid::Uuid;
 
-use super::{ShareError, convert_remote_task, status};
+use super::{ShareError, status};
 use crate::services::remote_client::RemoteClient;
 
 #[derive(Clone)]
@@ -17,6 +19,15 @@ pub struct SharePublisher {
     client: RemoteClient,
 }
 
+#[derive(Debug, Clone, serde::Deserialize, serde::Serialize, ts_rs::TS)]
+pub struct SharedTaskDetails {
+    pub id: Uuid,
+    pub project_id: Uuid,
+    pub title: String,
+    pub description: Option<String>,
+    pub status: TaskStatus,
+}
+
 impl SharePublisher {
     pub fn new(db: DBService, client: RemoteClient) -> Self {
         Self { db, client }
@@ -47,7 +58,7 @@ impl SharePublisher {
 
         let remote_task = self.client.create_shared_task(&payload).await?;
 
-        self.sync_shared_task(&task, &remote_task).await?;
+        Task::set_shared_task_id(&self.db.pool, task.id, Some(remote_task.task.id)).await?;
         Ok(remote_task.task.id)
     }
 
@@ -61,16 +72,12 @@ impl SharePublisher {
             title: Some(task.title.clone()),
             description: task.description.clone(),
             status: Some(status::to_remote(&task.status)),
-            version: None,
         };
 
-        let remote_task = self
-            .client
+        self.client
             .update_shared_task(shared_task_id, &payload)
             .await?;
 
-        self.sync_shared_task(task, &remote_task).await?;
-
         Ok(())
     }
 
@@ -84,10 +91,9 @@ impl SharePublisher {
 
     pub async fn assign_shared_task(
         &self,
-        shared_task: &SharedTask,
+        shared_task_id: Uuid,
         new_assignee_user_id: Option<String>,
-        version: Option<i64>,
-    ) -> Result<SharedTask, ShareError> {
+    ) -> Result<SharedTaskResponse, ShareError> {
         let assignee_uuid = new_assignee_user_id
             .map(|id| uuid::Uuid::parse_str(&id))
            .transpose()
@@ -95,62 +101,104 @@ impl SharePublisher {
 
         let payload = AssignSharedTaskRequest {
             new_assignee_user_id: assignee_uuid,
-            version,
         };
 
-        let SharedTaskResponse {
-            task: remote_task,
-            user,
-        } = self
+        let response = self
             .client
-            .assign_shared_task(shared_task.id, &payload)
+            .assign_shared_task(shared_task_id, &payload)
             .await?;
 
-        let input = convert_remote_task(&remote_task, user.as_ref(), None);
-        let record = SharedTask::upsert(&self.db.pool, input).await?;
-        Ok(record)
+        Ok(response)
     }
 
     pub async fn delete_shared_task(&self, shared_task_id: Uuid) -> Result<(), ShareError> {
-        let shared_task = SharedTask::find_by_id(&self.db.pool, shared_task_id)
-            .await?
-            .ok_or(ShareError::TaskNotFound(shared_task_id))?;
-
-        let payload = DeleteSharedTaskRequest {
-            version: Some(shared_task.version),
-        };
-
-        self.client
-            .delete_shared_task(shared_task.id, &payload)
-            .await?;
+        self.client.delete_shared_task(shared_task_id).await?;
 
         if let Some(local_task) =
-            Task::find_by_shared_task_id(&self.db.pool, shared_task.id).await?
+            Task::find_by_shared_task_id(&self.db.pool, shared_task_id).await?
         {
             Task::set_shared_task_id(&self.db.pool, local_task.id, None).await?;
         }
 
-        SharedTask::remove(&self.db.pool, shared_task.id).await?;
         Ok(())
     }
 
-    async fn sync_shared_task(
+    pub async fn link_shared_task(
        &self,
-        task: &Task,
-        remote_task: &SharedTaskResponse,
-    ) -> Result<(), ShareError> {
-        let SharedTaskResponse {
-            task: remote_task,
-            user,
-        } = remote_task;
+        shared_task: SharedTaskDetails,
+    ) -> Result<Option<Task>, ShareError> {
+        if let Some(task) = Task::find_by_shared_task_id(&self.db.pool, shared_task.id).await? {
+            return Ok(Some(task));
+        }
 
-        Project::find_by_id(&self.db.pool, task.project_id)
-            .await?
-            .ok_or(ShareError::ProjectNotFound(task.project_id))?;
+        if !self.shared_task_exists(shared_task.id).await? {
+            return Ok(None);
+        }
+
+        let create_task = CreateTask::from_shared_task(
+            shared_task.project_id,
+            shared_task.title,
+            shared_task.description,
+            shared_task.status,
+            shared_task.id,
+        );
+
+        let id = Uuid::new_v4();
+        let task = Task::create(&self.db.pool, &create_task, id).await?;
+
+        Ok(Some(task))
+    }
+
+    async fn shared_task_exists(&self, shared_task_id: Uuid) -> Result<bool, ShareError> {
+        Ok(self
+            .client
+            .check_tasks(vec![shared_task_id])
+            .await?
+            .contains(&shared_task_id))
+    }
+
+    pub async fn cleanup_shared_tasks(&self) -> Result<(), ShareError> {
+        let tasks = Task::find_all_shared(&self.db.pool).await?;
+        if tasks.is_empty() {
+            return Ok(());
+        }
+
+        let shared_ids: Vec<Uuid> = tasks.iter().filter_map(|t| t.shared_task_id).collect();
+
+        if shared_ids.is_empty() {
+            return Ok(());
+        }
+
+        // Verify in chunks of 100 to avoid hitting payload limits
+        for chunk in shared_ids.chunks(100) {
+            let existing_ids = match self.client.check_tasks(chunk.to_vec()).await {
+                Ok(ids) => ids,
+                Err(e) => {
+                    tracing::warn!("Failed to check task existence: {}", e);
+                    continue;
+                }
+            };
+
+            let existing_set: std::collections::HashSet<Uuid> = existing_ids.into_iter().collect();
+
+            let missing_ids: Vec<Uuid> = chunk
+                .iter()
+                .filter(|id| !existing_set.contains(id))
+                .cloned()
+                .collect();
+
+            if !missing_ids.is_empty() {
+                tracing::info!(
+                    "Unlinking ({}) shared tasks that no longer exist in remote",
+                    missing_ids.len()
+                );
+
+                if let Err(e) = Task::batch_unlink_shared_tasks(&self.db.pool, &missing_ids).await {
+                    tracing::error!("Failed to batch unlink tasks: {}", e);
+                }
+            }
+        }
 
-        let input = convert_remote_task(remote_task, user.as_ref(), None);
-        SharedTask::upsert(&self.db.pool, input).await?;
-        Task::set_shared_task_id(&self.db.pool, task.id, Some(remote_task.id)).await?;
         Ok(())
     }
 }
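The `cleanup_shared_tasks` method added above is a chunk-and-diff loop: batch the locally linked IDs, ask the server which of them still exist, and unlink the rest. A runnable sketch of just that loop, with the `/v1/tasks/check` round trip faked by an in-memory set so it executes offline (assumes the `uuid` crate with the `v4` feature; the `check_tasks` function here is a stand-in, not the real client method):

```rust
use std::collections::HashSet;

use uuid::Uuid;

// Stand-in for RemoteClient::check_tasks: returns the subset of `ids`
// that the remote still knows about.
fn check_tasks(ids: &[Uuid], remote: &HashSet<Uuid>) -> Vec<Uuid> {
    ids.iter().filter(|id| remote.contains(id)).copied().collect()
}

fn main() {
    let remote: HashSet<Uuid> = (0..150).map(|_| Uuid::new_v4()).collect();
    let mut local: Vec<Uuid> = remote.iter().copied().collect();
    local.push(Uuid::new_v4()); // one stale link the remote no longer has

    let mut stale: Vec<Uuid> = Vec::new();
    // Verify in chunks of 100, matching the payload-limit guard in the diff.
    for chunk in local.chunks(100) {
        let existing: HashSet<Uuid> = check_tasks(chunk, &remote).into_iter().collect();
        stale.extend(chunk.iter().filter(|id| !existing.contains(*id)).copied());
    }

    assert_eq!(stale.len(), 1); // only the invented ID gets unlinked
}
```

Failures of a single chunk are logged and skipped rather than aborting the sweep, so a transient network error never mass-unlinks healthy tasks.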
@@ -10,13 +10,3 @@ pub(super) fn to_remote(status: &TaskStatus) -> RemoteTaskStatus {
         TaskStatus::Cancelled => RemoteTaskStatus::Cancelled,
     }
 }
-
-pub(super) fn from_remote(status: &RemoteTaskStatus) -> TaskStatus {
-    match status {
-        RemoteTaskStatus::Todo => TaskStatus::Todo,
-        RemoteTaskStatus::InProgress => TaskStatus::InProgress,
-        RemoteTaskStatus::InReview => TaskStatus::InReview,
-        RemoteTaskStatus::Done => TaskStatus::Done,
-        RemoteTaskStatus::Cancelled => TaskStatus::Cancelled,
-    }
-}
@@ -28,15 +28,12 @@ tokio = { workspace = true }
 futures = "0.3.31"
 tokio-stream = { version = "0.1.17", features = ["sync"] }
 tokio-tungstenite = { version = "0.28", features = ["rustls-tls-webpki-roots", "url"] }
-async-stream = "0.3"
 shellexpand = "3.1.1"
 which = "8.0.0"
 similar = "2"
 git2 = "0.18"
 dirs = "5.0"
-async-trait = { workspace = true }
 thiserror = { workspace = true }
-dashmap = "6.1"
 url = "2.5"
 reqwest = { version = "0.12", features = ["json"] }
 sqlx = { version = "0.8.6", default-features = false, features = ["postgres", "uuid", "chrono"] }
@@ -2,6 +2,7 @@ use chrono::{DateTime, Utc};
 use jsonwebtoken::dangerous::insecure_decode;
 use serde::Deserialize;
 use thiserror::Error;
+use uuid::Uuid;
 
 #[derive(Debug, Error)]
 pub enum TokenClaimsError {
@@ -11,6 +12,10 @@ pub enum TokenClaimsError {
     MissingExpiration,
     #[error("invalid `exp` value `{0}`")]
     InvalidExpiration(i64),
+    #[error("missing `sub` claim in token")]
+    MissingSubject,
+    #[error("invalid `sub` value: {0}")]
+    InvalidSubject(String),
 }
 
 #[derive(Debug, Deserialize)]
@@ -18,9 +23,21 @@ struct ExpClaim {
     exp: Option<i64>,
 }
 
+#[derive(Debug, Deserialize)]
+struct SubClaim {
+    sub: Option<String>,
+}
+
 /// Extract the expiration timestamp from a JWT without verifying its signature.
 pub fn extract_expiration(token: &str) -> Result<DateTime<Utc>, TokenClaimsError> {
     let data = insecure_decode::<ExpClaim>(token)?;
     let exp = data.claims.exp.ok_or(TokenClaimsError::MissingExpiration)?;
     DateTime::from_timestamp(exp, 0).ok_or(TokenClaimsError::InvalidExpiration(exp))
 }
+
+/// Extract the subject (user ID) from a JWT without verifying its signature.
+pub fn extract_subject(token: &str) -> Result<Uuid, TokenClaimsError> {
+    let data = insecure_decode::<SubClaim>(token)?;
+    let sub = data.claims.sub.ok_or(TokenClaimsError::MissingSubject)?;
+    Uuid::parse_str(&sub).map_err(|_| TokenClaimsError::InvalidSubject(sub))
+}
@@ -20,7 +20,6 @@ pub mod stream_lines;
 pub mod text;
 pub mod tokio;
 pub mod version;
-pub mod ws;
 
 /// Cache for WSL2 detection result
 static WSL2_CACHE: OnceLock<bool> = OnceLock::new();
@@ -1,229 +0,0 @@
-use std::{sync::Arc, time::Duration};
-
-use axum::http::{self, HeaderName, HeaderValue};
-use futures::future::BoxFuture;
-use futures_util::{SinkExt, StreamExt};
-use thiserror::Error;
-use tokio::sync::{mpsc, watch};
-use tokio_tungstenite::{
-    connect_async,
-    tungstenite::{client::IntoClientRequest, protocol::Message},
-};
-use url::Url;
-
-/// Interval between authentication refresh probes for websocket connections.
-pub const WS_AUTH_REFRESH_INTERVAL: Duration = Duration::from_secs(30);
-/// Grace period to tolerate expired tokens while a websocket client refreshes its session.
-pub const WS_TOKEN_EXPIRY_GRACE: Duration = Duration::from_secs(120);
-/// Maximum time allowed between REST catch-up and websocket connection establishment.
-pub const WS_MAX_DELAY_BETWEEN_CATCHUP_AND_WS: Duration = WS_TOKEN_EXPIRY_GRACE;
-/// Maximum backlog accepted before forcing clients to do a full bulk sync.
-pub const WS_BULK_SYNC_THRESHOLD: u32 = 500;
-
-pub type HeaderFuture = BoxFuture<'static, WsResult<Vec<(HeaderName, HeaderValue)>>>;
-pub type HeaderFactory = Arc<dyn Fn() -> HeaderFuture + Send + Sync>;
-
-#[derive(Error, Debug)]
-pub enum WsError {
-    #[error("WebSocket connection error: {0}")]
-    Connection(#[from] tokio_tungstenite::tungstenite::Error),
-
-    #[error("IO error: {0}")]
-    Io(#[from] std::io::Error),
-
-    #[error("Send error: {0}")]
-    Send(String),
-
-    #[error("Handler error: {0}")]
-    Handler(#[from] Box<dyn std::error::Error + Send + Sync>),
-
-    #[error("Shutdown channel closed unexpectedly")]
-    ShutdownChannelClosed,
-
-    #[error("failed to build websocket request: {0}")]
-    Request(#[from] http::Error),
-
-    #[error("failed to prepare websocket headers: {0}")]
-    Header(String),
-
-    #[error("share authentication missing or expired")]
-    MissingAuth,
-}
-
-pub type WsResult<T> = std::result::Result<T, WsError>;
-
-#[async_trait::async_trait]
-pub trait WsHandler: Send + Sync + 'static {
-    /// Called when a new `Message` is received.
-    async fn handle_message(&mut self, msg: Message) -> WsResult<()>;
-
-    /// Called when the socket is closed (either remote closed or error).
-    async fn on_close(&mut self) -> WsResult<()>;
-}
-
-pub struct WsConfig {
-    pub url: Url,
-    pub ping_interval: Option<Duration>,
-    pub header_factory: Option<HeaderFactory>,
-}
-
-#[derive(Clone)]
-pub struct WsClient {
-    msg_tx: mpsc::UnboundedSender<Message>,
-    cancelation_token: watch::Sender<()>,
-}
-
-impl WsClient {
-    pub fn send(&self, msg: Message) -> WsResult<()> {
-        self.msg_tx
-            .send(msg)
-            .map_err(|e| WsError::Send(format!("WebSocket send error: {e}")))
-    }
-
-    pub fn close(&self) -> WsResult<()> {
-        self.cancelation_token
-            .send(())
-            .map_err(|_| WsError::ShutdownChannelClosed)
-    }
-
-    pub fn subscribe_close(&self) -> watch::Receiver<()> {
-        self.cancelation_token.subscribe()
-    }
-}
-
-/// Launches a WebSocket connection with read/write tasks.
-/// Returns a `WsClient` which you can use to send messages or request shutdown.
-pub async fn run_ws_client<H>(mut handler: H, config: WsConfig) -> WsResult<WsClient>
-where
-    H: WsHandler,
-{
-    let (msg_tx, mut msg_rx) = mpsc::unbounded_channel();
-    let (cancel_tx, cancel_rx) = watch::channel(());
-    let task_tx = msg_tx.clone();
-
-    tokio::spawn(async move {
-        tracing::debug!(url = %config.url, "WebSocket connecting");
-        let request = match build_request(&config).await {
-            Ok(req) => req,
-            Err(err) => {
-                tracing::error!(?err, "failed to build websocket request");
-                return;
-            }
-        };
-
-        match connect_async(request).await {
-            Ok((ws_stream, _resp)) => {
-                tracing::info!("WebSocket connected");
-
-                let (mut ws_sink, mut ws_stream) = ws_stream.split();
-
-                let ping_task = if let Some(interval) = config.ping_interval {
-                    let mut intv = tokio::time::interval(interval);
-                    let mut cancel_rx2 = cancel_rx.clone();
-                    let ping_tx2 = task_tx.clone();
-                    Some(tokio::spawn(async move {
-                        loop {
-                            tokio::select! {
-                                _ = intv.tick() => {
-                                    if ping_tx2.send(Message::Ping(Vec::new().into())).is_err() { break; }
-                                }
-                                _ = cancel_rx2.changed() => { break; }
-                            }
-                        }
-                    }))
-                } else {
-                    None
-                };
-
-                loop {
-                    let mut cancel_rx2 = cancel_rx.clone();
-                    tokio::select! {
-                        maybe = msg_rx.recv() => {
-                            match maybe {
-                                Some(msg) => {
-                                    if let Err(err) = ws_sink.send(msg).await {
-                                        tracing::error!("WebSocket send failed: {:?}", err);
-                                        break;
-                                    }
-                                }
-                                None => {
-                                    tracing::debug!("WebSocket msg_rx closed");
-                                    break;
-                                }
-                            }
-                        }
-
-                        incoming = ws_stream.next() => {
-                            match incoming {
-                                Some(Ok(msg)) => {
-                                    if let Err(err) = handler.handle_message(msg).await {
-                                        tracing::error!("WsHandler failed: {:?}", err);
-                                        break;
-                                    }
-                                }
-                                Some(Err(err)) => {
-                                    tracing::error!("WebSocket stream error: {:?}", err);
-                                    break;
-                                }
-                                None => {
-                                    tracing::debug!("WebSocket stream ended");
-                                    break;
-                                }
-                            }
-                        }
-
-                        _ = cancel_rx2.changed() => {
-                            tracing::debug!("WebSocket shutdown requested");
-                            break;
-                        }
-                    }
-                }
-
-                if let Err(err) = handler.on_close().await {
-                    tracing::error!("WsHandler on_close failed: {:?}", err);
-                }
-
-                if let Err(err) = ws_sink.close().await {
-                    tracing::error!("WebSocket close failed: {:?}", err);
-                }
-
-                if let Some(task) = ping_task {
-                    task.abort();
-                }
-            }
-            Err(err) => {
-                tracing::error!("WebSocket connect error: {:?}", err);
-            }
-        }
-
-        tracing::info!("WebSocket client task exiting");
-    });
-
-    Ok(WsClient {
-        msg_tx,
-        cancelation_token: cancel_tx,
-    })
-}
-
-async fn build_request(config: &WsConfig) -> WsResult<http::Request<()>> {
-    let mut request = config.url.clone().into_client_request()?;
-    if let Some(factory) = &config.header_factory {
-        let headers = factory().await?;
-        for (name, value) in headers {
-            request.headers_mut().insert(name, value);
-        }
-    }
-
-    Ok(request)
-}
-
-pub fn derive_ws_url(mut base: Url) -> Result<Url, url::ParseError> {
-    match base.scheme() {
-        "https" => base.set_scheme("wss").unwrap(),
-        "http" => base.set_scheme("ws").unwrap(),
-        _ => {
-            return Err(url::ParseError::RelativeUrlWithoutBase);
-        }
-    }
-    Ok(base)
-}
@@ -44,6 +44,8 @@
     "@sentry/react": "^9.34.0",
     "@sentry/vite-plugin": "^3.5.0",
     "@tailwindcss/typography": "^0.5.16",
+    "@tanstack/electric-db-collection": "^0.2.6",
+    "@tanstack/react-db": "^0.1.50",
     "@tanstack/react-devtools": "^0.8.0",
     "@tanstack/react-form": "^1.23.8",
     "@tanstack/react-form-devtools": "^0.1.8",
@@ -78,6 +80,7 @@
     "tailwind-merge": "^2.2.0",
     "tailwindcss-animate": "^1.0.7",
     "vibe-kanban-web-companion": "^0.0.4",
+    "wa-sqlite": "^1.0.0",
     "zod": "^4.1.12",
     "zustand": "^4.5.4"
   },
@@ -5,8 +5,8 @@ import {
   useContext,
   useEffect,
   useMemo,
-  useState,
 } from 'react';
+import { useQuery, useQueryClient } from '@tanstack/react-query';
 import {
   type Config,
   type Environment,
@@ -63,47 +63,26 @@ interface UserSystemProviderProps {
 }
 
 export function UserSystemProvider({ children }: UserSystemProviderProps) {
-  // Split state for performance - independent re-renders
-  const [config, setConfig] = useState<Config | null>(null);
-  const [environment, setEnvironment] = useState<Environment | null>(null);
-  const [profiles, setProfiles] = useState<Record<
-    string,
-    ExecutorConfig
-  > | null>(null);
-  const [capabilities, setCapabilities] = useState<Record<
-    string,
-    BaseAgentCapability[]
-  > | null>(null);
-  const [analyticsUserId, setAnalyticsUserId] = useState<string | null>(null);
-  const [loginStatus, setLoginStatus] = useState<LoginStatus | null>(null);
-  const [loading, setLoading] = useState(true);
-
-  useEffect(() => {
-    const loadUserSystem = async () => {
-      try {
-        const userSystemInfo: UserSystemInfo = await configApi.getConfig();
-        setConfig(userSystemInfo.config);
-        setEnvironment(userSystemInfo.environment);
-        setAnalyticsUserId(userSystemInfo.analytics_user_id);
-        setLoginStatus(userSystemInfo.login_status);
-        setProfiles(
-          userSystemInfo.executors as Record<string, ExecutorConfig> | null
-        );
-        setCapabilities(
-          (userSystemInfo.capabilities || null) as Record<
-            string,
-            BaseAgentCapability[]
-          > | null
-        );
-      } catch (err) {
-        console.error('Error loading user system:', err);
-      } finally {
-        setLoading(false);
-      }
-    };
-
-    loadUserSystem();
-  }, []);
+  const queryClient = useQueryClient();
+
+  const { data: userSystemInfo, isLoading } = useQuery({
+    queryKey: ['user-system'],
+    queryFn: configApi.getConfig,
+    staleTime: 5 * 60 * 1000, // 5 minutes
+  });
+
+  const config = userSystemInfo?.config || null;
+  const environment = userSystemInfo?.environment || null;
+  const analyticsUserId = userSystemInfo?.analytics_user_id || null;
+  const loginStatus = userSystemInfo?.login_status || null;
+  const profiles =
+    (userSystemInfo?.executors as Record<string, ExecutorConfig> | null) ||
    null;
+  const capabilities =
+    (userSystemInfo?.capabilities as Record<
+      string,
+      BaseAgentCapability[]
+    > | null) || null;
 
   // Sync language with i18n when config changes
   useEffect(() => {
@@ -112,9 +91,18 @@ export function UserSystemProvider({ children }: UserSystemProviderProps) {
     }
   }, [config?.language]);
 
-  const updateConfig = useCallback((updates: Partial<Config>) => {
-    setConfig((prev) => (prev ? { ...prev, ...updates } : null));
-  }, []);
+  const updateConfig = useCallback(
+    (updates: Partial<Config>) => {
+      queryClient.setQueryData<UserSystemInfo>(['user-system'], (old) => {
+        if (!old) return old;
+        return {
+          ...old,
+          config: { ...old.config, ...updates },
+        };
+      });
+    },
+    [queryClient]
+  );
 
   const saveConfig = useCallback(async (): Promise<boolean> => {
     if (!config) return false;
@@ -129,48 +117,66 @@ export function UserSystemProvider({ children }: UserSystemProviderProps) {
 
   const updateAndSaveConfig = useCallback(
     async (updates: Partial<Config>): Promise<boolean> => {
-      setLoading(true);
-      const newConfig: Config | null = config
-        ? { ...config, ...updates }
-        : null;
+      if (!config) return false;
+      const newConfig = { ...config, ...updates };
+      updateConfig(updates);
 
       try {
-        if (!newConfig) return false;
         const saved = await configApi.saveConfig(newConfig);
-        setConfig(saved);
+        queryClient.setQueryData<UserSystemInfo>(['user-system'], (old) => {
+          if (!old) return old;
+          return {
+            ...old,
+            config: saved,
+          };
+        });
         return true;
       } catch (err) {
        console.error('Error saving config:', err);
+        queryClient.invalidateQueries({ queryKey: ['user-system'] });
        return false;
-      } finally {
-        setLoading(false);
      }
    },
-    [config]
+    [config, queryClient, updateConfig]
  );
 
   const reloadSystem = useCallback(async () => {
-    setLoading(true);
-    try {
-      const userSystemInfo: UserSystemInfo = await configApi.getConfig();
-      setConfig(userSystemInfo.config);
-      setEnvironment(userSystemInfo.environment);
-      setAnalyticsUserId(userSystemInfo.analytics_user_id);
-      setLoginStatus(userSystemInfo.login_status);
-      setProfiles(
-        userSystemInfo.executors as Record<string, ExecutorConfig> | null
-      );
-      setCapabilities(
-        (userSystemInfo.capabilities || null) as Record<
-          string,
-          BaseAgentCapability[]
-        > | null
-      );
-    } catch (err) {
-      console.error('Error reloading user system:', err);
-    } finally {
-      setLoading(false);
-    }
-  }, []);
+    await queryClient.invalidateQueries({ queryKey: ['user-system'] });
+  }, [queryClient]);
+
+  const setEnvironment = useCallback(
+    (env: Environment | null) => {
+      queryClient.setQueryData<UserSystemInfo>(['user-system'], (old) => {
+        if (!old || !env) return old;
+        return { ...old, environment: env };
+      });
+    },
+    [queryClient]
+  );
+
+  const setProfiles = useCallback(
+    (newProfiles: Record<string, ExecutorConfig> | null) => {
+      queryClient.setQueryData<UserSystemInfo>(['user-system'], (old) => {
+        if (!old || !newProfiles) return old;
+        return {
+          ...old,
+          executors: newProfiles as unknown as UserSystemInfo['executors'],
+        };
+      });
+    },
+    [queryClient]
+  );
+
+  const setCapabilities = useCallback(
+    (newCapabilities: Record<string, BaseAgentCapability[]> | null) => {
+      queryClient.setQueryData<UserSystemInfo>(['user-system'], (old) => {
+        if (!old || !newCapabilities) return old;
+        return { ...old, capabilities: newCapabilities };
+      });
+    },
+    [queryClient]
  );
 
   // Memoize context value to prevent unnecessary re-renders
   const value = useMemo<UserSystemContextType>(
@@ -196,7 +202,7 @@ export function UserSystemProvider({ children }: UserSystemProviderProps) {
       setProfiles,
       setCapabilities,
       reloadSystem,
-      loading,
+      loading: isLoading,
     }),
     [
       config,
@@ -209,7 +215,10 @@ export function UserSystemProvider({ children }: UserSystemProviderProps) {
       saveConfig,
       updateAndSaveConfig,
       reloadSystem,
-      loading,
+      isLoading,
+      setEnvironment,
+      setProfiles,
+      setCapabilities,
     ]
   );
 
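With the provider rewritten this way, every piece of user-system state is derived from the single `['user-system']` cache entry, so a sign-in change needs only one invalidation to propagate to every consumer. A hedged sketch of what that enables — the helper and its call site are hypothetical; only the query key comes from the diff above:

```ts
import type { QueryClient } from '@tanstack/react-query';

// Hypothetical helper: after a login/logout event, re-fetch config,
// environment, login status, executors and capabilities in one round trip —
// equivalent to calling reloadSystem() inside the provider above.
export async function refreshUserSystem(queryClient: QueryClient) {
  await queryClient.invalidateQueries({ queryKey: ['user-system'] });
}
```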
@@ -107,7 +107,7 @@ const OAuthDialogImpl = NiceModal.create<NoProps>(() => {
         setState({ type: 'success', profile: statusData.profile });
         setTimeout(() => {
           modal.resolve(statusData.profile);
-          modal.hide();
+          modal.remove();
         }, 1500);
       }
   }, [statusData, isPolling, modal, reloadSystem]);
@@ -129,7 +129,7 @@ const OAuthDialogImpl = NiceModal.create<NoProps>(() => {
     }
     setState({ type: 'select' });
     modal.resolve(null);
-    modal.hide();
+    modal.remove();
   };
 
   const handleBack = () => {
@@ -93,10 +93,9 @@ const ReassignDialogImpl = NiceModal.create<ReassignDialogProps>(
     mutationFn: async (newAssignee: string) =>
       tasksApi.reassign(sharedTask.id, {
         new_assignee_user_id: newAssignee,
-        version: sharedTask.version,
       }),
-    onSuccess: (result) => {
-      modal.resolve(result.shared_task);
+    onSuccess: (shared_task) => {
+      modal.resolve(shared_task);
       modal.hide();
     },
     onError: (error) => {
@@ -33,13 +33,14 @@ const ShareDialogImpl = NiceModal.create<ShareDialogProps>(({ task }) => {
   const { isSignedIn } = useAuth();
   const { project } = useProject();
   const { shareTask } = useTaskMutations(task.project_id);
+  const { reset: resetShareTask } = shareTask;
 
   const [shareError, setShareError] = useState<string | null>(null);
 
   useEffect(() => {
-    shareTask.reset();
+    resetShareTask();
     setShareError(null);
-  }, [task.id, shareTask]);
+  }, [task.id, resetShareTask]);
 
   const handleClose = () => {
     modal.resolve(shareTask.isSuccess);
@@ -68,7 +69,13 @@ const ShareDialogImpl = NiceModal.create<ShareDialogProps>(({ task }) => {
       modal.hide();
     } catch (err) {
       if (getStatus(err) === 401) {
-        void OAuthDialog.show();
+        // Hide this dialog first so OAuthDialog appears on top
+        modal.hide();
+        const result = await OAuthDialog.show();
+        // If user successfully authenticated, re-show this dialog
+        if (result) {
+          void ShareDialog.show({ task });
+        }
         return;
       }
       setShareError(getReadableError(err));
@@ -10,6 +10,7 @@ import { attemptsApi } from '@/lib/api';
 import type { SharedTaskRecord } from '@/hooks/useProjectTasks';
 import { TaskCardHeader } from './TaskCardHeader';
 import { useTranslation } from 'react-i18next';
+import { useAuth } from '@/hooks';
 
 type Task = TaskWithAttemptStatus;
 
@@ -35,6 +36,7 @@ export function TaskCard({
   const { t } = useTranslation('tasks');
   const navigate = useNavigateWithSearch();
   const [isNavigatingToParent, setIsNavigatingToParent] = useState(false);
+  const { isSignedIn } = useAuth();
 
   const handleClick = useCallback(() => {
     onViewDetails(task);
@@ -87,8 +89,9 @@ export function TaskCard({
       onClick={handleClick}
       isOpen={isOpen}
       forwardedRef={localRef}
+      dragDisabled={(!!sharedTask || !!task.shared_task_id) && !isSignedIn}
       className={
-        sharedTask
+        sharedTask || task.shared_task_id
           ? 'relative overflow-hidden pl-5 before:absolute before:left-0 before:top-0 before:bottom-0 before:w-[3px] before:bg-card-foreground before:content-[""]'
           : undefined
       }
@@ -42,11 +42,12 @@ export function ActionsDropdown({
   const { projectId } = useProject();
   const openInEditor = useOpenInEditor(attempt?.id);
   const navigate = useNavigate();
-  const { userId } = useAuth();
+  const { userId, isSignedIn } = useAuth();
 
   const hasAttemptActions = Boolean(attempt);
   const hasTaskActions = Boolean(task);
   const isShared = Boolean(sharedTask);
+  const canEditShared = (!isShared && !task?.shared_task_id) || isSignedIn;
 
   const handleEdit = (e: React.MouseEvent) => {
     e.stopPropagation();
@@ -248,14 +249,17 @@ export function ActionsDropdown({
           {t('actionsMenu.stopShare')}
         </DropdownMenuItem>
         <DropdownMenuSeparator />
-        <DropdownMenuItem disabled={!projectId} onClick={handleEdit}>
+        <DropdownMenuItem
+          disabled={!projectId || !canEditShared}
+          onClick={handleEdit}
+        >
           {t('common:buttons.edit')}
         </DropdownMenuItem>
         <DropdownMenuItem disabled={!projectId} onClick={handleDuplicate}>
           {t('actionsMenu.duplicate')}
         </DropdownMenuItem>
         <DropdownMenuItem
-          disabled={!projectId}
+          disabled={!projectId || !canEditShared}
           onClick={handleDelete}
           className="text-destructive"
         >
@@ -1,12 +1,14 @@
 import { useQuery } from '@tanstack/react-query';
 import { oauthApi } from '@/lib/api';
+import { useEffect } from 'react';
+import { useAuth } from '@/hooks';
 
 interface UseAuthStatusOptions {
   enabled: boolean;
 }
 
 export function useAuthStatus(options: UseAuthStatusOptions) {
-  return useQuery({
+  const query = useQuery({
     queryKey: ['auth', 'status'],
     queryFn: () => oauthApi.status(),
     enabled: options.enabled,
@@ -14,4 +16,13 @@ export function useAuthStatus(options: UseAuthStatusOptions) {
     retry: 3,
     staleTime: 0, // Always fetch fresh data when enabled
   });
+
+  const { isSignedIn } = useAuth();
+  useEffect(() => {
+    if (query) {
+      query.refetch();
+    }
+  }, [isSignedIn, query]);
+
+  return query;
 }
frontend/src/hooks/auth/useCurrentUser.ts (new file, +23)
@@ -0,0 +1,23 @@
+import { useQuery, useQueryClient } from '@tanstack/react-query';
+import { oauthApi } from '@/lib/api';
+import { useEffect } from 'react';
+import { useAuth } from '@/hooks/auth/useAuth';
+
+export function useCurrentUser() {
+  const { isSignedIn } = useAuth();
+  const query = useQuery({
+    queryKey: ['auth', 'user'],
+    queryFn: () => oauthApi.getCurrentUser(),
+    retry: 2,
+    staleTime: 5 * 60 * 1000, // 5 minutes
+    refetchOnWindowFocus: false,
+    refetchOnReconnect: false,
+  });
+
+  const queryClient = useQueryClient();
+  useEffect(() => {
+    queryClient.invalidateQueries({ queryKey: ['auth', 'user'] });
+  }, [queryClient, isSignedIn]);
+
+  return query;
+}
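`useCurrentUser` caches the remote user record under `['auth', 'user']` for five minutes and invalidates it whenever the sign-in flag flips. A hedged usage sketch — the component is hypothetical, but `user_id` is the same field the auto-link hook further down reads:

```tsx
import { useCurrentUser } from '@/hooks';

// Hypothetical component: flag shared tasks assigned to the signed-in user.
export function AssignedToYouHint({
  assigneeUserId,
}: {
  assigneeUserId: string | null;
}) {
  const { data: currentUser, isLoading } = useCurrentUser();

  if (isLoading || !currentUser) return null;
  return assigneeUserId === currentUser.user_id ? (
    <span>Assigned to you</span>
  ) : null;
}
```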
@@ -22,6 +22,7 @@ export { useTaskAttempts } from './useTaskAttempts';
 export { useAuth } from './auth/useAuth';
 export { useAuthMutations } from './auth/useAuthMutations';
 export { useAuthStatus } from './auth/useAuthStatus';
+export { useCurrentUser } from './auth/useCurrentUser';
 export { useUserOrganizations } from './useUserOrganizations';
 export { useOrganizationSelection } from './useOrganizationSelection';
 export { useOrganizationMembers } from './useOrganizationMembers';
frontend/src/hooks/useAssigneeUserName.ts (new file, +38)
@@ -0,0 +1,38 @@
+import { useQuery } from '@tanstack/react-query';
+import { getSharedTaskAssignees } from '@/lib/remoteApi';
+import type { SharedTask, UserData } from 'shared/types';
+import { useEffect, useMemo } from 'react';
+
+interface UseAssigneeUserNamesOptions {
+  projectId: string | undefined;
+  sharedTasks?: SharedTask[];
+}
+
+export function useAssigneeUserNames(options: UseAssigneeUserNamesOptions) {
+  const { projectId, sharedTasks } = options;
+
+  const { data: assignees, refetch } = useQuery<UserData[], Error>({
+    queryKey: ['project', 'assignees', projectId],
+    queryFn: () => getSharedTaskAssignees(projectId!),
+    enabled: Boolean(projectId),
+    staleTime: 5 * 60 * 1000, // 5 minutes
+  });
+
+  const assignedUserIds = useMemo(() => {
+    if (!sharedTasks) return null;
+    return Array.from(
+      new Set(sharedTasks.map((task) => task.assignee_user_id))
+    );
+  }, [sharedTasks]);
+
+  // Refetch when assignee ids change
+  useEffect(() => {
+    if (!assignedUserIds) return;
+    refetch();
+  }, [assignedUserIds, refetch]);
+
+  return {
+    assignees,
+    refetchAssignees: refetch,
+  };
+}
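A hedged sketch of a call site — `useProjectTasks` below consumes the hook the same way; the `useAssigneeDisplayName` wrapper itself is hypothetical and only touches the `UserData` fields (`user_id`, `username`, `first_name`, `last_name`) that this PR's mapping code already relies on:

```ts
import { useAssigneeUserNames } from '@/hooks/useAssigneeUserName';
import type { SharedTask } from 'shared/types';

// Hypothetical wrapper: resolve a display name for each assignee id.
export function useAssigneeDisplayName(
  remoteProjectId: string | undefined,
  sharedTasks: SharedTask[]
) {
  const { assignees } = useAssigneeUserNames({
    projectId: remoteProjectId,
    sharedTasks,
  });

  return (userId: string | null) => {
    const user = assignees?.find((a) => a.user_id === userId);
    if (!user) return null;
    return (
      user.username ??
      [user.first_name, user.last_name].filter(Boolean).join(' ')
    );
  };
}
```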
frontend/src/hooks/useAutoLinkSharedTasks.ts (new file, +85)
@@ -0,0 +1,85 @@
+import { useEffect, useRef } from 'react';
+import { useCurrentUser } from '@/hooks/auth/useCurrentUser';
+import { useTaskMutations } from '@/hooks/useTaskMutations';
+import type { SharedTaskRecord } from './useProjectTasks';
+import type { SharedTaskDetails, TaskWithAttemptStatus } from 'shared/types';
+
+interface UseAutoLinkSharedTasksProps {
+  sharedTasksById: Record<string, SharedTaskRecord>;
+  localTasksById: Record<string, TaskWithAttemptStatus>;
+  referencedSharedIds: Set<string>;
+  isLoading: boolean;
+  remoteProjectId?: string;
+  projectId?: string;
+}
+
+/**
+ * Automatically links shared tasks that are assigned to the current user
+ * and don't have a corresponding local task yet.
+ */
+export function useAutoLinkSharedTasks({
+  sharedTasksById,
+  localTasksById,
+  referencedSharedIds,
+  isLoading,
+  remoteProjectId,
+  projectId,
+}: UseAutoLinkSharedTasksProps): void {
+  const { data: currentUser } = useCurrentUser();
+  const { linkSharedTaskToLocal } = useTaskMutations(projectId);
+  const linkingInProgress = useRef<Set<string>>(new Set());
+  const failedTasks = useRef<Set<string>>(new Set());
+
+  useEffect(() => {
+    if (!currentUser?.user_id || isLoading || !remoteProjectId || !projectId) {
+      return;
+    }
+
+    const tasksToLink = Object.values(sharedTasksById).filter((task) => {
+      const isAssignedToCurrentUser =
+        task.assignee_user_id === currentUser.user_id;
+      const hasLocalTask = Boolean(localTasksById[task.id]);
+      const isAlreadyLinked = referencedSharedIds.has(task.id);
+      const isBeingLinked = linkingInProgress.current.has(task.id);
+      const hasFailed = failedTasks.current.has(task.id);
+
+      return (
+        isAssignedToCurrentUser &&
+        !hasLocalTask &&
+        !isAlreadyLinked &&
+        !isBeingLinked &&
+        !hasFailed
+      );
+    });
+
+    tasksToLink.forEach((task) => {
+      linkingInProgress.current.add(task.id);
+      linkSharedTaskToLocal.mutate(
+        {
+          id: task.id,
+          project_id: projectId,
+          title: task.title,
+          description: task.description,
+          status: task.status,
+        } as SharedTaskDetails,
+        {
+          onError: () => {
+            failedTasks.current.add(task.id);
+          },
+          onSettled: () => {
+            linkingInProgress.current.delete(task.id);
+          },
+        }
+      );
+    });
+  }, [
+    currentUser?.user_id,
+    sharedTasksById,
+    localTasksById,
+    referencedSharedIds,
+    isLoading,
+    remoteProjectId,
+    projectId,
+    linkSharedTaskToLocal,
+  ]);
+}
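The two refs above are what make the auto-link idempotent: they persist across re-renders without triggering them, so each shared task id is attempted at most once concurrently and never retried after a failure. The same bookkeeping in miniature — a standalone sketch with a hypothetical `start` callback, not code from this PR:

```ts
import { useRef } from 'react';

// Minimal sketch of the in-flight/failed dedup pattern used above.
export function useAttemptOnce() {
  const inFlight = useRef<Set<string>>(new Set());
  const failed = useRef<Set<string>>(new Set());

  return (id: string, start: (id: string) => Promise<void>) => {
    // Skip ids that are currently running or have already failed once.
    if (inFlight.current.has(id) || failed.current.has(id)) return;
    inFlight.current.add(id);
    void start(id)
      .catch(() => failed.current.add(id))
      .finally(() => inFlight.current.delete(id));
  };
}
```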
@@ -1,20 +1,19 @@
 import { useCallback, useMemo } from 'react';
 import { useJsonPatchWsStream } from './useJsonPatchWsStream';
+import { useAuth } from '@/hooks';
 import { useProject } from '@/contexts/ProjectContext';
+import { useLiveQuery, eq, isNull } from '@tanstack/react-db';
+import { sharedTasksCollection } from '@/lib/electric/sharedTasksCollection';
+import { useAssigneeUserNames } from './useAssigneeUserName';
+import { useAutoLinkSharedTasks } from './useAutoLinkSharedTasks';
 import type {
   SharedTask,
   TaskStatus,
   TaskWithAttemptStatus,
 } from 'shared/types';
 
-export type SharedTaskRecord = Omit<
-  SharedTask,
-  'version' | 'last_event_seq'
-> & {
-  version: number;
-  last_event_seq: number | null;
-  created_at: string | Date;
-  updated_at: string | Date;
+export type SharedTaskRecord = SharedTask & {
+  remote_project_id: string;
   assignee_first_name?: string | null;
   assignee_last_name?: string | null;
   assignee_username?: string | null;
@@ -22,7 +21,6 @@ export type SharedTaskRecord = Omit<
 
 type TasksState = {
   tasks: Record<string, TaskWithAttemptStatus>;
-  shared_tasks: Record<string, SharedTaskRecord>;
 };
 
 export interface UseProjectTasksResult {
@@ -43,14 +41,12 @@ export interface UseProjectTasksResult {
  */
 export const useProjectTasks = (projectId: string): UseProjectTasksResult => {
   const { project } = useProject();
+  const { isSignedIn } = useAuth();
   const remoteProjectId = project?.remote_project_id;
 
-  const endpoint = `/api/tasks/stream/ws?project_id=${encodeURIComponent(projectId)}&remote_project_id=${encodeURIComponent(remoteProjectId ?? 'null')}`;
+  const endpoint = `/api/tasks/stream/ws?project_id=${encodeURIComponent(projectId)}`;
 
-  const initialData = useCallback(
-    (): TasksState => ({ tasks: {}, shared_tasks: {} }),
-    []
-  );
+  const initialData = useCallback((): TasksState => ({ tasks: {} }), []);
 
   const { data, isConnected, error } = useJsonPatchWsStream(
     endpoint,
@@ -58,12 +54,67 @@ export const useProjectTasks = (projectId: string): UseProjectTasksResult => {
     initialData
   );
 
-  const localTasksById = useMemo(() => data?.tasks ?? {}, [data?.tasks]);
-  const sharedTasksById = useMemo(
-    () => data?.shared_tasks ?? {},
-    [data?.shared_tasks]
+  const sharedTasksQuery = useLiveQuery(
+    useCallback(
+      (q) => {
+        if (!remoteProjectId || !isSignedIn) {
+          return undefined;
+        }
+        return q
+          .from({ sharedTasks: sharedTasksCollection })
+          .where(({ sharedTasks }) =>
+            eq(sharedTasks.project_id, remoteProjectId)
+          )
+          .where(({ sharedTasks }) => isNull(sharedTasks.deleted_at));
+      },
+      [remoteProjectId, isSignedIn]
+    ),
+    [remoteProjectId, isSignedIn]
   );
+
+  const sharedTasksList = useMemo(
+    () => sharedTasksQuery.data ?? [],
+    [sharedTasksQuery.data]
+  );
+
+  const localTasksById = useMemo(() => data?.tasks ?? {}, [data?.tasks]);
+
+  const referencedSharedIds = useMemo(
+    () =>
+      new Set(
+        Object.values(localTasksById)
+          .map((task) => task.shared_task_id)
+          .filter((id): id is string => Boolean(id))
+      ),
+    [localTasksById]
+  );
+
+  const { assignees } = useAssigneeUserNames({
+    projectId: remoteProjectId || undefined,
+    sharedTasks: sharedTasksList,
+  });
+
+  const sharedTasksById = useMemo(() => {
+    if (!sharedTasksList) return {};
+    const map: Record<string, SharedTaskRecord> = {};
+    const list = Array.isArray(sharedTasksList) ? sharedTasksList : [];
+    for (const task of list) {
+      const assignee =
+        task.assignee_user_id && assignees
+          ? assignees.find((a) => a.user_id === task.assignee_user_id)
+          : null;
+      map[task.id] = {
+        ...task,
+        status: task.status,
+        remote_project_id: task.project_id,
+        assignee_first_name: assignee?.first_name ?? null,
+        assignee_last_name: assignee?.last_name ?? null,
+        assignee_username: assignee?.username ?? null,
+      };
+    }
+    return map;
+  }, [sharedTasksList, assignees]);
 
   const { tasks, tasksById, tasksByStatus } = useMemo(() => {
     const merged: Record<string, TaskWithAttemptStatus> = { ...localTasksById };
     const byStatus: Record<TaskStatus, TaskWithAttemptStatus[]> = {
@@ -104,12 +155,6 @@ export const useProjectTasks = (projectId: string): UseProjectTasksResult => {
       cancelled: [],
     };
 
-    const referencedSharedIds = new Set(
-      Object.values(localTasksById)
-        .map((task) => task.shared_task_id)
-        .filter((id): id is string => Boolean(id))
-    );
-
     Object.values(sharedTasksById).forEach((sharedTask) => {
       const hasLocal =
         Boolean(localTasksById[sharedTask.id]) ||
@@ -130,10 +175,20 @@ export const useProjectTasks = (projectId: string): UseProjectTasksResult => {
     });
 
     return grouped;
-  }, [localTasksById, sharedTasksById]);
+  }, [localTasksById, sharedTasksById, referencedSharedIds]);
 
   const isLoading = !data && !error; // until first snapshot
 
+  // Auto-link shared tasks assigned to current user
+  useAutoLinkSharedTasks({
+    sharedTasksById,
+    localTasksById,
+    referencedSharedIds,
+    isLoading,
+    remoteProjectId: project?.remote_project_id || undefined,
+    projectId,
+  });
+
   return {
     tasks,
     tasksById,
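The `sharedTasksCollection` the live query above reads from is imported but never shown in this diff. A minimal sketch of what such a collection could look like with `@tanstack/electric-db-collection`, assuming the Electric shape endpoint sits behind the shared API base (`VITE_VK_SHARED_API_BASE`) and syncs a `shared_tasks` table — the URL, params, and option names follow the library's documented `electricCollectionOptions` API, not this repository's actual file:

```ts
import { createCollection } from '@tanstack/react-db';
import { electricCollectionOptions } from '@tanstack/electric-db-collection';
import type { SharedTask } from 'shared/types';

// Sketch of '@/lib/electric/sharedTasksCollection' (assumed, not from this
// diff): each row of the Postgres `shared_tasks` table is streamed into a
// local Tanstack DB collection keyed by task id.
export const sharedTasksCollection = createCollection(
  electricCollectionOptions({
    id: 'shared_tasks',
    shapeOptions: {
      // Assumed endpoint: the auth proxy in front of Electric's shape API.
      url: `${import.meta.env.VITE_VK_SHARED_API_BASE}/v1/shape`,
      params: { table: 'shared_tasks' },
    },
    getKey: (task: SharedTask) => task.id,
  })
);
```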
@@ -9,16 +9,18 @@ import type {
   Task,
   TaskWithAttemptStatus,
   UpdateTask,
+  SharedTaskDetails,
 } from 'shared/types';
+import { taskKeys } from './useTask';
 
 export function useTaskMutations(projectId?: string) {
   const queryClient = useQueryClient();
   const navigate = useNavigateWithSearch();
 
   const invalidateQueries = (taskId?: string) => {
-    queryClient.invalidateQueries({ queryKey: ['tasks', projectId] });
+    queryClient.invalidateQueries({ queryKey: taskKeys.all });
     if (taskId) {
-      queryClient.invalidateQueries({ queryKey: ['task', taskId] });
+      queryClient.invalidateQueries({ queryKey: taskKeys.byId(taskId) });
     }
   };
 
@@ -107,6 +109,19 @@ export function useTaskMutations(projectId?: string) {
     },
   });
 
+  const linkSharedTaskToLocal = useMutation({
+    mutationFn: (data: SharedTaskDetails) => tasksApi.linkToLocal(data),
+    onSuccess: (createdTask: Task | null) => {
+      console.log('Linked shared task to local successfully', createdTask);
+      if (createdTask) {
+        invalidateQueries(createdTask.id);
+      }
+    },
+    onError: (err) => {
+      console.error('Failed to link shared task to local:', err);
+    },
+  });
+
   return {
     createTask,
     createAndStart,
@@ -114,5 +129,6 @@ export function useTaskMutations(projectId?: string) {
     deleteTask,
     shareTask,
     stopShareTask: unshareSharedTask,
+    linkSharedTaskToLocal,
   };
 }
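The `invalidateQueries` helper above now routes through a `taskKeys` factory imported from `./useTask`, which this diff does not show. A plausible shape, labeled as an assumption — the point is that every task query shares one `all` prefix, so a single invalidation covers both list and detail caches:

```ts
// Assumed shape of the `taskKeys` factory in './useTask' (not shown in this
// diff); the exact key layout is a guess consistent with the calls above.
export const taskKeys = {
  all: ['tasks'] as const,
  byId: (taskId: string) => ['tasks', 'byId', taskId] as const,
};
```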
Some files were not shown because too many files have changed in this diff.