-
Notifications
You must be signed in to change notification settings - Fork 7
feat: bookmarks events handler partial recovery #793
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -59,16 +59,27 @@ pub async fn del(user_id: PubkyId, bookmark_id: String) -> Result<(), EventProce | |
| } | ||
|
|
||
| pub async fn sync_del(user_id: PubkyId, bookmark_id: String) -> Result<(), EventProcessorError> { | ||
| let deleted_bookmark_info = Bookmark::del_from_graph(&user_id, &bookmark_id).await?; | ||
| // Ensure the bookmark exists in the graph before proceeding | ||
| let (post_id, author_id) = match deleted_bookmark_info { | ||
| // 1. Read target from graph WITHOUT deleting the edge | ||
| let (post_id, author_id) = match Bookmark::get_target_from_graph(&user_id, &bookmark_id).await? | ||
| { | ||
| Some(info) => info, | ||
| None => return Err(EventProcessorError::SkipIndexing), | ||
| }; | ||
|
|
||
| // 2. Guard counter decrement: only decrement if bookmark still exists in Redis index | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The guard is designed to prevent double-decrement on retry, but it only handles the case where both the Redis index removal and the counter decrement succeed before the graph delete fails. If |
||
| let existed_in_index = Bookmark::get_from_index(&author_id, &post_id, &user_id) | ||
| .await? | ||
| .is_some(); | ||
|
|
||
| // 3. Redis cleanup (idempotent) | ||
| Bookmark::del_from_index(&user_id, &post_id, &author_id).await?; | ||
| // Update user counts | ||
| UserCounts::decrement(&user_id, "bookmarks", None).await?; | ||
|
|
||
| if existed_in_index { | ||
| UserCounts::decrement(&user_id, "bookmarks", None).await?; | ||
| } | ||
|
|
||
| // 4. Graph deletion LAST — ensures data survives for retry if Redis ops fail | ||
| Bookmark::del_from_graph(&user_id, &bookmark_id).await?; | ||
|
|
||
| Ok(()) | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,151 @@ | ||
| use super::utils::find_post_bookmark; | ||
| use crate::event_processor::utils::watcher::WatcherTest; | ||
| use crate::event_processor::{ | ||
| users::utils::find_user_counts, utils::watcher::HomeserverHashIdPath, | ||
| }; | ||
| use anyhow::Result; | ||
| use nexus_common::models::event::EventProcessorError; | ||
| use nexus_common::models::post::Bookmark; | ||
| use nexus_common::models::user::UserCounts; | ||
| use nexus_watcher::events::handlers; | ||
| use pubky::Keypair; | ||
| use pubky_app_specs::{ | ||
| post_uri_builder, traits::HashId, PubkyAppBookmark, PubkyAppPost, PubkyAppUser, PubkyId, | ||
| }; | ||
|
|
||
| /// Simulate a retry of sync_del after a partial failure where Redis cleanup | ||
| /// succeeded but graph deletion failed. On retry, the counter must NOT be | ||
| /// decremented again (guarded by the Redis index check). | ||
| #[tokio_shared_rt::test(shared)] | ||
| async fn test_bookmark_del_retry_no_double_decrement() -> Result<()> { | ||
| let mut test = WatcherTest::setup().await?; | ||
|
|
||
| // Create user + post + bookmark through the normal watcher flow | ||
| let user_kp = Keypair::random(); | ||
| let user = PubkyAppUser { | ||
| bio: Some("test_bookmark_del_retry_no_double_decrement".to_string()), | ||
| image: None, | ||
| links: None, | ||
| name: "Watcher:Bookmark:DelRetry:User".to_string(), | ||
| status: None, | ||
| }; | ||
| let user_id = test.create_user(&user_kp, &user).await?; | ||
|
|
||
| let post = PubkyAppPost { | ||
| content: "Watcher:Bookmark:DelRetry:Post".to_string(), | ||
| kind: PubkyAppPost::default().kind, | ||
| parent: None, | ||
| embed: None, | ||
| attachments: None, | ||
| }; | ||
| let (post_id, post_path) = test.create_post(&user_kp, &post).await?; | ||
|
|
||
| let bookmark = PubkyAppBookmark { | ||
| uri: post_uri_builder(user_id.clone(), post_id.clone()), | ||
| created_at: chrono::Utc::now().timestamp_millis(), | ||
| }; | ||
| let bookmark_id = bookmark.create_id(); | ||
| let bookmark_path = bookmark.hs_path(); | ||
|
|
||
| test.put(&user_kp, &bookmark_path, bookmark).await?; | ||
|
|
||
| // Verify initial state: bookmark exists in graph + Redis, count = 1 | ||
| assert!(find_post_bookmark(&user_id, &post_id, &user_id) | ||
| .await | ||
| .is_ok()); | ||
| assert!(Bookmark::get_from_index(&user_id, &post_id, &user_id) | ||
| .await? | ||
| .is_some()); | ||
| assert_eq!(find_user_counts(&user_id).await.bookmarks, 1); | ||
|
|
||
| // Simulate partial completion of a previous sync_del attempt: | ||
| // Redis cleanup succeeded (index removed + counter decremented) but graph delete failed. | ||
| Bookmark::del_from_index(&user_id, &post_id, &user_id).await?; | ||
| UserCounts::decrement(&user_id, "bookmarks", None).await?; | ||
|
|
||
| // Verify simulated state: graph still has edge, Redis is clean, counter = 0 | ||
| assert!(find_post_bookmark(&user_id, &post_id, &user_id) | ||
| .await | ||
| .is_ok()); | ||
| assert!(Bookmark::get_from_index(&user_id, &post_id, &user_id) | ||
| .await? | ||
| .is_none()); | ||
| assert_eq!(find_user_counts(&user_id).await.bookmarks, 0); | ||
|
|
||
| // Retry: call sync_del directly — should complete graph cleanup without double-decrement | ||
| let user_pubky_id = PubkyId::try_from(user_id.as_str()).map_err(anyhow::Error::msg)?; | ||
| handlers::bookmark::sync_del(user_pubky_id, bookmark_id).await?; | ||
|
|
||
| // Verify final state: graph edge deleted, counter still 0 (not decremented again) | ||
| assert!(find_post_bookmark(&user_id, &post_id, &user_id) | ||
| .await | ||
| .is_err()); | ||
| assert_eq!(find_user_counts(&user_id).await.bookmarks, 0); | ||
|
|
||
| // Cleanup | ||
| test.cleanup_post(&user_kp, &post_path).await?; | ||
| test.cleanup_user(&user_kp).await?; | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// After a fully successful delete, a replay of sync_del should return Ok | ||
| #[tokio_shared_rt::test(shared)] | ||
| async fn test_bookmark_del_replay_after_success_skips() -> Result<()> { | ||
| let mut test = WatcherTest::setup().await?; | ||
|
|
||
| let user_kp = Keypair::random(); | ||
| let user = PubkyAppUser { | ||
| bio: Some("test_bookmark_del_replay_after_success_skips".to_string()), | ||
| image: None, | ||
| links: None, | ||
| name: "Watcher:Bookmark:DelReplay:User".to_string(), | ||
| status: None, | ||
| }; | ||
| let user_id = test.create_user(&user_kp, &user).await?; | ||
|
|
||
| let post = PubkyAppPost { | ||
| content: "Watcher:Bookmark:DelReplay:Post".to_string(), | ||
| kind: PubkyAppPost::default().kind, | ||
| parent: None, | ||
| embed: None, | ||
| attachments: None, | ||
| }; | ||
| let (post_id, post_path) = test.create_post(&user_kp, &post).await?; | ||
|
|
||
| let bookmark = PubkyAppBookmark { | ||
| uri: post_uri_builder(user_id.clone(), post_id.clone()), | ||
| created_at: chrono::Utc::now().timestamp_millis(), | ||
| }; | ||
| let bookmark_id = bookmark.create_id(); | ||
| let bookmark_path = bookmark.hs_path(); | ||
|
|
||
| test.put(&user_kp, &bookmark_path, bookmark).await?; | ||
|
|
||
| // Delete through normal event flow | ||
| test.del(&user_kp, &bookmark_path).await?; | ||
|
|
||
| // Verify fully deleted state | ||
| assert!(find_post_bookmark(&user_id, &post_id, &user_id) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The test conflates the post author and the bookmarker, which means it doesn't cover the case where the post author != the bookmarker, which is a more realistic scenario. It would be better if the test created two separate users, |
||
| .await | ||
| .is_err()); | ||
| assert_eq!(find_user_counts(&user_id).await.bookmarks, 0); | ||
|
|
||
| // Replay: call sync_del again — should get SkipIndexing (graph edge gone) | ||
| let user_pubky_id = PubkyId::try_from(user_id.as_str()).map_err(anyhow::Error::msg)?; | ||
| let result = handlers::bookmark::sync_del(user_pubky_id, bookmark_id).await; | ||
|
|
||
| assert!( | ||
| matches!(result, Err(EventProcessorError::SkipIndexing)), | ||
| "Replay after full delete should return SkipIndexing, got: {result:?}" | ||
| ); | ||
|
|
||
| // Counter must remain 0 | ||
| assert_eq!(find_user_counts(&user_id).await.bookmarks, 0); | ||
|
|
||
| // Cleanup | ||
| test.cleanup_post(&user_kp, &post_path).await?; | ||
| test.cleanup_user(&user_kp).await?; | ||
|
|
||
| Ok(()) | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,4 +1,5 @@ | ||
| mod del; | ||
| mod del_idempotent; | ||
| mod fail_index; | ||
| mod raw; | ||
| mod retry_bookmark; | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.