diff --git a/main.py b/main.py index c835ff1b..55ae5c1f 100644 --- a/main.py +++ b/main.py @@ -2267,16 +2267,25 @@ def _fetch_if_valid(url: str): if existing_folders is None: return False - if not no_delete: - deletion_occurred = False + # Identify folders to delete and folders to keep (scan) + folders_to_delete = [] + folders_to_scan = existing_folders.copy() - # Identify folders to delete - folders_to_delete = [] + if not no_delete: for folder_data in folder_data_list: name = folder_data["group"]["group"].strip() if name in existing_folders: folders_to_delete.append((name, existing_folders[name])) + if name in folders_to_scan: + del folders_to_scan[name] + + # Start fetching rules from kept folders in background (parallel to deletions) + existing_rules_future = shared_executor.submit( + get_all_existing_rules, client, profile_id, folders_to_scan + ) + if not no_delete: + deletion_occurred = False if folders_to_delete: # Parallel delete to speed up the "clean slate" phase # Use shared_executor (3 workers) @@ -2309,10 +2318,13 @@ def _fetch_if_valid(url: str): ) countdown_timer(60, "Waiting for deletions to propagate") - # Optimization: Pass the updated existing_folders to avoid redundant API call - existing_rules = get_all_existing_rules( - client, profile_id, known_folders=existing_folders - ) + # Retrieve result from background task + # If deletion occurred, we effectively used the wait time to fetch rules! 
+ try: + existing_rules = existing_rules_future.result() + except Exception as e: + log.error(f"Failed to fetch existing rules in background: {sanitize_for_log(e)}") + existing_rules = set() with concurrent.futures.ThreadPoolExecutor( max_workers=max_workers diff --git a/test_main.py b/test_main.py index e8c19321..eeb61618 100644 --- a/test_main.py +++ b/test_main.py @@ -229,6 +229,7 @@ def test_interactive_prompts_show_hints(monkeypatch, capsys): mock_args.dry_run = False mock_args.no_delete = False mock_args.plan_json = None + mock_args.clear_cache = False monkeypatch.setattr(m, "parse_args", lambda: mock_args) # Mock internal functions to abort execution safely after prompts @@ -434,6 +435,7 @@ def test_interactive_input_extracts_id(monkeypatch, capsys): mock_args.dry_run = False mock_args.no_delete = False mock_args.plan_json = None + mock_args.clear_cache = False monkeypatch.setattr(m, "parse_args", lambda: mock_args) # Mock sync_profile to catch the call diff --git a/tests/test_parallel_fetch.py b/tests/test_parallel_fetch.py new file mode 100644 index 00000000..e366c6c6 --- /dev/null +++ b/tests/test_parallel_fetch.py @@ -0,0 +1,92 @@ + +import unittest +from unittest.mock import MagicMock, patch +import time +import concurrent.futures +import sys +import os + +# Add root to path +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +import main + +class TestParallelFetch(unittest.TestCase): + def setUp(self): + self.main = main + self.client = MagicMock() + self.profile_id = "test_profile" + self.folder_urls = ["https://example.com/folder.json"] + + # Mock fetch_folder_data to return dummy data + self.folder_data = { + "group": {"group": "test_folder", "action": {"do": 0, "status": 1}}, + "rules": [{"PK": "rule1"}] + } + + @patch("main.get_all_existing_rules") + @patch("main.delete_folder") + @patch("main.verify_access_and_get_folders") + @patch("main.fetch_folder_data") + @patch("main.validate_folder_url") + 
@patch("main.countdown_timer") + def test_parallel_execution(self, mock_timer, mock_validate, mock_fetch, mock_verify, mock_delete, mock_get_rules): + """ + Verify that get_all_existing_rules runs in parallel with delete_folder. + """ + # Setup mocks + mock_validate.return_value = True + mock_fetch.return_value = self.folder_data + + # existing folders: test_folder (to be deleted), keep_folder (to be kept) + mock_verify.return_value = { + "test_folder": "id_1", + "keep_folder": "id_2" + } + + # Latency simulation + def slow_get_rules(*args, **kwargs): + time.sleep(1.0) + return {"rule_from_keep"} + + def slow_delete(*args, **kwargs): + time.sleep(1.0) + return True + + mock_get_rules.side_effect = slow_get_rules + mock_delete.side_effect = slow_delete + + # Mock create_folder and push_rules to avoid errors + with patch("main.create_folder") as mock_create, \ + patch("main.push_rules") as mock_push: + + mock_create.return_value = "new_id" + mock_push.return_value = True + + start = time.perf_counter() + self.main.sync_profile(self.profile_id, self.folder_urls, no_delete=False) + elapsed = time.perf_counter() - start + + # Assertions + self.assertTrue(mock_delete.called, "delete_folder should be called") + self.assertTrue(mock_get_rules.called, "get_all_existing_rules should be called") + + # Verify get_all_existing_rules was called with ONLY keep_folder + call_args = mock_get_rules.call_args + # args: client, profile_id, known_folders + known_folders = call_args[0][2] if len(call_args[0]) > 2 else call_args[1]['known_folders'] + + self.assertIn("keep_folder", known_folders) + self.assertNotIn("test_folder", known_folders, "Should not fetch rules from deleted folder") + + # Timing check + # If serial: 1s (delete) + 1s (fetch) + overhead = ~2s + # If parallel: max(1s, 1s) + overhead = ~1s + # Note: sync_profile also has overhead (fetch_folder_data etc), but that's mocked to be fast. 
+ # We allow some buffer, but it should be significantly less than 2s + print(f"Elapsed time: {elapsed:.2f}s") + + # Serial execution (delete then fetch) would take > 2s here, + # while parallel execution should finish well under 1.5s. + + # Verify optimization effectiveness (should be close to max(1.0, 1.0) = 1.0s, allow 1.5s buffer) + self.assertLess(elapsed, 1.5, "Parallel execution took too long")