diff --git a/backend/contributions/management/commands/add_daily_uptime.py b/backend/contributions/management/commands/add_daily_uptime.py index e3658873..fa470ca8 100644 --- a/backend/contributions/management/commands/add_daily_uptime.py +++ b/backend/contributions/management/commands/add_daily_uptime.py @@ -1,6 +1,6 @@ from django.core.management.base import BaseCommand from django.utils import timezone -from django.db import transaction +from django.db import transaction, models from django.contrib.auth import get_user_model from contributions.models import Contribution, ContributionType, Category from leaderboard.models import GlobalLeaderboardMultiplier, update_all_ranks, LeaderboardEntry @@ -61,59 +61,70 @@ def handle(self, *args, **options): total_new_contributions = 0 multiplier_errors = 0 users_to_update_leaderboard = [] # Track users who got new contributions - - # Get all users - users = User.objects.all() - total_users = users.count() - + # Current date (end date for all ranges) today = timezone.now().date() - - # Process each user + + # OPTIMIZATION 1: Get all validators with their creation dates + from validators.models import Validator + + all_validators = Validator.objects.all() + validator_created_dates = {v.user_id: v.created_at.date() for v in all_validators} + + if not validator_created_dates: + self.stdout.write(self.style.WARNING('No validators found')) + return + + # OPTIMIZATION 2: Get the LAST (most recent) uptime date per validator + # We only need the max date, not all dates, since we'll start from day after last uptime + last_uptimes = {} + for contrib in Contribution.objects.filter( + contribution_type=uptime_type, + user_id__in=validator_created_dates.keys() + ).values('user_id').annotate( + last_date=models.Max('contribution_date') + ): + last_uptimes[contrib['user_id']] = contrib['last_date'].date() + + # Determine start date for each validator + # Key insight: We start from day AFTER last uptime (or from creation date if no uptime) 
+ # This means we never create duplicates - no need to check existing dates! + validator_start_dates = {} + for user_id, created_date in validator_created_dates.items(): + if user_id in last_uptimes: + # Has uptime → start from day AFTER last recorded uptime + validator_start_dates[user_id] = last_uptimes[user_id] + timedelta(days=1) + else: + # No uptime yet → start from validator creation date + validator_start_dates[user_id] = created_date + + users_with_uptime = len(validator_start_dates) + + # Get all validator users + users = User.objects.filter(id__in=validator_start_dates.keys()) + total_users = users.count() + + if verbose: + self.stdout.write(f'Processing {total_users} validators') + + # Process each validator for user in users: - # Find the first uptime contribution for this user - first_uptime = Contribution.objects.filter( - user=user, - contribution_type=uptime_type - ).order_by('contribution_date').first() - - if not first_uptime: - if verbose: - self.stdout.write(f'User {user} has no uptime contributions, skipping') - continue - - users_with_uptime += 1 - - # Get the start date from the first contribution - start_date = first_uptime.contribution_date.date() - + # Get the start date from our pre-fetched data + # This is either the day after the last uptime date (for existing validators) or the validator creation date (for new validators) + start_date = validator_start_dates[user.id] + if verbose: self.stdout.write( - f'Processing user {user} - first uptime on {start_date}, ' + f'Processing user {user} - starting from {start_date}, ' f'generating daily contributions until {today}' ) - - # Get all existing uptime dates for this user to avoid duplicates - existing_dates = set( - Contribution.objects.filter( - user=user, - contribution_type=uptime_type - ).values_list('contribution_date__date', flat=True) - ) - + # Generate a contribution for each day from start_date to today - # if there isn't already one for that date + # No need to check for existing dates since we 
start from day after last uptime new_contributions = [] current_date = start_date - + while current_date <= today: - # Skip if there's already a contribution for this date - if current_date in existing_dates: - if verbose: - self.stdout.write(f' - {current_date}: Uptime already exists, skipping') - current_date += timedelta(days=1) - continue - # Create a new contribution for this date contribution_date = datetime.combine( current_date, @@ -130,19 +141,15 @@ def handle(self, *args, **options): except GlobalLeaderboardMultiplier.DoesNotExist: if force: multiplier_value = decimal.Decimal('1.0') - self.stdout.write( - self.style.WARNING( - f' - {current_date}: No multiplier found, using default of 1.0 (--force enabled)' - ) - ) else: multiplier_errors += 1 - self.stdout.write( - self.style.ERROR( - f' - {current_date}: No multiplier found for contribution type "Uptime". ' - f'Skipping this date. Use --force to override.' + if verbose: + self.stdout.write( + self.style.ERROR( + f' - {current_date}: No multiplier found for contribution type "Uptime". ' + f'Skipping this date. Use --force to override.' + ) ) - ) current_date += timedelta(days=1) continue @@ -191,64 +198,20 @@ def handle(self, *args, **options): # Update leaderboard entries for all affected users if users_to_update_leaderboard and not dry_run: self.stdout.write('Updating leaderboard entries...') - - # Get the category for Uptime contributions - uptime_category = None - if uptime_type.category: - uptime_category = uptime_type.category - if verbose: - self.stdout.write(f'Uptime contributions belong to category: {uptime_category.name}') - + + # OPTIMIZATION 3: Simplified leaderboard updates + # bulk_create doesn't trigger post_save signals, so we need to manually update leaderboards. + # However, we can optimize by calling update_user_leaderboard_entries which handles + # all leaderboard types for a user efficiently. 
+ from leaderboard.models import update_user_leaderboard_entries + for user in users_to_update_leaderboard: - # Update GLOBAL leaderboard entry - global_entry, created = LeaderboardEntry.objects.get_or_create( - user=user, - category=None # None means global - ) - global_points = global_entry.update_points_without_ranking() - + # This single call updates all leaderboard types the user qualifies for + # and recalculates ranks for all affected leaderboards + update_user_leaderboard_entries(user) + if verbose: - action = 'Created' if created else 'Updated' - self.stdout.write(f'{action} GLOBAL leaderboard for {user}: {global_points} total points') - - # Update CATEGORY-SPECIFIC leaderboard entry if uptime has a category - if uptime_category: - category_entry, cat_created = LeaderboardEntry.objects.get_or_create( - user=user, - category=uptime_category - ) - category_points = category_entry.update_points_without_ranking() - - if verbose: - action = 'Created' if cat_created else 'Updated' - self.stdout.write(f'{action} {uptime_category.name} category leaderboard for {user}: {category_points} points') - - # Also check and update entries for ALL categories this user has contributions in - user_categories = Category.objects.filter( - contribution_types__contributions__user=user - ).distinct() - - for category in user_categories: - if category != uptime_category: # Skip if we already updated it above - cat_entry, cat_created = LeaderboardEntry.objects.get_or_create( - user=user, - category=category - ) - cat_points = cat_entry.update_points_without_ranking() - - if verbose and cat_created: - self.stdout.write(f'Created {category.name} category leaderboard for {user}: {cat_points} points') - - # Now update all ranks once, after all users have been processed - self.stdout.write('Updating all leaderboard ranks (global and categories)...') - update_all_ranks() - - if verbose: - # Show summary of category leaderboards - self.stdout.write('\nCategory leaderboard summary:') - for 
category in Category.objects.all(): - entry_count = LeaderboardEntry.objects.filter(category=category).count() - self.stdout.write(f' - {category.name}: {entry_count} participants') + self.stdout.write(f'Updated leaderboard entries for {user}') # Print summary self.stdout.write(self.style.SUCCESS( diff --git a/backend/contributions/models.py b/backend/contributions/models.py index dc8ff0d7..5f99b962 100644 --- a/backend/contributions/models.py +++ b/backend/contributions/models.py @@ -170,12 +170,10 @@ def clean(self): at_date=self.contribution_date ) self.multiplier_at_creation = multiplier_value - except GlobalLeaderboardMultiplier.DoesNotExist as e: - raise ValidationError( - f"No active multiplier exists for contribution type '{self.contribution_type}' " - f"on {self.contribution_date.strftime('%Y-%m-%d %H:%M')}. " - "Please set a multiplier that covers this date before adding contributions." - ) from e + except GlobalLeaderboardMultiplier.DoesNotExist: + # No multiplier exists for this contribution type/date, use default of 1.0 + # This is consistent with the update_leaderboard command behavior + self.multiplier_at_creation = 1.0 def save(self, *args, **kwargs): """ diff --git a/backend/contributions/tests/test_add_uptime.py b/backend/contributions/tests/test_add_uptime.py new file mode 100644 index 00000000..b3c75490 --- /dev/null +++ b/backend/contributions/tests/test_add_uptime.py @@ -0,0 +1,339 @@ +""" +Comprehensive test for add_daily_uptime management command. +Tests correctness of uptime calculation and basic performance. 
+""" +from django.test import TestCase +from django.core.management import call_command +from django.utils import timezone +from django.contrib.auth import get_user_model +from datetime import datetime, timedelta +from decimal import Decimal +from io import StringIO +import time +import pytz + +from contributions.models import Contribution, ContributionType, Category +from leaderboard.models import GlobalLeaderboardMultiplier +from validators.models import Validator +from django.db import models + +User = get_user_model() + + +class AddDailyUptimeTest(TestCase): + """Comprehensive test for the add_daily_uptime command.""" + + def setUp(self): + """Set up test data with realistic scenarios.""" + # Use a fixed reference time to avoid timing issues + self.now = timezone.now() + self.today = self.now.date() + + # Get or create category (migrations may have created it) + self.validator_category, _ = Category.objects.get_or_create( + slug='validator', + defaults={ + 'name': 'Validator', + 'description': 'Validator contributions' + } + ) + + # Get or create uptime contribution type (may already exist) + self.uptime_type, created = ContributionType.objects.get_or_create( + slug='uptime', + defaults={ + 'name': 'Uptime', + 'description': 'Daily validator uptime', + 'category': self.validator_category, + 'min_points': 1, + 'max_points': 10 + } + ) + + # If uptime type already exists, ensure it has the right settings + if not created: + self.uptime_type.category = self.validator_category + self.uptime_type.min_points = 1 + self.uptime_type.max_points = 10 + self.uptime_type.save() + + # Clean up any existing uptime contributions and multipliers for clean test state + Contribution.objects.filter(contribution_type=self.uptime_type).delete() + GlobalLeaderboardMultiplier.objects.filter(contribution_type=self.uptime_type).delete() + + # Create multipliers at midnight to match contribution dates + # Contributions are created at midnight, so multipliers must also be at midnight + # 
to avoid time-of-day boundary issues + def date_to_midnight(days_ago): + """Convert days ago to a datetime at midnight UTC.""" + date = (self.now - timedelta(days=days_ago)).date() + return datetime.combine(date, datetime.min.time(), tzinfo=pytz.UTC) + + # Period 1: 90 days ago to 30 days ago (2.0x) + self.multiplier1 = GlobalLeaderboardMultiplier.objects.create( + contribution_type=self.uptime_type, + multiplier_value=Decimal('2.0'), + valid_from=date_to_midnight(90), + description='Early multiplier' + ) + + # Period 2: 30 days ago to now (1.0x) + self.multiplier2 = GlobalLeaderboardMultiplier.objects.create( + contribution_type=self.uptime_type, + multiplier_value=Decimal('1.0'), + valid_from=date_to_midnight(30), + description='Current multiplier' + ) + + # Validator 1: Has existing uptime from 60 days ago + self.user1 = User.objects.create_user( + email='validator1@test.com', + password='test', + name='Validator One', + visible=True + ) + self.validator1 = Validator.objects.create(user=self.user1) + # Set creation date to 60 days ago + self.validator1.created_at = self.now - timedelta(days=60) + self.validator1.save() + # Create initial uptime 60 days ago + Contribution.objects.create( + user=self.user1, + contribution_type=self.uptime_type, + points=1, + contribution_date=self.now - timedelta(days=60), + multiplier_at_creation=Decimal('2.0'), + frozen_global_points=2 + ) + + # Validator 2: Has uptime from 20 days ago + self.user2 = User.objects.create_user( + email='validator2@test.com', + password='test', + name='Validator Two', + visible=True + ) + self.validator2 = Validator.objects.create(user=self.user2) + self.validator2.created_at = self.now - timedelta(days=40) + self.validator2.save() + # Create initial uptime 20 days ago + Contribution.objects.create( + user=self.user2, + contribution_type=self.uptime_type, + points=1, + contribution_date=self.now - timedelta(days=20), + multiplier_at_creation=Decimal('1.0'), + frozen_global_points=1 + ) + + # 
Validator 3: New validator with no uptime yet + self.user3 = User.objects.create_user( + email='validator3@test.com', + password='test', + name='Validator Three', + visible=True + ) + self.validator3 = Validator.objects.create(user=self.user3) + self.validator3.created_at = self.now - timedelta(days=5) + self.validator3.save() + # No uptime contributions yet + + def test_add_daily_uptime_correctness_and_performance(self): + """ + Comprehensive test that validates: + 1. Correct date ranges for validators with existing uptime + 2. Correct date ranges for new validators + 3. Correct multiplier application + 4. Idempotency (no duplicates on re-run) + 5. Acceptable performance + """ + # === PART 1: First Run - Generate Missing Uptime === + start_time = time.time() + out = StringIO() + call_command('add_daily_uptime', stdout=out, verbosity=0) + first_run_time = time.time() - start_time + + # === VALIDATE VALIDATOR 1 (has existing uptime from 60 days ago) === + # Should have uptime from 60 days ago to today (61 days total) + user1_uptimes = Contribution.objects.filter( + user=self.user1, + contribution_type=self.uptime_type + ).order_by('contribution_date') + + # Check count: 61 days (60 days ago + today, inclusive) + self.assertEqual( + user1_uptimes.count(), + 61, + f"Validator 1 should have 61 days of uptime, got {user1_uptimes.count()}" + ) + + # Check date range is continuous (no gaps) + dates1 = [u.contribution_date.date() for u in user1_uptimes] + expected_start1 = self.today - timedelta(days=60) + for i, date in enumerate(dates1): + expected_date = expected_start1 + timedelta(days=i) + self.assertEqual( + date, + expected_date, + f"Validator 1: Gap or wrong date at position {i}" + ) + + # Check multipliers are correct based on date + # Use midnight of 30 days ago as the cutoff (same as multiplier valid_from) + cutoff_date = (self.now - timedelta(days=30)).date() + multiplier_cutoff = datetime.combine(cutoff_date, datetime.min.time(), tzinfo=pytz.UTC) + + for 
uptime in user1_uptimes: + date = uptime.contribution_date + # Contributions before 30 days ago (midnight) should have 2.0x + if date < multiplier_cutoff: + self.assertEqual( + uptime.multiplier_at_creation, + Decimal('2.0'), + f"Expected 2.0x multiplier for {date.date()}" + ) + self.assertEqual(uptime.frozen_global_points, 2) + else: + # After 30 days ago should have 1.0x + self.assertEqual( + uptime.multiplier_at_creation, + Decimal('1.0'), + f"Expected 1.0x multiplier for {date.date()}" + ) + self.assertEqual(uptime.frozen_global_points, 1) + + # === VALIDATE VALIDATOR 2 (has uptime from 20 days ago) === + # Should have uptime from 20 days ago to today (21 days total) + user2_uptimes = Contribution.objects.filter( + user=self.user2, + contribution_type=self.uptime_type + ).order_by('contribution_date') + + self.assertEqual( + user2_uptimes.count(), + 21, + f"Validator 2 should have 21 days of uptime, got {user2_uptimes.count()}" + ) + + # All should have 1.0x multiplier (within last 30 days) + for uptime in user2_uptimes: + self.assertEqual(uptime.multiplier_at_creation, Decimal('1.0')) + self.assertEqual(uptime.frozen_global_points, 1) + + # === VALIDATE VALIDATOR 3 (new validator, no prior uptime) === + # Should have uptime from creation date (5 days ago) to today (6 days total) + user3_uptimes = Contribution.objects.filter( + user=self.user3, + contribution_type=self.uptime_type + ).order_by('contribution_date') + + self.assertEqual( + user3_uptimes.count(), + 6, + f"Validator 3 should have 6 days of uptime, got {user3_uptimes.count()}" + ) + + # Check continuous dates from creation to today + dates3 = [u.contribution_date.date() for u in user3_uptimes] + expected_start3 = self.today - timedelta(days=5) + for i, date in enumerate(dates3): + expected_date = expected_start3 + timedelta(days=i) + self.assertEqual(date, expected_date) + + # === PART 2: Second Run - Test Idempotency === + start_time = time.time() + call_command('add_daily_uptime', stdout=out, 
verbosity=0) + second_run_time = time.time() - start_time + + # Counts should be identical (no new contributions created) + self.assertEqual( + Contribution.objects.filter( + user=self.user1, + contribution_type=self.uptime_type + ).count(), + 61, + "Second run should not create duplicates for Validator 1" + ) + + self.assertEqual( + Contribution.objects.filter( + user=self.user2, + contribution_type=self.uptime_type + ).count(), + 21, + "Second run should not create duplicates for Validator 2" + ) + + self.assertEqual( + Contribution.objects.filter( + user=self.user3, + contribution_type=self.uptime_type + ).count(), + 6, + "Second run should not create duplicates for Validator 3" + ) + + # === PART 3: Performance Validation === + # For 3 validators with ~88 total days of history, should complete quickly + self.assertLess( + first_run_time, + 5.0, + f"First run took {first_run_time:.2f}s, should complete in under 5 seconds" + ) + + self.assertLess( + second_run_time, + 2.0, + f"Second run took {second_run_time:.2f}s, should be very fast (no new data)" + ) + + # === PART 4: Total Points Validation === + # Validator 1: 30 days at 2.0x (60 points) + 31 days at 1.0x (31 points) = 91 points + user1_total = Contribution.objects.filter( + user=self.user1, + contribution_type=self.uptime_type + ).aggregate(total=models.Sum('frozen_global_points'))['total'] + + self.assertEqual( + user1_total, + 91, + f"Validator 1 should have 91 total points, got {user1_total}" + ) + + # Validator 2: 21 days at 1.0x = 21 points + user2_total = Contribution.objects.filter( + user=self.user2, + contribution_type=self.uptime_type + ).aggregate(total=models.Sum('frozen_global_points'))['total'] + + self.assertEqual( + user2_total, + 21, + f"Validator 2 should have 21 total points, got {user2_total}" + ) + + # Validator 3: 6 days at 1.0x = 6 points + user3_total = Contribution.objects.filter( + user=self.user3, + contribution_type=self.uptime_type + 
).aggregate(total=models.Sum('frozen_global_points'))['total'] + + self.assertEqual( + user3_total, + 6, + f"Validator 3 should have 6 total points, got {user3_total}" + ) + + # === SUMMARY OUTPUT === + print(f"\n{'='*60}") + print("ADD_DAILY_UPTIME TEST RESULTS") + print(f"{'='*60}") + print(f"✓ Validator 1: {user1_uptimes.count()} days, {user1_total} points") + print(f"✓ Validator 2: {user2_uptimes.count()} days, {user2_total} points") + print(f"✓ Validator 3: {user3_uptimes.count()} days, {user3_total} points") + print(f"✓ First run: {first_run_time:.3f}s") + print(f"✓ Second run (idempotent): {second_run_time:.3f}s") + print(f"✓ All date ranges correct") + print(f"✓ All multipliers correct") + print(f"✓ No duplicates on re-run") + print(f"{'='*60}\n")