name: Scheduled Pricing Tests

on:
  schedule:
    # Run daily at 6 AM UTC
    - cron: "0 6 * * *"
  workflow_dispatch:
    inputs:
      test_scope:
        description: "Test scope"
        required: true
        default: "all"
        type: choice
        options:
          - all
          - pricing-only
          - integration-only
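
# The test_scope input can be selected in the Actions UI when dispatching the workflow,
# or passed via the GitHub CLI, for example (a usage sketch, assuming the gh CLI is
# installed and authenticated for this repository):
#
#   gh workflow run "Scheduled Pricing Tests" -f test_scope=pricing-only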

jobs:
  scheduled-pricing-tests:
    name: Scheduled Pricing Validation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        database: ["sqlite", "postgresql"]
      fail-fast: false

    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: servala_test
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.13"

      - name: Install uv
        uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          cache-dependency-glob: "uv.lock"

      - name: Install dependencies
        run: |
          uv sync --extra dev

      - name: Set database configuration
        run: |
          if [ "${{ matrix.database }}" == "postgresql" ]; then
            echo "DATABASE_URL=postgresql://postgres:postgres@localhost:5432/servala_test" >> $GITHUB_ENV
          else
            echo "DATABASE_URL=sqlite:///tmp/test.db" >> $GITHUB_ENV
          fi

      - name: Run comprehensive pricing tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Running comprehensive pricing test suite on ${{ matrix.database }}"

          # Set test scope based on input or default to all
          TEST_SCOPE="${{ github.event.inputs.test_scope || 'all' }}"

          case $TEST_SCOPE in
            "pricing-only")
              echo "🎯 Running pricing-specific tests only"
              uv run --extra dev manage.py test \
                hub.services.tests.test_pricing \
                --verbosity=2 \
                --keepdb
              ;;
            "integration-only")
              echo "🔗 Running integration tests only"
              uv run --extra dev manage.py test \
                hub.services.tests.test_pricing_integration \
                --verbosity=2 \
                --keepdb
              ;;
            *)
              echo "🧪 Running all pricing tests"
              uv run --extra dev manage.py test \
                hub.services.tests.test_pricing \
                hub.services.tests.test_pricing_edge_cases \
                hub.services.tests.test_pricing_integration \
                --verbosity=2 \
                --keepdb
              ;;
          esac

          echo "::endgroup::"
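
      # The next three steps write short throwaway Python scripts via quoted heredocs
      # (cat << 'EOF'). Note that ${{ ... }} expressions are still interpolated inside
      # the quoted heredoc, because the Actions runner expands them before the shell
      # ever sees the run script (the daily report step relies on this for the
      # database name).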

      - name: Run pricing stress tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Running pricing stress tests"

          cat << 'EOF' > stress_test_pricing.py
          import os
          import django
          import time
          import concurrent.futures
          from decimal import Decimal

          os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hub.settings')
          django.setup()

          from hub.services.models.base import Currency, Term
          from hub.services.models.providers import CloudProvider
          from hub.services.models.services import Service
          from hub.services.models.pricing import (
              VSHNAppCatPrice, VSHNAppCatBaseFee, VSHNAppCatUnitRate,
              ProgressiveDiscountModel, DiscountTier
          )

          def setup_test_data():
              """Set up test data for stress testing"""
              provider = CloudProvider.objects.create(
                  name="Stress Test Provider", slug="stress-test",
                  description="Test", website="https://test.com"
              )
              service = Service.objects.create(
                  name="Stress Test Service", slug="stress-test",
                  description="Test", features="Test"
              )

              # Create complex discount model
              discount = ProgressiveDiscountModel.objects.create(
                  name="Stress Test Discount",
                  active=True
              )

              # Create multiple discount tiers
              for i in range(0, 1000, 100):
                  DiscountTier.objects.create(
                      discount_model=discount,
                      min_units=i,
                      max_units=i + 100 if i < 900 else None,
                      discount_percent=Decimal(str(min(25, i / 40)))
                  )

              price_config = VSHNAppCatPrice.objects.create(
                  service=service,
                  variable_unit='RAM',
                  term='MTH',
                  discount_model=discount
              )

              VSHNAppCatBaseFee.objects.create(
                  vshn_appcat_price_config=price_config,
                  currency='CHF',
                  amount=Decimal('100.00')
              )

              VSHNAppCatUnitRate.objects.create(
                  vshn_appcat_price_config=price_config,
                  currency='CHF',
                  service_level='GA',
                  amount=Decimal('2.0000')
              )

              return price_config

          def calculate_price_concurrent(price_config, units):
              """Calculate price in a concurrent context"""
              try:
                  result = price_config.calculate_final_price('CHF', 'GA', units)
                  return result['total_price'] if result else None
              except Exception as e:
                  return f"Error: {e}"

          def main():
              print("🚀 Starting pricing stress test...")

              # Setup
              price_config = setup_test_data()

              # Test scenarios with increasing complexity
              test_scenarios = [100, 500, 1000, 2000, 5000]

              print("\n📊 Sequential performance test:")
              for units in test_scenarios:
                  start_time = time.time()
                  result = price_config.calculate_final_price('CHF', 'GA', units)
                  end_time = time.time()

                  duration = end_time - start_time
                  print(f"  {units:4d} units: {duration:.3f}s -> {result['total_price']} CHF")

                  if duration > 2.0:
                      print(f"⚠️ Performance warning: {units} units took {duration:.3f}s")

              print("\n🔄 Concurrent performance test:")
              start_time = time.time()

              with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
                  futures = []
                  for _ in range(50):  # 50 concurrent calculations
                      future = executor.submit(calculate_price_concurrent, price_config, 1000)
                      futures.append(future)

                  results = []
                  for future in concurrent.futures.as_completed(futures):
                      result = future.result()
                      results.append(result)

              end_time = time.time()
              duration = end_time - start_time

              successful_results = [r for r in results if isinstance(r, Decimal)]
              failed_results = [r for r in results if not isinstance(r, Decimal)]

              print(f"  50 concurrent calculations: {duration:.3f}s")
              print(f"  Successful: {len(successful_results)}")
              print(f"  Failed: {len(failed_results)}")

              if failed_results:
                  print(f"  Failures: {failed_results[:3]}...")  # Show first 3 failures

              # Validate results
              if len(successful_results) < 45:  # Allow up to 10% failures
                  raise Exception(f"Too many concurrent calculation failures: {len(failed_results)}")

              if duration > 10.0:  # Should complete within 10 seconds
                  raise Exception(f"Concurrent calculations too slow: {duration}s")

              print("\n✅ Stress test completed successfully!")

          if __name__ == "__main__":
              main()
          EOF

          uv run python stress_test_pricing.py

          echo "::endgroup::"

      - name: Validate pricing data integrity
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Validating pricing data integrity"

          cat << 'EOF' > integrity_check.py
          import os
          import sys
          import django
          from decimal import Decimal, InvalidOperation

          os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hub.settings')
          django.setup()

          from django.db import connection, models
          from hub.services.models.pricing import *
          from hub.services.models.services import Service

          def check_pricing_constraints():
              """Check database constraints and data integrity"""
              issues = []

              print("🔍 Checking pricing data integrity...")

              # Check for negative prices
              negative_compute_prices = ComputePlanPrice.objects.filter(amount__lt=0)
              if negative_compute_prices.exists():
                  issues.append(f"Found {negative_compute_prices.count()} negative compute plan prices")

              negative_storage_prices = StoragePlanPrice.objects.filter(amount__lt=0)
              if negative_storage_prices.exists():
                  issues.append(f"Found {negative_storage_prices.count()} negative storage prices")

              # Check for invalid discount percentages
              invalid_discounts = DiscountTier.objects.filter(
                  models.Q(discount_percent__lt=0) | models.Q(discount_percent__gt=100)
              )
              if invalid_discounts.exists():
                  issues.append(f"Found {invalid_discounts.count()} invalid discount percentages")

              # Check for overlapping discount tiers (potential logic issues)
              discount_models = ProgressiveDiscountModel.objects.filter(active=True)
              for model in discount_models:
                  tiers = model.tiers.all().order_by('min_units')
                  for i in range(len(tiers) - 1):
                      current = tiers[i]
                      next_tier = tiers[i + 1]
                      if current.max_units and current.max_units > next_tier.min_units:
                          issues.append(f"Overlapping tiers in {model.name}: {current.min_units}-{current.max_units} overlaps with {next_tier.min_units}")

              # Check for services without pricing
              services_without_pricing = Service.objects.filter(vshn_appcat_price__isnull=True)
              if services_without_pricing.exists():
                  print(f"ℹ️ Found {services_without_pricing.count()} services without AppCat pricing (this may be normal)")

              # Check for price configurations without rates
              price_configs_without_base_fee = VSHNAppCatPrice.objects.filter(base_fees__isnull=True)
              if price_configs_without_base_fee.exists():
                  issues.append(f"Found {price_configs_without_base_fee.count()} price configs without base fees")

              return issues

          def main():
              issues = check_pricing_constraints()

              if issues:
                  print("\n❌ Data integrity issues found:")
                  for issue in issues:
                      print(f"  - {issue}")
                  print(f"\nTotal issues: {len(issues)}")

                  # Don't fail the build for minor issues, but fail once there are many
                  if len(issues) > 5:
                      print("⚠️ Many integrity issues found - consider investigating")
                      sys.exit(1)
              else:
                  print("\n✅ All pricing data integrity checks passed!")

          if __name__ == "__main__":
              main()
          EOF

          uv run python integrity_check.py

          echo "::endgroup::"

      - name: Generate daily pricing report
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Generating daily pricing report"

          cat << 'EOF' > daily_report.py
          import os
          import django
          from decimal import Decimal
          from datetime import datetime, timezone

          os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hub.settings')
          django.setup()

          from hub.services.models.pricing import *
          from hub.services.models.services import Service
          from hub.services.models.providers import CloudProvider

          def generate_report():
              print("📊 Daily Pricing System Report")
              print("=" * 50)
              print(f"Generated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}")
              # The matrix.database expression below is substituted by the Actions runner
              # before this script is written to disk.
              print(f"Database: ${{ matrix.database }}")
              print()

              # Count models
              print("📈 Model Counts:")
              print(f"  Cloud Providers: {CloudProvider.objects.count()}")
              print(f"  Services: {Service.objects.count()}")
              print(f"  Compute Plans: {ComputePlan.objects.count()}")
              print(f"  Storage Plans: {StoragePlan.objects.count()}")
              print(f"  AppCat Price Configs: {VSHNAppCatPrice.objects.count()}")
              print(f"  Discount Models: {ProgressiveDiscountModel.objects.count()}")
              print(f"  Active Discount Models: {ProgressiveDiscountModel.objects.filter(active=True).count()}")
              print()

              # Price ranges
              print("💰 Price Ranges:")
              compute_prices = ComputePlanPrice.objects.all()
              if compute_prices.exists():
                  min_compute = compute_prices.order_by('amount').first().amount
                  max_compute = compute_prices.order_by('-amount').first().amount
                  print(f"  Compute Plans: {min_compute} - {max_compute} CHF")

              base_fees = VSHNAppCatBaseFee.objects.all()
              if base_fees.exists():
                  min_base = base_fees.order_by('amount').first().amount
                  max_base = base_fees.order_by('-amount').first().amount
                  print(f"  AppCat Base Fees: {min_base} - {max_base} CHF")

              unit_rates = VSHNAppCatUnitRate.objects.all()
              if unit_rates.exists():
                  min_unit = unit_rates.order_by('amount').first().amount
                  max_unit = unit_rates.order_by('-amount').first().amount
                  print(f"  AppCat Unit Rates: {min_unit} - {max_unit} CHF")
              print()

              # Currency distribution
              print("💱 Currency Distribution:")
              currencies = ['CHF', 'EUR', 'USD']
              for currency in currencies:
                  compute_count = ComputePlanPrice.objects.filter(currency=currency).count()
                  appcat_count = VSHNAppCatBaseFee.objects.filter(currency=currency).count()
                  print(f"  {currency}: {compute_count} compute prices, {appcat_count} AppCat base fees")
              print()

              # Discount model analysis
              print("🎯 Discount Model Analysis:")
              active_discounts = ProgressiveDiscountModel.objects.filter(active=True)
              for discount in active_discounts[:5]:  # Show first 5
                  tier_count = discount.tiers.count()
                  max_discount = discount.tiers.order_by('-discount_percent').first()
                  max_percent = max_discount.discount_percent if max_discount else 0
                  print(f"  {discount.name}: {tier_count} tiers, max {max_percent}% discount")

              if active_discounts.count() > 5:
                  print(f"  ... and {active_discounts.count() - 5} more")
              print()

              print("✅ Report generation completed")

          if __name__ == "__main__":
              generate_report()
          EOF

          uv run python daily_report.py

          echo "::endgroup::"
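
      # The upload step below expects htmlcov/ and test-results.xml, but no step above
      # currently produces them (if-no-files-found is therefore set to ignore). A sketch
      # of a step that could generate the HTML coverage report, assuming coverage.py is
      # part of the dev extras; producing test-results.xml would additionally require a
      # JUnit-style test runner. Adapt to the project's tooling before enabling:
      #
      # - name: Run pricing tests with coverage
      #   env:
      #     DJANGO_SETTINGS_MODULE: hub.settings
      #   run: |
      #     uv run --extra dev coverage run manage.py test hub.services.tests.test_pricing --keepdb
      #     uv run --extra dev coverage html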

      - name: Save test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: scheduled-test-results-${{ matrix.database }}
          path: |
            htmlcov/
            test-results.xml
          if-no-files-found: ignore
          retention-days: 30

  notify-on-failure:
    name: Notify on Test Failure
    runs-on: ubuntu-latest
    needs: [scheduled-pricing-tests]
    if: failure()

    steps:
      - name: Create failure issue
        uses: actions/github-script@v7
        with:
          script: |
            const title = `🚨 Scheduled Pricing Tests Failed - ${new Date().toISOString().split('T')[0]}`;
            const body = `
            ## Scheduled Pricing Test Failure

            The scheduled pricing tests failed on ${new Date().toISOString()}.

            **Run Details:**
            - **Workflow**: ${context.workflow}
            - **Run ID**: ${context.runId}
            - **Commit**: ${context.sha}

            **Next Steps:**
            1. Check the workflow logs for detailed error information
            2. Verify whether this is a transient issue by re-running the workflow
            3. If the issue persists, investigate potential regressions

            **Links:**
            - [Failed Workflow Run](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId})

            /cc @tobru
            `;

            // Check whether a similar issue already exists before opening a new one
            const existingIssues = await github.rest.issues.listForRepo({
              owner: context.repo.owner,
              repo: context.repo.repo,
              labels: 'pricing-tests,automated',
              state: 'open'
            });

            if (existingIssues.data.length === 0) {
              await github.rest.issues.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                title: title,
                body: body,
                labels: ['bug', 'pricing-tests', 'automated', 'priority-high']
              });
            } else {
              console.log('Similar issue already exists, skipping creation');
            }