#!/bin/bash
# Forgejo Actions Local Testing Setup
# This script helps test the pricing workflows locally before pushing

# -e: abort on the first failing command.
# -u: treat expansion of an unset variable as an error (added: every
#     variable in this script is assigned before use, so this only
#     catches typos).
# pipefail is deliberately NOT enabled: the coverage-summary pipeline
# below relies on the default "status of the last command" semantics.
set -eu

echo "🔧 Forgejo Actions - Local Pricing Test Setup"
echo "============================================="
|
||
|
||
# Refuse to run anywhere but the Django project root; manage.py is the
# marker file that identifies it.
if [[ ! -f manage.py ]]; then
    echo "❌ Error: This script must be run from the Django project root directory"
    echo "   Expected to find manage.py in current directory"
    exit 1
fi

# Everything below is driven through uv, so its absence is fatal.
if ! command -v uv > /dev/null 2>&1; then
    echo "❌ Error: uv is not installed"
    echo "   Please install uv: https://docs.astral.sh/uv/getting-started/installation/"
    exit 1
fi
|
||
|
||
echo ""
echo "📋 Pre-flight Checks"
echo "--------------------"

# Test modules and workflow definitions that must exist before anything runs.
REQUIRED_FILES=(
    "hub/services/tests/test_pricing.py"
    "hub/services/tests/test_pricing_edge_cases.py"
    "hub/services/tests/test_pricing_integration.py"
    ".forgejo/workflows/ci.yml"
    ".forgejo/workflows/pricing-tests.yml"
)

# Report every file individually first, then abort once if anything was
# missing, so the user sees the complete picture in a single run.
missing_count=0
for required in "${REQUIRED_FILES[@]}"; do
    if [[ -f "$required" ]]; then
        echo "✅ $required"
    else
        echo "❌ $required (missing)"
        missing_count=$((missing_count + 1))
    fi
done

if (( missing_count > 0 )); then
    echo ""
    echo "❌ Some required files are missing. Please ensure all test files are present."
    exit 1
fi
|
||
|
||
echo ""
echo "🔍 Environment Setup"
echo "--------------------"

# Install dependencies
# Sync the uv-managed environment, including the dev extra that carries
# the test/lint tooling invoked later in this script.
echo "📦 Installing dependencies..."
uv sync --extra dev

# Check Django configuration
# A silent `manage.py check` verifies that the settings module imports
# cleanly before any tests are attempted; a verbose run happens later.
echo "🔧 Checking Django configuration..."
export DJANGO_SETTINGS_MODULE=hub.settings
uv run --extra dev manage.py check --verbosity=0

echo ""
echo "🧪 Running Pricing Tests Locally"
echo "--------------------------------"
|
||
|
||
# Function to run tests with timing
#
# run_test_group NAME TEST_PATH
#   Runs one Django test module via uv, echoing progress and the elapsed
#   wall-clock time on success.
# Arguments:
#   $1 - human-readable group name used in status messages
#   $2 - dotted Django test path (e.g. hub.services.tests.test_pricing)
# Returns:
#   0 when the test run succeeds, 1 when it fails.
run_test_group() {
    local test_name="$1"
    local test_path="$2"
    # Declaration and command substitution are separated so a failing
    # `date` cannot be masked by the exit status of the `local` builtin.
    local start_time
    start_time=$(date +%s)

    echo "🔄 Running $test_name..."
    if uv run --extra dev manage.py test "$test_path" --verbosity=1; then
        local end_time duration
        end_time=$(date +%s)
        duration=$((end_time - start_time))
        echo "✅ $test_name completed in ${duration}s"
    else
        echo "❌ $test_name failed"
        return 1
    fi
}
|
||
|
||
# Run test groups (similar to what the workflows do)
echo "Running the same tests that Forgejo Actions will run..."
echo ""

# Basic pricing tests
# `|| exit 1` is redundant under `set -e` but makes the fail-fast intent
# explicit for each group.
run_test_group "Basic Pricing Tests" "hub.services.tests.test_pricing" || exit 1
echo ""

# Edge case tests
run_test_group "Edge Case Tests" "hub.services.tests.test_pricing_edge_cases" || exit 1
echo ""

# Integration tests
run_test_group "Integration Tests" "hub.services.tests.test_pricing_integration" || exit 1
echo ""

# Django system checks (like in CI)
# Verbose this time: the earlier --verbosity=0 run only validated that
# settings load; this one prints every check result.
echo "🔍 Running Django system checks..."
uv run --extra dev manage.py check --verbosity=2
echo "✅ System checks passed"
echo ""
|
||
|
||
# Code quality checks (if ruff is available)
# ruff may be on PATH directly or only inside the uv-managed environment;
# probe both before attempting any checks. Failures are reported as
# warnings only — they do not abort the script.
if command -v ruff > /dev/null 2>&1 || uv run ruff --version > /dev/null 2>&1; then
    echo "🎨 Running code quality checks..."

    echo "  - Checking linting..."
    if ! uv run ruff check hub/services/tests/test_pricing*.py --quiet; then
        echo "⚠️ Linting issues found (run 'uv run ruff check hub/services/tests/test_pricing*.py' for details)"
    else
        echo "✅ Linting passed"
    fi

    echo "  - Checking formatting..."
    if ! uv run ruff format --check hub/services/tests/test_pricing*.py --quiet; then
        echo "⚠️ Formatting issues found (run 'uv run ruff format hub/services/tests/test_pricing*.py' to fix)"
    else
        echo "✅ Formatting is correct"
    fi
else
    echo "ℹ️ Skipping code quality checks (ruff not available)"
fi
|
||
|
||
echo ""
echo "📊 Test Coverage Analysis"
echo "-------------------------"

# Generate coverage report (if coverage is available)
if uv run --extra dev coverage --version > /dev/null 2>&1; then
    echo "📈 Generating test coverage report..."

    # Run the same three pricing test modules under coverage, scoped to
    # the pricing models package.
    uv run --extra dev coverage run --source='hub/services/models/pricing' \
        manage.py test \
        hub.services.tests.test_pricing \
        hub.services.tests.test_pricing_edge_cases \
        hub.services.tests.test_pricing_integration \
        --verbosity=0

    # Generate reports
    echo ""
    echo "Coverage Summary:"
    uv run --extra dev coverage report --show-missing

    # Generate HTML report
    uv run --extra dev coverage html
    echo ""
    echo "📄 HTML coverage report generated: htmlcov/index.html"

    # Check coverage threshold (like in CI).
    # FIX: --extra dev added so the report is read from the same
    # environment as every other coverage invocation above.
    coverage_percentage=$(uv run --extra dev coverage report | grep TOTAL | awk '{print $4}' | sed 's/%//')
    if [ -n "$coverage_percentage" ]; then
        # FIX: awk replaces the previous `bc -l` comparison — it handles
        # the fractional percentages coverage emits and avoids requiring
        # bc to be installed (awk is already used in this pipeline).
        if awk -v pct="$coverage_percentage" 'BEGIN { exit (pct >= 85) ? 0 : 1 }'; then
            echo "✅ Coverage threshold met: ${coverage_percentage}%"
        else
            echo "⚠️ Coverage below threshold: ${coverage_percentage}% (target: 85%)"
        fi
    fi
else
    echo "ℹ️ Skipping coverage analysis (coverage not available)"
    echo "   Install with: uv add coverage"
fi
|
||
|
||
echo ""
echo "🚀 Performance Test"
echo "-------------------"

# Quick performance test
echo "🏃 Running quick performance test..."

# FIX: write the probe to a mktemp-generated file and register cleanup
# via an EXIT trap, so the temporary file is removed even when the probe
# fails and `set -e` aborts the script (the previous fixed-name file
# quick_performance_test.py leaked in that case).
# NOTE(review): the probe creates real DB rows (provider/service/pricing
# objects) — assumes it runs against a disposable local database; verify.
perf_script=$(mktemp)
trap 'rm -f -- "$perf_script"' EXIT

cat << 'EOF' > "$perf_script"
import os
import django
import time
from decimal import Decimal

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hub.settings')
django.setup()

from hub.services.models.base import Currency, Term
from hub.services.models.providers import CloudProvider
from hub.services.models.services import Service
from hub.services.models.pricing import (
    VSHNAppCatPrice, VSHNAppCatBaseFee, VSHNAppCatUnitRate,
    ProgressiveDiscountModel, DiscountTier
)

# Create test data
provider = CloudProvider.objects.create(
    name="Perf Test", slug="perf-test", description="Test", website="https://test.com"
)
service = Service.objects.create(
    name="Perf Service", slug="perf-service", description="Test", features="Test"
)

discount = ProgressiveDiscountModel.objects.create(name="Perf Discount", active=True)
DiscountTier.objects.create(
    discount_model=discount, min_units=0, max_units=10, discount_percent=Decimal('0')
)
DiscountTier.objects.create(
    discount_model=discount, min_units=10, max_units=None, discount_percent=Decimal('15')
)

price_config = VSHNAppCatPrice.objects.create(
    service=service, variable_unit='RAM', term='MTH', discount_model=discount
)
VSHNAppCatBaseFee.objects.create(
    vshn_appcat_price_config=price_config, currency='CHF', amount=Decimal('50.00')
)
VSHNAppCatUnitRate.objects.create(
    vshn_appcat_price_config=price_config, currency='CHF',
    service_level='GA', amount=Decimal('4.0000')
)

# Performance test
test_cases = [10, 50, 100, 500, 1000]
print("Units | Time (ms) | Result (CHF)")
print("-" * 35)

for units in test_cases:
    start_time = time.time()
    result = price_config.calculate_final_price('CHF', 'GA', units)
    end_time = time.time()

    duration_ms = (end_time - start_time) * 1000
    price = result['total_price'] if result else 'Error'
    print(f"{units:5d} | {duration_ms:8.2f} | {price}")

print("\n✅ Performance test completed")
EOF

uv run python "$perf_script"
rm -f -- "$perf_script"
trap - EXIT
|
||
|
||
echo ""
echo "🎉 Local Testing Complete!"
echo "=========================="
echo ""
echo "📋 Summary:"
echo "   ✅ All pricing tests passed"
echo "   ✅ Django system checks passed"
echo "   ✅ Performance test completed"
# Same availability probe as the code-quality section earlier in the script.
if command -v ruff &> /dev/null || uv run ruff --version &> /dev/null 2>&1; then
    echo "   ✅ Code quality checks completed"
fi
# FIX: --extra dev added so this probe matches the coverage-analysis
# section; without it the summary could disagree with the analysis about
# whether coverage is installed.
if uv run --extra dev coverage --version &> /dev/null 2>&1; then
    echo "   ✅ Coverage analysis completed"
fi
echo ""
echo "🚀 Your code is ready for Forgejo Actions!"
echo ""
echo "Next steps:"
echo "   1. Commit your changes: git add . && git commit -m 'Your commit message'"
echo "   2. Push to trigger workflows: git push origin your-branch"
echo "   3. Check Actions tab in your repository for results"
echo ""
echo "Workflow files created:"
echo "   - .forgejo/workflows/ci.yml (main CI/CD pipeline)"
echo "   - .forgejo/workflows/pricing-tests.yml (detailed pricing tests)"
echo "   - .forgejo/workflows/pr-pricing-validation.yml (PR validation)"
echo "   - .forgejo/workflows/scheduled-pricing-tests.yml (daily tests)"
echo ""

# Clean up temporary files
# (Not a cleanup: points the user at the HTML coverage report when one
# was generated earlier.)
if [ -f "htmlcov/index.html" ]; then
    echo "📄 Open htmlcov/index.html in your browser to view detailed coverage report"
fi
|