# website/.forgejo/workflows/pricing-tests.yml
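# Runs the pricing test suite (structure, discount, AppCat, edge-case and integration
# tests), generates a coverage report, and exercises two inline sanity-check scripts
# whenever pricing-related code changes.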
name: Pricing Tests
on:
  push:
    paths:
      - "hub/services/models/pricing.py"
      - "hub/services/tests/test_pricing*.py"
      - "hub/services/forms.py"
      - "hub/services/views/**"
      - "hub/services/templates/**"
  pull_request:
    paths:
      - "hub/services/models/pricing.py"
      - "hub/services/tests/test_pricing*.py"
      - "hub/services/forms.py"
      - "hub/services/views/**"
      - "hub/services/templates/**"
jobs:
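  # Two jobs: the matrixed pricing test run below, and a lightweight
  # documentation check that only needs the runner's system python3.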
  pricing-tests:
    name: Pricing Model Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.12", "3.13"]
        django-version: ["5.0", "5.1"]
      fail-fast: false
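    # Note: django-version currently only distinguishes the coverage artifact name;
    # `uv sync` installs the Django version pinned in uv.lock.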
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install uv
        uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true
          cache-dependency-glob: "uv.lock"

      - name: Install dependencies
        run: |
          uv sync --extra dev

      - name: Set up test database
        run: |
          echo "Using SQLite for pricing tests"
          export DJANGO_SETTINGS_MODULE=hub.settings
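          # This export only applies to the current step; each test step below sets
          # DJANGO_SETTINGS_MODULE explicitly in its own env block.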

      - name: Run pricing model structure tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Testing pricing model structure and basic functionality"
          uv run --extra dev manage.py test hub.services.tests.test_pricing.ComputePlanTestCase --verbosity=2
          uv run --extra dev manage.py test hub.services.tests.test_pricing.StoragePlanTestCase --verbosity=2
          echo "::endgroup::"

      - name: Run discount calculation tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Testing progressive discount calculations"
          uv run --extra dev manage.py test hub.services.tests.test_pricing.ProgressiveDiscountModelTestCase --verbosity=2
          echo "::endgroup::"

      - name: Run AppCat pricing tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Testing AppCat service pricing and addons"
          uv run --extra dev manage.py test hub.services.tests.test_pricing.VSHNAppCatPriceTestCase --verbosity=2
          uv run --extra dev manage.py test hub.services.tests.test_pricing.VSHNAppCatAddonTestCase --verbosity=2
          echo "::endgroup::"

      - name: Run pricing edge case tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Testing pricing edge cases and error conditions"
          uv run --extra dev manage.py test hub.services.tests.test_pricing_edge_cases --verbosity=2
          echo "::endgroup::"

      - name: Run pricing integration tests
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Testing pricing integration scenarios"
          uv run --extra dev manage.py test hub.services.tests.test_pricing_integration --verbosity=2
          echo "::endgroup::"

      - name: Generate pricing test coverage report
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Generating test coverage report for pricing models"
          uv run coverage run --source=hub.services.models.pricing manage.py test hub.services.tests.test_pricing hub.services.tests.test_pricing_edge_cases hub.services.tests.test_pricing_integration
          uv run coverage report --show-missing
          uv run coverage html
          echo "::endgroup::"
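
      # `coverage html` writes its report to htmlcov/ (the default output directory),
      # which the next step uploads per matrix combination.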
      - name: Upload coverage reports
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: pricing-coverage-${{ matrix.python-version }}-django${{ matrix.django-version }}
          path: htmlcov/
          retention-days: 7
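
      # Sanity check: build a minimal pricing scenario (base fee, unit rate, two
      # discount tiers) and assert two known totals; see the inline script below.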
      - name: Validate pricing calculations with sample data
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Validating pricing calculations with sample scenarios"
          cat << 'EOF' > validate_pricing.py
          import os
          import django
          from decimal import Decimal

          os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hub.settings')
          django.setup()

          from hub.services.models.base import Currency, Term
          from hub.services.models.providers import CloudProvider
          from hub.services.models.services import Service
          from hub.services.models.pricing import (
              VSHNAppCatPrice, VSHNAppCatBaseFee, VSHNAppCatUnitRate,
              ProgressiveDiscountModel, DiscountTier
          )

          print("🧪 Creating test pricing scenario...")

          # Create test data
          provider = CloudProvider.objects.create(
              name="Test Provider", slug="test", description="Test", website="https://test.com"
          )
          service = Service.objects.create(
              name="Test Service", slug="test", description="Test", features="Test"
          )

          # Create discount model
          discount = ProgressiveDiscountModel.objects.create(name="Test", active=True)
          DiscountTier.objects.create(
              discount_model=discount, min_units=0, max_units=10, discount_percent=Decimal('0')
          )
          DiscountTier.objects.create(
              discount_model=discount, min_units=10, max_units=None, discount_percent=Decimal('10')
          )

          # Create pricing
          price_config = VSHNAppCatPrice.objects.create(
              service=service, variable_unit='RAM', term='MTH', discount_model=discount
          )
          VSHNAppCatBaseFee.objects.create(
              vshn_appcat_price_config=price_config, currency='CHF', amount=Decimal('50.00')
          )
          VSHNAppCatUnitRate.objects.create(
              vshn_appcat_price_config=price_config, currency='CHF',
              service_level='GA', amount=Decimal('5.0000')
          )

          # Test calculations
          result_small = price_config.calculate_final_price('CHF', 'GA', 5)
          result_large = price_config.calculate_final_price('CHF', 'GA', 15)
          print(f"✅ Small config (5 units): {result_small['total_price']} CHF")
          print(f"✅ Large config (15 units): {result_large['total_price']} CHF")

          # Validate expected results
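          # Expected totals, assuming the discount is applied progressively per tier
          # (which is what the asserted values below imply):
          #   5 units:  50.00 base + 5 x 5.00                   =  75.00  (all units in the 0% tier)
          #   15 units: 50.00 base + 10 x 5.00 + 5 x 5.00 x 0.9 = 122.50  (units above 10 get 10% off)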
          assert result_small['total_price'] == Decimal('75.00'), f"Expected 75.00, got {result_small['total_price']}"
          assert result_large['total_price'] == Decimal('122.50'), f"Expected 122.50, got {result_large['total_price']}"
          print("🎉 All pricing validations passed!")
          EOF
          uv run python validate_pricing.py
          echo "::endgroup::"
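
      # Performance guard: a ten-tier discount model and a 5000-unit calculation
      # must finish within the time limit asserted in the inline script below.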
      - name: Performance test for large calculations
        env:
          DJANGO_SETTINGS_MODULE: hub.settings
        run: |
          echo "::group::Testing pricing performance with large datasets"
          cat << 'EOF' > performance_test.py
          import os
          import django
          import time
          from decimal import Decimal

          os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hub.settings')
          django.setup()

          from hub.services.models.base import Currency, Term
          from hub.services.models.providers import CloudProvider
          from hub.services.models.services import Service
          from hub.services.models.pricing import (
              VSHNAppCatPrice, VSHNAppCatBaseFee, VSHNAppCatUnitRate,
              ProgressiveDiscountModel, DiscountTier
          )

          print("⚡ Testing pricing calculation performance...")

          # Create test data
          provider = CloudProvider.objects.create(
              name="Perf Test", slug="perf", description="Test", website="https://test.com"
          )
          service = Service.objects.create(
              name="Perf Service", slug="perf", description="Test", features="Test"
          )

          # Create complex discount model
          discount = ProgressiveDiscountModel.objects.create(name="Complex", active=True)
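          # Ten tiers of 100 units each (the last one open-ended); the discount steps
          # from 0% to 45% in 5% increments (i/20, capped at 50).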
          for i in range(0, 1000, 100):
              DiscountTier.objects.create(
                  discount_model=discount,
                  min_units=i,
                  max_units=i+100 if i < 900 else None,
                  discount_percent=Decimal(str(min(50, i/20)))
              )

          price_config = VSHNAppCatPrice.objects.create(
              service=service, variable_unit='RAM', term='MTH', discount_model=discount
          )
          VSHNAppCatBaseFee.objects.create(
              vshn_appcat_price_config=price_config, currency='CHF', amount=Decimal('100.00')
          )
          VSHNAppCatUnitRate.objects.create(
              vshn_appcat_price_config=price_config, currency='CHF',
              service_level='GA', amount=Decimal('1.0000')
          )

          # Performance test
          start_time = time.time()
          result = price_config.calculate_final_price('CHF', 'GA', 5000)  # Large calculation
          end_time = time.time()
          duration = end_time - start_time

          print(f"✅ Large calculation (5000 units) completed in {duration:.3f} seconds")
          print(f"✅ Result: {result['total_price']} CHF")
          # Calculations should typically finish well under a second; allow up to
          # 5 seconds to absorb slow CI runners
          assert duration < 5.0, f"Calculation took too long: {duration} seconds"
          print("🚀 Performance test passed!")
          EOF
          uv run python performance_test.py
          echo "::endgroup::"
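
  # Static checks: the pricing test files must exist and carry docstrings; the
  # coverage-completeness step is informational and only lists the key pricing methods.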
  pricing-documentation:
    name: Pricing Documentation Check
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Check pricing test documentation
        run: |
          echo "::group::Verifying pricing test documentation"
          # Check that the tests README exists
          if [ ! -f "hub/services/tests/README.md" ]; then
            echo "❌ Missing hub/services/tests/README.md"
            exit 1
          fi
          # Check if test files have proper docstrings
          python3 << 'EOF'
          import ast
          import sys

          def check_docstrings(filename):
              with open(filename, 'r') as f:
                  tree = ast.parse(f.read())
              classes_without_docs = []
              methods_without_docs = []
              for node in ast.walk(tree):
                  if isinstance(node, ast.ClassDef):
                      if not ast.get_docstring(node):
                          classes_without_docs.append(node.name)
                  elif isinstance(node, ast.FunctionDef) and node.name.startswith('test_'):
                      if not ast.get_docstring(node):
                          methods_without_docs.append(node.name)
              return classes_without_docs, methods_without_docs

          test_files = [
              'hub/services/tests/test_pricing.py',
              'hub/services/tests/test_pricing_edge_cases.py',
              'hub/services/tests/test_pricing_integration.py'
          ]

          all_good = True
          for filename in test_files:
              try:
                  classes, methods = check_docstrings(filename)
                  if classes or methods:
                      print(f"⚠️ {filename} has missing docstrings:")
                      for cls in classes:
                          print(f" - Class: {cls}")
                      for method in methods:
                          print(f" - Method: {method}")
                      all_good = False
                  else:
                      print(f"✅ {filename} - All classes and methods documented")
              except FileNotFoundError:
                  print(f"❌ {filename} not found")
                  all_good = False

          if not all_good:
              print("\n📝 Please add docstrings to undocumented classes and test methods")
              sys.exit(1)
          else:
              print("\n🎉 All pricing test files are properly documented!")
          EOF
          echo "::endgroup::"

      - name: Check test coverage completeness
        run: |
          echo "::group::Checking test coverage completeness"
          python3 << 'EOF'
          import ast
          import sys

          # Read the pricing models file
          with open('hub/services/models/pricing.py', 'r') as f:
              tree = ast.parse(f.read())

          # Extract all model classes and their methods
          model_classes = []
          for node in ast.walk(tree):
              if isinstance(node, ast.ClassDef):
                  methods = []
                  for item in node.body:
                      if isinstance(item, ast.FunctionDef) and not item.name.startswith('_'):
                          methods.append(item.name)
                  if methods:
                      model_classes.append((node.name, methods))

          print("📊 Pricing model classes and public methods:")
          for class_name, methods in model_classes:
              print(f" {class_name}: {', '.join(methods)}")

          # Check if all important methods have corresponding tests
          important_methods = ['get_price', 'calculate_discount', 'calculate_final_price']
          missing_tests = []

          # This is a simplified check - in practice you'd want more sophisticated analysis
          for class_name, methods in model_classes:
              for method in methods:
                  if method in important_methods:
                      print(f"✅ Found important method: {class_name}.{method}")

          print("\n📈 Test coverage check completed")
          EOF
          echo "::endgroup::"