feat(coordinator-api): integrate dynamic pricing engine with GPU marketplace and add agent identity router

- Add DynamicPricingEngine and MarketDataCollector dependencies to GPU marketplace endpoints
- Implement dynamic pricing calculation for GPU registration with market_balance strategy
- Calculate real-time dynamic prices at booking time with confidence scores and pricing factors
- Enhance /marketplace/pricing/{model} endpoint with comprehensive dynamic pricing analysis
  - Add static vs dynamic price comparison to the pricing response
This commit is contained in:
oib
2026-02-28 22:57:10 +01:00
parent 85ae21a568
commit 0e6c9eda72
83 changed files with 30189 additions and 134 deletions

View File

@@ -0,0 +1,515 @@
"""
Integration Tests for Dynamic Pricing System
Tests end-to-end pricing workflows and marketplace integration
"""
import pytest
import asyncio
from datetime import datetime, timedelta
from unittest.mock import Mock, patch, AsyncMock
import httpx
from fastapi.testclient import TestClient
from app.services.dynamic_pricing_engine import DynamicPricingEngine, PricingStrategy, ResourceType
from app.services.market_data_collector import MarketDataCollector, DataSource
from app.routers.dynamic_pricing import router
from app.domain.pricing_models import PricingHistory, ProviderPricingStrategy, MarketMetrics
from app.schemas.pricing import DynamicPriceRequest, PricingStrategyRequest
class TestPricingIntegration:
    """Integration tests for the complete pricing system.

    Exercises the DynamicPricingEngine and MarketDataCollector together,
    plus the FastAPI pricing router. Tests drive the collectors via their
    private ``_collect_*`` helpers rather than the background loop —
    presumably to keep the tests fast and deterministic; confirm those
    helpers are stable enough to be used as a test seam.

    NOTE(review): ``datetime.utcnow()`` is deprecated since Python 3.12;
    consider ``datetime.now(timezone.utc)`` if the engine accepts aware
    timestamps — verify before changing.
    """

    @pytest.fixture
    def pricing_engine(self):
        """Create and initialize pricing engine (config tightened for tests)."""
        config = {
            "min_price": 0.001,
            "max_price": 1000.0,
            "update_interval": 60,  # Faster for testing
            "forecast_horizon": 24,
            "max_volatility_threshold": 0.3,
            "circuit_breaker_threshold": 0.5
        }
        engine = DynamicPricingEngine(config)
        return engine

    @pytest.fixture
    def market_collector(self):
        """Create and initialize market data collector."""
        config = {
            "websocket_port": 8766  # Different port for testing
        }
        collector = MarketDataCollector(config)
        return collector

    @pytest.fixture
    def test_client(self):
        """Create FastAPI test client with the pricing router mounted."""
        from fastapi import FastAPI
        app = FastAPI()
        app.include_router(router, prefix="/api/v1/pricing")
        return TestClient(app)

    @pytest.mark.asyncio
    async def test_full_pricing_workflow(self, pricing_engine, market_collector):
        """Test complete pricing workflow from data collection to price calculation."""
        # Initialize both services
        await pricing_engine.initialize()
        await market_collector.initialize()
        # Simulate market data collection
        await market_collector._collect_gpu_metrics()
        await market_collector._collect_booking_data()
        await market_collector._collect_competitor_prices()
        # Wait for data aggregation
        # NOTE(review): fixed sleep assumes aggregation finishes in 100ms —
        # a completion signal/event would be less flaky; confirm one exists.
        await asyncio.sleep(0.1)
        # Calculate dynamic price
        result = await pricing_engine.calculate_dynamic_price(
            resource_id="integration_test_gpu",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE,
            region="us_west"
        )
        # Verify workflow completed successfully
        assert result.resource_id == "integration_test_gpu"
        assert result.recommended_price > 0
        assert result.confidence_score > 0
        assert len(result.reasoning) > 0
        # Verify market data was collected
        assert len(market_collector.raw_data) > 0
        assert len(market_collector.aggregated_data) > 0

    @pytest.mark.asyncio
    async def test_strategy_optimization_workflow(self, pricing_engine):
        """Test strategy optimization based on performance feedback."""
        await pricing_engine.initialize()
        # Set initial strategy
        await pricing_engine.set_provider_strategy(
            provider_id="test_provider",
            strategy=PricingStrategy.MARKET_BALANCE
        )
        # Simulate multiple pricing calculations with performance feedback
        performance_data = []
        for i in range(10):
            # NOTE(review): `result` and `performance_data` are collected but
            # never fed back into the engine or asserted — TODO: either call
            # the strategy-optimization API with them or drop them.
            result = await pricing_engine.calculate_dynamic_price(
                resource_id=f"test_resource_{i}",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE
            )
            # Simulate performance metrics
            performance = {
                "revenue_growth": 0.05 + (i * 0.01),
                "profit_margin": 0.2 + (i * 0.02),
                "market_share": 0.1 + (i * 0.01),
                "customer_satisfaction": 0.8 + (i * 0.01),
                "price_stability": 0.1 - (i * 0.005)
            }
            performance_data.append(performance)
        # Verify strategy effectiveness tracking
        assert pricing_engine.provider_strategies["test_provider"] == PricingStrategy.MARKET_BALANCE
        # Verify pricing history was recorded
        assert len(pricing_engine.pricing_history) > 0

    @pytest.mark.asyncio
    async def test_market_data_integration(self, pricing_engine, market_collector):
        """Test integration between market data collector and pricing engine."""
        await pricing_engine.initialize()
        await market_collector.initialize()

        # Register pricing engine callback for market data
        async def pricing_callback(data_point):
            """Callback to process market data in pricing engine."""
            # Mock processing of market data
            pass

        market_collector.register_callback(DataSource.GPU_METRICS, pricing_callback)
        market_collector.register_callback(DataSource.BOOKING_DATA, pricing_callback)
        # Collect market data
        await market_collector._collect_gpu_metrics()
        await market_collector._collect_booking_data()
        await market_collector._collect_competitor_prices()
        # Wait for processing
        await asyncio.sleep(0.1)
        # Verify data was collected and callbacks were triggered
        assert len(market_collector.raw_data) > 0
        # Get aggregated data
        market_data = await market_collector.get_aggregated_data("gpu", "us_west")
        # NOTE(review): assertions are conditional on data being present, so
        # this passes silently when aggregation yields nothing — consider
        # asserting `market_data is not None` instead.
        if market_data:
            assert market_data.resource_type == "gpu"
            assert market_data.region == "us_west"
            assert 0 <= market_data.demand_level <= 1
            assert 0 <= market_data.supply_level <= 1

    @pytest.mark.asyncio
    async def test_circuit_breaker_integration(self, pricing_engine, market_collector):
        """Test circuit breaker functionality during market stress."""
        await pricing_engine.initialize()
        await market_collector.initialize()
        # Add pricing history
        base_time = datetime.utcnow()
        for i in range(5):
            # get-then-assign is redundant inside the loop; setdefault once
            # outside would suffice (kept as-is: docs-only change).
            pricing_engine.pricing_history["circuit_test_gpu"] = pricing_engine.pricing_history.get("circuit_test_gpu", [])
            pricing_engine.pricing_history["circuit_test_gpu"].append(
                Mock(
                    price=0.05,
                    timestamp=base_time - timedelta(minutes=10-i),
                    demand_level=0.5,
                    supply_level=0.5,
                    confidence=0.8,
                    strategy_used="market_balance"
                )
            )
        # Simulate high volatility market conditions
        with patch.object(market_collector, '_collect_gpu_metrics') as mock_collect:
            # Mock high volatility data
            mock_collect.return_value = None
            # Directly add high volatility data
            await market_collector._add_data_point(Mock(
                source=DataSource.GPU_METRICS,
                resource_id="circuit_test_gpu",
                resource_type="gpu",
                region="us_west",
                timestamp=datetime.utcnow(),
                value=0.95,  # Very high utilization
                metadata={"volatility": 0.8}  # High volatility
            ))
            # Calculate price that should trigger circuit breaker
            result = await pricing_engine.calculate_dynamic_price(
                resource_id="circuit_test_gpu",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE,
                region="us_west"
            )
        # Circuit breaker should be activated
        assert "circuit_test_gpu" in pricing_engine.circuit_breakers

    @pytest.mark.asyncio
    async def test_forecast_accuracy_tracking(self, pricing_engine):
        """Test price forecast accuracy tracking."""
        await pricing_engine.initialize()
        # Add historical data: 48 hourly points with mild oscillation so the
        # forecaster has something non-constant to fit.
        base_time = datetime.utcnow()
        for i in range(48):
            pricing_engine.pricing_history["forecast_test_gpu"] = pricing_engine.pricing_history.get("forecast_test_gpu", [])
            pricing_engine.pricing_history["forecast_test_gpu"].append(
                Mock(
                    price=0.05 + (i * 0.001),
                    demand_level=0.6 + (i % 10) * 0.02,
                    supply_level=0.7 - (i % 8) * 0.01,
                    confidence=0.8,
                    strategy_used="market_balance",
                    timestamp=base_time - timedelta(hours=48-i)
                )
            )
        # Generate forecast
        forecast = await pricing_engine.get_price_forecast("forecast_test_gpu", 24)
        assert len(forecast) == 24
        # Verify forecast structure
        for point in forecast:
            assert hasattr(point, 'timestamp')
            assert hasattr(point, 'price')
            assert hasattr(point, 'confidence')
            assert 0 <= point.confidence <= 1

    def test_api_endpoints_integration(self, test_client):
        """Test API endpoints integration (health + strategy listing)."""
        # Test health check
        response = test_client.get("/api/v1/pricing/health")
        assert response.status_code == 200
        health_data = response.json()
        assert "status" in health_data
        assert "services" in health_data
        # Test available strategies
        response = test_client.get("/api/v1/pricing/strategies/available")
        assert response.status_code == 200
        strategies = response.json()
        assert isinstance(strategies, list)
        assert len(strategies) > 0
        # Verify strategy structure
        for strategy in strategies:
            assert "strategy" in strategy
            assert "name" in strategy
            assert "description" in strategy
            assert "parameters" in strategy

    @pytest.mark.asyncio
    async def test_bulk_strategy_updates(self, pricing_engine):
        """Test bulk strategy updates functionality."""
        await pricing_engine.initialize()
        # Prepare bulk update data
        providers = ["provider_1", "provider_2", "provider_3"]
        strategies = [PricingStrategy.AGGRESSIVE_GROWTH, PricingStrategy.PROFIT_MAXIMIZATION, PricingStrategy.MARKET_BALANCE]
        # Apply bulk updates
        for provider_id, strategy in zip(providers, strategies):
            await pricing_engine.set_provider_strategy(provider_id, strategy)
        # Verify all strategies were set
        for provider_id, expected_strategy in zip(providers, strategies):
            assert pricing_engine.provider_strategies[provider_id] == expected_strategy
        # Test pricing with different strategies
        results = []
        for provider_id in providers:
            result = await pricing_engine.calculate_dynamic_price(
                resource_id=f"{provider_id}_gpu",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=pricing_engine.provider_strategies[provider_id]
            )
            results.append(result)
        # Verify different strategies produce different results
        prices = [result.recommended_price for result in results]
        assert len(set(prices)) > 1  # Should have different prices

    @pytest.mark.asyncio
    async def test_regional_pricing_differentiation(self, pricing_engine, market_collector):
        """Test regional pricing differentiation."""
        await pricing_engine.initialize()
        await market_collector.initialize()
        regions = ["us_west", "us_east", "europe", "asia"]
        results = {}
        # Calculate prices for different regions
        for region in regions:
            # Simulate regional market data
            await market_collector._collect_gpu_metrics()
            await market_collector._collect_regional_demand()
            result = await pricing_engine.calculate_dynamic_price(
                resource_id=f"regional_test_gpu",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE,
                region=region
            )
            results[region] = result
        # Verify regional differentiation
        regional_prices = {region: result.recommended_price for region, result in results.items()}
        # Prices should vary by region
        assert len(set(regional_prices.values())) > 1
        # Verify regional multipliers were applied
        for region, result in results.items():
            assert result.reasoning is not None
            # Check if regional reasoning is present
            # NOTE(review): `reasoning_text` is computed but never asserted —
            # TODO: assert a regional keyword appears, or remove the variable.
            reasoning_text = " ".join(result.reasoning).lower()
            # Regional factors should be considered

    @pytest.mark.asyncio
    async def test_performance_monitoring_integration(self, pricing_engine):
        """Test performance monitoring and metrics collection."""
        await pricing_engine.initialize()
        # Simulate multiple pricing operations
        start_time = datetime.utcnow()
        for i in range(20):
            await pricing_engine.calculate_dynamic_price(
                resource_id=f"perf_test_gpu_{i}",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE
            )
        end_time = datetime.utcnow()
        operation_time = (end_time - start_time).total_seconds()
        # Verify performance metrics
        assert operation_time < 10.0  # Should complete within 10 seconds
        assert len(pricing_engine.pricing_history) == 20
        # Verify pricing history tracking
        for i in range(20):
            resource_id = f"perf_test_gpu_{i}"
            assert resource_id in pricing_engine.pricing_history
            assert len(pricing_engine.pricing_history[resource_id]) == 1

    @pytest.mark.asyncio
    async def test_error_handling_and_recovery(self, pricing_engine, market_collector):
        """Test error handling and recovery mechanisms."""
        await pricing_engine.initialize()
        await market_collector.initialize()
        # Test with invalid resource type
        # NOTE(review): pytest.raises(Exception) is very broad — narrow it to
        # the engine's specific error type once confirmed.
        with pytest.raises(Exception):
            await pricing_engine.calculate_dynamic_price(
                resource_id="test_gpu",
                resource_type="invalid_type",  # Invalid type
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE
            )
        # Test with invalid price constraints
        constraints = Mock(min_price=0.10, max_price=0.05)  # Invalid constraints
        # Should handle gracefully
        result = await pricing_engine.calculate_dynamic_price(
            resource_id="test_gpu",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE,
            constraints=constraints
        )
        # Should still return a valid result
        assert result.recommended_price > 0
        assert result.confidence_score >= 0

    @pytest.mark.asyncio
    async def test_concurrent_pricing_calculations(self, pricing_engine):
        """Test concurrent pricing calculations."""
        await pricing_engine.initialize()
        # Create multiple concurrent tasks
        tasks = []
        for i in range(10):
            task = pricing_engine.calculate_dynamic_price(
                resource_id=f"concurrent_test_gpu_{i}",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE
            )
            tasks.append(task)
        # Execute all tasks concurrently
        results = await asyncio.gather(*tasks)
        # Verify all calculations completed successfully; gather preserves
        # input order, so index i corresponds to resource i.
        assert len(results) == 10
        for i, result in enumerate(results):
            assert result.resource_id == f"concurrent_test_gpu_{i}"
            assert result.recommended_price > 0
            assert result.confidence_score > 0

    @pytest.mark.asyncio
    async def test_data_consistency_across_services(self, pricing_engine, market_collector):
        """Test data consistency between pricing engine and market collector."""
        await pricing_engine.initialize()
        await market_collector.initialize()
        # Collect market data
        await market_collector._collect_gpu_metrics()
        await market_collector._collect_booking_data()
        await market_collector._collect_competitor_prices()
        # Wait for aggregation
        await asyncio.sleep(0.1)
        # Calculate prices for two resources of the same type/region.
        result1 = await pricing_engine.calculate_dynamic_price(
            resource_id="consistency_test_gpu_1",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE
        )
        result2 = await pricing_engine.calculate_dynamic_price(
            resource_id="consistency_test_gpu_2",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE
        )
        # Verify consistent market data usage
        assert result1.factors_exposed.get("demand_level") is not None
        assert result2.factors_exposed.get("demand_level") is not None
        # Market conditions should be similar for same resource type
        demand_diff = abs(result1.factors_exposed["demand_level"] - result2.factors_exposed["demand_level"])
        assert demand_diff < 0.1  # Should be relatively close
class TestDatabaseIntegration:
    """Test database integration for pricing data.

    Currently verifies the in-memory stand-ins for persistence
    (``pricing_history`` / ``provider_strategies``); point the assertions at
    real database queries once persistence lands.
    """

    @pytest.fixture
    def pricing_engine(self):
        """Provide a pricing engine for this class's tests.

        Fix: pytest fixtures declared inside a class are visible only to that
        class and its subclasses.  These tests previously relied on the
        ``pricing_engine`` fixture defined on ``TestPricingIntegration``,
        which pytest cannot resolve from a sibling class, so each test
        errored with "fixture 'pricing_engine' not found".  (Alternatively,
        move one shared fixture into conftest.py.)
        """
        config = {
            "min_price": 0.001,
            "max_price": 1000.0,
            "update_interval": 60,
            "forecast_horizon": 24,
            "max_volatility_threshold": 0.3,
            "circuit_breaker_threshold": 0.5
        }
        return DynamicPricingEngine(config)

    @pytest.mark.asyncio
    async def test_pricing_history_storage(self, pricing_engine):
        """Test pricing history storage to database."""
        await pricing_engine.initialize()
        # Calculate price and store in history
        result = await pricing_engine.calculate_dynamic_price(
            resource_id="db_test_gpu",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE
        )
        # Verify data was stored in memory (in production, this would be database)
        assert "db_test_gpu" in pricing_engine.pricing_history
        assert len(pricing_engine.pricing_history["db_test_gpu"]) > 0
        # Verify data structure: stored point mirrors the returned result
        history_point = pricing_engine.pricing_history["db_test_gpu"][0]
        assert history_point.price == result.recommended_price
        assert history_point.strategy_used == result.strategy_used.value

    @pytest.mark.asyncio
    async def test_provider_strategy_persistence(self, pricing_engine):
        """Test provider strategy persistence."""
        await pricing_engine.initialize()
        # Set provider strategy
        await pricing_engine.set_provider_strategy(
            provider_id="db_test_provider",
            strategy=PricingStrategy.PROFIT_MAXIMIZATION
        )
        # Verify strategy was stored
        assert pricing_engine.provider_strategies["db_test_provider"] == PricingStrategy.PROFIT_MAXIMIZATION
        # In production, this would be persisted to database
        # For now, we verify in-memory storage
        assert "db_test_provider" in pricing_engine.provider_strategies
if __name__ == "__main__":
    # Propagate pytest's exit status to the shell (previously the return
    # value of pytest.main was discarded, so CI invoking this file directly
    # always saw exit code 0).
    raise SystemExit(pytest.main([__file__]))

View File

@@ -0,0 +1,693 @@
"""
Performance Tests for Dynamic Pricing System
Tests system performance under load and stress conditions
"""
import pytest
import asyncio
import time
import psutil
import threading
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor
from unittest.mock import Mock, patch
import statistics
from app.services.dynamic_pricing_engine import DynamicPricingEngine, PricingStrategy, ResourceType
from app.services.market_data_collector import MarketDataCollector
class TestPricingPerformance:
    """Performance tests for the dynamic pricing system.

    Measures latency, throughput, memory growth, and thread safety of the
    DynamicPricingEngine under various load shapes.  Absolute time/memory
    thresholds below assume CI hardware comparable to the machine these were
    tuned on — NOTE(review): confirm thresholds against the actual CI fleet
    to avoid flaky failures.
    """

    @pytest.fixture
    def pricing_engine(self):
        """Create pricing engine optimized for performance testing."""
        config = {
            "min_price": 0.001,
            "max_price": 1000.0,
            "update_interval": 60,
            "forecast_horizon": 24,
            "max_volatility_threshold": 0.3,
            "circuit_breaker_threshold": 0.5
        }
        engine = DynamicPricingEngine(config)
        return engine

    @pytest.fixture
    def market_collector(self):
        """Create market data collector for performance testing."""
        config = {
            "websocket_port": 8767  # distinct port so suites don't collide
        }
        collector = MarketDataCollector(config)
        return collector

    @pytest.mark.asyncio
    async def test_single_pricing_calculation_performance(self, pricing_engine):
        """Test performance of individual pricing calculations."""
        await pricing_engine.initialize()
        # Measure single calculation time
        start_time = time.time()
        result = await pricing_engine.calculate_dynamic_price(
            resource_id="perf_test_gpu",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE
        )
        end_time = time.time()
        calculation_time = end_time - start_time
        # Performance assertions
        assert calculation_time < 0.1  # Should complete within 100ms
        assert result.recommended_price > 0
        assert result.confidence_score > 0
        print(f"Single calculation time: {calculation_time:.4f}s")

    @pytest.mark.asyncio
    async def test_concurrent_pricing_calculations(self, pricing_engine):
        """Test performance of concurrent pricing calculations."""
        await pricing_engine.initialize()
        num_concurrent = 100
        num_iterations = 10
        all_times = []
        for iteration in range(num_iterations):
            # Create concurrent tasks
            tasks = []
            start_time = time.time()
            for i in range(num_concurrent):
                task = pricing_engine.calculate_dynamic_price(
                    resource_id=f"concurrent_perf_gpu_{iteration}_{i}",
                    resource_type=ResourceType.GPU,
                    base_price=0.05,
                    strategy=PricingStrategy.MARKET_BALANCE
                )
                tasks.append(task)
            # Execute all tasks concurrently
            results = await asyncio.gather(*tasks)
            end_time = time.time()
            iteration_time = end_time - start_time
            all_times.append(iteration_time)
            # Verify all calculations completed successfully
            assert len(results) == num_concurrent
            for result in results:
                assert result.recommended_price > 0
                assert result.confidence_score > 0
            print(f"Iteration {iteration + 1}: {num_concurrent} calculations in {iteration_time:.4f}s")
        # Performance analysis
        avg_time = statistics.mean(all_times)
        min_time = min(all_times)
        max_time = max(all_times)
        std_dev = statistics.stdev(all_times)
        print(f"Concurrent performance stats:")
        print(f" Average time: {avg_time:.4f}s")
        print(f" Min time: {min_time:.4f}s")
        print(f" Max time: {max_time:.4f}s")
        print(f" Std deviation: {std_dev:.4f}s")
        # Performance assertions
        assert avg_time < 2.0  # Should complete 100 calculations within 2 seconds
        assert std_dev < 0.5  # Low variance in performance

    @pytest.mark.asyncio
    async def test_high_volume_pricing_calculations(self, pricing_engine):
        """Test performance under high volume load."""
        await pricing_engine.initialize()
        num_calculations = 1000
        batch_size = 50
        start_time = time.time()
        # Process in batches to avoid overwhelming the system
        for batch_start in range(0, num_calculations, batch_size):
            batch_end = min(batch_start + batch_size, num_calculations)
            tasks = []
            for i in range(batch_start, batch_end):
                task = pricing_engine.calculate_dynamic_price(
                    resource_id=f"high_volume_gpu_{i}",
                    resource_type=ResourceType.GPU,
                    base_price=0.05,
                    strategy=PricingStrategy.MARKET_BALANCE
                )
                tasks.append(task)
            await asyncio.gather(*tasks)
        end_time = time.time()
        total_time = end_time - start_time
        calculations_per_second = num_calculations / total_time
        print(f"High volume test:")
        print(f" {num_calculations} calculations in {total_time:.2f}s")
        print(f" {calculations_per_second:.2f} calculations/second")
        # Performance assertions
        assert calculations_per_second > 50  # Should handle at least 50 calculations per second
        assert total_time < 30  # Should complete within 30 seconds

    @pytest.mark.asyncio
    async def test_forecast_generation_performance(self, pricing_engine):
        """Test performance of price forecast generation."""
        await pricing_engine.initialize()
        # Add historical data for forecasting
        base_time = datetime.utcnow()
        for i in range(100):  # 100 data points
            pricing_engine.pricing_history["forecast_perf_gpu"] = pricing_engine.pricing_history.get("forecast_perf_gpu", [])
            pricing_engine.pricing_history["forecast_perf_gpu"].append(
                Mock(
                    price=0.05 + (i * 0.0001),
                    demand_level=0.6 + (i % 10) * 0.02,
                    supply_level=0.7 - (i % 8) * 0.01,
                    confidence=0.8,
                    strategy_used="market_balance",
                    timestamp=base_time - timedelta(hours=100-i)
                )
            )
        # Test forecast generation performance at several horizons
        forecast_horizons = [24, 48, 72]
        forecast_times = []
        for horizon in forecast_horizons:
            start_time = time.time()
            forecast = await pricing_engine.get_price_forecast("forecast_perf_gpu", horizon)
            end_time = time.time()
            forecast_time = end_time - start_time
            forecast_times.append(forecast_time)
            assert len(forecast) == horizon
            print(f"Forecast {horizon}h: {forecast_time:.4f}s ({len(forecast)} points)")
        # Performance assertions
        avg_forecast_time = statistics.mean(forecast_times)
        assert avg_forecast_time < 0.5  # Forecasts should complete within 500ms

    @pytest.mark.asyncio
    async def test_memory_usage_under_load(self, pricing_engine):
        """Test memory usage during high load.

        NOTE(review): RSS deltas include interpreter noise (GC timing, arena
        reuse), so the per-calculation bound is approximate by design.
        """
        await pricing_engine.initialize()
        # Measure initial memory usage
        process = psutil.Process()
        initial_memory = process.memory_info().rss / 1024 / 1024  # MB
        # Generate high load
        num_calculations = 500
        for i in range(num_calculations):
            await pricing_engine.calculate_dynamic_price(
                resource_id=f"memory_test_gpu_{i}",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE
            )
        # Measure memory usage after load
        final_memory = process.memory_info().rss / 1024 / 1024  # MB
        memory_increase = final_memory - initial_memory
        print(f"Memory usage test:")
        print(f" Initial memory: {initial_memory:.2f} MB")
        print(f" Final memory: {final_memory:.2f} MB")
        print(f" Memory increase: {memory_increase:.2f} MB")
        print(f" Memory per calculation: {memory_increase/num_calculations:.4f} MB")
        # Memory assertions
        assert memory_increase < 100  # Should not increase by more than 100MB
        assert memory_increase / num_calculations < 0.5  # Less than 0.5MB per calculation

    @pytest.mark.asyncio
    async def test_market_data_collection_performance(self, market_collector):
        """Test performance of market data collection, per source."""
        await market_collector.initialize()
        # Measure data collection performance
        collection_times = {}
        for source in market_collector.collection_intervals.keys():
            start_time = time.time()
            await market_collector._collect_from_source(source)
            end_time = time.time()
            collection_time = end_time - start_time
            collection_times[source.value] = collection_time
            print(f"Data collection {source.value}: {collection_time:.4f}s")
        # Performance assertions
        for source, collection_time in collection_times.items():
            assert collection_time < 1.0  # Each collection should complete within 1 second
        total_collection_time = sum(collection_times.values())
        assert total_collection_time < 5.0  # All collections should complete within 5 seconds

    @pytest.mark.asyncio
    async def test_strategy_switching_performance(self, pricing_engine):
        """Test performance of strategy switching."""
        await pricing_engine.initialize()
        strategies = [
            PricingStrategy.AGGRESSIVE_GROWTH,
            PricingStrategy.PROFIT_MAXIMIZATION,
            PricingStrategy.MARKET_BALANCE,
            PricingStrategy.COMPETITIVE_RESPONSE,
            PricingStrategy.DEMAND_ELASTICITY
        ]
        switch_times = []
        for strategy in strategies:
            start_time = time.time()
            await pricing_engine.set_provider_strategy(
                provider_id="switch_test_provider",
                strategy=strategy
            )
            # Calculate price with new strategy
            await pricing_engine.calculate_dynamic_price(
                resource_id="switch_test_gpu",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=strategy
            )
            end_time = time.time()
            switch_time = end_time - start_time
            switch_times.append(switch_time)
            print(f"Strategy switch to {strategy.value}: {switch_time:.4f}s")
        # Performance assertions
        # NOTE(review): the measured span includes a full price calculation,
        # not just the switch — 50ms budget covers both.
        avg_switch_time = statistics.mean(switch_times)
        assert avg_switch_time < 0.05  # Strategy switches should be very fast

    @pytest.mark.asyncio
    async def test_circuit_breaker_performance(self, pricing_engine):
        """Test circuit breaker performance under stress."""
        await pricing_engine.initialize()
        # Add pricing history
        base_time = datetime.utcnow()
        for i in range(10):
            pricing_engine.pricing_history["circuit_perf_gpu"] = pricing_engine.pricing_history.get("circuit_perf_gpu", [])
            pricing_engine.pricing_history["circuit_perf_gpu"].append(
                Mock(
                    price=0.05,
                    timestamp=base_time - timedelta(minutes=10-i),
                    demand_level=0.5,
                    supply_level=0.5,
                    confidence=0.8,
                    strategy_used="market_balance"
                )
            )
        # Test circuit breaker activation performance
        start_time = time.time()
        # Simulate high volatility conditions
        with patch.object(pricing_engine, '_get_market_conditions') as mock_conditions:
            mock_conditions.return_value = Mock(
                demand_level=0.9,
                supply_level=0.3,
                price_volatility=0.8,  # High volatility
                utilization_rate=0.95
            )
            result = await pricing_engine.calculate_dynamic_price(
                resource_id="circuit_perf_gpu",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE
            )
        end_time = time.time()
        circuit_time = end_time - start_time
        print(f"Circuit breaker activation: {circuit_time:.4f}s")
        # Verify circuit breaker was activated
        assert "circuit_perf_gpu" in pricing_engine.circuit_breakers
        assert pricing_engine.circuit_breakers["circuit_perf_gpu"] is True
        # Performance assertions
        assert circuit_time < 0.1  # Circuit breaker should be very fast

    @pytest.mark.asyncio
    async def test_price_history_scaling(self, pricing_engine):
        """Test performance with large price history."""
        await pricing_engine.initialize()
        # Build large price history
        num_history_points = 10000
        resource_id = "scaling_test_gpu"
        print(f"Building {num_history_points} history points...")
        build_start = time.time()
        base_time = datetime.utcnow()
        for i in range(num_history_points):
            pricing_engine.pricing_history[resource_id] = pricing_engine.pricing_history.get(resource_id, [])
            pricing_engine.pricing_history[resource_id].append(
                Mock(
                    price=0.05 + (i * 0.00001),
                    demand_level=0.6 + (i % 10) * 0.02,
                    supply_level=0.7 - (i % 8) * 0.01,
                    confidence=0.8,
                    strategy_used="market_balance",
                    timestamp=base_time - timedelta(minutes=num_history_points-i)
                )
            )
        build_end = time.time()
        build_time = build_end - build_start
        print(f"History build time: {build_time:.4f}s")
        print(f"History size: {len(pricing_engine.pricing_history[resource_id])} points")
        # Test calculation performance with large history
        calc_start = time.time()
        result = await pricing_engine.calculate_dynamic_price(
            resource_id=resource_id,
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE
        )
        calc_end = time.time()
        calc_time = calc_end - calc_start
        print(f"Calculation with large history: {calc_time:.4f}s")
        # Performance assertions
        assert build_time < 5.0  # History building should be fast
        assert calc_time < 0.5  # Calculation should still be fast even with large history
        # NOTE(review): asserts the engine trims history to <=1000 entries —
        # presumably done inside calculate_dynamic_price; confirm, since the
        # raw append loop above inserts 10000 points.
        assert len(pricing_engine.pricing_history[resource_id]) <= 1000  # Should enforce limit

    def test_thread_safety(self, pricing_engine):
        """Test thread safety of pricing calculations."""
        # This test uses threading to simulate concurrent access; each thread
        # drives its own event loop so calls overlap at the engine level.
        def calculate_price_thread(thread_id, num_calculations, results):
            """Thread function for pricing calculations."""
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                for i in range(num_calculations):
                    result = loop.run_until_complete(
                        pricing_engine.calculate_dynamic_price(
                            resource_id=f"thread_test_gpu_{thread_id}_{i}",
                            resource_type=ResourceType.GPU,
                            base_price=0.05,
                            strategy=PricingStrategy.MARKET_BALANCE
                        )
                    )
                    # list.append is atomic under the GIL, so the shared
                    # `results` list needs no extra locking here.
                    results.append((thread_id, i, result.recommended_price))
            finally:
                loop.close()
        # Run multiple threads
        num_threads = 5
        calculations_per_thread = 20
        results = []
        threads = []
        start_time = time.time()
        # Create and start threads
        for thread_id in range(num_threads):
            thread = threading.Thread(
                target=calculate_price_thread,
                args=(thread_id, calculations_per_thread, results)
            )
            threads.append(thread)
            thread.start()
        # Wait for all threads to complete
        for thread in threads:
            thread.join()
        end_time = time.time()
        total_time = end_time - start_time
        print(f"Thread safety test:")
        print(f" {num_threads} threads, {calculations_per_thread} calculations each")
        print(f" Total time: {total_time:.4f}s")
        print(f" Results: {len(results)} calculations completed")
        # Verify all calculations completed
        assert len(results) == num_threads * calculations_per_thread
        # Verify no corruption in results
        for thread_id, calc_id, price in results:
            assert price > 0
            assert price < pricing_engine.max_price
class TestLoadTesting:
"""Load testing scenarios for the pricing system"""
    @pytest.mark.asyncio
    async def test_sustained_load(self, pricing_engine):
        """Test system performance under sustained load.

        NOTE(review): this class (TestLoadTesting) does not define a
        ``pricing_engine`` fixture; the one on TestPricingPerformance is not
        visible across sibling classes, so pytest will likely error with
        "fixture 'pricing_engine' not found" — move the fixture to
        conftest.py or duplicate it here.  Also ~30s of wall-clock sleep:
        mark as slow/load so it doesn't run on every CI pass.
        """
        await pricing_engine.initialize()
        # Sustained load parameters
        duration_seconds = 30
        calculations_per_second = 50
        total_calculations = duration_seconds * calculations_per_second
        results = []
        errors = []

        async def sustained_load_worker():
            """Worker for sustained load testing."""
            for i in range(total_calculations):
                try:
                    start_time = time.time()
                    result = await pricing_engine.calculate_dynamic_price(
                        resource_id=f"sustained_gpu_{i}",
                        resource_type=ResourceType.GPU,
                        base_price=0.05,
                        strategy=PricingStrategy.MARKET_BALANCE
                    )
                    end_time = time.time()
                    calculation_time = end_time - start_time
                    results.append({
                        "calculation_id": i,
                        "time": calculation_time,
                        "price": result.recommended_price,
                        "confidence": result.confidence_score
                    })
                    # Rate limiting
                    await asyncio.sleep(1.0 / calculations_per_second)
                except Exception as e:
                    # Collected rather than raised so the run completes and
                    # the error count can be asserted afterwards.
                    errors.append({"calculation_id": i, "error": str(e)})

        # Run sustained load test
        start_time = time.time()
        await sustained_load_worker()
        end_time = time.time()
        actual_duration = end_time - start_time
        # Analyze results
        calculation_times = [r["time"] for r in results]
        avg_time = statistics.mean(calculation_times)
        p95_time = sorted(calculation_times)[int(len(calculation_times) * 0.95)]
        p99_time = sorted(calculation_times)[int(len(calculation_times) * 0.99)]
        print(f"Sustained load test results:")
        print(f" Duration: {actual_duration:.2f}s (target: {duration_seconds}s)")
        print(f" Calculations: {len(results)} (target: {total_calculations})")
        print(f" Errors: {len(errors)}")
        print(f" Average time: {avg_time:.4f}s")
        print(f" 95th percentile: {p95_time:.4f}s")
        print(f" 99th percentile: {p99_time:.4f}s")
        # Performance assertions
        assert len(errors) == 0  # No errors should occur
        assert len(results) >= total_calculations * 0.95  # At least 95% of calculations completed
        assert avg_time < 0.1  # Average calculation time under 100ms
        assert p95_time < 0.2  # 95th percentile under 200ms
    @pytest.mark.asyncio
    async def test_burst_load(self, pricing_engine):
        """Test system performance under burst load.

        NOTE(review): relies on a ``pricing_engine`` fixture not defined in
        this class (see TestPricingPerformance) — pytest cannot resolve
        class-scoped fixtures across sibling classes; confirm and move the
        fixture to conftest.py.
        """
        await pricing_engine.initialize()
        # Burst load parameters
        num_bursts = 5
        calculations_per_burst = 100
        burst_interval = 2  # seconds between bursts
        burst_results = []
        for burst_id in range(num_bursts):
            print(f"Starting burst {burst_id + 1}/{num_bursts}")
            start_time = time.time()
            # Create burst of calculations
            tasks = []
            for i in range(calculations_per_burst):
                task = pricing_engine.calculate_dynamic_price(
                    resource_id=f"burst_gpu_{burst_id}_{i}",
                    resource_type=ResourceType.GPU,
                    base_price=0.05,
                    strategy=PricingStrategy.MARKET_BALANCE
                )
                tasks.append(task)
            # Execute burst
            results = await asyncio.gather(*tasks)
            end_time = time.time()
            burst_time = end_time - start_time
            burst_results.append({
                "burst_id": burst_id,
                "time": burst_time,
                "calculations": len(results),
                "throughput": len(results) / burst_time
            })
            print(f" Burst {burst_id + 1}: {len(results)} calculations in {burst_time:.4f}s")
            print(f" Throughput: {len(results) / burst_time:.2f} calc/s")
            # Wait between bursts
            if burst_id < num_bursts - 1:
                await asyncio.sleep(burst_interval)
        # Analyze burst performance
        throughputs = [b["throughput"] for b in burst_results]
        avg_throughput = statistics.mean(throughputs)
        min_throughput = min(throughputs)
        max_throughput = max(throughputs)
        print(f"Burst load test results:")
        print(f" Average throughput: {avg_throughput:.2f} calc/s")
        print(f" Min throughput: {min_throughput:.2f} calc/s")
        print(f" Max throughput: {max_throughput:.2f} calc/s")
        # Performance assertions
        assert avg_throughput > 100  # Should handle at least 100 calculations per second
        assert min_throughput > 50  # Even slowest burst should be reasonable
@pytest.mark.asyncio
async def test_stress_testing(self, pricing_engine):
    """Stress test with extreme load conditions"""
    await pricing_engine.initialize()
    # Stress test parameters
    stress_duration = 60  # seconds — this test intentionally runs for a full minute
    max_concurrent = 200
    calculation_interval = 0.01  # very aggressive
    results = []
    errors = []
    start_time = time.time()
    async def stress_worker():
        """Worker for stress testing"""
        calculation_id = 0
        # Loop until the wall-clock budget is exhausted, firing batches of
        # concurrent price calculations as fast as the interval allows.
        while time.time() - start_time < stress_duration:
            try:
                # Create batch of concurrent calculations
                # NOTE(review): min(max_concurrent, 50) always evaluates to 50,
                # so max_concurrent=200 never takes effect — confirm intent.
                batch_size = min(max_concurrent, 50)
                tasks = []
                for i in range(batch_size):
                    task = pricing_engine.calculate_dynamic_price(
                        resource_id=f"stress_gpu_{calculation_id}_{i}",
                        resource_type=ResourceType.GPU,
                        base_price=0.05,
                        strategy=PricingStrategy.MARKET_BALANCE
                    )
                    tasks.append(task)
                # Execute batch; return_exceptions keeps one failure from
                # aborting the rest of the batch.
                batch_results = await asyncio.gather(*tasks, return_exceptions=True)
                # Process results: split successes from raised exceptions.
                for result in batch_results:
                    if isinstance(result, Exception):
                        errors.append(str(result))
                    else:
                        results.append(result)
                calculation_id += batch_size
                # Very short interval
                await asyncio.sleep(calculation_interval)
            except Exception as e:
                # An error outside gather() (e.g. task creation) ends the run.
                errors.append(str(e))
                break
    # Run stress test
    await stress_worker()
    end_time = time.time()
    actual_duration = end_time - start_time
    # Analyze stress test results
    total_calculations = len(results)
    # Guard against division by zero when nothing completed at all.
    error_rate = len(errors) / (len(results) + len(errors)) if (len(results) + len(errors)) > 0 else 0
    throughput = total_calculations / actual_duration
    print(f"Stress test results:")
    print(f" Duration: {actual_duration:.2f}s")
    print(f" Calculations: {total_calculations}")
    print(f" Errors: {len(errors)}")
    print(f" Error rate: {error_rate:.2%}")
    print(f" Throughput: {throughput:.2f} calc/s")
    # Stress test assertions (more lenient than normal tests)
    assert error_rate < 0.05  # Error rate should be under 5%
    assert throughput > 20  # Should maintain reasonable throughput even under stress
    assert actual_duration >= stress_duration * 0.9  # Should run for most of the duration
# Allow running this performance suite directly (`python <file>`), delegating
# to pytest's own runner.
if __name__ == "__main__":
    pytest.main([__file__])

# ---------------------------------------------------------------------------
# (extraction artifact removed: git web-UI "View File" / diff header)
# The content below is a separate file: unit tests for the dynamic pricing
# engine (pricing calculations, strategies, and market data processing).
# ---------------------------------------------------------------------------
"""
Unit Tests for Dynamic Pricing Engine
Tests pricing calculations, strategies, and market data processing
"""
import pytest
import asyncio
from datetime import datetime, timedelta
from unittest.mock import Mock, patch, AsyncMock
import numpy as np
from app.services.dynamic_pricing_engine import (
DynamicPricingEngine,
PricingStrategy,
ResourceType,
PriceConstraints,
PricingFactors,
MarketConditions,
PriceTrend
)
from app.domain.pricing_strategies import StrategyLibrary
class TestDynamicPricingEngine:
    """Test cases for DynamicPricingEngine.

    Async engine entry points are exercised either through
    ``@pytest.mark.asyncio`` tests or via ``asyncio.run`` inside synchronous
    tests; market conditions are injected by patching
    ``_get_market_conditions`` so no live market data is required.
    """

    @pytest.fixture
    def pricing_engine(self):
        """Create a pricing engine instance for testing"""
        # Config mirrors production defaults except for test-sized bounds.
        config = {
            "min_price": 0.001,
            "max_price": 1000.0,
            "update_interval": 300,
            "forecast_horizon": 72,
            "max_volatility_threshold": 0.3,
            "circuit_breaker_threshold": 0.5
        }
        engine = DynamicPricingEngine(config)
        return engine

    @pytest.fixture
    def sample_market_conditions(self):
        """Create sample market conditions for testing"""
        # Moderately tight market: demand above supply, mild volatility.
        return MarketConditions(
            region="us_west",
            resource_type=ResourceType.GPU,
            demand_level=0.8,
            supply_level=0.6,
            average_price=0.05,
            price_volatility=0.15,
            utilization_rate=0.75,
            competitor_prices=[0.045, 0.055, 0.048, 0.052],
            market_sentiment=0.2
        )

    @pytest.mark.asyncio
    async def test_engine_initialization(self, pricing_engine):
        """Test engine initialization"""
        await pricing_engine.initialize()
        # Config values must be reflected verbatim on the engine.
        assert pricing_engine.min_price == 0.001
        assert pricing_engine.max_price == 1000.0
        assert pricing_engine.update_interval == 300
        assert pricing_engine.forecast_horizon == 72
        # Internal state containers start as (empty) dicts.
        assert isinstance(pricing_engine.pricing_history, dict)
        assert isinstance(pricing_engine.provider_strategies, dict)
        assert isinstance(pricing_engine.price_constraints, dict)

    @pytest.mark.asyncio
    async def test_calculate_dynamic_price_basic(self, pricing_engine, sample_market_conditions):
        """Test basic dynamic price calculation"""
        # Mock market conditions so the calculation is deterministic.
        with patch.object(pricing_engine, '_get_market_conditions', return_value=sample_market_conditions):
            result = await pricing_engine.calculate_dynamic_price(
                resource_id="test_gpu_1",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE,
                region="us_west"
            )
        assert result.resource_id == "test_gpu_1"
        assert result.resource_type == ResourceType.GPU
        assert result.current_price == 0.05
        # Recommended price must stay within the engine's global bounds.
        assert result.recommended_price > 0
        assert result.recommended_price <= pricing_engine.max_price
        assert result.recommended_price >= pricing_engine.min_price
        assert isinstance(result.price_trend, PriceTrend)
        assert 0 <= result.confidence_score <= 1
        assert isinstance(result.factors_exposed, dict)
        assert isinstance(result.reasoning, list)
        assert result.strategy_used == PricingStrategy.MARKET_BALANCE

    @pytest.mark.asyncio
    async def test_pricing_strategies_different_results(self, pricing_engine, sample_market_conditions):
        """Test that different strategies produce different results"""
        with patch.object(pricing_engine, '_get_market_conditions', return_value=sample_market_conditions):
            # Test aggressive growth strategy
            result_growth = await pricing_engine.calculate_dynamic_price(
                resource_id="test_gpu_1",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.AGGRESSIVE_GROWTH,
                region="us_west"
            )
            # Test profit maximization strategy
            result_profit = await pricing_engine.calculate_dynamic_price(
                resource_id="test_gpu_1",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.PROFIT_MAXIMIZATION,
                region="us_west"
            )
        # Results should be different: growth underprices, profit overprices.
        assert result_growth.recommended_price != result_profit.recommended_price
        assert result_growth.strategy_used == PricingStrategy.AGGRESSIVE_GROWTH
        assert result_profit.strategy_used == PricingStrategy.PROFIT_MAXIMIZATION

    @pytest.mark.asyncio
    async def test_price_constraints_application(self, pricing_engine, sample_market_conditions):
        """Test that price constraints are properly applied"""
        constraints = PriceConstraints(
            min_price=0.03,
            max_price=0.08,
            max_change_percent=0.2
        )
        with patch.object(pricing_engine, '_get_market_conditions', return_value=sample_market_conditions):
            result = await pricing_engine.calculate_dynamic_price(
                resource_id="test_gpu_1",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.PROFIT_MAXIMIZATION,
                constraints=constraints,
                region="us_west"
            )
        # Should respect constraints even under a price-raising strategy.
        assert result.recommended_price >= constraints.min_price
        assert result.recommended_price <= constraints.max_price

    @pytest.mark.asyncio
    async def test_circuit_breaker_activation(self, pricing_engine, sample_market_conditions):
        """Test circuit breaker activation during high volatility"""
        # Create high volatility conditions: 0.6 exceeds the fixture's
        # circuit_breaker_threshold of 0.5.
        high_volatility_conditions = MarketConditions(
            region="us_west",
            resource_type=ResourceType.GPU,
            demand_level=0.9,
            supply_level=0.3,
            average_price=0.05,
            price_volatility=0.6,  # High volatility
            utilization_rate=0.95,
            competitor_prices=[0.045, 0.055, 0.048, 0.052],
            market_sentiment=-0.3
        )
        # Add some pricing history so the breaker has a reference price.
        pricing_engine.pricing_history["test_gpu_1"] = [
            Mock(price=0.05, timestamp=datetime.utcnow() - timedelta(minutes=10))
        ]
        with patch.object(pricing_engine, '_get_market_conditions', return_value=high_volatility_conditions):
            result = await pricing_engine.calculate_dynamic_price(
                resource_id="test_gpu_1",
                resource_type=ResourceType.GPU,
                base_price=0.05,
                strategy=PricingStrategy.MARKET_BALANCE,
                region="us_west"
            )
        # Circuit breaker should be activated for the resource.
        assert "test_gpu_1" in pricing_engine.circuit_breakers
        assert pricing_engine.circuit_breakers["test_gpu_1"] is True

    @pytest.mark.asyncio
    async def test_price_forecast_generation(self, pricing_engine):
        """Test price forecast generation"""
        # Add historical data: 48 hourly points with a mild upward price drift.
        base_time = datetime.utcnow()
        for i in range(48):  # 48 data points
            # NOTE(review): this .get-reassignment is a no-op after the first
            # iteration; a single initialization before the loop would suffice.
            pricing_engine.pricing_history["test_gpu_1"] = pricing_engine.pricing_history.get("test_gpu_1", [])
            pricing_engine.pricing_history["test_gpu_1"].append(
                Mock(
                    price=0.05 + (i * 0.001),
                    demand_level=0.6 + (i % 10) * 0.02,
                    supply_level=0.7 - (i % 8) * 0.01,
                    confidence=0.8,
                    strategy_used="market_balance",
                    timestamp=base_time - timedelta(hours=48-i)
                )
            )
        forecast = await pricing_engine.get_price_forecast("test_gpu_1", 24)
        # One forecast point per requested hour, each fully populated and
        # bounded by the engine's price limits.
        assert len(forecast) == 24
        for point in forecast:
            assert hasattr(point, 'timestamp')
            assert hasattr(point, 'price')
            assert hasattr(point, 'demand_level')
            assert hasattr(point, 'supply_level')
            assert hasattr(point, 'confidence')
            assert 0 <= point.confidence <= 1
            assert point.price >= pricing_engine.min_price
            assert point.price <= pricing_engine.max_price

    @pytest.mark.asyncio
    async def test_provider_strategy_management(self, pricing_engine):
        """Test setting and retrieving provider strategies"""
        constraints = PriceConstraints(
            min_price=0.02,
            max_price=0.10
        )
        # Set strategy
        success = await pricing_engine.set_provider_strategy(
            provider_id="test_provider",
            strategy=PricingStrategy.AGGRESSIVE_GROWTH,
            constraints=constraints
        )
        # Both the strategy and its constraints must be stored per provider.
        assert success is True
        assert pricing_engine.provider_strategies["test_provider"] == PricingStrategy.AGGRESSIVE_GROWTH
        assert pricing_engine.price_constraints["test_provider"] == constraints

    def test_demand_multiplier_calculation(self, pricing_engine):
        """Test demand multiplier calculation"""
        # High demand should push prices up (multiplier > 1).
        multiplier_high = pricing_engine._calculate_demand_multiplier(0.9, PricingStrategy.MARKET_BALANCE)
        assert multiplier_high > 1.0
        # Low demand should pull prices down (multiplier < 1).
        multiplier_low = pricing_engine._calculate_demand_multiplier(0.2, PricingStrategy.MARKET_BALANCE)
        assert multiplier_low < 1.0
        # Aggressive growth strategy should have lower multipliers
        multiplier_growth = pricing_engine._calculate_demand_multiplier(0.8, PricingStrategy.AGGRESSIVE_GROWTH)
        multiplier_balance = pricing_engine._calculate_demand_multiplier(0.8, PricingStrategy.MARKET_BALANCE)
        assert multiplier_growth < multiplier_balance

    def test_supply_multiplier_calculation(self, pricing_engine):
        """Test supply multiplier calculation"""
        # Low supply (should increase prices)
        multiplier_low_supply = pricing_engine._calculate_supply_multiplier(0.2, PricingStrategy.MARKET_BALANCE)
        assert multiplier_low_supply > 1.0
        # High supply (should decrease prices)
        multiplier_high_supply = pricing_engine._calculate_supply_multiplier(0.9, PricingStrategy.MARKET_BALANCE)
        assert multiplier_high_supply < 1.0

    def test_time_multiplier_calculation(self, pricing_engine):
        """Test time-based multiplier calculation"""
        # Test different hours
        business_hour_multiplier = pricing_engine._calculate_time_multiplier()
        # Mock different hours by temporarily changing the method
        # NOTE(review): patching 'datetime.datetime' only reaches code that
        # does `import datetime`; if the engine module uses
        # `from datetime import datetime`, this mock never takes effect and
        # the test depends on the real clock — confirm the patch target
        # (e.g. 'app.services.dynamic_pricing_engine.datetime').
        with patch('datetime.datetime') as mock_datetime:
            mock_datetime.utcnow.return_value.hour = 14  # 2 PM business hour
            business_hour_multiplier = pricing_engine._calculate_time_multiplier()
            assert business_hour_multiplier > 1.0
            mock_datetime.utcnow.return_value.hour = 3  # 3 AM late night
            late_night_multiplier = pricing_engine._calculate_time_multiplier()
            assert late_night_multiplier < 1.0

    def test_competition_multiplier_calculation(self, pricing_engine):
        """Test competition-based multiplier calculation"""
        competitor_prices = [0.045, 0.055, 0.048, 0.052]
        base_price = 0.05
        # Competitive response strategy
        multiplier_competitive = pricing_engine._calculate_competition_multiplier(
            base_price, competitor_prices, PricingStrategy.COMPETITIVE_RESPONSE
        )
        # Profit maximization strategy
        multiplier_profit = pricing_engine._calculate_competition_multiplier(
            base_price, competitor_prices, PricingStrategy.PROFIT_MAXIMIZATION
        )
        # Competitive strategy should be more responsive to competition
        # (only the return types are asserted here, not relative magnitude).
        assert isinstance(multiplier_competitive, float)
        assert isinstance(multiplier_profit, float)

    def test_price_trend_determination(self, pricing_engine):
        """Test price trend determination"""
        # Create increasing trend: monotonically rising history plus a new
        # price above the last point should classify as INCREASING.
        pricing_engine.pricing_history["test_resource"] = [
            Mock(price=0.05, timestamp=datetime.utcnow() - timedelta(minutes=50)),
            Mock(price=0.051, timestamp=datetime.utcnow() - timedelta(minutes=40)),
            Mock(price=0.052, timestamp=datetime.utcnow() - timedelta(minutes=30)),
            Mock(price=0.053, timestamp=datetime.utcnow() - timedelta(minutes=20)),
            Mock(price=0.054, timestamp=datetime.utcnow() - timedelta(minutes=10))
        ]
        trend = pricing_engine._determine_price_trend("test_resource", 0.055)
        assert trend == PriceTrend.INCREASING
        # Create decreasing trend: the mirror case.
        pricing_engine.pricing_history["test_resource"] = [
            Mock(price=0.055, timestamp=datetime.utcnow() - timedelta(minutes=50)),
            Mock(price=0.054, timestamp=datetime.utcnow() - timedelta(minutes=40)),
            Mock(price=0.053, timestamp=datetime.utcnow() - timedelta(minutes=30)),
            Mock(price=0.052, timestamp=datetime.utcnow() - timedelta(minutes=20)),
            Mock(price=0.051, timestamp=datetime.utcnow() - timedelta(minutes=10))
        ]
        trend = pricing_engine._determine_price_trend("test_resource", 0.05)
        assert trend == PriceTrend.DECREASING

    def test_confidence_score_calculation(self, pricing_engine, sample_market_conditions):
        """Test confidence score calculation"""
        factors = PricingFactors(
            base_price=0.05,
            demand_level=0.8,
            supply_level=0.6,
            market_volatility=0.1,
            confidence_score=0.8
        )
        confidence = pricing_engine._calculate_confidence_score(factors, sample_market_conditions)
        # Confidence is a float clamped to [0, 1].
        assert 0 <= confidence <= 1
        assert isinstance(confidence, float)

    def test_pricing_factors_calculation(self, pricing_engine, sample_market_conditions):
        """Test pricing factors calculation"""
        # Sync test driving an async internal via asyncio.run.
        factors = asyncio.run(pricing_engine._calculate_pricing_factors(
            resource_id="test_gpu_1",
            resource_type=ResourceType.GPU,
            base_price=0.05,
            strategy=PricingStrategy.MARKET_BALANCE,
            market_conditions=sample_market_conditions
        ))
        # Each multiplier must land in its documented clamp range.
        assert isinstance(factors, PricingFactors)
        assert factors.base_price == 0.05
        assert 0 <= factors.demand_multiplier <= 3.0
        assert 0.8 <= factors.supply_multiplier <= 2.5
        assert 0.7 <= factors.time_multiplier <= 1.5
        assert 0.9 <= factors.performance_multiplier <= 1.3
        assert 0.8 <= factors.competition_multiplier <= 1.4
        assert 0.9 <= factors.sentiment_multiplier <= 1.2
        assert 0.8 <= factors.regional_multiplier <= 1.3

    def test_strategy_pricing_application(self, pricing_engine, sample_market_conditions):
        """Test strategy-specific pricing application"""
        factors = PricingFactors(
            base_price=0.05,
            demand_multiplier=1.2,
            supply_multiplier=1.1,
            time_multiplier=1.0,
            performance_multiplier=1.05,
            competition_multiplier=0.95,
            sentiment_multiplier=1.02,
            regional_multiplier=1.0
        )
        # Test different strategies with identical factor inputs.
        price_aggressive = asyncio.run(pricing_engine._apply_strategy_pricing(
            0.05, factors, PricingStrategy.AGGRESSIVE_GROWTH, sample_market_conditions
        ))
        price_profit = asyncio.run(pricing_engine._apply_strategy_pricing(
            0.05, factors, PricingStrategy.PROFIT_MAXIMIZATION, sample_market_conditions
        ))
        # Should produce different results
        assert price_aggressive != price_profit
        assert price_aggressive > 0
        assert price_profit > 0

    def test_constraints_and_risk_application(self, pricing_engine):
        """Test constraints and risk management application"""
        constraints = PriceConstraints(
            min_price=0.03,
            max_price=0.08,
            max_change_percent=0.2
        )
        factors = PricingFactors(
            base_price=0.05,
            market_volatility=0.1
        )
        # Test normal price within constraints
        normal_price = asyncio.run(pricing_engine._apply_constraints_and_risk(
            "test_resource", 0.06, constraints, factors
        ))
        assert 0.03 <= normal_price <= 0.08
        # Test price above max constraint — must be capped.
        high_price = asyncio.run(pricing_engine._apply_constraints_and_risk(
            "test_resource", 0.10, constraints, factors
        ))
        assert high_price <= constraints.max_price
        # Test price below min constraint — must be floored.
        low_price = asyncio.run(pricing_engine._apply_constraints_and_risk(
            "test_resource", 0.01, constraints, factors
        ))
        assert low_price >= constraints.min_price

    def test_price_point_storage(self, pricing_engine):
        """Test price point storage in history"""
        factors = PricingFactors(
            base_price=0.05,
            demand_level=0.8,
            supply_level=0.6,
            confidence_score=0.85
        )
        asyncio.run(pricing_engine._store_price_point(
            "test_resource", 0.055, factors, PricingStrategy.MARKET_BALANCE
        ))
        # Exactly one history entry is created, carrying the inputs verbatim
        # (the strategy is persisted by its string value).
        assert "test_resource" in pricing_engine.pricing_history
        assert len(pricing_engine.pricing_history["test_resource"]) == 1
        point = pricing_engine.pricing_history["test_resource"][0]
        assert point.price == 0.055
        assert point.demand_level == 0.8
        assert point.supply_level == 0.6
        assert point.confidence == 0.85
        assert point.strategy_used == "market_balance"

    def test_seasonal_factor_calculation(self, pricing_engine):
        """Test seasonal factor calculation"""
        # Test morning hours
        morning_factor = pricing_engine._calculate_seasonal_factor(9)
        assert morning_factor > 1.0
        # Test business peak — strongest uplift of the day.
        peak_factor = pricing_engine._calculate_seasonal_factor(14)
        assert peak_factor > morning_factor
        # Test late night — below-baseline pricing.
        night_factor = pricing_engine._calculate_seasonal_factor(3)
        assert night_factor < 1.0

    def test_demand_supply_forecasting(self, pricing_engine):
        """Test demand and supply level forecasting"""
        # Rising demand / falling supply histories; forecast one step ahead.
        demand_history = [0.6, 0.7, 0.8, 0.75, 0.9, 0.85]
        supply_history = [0.7, 0.6, 0.5, 0.55, 0.4, 0.45]
        demand_forecast = pricing_engine._forecast_demand_level(demand_history, 1)
        supply_forecast = pricing_engine._forecast_supply_level(supply_history, 1)
        # Forecasts are floats normalized to [0, 1].
        assert 0 <= demand_forecast <= 1
        assert 0 <= supply_forecast <= 1
        assert isinstance(demand_forecast, float)
        assert isinstance(supply_forecast, float)
class TestPricingFactors:
    """Unit tests for the PricingFactors dataclass."""

    def test_pricing_factors_creation(self):
        """A factors object built from only a base price gets neutral defaults."""
        factors = PricingFactors(base_price=0.05)
        assert factors.base_price == 0.05
        # Every multiplier defaults to the identity value 1.0.
        for attr in (
            "demand_multiplier",
            "supply_multiplier",
            "time_multiplier",
            "performance_multiplier",
            "competition_multiplier",
            "sentiment_multiplier",
            "regional_multiplier",
        ):
            assert getattr(factors, attr) == 1.0
        # Confidence and risk carry their own non-identity defaults.
        assert factors.confidence_score == 0.8
        assert factors.risk_adjustment == 0.0

    def test_pricing_factors_with_custom_values(self):
        """Explicit keyword arguments override the field defaults."""
        factors = PricingFactors(
            base_price=0.05,
            demand_multiplier=1.5,
            supply_multiplier=0.8,
            confidence_score=0.9,
        )
        assert factors.base_price == 0.05
        assert factors.demand_multiplier == 1.5
        assert factors.supply_multiplier == 0.8
        assert factors.confidence_score == 0.9
class TestPriceConstraints:
    """Unit tests for the PriceConstraints dataclass."""

    def test_price_constraints_creation(self):
        """Default-constructed constraints leave the price bounds unset."""
        constraints = PriceConstraints()
        # No hard bounds by default.
        assert constraints.min_price is None
        assert constraints.max_price is None
        # Rate-of-change and timing guards carry fixed defaults.
        assert constraints.max_change_percent == 0.5
        assert constraints.min_change_interval == 300
        assert constraints.strategy_lock_period == 3600

    def test_price_constraints_with_custom_values(self):
        """Constructor keywords override the defaults."""
        constraints = PriceConstraints(
            min_price=0.02,
            max_price=0.10,
            max_change_percent=0.3,
        )
        assert constraints.min_price == 0.02
        assert constraints.max_price == 0.10
        assert constraints.max_change_percent == 0.3
class TestMarketConditions:
    """Unit tests for the MarketConditions dataclass."""

    def test_market_conditions_creation(self):
        """Constructor arguments land on the instance; omitted fields default."""
        conditions = MarketConditions(
            region="us_west",
            resource_type=ResourceType.GPU,
            demand_level=0.8,
            supply_level=0.6,
            average_price=0.05,
            price_volatility=0.15,
            utilization_rate=0.75,
        )
        # Verify each supplied field round-trips unchanged.
        expected = {
            "region": "us_west",
            "resource_type": ResourceType.GPU,
            "demand_level": 0.8,
            "supply_level": 0.6,
            "average_price": 0.05,
            "price_volatility": 0.15,
            "utilization_rate": 0.75,
        }
        for field_name, value in expected.items():
            assert getattr(conditions, field_name) == value
        # Fields not supplied fall back to their declared defaults.
        assert conditions.competitor_prices == []
        assert conditions.market_sentiment == 0.0
        assert isinstance(conditions.timestamp, datetime)
class TestStrategyLibrary:
    """Unit tests for the pre-built StrategyLibrary configurations."""

    def test_get_all_strategies(self):
        """The library exposes a non-empty, well-formed strategy catalogue."""
        strategies = StrategyLibrary.get_all_strategies()
        assert isinstance(strategies, dict)
        assert len(strategies) > 0
        # The three core strategies must always be present.
        for required in (
            PricingStrategy.AGGRESSIVE_GROWTH,
            PricingStrategy.PROFIT_MAXIMIZATION,
            PricingStrategy.MARKET_BALANCE,
        ):
            assert required in strategies
        # Every entry is keyed by its own strategy type and fully configured.
        for strategy_type, config in strategies.items():
            assert config.strategy_type == strategy_type
            assert config.name is not None
            assert config.description is not None
            assert config.parameters is not None
            assert isinstance(config.parameters.base_multiplier, float)

    def test_aggressive_growth_strategy(self):
        """Growth strategy underprices the market and accepts high risk."""
        strategy = StrategyLibrary.get_aggressive_growth_strategy()
        assert strategy.strategy_type == PricingStrategy.AGGRESSIVE_GROWTH
        assert strategy.parameters.base_multiplier < 1.0  # Lower prices for growth
        assert strategy.parameters.growth_target_rate > 0.2  # High growth target
        assert strategy.risk_tolerance.value == "aggressive"

    def test_profit_maximization_strategy(self):
        """Profit strategy prices above baseline and tracks demand closely."""
        strategy = StrategyLibrary.get_profit_maximization_strategy()
        assert strategy.strategy_type == PricingStrategy.PROFIT_MAXIMIZATION
        assert strategy.parameters.base_multiplier > 1.0  # Higher prices for profit
        assert strategy.parameters.profit_target_margin > 0.3  # High profit target
        assert strategy.parameters.demand_sensitivity > 0.5  # Demand sensitive

    def test_market_balance_strategy(self):
        """Balanced strategy is price-neutral with moderate risk tolerance."""
        strategy = StrategyLibrary.get_market_balance_strategy()
        assert strategy.strategy_type == PricingStrategy.MARKET_BALANCE
        assert strategy.parameters.base_multiplier == 1.0  # Balanced pricing
        assert strategy.parameters.volatility_threshold < 0.2  # Lower volatility tolerance
        assert strategy.risk_tolerance.value == "moderate"
# Allow running this unit-test module directly (`python <file>`), delegating
# to pytest's own runner.
if __name__ == "__main__":
    pytest.main([__file__])