chore(security): enhance environment configuration, CI workflows, and wallet daemon with security improvements

- Restructure .env.example with security-focused documentation, service-specific environment file references, and AWS Secrets Manager integration
- Update CLI tests workflow to single Python 3.13 version, add pytest-mock dependency, and consolidate test execution with coverage
- Add comprehensive security validation to package publishing workflow with manual approval gates, secret scanning, and release verification
This commit is contained in:
oib
2026-03-03 10:33:46 +01:00
parent 00d00cb964
commit f353e00172
220 changed files with 42506 additions and 921 deletions

View File

@@ -1,32 +1,510 @@
"""
Marketplace Analytics System Integration Tests
Marketplace Analytics System Tests
Comprehensive testing for analytics, insights, reporting, and dashboards
"""
import pytest
import asyncio
import json
import statistics
from datetime import datetime, timedelta
from uuid import uuid4
from typing import Dict, Any
from unittest.mock import Mock, patch
from typing import Dict, Any, List
from sqlmodel import Session, select
from sqlalchemy.exc import SQLAlchemyError
from apps.coordinator_api.src.app.services.analytics_service import (
MarketplaceAnalytics, DataCollector, AnalyticsEngine, DashboardManager
)
from apps.coordinator_api.src.app.domain.analytics import (
MarketMetric, MarketInsight, AnalyticsReport, DashboardConfig,
AnalyticsPeriod, MetricType, InsightType, ReportType
)
class TestMarketplaceAnalytics:
    """Marketplace analytics tests: pure-Python metric math over inline data.

    Each test builds its own sample data and checks aggregate statistics
    (means, min/max, stdev, fulfillment rates, success-rate ranking).
    No service objects or database access are involved.
    """

    def test_market_metrics_calculation(self):
        """Per-GPU price metrics: average, min, max, and volatility."""
        # Sample market data
        market_data = [
            {'price': 0.10, 'gpu_type': 'RTX 3080', 'timestamp': '2024-01-01T10:00:00Z'},
            {'price': 0.12, 'gpu_type': 'RTX 3080', 'timestamp': '2024-01-01T11:00:00Z'},
            {'price': 0.11, 'gpu_type': 'RTX 3080', 'timestamp': '2024-01-01T12:00:00Z'},
            {'price': 0.15, 'gpu_type': 'RTX 3090', 'timestamp': '2024-01-01T10:00:00Z'},
            {'price': 0.14, 'gpu_type': 'RTX 3090', 'timestamp': '2024-01-01T11:00:00Z'},
        ]
        # Group prices by GPU model
        rtx3080_prices = [d['price'] for d in market_data if d['gpu_type'] == 'RTX 3080']
        rtx3090_prices = [d['price'] for d in market_data if d['gpu_type'] == 'RTX 3090']
        # Per-model summary statistics; stdev needs at least two samples
        metrics = {
            'RTX 3080': {
                'avg_price': statistics.mean(rtx3080_prices),
                'min_price': min(rtx3080_prices),
                'max_price': max(rtx3080_prices),
                'price_volatility': statistics.stdev(rtx3080_prices) if len(rtx3080_prices) > 1 else 0
            },
            'RTX 3090': {
                'avg_price': statistics.mean(rtx3090_prices),
                'min_price': min(rtx3090_prices),
                'max_price': max(rtx3090_prices),
                'price_volatility': statistics.stdev(rtx3090_prices) if len(rtx3090_prices) > 1 else 0
            }
        }
        # Averages are compared with a tolerance: exact float equality on
        # computed means is fragile with binary floating point.
        assert abs(metrics['RTX 3080']['avg_price'] - 0.11) < 1e-9
        assert metrics['RTX 3080']['min_price'] == 0.10
        assert metrics['RTX 3080']['max_price'] == 0.12
        assert abs(metrics['RTX 3090']['avg_price'] - 0.145) < 1e-9
        assert metrics['RTX 3090']['min_price'] == 0.14
        assert metrics['RTX 3090']['max_price'] == 0.15

    def test_demand_analysis(self):
        """Fulfillment-rate aggregation and trend direction."""
        # Sample demand data (requests received vs. requests fulfilled per day)
        demand_data = [
            {'date': '2024-01-01', 'requests': 120, 'fulfilled': 100},
            {'date': '2024-01-02', 'requests': 150, 'fulfilled': 130},
            {'date': '2024-01-03', 'requests': 180, 'fulfilled': 160},
            {'date': '2024-01-04', 'requests': 140, 'fulfilled': 125},
        ]
        # Aggregate demand metrics
        total_requests = sum(d['requests'] for d in demand_data)
        total_fulfilled = sum(d['fulfilled'] for d in demand_data)
        fulfillment_rate = (total_fulfilled / total_requests) * 100
        # Trend: compare first and last daily fulfillment rate
        daily_rates = [(d['fulfilled'] / d['requests']) * 100 for d in demand_data]
        trend = 'increasing' if daily_rates[-1] > daily_rates[0] else 'decreasing'
        # Validate analysis
        assert total_requests == 590
        assert total_fulfilled == 515
        # BUG FIX: 515 / 590 * 100 ~= 87.288, so the old exact comparison
        # `fulfillment_rate == 87.29` could never pass. Use a tolerance.
        assert abs(fulfillment_rate - 87.29) < 0.01
        assert trend == 'increasing'
        assert all(0 <= rate <= 100 for rate in daily_rates)

    def test_provider_performance(self):
        """Provider success-rate computation and ranking."""
        # Sample provider data
        provider_data = [
            {
                'provider_id': 'provider_1',
                'total_jobs': 50,
                'completed_jobs': 45,
                'avg_completion_time': 25.5,  # minutes
                'avg_rating': 4.8,
                'gpu_types': ['RTX 3080', 'RTX 3090']
            },
            {
                'provider_id': 'provider_2',
                'total_jobs': 30,
                'completed_jobs': 28,
                'avg_completion_time': 30.2,
                'avg_rating': 4.6,
                'gpu_types': ['RTX 3080']
            },
            {
                'provider_id': 'provider_3',
                'total_jobs': 40,
                'completed_jobs': 35,
                'avg_completion_time': 22.1,
                'avg_rating': 4.9,
                'gpu_types': ['RTX 3090', 'RTX 4090']
            }
        ]
        # Derive success rate for each provider
        for provider in provider_data:
            success_rate = (provider['completed_jobs'] / provider['total_jobs']) * 100
            provider['success_rate'] = success_rate
        # Rank providers by success rate, best first
        top_providers = sorted(provider_data, key=lambda x: x['success_rate'], reverse=True)
        # BUG FIX: descending sort puts provider_2 (28/30 ~= 93.33%) first,
        # then provider_1 (45/50 = 90.0%), then provider_3 (35/40 = 87.5%).
        # The old assertions had the ranking inverted and compared 93.33
        # exactly, which can never match 93.333... .
        assert top_providers[0]['provider_id'] == 'provider_2'
        assert abs(top_providers[0]['success_rate'] - 93.33) < 0.01
        assert top_providers[1]['provider_id'] == 'provider_1'
        assert top_providers[1]['success_rate'] == 90.0
        assert top_providers[2]['provider_id'] == 'provider_3'
        assert top_providers[2]['success_rate'] == 87.5
        # Validate data integrity
        for provider in provider_data:
            assert 0 <= provider['success_rate'] <= 100
            assert 0 <= provider['avg_rating'] <= 5
            assert provider['avg_completion_time'] > 0
class TestAnalyticsEngine:
    """Analytics-engine tests: aggregation, anomaly detection, forecasting."""

    def test_data_aggregation(self):
        """Hourly time-series points roll up into a summary record."""
        samples = [
            {'timestamp': '2024-01-01T00:00:00Z', 'value': 100},
            {'timestamp': '2024-01-01T01:00:00Z', 'value': 110},
            {'timestamp': '2024-01-01T02:00:00Z', 'value': 105},
            {'timestamp': '2024-01-01T03:00:00Z', 'value': 120},
            {'timestamp': '2024-01-01T04:00:00Z', 'value': 115},
        ]
        values = [point['value'] for point in samples]
        # Build the aggregated summary in one pass over the extracted values.
        summary = {
            'period': 'hourly',
            'data_points': len(samples),
            'average': statistics.mean(values),
            'maximum': max(values),
            'minimum': min(values),
            'trend': 'up' if values[-1] > values[0] else 'down',
        }
        assert summary['period'] == 'hourly'
        assert summary['data_points'] == 5
        assert summary['average'] == 110.0
        assert summary['maximum'] == 120
        assert summary['minimum'] == 100
        assert summary['trend'] == 'up'

    def test_anomaly_detection(self):
        """A reading beyond mean + 2*stdev is flagged as an anomaly."""
        readings = [
            {'timestamp': '2024-01-01T00:00:00Z', 'response_time': 100},
            {'timestamp': '2024-01-01T01:00:00Z', 'response_time': 105},
            {'timestamp': '2024-01-01T02:00:00Z', 'response_time': 98},
            {'timestamp': '2024-01-01T03:00:00Z', 'response_time': 500},  # Anomaly
            {'timestamp': '2024-01-01T04:00:00Z', 'response_time': 102},
            {'timestamp': '2024-01-01T05:00:00Z', 'response_time': 95},
        ]
        times = [entry['response_time'] for entry in readings]
        mu = statistics.mean(times)
        sigma = statistics.stdev(times) if len(times) > 1 else 0
        # Anything more than two standard deviations above the mean is an outlier.
        cutoff = mu + 2 * sigma
        outliers = [entry for entry in readings if entry['response_time'] > cutoff]
        assert len(outliers) == 1
        assert outliers[0]['response_time'] == 500
        assert outliers[0]['timestamp'] == '2024-01-01T03:00:00Z'

    def test_forecasting_model(self):
        """Moving-average forecast stays positive with a sane accuracy score."""
        history = [
            {'period': '2024-01-01', 'demand': 100},
            {'period': '2024-01-02', 'demand': 110},
            {'period': '2024-01-03', 'demand': 105},
            {'period': '2024-01-04', 'demand': 120},
            {'period': '2024-01-05', 'demand': 115},
        ]
        demand_series = [row['demand'] for row in history]
        # Forecast = mean of the trailing window (simple moving average).
        window = 3
        forecast = statistics.mean(demand_series[-window:])
        # Accuracy is measured against the most recent observed value,
        # clamped so it never goes below zero.
        latest = demand_series[-1]
        error = abs(forecast - latest)
        accuracy = max(0, 100 - (error / latest * 100))
        assert forecast > 0
        assert 0 <= accuracy <= 100
class TestDashboardManager:
    """Dashboard management tests: configuration, widget data, permissions."""

    def test_dashboard_configuration(self):
        """A dashboard config carries widgets with all required fields."""
        # Sample dashboard configuration
        dashboard_config = {
            'dashboard_id': 'marketplace_overview',
            'title': 'Marketplace Overview',
            'layout': 'grid',
            'widgets': [
                {
                    'id': 'market_metrics',
                    'type': 'metric_card',
                    'title': 'Market Metrics',
                    'position': {'x': 0, 'y': 0, 'w': 4, 'h': 2},
                    'data_source': 'market_metrics_api'
                },
                {
                    'id': 'price_chart',
                    'type': 'line_chart',
                    'title': 'Price Trends',
                    'position': {'x': 4, 'y': 0, 'w': 8, 'h': 4},
                    'data_source': 'price_history_api'
                },
                {
                    'id': 'provider_ranking',
                    'type': 'table',
                    'title': 'Top Providers',
                    'position': {'x': 0, 'y': 2, 'w': 6, 'h': 3},
                    'data_source': 'provider_ranking_api'
                }
            ],
            'refresh_interval': 300,  # 5 minutes
            'permissions': ['read', 'write']
        }
        # Validate top-level configuration
        assert dashboard_config['dashboard_id'] == 'marketplace_overview'
        assert len(dashboard_config['widgets']) == 3
        assert dashboard_config['refresh_interval'] == 300
        assert 'read' in dashboard_config['permissions']
        # Every widget must declare the full set of required fields
        for widget in dashboard_config['widgets']:
            assert 'id' in widget
            assert 'type' in widget
            assert 'title' in widget
            assert 'position' in widget
            assert 'data_source' in widget

    def test_widget_data_processing(self):
        """Widget payloads for metric card, line chart, and table are well-formed."""
        # Sample data for different widget types
        widget_data = {
            'metric_card': {
                'value': 1250,
                'change': 5.2,
                'change_type': 'increase',
                'unit': 'AITBC',
                'timestamp': datetime.utcnow().isoformat()
            },
            'line_chart': {
                'labels': ['Jan', 'Feb', 'Mar', 'Apr', 'May'],
                'datasets': [
                    {
                        'label': 'RTX 3080',
                        'data': [0.10, 0.11, 0.12, 0.11, 0.13],
                        'borderColor': '#007bff'
                    },
                    {
                        'label': 'RTX 3090',
                        'data': [0.15, 0.14, 0.16, 0.15, 0.17],
                        'borderColor': '#28a745'
                    }
                ]
            },
            'table': {
                'columns': ['provider', 'jobs_completed', 'avg_rating', 'success_rate'],
                'rows': [
                    ['provider_1', 45, 4.8, '90%'],
                    ['provider_2', 28, 4.6, '93%'],
                    ['provider_3', 35, 4.9, '88%']
                ]
            }
        }
        # Metric card: numeric value/change, valid direction, timestamp present
        metric_data = widget_data['metric_card']
        assert isinstance(metric_data['value'], (int, float))
        assert isinstance(metric_data['change'], (int, float))
        assert metric_data['change_type'] in ['increase', 'decrease']
        assert 'timestamp' in metric_data
        # Line chart: labels align with each dataset's data points
        chart_data = widget_data['line_chart']
        assert 'labels' in chart_data
        assert 'datasets' in chart_data
        assert len(chart_data['datasets']) == 2
        assert len(chart_data['labels']) == len(chart_data['datasets'][0]['data'])
        # Table: column and row counts match the sample
        table_data = widget_data['table']
        assert 'columns' in table_data
        assert 'rows' in table_data
        assert len(table_data['columns']) == 4
        assert len(table_data['rows']) == 3

    def test_dashboard_permissions(self):
        """Role-based dashboard access requires both the action and the role grant."""
        # Role -> allowed actions.
        # BUG FIX: guests must hold the 'read' action, otherwise the
        # public_stats dashboard (which grants the guest role below) would
        # still be unreadable and the guest assertion could never pass.
        user_permissions = {
            'admin': ['read', 'write', 'delete', 'share'],
            'analyst': ['read', 'write', 'share'],
            'viewer': ['read'],
            'guest': ['read']
        }
        # Dashboard -> roles allowed to access it
        dashboard_access = {
            'marketplace_overview': ['admin', 'analyst', 'viewer'],
            'system_metrics': ['admin'],
            'public_stats': ['admin', 'analyst', 'viewer', 'guest']
        }

        def check_permission(user_role, dashboard_id, action):
            # Access requires BOTH: the role may perform the action,
            # and the role is granted on that specific dashboard.
            if action not in user_permissions[user_role]:
                return False
            if user_role not in dashboard_access[dashboard_id]:
                return False
            return True

        # Validate permissions
        assert check_permission('admin', 'marketplace_overview', 'read') is True
        assert check_permission('admin', 'system_metrics', 'write') is True
        assert check_permission('viewer', 'system_metrics', 'read') is False
        assert check_permission('guest', 'public_stats', 'read') is True
        assert check_permission('analyst', 'marketplace_overview', 'delete') is False
class TestReportingSystem:
    """Reporting-system tests: report structure, export formats, scheduling."""

    def test_report_generation(self):
        """A generated report exposes the expected sections and sane values."""
        report_data = {
            'report_id': 'monthly_marketplace_report',
            'title': 'Monthly Marketplace Performance',
            'period': {
                'start': '2024-01-01',
                'end': '2024-01-31'
            },
            'sections': [
                {
                    'title': 'Executive Summary',
                    'content': {
                        'total_transactions': 1250,
                        'total_volume': 156.78,
                        'active_providers': 45,
                        'satisfaction_rate': 4.7
                    }
                },
                {
                    'title': 'Price Analysis',
                    'content': {
                        'avg_gpu_price': 0.12,
                        'price_trend': 'stable',
                        'volatility_index': 0.05
                    }
                }
            ],
            'generated_at': datetime.utcnow().isoformat(),
            'format': 'json'
        }
        # Top-level structure: every required key must be present.
        for required_key in ('report_id', 'title', 'period', 'sections', 'generated_at'):
            assert required_key in report_data
        # Each section carries a title and a content payload.
        for section in report_data['sections']:
            assert 'title' in section
            assert 'content' in section
        # The executive summary holds plausible, in-range values.
        summary = report_data['sections'][0]['content']
        assert summary['total_transactions'] > 0
        assert summary['total_volume'] > 0
        assert summary['active_providers'] > 0
        assert 0 <= summary['satisfaction_rate'] <= 5

    def test_report_export(self):
        """Reports serialize to JSON and to a simplified CSV."""
        report = {
            'title': 'Marketplace Analysis',
            'data': {
                'metrics': {'transactions': 100, 'volume': 50.5},
                'trends': {'price': 'up', 'demand': 'stable'}
            },
            'metadata': {
                'generated_by': 'analytics_system',
                'generated_at': datetime.utcnow().isoformat()
            }
        }
        # JSON round-trip: the serialized form is a string containing the title.
        serialized = json.dumps(report, indent=2)
        assert isinstance(serialized, str)
        assert 'Marketplace Analysis' in serialized
        # Simplified CSV: header row plus one line per metric.
        metrics = report['data']['metrics']
        csv_lines = [
            "Metric,Value",
            f"Transactions,{metrics['transactions']}",
            f"Volume,{metrics['volume']}",
        ]
        csv_data = "\n".join(csv_lines) + "\n"
        assert 'Transactions,100' in csv_data
        assert 'Volume,50.5' in csv_data
        assert csv_data.count('\n') == 3  # Header + 2 data rows

    def test_report_scheduling(self):
        """A daily schedule advances its next run exactly one day ahead."""
        schedule_config = {
            'report_id': 'daily_marketplace_summary',
            'frequency': 'daily',
            'time': '08:00',
            'recipients': ['admin@aitbc.com', 'ops@aitbc.com'],
            'format': 'pdf',
            'enabled': True,
            'last_run': '2024-01-01T08:00:00Z',
            'next_run': '2024-01-02T08:00:00Z'
        }
        # Basic schedule sanity checks.
        assert schedule_config['frequency'] in ('daily', 'weekly', 'monthly')
        assert schedule_config['time'] == '08:00'
        assert len(schedule_config['recipients']) > 0
        assert schedule_config['enabled'] is True
        assert 'next_run' in schedule_config

        def parse_stamp(stamp):
            # Normalize the trailing 'Z' so fromisoformat accepts it.
            return datetime.fromisoformat(stamp.replace('Z', '+00:00'))

        last_run = parse_stamp(schedule_config['last_run'])
        next_run = parse_stamp(schedule_config['next_run'])
        # A daily schedule moves forward exactly one calendar day at 08:00.
        assert next_run.date() == (last_run + timedelta(days=1)).date()
        assert next_run.hour == 8
        assert next_run.minute == 0
class TestDataCollector:
"""Test data collection functionality"""
@pytest.fixture
def data_collector(self):
return DataCollector()
def test_data_collection_metrics(self):
"""Test data collection metrics gathering"""
# Sample data collection metrics
collection_metrics = {
'total_records_collected': 10000,
'collection_duration_seconds': 300,
'error_rate': 0.02, # 2%
'data_sources': ['marketplace_api', 'blockchain_api', 'user_activity'],
'last_collection': datetime.utcnow().isoformat()
}
# Validate metrics
assert collection_metrics['total_records_collected'] > 0
assert collection_metrics['collection_duration_seconds'] > 0
assert 0 <= collection_metrics['error_rate'] <= 1
assert len(collection_metrics['data_sources']) > 0
assert 'last_collection' in collection_metrics
# Calculate collection rate
collection_rate = collection_metrics['total_records_collected'] / collection_metrics['collection_duration_seconds']
assert collection_rate > 10 # Should collect at least 10 records per second
def test_collect_transaction_volume(self, data_collector):
"""Test transaction volume collection"""