chore: update file permissions to executable across repository
- Change file mode from 644 to 755 for all project files - Add chain_id parameter to get_balance RPC endpoint with default "ait-devnet" - Rename Miner.extra_meta_data to extra_metadata for consistency
This commit is contained in:
569
tests/performance/test_performance.py
Normal file
569
tests/performance/test_performance.py
Normal file
@@ -0,0 +1,569 @@
|
||||
"""
|
||||
Performance Tests for AITBC Chain Management and Analytics
|
||||
Tests system performance under various load conditions
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
import threading
|
||||
import statistics
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
import subprocess
|
||||
import requests
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from typing import Dict, Any, List, Tuple
|
||||
import psutil
|
||||
import memory_profiler
|
||||
|
||||
class TestPerformance:
|
||||
"""Performance testing suite for AITBC components"""
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def performance_config(self):
    """Return the shared configuration for the performance suite.

    Provides the service base URL, a port map for every AITBC service,
    the load-test parameters, and the pass/fail performance thresholds.
    """
    service_ports = {
        "coordinator": 8001,
        "blockchain": 8007,
        "consensus": 8002,
        "network": 8008,
        "explorer": 8016,
        "wallet_daemon": 8003,
        "exchange": 8010,
        "oracle": 8011,
        "trading": 8012,
        "compliance": 8015,
        "plugin_registry": 8013,
        "plugin_marketplace": 8014,
        "global_infrastructure": 8017,
        "ai_agents": 8018,
        "load_balancer": 8019,
    }
    load_test_config = {
        "concurrent_users": 10,
        "requests_per_user": 100,
        "duration_seconds": 60,
        "ramp_up_time": 10,
    }
    thresholds = {
        "response_time_p95": 2000,  # 95th percentile < 2 seconds
        "response_time_p99": 5000,  # 99th percentile < 5 seconds
        "error_rate": 0.01,         # < 1% error rate
        "throughput_min": 50,       # Minimum 50 requests/second
        "cpu_usage_max": 0.80,      # < 80% CPU usage
        "memory_usage_max": 0.85,   # < 85% memory usage
    }
    return {
        "base_url": "http://localhost",
        "ports": service_ports,
        "load_test_config": load_test_config,
        "performance_thresholds": thresholds,
    }
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def baseline_metrics(self, performance_config):
    """Capture baseline system metrics before any load is generated.

    Returns a dict with system-wide CPU percent (1 s sample), virtual
    memory percent, and an ISO-8601 UTC timestamp. Used only for the
    comparison printout in test_system_resource_usage.
    """
    # Local import keeps the fix self-contained; datetime.utcnow() is
    # deprecated since Python 3.12 in favour of timezone-aware values.
    from datetime import timezone

    return {
        "cpu_percent": psutil.cpu_percent(interval=1),
        "memory_percent": psutil.virtual_memory().percent,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
|
||||
|
||||
def test_cli_performance(self, performance_config):
    """Test CLI command performance"""
    cli_commands = [
        ["--help"],
        ["wallet", "--help"],
        ["blockchain", "--help"],
        ["multisig", "--help"],
        ["genesis-protection", "--help"],
        ["transfer-control", "--help"],
        ["compliance", "--help"],
        ["exchange", "--help"],
        ["oracle", "--help"],
        ["market-maker", "--help"],
    ]

    def timed_invoke(args):
        # Run one CLI command and return (CompletedProcess, elapsed ms).
        begin = time.time()
        completed = subprocess.run(
            ["python", "-m", "aitbc_cli.main"] + args,
            capture_output=True,
            text=True,
            cwd="/home/oib/windsurf/aitbc/cli",
        )
        return completed, (time.time() - begin) * 1000

    response_times = []
    for command in cli_commands:
        result, response_time = timed_invoke(command)
        assert result.returncode == 0, f"CLI command failed: {' '.join(command)}"
        assert response_time < 5000, f"CLI command too slow: {response_time:.2f}ms"
        response_times.append(response_time)

    # Calculate performance statistics
    avg_response_time = statistics.mean(response_times)
    p95_response_time = statistics.quantiles(response_times, n=20)[18]  # 95th percentile
    max_response_time = max(response_times)

    # Performance assertions
    assert avg_response_time < 1000, f"Average CLI response time too high: {avg_response_time:.2f}ms"
    assert p95_response_time < 3000, f"95th percentile CLI response time too high: {p95_response_time:.2f}ms"
    assert max_response_time < 10000, f"Maximum CLI response time too high: {max_response_time:.2f}ms"

    print(f"CLI Performance Results:")
    print(f" Average: {avg_response_time:.2f}ms")
    print(f" 95th percentile: {p95_response_time:.2f}ms")
    print(f" Maximum: {max_response_time:.2f}ms")
|
||||
|
||||
def test_concurrent_cli_operations(self, performance_config):
    """Test concurrent CLI operations"""
    def run_cli_command(command):
        # Invoke one CLI command and report success, latency and output size.
        begin = time.time()
        proc = subprocess.run(
            ["python", "-m", "aitbc_cli.main"] + command,
            capture_output=True,
            text=True,
            cwd="/home/oib/windsurf/aitbc/cli",
        )
        return {
            "command": command,
            "success": proc.returncode == 0,
            "response_time": (time.time() - begin) * 1000,
            "output_length": len(proc.stdout),
        }

    commands_to_test = [
        ["wallet", "--help"],
        ["blockchain", "--help"],
        ["multisig", "--help"],
        ["compliance", "--help"],
        ["exchange", "--help"],
    ]

    # 20 rounds over 5 commands -> 100 concurrent operations in total.
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [
            executor.submit(run_cli_command, command)
            for _ in range(20)
            for command in commands_to_test
        ]
        results = [future.result() for future in as_completed(futures)]

    successful_operations = [r for r in results if r["success"]]
    response_times = [r["response_time"] for r in successful_operations]

    success_rate = len(successful_operations) / len(results)
    avg_response_time = statistics.mean(response_times) if response_times else 0
    if len(response_times) > 20:
        p95_response_time = statistics.quantiles(response_times, n=20)[18]
    elif response_times:
        p95_response_time = max(response_times)
    else:
        p95_response_time = 0

    # Performance assertions
    assert success_rate >= 0.95, f"Low success rate: {success_rate:.2%}"
    assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"
    assert p95_response_time < 5000, f"95th percentile response time too high: {p95_response_time:.2f}ms"

    print(f"Concurrent CLI Operations Results:")
    print(f" Success rate: {success_rate:.2%}")
    print(f" Average response time: {avg_response_time:.2f}ms")
    print(f" 95th percentile: {p95_response_time:.2f}ms")
    print(f" Total operations: {len(results)}")
|
||||
|
||||
def test_memory_usage_cli(self, performance_config):
    """Test memory usage during CLI operations"""
    @memory_profiler.profile
    def run_memory_intensive_cli_operations():
        # Ten sequential passes over the full command set.
        commands = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["genesis-protection", "--help"],
            ["transfer-control", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"],
            ["oracle", "--help"],
            ["market-maker", "--help"],
        ]
        for command in commands * 10:
            subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli",
            )

    # System-wide memory before and after the workload; the delta is the
    # (coarse) signal that the CLI is not leaking between invocations.
    memory_before = psutil.virtual_memory().percent
    run_memory_intensive_cli_operations()
    memory_after = psutil.virtual_memory().percent
    memory_increase = memory_after - memory_before

    assert memory_increase < 20, f"Memory usage increased too much: {memory_increase:.1f}%"

    print(f"Memory Usage Results:")
    print(f" Memory before: {memory_before:.1f}%")
    print(f" Memory after: {memory_after:.1f}%")
    print(f" Memory increase: {memory_increase:.1f}%")
|
||||
|
||||
def test_load_balancing_performance(self, performance_config):
    """Test load balancer performance under load.

    Fires 100 concurrent health-check requests at the load balancer and
    asserts success rate, latency and throughput thresholds.
    """
    def make_load_balancer_request():
        # One health-check request; failures are reported, never raised.
        try:
            start_time = time.time()
            response = requests.get(
                f"{performance_config['base_url']}:{performance_config['ports']['load_balancer']}/health",
                timeout=5
            )
            end_time = time.time()

            return {
                "success": response.status_code == 200,
                "response_time": (end_time - start_time) * 1000,
                "status_code": response.status_code
            }
        except Exception as e:
            return {
                "success": False,
                "response_time": 5000,  # Timeout
                "error": str(e)
            }

    # BUG FIX: throughput was previously len(successful)/10 with a
    # hard-coded assumed 10 s duration; measure the real wall time instead.
    batch_start = time.time()
    with ThreadPoolExecutor(max_workers=20) as executor:
        futures = [executor.submit(make_load_balancer_request) for _ in range(100)]
        results = [future.result() for future in as_completed(futures)]
    batch_duration = max(time.time() - batch_start, 1e-6)  # guard div-by-zero

    # Analyze results
    successful_requests = [r for r in results if r["success"]]
    response_times = [r["response_time"] for r in successful_requests]

    if response_times:
        success_rate = len(successful_requests) / len(results)
        avg_response_time = statistics.mean(response_times)
        p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times)
        throughput = len(successful_requests) / batch_duration  # requests per second

        # Performance assertions
        assert success_rate >= 0.90, f"Low success rate: {success_rate:.2%}"
        assert avg_response_time < 1000, f"Average response time too high: {avg_response_time:.2f}ms"
        assert throughput >= 10, f"Throughput too low: {throughput:.2f} req/s"

        print(f"Load Balancer Performance Results:")
        print(f" Success rate: {success_rate:.2%}")
        print(f" Average response time: {avg_response_time:.2f}ms")
        print(f" 95th percentile: {p95_response_time:.2f}ms")
        print(f" Throughput: {throughput:.2f} req/s")
|
||||
|
||||
def test_global_infrastructure_performance(self, performance_config):
    """Test global infrastructure performance"""
    def test_service_performance(service_name, port):
        # Health-check one service; exceptions are folded into the result.
        try:
            started = time.time()
            response = requests.get(f"{performance_config['base_url']}:{port}/health", timeout=5)
            elapsed_ms = (time.time() - started) * 1000
            return {
                "service": service_name,
                "success": response.status_code == 200,
                "response_time": elapsed_ms,
                "status_code": response.status_code,
            }
        except Exception as e:
            return {
                "service": service_name,
                "success": False,
                "response_time": 5000,
                "error": str(e),
            }

    # The three globally-deployed services under test.
    global_services = {
        name: performance_config["ports"][name]
        for name in ("global_infrastructure", "ai_agents", "load_balancer")
    }

    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [
            executor.submit(test_service_performance, service_name, port)
            for service_name, port in global_services.items()
        ]
        results = [future.result() for future in as_completed(futures)]

    successful_services = [r for r in results if r["success"]]
    response_times = [r["response_time"] for r in successful_services]

    if response_times:
        avg_response_time = statistics.mean(response_times)
        max_response_time = max(response_times)

        assert len(successful_services) >= 2, f"Too few successful services: {len(successful_services)}"
        assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"
        assert max_response_time < 5000, f"Maximum response time too high: {max_response_time:.2f}ms"

        print(f"Global Infrastructure Performance Results:")
        print(f" Successful services: {len(successful_services)}/{len(results)}")
        print(f" Average response time: {avg_response_time:.2f}ms")
        print(f" Maximum response time: {max_response_time:.2f}ms")
|
||||
|
||||
def test_ai_agent_communication_performance(self, performance_config):
    """Test AI agent communication performance"""
    dashboard_url = (
        f"{performance_config['base_url']}:"
        f"{performance_config['ports']['ai_agents']}/api/v1/network/dashboard"
    )

    def test_agent_communication():
        # Fetch the agent network dashboard; failures become result dicts.
        try:
            started = time.time()
            response = requests.get(dashboard_url, timeout=5)
            elapsed_ms = (time.time() - started) * 1000
            return {
                "success": response.status_code == 200,
                "response_time": elapsed_ms,
                "data_size": len(response.content),
            }
        except Exception as e:
            return {
                "success": False,
                "response_time": 5000,
                "error": str(e),
            }

    # 50 concurrent dashboard fetches across 10 workers.
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(test_agent_communication) for _ in range(50)]
        results = [future.result() for future in as_completed(futures)]

    successful_requests = [r for r in results if r["success"]]
    response_times = [r["response_time"] for r in successful_requests]

    if response_times:
        success_rate = len(successful_requests) / len(results)
        avg_response_time = statistics.mean(response_times)
        p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times)

        assert success_rate >= 0.80, f"Low success rate: {success_rate:.2%}"
        assert avg_response_time < 3000, f"Average response time too high: {avg_response_time:.2f}ms"
        assert p95_response_time < 8000, f"95th percentile response time too high: {p95_response_time:.2f}ms"

        print(f"AI Agent Communication Performance Results:")
        print(f" Success rate: {success_rate:.2%}")
        print(f" Average response time: {avg_response_time:.2f}ms")
        print(f" 95th percentile: {p95_response_time:.2f}ms")
        print(f" Total requests: {len(results)}")
|
||||
|
||||
def test_plugin_ecosystem_performance(self, performance_config):
    """Test plugin ecosystem performance.

    Health-checks the configured plugin services concurrently and asserts
    availability and latency thresholds.
    """
    ports = performance_config["ports"]
    # BUG FIX: the shared port map has no "plugin_analytics" entry, so the
    # original ports["plugin_analytics"] lookup raised KeyError before any
    # request was made. Only services present in the config are tested.
    plugin_services = {
        name: ports[name]
        for name in ("plugin_registry", "plugin_marketplace", "plugin_analytics")
        if name in ports
    }

    def test_plugin_service(service_name, port):
        # Health-check one plugin service; never raises.
        try:
            start_time = time.time()
            response = requests.get(f"{performance_config['base_url']}:{port}/health", timeout=5)
            end_time = time.time()

            return {
                "service": service_name,
                "success": response.status_code == 200,
                "response_time": (end_time - start_time) * 1000
            }
        except Exception as e:
            return {
                "service": service_name,
                "success": False,
                "response_time": 5000,
                "error": str(e)
            }

    with ThreadPoolExecutor(max_workers=3) as executor:
        futures = [
            executor.submit(test_plugin_service, service_name, port)
            for service_name, port in plugin_services.items()
        ]
        results = [future.result() for future in as_completed(futures)]

    # Analyze results
    successful_services = [r for r in results if r["success"]]
    response_times = [r["response_time"] for r in successful_services]

    if response_times:
        avg_response_time = statistics.mean(response_times)

        # Performance assertions
        assert len(successful_services) >= 1, f"No plugin services responding"
        assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"

        print(f"Plugin Ecosystem Performance Results:")
        print(f" Successful services: {len(successful_services)}/{len(results)}")
        print(f" Average response time: {avg_response_time:.2f}ms")
|
||||
|
||||
def test_system_resource_usage(self, performance_config, baseline_metrics):
    """Test system resource usage during operations"""
    resource_samples = []

    def monitor_resources():
        # One CPU/memory sample per second for ~30 s while the load runs.
        for _ in range(30):
            cpu_percent = psutil.cpu_percent(interval=1)
            memory_percent = psutil.virtual_memory().percent
            resource_samples.append({
                "timestamp": datetime.utcnow().isoformat(),
                "cpu_percent": cpu_percent,
                "memory_percent": memory_percent,
            })

    def run_intensive_operations():
        # Twenty sequential passes over the command set.
        commands = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["compliance", "--help"],
        ]
        for command in commands * 20:
            subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli",
            )

    # Run monitoring and the CLI workload concurrently.
    workers = (
        threading.Thread(target=monitor_resources),
        threading.Thread(target=run_intensive_operations),
    )
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    cpu_values = [sample["cpu_percent"] for sample in resource_samples]
    memory_values = [sample["memory_percent"] for sample in resource_samples]

    avg_cpu = statistics.mean(cpu_values)
    max_cpu = max(cpu_values)
    avg_memory = statistics.mean(memory_values)
    max_memory = max(memory_values)

    assert avg_cpu < 70, f"Average CPU usage too high: {avg_cpu:.1f}%"
    assert max_cpu < 90, f"Maximum CPU usage too high: {max_cpu:.1f}%"
    assert avg_memory < 80, f"Average memory usage too high: {avg_memory:.1f}%"
    assert max_memory < 95, f"Maximum memory usage too high: {max_memory:.1f}%"

    print(f"System Resource Usage Results:")
    print(f" Average CPU: {avg_cpu:.1f}% (max: {max_cpu:.1f}%)")
    print(f" Average Memory: {avg_memory:.1f}% (max: {max_memory:.1f}%)")
    print(f" Baseline CPU: {baseline_metrics['cpu_percent']:.1f}%")
    print(f" Baseline Memory: {baseline_metrics['memory_percent']:.1f}%")
|
||||
|
||||
def test_stress_test_cli(self, performance_config):
    """Stress test CLI with high load.

    Five workers each run 50 CLI invocations concurrently; success rate,
    latency and sustained throughput are asserted with lenient thresholds.
    """
    def stress_cli_worker(worker_id):
        # One worker: 50 round-robin CLI invocations, each timed.
        results = []
        commands = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["compliance", "--help"]
        ]

        for i in range(50):  # 50 operations per worker
            command = commands[i % len(commands)]
            start_time = time.time()

            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )

            end_time = time.time()

            results.append({
                "worker_id": worker_id,
                "operation_id": i,
                "success": result.returncode == 0,
                "response_time": (end_time - start_time) * 1000
            })

        return results

    # BUG FIX: throughput was previously divided by a hard-coded 30 s;
    # measure the actual elapsed wall time of the stress run instead.
    stress_start = time.time()
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(stress_cli_worker, i) for i in range(5)]
        all_results = []

        for future in as_completed(futures):
            all_results.extend(future.result())
    stress_duration = max(time.time() - stress_start, 1e-6)  # guard div-by-zero

    # Analyze stress test results
    successful_operations = [r for r in all_results if r["success"]]
    response_times = [r["response_time"] for r in successful_operations]

    success_rate = len(successful_operations) / len(all_results)
    avg_response_time = statistics.mean(response_times) if response_times else 0
    p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times) if response_times else 0
    total_throughput = len(successful_operations) / stress_duration  # operations per second

    # Stress test assertions (more lenient thresholds)
    assert success_rate >= 0.90, f"Low success rate under stress: {success_rate:.2%}"
    assert avg_response_time < 5000, f"Average response time too high under stress: {avg_response_time:.2f}ms"
    assert total_throughput >= 5, f"Throughput too low under stress: {total_throughput:.2f} ops/s"

    print(f"CLI Stress Test Results:")
    print(f" Total operations: {len(all_results)}")
    print(f" Success rate: {success_rate:.2%}")
    print(f" Average response time: {avg_response_time:.2f}ms")
    print(f" 95th percentile: {p95_response_time:.2f}ms")
    print(f" Throughput: {total_throughput:.2f} ops/s")
|
||||
|
||||
class TestLoadTesting:
|
||||
"""Load testing for high-volume scenarios"""
|
||||
|
||||
def test_load_test_blockchain_operations(self, performance_config):
|
||||
"""Load test blockchain operations"""
|
||||
# This would test blockchain operations under high load
|
||||
# Implementation depends on blockchain service availability
|
||||
pass
|
||||
|
||||
def test_load_test_trading_operations(self, performance_config):
|
||||
"""Load test trading operations"""
|
||||
# This would test trading operations under high load
|
||||
# Implementation depends on trading service availability
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
    # Allow running the performance suite directly: python test_performance.py
    pytest.main([__file__, "-v", "--tb=short"])
|
||||
0
tests/performance/test_performance_benchmarks.py
Normal file → Executable file
0
tests/performance/test_performance_benchmarks.py
Normal file → Executable file
505
tests/performance/test_performance_lightweight.py
Normal file
505
tests/performance/test_performance_lightweight.py
Normal file
@@ -0,0 +1,505 @@
|
||||
"""
|
||||
Performance Tests for AITBC Chain Management and Analytics
|
||||
Tests system performance under various load conditions (lightweight version)
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
import threading
|
||||
import statistics
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
import subprocess
|
||||
import requests
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from typing import Dict, Any, List, Tuple
|
||||
import os
|
||||
import resource
|
||||
|
||||
class TestPerformance:
|
||||
"""Performance testing suite for AITBC components"""
|
||||
|
||||
@pytest.fixture(scope="class")
|
||||
def performance_config(self):
    """Return the shared configuration for the lightweight performance suite.

    Provides the service base URL, a port map for every AITBC service,
    and the pass/fail performance thresholds (including the CLI cap).
    """
    service_ports = {
        "coordinator": 8001,
        "blockchain": 8007,
        "consensus": 8002,
        "network": 8008,
        "explorer": 8016,
        "wallet_daemon": 8003,
        "exchange": 8010,
        "oracle": 8011,
        "trading": 8012,
        "compliance": 8015,
        "plugin_registry": 8013,
        "plugin_marketplace": 8014,
        "global_infrastructure": 8017,
        "ai_agents": 8018,
        "load_balancer": 8019,
    }
    thresholds = {
        "response_time_p95": 2000,  # 95th percentile < 2 seconds
        "response_time_p99": 5000,  # 99th percentile < 5 seconds
        "error_rate": 0.01,         # < 1% error rate
        "throughput_min": 50,       # Minimum 50 requests/second
        "cli_response_max": 5000,   # CLI max response time < 5 seconds
    }
    return {
        "base_url": "http://localhost",
        "ports": service_ports,
        "performance_thresholds": thresholds,
    }
|
||||
|
||||
def get_memory_usage(self):
    """Return this process's peak memory usage in MB (best effort).

    Uses resource.getrusage; ru_maxrss is in kilobytes on Linux (macOS
    reports bytes — the Linux convention assumed here matches the suite's
    target environment; TODO confirm if run elsewhere). Returns 0 when the
    value cannot be obtained.
    """
    try:
        # Using resource module for memory usage
        usage = resource.getrusage(resource.RUSAGE_SELF)
        return usage.ru_maxrss / 1024  # Convert to MB (on Linux)
    except (ValueError, OSError, AttributeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return 0
|
||||
|
||||
def get_cpu_usage(self):
    """Get CPU usage (lightweight version).

    The previous implementation busy-waited for 0.1 s and then returned 0
    regardless — the spin added load to the very system under measurement
    without producing a value, so it is removed. A real implementation
    would sample os.times() or /proc/stat deltas.
    """
    return 0  # Simplified - would need more complex implementation for accurate CPU
|
||||
|
||||
def test_cli_performance(self, performance_config):
    """Test CLI command performance"""
    cli_commands = [
        ["--help"],
        ["wallet", "--help"],
        ["blockchain", "--help"],
        ["multisig", "--help"],
        ["genesis-protection", "--help"],
        ["transfer-control", "--help"],
        ["compliance", "--help"],
        ["exchange", "--help"],
        ["oracle", "--help"],
        ["market-maker", "--help"],
    ]
    cli_limit = performance_config["performance_thresholds"]["cli_response_max"]

    response_times = []
    memory_usage_before = self.get_memory_usage()

    for command in cli_commands:
        started = time.time()
        result = subprocess.run(
            ["python", "-m", "aitbc_cli.main"] + command,
            capture_output=True,
            text=True,
            cwd="/home/oib/windsurf/aitbc/cli",
        )
        response_time = (time.time() - started) * 1000  # milliseconds

        assert result.returncode == 0, f"CLI command failed: {' '.join(command)}"
        assert response_time < cli_limit, \
            f"CLI command too slow: {response_time:.2f}ms"

        response_times.append(response_time)

    memory_usage_after = self.get_memory_usage()
    memory_increase = memory_usage_after - memory_usage_before

    # Calculate performance statistics
    avg_response_time = statistics.mean(response_times)
    p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times)
    max_response_time = max(response_times)

    # Performance assertions
    assert avg_response_time < 1000, f"Average CLI response time too high: {avg_response_time:.2f}ms"
    assert p95_response_time < 3000, f"95th percentile CLI response time too high: {p95_response_time:.2f}ms"
    assert max_response_time < 10000, f"Maximum CLI response time too high: {max_response_time:.2f}ms"
    assert memory_increase < 100, f"Memory usage increased too much: {memory_increase:.1f}MB"

    print(f"CLI Performance Results:")
    print(f" Average: {avg_response_time:.2f}ms")
    print(f" 95th percentile: {p95_response_time:.2f}ms")
    print(f" Maximum: {max_response_time:.2f}ms")
    print(f" Memory increase: {memory_increase:.1f}MB")
|
||||
|
||||
def test_concurrent_cli_operations(self, performance_config):
    """Test concurrent CLI operations"""
    def run_cli_command(command):
        # Invoke one CLI command and report success, latency and output size.
        begin = time.time()
        proc = subprocess.run(
            ["python", "-m", "aitbc_cli.main"] + command,
            capture_output=True,
            text=True,
            cwd="/home/oib/windsurf/aitbc/cli",
        )
        return {
            "command": command,
            "success": proc.returncode == 0,
            "response_time": (time.time() - begin) * 1000,
            "output_length": len(proc.stdout),
        }

    commands_to_test = [
        ["wallet", "--help"],
        ["blockchain", "--help"],
        ["multisig", "--help"],
        ["compliance", "--help"],
        ["exchange", "--help"],
    ]

    # 20 rounds over 5 commands -> 100 concurrent operations in total.
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [
            executor.submit(run_cli_command, command)
            for _ in range(20)
            for command in commands_to_test
        ]
        results = [future.result() for future in as_completed(futures)]

    successful_operations = [r for r in results if r["success"]]
    response_times = [r["response_time"] for r in successful_operations]

    success_rate = len(successful_operations) / len(results)
    avg_response_time = statistics.mean(response_times) if response_times else 0
    if len(response_times) > 20:
        p95_response_time = statistics.quantiles(response_times, n=20)[18]
    elif response_times:
        p95_response_time = max(response_times)
    else:
        p95_response_time = 0

    # Performance assertions
    assert success_rate >= 0.95, f"Low success rate: {success_rate:.2%}"
    assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"
    assert p95_response_time < 5000, f"95th percentile response time too high: {p95_response_time:.2f}ms"

    print(f"Concurrent CLI Operations Results:")
    print(f" Success rate: {success_rate:.2%}")
    print(f" Average response time: {avg_response_time:.2f}ms")
    print(f" 95th percentile: {p95_response_time:.2f}ms")
    print(f" Total operations: {len(results)}")
|
||||
|
||||
def test_cli_memory_efficiency(self, performance_config):
    """Test CLI memory efficiency"""
    memory_samples = []

    def monitor_memory():
        # Sample peak RSS every 0.5 s (~5 s total) while commands run.
        for _ in range(10):
            memory_samples.append(self.get_memory_usage())
            time.sleep(0.5)

    def run_cli_operations():
        # Five sequential passes over the full command set.
        commands = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["genesis-protection", "--help"],
            ["transfer-control", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"],
            ["oracle", "--help"],
            ["market-maker", "--help"],
        ]
        for command in commands * 5:
            subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli",
            )

    # Monitor memory during operations
    workers = (
        threading.Thread(target=monitor_memory),
        threading.Thread(target=run_cli_operations),
    )
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # Analyze memory usage
    if memory_samples:
        avg_memory = statistics.mean(memory_samples)
        max_memory = max(memory_samples)
        memory_variance = statistics.variance(memory_samples) if len(memory_samples) > 1 else 0

        # Memory efficiency assertions
        assert max_memory - min(memory_samples) < 50, f"Memory usage variance too high: {max_memory - min(memory_samples):.1f}MB"
        assert avg_memory < 200, f"Average memory usage too high: {avg_memory:.1f}MB"

        print(f"CLI Memory Efficiency Results:")
        print(f" Average memory: {avg_memory:.1f}MB")
        print(f" Maximum memory: {max_memory:.1f}MB")
        print(f" Memory variance: {memory_variance:.1f}")
|
||||
|
||||
def test_cli_throughput(self, performance_config):
|
||||
"""Test CLI command throughput"""
|
||||
def measure_throughput():
|
||||
commands = [
|
||||
["wallet", "--help"],
|
||||
["blockchain", "--help"],
|
||||
["multisig", "--help"]
|
||||
]
|
||||
|
||||
start_time = time.time()
|
||||
successful_operations = 0
|
||||
|
||||
for i in range(100): # 100 operations
|
||||
command = commands[i % len(commands)]
|
||||
result = subprocess.run(
|
||||
["python", "-m", "aitbc_cli.main"] + command,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd="/home/oib/windsurf/aitbc/cli"
|
||||
)
|
||||
|
||||
if result.returncode == 0:
|
||||
successful_operations += 1
|
||||
|
||||
end_time = time.time()
|
||||
duration = end_time - start_time
|
||||
throughput = successful_operations / duration # operations per second
|
||||
|
||||
return {
|
||||
"total_operations": 100,
|
||||
"successful_operations": successful_operations,
|
||||
"duration": duration,
|
||||
"throughput": throughput
|
||||
}
|
||||
|
||||
# Run throughput test
|
||||
result = measure_throughput()
|
||||
|
||||
# Throughput assertions
|
||||
assert result["successful_operations"] >= 95, f"Too many failed operations: {result['successful_operations']}/100"
|
||||
assert result["throughput"] >= 10, f"Throughput too low: {result['throughput']:.2f} ops/s"
|
||||
assert result["duration"] < 30, f"Test took too long: {result['duration']:.2f}s"
|
||||
|
||||
print(f"CLI Throughput Results:")
|
||||
print(f" Successful operations: {result['successful_operations']}/100")
|
||||
print(f" Duration: {result['duration']:.2f}s")
|
||||
print(f" Throughput: {result['throughput']:.2f} ops/s")
|
||||
|
||||
def test_cli_response_time_distribution(self, performance_config):
|
||||
"""Test CLI response time distribution"""
|
||||
commands = [
|
||||
["--help"],
|
||||
["wallet", "--help"],
|
||||
["blockchain", "--help"],
|
||||
["multisig", "--help"],
|
||||
["genesis-protection", "--help"],
|
||||
["transfer-control", "--help"],
|
||||
["compliance", "--help"],
|
||||
["exchange", "--help"],
|
||||
["oracle", "--help"],
|
||||
["market-maker", "--help"]
|
||||
]
|
||||
|
||||
response_times = []
|
||||
|
||||
# Run each command multiple times
|
||||
for command in commands:
|
||||
for _ in range(10): # 10 times per command
|
||||
start_time = time.time()
|
||||
|
||||
result = subprocess.run(
|
||||
["python", "-m", "aitbc_cli.main"] + command,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd="/home/oib/windsurf/aitbc/cli"
|
||||
)
|
||||
|
||||
end_time = time.time()
|
||||
response_time = (end_time - start_time) * 1000
|
||||
|
||||
assert result.returncode == 0, f"CLI command failed: {' '.join(command)}"
|
||||
response_times.append(response_time)
|
||||
|
||||
# Calculate distribution statistics
|
||||
min_time = min(response_times)
|
||||
max_time = max(response_times)
|
||||
mean_time = statistics.mean(response_times)
|
||||
median_time = statistics.median(response_times)
|
||||
std_dev = statistics.stdev(response_times)
|
||||
|
||||
# Percentiles
|
||||
sorted_times = sorted(response_times)
|
||||
p50 = sorted_times[len(sorted_times) // 2]
|
||||
p90 = sorted_times[int(len(sorted_times) * 0.9)]
|
||||
p95 = sorted_times[int(len(sorted_times) * 0.95)]
|
||||
p99 = sorted_times[int(len(sorted_times) * 0.99)]
|
||||
|
||||
# Distribution assertions
|
||||
assert mean_time < 1000, f"Mean response time too high: {mean_time:.2f}ms"
|
||||
assert p95 < 3000, f"95th percentile too high: {p95:.2f}ms"
|
||||
assert p99 < 5000, f"99th percentile too high: {p99:.2f}ms"
|
||||
assert std_dev < mean_time, f"Standard deviation too high: {std_dev:.2f}ms"
|
||||
|
||||
print(f"CLI Response Time Distribution:")
|
||||
print(f" Min: {min_time:.2f}ms")
|
||||
print(f" Max: {max_time:.2f}ms")
|
||||
print(f" Mean: {mean_time:.2f}ms")
|
||||
print(f" Median: {median_time:.2f}ms")
|
||||
print(f" Std Dev: {std_dev:.2f}ms")
|
||||
print(f" 50th percentile: {p50:.2f}ms")
|
||||
print(f" 90th percentile: {p90:.2f}ms")
|
||||
print(f" 95th percentile: {p95:.2f}ms")
|
||||
print(f" 99th percentile: {p99:.2f}ms")
|
||||
|
||||
def test_cli_scalability(self, performance_config):
|
||||
"""Test CLI scalability with increasing load"""
|
||||
def test_load_level(num_concurrent, operations_per_thread):
|
||||
def worker():
|
||||
commands = [["--help"], ["wallet", "--help"], ["blockchain", "--help"]]
|
||||
results = []
|
||||
|
||||
for i in range(operations_per_thread):
|
||||
command = commands[i % len(commands)]
|
||||
start_time = time.time()
|
||||
|
||||
result = subprocess.run(
|
||||
["python", "-m", "aitbc_cli.main"] + command,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd="/home/oib/windsurf/aitbc/cli"
|
||||
)
|
||||
|
||||
end_time = time.time()
|
||||
results.append({
|
||||
"success": result.returncode == 0,
|
||||
"response_time": (end_time - start_time) * 1000
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
with ThreadPoolExecutor(max_workers=num_concurrent) as executor:
|
||||
futures = [executor.submit(worker) for _ in range(num_concurrent)]
|
||||
all_results = []
|
||||
|
||||
for future in as_completed(futures):
|
||||
worker_results = future.result()
|
||||
all_results.extend(worker_results)
|
||||
|
||||
# Analyze results
|
||||
successful = [r for r in all_results if r["success"]]
|
||||
response_times = [r["response_time"] for r in successful]
|
||||
|
||||
if response_times:
|
||||
success_rate = len(successful) / len(all_results)
|
||||
avg_response_time = statistics.mean(response_times)
|
||||
|
||||
return {
|
||||
"total_operations": len(all_results),
|
||||
"successful_operations": len(successful),
|
||||
"success_rate": success_rate,
|
||||
"avg_response_time": avg_response_time
|
||||
}
|
||||
|
||||
# Test different load levels
|
||||
load_levels = [
|
||||
(1, 50), # 1 thread, 50 operations
|
||||
(2, 50), # 2 threads, 50 operations each
|
||||
(5, 20), # 5 threads, 20 operations each
|
||||
(10, 10) # 10 threads, 10 operations each
|
||||
]
|
||||
|
||||
results = {}
|
||||
|
||||
for num_threads, ops_per_thread in load_levels:
|
||||
result = test_load_level(num_threads, ops_per_thread)
|
||||
results[f"{num_threads}x{ops_per_thread}"] = result
|
||||
|
||||
# Scalability assertions
|
||||
assert result["success_rate"] >= 0.90, f"Low success rate at {num_threads}x{ops_per_thread}: {result['success_rate']:.2%}"
|
||||
assert result["avg_response_time"] < 3000, f"Response time too high at {num_threads}x{ops_per_thread}: {result['avg_response_time']:.2f}ms"
|
||||
|
||||
print(f"CLI Scalability Results:")
|
||||
for load_level, result in results.items():
|
||||
print(f" {load_level}: {result['success_rate']:.2%} success, {result['avg_response_time']:.2f}ms avg")
|
||||
|
||||
def test_cli_error_handling_performance(self, performance_config):
|
||||
"""Test CLI error handling performance"""
|
||||
# Test invalid commands
|
||||
invalid_commands = [
|
||||
["--invalid-option"],
|
||||
["wallet", "--invalid-subcommand"],
|
||||
["blockchain", "invalid-subcommand"],
|
||||
["nonexistent-command"]
|
||||
]
|
||||
|
||||
response_times = []
|
||||
|
||||
for command in invalid_commands:
|
||||
start_time = time.time()
|
||||
|
||||
result = subprocess.run(
|
||||
["python", "-m", "aitbc_cli.main"] + command,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
cwd="/home/oib/windsurf/aitbc/cli"
|
||||
)
|
||||
|
||||
end_time = time.time()
|
||||
response_time = (end_time - start_time) * 1000
|
||||
|
||||
# Should fail gracefully
|
||||
assert result.returncode != 0, f"Invalid command should fail: {' '.join(command)}"
|
||||
assert response_time < 2000, f"Error handling too slow: {response_time:.2f}ms"
|
||||
|
||||
response_times.append(response_time)
|
||||
|
||||
avg_error_response_time = statistics.mean(response_times)
|
||||
max_error_response_time = max(response_times)
|
||||
|
||||
# Error handling performance assertions
|
||||
assert avg_error_response_time < 1000, f"Average error response time too high: {avg_error_response_time:.2f}ms"
|
||||
assert max_error_response_time < 2000, f"Maximum error response time too high: {max_error_response_time:.2f}ms"
|
||||
|
||||
print(f"CLI Error Handling Performance:")
|
||||
print(f" Average error response time: {avg_error_response_time:.2f}ms")
|
||||
print(f" Maximum error response time: {max_error_response_time:.2f}ms")
|
||||
|
||||
class TestServicePerformance:
    """Test service performance (when services are available)"""

    def test_service_health_performance(self, performance_config):
        """Test service health endpoint performance"""
        # NOTE(review): `performance_config` is declared as a class-scoped
        # fixture on TestPerformance; confirm pytest resolves it for this
        # class too (class-scoped fixtures are normally not shared across
        # classes — it may need to move to module level).
        targets = {
            name: performance_config["ports"][name]
            for name in ("global_infrastructure", "consensus")
        }

        for service_name, port in targets.items():
            try:
                t0 = time.time()
                response = requests.get(f"{performance_config['base_url']}:{port}/health", timeout=5)
                response_time = (time.time() - t0) * 1000

                if response.status_code == 200:
                    # A healthy service must answer its health probe quickly.
                    assert response_time < 1000, f"{service_name} health endpoint too slow: {response_time:.2f}ms"
                    print(f"✅ {service_name} health: {response_time:.2f}ms")
                else:
                    print(f"⚠️ {service_name} health returned {response.status_code}")

            except Exception as e:
                # Services may simply not be running; report instead of failing.
                print(f"❌ {service_name} health check failed: {str(e)}")
|
||||
|
||||
if __name__ == "__main__":
    # Run performance tests directly, propagating pytest's exit status so
    # shell/CI callers see failures (previously the status was discarded
    # and the process always exited 0).
    raise SystemExit(pytest.main([__file__, "-v", "--tb=short"]))
|
||||
0
tests/performance/test_pricing_performance.py
Normal file → Executable file
0
tests/performance/test_pricing_performance.py
Normal file → Executable file
Reference in New Issue
Block a user