chore: update file permissions to executable across repository
- Change file mode from 644 to 755 for all project files
- Add chain_id parameter to get_balance RPC endpoint with default "ait-devnet"
- Rename Miner.extra_meta_data to extra_metadata for consistency
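For reference, a minimal sketch of how a caller might exercise the new chain_id parameter on the get_balance RPC endpoint. The route shape, parameter names, and the example address are assumptions inferred from this commit message and the test configuration below, not a confirmed API:

import requests

# Hypothetical call shape; the blockchain service port (8007) is taken from the
# integration test config in this commit, and the route/params are assumed.
base = "http://localhost:8007"
# chain_id omitted: the server now defaults to "ait-devnet"
r1 = requests.get(f"{base}/rpc/get_balance", params={"address": "ait1example"})
# chain_id explicit: target a different chain
r2 = requests.get(f"{base}/rpc/get_balance",
                  params={"address": "ait1example", "chain_id": "ait-testnet"})

Callers that read Miner.extra_meta_data should likewise switch to Miner.extra_metadata.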
0  tests/README.md  Normal file → Executable file
0  tests/TEST_REFACTORING_COMPLETED.md  Normal file → Executable file
0  tests/USAGE_GUIDE.md  Normal file → Executable file
0  tests/analytics/test_analytics_system.py  Normal file → Executable file
0  tests/certification/test_certification_system.py  Normal file → Executable file
0  tests/cli-test-updates-completed.md  Normal file → Executable file
0  tests/cli/test_admin.py  Normal file → Executable file
0  tests/cli/test_agent_commands.py  Normal file → Executable file
0  tests/cli/test_auth.py  Normal file → Executable file
0  tests/cli/test_blockchain.py  Normal file → Executable file
0  tests/cli/test_chain.py  Normal file → Executable file
0  tests/cli/test_cli_integration.py  Normal file → Executable file
0  tests/cli/test_client.py  Normal file → Executable file
0  tests/cli/test_config.py  Normal file → Executable file
0  tests/cli/test_deploy_commands.py  Normal file → Executable file
0  tests/cli/test_deploy_commands_simple.py  Normal file → Executable file
0  tests/cli/test_deploy_structure.py  Normal file → Executable file
0  tests/cli/test_exchange.py  Normal file → Executable file
0  tests/cli/test_genesis.py  Normal file → Executable file
0  tests/cli/test_governance.py  Normal file → Executable file
0  tests/cli/test_marketplace.py  Normal file → Executable file
0  tests/cli/test_marketplace_additional.py  Normal file → Executable file
0  tests/cli/test_marketplace_advanced_commands.py  Normal file → Executable file
0  tests/cli/test_marketplace_bids.py  Normal file → Executable file
0  tests/cli/test_miner.py  Normal file → Executable file
0  tests/cli/test_multimodal_commands.py  Normal file → Executable file
0  tests/cli/test_node.py  Normal file → Executable file
0  tests/cli/test_openclaw_commands.py  Normal file → Executable file
0  tests/cli/test_optimize_commands.py  Normal file → Executable file
0  tests/cli/test_simulate.py  Normal file → Executable file
0  tests/cli/test_swarm_commands.py  Normal file → Executable file
0  tests/cli/test_wallet.py  Normal file → Executable file
0  tests/cli/test_wallet_additions.py  Normal file → Executable file
0  tests/cli/test_wallet_remaining.py  Normal file → Executable file
0  tests/conftest.py  Normal file → Executable file
0  tests/contracts/AgentBounty.test.js  Normal file → Executable file
0  tests/contracts/AgentStaking.test.js  Normal file → Executable file
0  tests/contracts/Integration.test.js  Normal file → Executable file
0  tests/contracts/MockERC20.sol  Normal file → Executable file
0  tests/contracts/MockGroth16Verifier.sol  Normal file → Executable file
0  tests/contracts/MockZKVerifier.sol  Normal file → Executable file
0  tests/e2e/E2E_TESTING_SUMMARY.md  Normal file → Executable file
0  tests/e2e/E2E_TEST_EXECUTION_SUMMARY.md  Normal file → Executable file
0  tests/e2e/README.md  Normal file → Executable file
0  tests/e2e/conftest.py  Normal file → Executable file
0  tests/e2e/conftest_fixtures.py  Normal file → Executable file
0  tests/e2e/fixtures/__init__.py  Normal file → Executable file
0  tests/e2e/fixtures/home/client1/.aitbc/config.yaml  Normal file → Executable file
0  tests/e2e/fixtures/home/client1/answer.txt  Normal file → Executable file
0  tests/e2e/fixtures/home/miner1/.aitbc/config.yaml  Normal file → Executable file
0  tests/e2e/fixtures/home/miner1/question.txt  Normal file → Executable file
0  tests/e2e/test_advanced_features.py  Normal file → Executable file
0  tests/e2e/test_advanced_features_ws.py  Normal file → Executable file
0  tests/e2e/test_client_miner_workflow.py  Normal file → Executable file
0  tests/e2e/test_cross_container_marketplace.py  Normal file → Executable file
0  tests/e2e/test_enhanced_services_workflows.py  Normal file → Executable file
0  tests/e2e/test_fixture_verification.py  Normal file → Executable file
0  tests/e2e/test_mock_services.py  Normal file → Executable file
0  tests/e2e/test_performance_benchmarks.py  Normal file → Executable file
0  tests/fixtures/mock_blockchain_node.py (vendored)  Normal file → Executable file
0  tests/integration/api_integration.test.js  Normal file → Executable file
0  tests/integration/test_agent_economics_integration.py  Normal file → Executable file
0  tests/integration/test_api_integration.py  Normal file → Executable file
0  tests/integration/test_basic_integration.py  Normal file → Executable file
0  tests/integration/test_blockchain_final.py  Normal file → Executable file
0  tests/integration/test_blockchain_nodes.py  Normal file → Executable file
0  tests/integration/test_blockchain_simple.py  Normal file → Executable file
0  tests/integration/test_blockchain_sync.py  Normal file → Executable file
0  tests/integration/test_blockchain_sync_simple.py  Normal file → Executable file
0  tests/integration/test_community_governance.py  Normal file → Executable file
0  tests/integration/test_full_workflow.py  Normal file → Executable file
0  tests/integration/test_integration_simple.py  Normal file → Executable file
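The mode flip above is repository-wide and includes files that normally carry no executable bit (.md, .yaml, .txt, .sol sources). A small audit sketch in the style of the test suites below, for listing exactly which files a commit flipped; the revision range is a placeholder:

import subprocess

# `git diff --summary` emits lines like "mode change 100644 => 100755 <path>".
result = subprocess.run(
    ["git", "diff", "--summary", "HEAD~1", "HEAD"],
    capture_output=True, text=True, check=True,
)
mode_changes = [line.strip() for line in result.stdout.splitlines()
                if "mode change" in line]
print(f"{len(mode_changes)} files changed mode")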
495  tests/integration/test_multi_chain_integration.py  Normal file
@@ -0,0 +1,495 @@
"""
Integration Tests for AITBC Multi-Chain Components
Tests end-to-end functionality across all implemented services
"""

import pytest
import asyncio
import json
import time
from datetime import datetime
from pathlib import Path
import subprocess
import requests
from typing import Dict, Any, List

class TestMultiChainIntegration:
    """Test suite for multi-chain integration"""

    @pytest.fixture(scope="class")
    def test_config(self):
        """Test configuration for integration tests"""
        return {
            "base_url": "http://localhost",
            "ports": {
                "coordinator": 8001,
                "blockchain": 8007,
                "consensus": 8002,
                "network": 8008,
                "explorer": 8016,
                "wallet_daemon": 8003,
                "exchange": 8010,
                "oracle": 8011,
                "trading": 8012,
                "compliance": 8015,
                "plugin_registry": 8013,
                "plugin_marketplace": 8014,
                "plugin_analytics": 8016,  # NOTE: collides with "explorer", which is also mapped to 8016
                "global_infrastructure": 8017,
                "ai_agents": 8018,
                "load_balancer": 8019
            },
            "test_chains": ["ait-devnet", "ait-testnet"],
            "test_wallets": ["test_wallet_1", "test_wallet_2"],
            "timeout": 30
        }

    @pytest.fixture(scope="class")
    def services_health(self, test_config):
        """Check if all services are healthy before running tests"""
        healthy_services = {}

        for service_name, port in test_config["ports"].items():
            try:
                response = requests.get(f"{test_config['base_url']}:{port}/health", timeout=5)
                if response.status_code == 200:
                    healthy_services[service_name] = True
                    print(f"✅ {service_name} service is healthy")
                else:
                    healthy_services[service_name] = False
                    print(f"❌ {service_name} service returned status {response.status_code}")
            except Exception as e:
                healthy_services[service_name] = False
                print(f"❌ {service_name} service is unreachable: {str(e)}")

        return healthy_services

    def test_coordinator_health(self, test_config, services_health):
        """Test coordinator service health"""
        assert services_health.get("coordinator", False), "Coordinator service is not healthy"

        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['coordinator']}/health")
        assert response.status_code == 200
        data = response.json()
        assert "status" in data
        assert data["status"] == "ok"

    def test_blockchain_integration(self, test_config, services_health):
        """Test blockchain service integration"""
        assert services_health.get("blockchain", False), "Blockchain service is not healthy"

        # Test blockchain health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['blockchain']}/health")
        assert response.status_code == 200

        # Test chain listing
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['blockchain']}/rpc/chains")
        assert response.status_code == 200
        chains = response.json()
        assert isinstance(chains, list)

        # Test block head
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['blockchain']}/rpc/head")
        assert response.status_code == 200
        head = response.json()
        assert "height" in head
        assert isinstance(head["height"], int)

    def test_consensus_integration(self, test_config, services_health):
        """Test consensus service integration"""
        assert services_health.get("consensus", False), "Consensus service is not healthy"

        # Test consensus status
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['consensus']}/rpc/consensusStatus")
        assert response.status_code == 200
        status = response.json()
        assert "status" in status
        assert status["status"] == "healthy"

        # Test validators
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['consensus']}/rpc/validators")
        assert response.status_code == 200
        validators = response.json()
        assert isinstance(validators, list)

    def test_network_integration(self, test_config, services_health):
        """Test network service integration"""
        assert services_health.get("network", False), "Network service is not healthy"

        # Test network status
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['network']}/network/status")
        assert response.status_code == 200
        status = response.json()
        assert "status" in status
        assert status["status"] == "healthy"

        # Test peer management
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['network']}/network/peers")
        assert response.status_code == 200
        peers = response.json()
        assert isinstance(peers, list)

    def test_explorer_integration(self, test_config, services_health):
        """Test explorer service integration"""
        assert services_health.get("explorer", False), "Explorer service is not healthy"

        # Test explorer health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['explorer']}/health")
        assert response.status_code == 200

        # Test chains API
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['explorer']}/api/v1/chains")
        assert response.status_code == 200
        chains = response.json()
        assert "chains" in chains
        assert isinstance(chains["chains"], list)

    def test_wallet_daemon_integration(self, test_config, services_health):
        """Test wallet daemon integration"""
        assert services_health.get("wallet_daemon", False), "Wallet daemon service is not healthy"

        # Test wallet daemon health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['wallet_daemon']}/health")
        assert response.status_code == 200

        # Test chain listing
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['wallet_daemon']}/v1/chains")
        assert response.status_code == 200
        chains = response.json()
        assert isinstance(chains, list)

    def test_exchange_integration(self, test_config, services_health):
        """Test exchange service integration"""
        assert services_health.get("exchange", False), "Exchange service is not healthy"

        # Test exchange health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['exchange']}/health")
        assert response.status_code == 200

        # Test trading pairs
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['exchange']}/api/v1/pairs")
        assert response.status_code == 200
        pairs = response.json()
        assert "pairs" in pairs
        assert isinstance(pairs["pairs"], list)

    def test_oracle_integration(self, test_config, services_health):
        """Test oracle service integration"""
        assert services_health.get("oracle", False), "Oracle service is not healthy"

        # Test oracle health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['oracle']}/health")
        assert response.status_code == 200

        # Test price feed
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['oracle']}/api/v1/price-feed")
        assert response.status_code == 200
        prices = response.json()
        assert isinstance(prices, list)

    def test_trading_engine_integration(self, test_config, services_health):
        """Test trading engine integration"""
        assert services_health.get("trading", False), "Trading engine service is not healthy"

        # Test trading engine health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['trading']}/health")
        assert response.status_code == 200

        # Test order book
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['trading']}/api/v1/orderbook/AITBC-BTC")
        assert response.status_code in [200, 404]  # 404 is acceptable if pair doesn't exist

    def test_compliance_integration(self, test_config, services_health):
        """Test compliance service integration"""
        assert services_health.get("compliance", False), "Compliance service is not healthy"

        # Test compliance health
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['compliance']}/health")
        assert response.status_code == 200

        # Test dashboard
        response = requests.get(f"{test_config['base_url']}:{test_config['ports']['compliance']}/api/v1/dashboard")
        assert response.status_code == 200
        dashboard = response.json()
        assert "dashboard" in dashboard

    def test_plugin_ecosystem_integration(self, test_config, services_health):
        """Test plugin ecosystem integration"""
        # Test plugin registry
        if services_health.get("plugin_registry", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['plugin_registry']}/health")
            assert response.status_code == 200

            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['plugin_registry']}/api/v1/plugins")
            assert response.status_code == 200
            plugins = response.json()
            assert "plugins" in plugins

        # Test plugin marketplace
        if services_health.get("plugin_marketplace", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['plugin_marketplace']}/health")
            assert response.status_code == 200

            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['plugin_marketplace']}/api/v1/marketplace/featured")
            assert response.status_code == 200
            featured = response.json()
            assert "featured_plugins" in featured

        # Test plugin analytics
        if services_health.get("plugin_analytics", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['plugin_analytics']}/health")
            assert response.status_code == 200

            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['plugin_analytics']}/api/v1/analytics/dashboard")
            assert response.status_code == 200
            analytics = response.json()
            assert "dashboard" in analytics

    def test_global_services_integration(self, test_config, services_health):
        """Test global services integration"""
        # Test global infrastructure
        if services_health.get("global_infrastructure", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['global_infrastructure']}/health")
            assert response.status_code == 200

            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['global_infrastructure']}/api/v1/global/dashboard")
            assert response.status_code == 200
            dashboard = response.json()
            assert "dashboard" in dashboard

        # Test AI agents
        if services_health.get("ai_agents", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['ai_agents']}/health")
            assert response.status_code == 200

            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['ai_agents']}/api/v1/network/dashboard")
            assert response.status_code == 200
            network = response.json()
            assert "dashboard" in network

        # Test load balancer
        if services_health.get("load_balancer", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['load_balancer']}/health")
            assert response.status_code == 200

            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['load_balancer']}/api/v1/dashboard")
            assert response.status_code == 200
            dashboard = response.json()
            assert "dashboard" in dashboard

    def test_end_to_end_transaction_flow(self, test_config, services_health):
        """Test complete end-to-end transaction flow"""
        # Skip test if critical services are not healthy
        if not all([
            services_health.get("blockchain", False),
            services_health.get("consensus", False),
            services_health.get("network", False)
        ]):
            pytest.skip("Critical services not healthy for end-to-end test")

        # Submit a transaction to blockchain
        transaction_data = {
            "from": "ait1testsender000000000000000000000000000",
            "to": "ait1testreceiver000000000000000000000000",
            "amount": "1000",
            "chain_id": "ait-devnet"
        }

        response = requests.post(
            f"{test_config['base_url']}:{test_config['ports']['blockchain']}/rpc/submitTransaction",
            json=transaction_data
        )

        # Accept 200 or 400 (invalid transaction is acceptable for integration test)
        assert response.status_code in [200, 400]

        if response.status_code == 200:
            result = response.json()
            assert "transaction_id" in result or "error" in result

    def test_cli_integration(self, test_config):
        """Test CLI integration with services"""
        # Test CLI help command
        result = subprocess.run(
            ["python", "-m", "aitbc_cli.main", "--help"],
            capture_output=True,
            text=True,
            cwd="/home/oib/windsurf/aitbc/cli"
        )

        assert result.returncode == 0
        assert "Usage:" in result.stdout

        # Test specific CLI commands
        cli_commands = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["genesis-protection", "--help"],
            ["transfer-control", "--help"],
            ["compliance", "--help"]
        ]

        for command in cli_commands:
            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )

            assert result.returncode == 0, f"CLI command {' '.join(command)} failed"

    def test_service_discovery(self, test_config, services_health):
        """Test service discovery and inter-service communication"""
        # Test that services can discover each other
        healthy_services = [name for name, healthy in services_health.items() if healthy]

        assert len(healthy_services) > 0, "No healthy services found"

        # Test that explorer can discover blockchain data
        if services_health.get("explorer") and services_health.get("blockchain"):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['explorer']}/api/v1/blocks")
            assert response.status_code == 200
            blocks = response.json()
            assert "blocks" in blocks
            assert isinstance(blocks["blocks"], list)

    def test_error_handling(self, test_config, services_health):
        """Test error handling across services"""
        # Test 404 errors
        if services_health.get("blockchain", False):
            response = requests.get(f"{test_config['base_url']}:{test_config['ports']['blockchain']}/rpc/nonexistent")
            assert response.status_code == 404

        # Test invalid requests
        if services_health.get("exchange", False):
            response = requests.post(
                f"{test_config['base_url']}:{test_config['ports']['exchange']}/api/v1/orders",
                json={"invalid": "data"}
            )
            assert response.status_code in [400, 422]

    def test_performance_metrics(self, test_config, services_health):
        """Test performance metrics collection"""
        # Test that services provide performance metrics
        metric_endpoints = [
            ("blockchain", "/rpc/status"),
            ("consensus", "/rpc/consensusStatus"),
            ("network", "/network/status"),
            ("trading", "/api/v1/engine/stats")
        ]

        for service_name, endpoint in metric_endpoints:
            if services_health.get(service_name, False):
                response = requests.get(f"{test_config['base_url']}:{test_config['ports'][service_name]}{endpoint}")
                assert response.status_code == 200

                data = response.json()
                # Check for common performance fields
                performance_fields = ["status", "timestamp", "uptime", "performance"]
                found_fields = [field for field in performance_fields if field in data]
                assert len(found_fields) > 0, f"No performance fields found in {service_name} response"

class TestCrossChainIntegration:
    """Test cross-chain functionality"""

    @pytest.fixture(scope="class")
    def cross_chain_config(self):
        """Cross-chain test configuration"""
        return {
            "source_chain": "ait-devnet",
            "target_chain": "ait-testnet",
            "test_amount": 1000,
            "test_address": "ait1testcrosschain00000000000000000000"
        }

    def test_cross_chain_isolation(self, cross_chain_config):
        """Test that chains are properly isolated"""
        # This test would verify that tokens from one chain cannot be used on another
        # Implementation depends on the specific cross-chain isolation mechanisms
        pass

    def test_chain_specific_operations(self, cross_chain_config):
        """Test chain-specific operations"""
        # Test that operations are chain-specific
        pass

class TestSecurityIntegration:
    """Test security integration across services"""

    def test_authentication_flow(self):
        """Test authentication across services"""
        # Test that authentication works consistently
        pass

    def test_authorization_controls(self):
        """Test authorization controls"""
        # Test that authorization is properly enforced
        pass

    def test_encryption_handling(self):
        """Test encryption across services"""
        # Test that sensitive data is properly encrypted
        pass

# Performance and load testing
class TestPerformanceIntegration:
    """Test performance under load"""

    # NOTE: test_config and services_health are class-scoped fixtures defined on
    # TestMultiChainIntegration above; pytest will not resolve them here unless
    # they are moved to module scope or a conftest.py.
    def test_concurrent_requests(self, test_config):
        """Test handling of concurrent requests"""
        import concurrent.futures
        import threading

        def make_request(service_name, endpoint):
            try:
                response = requests.get(f"{test_config['base_url']}:{test_config['ports'][service_name]}{endpoint}", timeout=5)
                return response.status_code
            except Exception:
                return None

        # Test concurrent requests to multiple services
        services_to_test = ["blockchain", "consensus", "network"]
        endpoints = ["/health", "/rpc/status", "/network/status"]

        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            futures = []
            for service in services_to_test:
                for endpoint in endpoints:
                    for _ in range(5):  # 5 concurrent requests per service/endpoint
                        future = executor.submit(make_request, service, endpoint)
                        futures.append(future)

            results = [future.result() for future in concurrent.futures.as_completed(futures)]

        # Check that most requests succeeded
        success_count = len([r for r in results if r in [200, 404]])  # 404 is acceptable for some endpoints
        success_rate = success_count / len(results)

        assert success_rate > 0.8, f"Low success rate: {success_rate:.2%}"

    def test_response_times(self, test_config, services_health):
        """Test response times are within acceptable limits"""
        acceptable_response_times = {
            "health": 1.0,  # 1 second for health checks
            "rpc": 2.0,  # 2 seconds for RPC calls
            "api": 1.5  # 1.5 seconds for API calls
        }

        # Test response times for healthy services
        for service_name, healthy in services_health.items():
            if not healthy:
                continue

            # Test health endpoint
            start_time = time.time()
            response = requests.get(f"{test_config['base_url']}:{test_config['ports'][service_name]}/health", timeout=5)
            response_time = time.time() - start_time

            assert response_time < acceptable_response_times["health"], \
                f"{service_name} health endpoint too slow: {response_time:.2f}s"

if __name__ == "__main__":
    # Run integration tests
    pytest.main([__file__, "-v", "--tb=short"])
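One pattern worth noting in the file above: most tests hard-assert on services_health, so a single down service fails the run, while test_end_to_end_transaction_flow skips instead. A minimal sketch of a shared helper (hypothetical, not part of this commit) that would let the rest of the suite degrade the same way:

import pytest

def require_services(services_health, *names):
    # Skip, rather than fail, when a dependency service is down,
    # mirroring the pytest.skip() call in test_end_to_end_transaction_flow.
    missing = [n for n in names if not services_health.get(n, False)]
    if missing:
        pytest.skip(f"Services not healthy: {', '.join(missing)}")

# usage inside a test:
#   require_services(services_health, "blockchain", "consensus")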
0  tests/integration/test_pricing_integration.py  Normal file → Executable file
0  tests/integration/test_working_integration.py  Normal file → Executable file
0  tests/load/locustfile.py  Normal file → Executable file
0  tests/openclaw_marketplace/README.md  Normal file → Executable file
0  tests/openclaw_marketplace/run_all_tests.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_advanced_agent_capabilities.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_agent_economics.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_agent_governance.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_blockchain_integration.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_framework.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_multi_region_deployment.py  Normal file → Executable file
0  tests/openclaw_marketplace/test_performance_optimization.py  Normal file → Executable file
569  tests/performance/test_performance.py  Normal file
@@ -0,0 +1,569 @@
"""
Performance Tests for AITBC Chain Management and Analytics
Tests system performance under various load conditions
"""

import pytest
import asyncio
import json
import time
import threading
import statistics
from datetime import datetime, timedelta
from pathlib import Path
import subprocess
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, Any, List, Tuple
import psutil
import memory_profiler

class TestPerformance:
    """Performance testing suite for AITBC components"""

    @pytest.fixture(scope="class")
    def performance_config(self):
        """Performance test configuration"""
        return {
            "base_url": "http://localhost",
            "ports": {
                "coordinator": 8001,
                "blockchain": 8007,
                "consensus": 8002,
                "network": 8008,
                "explorer": 8016,
                "wallet_daemon": 8003,
                "exchange": 8010,
                "oracle": 8011,
                "trading": 8012,
                "compliance": 8015,
                "plugin_registry": 8013,
                "plugin_marketplace": 8014,
                # "plugin_analytics" was missing but is referenced by
                # test_plugin_ecosystem_performance below; 8016 is assumed here,
                # matching the integration test config.
                "plugin_analytics": 8016,
                "global_infrastructure": 8017,
                "ai_agents": 8018,
                "load_balancer": 8019
            },
            "load_test_config": {
                "concurrent_users": 10,
                "requests_per_user": 100,
                "duration_seconds": 60,
                "ramp_up_time": 10
            },
            "performance_thresholds": {
                "response_time_p95": 2000,  # 95th percentile < 2 seconds
                "response_time_p99": 5000,  # 99th percentile < 5 seconds
                "error_rate": 0.01,  # < 1% error rate
                "throughput_min": 50,  # Minimum 50 requests/second
                "cpu_usage_max": 0.80,  # < 80% CPU usage
                "memory_usage_max": 0.85  # < 85% memory usage
            }
        }

    @pytest.fixture(scope="class")
    def baseline_metrics(self, performance_config):
        """Capture baseline system metrics"""
        return {
            "cpu_percent": psutil.cpu_percent(interval=1),
            "memory_percent": psutil.virtual_memory().percent,
            "timestamp": datetime.utcnow().isoformat()
        }

    def test_cli_performance(self, performance_config):
        """Test CLI command performance"""
        cli_commands = [
            ["--help"],
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["genesis-protection", "--help"],
            ["transfer-control", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"],
            ["oracle", "--help"],
            ["market-maker", "--help"]
        ]

        response_times = []

        for command in cli_commands:
            start_time = time.time()

            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )

            end_time = time.time()
            response_time = (end_time - start_time) * 1000  # Convert to milliseconds

            assert result.returncode == 0, f"CLI command failed: {' '.join(command)}"
            assert response_time < 5000, f"CLI command too slow: {response_time:.2f}ms"

            response_times.append(response_time)

        # Calculate performance statistics
        avg_response_time = statistics.mean(response_times)
        p95_response_time = statistics.quantiles(response_times, n=20)[18]  # 95th percentile
        max_response_time = max(response_times)

        # Performance assertions
        assert avg_response_time < 1000, f"Average CLI response time too high: {avg_response_time:.2f}ms"
        assert p95_response_time < 3000, f"95th percentile CLI response time too high: {p95_response_time:.2f}ms"
        assert max_response_time < 10000, f"Maximum CLI response time too high: {max_response_time:.2f}ms"

        print("CLI Performance Results:")
        print(f"  Average: {avg_response_time:.2f}ms")
        print(f"  95th percentile: {p95_response_time:.2f}ms")
        print(f"  Maximum: {max_response_time:.2f}ms")

    def test_concurrent_cli_operations(self, performance_config):
        """Test concurrent CLI operations"""
        def run_cli_command(command):
            start_time = time.time()
            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )
            end_time = time.time()
            return {
                "command": command,
                "success": result.returncode == 0,
                "response_time": (end_time - start_time) * 1000,
                "output_length": len(result.stdout)
            }

        # Test concurrent operations
        commands_to_test = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"]
        ]

        with ThreadPoolExecutor(max_workers=10) as executor:
            # Submit multiple concurrent requests
            futures = []
            for _ in range(20):  # 20 concurrent operations
                for command in commands_to_test:
                    future = executor.submit(run_cli_command, command)
                    futures.append(future)

            # Collect results
            results = []
            for future in as_completed(futures):
                result = future.result()
                results.append(result)

        # Analyze results
        successful_operations = [r for r in results if r["success"]]
        response_times = [r["response_time"] for r in successful_operations]

        success_rate = len(successful_operations) / len(results)
        avg_response_time = statistics.mean(response_times) if response_times else 0
        p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times) if response_times else 0

        # Performance assertions
        assert success_rate >= 0.95, f"Low success rate: {success_rate:.2%}"
        assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"
        assert p95_response_time < 5000, f"95th percentile response time too high: {p95_response_time:.2f}ms"

        print("Concurrent CLI Operations Results:")
        print(f"  Success rate: {success_rate:.2%}")
        print(f"  Average response time: {avg_response_time:.2f}ms")
        print(f"  95th percentile: {p95_response_time:.2f}ms")
        print(f"  Total operations: {len(results)}")

    def test_memory_usage_cli(self, performance_config):
        """Test memory usage during CLI operations"""
        @memory_profiler.profile
        def run_memory_intensive_cli_operations():
            commands = [
                ["wallet", "--help"],
                ["blockchain", "--help"],
                ["multisig", "--help"],
                ["genesis-protection", "--help"],
                ["transfer-control", "--help"],
                ["compliance", "--help"],
                ["exchange", "--help"],
                ["oracle", "--help"],
                ["market-maker", "--help"]
            ]

            for _ in range(10):  # Run commands multiple times
                for command in commands:
                    subprocess.run(
                        ["python", "-m", "aitbc_cli.main"] + command,
                        capture_output=True,
                        text=True,
                        cwd="/home/oib/windsurf/aitbc/cli"
                    )

        # Capture memory before test
        memory_before = psutil.virtual_memory().percent

        # Run memory-intensive operations
        run_memory_intensive_cli_operations()

        # Capture memory after test
        memory_after = psutil.virtual_memory().percent
        memory_increase = memory_after - memory_before

        # Memory assertion
        assert memory_increase < 20, f"Memory usage increased too much: {memory_increase:.1f}%"

        print("Memory Usage Results:")
        print(f"  Memory before: {memory_before:.1f}%")
        print(f"  Memory after: {memory_after:.1f}%")
        print(f"  Memory increase: {memory_increase:.1f}%")

    def test_load_balancing_performance(self, performance_config):
        """Test load balancer performance under load"""
        def make_load_balancer_request():
            try:
                start_time = time.time()
                response = requests.get(
                    f"{performance_config['base_url']}:{performance_config['ports']['load_balancer']}/health",
                    timeout=5
                )
                end_time = time.time()

                return {
                    "success": response.status_code == 200,
                    "response_time": (end_time - start_time) * 1000,
                    "status_code": response.status_code
                }
            except Exception as e:
                return {
                    "success": False,
                    "response_time": 5000,  # Timeout
                    "error": str(e)
                }

        # Test with concurrent requests
        with ThreadPoolExecutor(max_workers=20) as executor:
            futures = [executor.submit(make_load_balancer_request) for _ in range(100)]
            results = [future.result() for future in as_completed(futures)]

        # Analyze results
        successful_requests = [r for r in results if r["success"]]
        response_times = [r["response_time"] for r in successful_requests]

        if response_times:
            success_rate = len(successful_requests) / len(results)
            avg_response_time = statistics.mean(response_times)
            p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times)
            throughput = len(successful_requests) / 10  # approximate requests/second, assuming the 100-request batch spans ~10 s

            # Performance assertions
            assert success_rate >= 0.90, f"Low success rate: {success_rate:.2%}"
            assert avg_response_time < 1000, f"Average response time too high: {avg_response_time:.2f}ms"
            assert throughput >= 10, f"Throughput too low: {throughput:.2f} req/s"

            print("Load Balancer Performance Results:")
            print(f"  Success rate: {success_rate:.2%}")
            print(f"  Average response time: {avg_response_time:.2f}ms")
            print(f"  95th percentile: {p95_response_time:.2f}ms")
            print(f"  Throughput: {throughput:.2f} req/s")

    def test_global_infrastructure_performance(self, performance_config):
        """Test global infrastructure performance"""
        def test_service_performance(service_name, port):
            try:
                start_time = time.time()
                response = requests.get(f"{performance_config['base_url']}:{port}/health", timeout=5)
                end_time = time.time()

                return {
                    "service": service_name,
                    "success": response.status_code == 200,
                    "response_time": (end_time - start_time) * 1000,
                    "status_code": response.status_code
                }
            except Exception as e:
                return {
                    "service": service_name,
                    "success": False,
                    "response_time": 5000,
                    "error": str(e)
                }

        # Test all global services
        global_services = {
            "global_infrastructure": performance_config["ports"]["global_infrastructure"],
            "ai_agents": performance_config["ports"]["ai_agents"],
            "load_balancer": performance_config["ports"]["load_balancer"]
        }

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [
                executor.submit(test_service_performance, service_name, port)
                for service_name, port in global_services.items()
            ]
            results = [future.result() for future in as_completed(futures)]

        # Analyze results
        successful_services = [r for r in results if r["success"]]
        response_times = [r["response_time"] for r in successful_services]

        if response_times:
            avg_response_time = statistics.mean(response_times)
            max_response_time = max(response_times)

            # Performance assertions
            assert len(successful_services) >= 2, f"Too few successful services: {len(successful_services)}"
            assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"
            assert max_response_time < 5000, f"Maximum response time too high: {max_response_time:.2f}ms"

            print("Global Infrastructure Performance Results:")
            print(f"  Successful services: {len(successful_services)}/{len(results)}")
            print(f"  Average response time: {avg_response_time:.2f}ms")
            print(f"  Maximum response time: {max_response_time:.2f}ms")

    def test_ai_agent_communication_performance(self, performance_config):
        """Test AI agent communication performance"""
        def test_agent_communication():
            try:
                start_time = time.time()
                response = requests.get(
                    f"{performance_config['base_url']}:{performance_config['ports']['ai_agents']}/api/v1/network/dashboard",
                    timeout=5
                )
                end_time = time.time()

                return {
                    "success": response.status_code == 200,
                    "response_time": (end_time - start_time) * 1000,
                    "data_size": len(response.content)
                }
            except Exception as e:
                return {
                    "success": False,
                    "response_time": 5000,
                    "error": str(e)
                }

        # Test concurrent agent communications
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(test_agent_communication) for _ in range(50)]
            results = [future.result() for future in as_completed(futures)]

        # Analyze results
        successful_requests = [r for r in results if r["success"]]
        response_times = [r["response_time"] for r in successful_requests]

        if response_times:
            success_rate = len(successful_requests) / len(results)
            avg_response_time = statistics.mean(response_times)
            p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times)

            # Performance assertions
            assert success_rate >= 0.80, f"Low success rate: {success_rate:.2%}"
            assert avg_response_time < 3000, f"Average response time too high: {avg_response_time:.2f}ms"
            assert p95_response_time < 8000, f"95th percentile response time too high: {p95_response_time:.2f}ms"

            print("AI Agent Communication Performance Results:")
            print(f"  Success rate: {success_rate:.2%}")
            print(f"  Average response time: {avg_response_time:.2f}ms")
            print(f"  95th percentile: {p95_response_time:.2f}ms")
            print(f"  Total requests: {len(results)}")

    def test_plugin_ecosystem_performance(self, performance_config):
        """Test plugin ecosystem performance"""
        plugin_services = {
            "plugin_registry": performance_config["ports"]["plugin_registry"],
            "plugin_marketplace": performance_config["ports"]["plugin_marketplace"],
            "plugin_analytics": performance_config["ports"]["plugin_analytics"]
        }

        def test_plugin_service(service_name, port):
            try:
                start_time = time.time()
                response = requests.get(f"{performance_config['base_url']}:{port}/health", timeout=5)
                end_time = time.time()

                return {
                    "service": service_name,
                    "success": response.status_code == 200,
                    "response_time": (end_time - start_time) * 1000
                }
            except Exception as e:
                return {
                    "service": service_name,
                    "success": False,
                    "response_time": 5000,
                    "error": str(e)
                }

        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = [
                executor.submit(test_plugin_service, service_name, port)
                for service_name, port in plugin_services.items()
            ]
            results = [future.result() for future in as_completed(futures)]

        # Analyze results
        successful_services = [r for r in results if r["success"]]
        response_times = [r["response_time"] for r in successful_services]

        if response_times:
            avg_response_time = statistics.mean(response_times)

            # Performance assertions
            assert len(successful_services) >= 1, "No plugin services responding"
            assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"

            print("Plugin Ecosystem Performance Results:")
            print(f"  Successful services: {len(successful_services)}/{len(results)}")
            print(f"  Average response time: {avg_response_time:.2f}ms")

    def test_system_resource_usage(self, performance_config, baseline_metrics):
        """Test system resource usage during operations"""
        # Monitor system resources during intensive operations
        resource_samples = []

        def monitor_resources():
            for _ in range(30):  # Monitor for 30 seconds
                cpu_percent = psutil.cpu_percent(interval=1)
                memory_percent = psutil.virtual_memory().percent

                resource_samples.append({
                    "timestamp": datetime.utcnow().isoformat(),
                    "cpu_percent": cpu_percent,
                    "memory_percent": memory_percent
                })

        def run_intensive_operations():
            # Run intensive CLI operations
            commands = [
                ["wallet", "--help"],
                ["blockchain", "--help"],
                ["multisig", "--help"],
                ["compliance", "--help"]
            ]

            for _ in range(20):
                for command in commands:
                    subprocess.run(
                        ["python", "-m", "aitbc_cli.main"] + command,
                        capture_output=True,
                        text=True,
                        cwd="/home/oib/windsurf/aitbc/cli"
                    )

        # Run monitoring and operations concurrently
        monitor_thread = threading.Thread(target=monitor_resources)
        operation_thread = threading.Thread(target=run_intensive_operations)

        monitor_thread.start()
        operation_thread.start()

        monitor_thread.join()
        operation_thread.join()

        # Analyze resource usage
        cpu_values = [sample["cpu_percent"] for sample in resource_samples]
        memory_values = [sample["memory_percent"] for sample in resource_samples]

        avg_cpu = statistics.mean(cpu_values)
        max_cpu = max(cpu_values)
        avg_memory = statistics.mean(memory_values)
        max_memory = max(memory_values)

        # Resource assertions
        assert avg_cpu < 70, f"Average CPU usage too high: {avg_cpu:.1f}%"
        assert max_cpu < 90, f"Maximum CPU usage too high: {max_cpu:.1f}%"
        assert avg_memory < 80, f"Average memory usage too high: {avg_memory:.1f}%"
        assert max_memory < 95, f"Maximum memory usage too high: {max_memory:.1f}%"

        print("System Resource Usage Results:")
        print(f"  Average CPU: {avg_cpu:.1f}% (max: {max_cpu:.1f}%)")
        print(f"  Average Memory: {avg_memory:.1f}% (max: {max_memory:.1f}%)")
        print(f"  Baseline CPU: {baseline_metrics['cpu_percent']:.1f}%")
        print(f"  Baseline Memory: {baseline_metrics['memory_percent']:.1f}%")

    def test_stress_test_cli(self, performance_config):
        """Stress test CLI with high load"""
        def stress_cli_worker(worker_id):
            results = []
            commands = [
                ["wallet", "--help"],
                ["blockchain", "--help"],
                ["multisig", "--help"],
                ["compliance", "--help"]
            ]

            for i in range(50):  # 50 operations per worker
                command = commands[i % len(commands)]
                start_time = time.time()

                result = subprocess.run(
                    ["python", "-m", "aitbc_cli.main"] + command,
                    capture_output=True,
                    text=True,
                    cwd="/home/oib/windsurf/aitbc/cli"
                )

                end_time = time.time()

                results.append({
                    "worker_id": worker_id,
                    "operation_id": i,
                    "success": result.returncode == 0,
                    "response_time": (end_time - start_time) * 1000
                })

            return results

        # Run stress test with multiple workers
        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(stress_cli_worker, i) for i in range(5)]
            all_results = []

            for future in as_completed(futures):
                worker_results = future.result()
                all_results.extend(worker_results)

        # Analyze stress test results
        successful_operations = [r for r in all_results if r["success"]]
        response_times = [r["response_time"] for r in successful_operations]

        success_rate = len(successful_operations) / len(all_results)
        avg_response_time = statistics.mean(response_times) if response_times else 0
        p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times) if response_times else 0
        total_throughput = len(successful_operations) / 30  # approximate operations/second, assuming the run spans ~30 s

        # Stress test assertions (more lenient thresholds)
        assert success_rate >= 0.90, f"Low success rate under stress: {success_rate:.2%}"
        assert avg_response_time < 5000, f"Average response time too high under stress: {avg_response_time:.2f}ms"
        assert total_throughput >= 5, f"Throughput too low under stress: {total_throughput:.2f} ops/s"

        print("CLI Stress Test Results:")
        print(f"  Total operations: {len(all_results)}")
        print(f"  Success rate: {success_rate:.2%}")
        print(f"  Average response time: {avg_response_time:.2f}ms")
        print(f"  95th percentile: {p95_response_time:.2f}ms")
        print(f"  Throughput: {total_throughput:.2f} ops/s")

class TestLoadTesting:
    """Load testing for high-volume scenarios"""

    # NOTE: performance_config is a class-scoped fixture on TestPerformance above;
    # these tests would need it at module scope or in a conftest.py to resolve.
    def test_load_test_blockchain_operations(self, performance_config):
        """Load test blockchain operations"""
        # This would test blockchain operations under high load
        # Implementation depends on blockchain service availability
        pass

    def test_load_test_trading_operations(self, performance_config):
        """Load test trading operations"""
        # This would test trading operations under high load
        # Implementation depends on trading service availability
        pass

if __name__ == "__main__":
    # Run performance tests
    pytest.main([__file__, "-v", "--tb=short"])
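Both performance files compute the 95th percentile as statistics.quantiles(response_times, n=20)[18]. A quick sanity check of that idiom, with made-up latencies: n=20 yields 19 cut points at 5% steps, so index 18 (the last cut point) sits at the 95% mark.

import statistics

samples = [120.0, 125.0, 130.0, 135.0, 140.0,
           145.0, 150.0, 155.0, 160.0, 900.0]  # fabricated latencies in ms
cuts = statistics.quantiles(samples, n=20)  # 19 cut points at 5% increments
p95 = cuts[18]                              # 19th cut point == 95th percentile
print(f"p95 = {p95:.2f} ms")                # pulled up by the 900 ms outlier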
0  tests/performance/test_performance_benchmarks.py  Normal file → Executable file
505  tests/performance/test_performance_lightweight.py  Normal file
@@ -0,0 +1,505 @@
"""
Performance Tests for AITBC Chain Management and Analytics
Tests system performance under various load conditions (lightweight version)
"""

import pytest
import asyncio
import json
import time
import threading
import statistics
from datetime import datetime, timedelta
from pathlib import Path
import subprocess
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, Any, List, Tuple
import os
import resource

class TestPerformance:
    """Performance testing suite for AITBC components"""

    @pytest.fixture(scope="class")
    def performance_config(self):
        """Performance test configuration"""
        return {
            "base_url": "http://localhost",
            "ports": {
                "coordinator": 8001,
                "blockchain": 8007,
                "consensus": 8002,
                "network": 8008,
                "explorer": 8016,
                "wallet_daemon": 8003,
                "exchange": 8010,
                "oracle": 8011,
                "trading": 8012,
                "compliance": 8015,
                "plugin_registry": 8013,
                "plugin_marketplace": 8014,
                "global_infrastructure": 8017,
                "ai_agents": 8018,
                "load_balancer": 8019
            },
            "performance_thresholds": {
                "response_time_p95": 2000,  # 95th percentile < 2 seconds
                "response_time_p99": 5000,  # 99th percentile < 5 seconds
                "error_rate": 0.01,  # < 1% error rate
                "throughput_min": 50,  # Minimum 50 requests/second
                "cli_response_max": 5000  # CLI max response time < 5 seconds
            }
        }
|
||||
|
||||
    def get_memory_usage(self):
        """Get current memory usage (lightweight version)"""
        try:
            # Using resource module; ru_maxrss is reported in KB on Linux
            usage = resource.getrusage(resource.RUSAGE_SELF)
            return usage.ru_maxrss / 1024  # Convert to MB (on Linux)
        except Exception:
            return 0

    def get_cpu_usage(self):
        """Get CPU usage (lightweight version)"""
        try:
            # Placeholder: busy-samples for 0.1s but always returns 0; an
            # accurate reading would need process CPU times over an interval
            start_time = time.time()
            while time.time() - start_time < 0.1:  # Sample for 0.1 seconds
                pass
            return 0
        except Exception:
            return 0

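A note on the conversion above: ru_maxrss is kilobytes on Linux but bytes on macOS, so the single /1024 only yields MB on Linux. A platform-aware sketch:

import resource
import sys

def get_memory_usage_mb() -> float:
    """Peak RSS in MB; ru_maxrss is KB on Linux, bytes on macOS."""
    usage = resource.getrusage(resource.RUSAGE_SELF)
    divisor = 1024 * 1024 if sys.platform == "darwin" else 1024
    return usage.ru_maxrss / divisor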
    def test_cli_performance(self, performance_config):
        """Test CLI command performance"""
        cli_commands = [
            ["--help"],
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["genesis-protection", "--help"],
            ["transfer-control", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"],
            ["oracle", "--help"],
            ["market-maker", "--help"]
        ]

        response_times = []
        memory_usage_before = self.get_memory_usage()

        for command in cli_commands:
            start_time = time.time()

            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )

            end_time = time.time()
            response_time = (end_time - start_time) * 1000  # Convert to milliseconds

            assert result.returncode == 0, f"CLI command failed: {' '.join(command)}"
            assert response_time < performance_config["performance_thresholds"]["cli_response_max"], \
                f"CLI command too slow: {response_time:.2f}ms"

            response_times.append(response_time)

        memory_usage_after = self.get_memory_usage()
        memory_increase = memory_usage_after - memory_usage_before

        # Calculate performance statistics
        avg_response_time = statistics.mean(response_times)
        p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times)
        max_response_time = max(response_times)

        # Performance assertions
        assert avg_response_time < 1000, f"Average CLI response time too high: {avg_response_time:.2f}ms"
        assert p95_response_time < 3000, f"95th percentile CLI response time too high: {p95_response_time:.2f}ms"
        assert max_response_time < 10000, f"Maximum CLI response time too high: {max_response_time:.2f}ms"
        assert memory_increase < 100, f"Memory usage increased too much: {memory_increase:.1f}MB"

        print("CLI Performance Results:")
        print(f"  Average: {avg_response_time:.2f}ms")
        print(f"  95th percentile: {p95_response_time:.2f}ms")
        print(f"  Maximum: {max_response_time:.2f}ms")
        print(f"  Memory increase: {memory_increase:.1f}MB")

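The p95 idiom above works because statistics.quantiles(data, n=20) returns the 19 cut points between 20 equal-sized groups, so index 18 is the 95th percentile. A quick illustration:

import statistics

times = list(range(1, 101))               # 1..100 ms
cuts = statistics.quantiles(times, n=20)  # 19 cut points
print(len(cuts), cuts[18])                # 19 95.95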
    def test_concurrent_cli_operations(self, performance_config):
        """Test concurrent CLI operations"""
        def run_cli_command(command):
            start_time = time.time()
            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )
            end_time = time.time()
            return {
                "command": command,
                "success": result.returncode == 0,
                "response_time": (end_time - start_time) * 1000,
                "output_length": len(result.stdout)
            }

        # Test concurrent operations
        commands_to_test = [
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"]
        ]

        with ThreadPoolExecutor(max_workers=10) as executor:
            # Submit multiple concurrent requests
            futures = []
            for _ in range(20):  # 20 rounds of the 5 commands = 100 operations
                for command in commands_to_test:
                    future = executor.submit(run_cli_command, command)
                    futures.append(future)

            # Collect results
            results = []
            for future in as_completed(futures):
                result = future.result()
                results.append(result)

        # Analyze results
        successful_operations = [r for r in results if r["success"]]
        response_times = [r["response_time"] for r in successful_operations]

        success_rate = len(successful_operations) / len(results)
        avg_response_time = statistics.mean(response_times) if response_times else 0
        p95_response_time = statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else max(response_times) if response_times else 0

        # Performance assertions
        assert success_rate >= 0.95, f"Low success rate: {success_rate:.2%}"
        assert avg_response_time < 2000, f"Average response time too high: {avg_response_time:.2f}ms"
        assert p95_response_time < 5000, f"95th percentile response time too high: {p95_response_time:.2f}ms"

        print("Concurrent CLI Operations Results:")
        print(f"  Success rate: {success_rate:.2%}")
        print(f"  Average response time: {avg_response_time:.2f}ms")
        print(f"  95th percentile: {p95_response_time:.2f}ms")
        print(f"  Total operations: {len(results)}")

    def test_cli_memory_efficiency(self, performance_config):
        """Test CLI memory efficiency"""
        memory_samples = []

        def monitor_memory():
            for _ in range(10):
                memory_usage = self.get_memory_usage()
                memory_samples.append(memory_usage)
                time.sleep(0.5)

        def run_cli_operations():
            commands = [
                ["wallet", "--help"],
                ["blockchain", "--help"],
                ["multisig", "--help"],
                ["genesis-protection", "--help"],
                ["transfer-control", "--help"],
                ["compliance", "--help"],
                ["exchange", "--help"],
                ["oracle", "--help"],
                ["market-maker", "--help"]
            ]

            for _ in range(5):  # Run commands multiple times
                for command in commands:
                    subprocess.run(
                        ["python", "-m", "aitbc_cli.main"] + command,
                        capture_output=True,
                        text=True,
                        cwd="/home/oib/windsurf/aitbc/cli"
                    )

        # Monitor memory during operations
        monitor_thread = threading.Thread(target=monitor_memory)
        operation_thread = threading.Thread(target=run_cli_operations)

        monitor_thread.start()
        operation_thread.start()

        monitor_thread.join()
        operation_thread.join()

        # Analyze memory usage
        if memory_samples:
            avg_memory = statistics.mean(memory_samples)
            max_memory = max(memory_samples)
            min_memory = min(memory_samples)
            memory_variance = statistics.variance(memory_samples) if len(memory_samples) > 1 else 0

            # Memory efficiency assertions
            assert max_memory - min_memory < 50, f"Memory usage variance too high: {max_memory - min_memory:.1f}MB"
            assert avg_memory < 200, f"Average memory usage too high: {avg_memory:.1f}MB"

            print("CLI Memory Efficiency Results:")
            print(f"  Average memory: {avg_memory:.1f}MB")
            print(f"  Maximum memory: {max_memory:.1f}MB")
            print(f"  Memory variance: {memory_variance:.1f}")

    def test_cli_throughput(self, performance_config):
        """Test CLI command throughput"""
        def measure_throughput():
            commands = [
                ["wallet", "--help"],
                ["blockchain", "--help"],
                ["multisig", "--help"]
            ]

            start_time = time.time()
            successful_operations = 0

            for i in range(100):  # 100 operations
                command = commands[i % len(commands)]
                result = subprocess.run(
                    ["python", "-m", "aitbc_cli.main"] + command,
                    capture_output=True,
                    text=True,
                    cwd="/home/oib/windsurf/aitbc/cli"
                )

                if result.returncode == 0:
                    successful_operations += 1

            end_time = time.time()
            duration = end_time - start_time
            throughput = successful_operations / duration  # operations per second

            return {
                "total_operations": 100,
                "successful_operations": successful_operations,
                "duration": duration,
                "throughput": throughput
            }

        # Run throughput test
        result = measure_throughput()

        # Throughput assertions
        assert result["successful_operations"] >= 95, f"Too many failed operations: {result['successful_operations']}/100"
        assert result["throughput"] >= 10, f"Throughput too low: {result['throughput']:.2f} ops/s"
        assert result["duration"] < 30, f"Test took too long: {result['duration']:.2f}s"

        print("CLI Throughput Results:")
        print(f"  Successful operations: {result['successful_operations']}/100")
        print(f"  Duration: {result['duration']:.2f}s")
        print(f"  Throughput: {result['throughput']:.2f} ops/s")

    def test_cli_response_time_distribution(self, performance_config):
        """Test CLI response time distribution"""
        commands = [
            ["--help"],
            ["wallet", "--help"],
            ["blockchain", "--help"],
            ["multisig", "--help"],
            ["genesis-protection", "--help"],
            ["transfer-control", "--help"],
            ["compliance", "--help"],
            ["exchange", "--help"],
            ["oracle", "--help"],
            ["market-maker", "--help"]
        ]

        response_times = []

        # Run each command multiple times
        for command in commands:
            for _ in range(10):  # 10 times per command
                start_time = time.time()

                result = subprocess.run(
                    ["python", "-m", "aitbc_cli.main"] + command,
                    capture_output=True,
                    text=True,
                    cwd="/home/oib/windsurf/aitbc/cli"
                )

                end_time = time.time()
                response_time = (end_time - start_time) * 1000

                assert result.returncode == 0, f"CLI command failed: {' '.join(command)}"
                response_times.append(response_time)

        # Calculate distribution statistics
        min_time = min(response_times)
        max_time = max(response_times)
        mean_time = statistics.mean(response_times)
        median_time = statistics.median(response_times)
        std_dev = statistics.stdev(response_times)

        # Percentiles
        sorted_times = sorted(response_times)
        p50 = sorted_times[len(sorted_times) // 2]
        p90 = sorted_times[int(len(sorted_times) * 0.9)]
        p95 = sorted_times[int(len(sorted_times) * 0.95)]
        p99 = sorted_times[int(len(sorted_times) * 0.99)]

        # Distribution assertions
        assert mean_time < 1000, f"Mean response time too high: {mean_time:.2f}ms"
        assert p95 < 3000, f"95th percentile too high: {p95:.2f}ms"
        assert p99 < 5000, f"99th percentile too high: {p99:.2f}ms"
        assert std_dev < mean_time, f"Standard deviation too high: {std_dev:.2f}ms"

        print("CLI Response Time Distribution:")
        print(f"  Min: {min_time:.2f}ms")
        print(f"  Max: {max_time:.2f}ms")
        print(f"  Mean: {mean_time:.2f}ms")
        print(f"  Median: {median_time:.2f}ms")
        print(f"  Std Dev: {std_dev:.2f}ms")
        print(f"  50th percentile: {p50:.2f}ms")
        print(f"  90th percentile: {p90:.2f}ms")
        print(f"  95th percentile: {p95:.2f}ms")
        print(f"  99th percentile: {p99:.2f}ms")

    def test_cli_scalability(self, performance_config):
        """Test CLI scalability with increasing load"""
        def test_load_level(num_concurrent, operations_per_thread):
            def worker():
                commands = [["--help"], ["wallet", "--help"], ["blockchain", "--help"]]
                results = []

                for i in range(operations_per_thread):
                    command = commands[i % len(commands)]
                    start_time = time.time()

                    result = subprocess.run(
                        ["python", "-m", "aitbc_cli.main"] + command,
                        capture_output=True,
                        text=True,
                        cwd="/home/oib/windsurf/aitbc/cli"
                    )

                    end_time = time.time()
                    results.append({
                        "success": result.returncode == 0,
                        "response_time": (end_time - start_time) * 1000
                    })

                return results

            with ThreadPoolExecutor(max_workers=num_concurrent) as executor:
                futures = [executor.submit(worker) for _ in range(num_concurrent)]
                all_results = []

                for future in as_completed(futures):
                    worker_results = future.result()
                    all_results.extend(worker_results)

            # Analyze results (return zeroed stats when everything failed,
            # so callers never receive None)
            successful = [r for r in all_results if r["success"]]
            response_times = [r["response_time"] for r in successful]

            success_rate = len(successful) / len(all_results) if all_results else 0
            avg_response_time = statistics.mean(response_times) if response_times else 0

            return {
                "total_operations": len(all_results),
                "successful_operations": len(successful),
                "success_rate": success_rate,
                "avg_response_time": avg_response_time
            }

        # Test different load levels
        load_levels = [
            (1, 50),   # 1 thread, 50 operations
            (2, 50),   # 2 threads, 50 operations each
            (5, 20),   # 5 threads, 20 operations each
            (10, 10)   # 10 threads, 10 operations each
        ]

        results = {}

        for num_threads, ops_per_thread in load_levels:
            result = test_load_level(num_threads, ops_per_thread)
            results[f"{num_threads}x{ops_per_thread}"] = result

            # Scalability assertions
            assert result["success_rate"] >= 0.90, f"Low success rate at {num_threads}x{ops_per_thread}: {result['success_rate']:.2%}"
            assert result["avg_response_time"] < 3000, f"Response time too high at {num_threads}x{ops_per_thread}: {result['avg_response_time']:.2f}ms"

        print("CLI Scalability Results:")
        for load_level, result in results.items():
            print(f"  {load_level}: {result['success_rate']:.2%} success, {result['avg_response_time']:.2f}ms avg")

    def test_cli_error_handling_performance(self, performance_config):
        """Test CLI error handling performance"""
        # Test invalid commands
        invalid_commands = [
            ["--invalid-option"],
            ["wallet", "--invalid-subcommand"],
            ["blockchain", "invalid-subcommand"],
            ["nonexistent-command"]
        ]

        response_times = []

        for command in invalid_commands:
            start_time = time.time()

            result = subprocess.run(
                ["python", "-m", "aitbc_cli.main"] + command,
                capture_output=True,
                text=True,
                cwd="/home/oib/windsurf/aitbc/cli"
            )

            end_time = time.time()
            response_time = (end_time - start_time) * 1000

            # Should fail gracefully
            assert result.returncode != 0, f"Invalid command should fail: {' '.join(command)}"
            assert response_time < 2000, f"Error handling too slow: {response_time:.2f}ms"

            response_times.append(response_time)

        avg_error_response_time = statistics.mean(response_times)
        max_error_response_time = max(response_times)

        # Error handling performance assertions
        assert avg_error_response_time < 1000, f"Average error response time too high: {avg_error_response_time:.2f}ms"
        assert max_error_response_time < 2000, f"Maximum error response time too high: {max_error_response_time:.2f}ms"

        print("CLI Error Handling Performance:")
        print(f"  Average error response time: {avg_error_response_time:.2f}ms")
        print(f"  Maximum error response time: {max_error_response_time:.2f}ms")

class TestServicePerformance:
    """Test service performance (when services are available)"""

    def test_service_health_performance(self, performance_config):
        """Test service health endpoint performance"""
        services_to_test = {
            "global_infrastructure": performance_config["ports"]["global_infrastructure"],
            "consensus": performance_config["ports"]["consensus"]
        }

        for service_name, port in services_to_test.items():
            try:
                start_time = time.time()
                response = requests.get(f"{performance_config['base_url']}:{port}/health", timeout=5)
                end_time = time.time()

                response_time = (end_time - start_time) * 1000

                if response.status_code == 200:
                    assert response_time < 1000, f"{service_name} health endpoint too slow: {response_time:.2f}ms"
                    print(f"✅ {service_name} health: {response_time:.2f}ms")
                else:
                    print(f"⚠️ {service_name} health returned {response.status_code}")

            except Exception as e:
                print(f"❌ {service_name} health check failed: {str(e)}")


if __name__ == "__main__":
    # Run performance tests
    pytest.main([__file__, "-v", "--tb=short"])
0
tests/performance/test_pricing_performance.py
Normal file → Executable file
0
tests/reputation/test_reputation_system.py
Normal file → Executable file
0
tests/rewards/test_reward_system.py
Normal file → Executable file
0
tests/security/test_confidential_transactions.py
Normal file → Executable file
681
tests/security/test_security.py
Normal file
@@ -0,0 +1,681 @@
"""
Security Tests for AITBC Private Chain Access Control and Encryption
Tests security features, access controls, and encryption mechanisms
"""

import pytest
import json
import hashlib
import hmac
import secrets
import time
from datetime import datetime, timedelta
from pathlib import Path
import subprocess
import requests
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Any, List, Optional
import tempfile
import os


class TestSecurity:
    """Security testing suite for AITBC components"""

    @pytest.fixture(scope="class")
    def security_config(self):
        """Security test configuration"""
        return {
            "test_data_dir": Path("/tmp/aitbc_security_test"),
            "encryption_key": secrets.token_hex(32),
            "test_password": "TestSecurePassword123!",
            "test_wallet_id": "test_security_wallet",
            "test_chain_id": "ait-security-test",
            "security_thresholds": {
                "password_min_length": 8,
                "encryption_strength": 256,
                "session_timeout_minutes": 30,
                "max_login_attempts": 5,
                "lockout_duration_minutes": 15
            }
        }

    def test_password_security(self, security_config):
        """Test password security requirements"""
        # Test password validation
        weak_passwords = [
            "123",
            "password",
            "abc",
            "test",
            "short",
            "",
            "12345678",
            "password123"
        ]

        strong_passwords = [
            "SecureP@ssw0rd123!",
            "MyStr0ng#P@ssword",
            "AitbcSecur3ty@2026",
            "ComplexP@ssw0rd!#$",
            "VerySecureP@ssw0rd123"
        ]

        # Weak passwords should be rejected
        for password in weak_passwords:
            is_valid = validate_password_strength(password)
            assert not is_valid, f"Weak password should be rejected: {password}"

        # Strong passwords should be accepted
        for password in strong_passwords:
            is_valid = validate_password_strength(password)
            assert is_valid, f"Strong password should be accepted: {password}"

        print("✅ Password security validation working correctly")

    def test_encryption_decryption(self, security_config):
        """Test encryption and decryption mechanisms"""
        test_data = "Sensitive AITBC blockchain data"
        encryption_key = security_config["encryption_key"]

        # Test encryption
        encrypted_data = encrypt_data(test_data, encryption_key)
        assert encrypted_data != test_data, "Encrypted data should be different from original"
        assert len(encrypted_data) > 0, "Encrypted data should not be empty"

        # Test decryption
        decrypted_data = decrypt_data(encrypted_data, encryption_key)
        assert decrypted_data == test_data, "Decrypted data should match original"

        # Test with wrong key
        wrong_key = secrets.token_hex(32)
        decrypted_with_wrong_key = decrypt_data(encrypted_data, wrong_key)
        assert decrypted_with_wrong_key != test_data, "Decryption with wrong key should fail"

        print("✅ Encryption/decryption working correctly")

    def test_hashing_security(self, security_config):
        """Test cryptographic hashing"""
        test_data = "AITBC blockchain transaction data"

        # Test SHA-256 hashing
        hash1 = hashlib.sha256(test_data.encode()).hexdigest()
        hash2 = hashlib.sha256(test_data.encode()).hexdigest()

        assert hash1 == hash2, "Same data should produce same hash"
        assert len(hash1) == 64, "SHA-256 hash should be 64 characters"
        assert all(c in '0123456789abcdef' for c in hash1), "Hash should only contain hex characters"

        # Test different data produces different hash
        different_data = "Different blockchain data"
        hash3 = hashlib.sha256(different_data.encode()).hexdigest()
        assert hash1 != hash3, "Different data should produce different hash"

        # Test HMAC for message authentication
        secret_key = security_config["encryption_key"]
        hmac1 = hmac.new(secret_key.encode(), test_data.encode(), hashlib.sha256).hexdigest()
        hmac2 = hmac.new(secret_key.encode(), test_data.encode(), hashlib.sha256).hexdigest()

        assert hmac1 == hmac2, "HMAC should be consistent"

        # Test HMAC with different key
        different_key = "different_secret_key"
        hmac3 = hmac.new(different_key.encode(), test_data.encode(), hashlib.sha256).hexdigest()
        assert hmac1 != hmac3, "HMAC with different key should be different"

        print("✅ Cryptographic hashing working correctly")

    def test_wallet_security(self, security_config):
        """Test wallet security features"""
        security_config["test_data_dir"].mkdir(parents=True, exist_ok=True)

        # Test wallet file permissions
        wallet_file = security_config["test_data_dir"] / "test_wallet.json"

        # Create test wallet
        wallet_data = {
            "wallet_id": security_config["test_wallet_id"],
            "private_key": secrets.token_hex(32),
            "public_key": secrets.token_hex(64),
            "address": f"ait1{secrets.token_hex(40)}",
            "created_at": datetime.utcnow().isoformat()
        }

        with open(wallet_file, 'w') as f:
            json.dump(wallet_data, f)

        # Set restrictive permissions (600 - read/write for owner only)
        os.chmod(wallet_file, 0o600)

        # Verify permissions
        file_stat = wallet_file.stat()
        file_permissions = oct(file_stat.st_mode)[-3:]

        assert file_permissions == "600", f"Wallet file should have 600 permissions, got {file_permissions}"

        # Test wallet encryption
        encrypted_wallet = encrypt_wallet_data(wallet_data, security_config["test_password"])
        assert encrypted_wallet != wallet_data, "Encrypted wallet should be different"

        # Test wallet decryption
        decrypted_wallet = decrypt_wallet_data(encrypted_wallet, security_config["test_password"])
        assert decrypted_wallet["wallet_id"] == wallet_data["wallet_id"], "Decrypted wallet should match original"

        # Decryption with the wrong password must raise rather than return data
        with pytest.raises(Exception):
            decrypt_wallet_data(encrypted_wallet, "wrong_password")

        # Cleanup
        wallet_file.unlink()

        print("✅ Wallet security features working correctly")

    def test_chain_access_control(self, security_config):
        """Test chain access control mechanisms"""
        # Test chain access permissions
        chain_permissions = {
            "admin": ["read", "write", "delete", "manage"],
            "operator": ["read", "write"],
            "viewer": ["read"],
            "anonymous": []
        }

        # Test permission validation
        def has_permission(user_role, required_permission):
            return required_permission in chain_permissions.get(user_role, [])

        # Test admin permissions
        assert has_permission("admin", "read"), "Admin should have read permission"
        assert has_permission("admin", "write"), "Admin should have write permission"
        assert has_permission("admin", "delete"), "Admin should have delete permission"
        assert has_permission("admin", "manage"), "Admin should have manage permission"

        # Test operator permissions
        assert has_permission("operator", "read"), "Operator should have read permission"
        assert has_permission("operator", "write"), "Operator should have write permission"
        assert not has_permission("operator", "delete"), "Operator should not have delete permission"
        assert not has_permission("operator", "manage"), "Operator should not have manage permission"

        # Test viewer permissions
        assert has_permission("viewer", "read"), "Viewer should have read permission"
        assert not has_permission("viewer", "write"), "Viewer should not have write permission"
        assert not has_permission("viewer", "delete"), "Viewer should not have delete permission"

        # Test anonymous permissions
        assert not has_permission("anonymous", "read"), "Anonymous should not have read permission"
        assert not has_permission("anonymous", "write"), "Anonymous should not have write permission"

        # Test invalid role
        assert not has_permission("invalid_role", "read"), "Invalid role should have no permissions"

        print("✅ Chain access control working correctly")

    def test_transaction_security(self, security_config):
        """Test transaction security features"""
        # Test transaction signing
        transaction_data = {
            "from": f"ait1{secrets.token_hex(40)}",
            "to": f"ait1{secrets.token_hex(40)}",
            "amount": "1000",
            "nonce": secrets.token_hex(16),
            "timestamp": int(time.time())
        }

        private_key = secrets.token_hex(32)

        # Sign transaction
        signature = sign_transaction(transaction_data, private_key)
        assert signature != transaction_data, "Signature should be different from transaction data"
        assert len(signature) > 0, "Signature should not be empty"

        # Verify signature
        is_valid = verify_transaction_signature(transaction_data, signature, private_key)
        assert is_valid, "Signature verification should pass"

        # Test with tampered data
        tampered_data = transaction_data.copy()
        tampered_data["amount"] = "2000"

        is_valid_tampered = verify_transaction_signature(tampered_data, signature, private_key)
        assert not is_valid_tampered, "Signature verification should fail for tampered data"

        # Test with wrong key
        wrong_key = secrets.token_hex(32)
        is_valid_wrong_key = verify_transaction_signature(transaction_data, signature, wrong_key)
        assert not is_valid_wrong_key, "Signature verification should fail with wrong key"

        print("✅ Transaction security working correctly")

    def test_session_security(self, security_config):
        """Test session management security"""
        # Test session token generation
        user_id = "test_user_123"
        session_token = generate_session_token(user_id)

        assert len(session_token) > 20, "Session token should be sufficiently long"
        assert session_token != user_id, "Session token should be different from user ID"

        # Test session validation
        is_valid = validate_session_token(session_token, user_id)
        assert is_valid, "Valid session token should pass validation"

        # Test session with wrong user
        is_valid_wrong_user = validate_session_token(session_token, "wrong_user")
        assert not is_valid_wrong_user, "Session token should fail for wrong user"

        # Test expired session
        expired_token = generate_expired_session_token(user_id)
        is_valid_expired = validate_session_token(expired_token, user_id)
        assert not is_valid_expired, "Expired session token should fail validation"

        # Test session timeout
        session_timeout = security_config["security_thresholds"]["session_timeout_minutes"]
        assert session_timeout == 30, "Session timeout should be 30 minutes"

        print("✅ Session security working correctly")

    def test_api_security(self, security_config):
        """Test API security features"""
        # Test API key generation
        api_key = generate_api_key()

        assert len(api_key) >= 32, "API key should be at least 32 characters"
        assert api_key.isalnum(), "API key should be alphanumeric"

        # Test API key validation
        is_valid = validate_api_key(api_key)
        assert is_valid, "Valid API key should pass validation"

        # Test invalid API keys
        invalid_keys = [
            "short",
            "invalid@key",
            "key with spaces",
            "key-with-special-chars!",
            ""
        ]

        for invalid_key in invalid_keys:
            assert not validate_api_key(invalid_key), f"Invalid API key should fail validation: {invalid_key}"

        # Test rate limiting (simulation)
        rate_limiter = RateLimiter(max_requests=5, window_seconds=60)

        # Should allow requests within limit
        for i in range(5):
            assert rate_limiter.is_allowed(), f"Request {i+1} should be allowed"

        # Should block request beyond limit
        assert not rate_limiter.is_allowed(), "Request beyond limit should be blocked"

        print("✅ API security working correctly")

    def test_data_protection(self, security_config):
        """Test data protection and privacy"""
        sensitive_data = {
            "user_id": "user_123",
            "private_key": secrets.token_hex(32),
            "email": "user@example.com",
            "phone": "+1234567890",
            "address": "123 Blockchain Street"
        }

        # Test data masking
        masked_data = mask_sensitive_data(sensitive_data)

        assert masked_data["private_key"] != sensitive_data["private_key"], "Private key should be masked"
        assert "email" in masked_data, "Email should remain present"
        assert masked_data["email"] != sensitive_data["email"], "Email should be partially masked"

        # Test data anonymization
        anonymized_data = anonymize_data(sensitive_data)

        assert anonymized_data["user_id"] != sensitive_data["user_id"], "User ID should be anonymized"
        assert anonymized_data["private_key"] != sensitive_data["private_key"], "Private key should be anonymized"
        assert anonymized_data["email"] != sensitive_data["email"], "Email should be anonymized"

        # Test data retention
        retention_days = 365
        cutoff_date = datetime.utcnow() - timedelta(days=retention_days)

        old_data = {
            "data": "sensitive_info",
            "created_at": (cutoff_date - timedelta(days=1)).isoformat()
        }

        should_delete = should_delete_data(old_data, retention_days)
        assert should_delete, "Data older than retention period should be deleted"

        recent_data = {
            "data": "sensitive_info",
            "created_at": datetime.utcnow().isoformat()
        }

        should_not_delete = should_delete_data(recent_data, retention_days)
        assert not should_not_delete, "Recent data should not be deleted"

        print("✅ Data protection working correctly")

    def test_audit_logging(self, security_config):
        """Test security audit logging"""
        audit_log = []

        # Test audit log entry creation
        log_entry = create_audit_log(
            action="wallet_create",
            user_id="test_user",
            resource_id="wallet_123",
            details={"wallet_type": "multi_signature"},
            ip_address="192.168.1.1"
        )

        assert "action" in log_entry, "Audit log should contain action"
        assert "user_id" in log_entry, "Audit log should contain user ID"
        assert "timestamp" in log_entry, "Audit log should contain timestamp"
        assert "ip_address" in log_entry, "Audit log should contain IP address"

        audit_log.append(log_entry)

        # Test audit log integrity
        log_hash = calculate_audit_log_hash(audit_log)
        assert len(log_hash) == 64, "Audit log hash should be 64 characters"

        # Test audit log tampering detection (copy entries so the original
        # log is not mutated through shared references)
        tampered_log = [entry.copy() for entry in audit_log]
        tampered_log[0]["action"] = "different_action"

        tampered_hash = calculate_audit_log_hash(tampered_log)
        assert log_hash != tampered_hash, "Tampered log should have different hash"

        print("✅ Audit logging working correctly")

class TestAuthenticationSecurity:
    """Test authentication and authorization security"""

    def test_multi_factor_authentication(self):
        """Test multi-factor authentication"""
        user_credentials = {
            "username": "test_user",
            "password": "SecureP@ssw0rd123!"
        }

        # Test password authentication
        password_valid = authenticate_password(user_credentials["username"], user_credentials["password"])
        assert password_valid, "Valid password should authenticate"

        # Test invalid password
        invalid_password_valid = authenticate_password(user_credentials["username"], "wrong_password")
        assert not invalid_password_valid, "Invalid password should not authenticate"

        # Test 2FA token generation
        totp_secret = generate_totp_secret()
        totp_code = generate_totp_code(totp_secret)

        assert len(totp_code) == 6, "TOTP code should be 6 digits"
        assert totp_code.isdigit(), "TOTP code should be numeric"

        # Test 2FA validation
        totp_valid = validate_totp_code(totp_secret, totp_code)
        assert totp_valid, "Valid TOTP code should pass"

        # Test invalid TOTP code (pick one that cannot equal the real code)
        wrong_code = "000000" if totp_code != "000000" else "111111"
        assert not validate_totp_code(totp_secret, wrong_code), "Invalid TOTP code should fail"

        print("✅ Multi-factor authentication working correctly")

    def test_login_attempt_limiting(self):
        """Test login attempt limiting"""
        user_id = "test_user"
        max_attempts = 5
        lockout_duration = 15  # minutes

        login_attempts = LoginAttemptLimiter(max_attempts, lockout_duration)

        # Record failed attempts up to (but not reaching) the limit
        for i in range(max_attempts - 1):
            login_attempts.record_failed_attempt(user_id)
            assert not login_attempts.is_locked_out(user_id), f"User should not be locked out after {i + 1} attempts"

        # Test lockout after max attempts
        login_attempts.record_failed_attempt(user_id)
        assert login_attempts.is_locked_out(user_id), "User should be locked out after max attempts"

        # Test lockout duration
        lockout_remaining = login_attempts.get_lockout_remaining(user_id)
        assert lockout_remaining > 0, "Lockout should have remaining time"
        assert lockout_remaining <= lockout_duration * 60, "Lockout should not exceed max duration"

        print("✅ Login attempt limiting working correctly")

# Security utility functions
def validate_password_strength(password: str) -> bool:
    """Validate password strength"""
    if len(password) < 8:
        return False

    has_upper = any(c.isupper() for c in password)
    has_lower = any(c.islower() for c in password)
    has_digit = any(c.isdigit() for c in password)
    has_special = any(c in "!@#$%^&*()_+-=[]{}|;:,.<>?" for c in password)

    return has_upper and has_lower and has_digit and has_special


def encrypt_data(data: str, key: str) -> str:
    """Simple encryption simulation (in production, use proper encryption)"""
    import base64

    # Simulate encryption with XOR and base64 encoding
    key_bytes = key.encode()
    data_bytes = data.encode()

    encrypted = bytes([b ^ key_bytes[i % len(key_bytes)] for i, b in enumerate(data_bytes)])
    return base64.b64encode(encrypted).decode()


def decrypt_data(encrypted_data: str, key: str) -> str:
    """Simple decryption simulation (in production, use proper decryption)"""
    import base64

    try:
        key_bytes = key.encode()
        encrypted_bytes = base64.b64decode(encrypted_data.encode())

        decrypted = bytes([b ^ key_bytes[i % len(key_bytes)] for i, b in enumerate(encrypted_bytes)])
        return decrypted.decode()
    except Exception:
        # A wrong key usually yields bytes that fail UTF-8 decoding
        return ""

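The XOR scheme above is deliberately toy-grade, as its docstrings say. For reference, a production-grade variant could use an authenticated scheme such as Fernet from the third-party cryptography package (not a dependency of this test suite); a minimal sketch:

from cryptography.fernet import Fernet

key = Fernet.generate_key()        # 32-byte urlsafe-base64 key
fernet = Fernet(key)
token = fernet.encrypt(b"Sensitive AITBC blockchain data")
plaintext = fernet.decrypt(token)  # raises InvalidToken under a wrong key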
def encrypt_wallet_data(wallet_data: Dict[str, Any], password: str) -> str:
    """Encrypt wallet data with password"""
    wallet_json = json.dumps(wallet_data)
    return encrypt_data(wallet_json, password)


def decrypt_wallet_data(encrypted_wallet: str, password: str) -> Dict[str, Any]:
    """Decrypt wallet data with password"""
    decrypted_json = decrypt_data(encrypted_wallet, password)
    return json.loads(decrypted_json)


def sign_transaction(transaction: Dict[str, Any], private_key: str) -> str:
    """Sign transaction (simulation: a keyed hash, not a real asymmetric signature)"""
    transaction_json = json.dumps(transaction, sort_keys=True)
    return hashlib.sha256((transaction_json + private_key).encode()).hexdigest()


def verify_transaction_signature(transaction: Dict[str, Any], signature: str, key: str) -> bool:
    """Verify transaction signature (simulation: recompute with the same shared key)"""
    expected_signature = sign_transaction(transaction, key)
    return hmac.compare_digest(signature, expected_signature)

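Real chains verify with the public half of an asymmetric key pair rather than a shared secret. A sketch of the same round trip using Ed25519 from the third-party cryptography package (again, not a dependency here):

from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.exceptions import InvalidSignature

private_key = Ed25519PrivateKey.generate()
public_key = private_key.public_key()

message = b'{"amount": "1000", "to": "ait1..."}'
signature = private_key.sign(message)

public_key.verify(signature, message)  # passes silently
try:
    public_key.verify(signature, b"tampered")
except InvalidSignature:
    pass  # tampered payload is rejected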
def generate_session_token(user_id: str, issued_at: Optional[int] = None) -> str:
    """Generate a 64-character session token: user-derived prefix + issue time + random digest"""
    if issued_at is None:
        issued_at = int(time.time())
    prefix = hashlib.sha256(user_id.encode()).hexdigest()[:8]
    random_data = secrets.token_hex(16)
    digest = hashlib.sha256(f"{user_id}:{issued_at}:{random_data}".encode()).hexdigest()
    return f"{prefix}{issued_at:010d}{digest[:46]}"


def generate_expired_session_token(user_id: str) -> str:
    """Generate expired session token for testing (issued 1 hour ago)"""
    return generate_session_token(user_id, issued_at=int(time.time()) - 3600)


def validate_session_token(token: str, user_id: str) -> bool:
    """Validate session token: length, user prefix, and age (30-minute timeout)"""
    # In production, this would also verify a server-side signature
    if len(token) != 64:
        return False
    if not token.startswith(hashlib.sha256(user_id.encode()).hexdigest()[:8]):
        return False
    try:
        issued_at = int(token[8:18])
    except ValueError:
        return False
    return time.time() - issued_at < 30 * 60


def generate_api_key() -> str:
    """Generate API key"""
    return secrets.token_hex(32)


def validate_api_key(api_key: str) -> bool:
    """Validate API key format"""
    return len(api_key) >= 32 and api_key.isalnum()


class RateLimiter:
    """Simple sliding-window rate limiter"""

    def __init__(self, max_requests: int, window_seconds: int):
        self.max_requests = max_requests
        self.window_seconds = window_seconds
        self.requests = []  # timestamps; a list avoids collisions when two requests share a timestamp

    def is_allowed(self) -> bool:
        current_time = time.time()
        window_start = current_time - self.window_seconds

        # Drop requests that fell out of the window
        self.requests = [t for t in self.requests if t > window_start]

        if len(self.requests) >= self.max_requests:
            return False

        self.requests.append(current_time)
        return True

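This limiter tracks a single global counter; API gateways typically limit per API key or client IP. A per-key variant, as a sketch along the same lines:

from collections import defaultdict
import time

class PerKeyRateLimiter:
    """Sliding-window rate limiter keyed by API key (or client IP)."""

    def __init__(self, max_requests: int, window_seconds: int):
        self.max_requests = max_requests
        self.window_seconds = window_seconds
        self.requests = defaultdict(list)

    def is_allowed(self, api_key: str) -> bool:
        now = time.time()
        window = self.requests[api_key]
        # Keep only timestamps still inside this key's window
        window[:] = [t for t in window if t > now - self.window_seconds]
        if len(window) >= self.max_requests:
            return False
        window.append(now)
        return True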
def mask_sensitive_data(data: Dict[str, Any]) -> Dict[str, Any]:
    """Mask sensitive data"""
    masked = data.copy()

    if "private_key" in masked:
        masked["private_key"] = "***MASKED***"

    if "email" in masked:
        email = masked["email"]
        if "@" in email:
            local, domain = email.split("@", 1)
            masked["email"] = f"{local[:2]}***@{domain}"

    return masked


def anonymize_data(data: Dict[str, Any]) -> Dict[str, Any]:
    """Anonymize sensitive data"""
    anonymized = {}

    for key, value in data.items():
        if key in ["user_id", "email", "phone", "address", "private_key"]:
            anonymized[key] = "***ANONYMIZED***"
        else:
            anonymized[key] = value

    return anonymized


def should_delete_data(data: Dict[str, Any], retention_days: int) -> bool:
    """Check if data should be deleted based on retention policy"""
    if "created_at" not in data:
        return False

    created_at = datetime.fromisoformat(data["created_at"])
    cutoff_date = datetime.utcnow() - timedelta(days=retention_days)

    return created_at < cutoff_date


def create_audit_log(action: str, user_id: str, resource_id: str, details: Dict[str, Any], ip_address: str) -> Dict[str, Any]:
    """Create audit log entry"""
    return {
        "action": action,
        "user_id": user_id,
        "resource_id": resource_id,
        "details": details,
        "ip_address": ip_address,
        "timestamp": datetime.utcnow().isoformat(),
        "log_id": secrets.token_hex(16)
    }


def calculate_audit_log_hash(audit_log: List[Dict[str, Any]]) -> str:
    """Calculate hash of audit log for integrity verification"""
    log_json = json.dumps(audit_log, sort_keys=True)
    return hashlib.sha256(log_json.encode()).hexdigest()


def authenticate_password(username: str, password: str) -> bool:
    """Simulate password authentication"""
    # In production, this would check against hashed passwords
    return username == "test_user" and password == "SecureP@ssw0rd123!"


def generate_totp_secret() -> str:
    """Generate TOTP secret"""
    return secrets.token_hex(20)


def generate_totp_code(secret: str) -> str:
    """Generate a 6-digit TOTP code (simplified, not RFC 6238)"""
    timestep = int(time.time() // 30)
    counter = f"{secret}{timestep}"
    # Reduce the hash to a 6-digit decimal code so it is always numeric
    code = int(hashlib.sha256(counter.encode()).hexdigest(), 16) % 1_000_000
    return f"{code:06d}"


def validate_totp_code(secret: str, code: str) -> bool:
    """Validate TOTP code"""
    expected_code = generate_totp_code(secret)
    return hmac.compare_digest(code, expected_code)

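The simplified code above just truncates a hash; standard TOTP (RFC 6238) applies HMAC-SHA1 to the big-endian time counter and dynamically truncates the MAC. A stdlib-only sketch, assuming a base32-encoded secret as authenticator apps use:

import base64
import hashlib
import hmac
import struct
import time

def rfc6238_totp(secret_b32: str, step: int = 30, digits: int = 6) -> str:
    """Standard TOTP: HMAC-SHA1 over the time counter, with dynamic truncation."""
    key = base64.b32decode(secret_b32, casefold=True)
    counter = struct.pack(">Q", int(time.time() // step))
    mac = hmac.new(key, counter, hashlib.sha1).digest()
    offset = mac[-1] & 0x0F
    code = (struct.unpack(">I", mac[offset:offset + 4])[0] & 0x7FFFFFFF) % (10 ** digits)
    return str(code).zfill(digits)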
class LoginAttemptLimiter:
    """Login attempt limiter"""

    def __init__(self, max_attempts: int, lockout_duration_minutes: int):
        self.max_attempts = max_attempts
        self.lockout_duration_minutes = lockout_duration_minutes
        self.attempts = {}

    def record_failed_attempt(self, user_id: str):
        """Record failed login attempt"""
        current_time = time.time()

        if user_id not in self.attempts:
            self.attempts[user_id] = []

        self.attempts[user_id].append(current_time)

    def is_locked_out(self, user_id: str) -> bool:
        """Check if user is locked out"""
        if user_id not in self.attempts:
            return False

        # Remove attempts older than lockout period
        lockout_time = self.lockout_duration_minutes * 60
        current_time = time.time()
        cutoff_time = current_time - lockout_time

        self.attempts[user_id] = [
            attempt for attempt in self.attempts[user_id]
            if attempt > cutoff_time
        ]

        return len(self.attempts[user_id]) >= self.max_attempts

    def get_lockout_remaining(self, user_id: str) -> int:
        """Get remaining lockout time in seconds"""
        if not self.is_locked_out(user_id):
            return 0

        oldest_attempt = min(self.attempts[user_id])
        lockout_end = oldest_attempt + (self.lockout_duration_minutes * 60)
        remaining = max(0, int(lockout_end - time.time()))

        return remaining


if __name__ == "__main__":
    # Run security tests
    pytest.main([__file__, "-v", "--tb=short"])
0
tests/security/test_security_comprehensive.py
Normal file → Executable file
0
tests/test-integration-completed.md
Normal file → Executable file
0
tests/test_agent_wallet_security.py
Normal file → Executable file
0
tests/test_cli_translation_security.py
Normal file → Executable file
0
tests/test_event_driven_cache.py
Normal file → Executable file
0
tests/test_explorer_fixes.py
Normal file → Executable file
0
tests/test_explorer_integration.py
Normal file → Executable file
Some files were not shown because too many files have changed in this diff.