chore: remove configuration files and enhance blockchain explorer with advanced search, analytics, and export features

- Delete .aitbc.yaml.example CLI configuration template
- Delete .lycheeignore link checker exclusion rules
- Delete .nvmrc Node.js version specification
- Add advanced search panel with filters for address, amount range, transaction type, time range, and validator
- Add analytics dashboard with transaction volume, active addresses, and block time metrics
- Add Chart.js integration
This commit is contained in:
oib
2026-03-02 15:38:25 +01:00
parent af185cdd8b
commit ccedbace53
271 changed files with 35942 additions and 2359 deletions

View File

@@ -0,0 +1,3 @@
"""
Multi-chain tests
"""

View File

@@ -0,0 +1,442 @@
"""
Test for cross-chain agent communication system
"""
import asyncio
import pytest
from datetime import datetime, timedelta
from aitbc_cli.core.config import MultiChainConfig, NodeConfig
from aitbc_cli.core.agent_communication import (
CrossChainAgentCommunication, AgentInfo, AgentMessage,
MessageType, AgentStatus, AgentCollaboration, AgentReputation
)
def test_agent_communication_creation():
    """A freshly constructed communication system holds only empty state."""
    cfg = MultiChainConfig()
    comm = CrossChainAgentCommunication(cfg)
    assert comm.config == cfg
    # Every registry starts out empty before any agent activity.
    for registry in (comm.agents, comm.messages, comm.collaborations,
                     comm.reputations, comm.routing_table):
        assert registry == {}
@pytest.mark.asyncio
async def test_agent_registration():
    """register_agent() stores the agent and seeds a reputation entry.

    Fixed: the coroutine test was missing ``@pytest.mark.asyncio`` — without
    the marker pytest collects the function but never awaits it, so the body
    silently never runs.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    agent_info = AgentInfo(
        agent_id="test-agent-1",
        name="Test Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading", "analytics"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    success = await comm.register_agent(agent_info)
    assert success
    assert "test-agent-1" in comm.agents
    assert comm.agents["test-agent-1"].name == "Test Agent"
    # Registration must also create a matching reputation record that
    # starts from the agent's declared score.
    assert "test-agent-1" in comm.reputations
    assert comm.reputations["test-agent-1"].reputation_score == 0.8
@pytest.mark.asyncio
async def test_agent_discovery():
    """discover_agents() filters by chain, capability and active status.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    # Two active agents on chain-1 plus one inactive agent on chain-2.
    agents = [
        AgentInfo(
            agent_id="agent-1",
            name="Agent 1",
            chain_id="chain-1",
            node_id="node-1",
            status=AgentStatus.ACTIVE,
            capabilities=["trading", "analytics"],
            reputation_score=0.8,
            last_seen=datetime.now(),
            endpoint="http://localhost:8080",
            version="1.0.0"
        ),
        AgentInfo(
            agent_id="agent-2",
            name="Agent 2",
            chain_id="chain-1",
            node_id="node-1",
            status=AgentStatus.ACTIVE,
            capabilities=["mining"],
            reputation_score=0.7,
            last_seen=datetime.now(),
            endpoint="http://localhost:8081",
            version="1.0.0"
        ),
        AgentInfo(
            agent_id="agent-3",
            name="Agent 3",
            chain_id="chain-2",
            node_id="node-2",
            status=AgentStatus.INACTIVE,
            capabilities=["trading"],
            reputation_score=0.6,
            last_seen=datetime.now(),
            endpoint="http://localhost:8082",
            version="1.0.0"
        )
    ]
    for agent in agents:
        await comm.register_agent(agent)
    # Chain filter: only the two chain-1 agents come back.
    chain1_agents = await comm.discover_agents("chain-1")
    assert len(chain1_agents) == 2
    assert all(agent.chain_id == "chain-1" for agent in chain1_agents)
    # Capability filter narrows to the single trading-capable agent.
    trading_agents = await comm.discover_agents("chain-1", ["trading"])
    assert len(trading_agents) == 1
    assert trading_agents[0].agent_id == "agent-1"
    # Discovery must only surface ACTIVE agents.
    active_agents = await comm.discover_agents("chain-1")
    assert all(agent.status == AgentStatus.ACTIVE for agent in active_agents)
@pytest.mark.asyncio
async def test_message_sending():
    """A message between two registered agents is stored and delivered.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    sender = AgentInfo(
        agent_id="sender-agent",
        name="Sender",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    receiver = AgentInfo(
        agent_id="receiver-agent",
        name="Receiver",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["analytics"],
        reputation_score=0.7,
        last_seen=datetime.now(),
        endpoint="http://localhost:8081",
        version="1.0.0"
    )
    await comm.register_agent(sender)
    await comm.register_agent(receiver)
    message = AgentMessage(
        message_id="test-message-1",
        sender_id="sender-agent",
        receiver_id="receiver-agent",
        message_type=MessageType.COMMUNICATION,
        chain_id="chain-1",
        target_chain_id=None,
        payload={"action": "test", "data": "hello"},
        timestamp=datetime.now(),
        signature="test-signature",
        priority=5,
        ttl_seconds=3600
    )
    success = await comm.send_message(message)
    assert success
    assert "test-message-1" in comm.messages
    # Same-chain messages are delivered immediately, so nothing queues.
    assert len(comm.message_queue["receiver-agent"]) == 0
@pytest.mark.asyncio
async def test_cross_chain_messaging():
    """Messages can be routed between agents on different chains.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    # Sender and receiver live on different chains.
    sender = AgentInfo(
        agent_id="cross-chain-sender",
        name="Cross Chain Sender",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    receiver = AgentInfo(
        agent_id="cross-chain-receiver",
        name="Cross Chain Receiver",
        chain_id="chain-2",
        node_id="node-2",
        status=AgentStatus.ACTIVE,
        capabilities=["analytics"],
        reputation_score=0.7,
        last_seen=datetime.now(),
        endpoint="http://localhost:8081",
        version="1.0.0"
    )
    await comm.register_agent(sender)
    await comm.register_agent(receiver)
    # target_chain_id marks this as a cross-chain message.
    message = AgentMessage(
        message_id="cross-chain-message-1",
        sender_id="cross-chain-sender",
        receiver_id="cross-chain-receiver",
        message_type=MessageType.COMMUNICATION,
        chain_id="chain-1",
        target_chain_id="chain-2",
        payload={"action": "cross_chain_test", "data": "hello across chains"},
        timestamp=datetime.now(),
        signature="test-signature",
        priority=5,
        ttl_seconds=3600
    )
    success = await comm.send_message(message)
    assert success
    assert "cross-chain-message-1" in comm.messages
@pytest.mark.asyncio
async def test_collaboration_creation():
    """create_collaboration() groups registered agents under shared rules.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    agents = []
    # Register three agents spread across 2 chains.
    for i in range(3):
        agent = AgentInfo(
            agent_id=f"collab-agent-{i+1}",
            name=f"Collab Agent {i+1}",
            chain_id=f"chain-{(i % 2) + 1}",
            node_id=f"node-{(i % 2) + 1}",
            status=AgentStatus.ACTIVE,
            capabilities=["trading", "analytics"],
            reputation_score=0.8,
            last_seen=datetime.now(),
            endpoint=f"http://localhost:808{i}",
            version="1.0.0"
        )
        await comm.register_agent(agent)
        agents.append(agent.agent_id)
    collaboration_id = await comm.create_collaboration(
        agents,
        "research_project",
        {"voting_threshold": 0.6, "resource_sharing": True}
    )
    assert collaboration_id is not None
    assert collaboration_id in comm.collaborations
    collaboration = comm.collaborations[collaboration_id]
    assert collaboration.collaboration_type == "research_project"
    assert len(collaboration.agent_ids) == 3
    assert collaboration.status == "active"
    # Governance rules are stored verbatim.
    assert collaboration.governance_rules["voting_threshold"] == 0.6
@pytest.mark.asyncio
async def test_reputation_system():
    """Reputation tracks interaction counts and trends toward quality scores.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest; unused loop variables renamed
    to ``_``.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    agent = AgentInfo(
        agent_id="reputation-agent",
        name="Reputation Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading"],
        reputation_score=0.5,  # start from a neutral reputation
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    await comm.register_agent(agent)
    # Five successful interactions followed by two failures.
    for _ in range(5):
        await comm.update_reputation("reputation-agent", True, 0.8)
    for _ in range(2):
        await comm.update_reputation("reputation-agent", False, 0.3)
    reputation = comm.reputations["reputation-agent"]
    assert reputation.total_interactions == 7
    assert reputation.successful_interactions == 5
    assert reputation.failed_interactions == 2
    # Net-positive history should pull the score above the neutral start.
    assert reputation.reputation_score > 0.5
@pytest.mark.asyncio
async def test_agent_status():
    """get_agent_status() returns info, reputation and queue/collab counts.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    agent = AgentInfo(
        agent_id="status-agent",
        name="Status Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading", "analytics"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    await comm.register_agent(agent)
    status = await comm.get_agent_status("status-agent")
    assert status is not None
    assert status["agent_info"]["agent_id"] == "status-agent"
    assert status["status"] == "active"
    assert status["reputation"] is not None
    # No messages or collaborations yet for a freshly registered agent.
    assert status["message_queue_size"] == 0
    assert status["active_collaborations"] == 0
@pytest.mark.asyncio
async def test_network_overview():
    """get_network_overview() aggregates agent and collaboration stats.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest; the unused ``collab_id``
    binding was dropped.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    # Five agents spread across 3 chains; the last one is BUSY, not ACTIVE.
    for i in range(5):
        agent = AgentInfo(
            agent_id=f"network-agent-{i+1}",
            name=f"Network Agent {i+1}",
            chain_id=f"chain-{(i % 3) + 1}",
            node_id=f"node-{(i % 2) + 1}",
            status=AgentStatus.ACTIVE if i < 4 else AgentStatus.BUSY,
            capabilities=["trading", "analytics"],
            reputation_score=0.7 + (i * 0.05),
            last_seen=datetime.now(),
            endpoint=f"http://localhost:808{i}",
            version="1.0.0"
        )
        await comm.register_agent(agent)
    # One collaboration between the first two agents.
    await comm.create_collaboration(
        ["network-agent-1", "network-agent-2"],
        "test_collaboration",
        {}
    )
    overview = await comm.get_network_overview()
    assert overview["total_agents"] == 5
    assert overview["active_agents"] == 4
    assert overview["total_collaborations"] == 1
    assert overview["active_collaborations"] == 1
    assert len(overview["agents_by_chain"]) == 3
    assert overview["average_reputation"] > 0.7
def test_validation_functions():
    """_validate_agent_info/_validate_message accept good input, reject bad.

    Fixed: replaced the ``== True`` / ``== False`` comparisons with plain
    truthiness asserts (PEP 8: never compare booleans with ``==``).
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    # A fully-populated agent passes validation.
    valid_agent = AgentInfo(
        agent_id="valid-agent",
        name="Valid Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    assert comm._validate_agent_info(valid_agent)
    # An agent with no capabilities is rejected.
    invalid_agent = AgentInfo(
        agent_id="invalid-agent",
        name="Invalid Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=[],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    assert not comm._validate_agent_info(invalid_agent)
    # A well-formed message passes validation.
    valid_message = AgentMessage(
        message_id="valid-message",
        sender_id="sender",
        receiver_id="receiver",
        message_type=MessageType.COMMUNICATION,
        chain_id="chain-1",
        target_chain_id=None,
        payload={"test": "data"},
        timestamp=datetime.now(),
        signature="signature",
        priority=5,
        ttl_seconds=3600
    )
    assert comm._validate_message(valid_message)
if __name__ == "__main__":
    # Synchronous tests first.
    test_agent_communication_creation()
    test_validation_functions()
    # Then each coroutine test, each on its own fresh event loop.
    for coro_test in (
        test_agent_registration,
        test_agent_discovery,
        test_message_sending,
        test_cross_chain_messaging,
        test_collaboration_creation,
        test_reputation_system,
        test_agent_status,
        test_network_overview,
    ):
        asyncio.run(coro_test())
    print("✅ All agent communication tests passed!")

View File

@@ -0,0 +1,334 @@
"""
Test for analytics and monitoring system
"""
import asyncio
import pytest
from datetime import datetime, timedelta
from aitbc_cli.core.config import MultiChainConfig, NodeConfig
from aitbc_cli.core.analytics import ChainAnalytics, ChainMetrics, ChainAlert
def test_analytics_creation():
    """A new ChainAnalytics instance carries no history, alerts or scores."""
    cfg = MultiChainConfig()
    analytics = ChainAnalytics(cfg)
    assert analytics.config == cfg
    # No data has been collected yet, so every store is empty.
    assert analytics.metrics_history == {}
    assert analytics.alerts == []
    assert analytics.predictions == {}
    assert analytics.health_scores == {}
@pytest.mark.asyncio
async def test_metrics_collection():
    """collect_metrics() returns typed metrics for a configured node.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest. The broad except is kept on
    purpose: collection may legitimately fail where no node is reachable.
    """
    config = MultiChainConfig()
    # Register a throwaway node for the collector to target.
    test_node = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10
    )
    config.nodes["test-node"] = test_node
    analytics = ChainAnalytics(config)
    try:
        metrics = await analytics.collect_metrics("test-chain", "test-node")
        assert metrics.chain_id == "test-chain"
        assert metrics.node_id == "test-node"
        assert isinstance(metrics.tps, float)
        assert isinstance(metrics.block_height, int)
    except Exception as e:
        # No live node in the test environment — best-effort only.
        print(f"Expected error in test environment: {e}")
def test_performance_summary():
    """get_chain_performance_summary() aggregates stats over stored history.

    Fixed: removed the standalone ``mock_metrics`` ChainMetrics instance
    that was assigned but never used (only the loop-built metrics were
    ever appended to the history).
    """
    config = MultiChainConfig()
    analytics = ChainAnalytics(config)
    now = datetime.now()
    # Ten hour-spaced samples with gently drifting values.
    for i in range(10):
        metrics = ChainMetrics(
            chain_id="test-chain",
            node_id="test-node",
            timestamp=now - timedelta(hours=i),
            block_height=1000 - i,
            tps=15.5 + (i * 0.1),
            avg_block_time=3.2 + (i * 0.01),
            gas_price=20000000000,
            memory_usage_mb=256.0 + (i * 10),
            disk_usage_mb=512.0 + (i * 5),
            active_nodes=3,
            client_count=25,
            miner_count=8,
            agent_count=12,
            network_in_mb=10.5,
            network_out_mb=8.2
        )
        analytics.metrics_history["test-chain"].append(metrics)
    summary = analytics.get_chain_performance_summary("test-chain", 24)
    assert summary["chain_id"] == "test-chain"
    assert summary["data_points"] == 10
    assert "statistics" in summary
    assert "tps" in summary["statistics"]
    assert "avg" in summary["statistics"]["tps"]
def test_cross_chain_analysis():
    """Cross-chain analysis covers every chain with recorded metrics."""
    analytics = ChainAnalytics(MultiChainConfig())
    # One identical sample per chain; only chain_id varies.
    shared_fields = dict(
        node_id="test-node",
        block_height=1000,
        tps=15.5,
        avg_block_time=3.2,
        gas_price=20000000000,
        memory_usage_mb=256.0,
        disk_usage_mb=512.0,
        active_nodes=3,
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2,
    )
    for chain_id in ("chain-1", "chain-2", "chain-3"):
        sample = ChainMetrics(chain_id=chain_id, timestamp=datetime.now(),
                              **shared_fields)
        analytics.metrics_history[chain_id].append(sample)
    analysis = analytics.get_cross_chain_analysis()
    assert analysis["total_chains"] == 3
    for section in ("resource_usage", "alerts_summary",
                    "performance_comparison"):
        assert section in analysis
def test_health_score_calculation():
    """Healthy metrics yield a health score in (50, 100]."""
    analytics = ChainAnalytics(MultiChainConfig())
    # Deliberately healthy numbers: good TPS, fast blocks, several nodes.
    healthy = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=20.0,
        avg_block_time=3.0,
        gas_price=20000000000,
        memory_usage_mb=500.0,
        disk_usage_mb=512.0,
        active_nodes=5,
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2
    )
    analytics.metrics_history["test-chain"].append(healthy)
    analytics._calculate_health_score("test-chain")
    score = analytics.health_scores["test-chain"]
    # Score is a percentage; healthy input should land above the midpoint.
    assert 0 <= score <= 100
    assert score > 50
def test_alert_generation():
    """Degraded metrics raise one alert per breached threshold."""
    analytics = ChainAnalytics(MultiChainConfig())
    degraded = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=0.5,                 # below the low-TPS threshold
        avg_block_time=15.0,     # above the slow-block threshold
        gas_price=20000000000,
        memory_usage_mb=3000.0,  # above the memory threshold
        disk_usage_mb=512.0,
        active_nodes=0,          # below the minimum node count
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2
    )
    asyncio.run(analytics._check_alerts(degraded))
    assert len(analytics.alerts) > 0
    # Every breached threshold should have produced its alert type.
    raised = {alert.alert_type for alert in analytics.alerts}
    expected = {"tps_low", "block_time_high", "memory_high", "node_count_low"}
    assert expected <= raised
def test_optimization_recommendations():
    """Underperforming metrics produce performance/resource/availability tips."""
    analytics = ChainAnalytics(MultiChainConfig())
    struggling = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=0.5,                 # low throughput
        avg_block_time=15.0,     # slow blocks
        gas_price=20000000000,
        memory_usage_mb=1500.0,  # heavy memory use
        disk_usage_mb=512.0,
        active_nodes=1,          # barely any nodes
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2
    )
    analytics.metrics_history["test-chain"].append(struggling)
    recommendations = analytics.get_optimization_recommendations("test-chain")
    assert recommendations
    # Each problem area should be represented by a recommendation type.
    categories = {rec["type"] for rec in recommendations}
    assert {"performance", "resource", "availability"} <= categories
def test_prediction_system():
    """predict_chain_performance() yields bounded-confidence forecasts."""
    analytics = ChainAnalytics(MultiChainConfig())
    now = datetime.now()
    # Twenty hourly samples (the predictor needs at least 10) with rising
    # tps and memory so both trends are non-trivial.
    for hour in range(20):
        analytics.metrics_history["test-chain"].append(ChainMetrics(
            chain_id="test-chain",
            node_id="test-node",
            timestamp=now - timedelta(hours=hour),
            block_height=1000 - hour,
            tps=15.0 + (hour * 0.5),
            avg_block_time=3.0,
            gas_price=20000000000,
            memory_usage_mb=256.0 + (hour * 10),
            disk_usage_mb=512.0,
            active_nodes=3,
            client_count=25,
            miner_count=8,
            agent_count=12,
            network_in_mb=10.5,
            network_out_mb=8.2
        ))
    predictions = asyncio.run(
        analytics.predict_chain_performance("test-chain", 24))
    assert predictions
    forecast_metrics = {p.metric for p in predictions}
    assert "tps" in forecast_metrics
    assert "memory_usage_mb" in forecast_metrics
    # Confidence is a probability; predicted values are non-negative.
    for p in predictions:
        assert 0 <= p.confidence <= 1
        assert p.predicted_value >= 0
def test_dashboard_data():
    """get_dashboard_data() exposes every dashboard section."""
    analytics = ChainAnalytics(MultiChainConfig())
    sample = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=15.5,
        avg_block_time=3.2,
        gas_price=20000000000,
        memory_usage_mb=256.0,
        disk_usage_mb=512.0,
        active_nodes=3,
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2
    )
    analytics.metrics_history["test-chain"].append(sample)
    dashboard = analytics.get_dashboard_data()
    # The payload must carry all five top-level sections.
    for section in ("overview", "chain_summaries", "alerts",
                    "predictions", "recommendations"):
        assert section in dashboard
if __name__ == "__main__":
    # Synchronous tests in declaration order.
    for sync_test in (
        test_analytics_creation,
        test_performance_summary,
        test_cross_chain_analysis,
        test_health_score_calculation,
        test_alert_generation,
        test_optimization_recommendations,
        test_prediction_system,
        test_dashboard_data,
    ):
        sync_test()
    # The lone coroutine test runs on its own event loop.
    asyncio.run(test_metrics_collection())
    print("✅ All analytics tests passed!")

View File

@@ -0,0 +1,132 @@
"""
Basic test for multi-chain CLI functionality
"""
import pytest
import asyncio
import tempfile
import yaml
from pathlib import Path
from aitbc_cli.core.config import MultiChainConfig, load_multichain_config
from aitbc_cli.core.chain_manager import ChainManager
from aitbc_cli.core.genesis_generator import GenesisGenerator
from aitbc_cli.models.chain import ChainConfig, ChainType, ConsensusAlgorithm, ConsensusConfig, PrivacyConfig
def test_multichain_config():
    """MultiChainConfig ships with the documented default values."""
    cfg = MultiChainConfig()
    # Gas defaults, logging level and caching flag are all fixed defaults.
    assert cfg.chains.default_gas_limit == 10000000
    assert cfg.chains.default_gas_price == 20000000000
    assert cfg.logging_level == "INFO"
    assert cfg.enable_caching is True
def test_chain_config():
    """ChainConfig composes consensus and privacy sub-models verbatim."""
    chain = ChainConfig(
        type=ChainType.PRIVATE,
        purpose="test",
        name="Test Chain",
        consensus=ConsensusConfig(
            algorithm=ConsensusAlgorithm.POS,
            block_time=5,
            max_validators=21
        ),
        privacy=PrivacyConfig(
            visibility="private",
            access_control="invite_only"
        ),
    )
    # Top-level fields and nested sub-model fields round-trip unchanged.
    assert chain.type == ChainType.PRIVATE
    assert chain.purpose == "test"
    assert chain.consensus.algorithm == ConsensusAlgorithm.POS
    assert chain.privacy.visibility == "private"
def test_genesis_generator():
    """The genesis generator exposes the built-in chain templates."""
    generator = GenesisGenerator(MultiChainConfig())
    templates = generator.list_templates()
    assert isinstance(templates, dict)
    # All three stock templates must be present.
    for template_name in ("private", "topic", "research"):
        assert template_name in templates
@pytest.mark.asyncio
async def test_chain_manager():
    """list_chains() returns a (possibly empty) list of chains.

    Fixed: the coroutine test was missing ``@pytest.mark.asyncio``; without
    the marker pytest never awaits it, so the body silently never runs.
    """
    config = MultiChainConfig()
    chain_manager = ChainManager(config)
    # A fresh manager has no chains yet but must still return a list.
    chains = await chain_manager.list_chains()
    assert isinstance(chains, list)
def test_config_file_operations():
    """A config saved to YAML loads back with its modified values intact."""
    with tempfile.TemporaryDirectory() as temp_dir:
        config_path = Path(temp_dir) / "test_config.yaml"
        # Tweak one field so the round-trip is observable.
        cfg = MultiChainConfig()
        cfg.chains.default_gas_limit = 20000000
        from aitbc_cli.core.config import save_multichain_config
        save_multichain_config(cfg, str(config_path))
        reloaded = load_multichain_config(str(config_path))
        assert reloaded.chains.default_gas_limit == 20000000
def test_chain_config_file():
    """A ChainConfig parses correctly from a YAML file on disk."""
    document = {
        "chain": {
            "type": "topic",
            "purpose": "healthcare",
            "name": "Healthcare Chain",
            "consensus": {
                "algorithm": "pos",
                "block_time": 5
            },
            "privacy": {
                "visibility": "public",
                "access_control": "open"
            }
        }
    }
    # Write the document to a temp file that survives the with-block so it
    # can be re-opened for reading; clean it up in finally.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml',
                                     delete=False) as handle:
        yaml.dump(document, handle)
        config_file = handle.name
    try:
        with open(config_file, 'r') as handle:
            parsed = yaml.safe_load(handle)
        chain = ChainConfig(**parsed['chain'])
        # String values from YAML coerce into the enum-typed fields.
        assert chain.type == ChainType.TOPIC
        assert chain.purpose == "healthcare"
        assert chain.consensus.algorithm == ConsensusAlgorithm.POS
    finally:
        Path(config_file).unlink()
if __name__ == "__main__":
    # Run the synchronous tests, then the lone coroutine, in source order.
    test_multichain_config()
    test_chain_config()
    test_genesis_generator()
    asyncio.run(test_chain_manager())
    test_config_file_operations()
    test_chain_config_file()
    print("✅ All basic tests passed!")

View File

@@ -0,0 +1,403 @@
"""
Test for production deployment and scaling system
"""
import asyncio
import pytest
from datetime import datetime, timedelta
from pathlib import Path
from aitbc_cli.core.deployment import (
ProductionDeployment, DeploymentConfig, DeploymentMetrics,
ScalingEvent, ScalingPolicy, DeploymentStatus
)
def test_deployment_creation():
    """Constructing ProductionDeployment initialises state and directories."""
    deployment = ProductionDeployment("/tmp/test_aitbc")
    assert deployment.config_path == Path("/tmp/test_aitbc")
    # All in-memory stores start empty.
    assert deployment.deployments == {}
    assert deployment.metrics == {}
    assert deployment.scaling_events == []
    assert deployment.health_checks == {}
    # The constructor must have created the on-disk layout.
    for directory in (deployment.deployment_dir, deployment.config_dir,
                      deployment.logs_dir, deployment.backups_dir):
        assert directory.exists()
@pytest.mark.asyncio
async def test_create_deployment_config():
    """create_deployment() registers a config with the requested settings.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker so the
    coroutine is actually awaited by pytest.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="test-deployment",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=2,
        port=8080,
        domain="test.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"}
    )
    assert deployment_id is not None
    assert deployment_id in deployment.deployments
    config = deployment.deployments[deployment_id]
    assert config.name == "test-deployment"
    assert config.environment == "production"
    assert config.min_instances == 1
    assert config.max_instances == 10
    assert config.desired_instances == 2
    # AUTO is the default scaling policy when none is supplied.
    assert config.scaling_policy == ScalingPolicy.AUTO
    assert config.port == 8080
    assert config.domain == "test.aitbc.dev"
@pytest.mark.asyncio
async def test_deployment_application():
    """deploy_application() marks the deployment healthy and seeds metrics.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker, and wrapped
    the monkey-patched ``_deploy_infrastructure`` in try/finally so a
    failing assertion cannot leave the mock installed.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="test-app",
        environment="staging",
        region="us-east-1",
        instance_type="t3.small",
        min_instances=1,
        max_instances=5,
        desired_instances=2,
        port=3000,
        domain="staging.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc_staging"}
    )
    # Stub out the real infrastructure roll-out (no system calls in tests).
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        print(f"Mock infrastructure deployment for {dep_config.name}")
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        success = await deployment.deploy_application(deployment_id)
        assert success
        assert deployment_id in deployment.health_checks
        assert deployment.health_checks[deployment_id]
        assert deployment_id in deployment.metrics
    finally:
        deployment._deploy_infrastructure = original_deploy_infra
@pytest.mark.asyncio
async def test_manual_scaling():
    """scale_deployment() updates instances and records a scaling event.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker, and wrapped
    the monkey-patched ``_deploy_infrastructure`` in try/finally so a
    failing assertion cannot leave the mock installed.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="scale-test",
        environment="production",
        region="us-west-2",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=2,
        port=8080,
        domain="scale.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"}
    )
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        await deployment.deploy_application(deployment_id)
        # Scale from the initial 2 instances up to 5.
        success = await deployment.scale_deployment(
            deployment_id, 5, "manual scaling test")
        assert success
        config = deployment.deployments[deployment_id]
        assert config.desired_instances == 5
        # The scale action must be recorded as an event.
        scaling_events = [e for e in deployment.scaling_events
                          if e.deployment_id == deployment_id]
        assert len(scaling_events) > 0
        latest_event = scaling_events[-1]
        assert latest_event.old_instances == 2
        assert latest_event.new_instances == 5
        assert latest_event.success
        assert latest_event.trigger_reason == "manual scaling test"
    finally:
        deployment._deploy_infrastructure = original_deploy_infra
@pytest.mark.asyncio
async def test_auto_scaling():
    """auto_scale_deployment() scales up on high CPU and down on low CPU.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker, and wrapped
    the monkey-patched ``_deploy_infrastructure`` in try/finally so a
    failing assertion cannot leave the mock installed.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="auto-scale-test",
        environment="production",
        region="us-east-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=2,
        port=8080,
        domain="autoscale.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"}
    )
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        await deployment.deploy_application(deployment_id)
        # Push CPU above the scale-up threshold.
        metrics = deployment.metrics[deployment_id]
        metrics.cpu_usage = 85.0
        metrics.memory_usage = 40.0
        metrics.error_rate = 1.0
        metrics.response_time = 500.0
        success = await deployment.auto_scale_deployment(deployment_id)
        assert success
        config = deployment.deployments[deployment_id]
        assert config.desired_instances == 3  # scaled up by one
        # Drop CPU below the scale-down threshold.
        metrics.cpu_usage = 15.0
        metrics.memory_usage = 25.0
        success = await deployment.auto_scale_deployment(deployment_id)
        assert success
        config = deployment.deployments[deployment_id]
        assert config.desired_instances == 2  # scaled back down by one
    finally:
        deployment._deploy_infrastructure = original_deploy_infra
@pytest.mark.asyncio
async def test_deployment_status():
    """get_deployment_status() exposes config, metrics, health and events.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker, and wrapped
    the monkey-patched ``_deploy_infrastructure`` in try/finally so a
    failing assertion cannot leave the mock installed.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="status-test",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=5,
        desired_instances=2,
        port=8080,
        domain="status.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"}
    )
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        await deployment.deploy_application(deployment_id)
        status = await deployment.get_deployment_status(deployment_id)
        assert status is not None
        for section in ("deployment", "metrics", "health_status",
                        "recent_scaling_events", "uptime_percentage"):
            assert section in status
        # The echoed deployment config matches what was created.
        deployment_info = status["deployment"]
        assert deployment_info["name"] == "status-test"
        assert deployment_info["environment"] == "production"
        assert deployment_info["desired_instances"] == 2
        assert status["health_status"]
        # Metrics are attributed to this deployment and non-negative.
        metrics = status["metrics"]
        assert metrics["deployment_id"] == deployment_id
        assert metrics["cpu_usage"] >= 0
        assert metrics["memory_usage"] >= 0
    finally:
        deployment._deploy_infrastructure = original_deploy_infra
@pytest.mark.asyncio
async def test_cluster_overview():
    """get_cluster_overview() aggregates across all deployments.

    Fixed: added the missing ``@pytest.mark.asyncio`` marker, and wrapped
    the monkey-patched ``_deploy_infrastructure`` in try/finally so a
    failing assertion cannot leave the mock installed.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        # Three deployments alternating production/staging environments.
        deployment_ids = []
        for i in range(3):
            deployment_id = await deployment.create_deployment(
                name=f"cluster-test-{i+1}",
                environment="production" if i % 2 == 0 else "staging",
                region="us-west-1",
                instance_type="t3.medium",
                min_instances=1,
                max_instances=5,
                desired_instances=2,
                port=8080 + i,
                domain=f"test{i+1}.aitbc.dev",
                database_config={"host": "localhost", "port": 5432, "name": f"aitbc_{i+1}"}
            )
            await deployment.deploy_application(deployment_id)
            deployment_ids.append(deployment_id)
        overview = await deployment.get_cluster_overview()
        assert overview is not None
        for section in ("total_deployments", "running_deployments",
                        "total_instances", "aggregate_metrics",
                        "recent_scaling_events", "successful_scaling_rate",
                        "health_check_coverage"):
            assert section in overview
        assert overview["total_deployments"] == 3
        assert overview["running_deployments"] == 3
        # 2 desired instances per deployment -> 6 total.
        assert overview["total_instances"] == 6
        assert overview["health_check_coverage"] == 1.0
    finally:
        deployment._deploy_infrastructure = original_deploy_infra
def test_scaling_thresholds():
    """Check that ProductionDeployment ships with the documented default scaling thresholds."""
    deployment = ProductionDeployment("/tmp/test_aitbc")

    # Expected defaults, expressed as one mapping for a readable failure diff.
    expected = {
        'cpu_high': 80.0,
        'cpu_low': 20.0,
        'memory_high': 85.0,
        'memory_low': 30.0,
        'error_rate_high': 5.0,
        'response_time_high': 2000.0,
        'min_uptime': 99.0,
    }
    for key, value in expected.items():
        assert deployment.scaling_thresholds[key] == value
async def test_deployment_config_validation():
    """Ensure a valid deployment configuration is accepted and instance bounds hold."""
    deployment = ProductionDeployment("/tmp/test_aitbc")

    # A configuration that satisfies every constraint.
    params = dict(
        name="valid-config",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=5,
        port=8080,
        domain="valid.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"},
    )
    deployment_id = await deployment.create_deployment(**params)
    assert deployment_id is not None

    # The desired instance count must sit within the [min, max] range.
    config = deployment.deployments[deployment_id]
    assert config.min_instances <= config.desired_instances <= config.max_instances
async def test_metrics_initialization():
    """Verify that deploying an application seeds its metrics record."""
    deployment = ProductionDeployment("/tmp/test_aitbc")

    deployment_id = await deployment.create_deployment(
        name="metrics-test",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=5,
        desired_instances=2,
        port=8080,
        domain="metrics.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"},
    )

    # Replace real provisioning with a stub so deploy_application succeeds locally.
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    await deployment.deploy_application(deployment_id)

    metrics = deployment.metrics[deployment_id]
    assert metrics.deployment_id == deployment_id

    # All gauge-style metrics start out non-negative.
    for gauge in (
        metrics.cpu_usage,
        metrics.memory_usage,
        metrics.disk_usage,
        metrics.request_count,
        metrics.error_rate,
        metrics.response_time,
        metrics.uptime_percentage,
    ):
        assert gauge >= 0
    # At least one instance must be live after deployment.
    assert metrics.active_instances >= 1

    # Restore the real infrastructure hook.
    deployment._deploy_infrastructure = original_deploy_infra
if __name__ == "__main__":
    # Synchronous tests first.
    test_deployment_creation()
    test_scaling_thresholds()

    # Then drive each coroutine test on a fresh event loop.
    for coro_fn in (
        test_create_deployment_config,
        test_deployment_application,
        test_manual_scaling,
        test_auto_scaling,
        test_deployment_status,
        test_cluster_overview,
        test_deployment_config_validation,
        test_metrics_initialization,
    ):
        asyncio.run(coro_fn())

    print("✅ All deployment tests passed!")

View File

@@ -0,0 +1,372 @@
"""
Test for global chain marketplace system
"""
import asyncio
import pytest
from decimal import Decimal
from datetime import datetime, timedelta
from aitbc_cli.core.config import MultiChainConfig
from aitbc_cli.core.marketplace import (
GlobalChainMarketplace, ChainListing, ChainType, MarketplaceStatus,
MarketplaceTransaction, TransactionStatus, ChainEconomy, MarketplaceMetrics
)
def test_marketplace_creation():
    """A freshly constructed marketplace holds the config and only empty state."""
    config = MultiChainConfig()
    marketplace = GlobalChainMarketplace(config)

    assert marketplace.config == config
    # Every internal registry starts out empty.
    for registry in (
        marketplace.listings,
        marketplace.transactions,
        marketplace.chain_economies,
        marketplace.user_reputations,
    ):
        assert registry == {}
    # No metrics exist until activity is recorded.
    assert marketplace.market_metrics is None
async def test_create_listing():
    """Creating a listing registers it as ACTIVE with the supplied fields."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())
    # Sellers need a reputation on file before listing.
    marketplace.user_reputations["seller-1"] = 0.8

    listing_id = await marketplace.create_listing(
        chain_id="healthcare-chain-001",
        chain_name="Healthcare Analytics Chain",
        chain_type=ChainType.TOPIC,
        description="Advanced healthcare data analytics chain",
        seller_id="seller-1",
        price=Decimal("1.5"),
        currency="ETH",
        chain_specifications={"consensus": "pos", "block_time": 5},
        metadata={"category": "healthcare", "compliance": "hipaa"},
    )
    assert listing_id is not None
    assert listing_id in marketplace.listings

    # The stored listing carries the submitted fields and starts ACTIVE.
    created = marketplace.listings[listing_id]
    assert created.chain_id == "healthcare-chain-001"
    assert created.chain_name == "Healthcare Analytics Chain"
    assert created.chain_type == ChainType.TOPIC
    assert created.price == Decimal("1.5")
    assert created.status == MarketplaceStatus.ACTIVE
async def test_purchase_chain():
    """Buying an active listing creates a PENDING transaction and marks it SOLD."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())

    # Both parties need a reputation entry before trading.
    marketplace.user_reputations["seller-1"] = 0.8
    marketplace.user_reputations["buyer-1"] = 0.7

    listing_id = await marketplace.create_listing(
        chain_id="trading-chain-001",
        chain_name="Trading Analytics Chain",
        chain_type=ChainType.PRIVATE,
        description="Private trading analytics chain",
        seller_id="seller-1",
        price=Decimal("2.0"),
        currency="ETH",
        chain_specifications={"consensus": "pos"},
        metadata={"category": "trading"},
    )

    transaction_id = await marketplace.purchase_chain(listing_id, "buyer-1", "crypto")
    assert transaction_id is not None
    assert transaction_id in marketplace.transactions

    # The recorded transaction captures both parties, the price, and starts PENDING.
    tx = marketplace.transactions[transaction_id]
    assert tx.buyer_id == "buyer-1"
    assert tx.seller_id == "seller-1"
    assert tx.price == Decimal("2.0")
    assert tx.status == TransactionStatus.PENDING

    # The listing itself flips to SOLD.
    assert marketplace.listings[listing_id].status == MarketplaceStatus.SOLD
async def test_complete_transaction():
    """Completing a purchase finalizes the transaction and releases its escrow."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())
    marketplace.user_reputations["seller-1"] = 0.8
    marketplace.user_reputations["buyer-1"] = 0.7

    # Full lifecycle up to completion: list, then purchase.
    listing_id = await marketplace.create_listing(
        chain_id="research-chain-001",
        chain_name="Research Collaboration Chain",
        chain_type=ChainType.RESEARCH,
        description="Research collaboration chain",
        seller_id="seller-1",
        price=Decimal("0.5"),
        currency="ETH",
        chain_specifications={"consensus": "pos"},
        metadata={"category": "research"},
    )
    transaction_id = await marketplace.purchase_chain(listing_id, "buyer-1", "crypto")

    assert await marketplace.complete_transaction(transaction_id, "0x1234567890abcdef")

    # The transaction records the hash, timestamp, and COMPLETED status.
    tx = marketplace.transactions[transaction_id]
    assert tx.status == TransactionStatus.COMPLETED
    assert tx.transaction_hash == "0x1234567890abcdef"
    assert tx.completed_at is not None

    # Completing the sale must release the held escrow funds.
    escrow_contract = marketplace.escrow_contracts.get(tx.escrow_address)
    assert escrow_contract is not None
    assert escrow_contract["status"] == "released"
async def test_chain_economy():
    """Requesting an unknown chain's economy lazily creates a tracking record."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())

    economy = await marketplace.get_chain_economy("test-chain-001")

    assert economy is not None
    assert economy.chain_id == "test-chain-001"
    # Monetary figures are Decimal-typed; counters and timestamps are populated.
    assert isinstance(economy.total_value_locked, Decimal)
    assert isinstance(economy.daily_volume, Decimal)
    assert economy.transaction_count >= 0
    assert economy.last_updated is not None
async def test_search_listings():
    """Listings can be filtered by chain type, price range, and seller."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())
    marketplace.user_reputations["seller-1"] = 0.8

    # Populate the marketplace with one listing per chain category.
    catalog = [
        ("healthcare-chain-001", "Healthcare Chain", ChainType.TOPIC, Decimal("1.0")),
        ("trading-chain-001", "Trading Chain", ChainType.PRIVATE, Decimal("2.0")),
        ("research-chain-001", "Research Chain", ChainType.RESEARCH, Decimal("0.5")),
        ("enterprise-chain-001", "Enterprise Chain", ChainType.ENTERPRISE, Decimal("5.0")),
    ]
    listing_ids = []
    for chain_id, name, chain_type, price in catalog:
        listing_ids.append(
            await marketplace.create_listing(
                chain_id=chain_id,
                chain_name=name,
                chain_type=chain_type,
                description=f"Description for {name}",
                seller_id="seller-1",
                price=price,
                currency="ETH",
                chain_specifications={},
                metadata={},
            )
        )

    # Filter by chain type: exactly one TOPIC listing exists.
    topic_hits = await marketplace.search_listings(chain_type=ChainType.TOPIC)
    assert len(topic_hits) == 1
    assert topic_hits[0].chain_type == ChainType.TOPIC

    # Price window [1.0, 2.0] catches the healthcare and trading chains.
    priced_hits = await marketplace.search_listings(min_price=Decimal("1.0"), max_price=Decimal("2.0"))
    assert len(priced_hits) == 2

    # All four belong to the same seller.
    seller_hits = await marketplace.search_listings(seller_id="seller-1")
    assert len(seller_hits) == 4
async def test_user_transactions():
    """Test that user transactions can be queried by role (buyer/seller/both).

    One seller lists two chains; two distinct buyers purchase one each,
    then each role-scoped query is checked for the expected result set.
    """
    marketplace = GlobalChainMarketplace(MultiChainConfig())

    # Seed reputations for everyone involved in the trades.
    marketplace.user_reputations["seller-1"] = 0.8
    marketplace.user_reputations["buyer-1"] = 0.7
    marketplace.user_reputations["buyer-2"] = 0.6

    # (chain_id, name, type, price, buyer) — one purchase per listing.
    listing_specs = [
        ("chain-001", "Chain 1", ChainType.TOPIC, Decimal("1.0"), "buyer-1"),
        ("chain-002", "Chain 2", ChainType.PRIVATE, Decimal("2.0"), "buyer-2"),
    ]
    for chain_id, name, chain_type, price, buyer in listing_specs:
        listing_id = await marketplace.create_listing(
            chain_id=chain_id,
            chain_name=name,
            chain_type=chain_type,
            description="Description",
            seller_id="seller-1",
            price=price,
            currency="ETH",
            chain_specifications={},
            metadata={},
        )
        # Return value deliberately discarded: only the side effect matters here.
        await marketplace.purchase_chain(listing_id, buyer, "crypto")

    # Seller sees both sales.
    seller_transactions = await marketplace.get_user_transactions("seller-1", "seller")
    assert len(seller_transactions) == 2

    # buyer-1 only sees their own purchase.
    buyer_transactions = await marketplace.get_user_transactions("buyer-1", "buyer")
    assert len(buyer_transactions) == 1
    assert buyer_transactions[0].buyer_id == "buyer-1"

    # "both" covers either role for the seller.
    all_transactions = await marketplace.get_user_transactions("seller-1", "both")
    assert len(all_transactions) == 2
async def test_marketplace_overview():
    """The overview report aggregates listings, transactions, and volume."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())
    marketplace.user_reputations["seller-1"] = 0.8
    marketplace.user_reputations["buyer-1"] = 0.7

    # One full lifecycle: list, purchase, complete.
    listing_id = await marketplace.create_listing(
        chain_id="overview-chain-001",
        chain_name="Overview Test Chain",
        chain_type=ChainType.TOPIC,
        description="Test chain for overview",
        seller_id="seller-1",
        price=Decimal("1.5"),
        currency="ETH",
        chain_specifications={},
        metadata={},
    )
    transaction_id = await marketplace.purchase_chain(listing_id, "buyer-1", "crypto")
    await marketplace.complete_transaction(transaction_id, "0x1234567890abcdef")

    overview = await marketplace.get_marketplace_overview()
    assert overview is not None

    # All top-level report sections must be present.
    for section in (
        "marketplace_metrics",
        "volume_24h",
        "top_performing_chains",
        "chain_types_distribution",
        "user_activity",
        "escrow_summary",
    ):
        assert section in overview

    # The metrics reflect the single completed sale above.
    metrics = overview["marketplace_metrics"]
    assert metrics["total_listings"] == 1
    assert metrics["total_transactions"] == 1
    assert metrics["total_volume"] == Decimal("1.5")
def test_validation_functions():
    """Test reputation updates: deltas are applied and the score is clamped to [0, 1].

    Float comparisons use math.isclose: repeated binary-float addition
    (e.g. 0.5 + 0.1 != 0.6 exactly) makes `==` checks brittle — the original
    already needed a manual tolerance for the 0.4 case.  Leftover debug
    prints have been removed.
    """
    import math  # local import: only this test needs a float tolerance

    config = MultiChainConfig()
    marketplace = GlobalChainMarketplace(config)

    # New users start at 0.5; apply a positive delta.
    marketplace._update_user_reputation("user-1", 0.1)
    assert math.isclose(marketplace.user_reputations["user-1"], 0.6, abs_tol=1e-9)

    # Negative delta lowers the score.
    marketplace._update_user_reputation("user-1", -0.2)
    assert math.isclose(marketplace.user_reputations["user-1"], 0.4, abs_tol=1e-4)

    # Upper bound: adding 0.6 would overshoot; the score clamps at 1.0.
    marketplace._update_user_reputation("user-1", 0.6)
    assert marketplace.user_reputations["user-1"] == 1.0

    # Lower bound: subtracting 1.5 would undershoot; the score clamps at 0.0.
    marketplace._update_user_reputation("user-1", -1.5)
    assert marketplace.user_reputations["user-1"] == 0.0
async def test_escrow_system():
    """Escrow contracts are opened on purchase and released (with fees) on completion."""
    marketplace = GlobalChainMarketplace(MultiChainConfig())
    marketplace.user_reputations["seller-1"] = 0.8
    marketplace.user_reputations["buyer-1"] = 0.7

    listing_id = await marketplace.create_listing(
        chain_id="escrow-test-chain",
        chain_name="Escrow Test Chain",
        chain_type=ChainType.TOPIC,
        description="Test escrow functionality",
        seller_id="seller-1",
        price=Decimal("3.0"),
        currency="ETH",
        chain_specifications={},
        metadata={},
    )
    transaction_id = await marketplace.purchase_chain(listing_id, "buyer-1", "crypto")

    # Purchasing opens an active escrow holding the full price.
    escrow_address = marketplace.transactions[transaction_id].escrow_address
    assert escrow_address in marketplace.escrow_contracts
    contract = marketplace.escrow_contracts[escrow_address]
    assert contract["status"] == "active"
    assert contract["amount"] == Decimal("3.0")
    assert contract["buyer_id"] == "buyer-1"
    assert contract["seller_id"] == "seller-1"

    # Completion releases the escrow and records the fee split.
    await marketplace.complete_transaction(transaction_id, "0xabcdef1234567890")
    contract = marketplace.escrow_contracts[escrow_address]
    assert contract["status"] == "released"
    assert "fee_breakdown" in contract

    fees = contract["fee_breakdown"]
    assert fees["escrow_fee"] == Decimal("0.06")       # 2% of 3.0
    assert fees["marketplace_fee"] == Decimal("0.03")  # 1% of 3.0
    assert fees["seller_amount"] == Decimal("2.91")    # price minus both fees
if __name__ == "__main__":
    # Synchronous tests first.
    test_marketplace_creation()
    test_validation_functions()

    # Then drive each coroutine test on a fresh event loop.
    for coro_fn in (
        test_create_listing,
        test_purchase_chain,
        test_complete_transaction,
        test_chain_economy,
        test_search_listings,
        test_user_transactions,
        test_marketplace_overview,
        test_escrow_system,
    ):
        asyncio.run(coro_fn())

    print("✅ All marketplace tests passed!")

View File

@@ -0,0 +1,132 @@
"""
Test for multi-chain node integration
"""
import asyncio
import pytest
from aitbc_cli.core.config import MultiChainConfig, NodeConfig
from aitbc_cli.core.node_client import NodeClient
from aitbc_cli.core.chain_manager import ChainManager
def test_node_client_creation():
    """A NodeClient keeps the NodeConfig it was constructed with."""
    node_config = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )

    client = NodeClient(node_config)

    # The client exposes the configuration it received, unchanged.
    assert client.config.id == "test-node"
    assert client.config.endpoint == "http://localhost:8545"
async def test_node_client_mock_operations():
    """Exercise NodeClient operations against its built-in mock responses."""
    node_config = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )

    async with NodeClient(node_config) as client:
        # Node info reports identity plus status/uptime fields.
        node_info = await client.get_node_info()
        assert node_info["node_id"] == "test-node"
        assert "status" in node_info
        assert "uptime_days" in node_info

        # Hosted chains come back as a list of chain records.
        chains = await client.get_hosted_chains()
        assert isinstance(chains, list)
        if chains:  # mock data may be empty
            assert hasattr(chains[0], 'id')
            assert hasattr(chains[0], 'type')

        # Per-chain statistics expose the chain id and its height.
        stats = await client.get_chain_stats("test-chain")
        assert "chain_id" in stats
        assert "block_height" in stats
def test_chain_manager_with_node_client():
    """A ChainManager picks up nodes registered in its MultiChainConfig."""
    config = MultiChainConfig()
    config.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )

    chain_manager = ChainManager(config)

    # The manager's view of the config includes our node and its endpoint.
    assert "test-node" in chain_manager.config.nodes
    assert chain_manager.config.nodes["test-node"].endpoint == "http://localhost:8545"
async def test_chain_operations_with_node():
    """Chain listing operations work through a configured node."""
    config = MultiChainConfig()
    config.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    chain_manager = ChainManager(config)

    # Global chain listing returns a list (mock-backed).
    chains = await chain_manager.list_chains()
    assert isinstance(chains, list)

    # Per-node chain listing likewise returns a list.
    node_chains = await chain_manager._get_node_chains("test-node")
    assert isinstance(node_chains, list)
def test_backup_restore_operations():
    """The ChainManager exposes the backup/restore plumbing methods."""
    config = MultiChainConfig()
    config.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    chain_manager = ChainManager(config)

    # The implementations are async; here we only verify the structure exists.
    for method_name in ("_execute_backup", "_execute_restore", "_get_chain_hosting_nodes"):
        assert hasattr(chain_manager, method_name)
if __name__ == "__main__":
    # Construction test first.
    test_node_client_creation()

    # Coroutine-based tests, each on its own event loop.
    for coro_fn in (test_node_client_mock_operations, test_chain_operations_with_node):
        asyncio.run(coro_fn())

    # Remaining synchronous tests.
    test_chain_manager_with_node_client()
    test_backup_restore_operations()
    print("✅ All node integration tests passed!")