network: add hub registration, Redis persistence, and federated mesh join protocol
Some checks failed
Systemd Sync / sync-systemd (push) Waiting to run
CLI Tests / test-cli (push) Has been cancelled
Integration Tests / test-service-integration (push) Has been cancelled
Python Tests / test-python (push) Has been cancelled
Security Scanning / security-scan (push) Has been cancelled
Documentation Validation / validate-docs (push) Has been cancelled
API Endpoint Tests / test-api-endpoints (push) Has been cancelled

- Change default P2P port from 7070 to 8001 in config and .env.example
- Add redis_url configuration option for hub persistence (default: redis://localhost:6379)
- Implement DNS-based hub registration/unregistration via HTTPS API endpoints
- Add Redis persistence for hub registrations with 1-hour TTL
- Add island join request/response protocol with member list and blockchain credentials
- Add GPU marketplace tracking (offers, bids, providers) in hub manager
- Add GPU provider query/response messages and exchange order book tracking
This commit is contained in:
aitbc
2026-04-13 11:47:34 +02:00
parent fefa6c4435
commit d72945f20c
42 changed files with 3802 additions and 1022 deletions

View File

@@ -7,8 +7,9 @@ supported_chains=ait-devnet
rpc_bind_host=0.0.0.0
rpc_bind_port=8006
# Network
p2p_bind_host=0.0.0.0
p2p_bind_port=7070
p2p_bind_port=8001
proposer_id=aitbc1-proposer

View File

@@ -26,11 +26,11 @@ class ChainSettings(BaseSettings):
supported_chains: str = "ait-devnet" # Comma-separated list of supported chain IDs
db_path: Path = Path("/var/lib/aitbc/data/chain.db")
rpc_bind_host: str = "127.0.0.1"
rpc_bind_host: str = "0.0.0.0"
rpc_bind_port: int = 8080
p2p_bind_host: str = "127.0.0.2"
p2p_bind_port: int = 7070
p2p_bind_host: str = "0.0.0.0"
p2p_bind_port: int = 8001
proposer_id: str = ""
proposer_key: Optional[str] = None
@@ -85,6 +85,9 @@ class ChainSettings(BaseSettings):
hub_discovery_url: str = "hub.aitbc.bubuit.net" # Hub discovery DNS
bridge_islands: str = "" # Comma-separated list of islands to bridge (optional)
# Redis Configuration (Hub persistence)
redis_url: str = "redis://localhost:6379" # Redis connection URL
# Keystore for proposer private key (future block signing)
keystore_path: Path = Path("/var/lib/aitbc/keystore")
keystore_password_file: Path = Path("/var/lib/aitbc/keystore/.password")

View File

@@ -75,9 +75,9 @@ class P2PDiscovery:
"""Add bootstrap node for initial connection"""
self.bootstrap_nodes.append((address, port))
def generate_node_id(self, address: str, port: int, public_key: str) -> str:
"""Generate unique node ID from address, port, and public key"""
content = f"{address}:{port}:{public_key}"
def generate_node_id(self, hostname: str, address: str, port: int, public_key: str) -> str:
"""Generate unique node ID from hostname, address, port, and public key"""
content = f"{hostname}:{address}:{port}:{public_key}"
return hashlib.sha256(content.encode()).hexdigest()
async def start_discovery(self):

View File

@@ -6,7 +6,8 @@ DNS-based hub discovery for federated mesh with hardcoded fallback
import asyncio
import logging
import socket
from typing import List, Optional, Tuple
import json
from typing import List, Optional, Tuple, Dict
from dataclasses import dataclass
logger = logging.getLogger(__name__)
@@ -102,16 +103,73 @@ class HubDiscovery:
for address, port in self.FALLBACK_HUBS
]
async def register_hub(self, hub_address: str, hub_port: int, discovery_url: Optional[str] = None) -> bool:
async def register_hub(self, hub_info: Dict, discovery_url: Optional[str] = None) -> bool:
"""
Register this node as a hub (placeholder for future DNS registration)
Note: This is a placeholder for future DNS registration functionality.
Currently, hub registration is done via manual DNS configuration.
Register this node as a hub with DNS discovery service
Args:
hub_info: Dictionary containing hub information (node_id, address, port, island_id, island_name, public_address, public_port, public_key_pem)
discovery_url: Optional custom discovery URL (uses default if not provided)
Returns:
bool: True if registration successful, False otherwise
"""
logger.info(f"Hub registration placeholder: {hub_address}:{hub_port}")
# Future: Implement dynamic DNS registration
return True
url = discovery_url or self.discovery_url
registration_url = f"https://{url}/api/register"
try:
import httpx
async with httpx.AsyncClient(timeout=10.0) as client:
response = await client.post(registration_url, json=hub_info)
if response.status_code == 200:
logger.info(f"Successfully registered hub {hub_info.get('node_id')} with DNS discovery service")
return True
else:
logger.error(f"DNS registration failed: {response.status_code} - {response.text}")
return False
except httpx.RequestError as e:
logger.error(f"DNS registration request failed: {e}")
return False
except Exception as e:
logger.error(f"DNS registration error: {e}")
return False
async def unregister_hub(self, node_id: str, discovery_url: Optional[str] = None) -> bool:
    """
    Unregister this node as a hub from the DNS discovery service.

    Args:
        node_id: Node ID to unregister
        discovery_url: Optional custom discovery URL (uses default if not provided)

    Returns:
        bool: True if unregistration successful, False otherwise
    """
    url = discovery_url or self.discovery_url
    unregistration_url = f"https://{url}/api/unregister"
    # Import httpx outside the request try-block: if the import fails inside
    # it, evaluating the `except httpx.RequestError` clause would itself
    # raise NameError while handling the ImportError.
    try:
        import httpx
    except ImportError as e:
        logger.error(f"DNS unregistration unavailable (httpx not installed): {e}")
        return False
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.post(unregistration_url, json={"node_id": node_id})
            if response.status_code == 200:
                logger.info(f"Successfully unregistered hub {node_id} from DNS discovery service")
                return True
            logger.error(f"DNS unregistration failed: {response.status_code} - {response.text}")
            return False
    except httpx.RequestError as e:
        logger.error(f"DNS unregistration request failed: {e}")
        return False
    except Exception as e:
        logger.error(f"DNS unregistration error: {e}")
        return False
def clear_cache(self):
"""Clear cached hub list"""

View File

@@ -6,8 +6,10 @@ Manages hub operations, peer list sharing, and hub registration for federated me
import asyncio
import logging
import time
import json
import os
from typing import Dict, List, Optional, Set
from dataclasses import dataclass, field
from dataclasses import dataclass, field, asdict
from enum import Enum
logger = logging.getLogger(__name__)
@@ -50,45 +52,319 @@ class PeerInfo:
class HubManager:
"""Manages hub operations for federated mesh"""
def __init__(self, local_node_id: str, local_address: str, local_port: int, island_id: str, island_name: str):
def __init__(self, local_node_id: str, local_address: str, local_port: int, island_id: str, island_name: str, redis_url: Optional[str] = None):
self.local_node_id = local_node_id
self.local_address = local_address
self.local_port = local_port
self.island_id = island_id
self.island_name = island_name
self.redis_url = redis_url or "redis://localhost:6379"
# Hub registration status
self.is_hub = False
self.hub_status = HubStatus.UNREGISTERED
self.registered_at: Optional[float] = None
# Known hubs
self.known_hubs: Dict[str, HubInfo] = {} # node_id -> HubInfo
# Peer registry (for providing peer lists)
self.peer_registry: Dict[str, PeerInfo] = {} # node_id -> PeerInfo
# Island peers (island_id -> set of node_ids)
self.island_peers: Dict[str, Set[str]] = {}
self.running = False
self._redis = None
# Initialize island peers for our island
self.island_peers[self.island_id] = set()
def register_as_hub(self, public_address: Optional[str] = None, public_port: Optional[int] = None) -> bool:
async def _connect_redis(self):
    """Connect to Redis for hub-registration persistence.

    Returns:
        bool: True if the connection was established and verified via PING.
    """
    try:
        import redis.asyncio as redis
        client = redis.from_url(self.redis_url)
        # Verify the connection up front so callers can trust self._redis.
        await client.ping()
        self._redis = client
        logger.info(f"Connected to Redis for hub persistence: {self.redis_url}")
        return True
    except Exception as e:
        # Do not keep a half-initialized client that failed PING: later
        # `if not self._redis` guards would wrongly treat it as usable.
        self._redis = None
        logger.error(f"Failed to connect to Redis: {e}")
        return False
async def _persist_hub_registration(self, hub_info: HubInfo) -> bool:
    """Persist a hub registration record to Redis.

    Serializes *hub_info* to JSON and stores it under ``hub:<node_id>``
    with a one-hour TTL, so stale registrations expire automatically.

    Args:
        hub_info: The HubInfo record to persist.

    Returns:
        bool: True on success; False when Redis is unreachable or on error.
    """
    try:
        # Lazily connect on first use; skip persistence if Redis is down.
        if not self._redis:
            await self._connect_redis()
        if not self._redis:
            logger.warning("Redis not available, skipping persistence")
            return False
        key = f"hub:{hub_info.node_id}"
        # default=str coerces any non-JSON-serializable field to a string.
        value = json.dumps(asdict(hub_info), default=str)
        await self._redis.setex(key, 3600, value)  # TTL: 1 hour
        logger.info(f"Persisted hub registration to Redis: {key}")
        return True
    except Exception as e:
        logger.error(f"Failed to persist hub registration: {e}")
        return False
async def _remove_hub_registration(self, node_id: str) -> bool:
    """Delete the persisted ``hub:<node_id>`` registration entry from Redis.

    Args:
        node_id: Identifier of the hub whose record should be removed.

    Returns:
        bool: True on success; False when Redis is unavailable or on error.
    """
    try:
        # Lazily (re)establish the Redis connection if needed.
        if not self._redis:
            await self._connect_redis()
        if not self._redis:
            logger.warning("Redis not available, skipping removal")
            return False
        redis_key = f"hub:{node_id}"
        await self._redis.delete(redis_key)
        logger.info(f"Removed hub registration from Redis: {redis_key}")
        return True
    except Exception as e:
        logger.error(f"Failed to remove hub registration: {e}")
        return False
async def _load_hub_registration(self) -> Optional[HubInfo]:
    """Load this node's persisted hub registration from Redis.

    Returns:
        Optional[HubInfo]: The stored record, or None when Redis is
        unavailable, no entry exists, or deserialization fails.
    """
    try:
        if not self._redis:
            await self._connect_redis()
        if not self._redis:
            return None
        key = f"hub:{self.local_node_id}"
        value = await self._redis.get(key)
        if value:
            data = json.loads(value)
            # NOTE(review): the record was written with json.dumps(..., default=str),
            # so non-string fields may come back as strings here — confirm
            # HubInfo tolerates that round-trip.
            return HubInfo(**data)
        return None
    except Exception as e:
        logger.error(f"Failed to load hub registration: {e}")
        return None
def _get_blockchain_credentials(self) -> dict:
    """Collect blockchain credentials handed to joining island members.

    Reads the genesis file and validator keystore from fixed on-disk
    paths, then adds chain/island identity and local endpoints.

    Returns:
        dict: Credential fields (genesis hash/file, genesis address,
        chain_id, island ids, rpc endpoint, p2p port); {} on error.
    """
    try:
        credentials = {}
        # Get genesis block hash from genesis.json
        # NOTE(review): path is hard-coded to ait-mainnet even though the
        # node may run other chains — confirm this is intentional.
        genesis_path = '/var/lib/aitbc/data/ait-mainnet/genesis.json'
        if os.path.exists(genesis_path):
            with open(genesis_path, 'r') as f:
                genesis_data = json.load(f)
            # Get genesis block hash
            if 'blocks' in genesis_data and len(genesis_data['blocks']) > 0:
                genesis_block = genesis_data['blocks'][0]
                credentials['genesis_block_hash'] = genesis_block.get('hash', '')
                # NOTE(review): stores the ENTIRE genesis file under
                # 'genesis_block', not just the first block — confirm.
                credentials['genesis_block'] = genesis_data
        # Get genesis address from keystore
        keystore_path = '/var/lib/aitbc/keystore/validator_keys.json'
        if os.path.exists(keystore_path):
            with open(keystore_path, 'r') as f:
                keys = json.load(f)
            # Get first key's address
            for key_id, key_data in keys.items():
                # Extract address from public key or use key_id
                credentials['genesis_address'] = key_id
                break
        # Add chain info
        credentials['chain_id'] = self.island_chain_id or f"ait-{self.island_id[:8]}"
        credentials['island_id'] = self.island_id
        credentials['island_name'] = self.island_name
        # Add RPC endpoint (local)
        credentials['rpc_endpoint'] = f"http://{self.local_address}:8006"
        credentials['p2p_port'] = self.local_port
        return credentials
    except Exception as e:
        logger.error(f"Failed to get blockchain credentials: {e}")
        return {}
def __init__(self, local_node_id: str, local_address: str, local_port: int,
             island_id: str, island_name: str, redis_url: str):
    """Initialize hub identity, marketplace/exchange registries, and Redis config.

    NOTE(review): this is a second __init__ definition in the class; it
    shadows the earlier one and does not set attributes that other methods
    read (is_hub, hub_status, registered_at, island_peers, running, _redis).
    It also assigns `_redis_client` while the persistence helpers use
    `_redis` — confirm the two constructors should be merged.
    """
    self.local_node_id = local_node_id
    self.local_address = local_address
    self.local_port = local_port
    self.island_id = island_id
    self.island_name = island_name
    # Derived chain id: "ait-" + first 8 chars of the island id.
    self.island_chain_id = f"ait-{island_id[:8]}"
    self.known_hubs: Dict[str, HubInfo] = {}
    self.peer_registry: Dict[str, PeerInfo] = {}
    self.peer_reputation: Dict[str, float] = {}
    self.peer_last_seen: Dict[str, float] = {}
    # GPU marketplace tracking
    self.gpu_offers: Dict[str, dict] = {}
    self.gpu_bids: Dict[str, dict] = {}
    self.gpu_providers: Dict[str, dict] = {}  # node_id -> gpu info
    # Exchange tracking
    self.exchange_orders: Dict[str, dict] = {}  # order_id -> order info
    self.exchange_order_books: Dict[str, Dict] = {}  # pair -> {bids: [], asks: []}
    # Redis client for persistence
    self.redis_url = redis_url
    self._redis_client = None
async def handle_join_request(self, join_request: dict) -> Optional[dict]:
    """
    Handle an island join request from a new node.

    Args:
        join_request: Parsed join-request message; expects 'island_id'
            and (for logging) 'node_id'.

    Returns:
        dict: Join response with member list and credentials, or None if
        the request targets another island or an error occurred.
    """
    try:
        requested_island_id = join_request.get('island_id')
        # Validate island ID
        if requested_island_id != self.island_id:
            logger.warning(f"Join request for island {requested_island_id} does not match our island {self.island_id}")
            return None
        # Collect every registered peer belonging to our island.
        members = [
            {
                'node_id': peer.node_id,
                'address': peer.address,
                'port': peer.port,
                'is_hub': peer.is_hub,
                'public_address': peer.public_address,
                'public_port': peer.public_port,
            }
            for peer in self.peer_registry.values()
            if peer.island_id == self.island_id
        ]
        # Include self in the member list. Single guarded lookup: the old
        # `known_hubs.get(id, {}).public_address` would raise AttributeError
        # if a None entry ever appeared under our id.
        own_hub = self.known_hubs.get(self.local_node_id)
        members.append({
            'node_id': self.local_node_id,
            'address': self.local_address,
            'port': self.local_port,
            'is_hub': True,
            'public_address': own_hub.public_address if own_hub is not None else None,
            'public_port': own_hub.public_port if own_hub is not None else None,
        })
        # Get blockchain credentials
        credentials = self._get_blockchain_credentials()
        # Build response
        response = {
            'type': 'join_response',
            'island_id': self.island_id,
            'island_name': self.island_name,
            'island_chain_id': self.island_chain_id or f"ait-{self.island_id[:8]}",
            'members': members,
            'credentials': credentials
        }
        logger.info(f"Sent join_response to node {join_request.get('node_id')} with {len(members)} members")
        return response
    except Exception as e:
        logger.error(f"Error handling join request: {e}")
        return None
def register_gpu_offer(self, offer_data: dict) -> bool:
    """Register a GPU marketplace offer in the hub.

    Args:
        offer_data: Offer payload; must contain an 'offer_id' key.

    Returns:
        bool: True if stored; False when 'offer_id' is missing or on error.
    """
    try:
        offer_id = offer_data.get('offer_id')
        if not offer_id:
            # Explicit False instead of the original implicit None
            # fall-through (declared return type is bool).
            return False
        self.gpu_offers[offer_id] = offer_data
        logger.info(f"Registered GPU offer: {offer_id}")
        return True
    except Exception as e:
        logger.error(f"Error registering GPU offer: {e}")
        return False
def register_gpu_bid(self, bid_data: dict) -> bool:
    """Register a GPU marketplace bid in the hub.

    Args:
        bid_data: Bid payload; must contain a 'bid_id' key.

    Returns:
        bool: True if stored; False when 'bid_id' is missing or on error.
    """
    try:
        bid_id = bid_data.get('bid_id')
        if not bid_id:
            # Explicit False instead of the original implicit None
            # fall-through (declared return type is bool).
            return False
        self.gpu_bids[bid_id] = bid_data
        logger.info(f"Registered GPU bid: {bid_id}")
        return True
    except Exception as e:
        logger.error(f"Error registering GPU bid: {e}")
        return False
def register_gpu_provider(self, node_id: str, gpu_info: dict) -> bool:
    """Register (or refresh) a GPU provider entry in the hub.

    Args:
        node_id: Provider node identifier used as the registry key.
        gpu_info: GPU description payload for that node.

    Returns:
        bool: True on success; False if storing or logging raised.
    """
    try:
        self.gpu_providers[node_id] = gpu_info
        logger.info(f"Registered GPU provider: {node_id}")
        return True
    except Exception as e:
        logger.error(f"Error registering GPU provider: {e}")
        return False
def register_exchange_order(self, order_data: dict) -> bool:
    """Register an exchange order and index it into the per-pair order book.

    Args:
        order_data: Order payload; must contain 'order_id'. Optional 'pair'
            and 'side' ('buy'/'sell') place it into the order book.

    Returns:
        bool: True if stored; False when 'order_id' is missing or on error.
    """
    try:
        order_id = order_data.get('order_id')
        if not order_id:
            # Explicit False instead of the original implicit None
            # fall-through (declared return type is bool).
            return False
        self.exchange_orders[order_id] = order_data
        # Update order book
        pair = order_data.get('pair')
        side = order_data.get('side')
        if pair and side:
            book = self.exchange_order_books.setdefault(pair, {'bids': [], 'asks': []})
            if side == 'buy':
                book['bids'].append(order_data)
            elif side == 'sell':
                book['asks'].append(order_data)
        logger.info(f"Registered exchange order: {order_id}")
        return True
    except Exception as e:
        logger.error(f"Error registering exchange order: {e}")
        return False
def get_gpu_offers(self) -> list:
    """Return all registered GPU offers as a list of payload dicts."""
    return list(self.gpu_offers.values())
def get_gpu_bids(self) -> list:
    """Return all registered GPU bids as a list of payload dicts."""
    return list(self.gpu_bids.values())
def get_gpu_providers(self) -> list:
    """Return all registered GPU provider info dicts (keys are dropped)."""
    return list(self.gpu_providers.values())
def get_exchange_order_book(self, pair: str) -> dict:
"""Get order book for a specific trading pair"""
return self.exchange_order_books.get(pair, {'bids': [], 'asks': []})
async def register_as_hub(self, public_address: Optional[str] = None, public_port: Optional[int] = None) -> bool:
"""Register this node as a hub"""
if self.is_hub:
logger.warning("Already registered as hub")
return False
self.is_hub = True
self.hub_status = HubStatus.REGISTERED
self.registered_at = time.time()
# Add self to known hubs
self.known_hubs[self.local_node_id] = HubInfo(
hub_info = HubInfo(
node_id=self.local_node_id,
address=self.local_address,
port=self.local_port,
@@ -99,24 +375,31 @@ class HubManager:
registered_at=time.time(),
last_seen=time.time()
)
self.known_hubs[self.local_node_id] = hub_info
# Persist to Redis
await self._persist_hub_registration(hub_info)
logger.info(f"Registered as hub for island {self.island_id}")
return True
async def unregister_as_hub(self) -> bool:
    """Clear this node's hub role, dropping its Redis entry and local record.

    Returns:
        bool: True if unregistered; False when not currently a hub.
    """
    # Guard: nothing to do unless we are currently a hub.
    if not self.is_hub:
        logger.warning("Not registered as hub")
        return False
    self.is_hub = False
    self.hub_status = HubStatus.UNREGISTERED
    self.registered_at = None
    # Best-effort removal of the persisted registration in Redis.
    await self._remove_hub_registration(self.local_node_id)
    # Drop our own entry from the known-hub table, if present.
    self.known_hubs.pop(self.local_node_id, None)
    logger.info(f"Unregistered as hub for island {self.island_id}")
    return True

View File

@@ -88,10 +88,11 @@ class P2PNetworkService:
self.host,
self.port,
self.island_id,
self.island_name
self.island_name,
self.config.redis_url
)
self.hub_manager.register_as_hub(self.public_endpoint[0] if self.public_endpoint else None,
self.public_endpoint[1] if self.public_endpoint else None)
await self.hub_manager.register_as_hub(self.public_endpoint[0] if self.public_endpoint else None,
self.public_endpoint[1] if self.public_endpoint else None)
logger.info("Initialized hub manager")
# Discover public endpoint via STUN if configured
@@ -423,6 +424,40 @@ class P2PNetworkService:
elif msg_type == 'handshake':
pass # Ignore subsequent handshakes
elif msg_type == 'join_request':
# Handle island join request (only if we're a hub)
if self.hub_manager:
logger.info(f"Received join_request from {peer_id}")
response = await self.hub_manager.handle_join_request(message)
if response:
await self._send_message(writer, response)
else:
logger.warning(f"Received join_request but not a hub, ignoring")
elif msg_type == 'join_response':
# Handle island join response (only if we requested to join)
logger.info(f"Received join_response from {peer_id}")
# Store the response for the CLI to retrieve
if not hasattr(self, '_join_response'):
self._join_response = {}
self._join_response[peer_id] = message
elif msg_type == 'gpu_provider_query':
# Handle GPU provider query
logger.info(f"Received gpu_provider_query from {peer_id}")
# Respond with GPU availability
gpu_response = {
'type': 'gpu_provider_response',
'node_id': self.node_id,
'gpu_available': self._get_gpu_count(),
'gpu_specs': self._get_gpu_specs()
}
await self._send_message(writer, gpu_response)
elif msg_type == 'gpu_provider_response':
# Handle GPU provider response
logger.info(f"Received gpu_provider_response from {peer_id}")
# Store the response for the CLI to retrieve
if not hasattr(self, '_gpu_provider_responses'):
self._gpu_provider_responses = {}
self._gpu_provider_responses[peer_id] = message
elif msg_type == 'new_transaction':
tx_data = message.get('tx')
if tx_data:
@@ -470,28 +505,101 @@ class P2PNetworkService:
writer.close()
try:
await writer.wait_closed()
except Exception:
except:
pass
async def _send_message(self, writer: asyncio.StreamWriter, message: dict):
"""Helper to send a JSON message over a stream"""
def _get_gpu_count(self) -> int:
"""Get the number of available GPUs on this node"""
try:
data = json.dumps(message) + '\n'
writer.write(data.encode())
await writer.drain()
# Try to read GPU count from system
# This is a placeholder - in a real implementation, this would
# query the actual GPU hardware or a configuration file
import os
gpu_config_path = '/var/lib/aitbc/gpu_config.json'
if os.path.exists(gpu_config_path):
with open(gpu_config_path, 'r') as f:
config = json.load(f)
return config.get('gpu_count', 0)
return 0
except Exception as e:
logger.error(f"Failed to send message: {e}")
logger.error(f"Error getting GPU count: {e}")
return 0
async def _ping_peers_loop(self):
    """Periodically broadcast pings to all active connections to keep them alive.

    Runs until the service's stop event is set; sends one ping message
    per open connection every 20 seconds.
    """
    while not self._stop_event.is_set():
        await asyncio.sleep(20)
        ping_msg = {'type': 'ping', 'node_id': self.node_id}
        # Make a copy of writers to avoid dictionary changed during iteration error
        writers = list(self.active_connections.values())
        for writer in writers:
            await self._send_message(writer, ping_msg)
def _get_gpu_specs(self) -> dict:
"""Get GPU specifications for this node"""
try:
# Try to read GPU specs from system
# This is a placeholder - in a real implementation, this would
# query the actual GPU hardware or a configuration file
import os
gpu_config_path = '/var/lib/aitbc/gpu_config.json'
if os.path.exists(gpu_config_path):
with open(gpu_config_path, 'r') as f:
config = json.load(f)
return config.get('specs', {})
return {}
except Exception as e:
logger.error(f"Error getting GPU specs: {e}")
return {}
async def send_join_request(self, hub_address: str, hub_port: int, island_id: str, island_name: str, node_id: str, public_key_pem: str) -> Optional[dict]:
    """
    Send a join request to a hub and wait for its join_response.

    Args:
        hub_address: Hub IP address or hostname
        hub_port: Hub port
        island_id: Island ID to join
        island_name: Island name
        node_id: Local node ID
        public_key_pem: Public key PEM

    Returns:
        dict: Join response from hub, or None on refusal/timeout/error
    """
    try:
        # Connect to hub
        reader, writer = await asyncio.open_connection(hub_address, hub_port)
    except ConnectionRefusedError:
        logger.error(f"Hub {hub_address}:{hub_port} refused connection")
        return None
    except Exception as e:
        logger.error(f"Failed to send join request: {e}")
        return None
    logger.info(f"Connected to hub {hub_address}:{hub_port}")
    try:
        # Send join request
        join_request = {
            'type': 'join_request',
            'node_id': node_id,
            'island_id': island_id,
            'island_name': island_name,
            'public_key_pem': public_key_pem
        }
        await self._send_message(writer, join_request)
        logger.info(f"Sent join_request to hub")
        # Wait for join response (with timeout)
        try:
            data = await asyncio.wait_for(reader.readline(), timeout=30.0)
            if data:
                response = json.loads(data.decode().strip())
                if response.get('type') == 'join_response':
                    logger.info(f"Received join_response from hub")
                    return response
                else:
                    logger.warning(f"Unexpected response type: {response.get('type')}")
            else:
                logger.warning("No response from hub")
        except asyncio.TimeoutError:
            logger.warning("Timeout waiting for join response")
        return None
    except Exception as e:
        logger.error(f"Failed to send join request: {e}")
        return None
    finally:
        # Always release the socket — the original leaked the open writer
        # on generic-exception paths after the connection was established.
        writer.close()
        try:
            await writer.wait_closed()
        except Exception:
            pass
async def run_p2p_service(host: str, port: int, node_id: str, peers: str):

View File

@@ -60,7 +60,7 @@ def _serialize_receipt(receipt: Receipt) -> Dict[str, Any]:
class TransactionRequest(BaseModel):
type: str = Field(description="Transaction type, e.g. TRANSFER or RECEIPT_CLAIM")
type: str = Field(description="Transaction type, e.g. TRANSFER, RECEIPT_CLAIM, GPU_MARKETPLACE, EXCHANGE")
sender: str
nonce: int
fee: int = Field(ge=0)
@@ -70,8 +70,9 @@ class TransactionRequest(BaseModel):
@model_validator(mode="after")
def normalize_type(self) -> "TransactionRequest": # type: ignore[override]
normalized = self.type.upper()
if normalized not in {"TRANSFER", "RECEIPT_CLAIM"}:
raise ValueError(f"unsupported transaction type: {self.type}")
valid_types = {"TRANSFER", "RECEIPT_CLAIM", "GPU_MARKETPLACE", "EXCHANGE"}
if normalized not in valid_types:
raise ValueError(f"unsupported transaction type: {normalized}. Valid types: {valid_types}")
self.type = normalized
return self
@@ -201,31 +202,83 @@ async def get_mempool(chain_id: str = None, limit: int = 100) -> Dict[str, Any]:
@router.get("/accounts/{address}", summary="Get account information")
async def get_account(address: str) -> Dict[str, Any]:
"""Get account information including balance"""
from ..models import Account
async def get_account(address: str, chain_id: str = None) -> Dict[str, Any]:
"""Get account information"""
chain_id = get_chain_id(chain_id)
try:
with session_scope() as session:
account = session.exec(select(Account).where(Account.address == address)).first()
with session_scope() as session:
account = session.exec(select(Account).where(Account.address == address).where(Account.chain_id == chain_id)).first()
if not account:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Account not found")
return {
"address": account.address,
"balance": account.balance,
"nonce": account.nonce,
"chain_id": account.chain_id
}
@router.get("/transactions", summary="Query transactions")
async def query_transactions(
transaction_type: Optional[str] = None,
island_id: Optional[str] = None,
pair: Optional[str] = None,
status: Optional[str] = None,
order_id: Optional[str] = None,
limit: Optional[int] = 100,
chain_id: str = None
) -> List[Dict[str, Any]]:
"""Query transactions with optional filters"""
chain_id = get_chain_id(chain_id)
with session_scope() as session:
query = select(Transaction).where(Transaction.chain_id == chain_id)
# Apply filters based on payload fields
transactions = session.exec(query).all()
results = []
for tx in transactions:
# Filter by transaction type in payload
if transaction_type and tx.payload.get('type') != transaction_type:
continue
if account is None:
return {
"address": address,
"balance": 0,
"nonce": 0,
"exists": False
}
# Filter by island_id in payload
if island_id and tx.payload.get('island_id') != island_id:
continue
return {
"address": account.address,
"balance": account.balance,
"nonce": account.nonce,
"exists": True
}
except Exception as e:
_logger.error("Failed to get account", extra={"error": str(e), "address": address})
raise HTTPException(status_code=500, detail=f"Failed to get account: {str(e)}")
# Filter by pair in payload
if pair and tx.payload.get('pair') != pair:
continue
# Filter by status in payload
if status and tx.payload.get('status') != status:
continue
# Filter by order_id in payload
if order_id and tx.payload.get('order_id') != order_id and tx.payload.get('offer_id') != order_id and tx.payload.get('bid_id') != order_id:
continue
results.append({
"transaction_id": tx.id,
"tx_hash": tx.tx_hash,
"sender": tx.sender,
"recipient": tx.recipient,
"payload": tx.payload,
"status": tx.status,
"created_at": tx.created_at.isoformat(),
"timestamp": tx.timestamp,
"nonce": tx.nonce,
"value": tx.value,
"fee": tx.fee
})
# Apply limit
if limit:
results = results[:limit]
return results
@router.get("/blocks-range", summary="Get blocks in height range")

View File

@@ -21,17 +21,18 @@ class TestP2PDiscovery:
def test_generate_node_id(self):
"""Test node ID generation"""
hostname = "node1.example.com"
address = "127.0.0.1"
port = 8000
public_key = "test_public_key"
node_id = self.discovery.generate_node_id(address, port, public_key)
node_id = self.discovery.generate_node_id(hostname, address, port, public_key)
assert isinstance(node_id, str)
assert len(node_id) == 64 # SHA256 hex length
# Test consistency
node_id2 = self.discovery.generate_node_id(address, port, public_key)
node_id2 = self.discovery.generate_node_id(hostname, address, port, public_key)
assert node_id == node_id2
def test_add_bootstrap_node(self):
@@ -45,17 +46,18 @@ class TestP2PDiscovery:
def test_generate_node_id_consistency(self):
"""Test node ID generation consistency"""
hostname = "node2.example.com"
address = "192.168.1.1"
port = 9000
public_key = "test_key"
node_id1 = self.discovery.generate_node_id(address, port, public_key)
node_id2 = self.discovery.generate_node_id(address, port, public_key)
node_id1 = self.discovery.generate_node_id(hostname, address, port, public_key)
node_id2 = self.discovery.generate_node_id(hostname, address, port, public_key)
assert node_id1 == node_id2
# Different inputs should produce different IDs
node_id3 = self.discovery.generate_node_id("192.168.1.2", port, public_key)
node_id3 = self.discovery.generate_node_id(hostname, "192.168.1.2", port, public_key)
assert node_id1 != node_id3
def test_get_peer_count_empty(self):

View File

@@ -0,0 +1,324 @@
"""
Tests for Hub Manager with Redis persistence
"""
import pytest
import asyncio
from unittest.mock import Mock, AsyncMock, patch
from aitbc_chain.network.hub_manager import HubManager, HubInfo, HubStatus, PeerInfo
class TestHubManager:
"""Test cases for Hub Manager with Redis persistence"""
@pytest.fixture
def hub_manager(self):
    """Create a HubManager instance for testing.

    Uses localhost endpoints and the default Redis URL; no network or
    Redis connection is made at construction time.
    """
    return HubManager(
        local_node_id="test-node-id",
        local_address="127.0.0.1",
        local_port=7070,
        island_id="test-island-id",
        island_name="test-island",
        redis_url="redis://localhost:6379"
    )
@pytest.mark.asyncio
async def test_connect_redis_success(self, hub_manager):
    """Test successful Redis connection.

    HubManager imports redis.asyncio locally inside _connect_redis, so the
    patch must target the real 'redis.asyncio.from_url'; patching
    'aitbc_chain.network.hub_manager.redis.asyncio' raises AttributeError
    because that module has no top-level 'redis' name.
    """
    mock_client = AsyncMock()
    mock_client.ping = AsyncMock(return_value=True)
    with patch('redis.asyncio.from_url', return_value=mock_client) as mock_from_url:
        result = await hub_manager._connect_redis()
    assert result is True
    assert hub_manager._redis is not None
    mock_from_url.assert_called_once_with("redis://localhost:6379")
    mock_client.ping.assert_called_once()
@pytest.mark.asyncio
async def test_connect_redis_failure(self, hub_manager):
    """Test Redis connection failure.

    Patches the real 'redis.asyncio.from_url' (the module-under-test has
    no top-level 'redis' attribute — it imports redis locally).
    """
    with patch('redis.asyncio.from_url', side_effect=Exception("Connection failed")):
        result = await hub_manager._connect_redis()
    assert result is False
    assert hub_manager._redis is None
@pytest.mark.asyncio
async def test_persist_hub_registration_success(self, hub_manager):
    """Test successful hub registration persistence to Redis.

    Patches 'redis.asyncio.from_url' so the lazy _connect_redis call inside
    _persist_hub_registration receives the mock client.
    """
    hub_info = HubInfo(
        node_id="test-node-id",
        address="127.0.0.1",
        port=7070,
        island_id="test-island-id",
        island_name="test-island",
        public_address="1.2.3.4",
        public_port=7070,
        registered_at=1234567890.0,
        last_seen=1234567890.0
    )
    mock_client = AsyncMock()
    mock_client.setex = AsyncMock(return_value=True)
    with patch('redis.asyncio.from_url', return_value=mock_client):
        result = await hub_manager._persist_hub_registration(hub_info)
    assert result is True
    mock_client.setex.assert_called_once()
    # Key layout is "hub:<node_id>".
    key = mock_client.setex.call_args[0][0]
    assert key == "hub:test-node-id"
@pytest.mark.asyncio
async def test_persist_hub_registration_no_redis(self, hub_manager):
    """Test hub registration persistence when Redis is unavailable.

    patch.object detects that _connect_redis is a coroutine function and
    substitutes an AsyncMock, so the awaited call resolves to False.
    """
    hub_info = HubInfo(
        node_id="test-node-id",
        address="127.0.0.1",
        port=7070,
        island_id="test-island-id",
        island_name="test-island"
    )
    with patch.object(hub_manager, '_connect_redis', return_value=False):
        result = await hub_manager._persist_hub_registration(hub_info)
        assert result is False
@pytest.mark.asyncio
async def test_remove_hub_registration_success(self, hub_manager):
    """Test successful hub registration removal from Redis.

    Patches the real 'redis.asyncio.from_url' used by the lazy connect.
    """
    mock_client = AsyncMock()
    mock_client.delete = AsyncMock(return_value=True)
    with patch('redis.asyncio.from_url', return_value=mock_client):
        result = await hub_manager._remove_hub_registration("test-node-id")
    assert result is True
    mock_client.delete.assert_called_once_with("hub:test-node-id")
@pytest.mark.asyncio
async def test_load_hub_registration_success(self, hub_manager):
    """Test successful hub registration loading from Redis.

    Patches the real 'redis.asyncio.from_url'; the unused hub_data dict
    from the original version is removed.
    """
    mock_client = AsyncMock()
    mock_client.get = AsyncMock(return_value='{"node_id": "test-node-id", "address": "127.0.0.1", "port": 7070, "island_id": "test-island-id", "island_name": "test-island"}')
    with patch('redis.asyncio.from_url', return_value=mock_client):
        result = await hub_manager._load_hub_registration()
    assert result is not None
    assert result.node_id == "test-node-id"
    mock_client.get.assert_called_once_with("hub:test-node-id")
@pytest.mark.asyncio
async def test_load_hub_registration_not_found(self, hub_manager):
    """Test hub registration loading when not found in Redis.

    Patches the real 'redis.asyncio.from_url' used by the lazy connect.
    """
    mock_client = AsyncMock()
    mock_client.get = AsyncMock(return_value=None)
    with patch('redis.asyncio.from_url', return_value=mock_client):
        result = await hub_manager._load_hub_registration()
    assert result is None
@pytest.mark.asyncio
async def test_register_as_hub_success(self, hub_manager):
    """Registering as a hub flips state and records the local node in known_hubs."""
    with patch.object(hub_manager, '_persist_hub_registration', return_value=True):
        ok = await hub_manager.register_as_hub(public_address="1.2.3.4", public_port=7070)

        assert ok is True
        assert hub_manager.is_hub is True
        assert hub_manager.hub_status == HubStatus.REGISTERED
        assert hub_manager.registered_at is not None
        assert hub_manager.local_node_id in hub_manager.known_hubs
@pytest.mark.asyncio
async def test_register_as_hub_already_registered(self, hub_manager):
    """A second registration attempt is rejected without clearing hub state."""
    hub_manager.is_hub = True
    hub_manager.hub_status = HubStatus.REGISTERED

    ok = await hub_manager.register_as_hub()

    assert ok is False
    assert hub_manager.is_hub is True
@pytest.mark.asyncio
async def test_unregister_as_hub_success(self, hub_manager):
    """Unregistering clears hub state and drops this node from known_hubs."""
    hub_manager.is_hub = True
    hub_manager.hub_status = HubStatus.REGISTERED
    hub_manager.known_hubs["test-node-id"] = HubInfo(
        node_id="test-node-id",
        address="127.0.0.1",
        port=7070,
        island_id="test-island-id",
        island_name="test-island",
    )

    with patch.object(hub_manager, '_remove_hub_registration', return_value=True):
        ok = await hub_manager.unregister_as_hub()

    assert ok is True
    assert hub_manager.is_hub is False
    assert hub_manager.hub_status == HubStatus.UNREGISTERED
    assert hub_manager.registered_at is None
    assert hub_manager.local_node_id not in hub_manager.known_hubs
@pytest.mark.asyncio
async def test_unregister_as_hub_not_registered(self, hub_manager):
    """Unregistering while not a hub reports failure and leaves state untouched."""
    ok = await hub_manager.unregister_as_hub()

    assert ok is False
    assert hub_manager.is_hub is False
def test_register_peer(self, hub_manager):
    """A registered peer appears in both the registry and its island index."""
    peer = PeerInfo(
        node_id="peer-1",
        address="192.168.1.1",
        port=7071,
        island_id="test-island-id",
        is_hub=False,
    )

    assert hub_manager.register_peer(peer) is True
    assert "peer-1" in hub_manager.peer_registry
    assert "peer-1" in hub_manager.island_peers["test-island-id"]
def test_unregister_peer(self, hub_manager):
    """Unregistering removes the peer from the registry and the island index."""
    hub_manager.register_peer(PeerInfo(
        node_id="peer-1",
        address="192.168.1.1",
        port=7071,
        island_id="test-island-id",
        is_hub=False,
    ))

    assert hub_manager.unregister_peer("peer-1") is True
    assert "peer-1" not in hub_manager.peer_registry
    assert "peer-1" not in hub_manager.island_peers["test-island-id"]
def test_add_known_hub(self, hub_manager):
    """An added hub is stored in known_hubs under its node id."""
    hub = HubInfo(
        node_id="hub-1",
        address="10.1.1.1",
        port=7070,
        island_id="test-island-id",
        island_name="test-island",
    )
    hub_manager.add_known_hub(hub)

    # get() covers both presence and equality in one check.
    assert hub_manager.known_hubs.get("hub-1") == hub
def test_remove_known_hub(self, hub_manager):
    """Removing a known hub succeeds and deletes its entry."""
    hub_manager.add_known_hub(HubInfo(
        node_id="hub-1",
        address="10.1.1.1",
        port=7070,
        island_id="test-island-id",
        island_name="test-island",
    ))

    assert hub_manager.remove_known_hub("hub-1") is True
    assert "hub-1" not in hub_manager.known_hubs
def test_get_peer_list(self, hub_manager):
    """Only peers belonging to the requested island are returned."""
    fixtures = [
        ("peer-1", "192.168.1.1", 7071, "test-island-id"),
        ("peer-2", "192.168.1.2", 7072, "other-island-id"),
    ]
    for node_id, address, port, island_id in fixtures:
        hub_manager.register_peer(PeerInfo(
            node_id=node_id,
            address=address,
            port=port,
            island_id=island_id,
            is_hub=False,
        ))

    peers = hub_manager.get_peer_list("test-island-id")

    # Exactly the one peer from the requested island comes back.
    assert [p.node_id for p in peers] == ["peer-1"]
def test_get_hub_list(self, hub_manager):
    """Only hubs belonging to the requested island are returned."""
    for node_id, address, island_id, island_name in (
        ("hub-1", "10.1.1.1", "test-island-id", "test-island"),
        ("hub-2", "10.1.1.2", "other-island-id", "other-island"),
    ):
        hub_manager.add_known_hub(HubInfo(
            node_id=node_id,
            address=address,
            port=7070,
            island_id=island_id,
            island_name=island_name,
        ))

    hubs = hub_manager.get_hub_list("test-island-id")

    # Exactly the one hub from the requested island comes back.
    assert [h.node_id for h in hubs] == ["hub-1"]
def test_update_peer_last_seen(self, hub_manager):
    """Touching a peer advances its last_seen timestamp."""
    hub_manager.register_peer(PeerInfo(
        node_id="peer-1",
        address="192.168.1.1",
        port=7071,
        island_id="test-island-id",
        is_hub=False,
        last_seen=100.0,
    ))

    hub_manager.update_peer_last_seen("peer-1")

    assert hub_manager.peer_registry["peer-1"].last_seen > 100.0
if __name__ == "__main__":
    # Propagate pytest's exit status so shells/CI see test failures
    # (the bare pytest.main() call discarded the return code).
    raise SystemExit(pytest.main([__file__]))

View File

@@ -0,0 +1,244 @@
"""
Tests for Island Join functionality
"""
import pytest
import asyncio
from unittest.mock import Mock, AsyncMock, patch, MagicMock
from aitbc_chain.network.hub_manager import HubManager, HubInfo, PeerInfo
from aitbc_chain.p2p_network import P2PNetworkService
class TestHubManagerJoin:
    """Test cases for HubManager join request handling."""

    @pytest.fixture
    def hub_manager(self):
        """Create a HubManager instance for testing."""
        return HubManager(
            local_node_id="test-hub-node",
            local_address="127.0.0.1",
            local_port=7070,
            island_id="test-island-id",
            island_name="test-island",
            redis_url="redis://localhost:6379"
        )

    def test_get_blockchain_credentials(self, hub_manager):
        """Test blockchain credentials retrieval.

        Both the genesis.json read and the keystore parse are mocked so the
        test never touches the filesystem.
        """
        with patch('aitbc_chain.network.hub_manager.os.path.exists', return_value=True):
            with patch('aitbc_chain.network.hub_manager.open', create=True) as mock_open:
                # Mock genesis.json: the opened file handle yields the raw
                # JSON text directly (no separate dict fixture is needed —
                # the previous unused genesis_data local has been removed).
                mock_file = MagicMock()
                mock_file.read.return_value = '{"blocks": [{"hash": "test-genesis-hash"}]}'
                mock_open.return_value.__enter__.return_value = mock_file
                # Mock keystore parsing via json.load
                with patch('aitbc_chain.network.hub_manager.json.load') as mock_json_load:
                    mock_json_load.return_value = {'0x123': {'public_key_pem': 'test-key'}}
                    credentials = hub_manager._get_blockchain_credentials()
                    assert credentials is not None
                    assert 'chain_id' in credentials
                    assert 'island_id' in credentials
                    assert credentials['island_id'] == 'test-island-id'

    @pytest.mark.asyncio
    async def test_handle_join_request_success(self, hub_manager):
        """Test successful join request handling."""
        # Add some peers to the registry
        peer_info = PeerInfo(
            node_id="peer-1",
            address="192.168.1.1",
            port=7071,
            island_id="test-island-id",
            is_hub=False
        )
        hub_manager.register_peer(peer_info)
        join_request = {
            'type': 'join_request',
            'node_id': 'new-node',
            'island_id': 'test-island-id',
            'island_name': 'test-island',
            'public_key_pem': 'test-pem'
        }
        with patch.object(hub_manager, '_get_blockchain_credentials', return_value={'chain_id': 'test-chain'}):
            response = await hub_manager.handle_join_request(join_request)
            assert response is not None
            assert response['type'] == 'join_response'
            assert response['island_id'] == 'test-island-id'
            assert len(response['members']) >= 1  # At least the hub itself
            assert 'credentials' in response

    @pytest.mark.asyncio
    async def test_handle_join_request_wrong_island(self, hub_manager):
        """Test join request for wrong island."""
        join_request = {
            'type': 'join_request',
            'node_id': 'new-node',
            'island_id': 'wrong-island-id',
            'island_name': 'wrong-island',
            'public_key_pem': 'test-pem'
        }
        response = await hub_manager.handle_join_request(join_request)
        assert response is None

    @pytest.mark.asyncio
    async def test_handle_join_request_with_members(self, hub_manager):
        """Test join request returns all island members."""
        # Add multiple peers
        for i in range(3):
            peer_info = PeerInfo(
                node_id=f"peer-{i}",
                address=f"192.168.1.{i}",
                port=7070 + i,
                island_id="test-island-id",
                is_hub=False
            )
            hub_manager.register_peer(peer_info)
        join_request = {
            'type': 'join_request',
            'node_id': 'new-node',
            'island_id': 'test-island-id',
            'island_name': 'test-island',
            'public_key_pem': 'test-pem'
        }
        with patch.object(hub_manager, '_get_blockchain_credentials', return_value={'chain_id': 'test-chain'}):
            response = await hub_manager.handle_join_request(join_request)
            assert response is not None
            # Should include all peers + hub itself
            assert len(response['members']) >= 4
class TestP2PNetworkJoin:
    """Test cases for P2P network join request functionality."""

    @pytest.fixture
    def p2p_service(self):
        """Create a P2P service instance for testing."""
        return P2PNetworkService(
            host="127.0.0.1",
            port=7070,
            node_id="test-node",
            peers=[]
        )

    @pytest.mark.asyncio
    async def test_send_join_request_success(self, p2p_service):
        """Test successful join request to hub.

        The hub's reply comes from the mocked stream reader, so the
        previously unused local join_response fixture has been removed.
        """
        with patch('aitbc_chain.p2p_network.asyncio.open_connection') as mock_open:
            # Mock reader and writer: the hub answers with a single JSON line.
            mock_reader = AsyncMock()
            mock_reader.readline = AsyncMock(return_value=b'{"type": "join_response"}')
            mock_writer = AsyncMock()
            mock_writer.close = AsyncMock()
            mock_writer.wait_closed = AsyncMock()
            mock_open.return_value = (mock_reader, mock_writer)
            response = await p2p_service.send_join_request(
                hub_address="127.0.0.1",
                hub_port=7070,
                island_id="test-island-id",
                island_name="test-island",
                node_id="test-node",
                public_key_pem="test-pem"
            )
            assert response is not None
            mock_open.assert_called_once_with("127.0.0.1", 7070)

    @pytest.mark.asyncio
    async def test_send_join_request_connection_refused(self, p2p_service):
        """Test join request when hub refuses connection."""
        with patch('aitbc_chain.p2p_network.asyncio.open_connection') as mock_open:
            mock_open.side_effect = ConnectionRefusedError()
            response = await p2p_service.send_join_request(
                hub_address="127.0.0.1",
                hub_port=7070,
                island_id="test-island-id",
                island_name="test-island",
                node_id="test-node",
                public_key_pem="test-pem"
            )
            assert response is None

    @pytest.mark.asyncio
    async def test_send_join_request_timeout(self, p2p_service):
        """Test join request timeout."""
        with patch('aitbc_chain.p2p_network.asyncio.open_connection') as mock_open:
            # Mock reader that times out
            mock_reader = AsyncMock()
            mock_reader.readline = AsyncMock(side_effect=asyncio.TimeoutError())
            mock_writer = AsyncMock()
            mock_writer.close = AsyncMock()
            mock_writer.wait_closed = AsyncMock()
            mock_open.return_value = (mock_reader, mock_writer)
            response = await p2p_service.send_join_request(
                hub_address="127.0.0.1",
                hub_port=7070,
                island_id="test-island-id",
                island_name="test-island",
                node_id="test-node",
                public_key_pem="test-pem"
            )
            assert response is None
class TestJoinMessageHandling:
    """Test cases for join message handling in the P2P network."""

    @pytest.mark.asyncio
    async def test_join_request_message_handling(self):
        """A join_request payload is answered by the hub manager's handler."""
        svc = P2PNetworkService(
            host="127.0.0.1",
            port=7070,
            node_id="test-node",
            peers=[],
        )
        # Stub the hub manager so no real network state is required.
        svc.hub_manager = Mock()
        svc.hub_manager.handle_join_request = AsyncMock(
            return_value={'type': 'join_response'}
        )
        incoming = {
            'type': 'join_request',
            'node_id': 'new-node',
            'island_id': 'test-island-id',
        }

        # The actual message handling happens in _listen_to_stream; this
        # verifies that hub_manager.handle_join_request would be called
        # and yields a join_response payload.
        reply = await svc.hub_manager.handle_join_request(incoming)

        assert reply is not None
        assert reply['type'] == 'join_response'
if __name__ == "__main__":
    # Propagate pytest's exit status so shells/CI see test failures
    # (the bare pytest.main() call discarded the return code).
    raise SystemExit(pytest.main([__file__]))

View File

@@ -77,7 +77,7 @@ app.add_middleware(
"http://localhost:3000",
"http://localhost:8080",
"http://localhost:8000",
"http://localhost:3003"
"http://localhost:8008"
],
allow_credentials=True,
allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
@@ -358,4 +358,4 @@ def health_check():
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=3003)
uvicorn.run(app, host="0.0.0.0", port=8008)

View File

@@ -1,20 +0,0 @@
# Exchange API Routes - add these location blocks to the existing nginx config
location /api/trades/ {
proxy_pass http://127.0.0.1:3003/api/trades/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}
location /api/orders {
proxy_pass http://127.0.0.1:3003/api/orders;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
}

View File

@@ -347,7 +347,7 @@ class ExchangeAPIHandler(BaseHTTPRequestHandler):
"error": str(e)
}, 500)
def run_server(port=3003):
def run_server(port=8008):
"""Run the server"""
init_db()