refactor: migrate P2P network from Redis gossip to direct TCP mesh architecture
- Replaced Redis-based P2P with direct TCP connections for decentralized mesh networking - Added handshake protocol with node_id exchange for peer authentication - Implemented bidirectional connection management (inbound/outbound streams) - Added peer dialing loop to continuously reconnect to initial peers - Added ping/pong keepalive mechanism to maintain active connections - Prevented duplicate connections through endpoint and node-id tracking
This commit is contained in:
@@ -1,39 +1,65 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
P2P Network Service using Redis Gossip
|
||||
Handles peer-to-peer communication between blockchain nodes
|
||||
P2P Network Service using Direct TCP connections
|
||||
Handles decentralized peer-to-peer mesh communication between blockchain nodes
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import socket
|
||||
from typing import Dict, Any, Optional
|
||||
from typing import Dict, Any, Optional, Set, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class P2PNetworkService:
|
||||
def __init__(self, host: str, port: int, redis_url: str, node_id: str):
|
||||
def __init__(self, host: str, port: int, node_id: str, peers: str = ""):
    """Initialize the P2P mesh service.

    Args:
        host: Bind address for the inbound TCP listener.
        port: Bind (and advertised) listen port.
        node_id: Unique identifier exchanged during handshakes.
        peers: Comma separated initial peers to dial, format "ip:port,ip:port".
    """
    self.host = host
    self.port = port
    self.node_id = node_id

    # Initial peers to dial (format: "ip:port,ip:port")
    self.initial_peers = []
    if peers:
        for p in peers.split(','):
            p = p.strip()
            if p:
                parts = p.split(':')
                if len(parts) == 2:
                    # Guard against a non-numeric port: skip the bad entry
                    # with a warning instead of crashing service startup.
                    try:
                        self.initial_peers.append((parts[0], int(parts[1])))
                    except ValueError:
                        logger.warning(f"Ignoring peer with invalid port: {p}")

    self._server = None
    self._stop_event = asyncio.Event()

    # Active connections
    # Map of node_id -> writer stream
    self.active_connections: Dict[str, asyncio.StreamWriter] = {}
    # Set of active endpoints we've connected to prevent duplicate dialing
    self.connected_endpoints: Set[Tuple[str, int]] = set()

    self._background_tasks = []
|
||||
|
||||
async def start(self):
|
||||
"""Start P2P network service"""
|
||||
logger.info(f"Starting P2P network service on {self.host}:{self.port}")
|
||||
logger.info(f"Starting P2P network mesh service on {self.host}:{self.port}")
|
||||
logger.info(f"Node ID: {self.node_id}")
|
||||
|
||||
# Create TCP server for P2P connections
|
||||
# Create TCP server for inbound P2P connections
|
||||
self._server = await asyncio.start_server(
|
||||
self._handle_connection,
|
||||
self._handle_inbound_connection,
|
||||
self.host,
|
||||
self.port
|
||||
)
|
||||
|
||||
logger.info(f"P2P service listening on {self.host}:{self.port}")
|
||||
|
||||
# Start background task to dial known peers
|
||||
dial_task = asyncio.create_task(self._dial_peers_loop())
|
||||
self._background_tasks.append(dial_task)
|
||||
|
||||
# Start background task to broadcast pings to active peers
|
||||
ping_task = asyncio.create_task(self._ping_peers_loop())
|
||||
self._background_tasks.append(ping_task)
|
||||
|
||||
try:
|
||||
await self._stop_event.wait()
|
||||
finally:
|
||||
@@ -42,63 +68,253 @@ class P2PNetworkService:
|
||||
async def stop(self):
    """Shut down the service: cancel workers, close streams, stop the listener."""
    logger.info("Stopping P2P network service")

    # Cancel background tasks
    for task in self._background_tasks:
        task.cancel()

    # Close all active connections
    for writer in self.active_connections.values():
        writer.close()
        try:
            await writer.wait_closed()
        except Exception:
            # Best-effort teardown; the peer may already be gone.
            pass
    self.active_connections.clear()
    self.connected_endpoints.clear()

    # Close server
    if self._server:
        self._server.close()
        await self._server.wait_closed()
|
||||
|
||||
async def _handle_connection(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
|
||||
"""Handle incoming P2P connections"""
|
||||
addr = writer.get_extra_info('peername')
|
||||
logger.info(f"P2P connection from {addr}")
|
||||
|
||||
|
||||
async def _dial_peers_loop(self):
    """Background loop to continually try connecting to disconnected initial peers.

    Every 10 seconds, walks ``initial_peers`` and fires off a ``_dial_peer``
    task for any endpoint not already covered by an existing connection.
    """
    while not self._stop_event.is_set():
        for host, port in self.initial_peers:
            endpoint = (host, port)

            # Prevent dialing ourselves or already connected peers
            if endpoint in self.connected_endpoints:
                continue

            # Find if we are already connected to a peer with this host/ip by
            # inbound connections. This prevents two nodes from endlessly
            # redialing each other's listen ports.
            already_connected_ip = False
            for node_id, writer in self.active_connections.items():
                peername = writer.get_extra_info('peername')
                # peername is None once the transport is closing; skip it
                # instead of raising TypeError on subscription.
                if not peername:
                    continue
                peer_ip = peername[0]
                # NOTE(review): hard-coded "aitbc1" heuristic — assumes that
                # hostname resolves into the 10.x network. Confirm, and
                # consider resolving hostnames to IPs to generalize.
                if peer_ip == host or (host == "aitbc1" and peer_ip.startswith("10.")):
                    already_connected_ip = True
                    break

            if already_connected_ip:
                self.connected_endpoints.add(endpoint)  # Mark so we don't try again
                continue

            # Attempt connection (fire-and-forget; _dial_peer handles errors)
            asyncio.create_task(self._dial_peer(host, port))

        # Wait before trying again
        await asyncio.sleep(10)
|
||||
|
||||
async def _dial_peer(self, host: str, port: int):
    """Attempt to establish an outbound TCP connection to a peer.

    On success, records the endpoint, sends our handshake, and hands the
    stream to the shared read loop. Connection failures are logged at
    debug level only — the dial loop will retry later.
    """
    endpoint = (host, port)
    try:
        reader, writer = await asyncio.open_connection(host, port)
        logger.info(f"Successfully dialed outbound peer at {host}:{port}")

        # Record that we're connected to this endpoint
        self.connected_endpoints.add(endpoint)

        # Send handshake immediately
        handshake = {
            'type': 'handshake',
            'node_id': self.node_id,
            'listen_port': self.port
        }
        await self._send_message(writer, handshake)

        # Start listening to this outbound connection
        await self._listen_to_stream(reader, writer, endpoint, outbound=True)

    except ConnectionRefusedError:
        logger.debug(f"Peer {host}:{port} refused connection (offline?)")
    except Exception as e:
        logger.debug(f"Failed to dial peer {host}:{port}: {e}")
|
||||
|
||||
async def _handle_inbound_connection(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
    """Handle incoming P2P TCP connections from other nodes.

    Protocol: the dialing side must send a newline-delimited JSON handshake
    ({'type': 'handshake', 'node_id': ..., 'listen_port': ...}) within 5
    seconds. We validate it, reject duplicates and self-connections, reply
    with our own handshake, then enter the shared read loop.
    """
    addr = writer.get_extra_info('peername')
    logger.info(f"Incoming P2P connection from {addr}")

    # Wait for handshake
    try:
        # Add timeout for initial handshake
        data = await asyncio.wait_for(reader.readline(), timeout=5.0)
        if not data:
            writer.close()
            return

        message = json.loads(data.decode())
        if message.get('type') != 'handshake':
            logger.warning(f"Peer {addr} did not handshake first. Dropping.")
            writer.close()
            return

        peer_node_id = message.get('node_id')
        # Coerce to int so a malformed listen_port fails loudly here (caught
        # below) rather than poisoning connected_endpoints with a bad entry.
        peer_listen_port = int(message.get('listen_port', 7070))

        if not peer_node_id or peer_node_id == self.node_id:
            logger.warning(f"Peer {addr} provided invalid or self node_id: {peer_node_id}")
            writer.close()
            return

        # Accept handshake and store connection
        logger.info(f"Handshake accepted from node {peer_node_id} at {addr}")

        # If we already have a connection to this node, drop the new one to prevent duplicates
        if peer_node_id in self.active_connections:
            logger.info(f"Already connected to node {peer_node_id}. Dropping duplicate inbound.")
            writer.close()
            return

        self.active_connections[peer_node_id] = writer

        # Map their listening endpoint so we don't try to dial them
        remote_ip = addr[0]
        self.connected_endpoints.add((remote_ip, peer_listen_port))

        # Reply with our handshake
        reply_handshake = {
            'type': 'handshake',
            'node_id': self.node_id,
            'listen_port': self.port
        }
        await self._send_message(writer, reply_handshake)

        # Listen for messages
        await self._listen_to_stream(reader, writer, (remote_ip, peer_listen_port), outbound=False, peer_id=peer_node_id)

    except asyncio.TimeoutError:
        logger.warning(f"Timeout waiting for handshake from {addr}")
        writer.close()
    except Exception as e:
        logger.error(f"Error handling inbound connection from {addr}: {e}")
        writer.close()
|
||||
|
||||
async def _listen_to_stream(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, endpoint: Tuple[str, int], outbound: bool, peer_id: str = None):
    """Read loop for an established TCP stream (both inbound and outbound).

    For outbound streams (peer_id is None on entry) the first message must be
    the peer's handshake reply; it is validated and the connection registered.
    Thereafter handles ping/pong keepalives and logs other message types.
    On exit the stream is closed and its registry entries removed.
    """
    addr = endpoint
    try:
        while not self._stop_event.is_set():
            data = await reader.readline()
            if not data:
                break  # Connection closed remotely

            try:
                message = json.loads(data.decode().strip())
                msg_type = message.get('type')

                # If this is an outbound connection, the first message MUST be their handshake reply
                if outbound and peer_id is None:
                    if msg_type == 'handshake':
                        peer_id = message.get('node_id')
                        if not peer_id or peer_id == self.node_id:
                            logger.warning(f"Invalid handshake reply from {addr}. Closing.")
                            break

                        if peer_id in self.active_connections:
                            logger.info(f"Already connected to node {peer_id}. Closing duplicate outbound.")
                            break

                        self.active_connections[peer_id] = writer
                        logger.info(f"Outbound handshake complete. Connected to node {peer_id}")
                        continue
                    else:
                        logger.warning(f"Expected handshake reply from {addr}, got {msg_type}")
                        break

                # Normal message handling
                if msg_type == 'ping':
                    logger.debug(f"Received ping from {peer_id}")
                    await self._send_message(writer, {'type': 'pong', 'node_id': self.node_id})

                elif msg_type == 'pong':
                    logger.debug(f"Received pong from {peer_id}")

                elif msg_type == 'handshake':
                    pass  # Ignore subsequent handshakes

                else:
                    logger.info(f"Received {msg_type} from {peer_id}: {message}")
                    # In a real node, we would forward blocks/txs to the internal event bus here

            except json.JSONDecodeError:
                logger.warning(f"Invalid JSON received from {addr}")

    except asyncio.CancelledError:
        pass
    except Exception as e:
        logger.error(f"Stream error with {addr}: {e}")
    finally:
        logger.info(f"Connection closed to {peer_id or addr}")
        # BUGFIX: only deregister peer_id if the registry entry belongs to
        # THIS writer. When a duplicate stream breaks out above, peer_id is
        # set but the registered writer is the original, healthy connection —
        # unconditionally deleting it would evict the live peer.
        if peer_id and self.active_connections.get(peer_id) is writer:
            del self.active_connections[peer_id]
        if endpoint in self.connected_endpoints:
            self.connected_endpoints.remove(endpoint)
        writer.close()
        try:
            await writer.wait_closed()
        except Exception:
            pass
|
||||
|
||||
async def run_p2p_service(host: str, port: int, redis_url: str, node_id: str):
|
||||
async def _send_message(self, writer: asyncio.StreamWriter, message: dict):
    """Serialize *message* as newline-delimited JSON and write it to the stream.

    Errors are logged rather than raised; a dead stream is cleaned up by
    its own read loop.
    """
    try:
        payload = json.dumps(message) + '\n'
        writer.write(payload.encode())
        await writer.drain()
    except Exception as exc:
        logger.error(f"Failed to send message: {exc}")
|
||||
|
||||
async def _ping_peers_loop(self):
    """Periodically broadcast pings to all active connections to keep them alive."""
    ping_interval = 20  # seconds between keepalive rounds
    while not self._stop_event.is_set():
        await asyncio.sleep(ping_interval)
        keepalive = {'type': 'ping', 'node_id': self.node_id}

        # Snapshot the writers so the dict can change while we broadcast.
        for stream_writer in list(self.active_connections.values()):
            await self._send_message(stream_writer, keepalive)
|
||||
|
||||
|
||||
async def run_p2p_service(host: str, port: int, node_id: str, peers: str):
    """Construct a P2PNetworkService and run it until stopped."""
    mesh = P2PNetworkService(host, port, node_id, peers)
    await mesh.start()
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, configure logging, run the service."""
    import argparse

    parser = argparse.ArgumentParser(description="AITBC Direct TCP P2P Mesh Network")
    parser.add_argument("--host", default="0.0.0.0", help="Bind host")
    parser.add_argument("--port", type=int, default=7070, help="Bind port")
    parser.add_argument("--node-id", required=True, help="Node identifier (required for handshake)")
    parser.add_argument("--peers", default="", help="Comma separated list of initial peers to dial (ip:port)")

    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )

    try:
        asyncio.run(run_p2p_service(args.host, args.port, args.node_id, args.peers))
    except KeyboardInterrupt:
        logger.info("P2P service stopped by user")
|
||||
|
||||
|
||||
@@ -115,13 +115,13 @@ check_system_readiness() {
|
||||
# Check CLI availability
|
||||
if [ ! -f "$CLI_PATH" ]; then
|
||||
print_error "AITBC CLI not found at $CLI_PATH"
|
||||
((issues++))
|
||||
(( issues += 1 )) || true
|
||||
else
|
||||
print_success "AITBC CLI found"
|
||||
fi
|
||||
|
||||
# Check service availability
|
||||
local services=("8000:Exchange" "8001:Coordinator" "8006:Genesis-Node" "8007:Follower-Node")
|
||||
local services=("8001:Exchange" "8000:Coordinator" "8006:Genesis-Node" "8006:Follower-Node")
|
||||
for service in "${services[@]}"; do
|
||||
local port=$(echo "$service" | cut -d: -f1)
|
||||
local name=$(echo "$service" | cut -d: -f2)
|
||||
@@ -131,7 +131,7 @@ check_system_readiness() {
|
||||
print_success "$name service (port $port) is accessible"
|
||||
else
|
||||
print_warning "$name service (port $port) may not be running"
|
||||
((issues++))
|
||||
(( issues += 1 )) || true
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -140,7 +140,7 @@ check_system_readiness() {
|
||||
print_success "Ollama service is running"
|
||||
else
|
||||
print_warning "Ollama service may not be running (needed for Stage 3)"
|
||||
((issues++))
|
||||
(( issues += 1 )) || true
|
||||
fi
|
||||
|
||||
# Check log directory
|
||||
@@ -152,7 +152,7 @@ check_system_readiness() {
|
||||
# Check training scripts
|
||||
if [ ! -d "$SCRIPT_DIR" ]; then
|
||||
print_error "Training scripts directory not found: $SCRIPT_DIR"
|
||||
((issues++))
|
||||
(( issues += 1 )) || true
|
||||
fi
|
||||
|
||||
if [ $issues -eq 0 ]; then
|
||||
@@ -250,7 +250,7 @@ run_complete_training() {
|
||||
print_progress $stage "Starting"
|
||||
|
||||
if run_stage $stage; then
|
||||
((completed_stages++))
|
||||
((completed_stages+=1))
|
||||
print_success "Stage $stage completed successfully"
|
||||
|
||||
# Ask if user wants to continue
|
||||
@@ -310,7 +310,7 @@ review_progress() {
|
||||
for stage in {1..5}; do
|
||||
local log_file="$LOG_DIR/training_stage${stage}.log"
|
||||
if [ -f "$log_file" ] && grep -q "completed successfully" "$log_file"; then
|
||||
((completed++))
|
||||
(( completed += 1 )) || true
|
||||
echo "✅ Stage $stage: Completed"
|
||||
else
|
||||
echo "❌ Stage $stage: Not completed"
|
||||
|
||||
@@ -43,7 +43,7 @@ genesis_block_initialization() {
|
||||
NODE_URL="http://localhost:8006" cli_cmd "blockchain genesis" || print_warning "Genesis block inspection failed"
|
||||
|
||||
print_status "Initializing blockchain on Follower Node..."
|
||||
if NODE_URL="http://localhost:8007" cli_cmd "blockchain init --force"; then
|
||||
if NODE_URL="http://aitbc1:8006" cli_cmd "blockchain init --force"; then
|
||||
print_success "Blockchain initialized on Follower Node"
|
||||
else
|
||||
print_warning "Blockchain may already be initialized on Follower Node"
|
||||
@@ -56,11 +56,11 @@ genesis_block_initialization() {
|
||||
print_warning "Genesis Node RPC (port 8006) is not accessible"
|
||||
fi
|
||||
|
||||
print_status "Verifying RPC connectivity to Follower Node (port 8007)..."
|
||||
if curl -s http://localhost:8007/rpc/info > /dev/null 2>&1; then
|
||||
print_success "Follower Node RPC (port 8007) is accessible"
|
||||
print_status "Verifying RPC connectivity to Follower Node (port 8006 on aitbc1)..."
|
||||
if curl -s http://aitbc1:8006/rpc/info > /dev/null 2>&1; then
|
||||
print_success "Follower Node RPC (port 8006 on aitbc1) is accessible"
|
||||
else
|
||||
print_warning "Follower Node RPC (port 8007) is not accessible"
|
||||
print_warning "Follower Node RPC (port 8006 on aitbc1) is not accessible"
|
||||
fi
|
||||
|
||||
print_status "Verifying Follower Node RPC also runs on port 8006..."
|
||||
|
||||
@@ -156,13 +156,13 @@ node_specific_blockchain() {
|
||||
NODE_URL="http://localhost:8006" $CLI_PATH blockchain info 2>/dev/null || print_warning "Genesis node blockchain info not available"
|
||||
log "Genesis node blockchain operations tested"
|
||||
|
||||
print_status "Testing Follower Node blockchain operations (port 8007)..."
|
||||
NODE_URL="http://localhost:8007" $CLI_PATH blockchain info 2>/dev/null || print_warning "Follower node blockchain info not available"
|
||||
print_status "Testing Follower Node blockchain operations (port 8006 on aitbc1)..."
|
||||
NODE_URL="http://aitbc1:8006" $CLI_PATH blockchain info 2>/dev/null || print_warning "Follower node blockchain info not available"
|
||||
log "Follower node blockchain operations tested"
|
||||
|
||||
print_status "Comparing blockchain heights between nodes..."
|
||||
GENESIS_HEIGHT=$(NODE_URL="http://localhost:8006" $CLI_PATH blockchain height 2>/dev/null | grep -o '[0-9]*' | head -1 || echo "0")
|
||||
FOLLOWER_HEIGHT=$(NODE_URL="http://localhost:8007" $CLI_PATH blockchain height 2>/dev/null | grep -o '[0-9]*' | head -1 || echo "0")
|
||||
FOLLOWER_HEIGHT=$(NODE_URL="http://aitbc1:8006" $CLI_PATH blockchain height 2>/dev/null | grep -o '[0-9]*' | head -1 || echo "0")
|
||||
|
||||
print_status "Genesis height: $GENESIS_HEIGHT, Follower height: $FOLLOWER_HEIGHT"
|
||||
log "Node comparison: Genesis=$GENESIS_HEIGHT, Follower=$FOLLOWER_HEIGHT"
|
||||
|
||||
@@ -217,13 +217,13 @@ node_specific_ai() {
|
||||
NODE_URL="http://localhost:8006" $CLI_PATH ai --job --submit --type inference --prompt "Genesis node test" 2>/dev/null || print_warning "Genesis node AI job submission failed"
|
||||
log "Genesis node AI operations tested"
|
||||
|
||||
print_status "Testing AI operations on Follower Node (port 8007)..."
|
||||
NODE_URL="http://localhost:8007" $CLI_PATH ai --job --submit --type parallel --prompt "Follower node test" 2>/dev/null || print_warning "Follower node AI job submission failed"
|
||||
print_status "Testing AI operations on Follower Node (port 8006 on aitbc1)..."
|
||||
NODE_URL="http://aitbc1:8006" $CLI_PATH ai --job --submit --type parallel --prompt "Follower node test" 2>/dev/null || print_warning "Follower node AI job submission failed"
|
||||
log "Follower node AI operations tested"
|
||||
|
||||
print_status "Comparing AI service availability between nodes..."
|
||||
GENESIS_STATUS=$(NODE_URL="http://localhost:8006" $CLI_PATH ai --service --status --name coordinator 2>/dev/null || echo "unavailable")
|
||||
FOLLOWER_STATUS=$(NODE_URL="http://localhost:8007" $CLI_PATH ai --service --status --name coordinator 2>/dev/null || echo "unavailable")
|
||||
FOLLOWER_STATUS=$(NODE_URL="http://aitbc1:8006" $CLI_PATH ai --service --status --name coordinator 2>/dev/null || echo "unavailable")
|
||||
|
||||
print_status "Genesis AI services: $GENESIS_STATUS"
|
||||
print_status "Follower AI services: $FOLLOWER_STATUS"
|
||||
|
||||
@@ -192,13 +192,13 @@ node_specific_marketplace() {
|
||||
NODE_URL="http://localhost:8006" $CLI_PATH marketplace --list 2>/dev/null || print_warning "Genesis node marketplace not available"
|
||||
log "Genesis node marketplace operations tested"
|
||||
|
||||
print_status "Testing marketplace on Follower Node (port 8007)..."
|
||||
NODE_URL="http://localhost:8007" $CLI_PATH marketplace --list 2>/dev/null || print_warning "Follower node marketplace not available"
|
||||
print_status "Testing marketplace on Follower Node (port 8006 on aitbc1)..."
|
||||
NODE_URL="http://aitbc1:8006" $CLI_PATH marketplace --list 2>/dev/null || print_warning "Follower node marketplace not available"
|
||||
log "Follower node marketplace operations tested"
|
||||
|
||||
print_status "Comparing marketplace data between nodes..."
|
||||
GENESIS_ITEMS=$(NODE_URL="http://localhost:8006" $CLI_PATH marketplace --list 2>/dev/null | wc -l || echo "0")
|
||||
FOLLOWER_ITEMS=$(NODE_URL="http://localhost:8007" $CLI_PATH marketplace --list 2>/dev/null | wc -l || echo "0")
|
||||
FOLLOWER_ITEMS=$(NODE_URL="http://aitbc1:8006" $CLI_PATH marketplace --list 2>/dev/null | wc -l || echo "0")
|
||||
|
||||
print_status "Genesis marketplace items: $GENESIS_ITEMS"
|
||||
print_status "Follower marketplace items: $FOLLOWER_ITEMS"
|
||||
@@ -260,7 +260,7 @@ cross_node_coordination() {
|
||||
log "Genesis node economic data generated"
|
||||
|
||||
# Generate economic data on follower node
|
||||
NODE_URL="http://localhost:8007" $CLI_PATH economics --market --analyze 2>/dev/null || print_warning "Follower node economic analysis failed"
|
||||
NODE_URL="http://aitbc1:8006" $CLI_PATH economics --market --analyze 2>/dev/null || print_warning "Follower node economic analysis failed"
|
||||
log "Follower node economic data generated"
|
||||
|
||||
# Test economic coordination
|
||||
|
||||
@@ -95,7 +95,7 @@ multi_node_coordination() {
|
||||
print_status "5.2 Multi-Node Coordination"
|
||||
|
||||
print_status "Checking cluster status across all nodes..."
|
||||
$CLI_PATH cluster --status --nodes aitbc,aitbc1 2>/dev/null || print_warning "Cluster status command not available"
|
||||
$CLI_PATH cluster status 2>/dev/null || print_warning "Cluster status command not available"
|
||||
log "Cluster status across nodes checked"
|
||||
|
||||
print_status "Syncing all nodes..."
|
||||
@@ -111,7 +111,7 @@ multi_node_coordination() {
|
||||
log "Failover coordination on Genesis node tested"
|
||||
|
||||
print_status "Testing recovery coordination on Follower Node..."
|
||||
NODE_URL="http://localhost:8007" $CLI_PATH cluster --coordinate --action recovery 2>/dev/null || print_warning "Recovery coordination failed"
|
||||
NODE_URL="http://aitbc1:8006" $CLI_PATH cluster --coordinate --action recovery 2>/dev/null || print_warning "Recovery coordination failed"
|
||||
log "Recovery coordination on Follower node tested"
|
||||
|
||||
print_success "5.2 Multi-Node Coordination completed"
|
||||
@@ -122,7 +122,7 @@ performance_optimization() {
|
||||
print_status "5.3 Performance Optimization"
|
||||
|
||||
print_status "Running comprehensive performance benchmark..."
|
||||
$CLI_PATH performance --benchmark --suite comprehensive 2>/dev/null || print_warning "Performance benchmark command not available"
|
||||
$CLI_PATH performance benchmark 2>/dev/null || print_warning "Performance benchmark command not available"
|
||||
log "Comprehensive performance benchmark executed"
|
||||
|
||||
print_status "Optimizing for low latency..."
|
||||
@@ -323,7 +323,7 @@ final_certification_exam() {
|
||||
|
||||
# Test 1: Basic operations
|
||||
if $CLI_PATH --version > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 1 (CLI version): PASSED"
|
||||
else
|
||||
log "Certification test 1 (CLI version): FAILED"
|
||||
@@ -331,7 +331,7 @@ final_certification_exam() {
|
||||
|
||||
# Test 2: Wallet operations
|
||||
if $CLI_PATH wallet balance "$WALLET_NAME" > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 2 (Wallet balance): PASSED"
|
||||
else
|
||||
log "Certification test 2 (Wallet balance): FAILED"
|
||||
@@ -339,7 +339,7 @@ final_certification_exam() {
|
||||
|
||||
# Test 3: Blockchain operations
|
||||
if $CLI_PATH blockchain info > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 3 (Blockchain info): PASSED"
|
||||
else
|
||||
log "Certification test 3 (Blockchain info): FAILED"
|
||||
@@ -347,7 +347,7 @@ final_certification_exam() {
|
||||
|
||||
# Test 4: AI operations
|
||||
if $CLI_PATH ai status > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 4 (AI status): PASSED"
|
||||
else
|
||||
log "Certification test 4 (AI status): FAILED"
|
||||
@@ -355,47 +355,47 @@ final_certification_exam() {
|
||||
|
||||
# Test 5: Marketplace operations
|
||||
if $CLI_PATH market list > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 5 (Marketplace list): PASSED"
|
||||
else
|
||||
log "Certification test 5 (Marketplace list): FAILED"
|
||||
fi
|
||||
|
||||
# Test 6: Economic operations
|
||||
if $CLI_PATH economics --model --type cost-optimization > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
if $CLI_PATH simulate price > /dev/null 2>&1; then
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 6 (Economic modeling): PASSED"
|
||||
else
|
||||
log "Certification test 6 (Economic modeling): FAILED"
|
||||
fi
|
||||
|
||||
# Test 7: Analytics operations
|
||||
if $CLI_PATH analytics --report --type performance > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
if $CLI_PATH analytics blocks > /dev/null 2>&1; then
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 7 (Analytics report): PASSED"
|
||||
else
|
||||
log "Certification test 7 (Analytics report): FAILED"
|
||||
fi
|
||||
|
||||
# Test 8: Automation operations
|
||||
if $CLI_PATH automate --workflow --name test-workflow > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
if $CLI_PATH workflow create --name test > /dev/null 2>&1; then
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 8 (Automation workflow): PASSED"
|
||||
else
|
||||
log "Certification test 8 (Automation workflow): FAILED"
|
||||
fi
|
||||
|
||||
# Test 9: Cluster operations
|
||||
if $CLI_PATH cluster --status --nodes aitbc,aitbc1 > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
if $CLI_PATH cluster status > /dev/null 2>&1; then
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 9 (Cluster status): PASSED"
|
||||
else
|
||||
log "Certification test 9 (Cluster status): FAILED"
|
||||
fi
|
||||
|
||||
# Test 10: Performance operations
|
||||
if $CLI_PATH performance --benchmark --suite comprehensive > /dev/null 2>&1; then
|
||||
((TESTS_PASSED++))
|
||||
if $CLI_PATH performance benchmark > /dev/null 2>&1; then
|
||||
(( TESTS_PASSED += 1 )) || true
|
||||
log "Certification test 10 (Performance benchmark): PASSED"
|
||||
else
|
||||
log "Certification test 10 (Performance benchmark): FAILED"
|
||||
|
||||
@@ -17,13 +17,13 @@ export WALLET_NAME="${WALLET_NAME:-openclaw-trainee}"
|
||||
export WALLET_PASSWORD="${WALLET_PASSWORD:-trainee123}"
|
||||
export TRAINING_TIMEOUT="${TRAINING_TIMEOUT:-300}"
|
||||
export GENESIS_NODE="http://localhost:8006"
|
||||
export FOLLOWER_NODE="http://localhost:8007"
|
||||
export FOLLOWER_NODE="http://aitbc1:8006"
|
||||
|
||||
# Service endpoints
|
||||
export SERVICES=(
|
||||
"8000:Coordinator"
|
||||
"8006:Genesis-Node"
|
||||
"8007:Follower-Node"
|
||||
"8006:Follower-Node"
|
||||
"11434:Ollama"
|
||||
)
|
||||
|
||||
@@ -186,7 +186,7 @@ check_all_services() {
|
||||
local name=$(echo "$service" | cut -d: -f2)
|
||||
|
||||
if ! check_service "$port" "$name"; then
|
||||
((failed++))
|
||||
(( failed += 1 )) || true
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -230,7 +230,7 @@ benchmark_with_retry() {
|
||||
local success=false
|
||||
|
||||
while [[ $attempt -lt $max_retries ]] && [[ "$success" == "false" ]]; do
|
||||
((attempt++))
|
||||
(( attempt += 1 )) || true
|
||||
|
||||
if eval "$cmd" &>/dev/null; then
|
||||
success=true
|
||||
@@ -379,12 +379,12 @@ check_prerequisites_full() {
|
||||
|
||||
# Check CLI
|
||||
if ! check_cli; then
|
||||
((errors++)) || true
|
||||
(( errors += 1 )) || true || true
|
||||
fi
|
||||
|
||||
# Check services
|
||||
if ! check_all_services; then
|
||||
((errors++)) || true
|
||||
(( errors += 1 )) || true || true
|
||||
fi
|
||||
|
||||
# Check log directory
|
||||
@@ -392,7 +392,7 @@ check_prerequisites_full() {
|
||||
print_status "Creating log directory..."
|
||||
mkdir -p "$LOG_DIR" || {
|
||||
print_error "Cannot create log directory"
|
||||
((errors++)) || true
|
||||
(( errors += 1 )) || true || true
|
||||
}
|
||||
fi
|
||||
|
||||
@@ -427,7 +427,7 @@ init_progress() {
|
||||
# Update progress
|
||||
update_progress() {
|
||||
local step_name="$1"
|
||||
((CURRENT_STEP++))
|
||||
(( CURRENT_STEP += 1 )) || true
|
||||
|
||||
local elapsed=$(( $(date +%s) - STEP_START_TIME ))
|
||||
local percent=$((CURRENT_STEP * 100 / TOTAL_STEPS))
|
||||
@@ -447,7 +447,7 @@ cli_cmd() {
|
||||
local attempt=0
|
||||
|
||||
while [[ $attempt -lt $max_retries ]]; do
|
||||
((attempt++))
|
||||
(( attempt += 1 )) || true
|
||||
|
||||
if $CLI_PATH $cmd 2>/dev/null; then
|
||||
return 0
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Advanced AI Service - Enhanced AI Capabilities
|
||||
After=network.target
|
||||
Wants=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/opt/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/usr/bin
|
||||
Environment=PYTHONPATH=/opt/aitbc/apps/coordinator-api/src
|
||||
ExecStart=/opt/aitbc/venv/bin/python -m app.services.advanced_ai_service
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-advanced-ai
|
||||
|
||||
# Security settings (relaxed for development)
|
||||
# NoNewPrivileges=true
|
||||
# PrivateTmp=true
|
||||
# ProtectSystem=strict
|
||||
# ProtectHome=true
|
||||
ReadWritePaths=/var/log/aitbc /var/lib/aitbc/data /opt/aitbc/apps/coordinator-api
|
||||
|
||||
# Resource limits
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
|
||||
# GPU access (if available)
|
||||
DeviceAllow=/dev/nvidia0 rw
|
||||
DeviceAllow=/dev/nvidiactl rw
|
||||
DeviceAllow=/dev/nvidia-uvm rw
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,46 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Blockchain HTTP API (Port 8005)
|
||||
After=network.target aitbc-blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/opt/aitbc
|
||||
Environment=PATH=/usr/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=NODE_ID=aitbc
|
||||
Environment=BLOCKCHAIN_HTTP_PORT=8005
|
||||
Environment=PYTHONPATH=/opt/aitbc/services
|
||||
EnvironmentFile=/etc/aitbc/production.env
|
||||
|
||||
# Blockchain HTTP execution
|
||||
ExecStart=/opt/aitbc/venv/bin/python /opt/aitbc/services/blockchain_http_launcher.py
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=10
|
||||
|
||||
# Production reliability
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StartLimitBurst=5
|
||||
StartLimitIntervalSec=60
|
||||
|
||||
# Production logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-blockchain-http
|
||||
|
||||
# Production security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/var/lib/aitbc/data/blockchain /var/log/aitbc/production/blockchain
|
||||
|
||||
# Production performance
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
MemoryMax=1G
|
||||
CPUQuota=25%
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,6 +1,6 @@
|
||||
[Unit]
|
||||
Description=AITBC Blockchain P2P Network Service
|
||||
After=network.target redis.service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
@@ -10,7 +10,7 @@ WorkingDirectory=/opt/aitbc/apps/blockchain-node
|
||||
Environment=PATH=/usr/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/aitbc/apps/blockchain-node/src:/opt/aitbc/apps/blockchain-node/scripts
|
||||
EnvironmentFile=/etc/aitbc/blockchain.env
|
||||
ExecStart=/opt/aitbc/venv/bin/python -m aitbc_chain.p2p_network --host ${p2p_bind_host} --port ${p2p_bind_port} --redis ${gossip_broadcast_url} --node-id ${proposer_id}
|
||||
ExecStart=/opt/aitbc/venv/bin/python -m aitbc_chain.p2p_network --host ${p2p_bind_host} --port ${p2p_bind_port} --peers ${p2p_peers} --node-id ${proposer_id}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Coordinator Proxy Health Check
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/opt/aitbc/apps/coordinator-api/scripts/check_coordinator_proxy.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,14 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Cross Chain Reputation Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=aitbc
|
||||
WorkingDirectory=/opt/aitbc/services
|
||||
ExecStart=/usr/bin/python3 -m cross_chain_reputation
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,13 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Edge Node Monitoring - aitbc1-edge-secondary
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
ExecStart=/tmp/aitbc-monitoring/monitor.sh
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,38 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Enterprise API Gateway - Multi-tenant API Management
|
||||
After=network.target
|
||||
Wants=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=aitbc
|
||||
Group=aitbc
|
||||
WorkingDirectory=/opt/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/usr/bin
|
||||
Environment=PYTHONPATH=/opt/aitbc/apps/coordinator-api/src
|
||||
ExecStart=/usr/bin/python3 -m app.services.enterprise_api_gateway
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-enterprise-api
|
||||
|
||||
# Security settings
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/var/log/aitbc /var/lib/aitbc/data
|
||||
|
||||
# Resource limits
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
|
||||
# Performance settings
|
||||
Nice=-5
|
||||
IOSchedulingClass=best-effort
|
||||
IOSchedulingPriority=0
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,29 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Follower Node (Port 8007)
|
||||
After=network.target aitbc-blockchain-node.service
|
||||
Wants=aitbc-blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
Environment=NODE_ENV=production
|
||||
Environment=NODE_ID=follower-node-8007
|
||||
Environment=PYTHONPATH=/opt/aitbc/apps/blockchain-node/src:/opt/aitbc/services
|
||||
Environment=BLOCKCHAIN_DATA_DIR=/var/lib/aitbc/data/follower
|
||||
Environment=BLOCKCHAIN_CONFIG_DIR=/etc/aitbc
|
||||
Environment=BLOCKCHAIN_LOG_DIR=/var/log/aitbc/production
|
||||
Environment=BLOCKCHAIN_PORT=8007
|
||||
Environment=BLOCKCHAIN_ROLE=follower
|
||||
Environment=BLOCKCHAIN_GENESIS_NODE=http://localhost:8006
|
||||
ExecStart=/opt/aitbc/venv/bin/python /opt/aitbc/services/blockchain_simple.py
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=30
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,24 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Geographic Load Balancer (Port 8017)
|
||||
After=network.target aitbc-coordinator-api.service aitbc-marketplace-enhanced.service
|
||||
Wants=aitbc-coordinator-api.service aitbc-marketplace-enhanced.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=aitbc
|
||||
Group=aitbc
|
||||
WorkingDirectory=/opt/aitbc
|
||||
Environment=PATH=/usr/bin
|
||||
Environment=PORT=8017
|
||||
Environment=SERVICE_TYPE=loadbalancer-geo
|
||||
Environment=LOG_LEVEL=INFO
|
||||
ExecStart=/usr/bin/python3 /opt/aitbc/apps/coordinator-api/scripts/geo_load_balancer.py --port 8017
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-loadbalancer-geo
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,13 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Miner Dashboard
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
WorkingDirectory=/opt/aitbc-miner-dashboard
|
||||
ExecStart=/usr/bin/python3 dashboard_server.py
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,45 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Real Mining Blockchain Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/opt/aitbc
|
||||
Environment=PATH=/usr/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=NODE_ID=aitbc
|
||||
Environment=PYTHONPATH=/opt/aitbc/services
|
||||
EnvironmentFile=/etc/aitbc/production.env
|
||||
|
||||
# Real mining execution
|
||||
ExecStart=/opt/aitbc/venv/bin/python /opt/aitbc/services/mining_blockchain.py
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=10
|
||||
|
||||
# Mining reliability
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StartLimitBurst=5
|
||||
StartLimitIntervalSec=60
|
||||
|
||||
# Mining logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-mining-blockchain
|
||||
|
||||
# Mining security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/var/lib/aitbc/data/blockchain /var/log/aitbc/production/blockchain
|
||||
|
||||
# Mining performance
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
MemoryMax=4G
|
||||
CPUQuota=80%
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,23 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node Service
|
||||
After=network.target
|
||||
Wants=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/root/aitbc/apps/blockchain-node
|
||||
Environment=PATH=/usr/bin
|
||||
Environment=PYTHONPATH=/root/aitbc/apps/blockchain-node
|
||||
Environment=RUST_LOG=info
|
||||
ExecStart=/usr/bin/python3 -m node.main --datadir /root/aitbc/data --rpc-bind 0.0.0.0:8545
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-node
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,45 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC OpenClaw AI Service
|
||||
After=network.target aitbc-mining-blockchain.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/opt/aitbc
|
||||
Environment=PATH=/usr/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=NODE_ID=aitbc
|
||||
Environment=PYTHONPATH=/opt/aitbc/services
|
||||
EnvironmentFile=/etc/aitbc/production.env
|
||||
|
||||
# OpenClaw AI execution
|
||||
ExecStart=/opt/aitbc/venv/bin/python /opt/aitbc/services/openclaw_ai.py
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=10
|
||||
|
||||
# AI service reliability
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StartLimitBurst=5
|
||||
StartLimitIntervalSec=60
|
||||
|
||||
# AI logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-openclaw-ai
|
||||
|
||||
# AI security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/var/lib/aitbc/data/openclaw /var/log/aitbc/production/openclaw
|
||||
|
||||
# AI performance
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
MemoryMax=2G
|
||||
CPUQuota=60%
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,46 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Real Marketplace with AI Services
|
||||
After=network.target aitbc-mining-blockchain.service aitbc-openclaw-ai.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/opt/aitbc
|
||||
Environment=PATH=/usr/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=NODE_ID=aitbc
|
||||
Environment=REAL_MARKETPLACE_PORT=8009
|
||||
Environment=PYTHONPATH=/opt/aitbc/services
|
||||
EnvironmentFile=/etc/aitbc/production.env
|
||||
|
||||
# Real marketplace execution
|
||||
ExecStart=/opt/aitbc/venv/bin/python /opt/aitbc/services/real_marketplace_launcher.py
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=10
|
||||
|
||||
# Marketplace reliability
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StartLimitBurst=5
|
||||
StartLimitIntervalSec=60
|
||||
|
||||
# Marketplace logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-real-marketplace
|
||||
|
||||
# Marketplace security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/var/lib/aitbc/data/marketplace /var/log/aitbc/production/marketplace
|
||||
|
||||
# Marketplace performance
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
MemoryMax=1G
|
||||
CPUQuota=40%
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
Reference in New Issue
Block a user