chore(systemd): remove obsolete systemd service files and update infrastructure documentation
- Remove 8 unused systemd service files from coordinator-api/systemd/ - aitbc-adaptive-learning.service (port 8005) - aitbc-advanced-ai.service - aitbc-enterprise-api.service - aitbc-gpu-multimodal.service (port 8003) - aitbc-marketplace-enhanced.service (port 8006) - aitbc-modality-optimization.service (port 8004) - aitbc-multimodal.service (port 8002) - aitbc-openclaw-enhanced.service (port 8007)
This commit is contained in:
43
apps/blockchain-node/scripts/apply_bootstrap_genesis.sh
Normal file
43
apps/blockchain-node/scripts/apply_bootstrap_genesis.sh
Normal file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
#
# Reset the devnet blockchain database and apply the bootstrap genesis file.
# WARNING: destructive — the current devnet chain state is replaced.
#
# Fixes over the previous revision:
#   * fail fast on unexpected errors (set -e) so a failed genesis copy can
#     no longer be followed by a node restart on a half-initialised data dir
#   * the old db.sqlite is backed up with a hard failure if the move fails;
#     previously `mv … || true` could fail silently and the later
#     unconditional `rm -f` would then destroy the database with no backup

set -e

DATA_DIR="/root/aitbc/apps/blockchain-node/data"
DB_FILE="$DATA_DIR/devnet/db.sqlite"

echo "=== AITBC Bootstrap Genesis Setup ==="
echo ""

# Stop the blockchain node
echo "1. Stopping blockchain node..."
sudo systemctl stop aitbc-node

# Backup current data (a missing db on a fresh install is fine)
echo "2. Backing up current blockchain data..."
if [ -f "$DB_FILE" ]; then
    sudo mv "$DB_FILE" "$DB_FILE.backup.$(date +%s)"
fi

# Copy new genesis
echo "3. Applying bootstrap genesis..."
sudo cp "$DATA_DIR/genesis_with_bootstrap.json" "$DATA_DIR/devnet/genesis.json"

# Reset database — the mv above already removed it; kept as a no-op safety
# net for the fresh-install case where the file never existed.
echo "4. Resetting blockchain database..."
sudo rm -f "$DB_FILE"

# Restart blockchain node
echo "5. Restarting blockchain node..."
sudo systemctl start aitbc-node

# Wait for node to start
echo "6. Waiting for node to initialize..."
sleep 5

# Verify treasury balance (best effort — do not abort the script on a slow node)
echo "7. Verifying treasury balance..."
curl -s http://localhost:9080/rpc/getBalance/aitbcexchange00000000000000000000000000000000 | jq || true

echo ""
echo "=== Bootstrap Complete! ==="
echo "Treasury should now have 10,000,000 AITBC"
echo ""
echo "Initial Distribution:"
echo "- Exchange Treasury: 10,000,000 AITBC (47.6%)"
echo "- Community Faucet: 1,000,000 AITBC (4.8%)"
echo "- Team Fund: 2,000,000 AITBC (9.5%)"
echo "- Early Investors: 5,000,000 AITBC (23.8%)"
echo "- Ecosystem Fund: 3,000,000 AITBC (14.3%)"
75
apps/blockchain-node/scripts/assign_proposer.py
Normal file
75
apps/blockchain-node/scripts/assign_proposer.py
Normal file
@@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Script to assign a proposer to a block by polling for it
|
||||
"""
|
||||
|
||||
import httpx
|
||||
import json
|
||||
|
||||
import os

# Configuration
# Base URL of the coordinator API this script talks to.
COORDINATOR_URL = "http://localhost:8001"
# The API key is read from the environment.  The previous revision assigned
# the literal placeholder string "${MINER_API_KEY}" (shell syntax inside a
# Python literal), which could never authenticate against the coordinator.
MINER_API_KEY = os.environ.get("MINER_API_KEY", "")
# Stable identifier under which this miner registers with the coordinator.
MINER_ID = "localhost-gpu-miner"
def assign_proposer_to_latest_block() -> None:
    """Poll for the latest unassigned job to become the proposer.

    Flow (all calls go to the coordinator configured at module level):
      1. register this miner (``MINER_ID``) with its GPU capabilities,
      2. poll once for a pending job (1 s wait budget),
      3. look the job up in the explorer block list and print that block's
         height/hash/proposer so the assignment can be verified by eye.

    Prints progress to stdout and returns nothing.  httpx transport errors
    are not caught and will propagate to the caller.
    """

    # First register the miner
    print("📝 Registering miner...")
    register_response = httpx.post(
        f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
        headers={
            "Content-Type": "application/json",
            "X-Api-Key": MINER_API_KEY
        },
        json={
            "capabilities": {
                "gpu": {"model": "RTX 4060 Ti", "memory_gb": 16}
            }
        }
    )

    if register_response.status_code != 200:
        print(f"❌ Registration failed: {register_response.text}")
        return

    print("✅ Miner registered")

    # Poll for a job
    print("\n🔍 Polling for jobs...")
    poll_response = httpx.post(
        f"{COORDINATOR_URL}/v1/miners/poll",
        headers={
            "Content-Type": "application/json",
            "X-Api-Key": MINER_API_KEY
        },
        json={"max_wait_seconds": 1}
    )

    if poll_response.status_code == 200:
        job = poll_response.json()
        print(f"✅ Received job: {job['job_id']}")
        print(f"   This job is now assigned to miner: {MINER_ID}")

        # Check the block
        print("\n📦 Checking block...")
        blocks_response = httpx.get(f"{COORDINATOR_URL}/v1/explorer/blocks")

        if blocks_response.status_code == 200:
            blocks = blocks_response.json()
            # NOTE(review): this assumes a job_id equals the hash of the
            # block it was carved from — confirm against the coordinator's
            # explorer schema.
            for block in blocks['items']:
                if block['hash'] == job['job_id']:
                    print(f"✅ Block updated!")
                    print(f"   Height: {block['height']}")
                    print(f"   Hash: {block['hash']}")
                    print(f"   Proposer: {block['proposer']}")
                    break
    elif poll_response.status_code == 204:
        # 204 is the coordinator's "queue empty" answer, not an error.
        print("ℹ️ No jobs available to poll")
    else:
        print(f"❌ Poll failed: {poll_response.text}")


if __name__ == "__main__":
    print("🎯 Assign Proposer to Latest Block")
    print("=" * 40)
    assign_proposer_to_latest_block()
323
apps/blockchain-node/scripts/fix_sync_optimization.sh
Executable file
323
apps/blockchain-node/scripts/fix_sync_optimization.sh
Executable file
@@ -0,0 +1,323 @@
|
||||
#!/bin/bash

# Blockchain Synchronization Optimization Script
# Fixes common sync issues and optimizes cross-site synchronization
#
# Expects passwordless ssh access to hosts "aitbc-cascade" and "ns3-root"
# (see the ssh calls throughout this script).

set -e

echo "🔧 Blockchain Synchronization Optimization"
echo "=========================================="

# Colors for output (ANSI escape sequences, rendered by `echo -e` below)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${GREEN}✅ $1${NC}"
}

# Yellow warning line
print_warning() {
    echo -e "${YELLOW}⚠️ $1${NC}"
}

# Red error line (informational only — does not exit)
print_error() {
    echo -e "${RED}❌ $1${NC}"
}
# Return 0 (success) when systemd unit $1 is active, non-zero otherwise.
# When a second argument is given, the check runs on that ssh host instead
# of the local machine.
check_service() {
    local service=$1
    local host=$2

    if [ -z "$host" ]; then
        systemctl is-active --quiet "$service"
    else
        ssh "$host" "systemctl is-active --quiet '$service'"
    fi
}
||||
|
||||
# Restart systemd unit $1 — locally when $2 is empty, otherwise over ssh
# on host $2.  Propagates the systemctl exit status.
restart_service() {
    local service=$1
    local host=$2

    if [ -n "$host" ]; then
        ssh "$host" "sudo systemctl restart '$service'"
    else
        sudo systemctl restart "$service"
    fi
}
|
||||
# Print the chain height reported by "$url/head" (empty output on failure).
# With a second argument the query is executed on that ssh host.
#
# Fix: the previous pipeline ended in `cut -d'"' -f2`, which extracts the
# literal word "height" from '"height":N' instead of the number, so every
# height variable below held the string "height" and all the arithmetic
# comparisons silently evaluated against 0.  A sed digit capture returns
# the actual number (and also tolerates a quoted value).
get_height() {
    local url=$1
    local host=$2

    if [ -z "$host" ]; then
        curl -s "$url/head" 2>/dev/null | sed -n 's/.*"height":[ "]*\([0-9][0-9]*\).*/\1/p'
    else
        ssh "$host" "curl -s '$url/head' 2>/dev/null | sed -n 's/.*\"height\":[ \"]*\\([0-9][0-9]*\\).*/\\1/p'"
    fi
}
||||
|
||||
echo ""
echo "📊 Current Sync Status Analysis"
echo "=============================="

# Get current heights (nodes 1 and 2 live on aitbc-cascade, node 3 on ns3)
echo "Checking current blockchain heights..."
NODE1_HEIGHT=$(get_height "http://localhost:8082/rpc" "aitbc-cascade")
NODE2_HEIGHT=$(get_height "http://localhost:8081/rpc" "aitbc-cascade")
NODE3_HEIGHT=$(get_height "http://192.168.100.10:8082/rpc" "ns3-root")

echo "Node 1 (aitbc-cascade): $NODE1_HEIGHT"
echo "Node 2 (aitbc-cascade): $NODE2_HEIGHT"
echo "Node 3 (ns3): $NODE3_HEIGHT"

# Calculate height differences (guarded: both heights must be non-empty)
if [ -n "$NODE1_HEIGHT" ] && [ -n "$NODE2_HEIGHT" ]; then
    DIFF12=$((NODE2_HEIGHT - NODE1_HEIGHT))
    echo "Height difference (Node2 - Node1): $DIFF12"
fi

if [ -n "$NODE2_HEIGHT" ] && [ -n "$NODE3_HEIGHT" ]; then
    DIFF23=$((NODE2_HEIGHT - NODE3_HEIGHT))
    echo "Height difference (Node2 - Node3): $DIFF23"
fi

echo ""
echo "🔧 Step 1: Fix Node 1 Endpoint Configuration"
echo "============================================="

# Check Node 1 config for wrong endpoint (/rpc2 instead of /rpc)
echo "Checking Node 1 configuration..."
NODE1_CONFIG=$(ssh aitbc-cascade "grep -n 'aitbc.bubuit.net/rpc2' /opt/blockchain-node/src/aitbc_chain/config.py 2>/dev/null || true")

if [ -n "$NODE1_CONFIG" ]; then
    print_warning "Found wrong endpoint /rpc2 in Node 1 config"
    echo "Fixing endpoint configuration..."

    # Backup original config
    ssh aitbc-cascade "sudo cp /opt/blockchain-node/src/aitbc_chain/config.py /opt/blockchain-node/src/aitbc_chain/config.py.backup"

    # Fix the endpoint in place
    ssh aitbc-cascade "sudo sed -i 's|https://aitbc.bubuit.net/rpc2|https://aitbc.bubuit.net/rpc|g' /opt/blockchain-node/src/aitbc_chain/config.py"

    print_status "Fixed Node 1 endpoint configuration"

    # Restart Node 1 so the corrected endpoint takes effect
    echo "Restarting Node 1 service..."
    restart_service "aitbc-blockchain-node-1.service" "aitbc-cascade"
    sleep 5

    if check_service "aitbc-blockchain-node-1.service" "aitbc-cascade"; then
        print_status "Node 1 service restarted successfully"
    else
        print_error "Node 1 service failed to restart"
    fi
else
    print_status "Node 1 endpoint configuration is correct"
fi

echo ""
echo "🔧 Step 2: Fix Node 3 Services"
echo "=============================="

# Check Node 3 service status ('failed' is also used when the unit is unknown)
echo "Checking Node 3 services..."
NODE3_STATUS=$(ssh ns3-root "systemctl is-active blockchain-node-3.service 2>/dev/null || echo 'failed'")

if [ "$NODE3_STATUS" = "failed" ] || [ "$NODE3_STATUS" = "activating" ]; then
    print_warning "Node 3 service is in $NODE3_STATUS state"

    echo "Checking Node 3 service logs..."
    ssh ns3-root "journalctl -u blockchain-node-3.service --no-pager -n 10"

    echo "Attempting to fix Node 3 service..."

    # Stop and restart Node 3 (stop is best-effort)
    ssh ns3-root "sudo systemctl stop blockchain-node-3.service || true"
    sleep 2
    ssh ns3-root "sudo systemctl start blockchain-node-3.service"
    sleep 5

    # Check status again
    NODE3_NEW_STATUS=$(ssh ns3-root "systemctl is-active blockchain-node-3.service 2>/dev/null || echo 'failed'")

    if [ "$NODE3_NEW_STATUS" = "active" ]; then
        print_status "Node 3 service fixed and running"
    else
        print_error "Node 3 service still not working: $NODE3_NEW_STATUS"
        echo "Manual intervention required for Node 3"
    fi
else
    print_status "Node 3 service is running"
fi

echo ""
echo "🔧 Step 3: Optimize Sync Configuration"
echo "======================================"
# Append sync tuning settings to the node config on host $1 (file $2).
#
# Fix: the append is now idempotent — the previous version ran `tee -a`
# unconditionally, so every re-run of this script duplicated the whole
# settings block at the end of config.py.
#
# NOTE(review): the appended lines are Python attribute definitions written
# at module level of config.py; confirm the node actually reads them there
# rather than inside a settings class.
optimize_sync_config() {
    local host=$1
    local config_path=$2

    echo "Optimizing sync configuration on $host..."

    # Skip hosts whose config already carries the settings block
    if ssh "$host" "grep -q 'sync_interval_seconds' '$config_path' 2>/dev/null"; then
        print_status "Sync configuration already present on $host"
        return 0
    fi

    # Backup config (best effort)
    ssh "$host" "sudo cp '$config_path' '$config_path.backup' 2>/dev/null || true"

    # Add sync settings (quoted heredoc: no local expansion)
    ssh "$host" "sudo tee -a '$config_path' > /dev/null << 'EOF'

# Sync optimization settings
sync_interval_seconds: int = 5 # Reduced from 10s
sync_retry_attempts: int = 3
sync_retry_delay_seconds: int = 2
sync_timeout_seconds: int = 10
max_sync_height_diff: int = 1000 # Alert if difference exceeds this
EOF"

    print_status "Sync configuration optimized on $host"
}
||||
|
||||
# Optimize sync configs (nodes 1 and 2 share the host aitbc-cascade)
optimize_sync_config "aitbc-cascade" "/opt/blockchain-node/src/aitbc_chain/config.py"
optimize_sync_config "aitbc-cascade" "/opt/blockchain-node-2/src/aitbc_chain/config.py"
optimize_sync_config "ns3-root" "/opt/blockchain-node/src/aitbc_chain/config.py"

echo ""
echo "🔧 Step 4: Restart Services with New Config"
echo "=========================================="

# Restart all services so the appended settings are picked up
echo "Restarting blockchain services..."

for service in "aitbc-blockchain-node-1.service" "aitbc-blockchain-node-2.service"; do
    echo "Restarting $service on aitbc-cascade..."
    restart_service "$service" "aitbc-cascade"
    sleep 3
done

for service in "blockchain-node-3.service"; do
    echo "Restarting $service on ns3..."
    restart_service "$service" "ns3-root"
    sleep 3
done

echo ""
echo "📊 Step 5: Verify Sync Optimization"
echo "==================================="

# Wait for services to stabilize before re-measuring
echo "Waiting for services to stabilize..."
sleep 10

# Check new heights
echo "Checking new blockchain heights..."
NEW_NODE1_HEIGHT=$(get_height "http://localhost:8082/rpc" "aitbc-cascade")
NEW_NODE2_HEIGHT=$(get_height "http://localhost:8081/rpc" "aitbc-cascade")
NEW_NODE3_HEIGHT=$(get_height "http://192.168.100.10:8082/rpc" "ns3-root")

echo "New heights:"
echo "Node 1: $NEW_NODE1_HEIGHT"
echo "Node 2: $NEW_NODE2_HEIGHT"
echo "Node 3: $NEW_NODE3_HEIGHT"

# Calculate improvements (guarded: all four heights must be non-empty)
if [ -n "$NEW_NODE1_HEIGHT" ] && [ -n "$NEW_NODE2_HEIGHT" ] && [ -n "$NODE1_HEIGHT" ] && [ -n "$NODE2_HEIGHT" ]; then
    OLD_DIFF=$((NODE2_HEIGHT - NODE1_HEIGHT))
    NEW_DIFF=$((NEW_NODE2_HEIGHT - NEW_NODE1_HEIGHT))

    echo "Height difference improvement:"
    echo "Before: $OLD_DIFF"
    echo "After: $NEW_DIFF"

    if [ $NEW_DIFF -lt $OLD_DIFF ]; then
        IMPROVEMENT=$((OLD_DIFF - NEW_DIFF))
        print_status "Sync improved by $IMPROVEMENT blocks"
    else
        print_warning "Sync did not improve or got worse"
    fi
fi

echo ""
echo "🔧 Step 6: Create Sync Monitoring Script"
echo "========================================="
# Create monitoring script.
#
# Fix: the generated script used `cut -d'"' -f2` after grepping
# '"height":N', which extracts the literal word "height" instead of the
# number; the sed digit capture below returns the actual height.  The
# heredoc delimiter is quoted, so nothing is expanded at generation time.
cat > /tmp/sync_monitor.sh << 'EOF'
#!/bin/bash

# Blockchain Sync Monitor
# Run this periodically to check sync health

echo "🔍 Blockchain Sync Monitor - $(date)"
echo "===================================="

# Get heights
NODE1=$(curl -s http://localhost:8082/rpc/head 2>/dev/null | sed -n 's/.*"height":[ "]*\([0-9][0-9]*\).*/\1/p')
NODE2=$(curl -s http://localhost:8081/rpc/head 2>/dev/null | sed -n 's/.*"height":[ "]*\([0-9][0-9]*\).*/\1/p')
NODE3=$(ssh ns3-root "curl -s http://192.168.100.10:8082/rpc/head 2>/dev/null | sed -n 's/.*\"height\":[ \"]*\\([0-9][0-9]*\\).*/\\1/p'")

echo "Node 1: $NODE1"
echo "Node 2: $NODE2"
echo "Node 3: $NODE3"

# Check for issues
if [ -n "$NODE1" ] && [ -n "$NODE2" ]; then
    DIFF=$((NODE2 - NODE1))
    if [ $DIFF -gt 100 ]; then
        echo "⚠️ WARNING: Node 1 and Node 2 height difference: $DIFF"
    fi
fi

if [ -n "$NODE2" ] && [ -n "$NODE3" ]; then
    DIFF=$((NODE2 - NODE3))
    if [ $DIFF -gt 1000 ]; then
        echo "⚠️ WARNING: Node 2 and Node 3 height difference: $DIFF"
    fi
fi

echo "Sync check completed."
EOF

chmod +x /tmp/sync_monitor.sh
print_status "Created sync monitoring script: /tmp/sync_monitor.sh"
|
||||
# Final summary — informational output only, no further side effects.
echo ""
echo "🎉 Sync Optimization Complete!"
echo "=============================="

echo ""
echo "📋 Summary of actions taken:"
echo "• Fixed Node 1 endpoint configuration"
echo "• Restarted problematic services"
echo "• Optimized sync intervals and retry logic"
echo "• Created monitoring script"
echo ""
echo "📊 Next steps:"
echo "1. Monitor sync performance with: /tmp/sync_monitor.sh"
echo "2. Set up cron job for periodic monitoring"
echo "3. Check logs for any remaining issues"
echo "4. Consider implementing P2P sync for better performance"
echo ""
echo "🔧 If issues persist:"
echo "• Check individual service logs: journalctl -u [service-name]"
echo "• Verify network connectivity between sites"
echo "• Consider manual block import for severely lagging nodes"
echo "• Review firewall and security group settings"

print_status "Blockchain synchronization optimization completed!"
21
apps/blockchain-node/scripts/return_testnet_btc.sh
Executable file
21
apps/blockchain-node/scripts/return_testnet_btc.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
# Script to return testnet Bitcoin held in the aitbc_exchange wallet to the
# faucet return address.
#
# Fix: the send amount (balance minus a 0.00001 BTC fee reserve) is now
# checked to be positive — previously a balance in (0, 0.00001] produced a
# negative sendtoaddress amount and a confusing RPC error.

RETURN_ADDRESS="tb1qerzrlxcfu24davlur5sqmgzzgsal6wusda40er"

echo "Checking balance..."
BALANCE=$(bitcoin-cli -testnet -rpcwallet=aitbc_exchange getbalance)

if [ "$(echo "$BALANCE > 0" | bc)" -eq 1 ]; then
    echo "Current balance: $BALANCE BTC"

    # Calculate amount to send (balance minus small fee reserve)
    SEND_AMOUNT=$(echo "$BALANCE - 0.00001" | bc)

    if [ "$(echo "$SEND_AMOUNT > 0" | bc)" -eq 1 ]; then
        echo "Sending to return address: $RETURN_ADDRESS"
        TXID=$(bitcoin-cli -testnet -rpcwallet=aitbc_exchange sendtoaddress "$RETURN_ADDRESS" "$SEND_AMOUNT")
        echo "Transaction sent! TXID: $TXID"
        echo "Explorer: https://blockstream.info/testnet/tx/$TXID"
    else
        echo "Balance too small to cover the fee reserve; nothing sent."
    fi
else
    echo "No Bitcoin to return. Current balance: $BALANCE BTC"
fi
87
apps/blockchain-node/scripts/start_mock_blockchain.sh
Normal file
87
apps/blockchain-node/scripts/start_mock_blockchain.sh
Normal file
@@ -0,0 +1,87 @@
|
||||
#!/bin/bash
# Start mock blockchain nodes for testing
# This script sets up the required mock servers on ports 8081 and 8082

set -e

echo "🚀 Starting Mock Blockchain Nodes for Testing"
echo "============================================="

# Colors (ANSI escapes used by the print helpers below)
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Green informational line
print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

# Yellow warning line
print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}
# Probe /health on 127.0.0.1:$1.  Returns 0 when nothing answers there
# (port looks free), 1 — with a warning — when something is already up.
check_port() {
    local port=$1
    if ! curl -s "http://127.0.0.1:$port/health" >/dev/null 2>&1; then
        return 0
    fi
    print_warning "Port $port is already in use"
    return 1
}
# Stop any existing mock servers (best effort: pkill returning non-zero
# when no process matched is deliberately swallowed).
stop_existing_servers() {
    print_status "Stopping existing mock servers..."
    pkill -f "mock_blockchain_node.py" 2>/dev/null || true
    sleep 1
}
# Launch the two mock node processes in the background and verify that both
# answer /health.  Logs go to /tmp/mock_node_<port>.log; exits 1 when either
# server fails to come up.  (Fix: the previous version captured the child
# PIDs into unused locals pid1/pid2 — cleanup happens by name via pkill, so
# the captures were dead code and are removed.)
start_mock_servers() {
    print_status "Starting mock blockchain node on port 8081..."
    cd "$(dirname "$0")/.."
    python3 tests/mock_blockchain_node.py 8081 > /tmp/mock_node_8081.log 2>&1 &

    print_status "Starting mock blockchain node on port 8082..."
    python3 tests/mock_blockchain_node.py 8082 > /tmp/mock_node_8082.log 2>&1 &

    # Wait for servers to start
    sleep 2

    # Verify servers are running
    if curl -s "http://127.0.0.1:8081/health" >/dev/null 2>&1 && \
       curl -s "http://127.0.0.1:8082/health" >/dev/null 2>&1; then
        print_status "✅ Mock blockchain nodes are running!"
        echo ""
        echo "Node 1: http://127.0.0.1:8082"
        echo "Node 2: http://127.0.0.1:8081"
        echo ""
        echo "To run tests:"
        echo "  python -m pytest tests/test_blockchain_nodes.py -v"
        echo ""
        echo "To stop servers:"
        echo "  pkill -f 'mock_blockchain_node.py'"
        echo ""
        echo "Log files:"
        echo "  Node 1: /tmp/mock_node_8082.log"
        echo "  Node 2: /tmp/mock_node_8081.log"
    else
        print_warning "❌ Failed to start mock servers"
        echo "Check log files:"
        echo "  Node 1: /tmp/mock_node_8082.log"
        echo "  Node 2: /tmp/mock_node_8081.log"
        exit 1
    fi
}
# Main execution: tear down any stale servers, then bring up fresh ones.
main() {
    stop_existing_servers
    start_mock_servers
}

# Run main function
main "$@"
18
apps/coordinator-api/scripts/check_coordinator_proxy.sh
Executable file
18
apps/coordinator-api/scripts/check_coordinator_proxy.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env bash
set -euo pipefail

# Poll the coordinator proxy health endpoint until it answers or the retry
# budget runs out.  Exits 0 on success, 1 on failure.
HEALTH_URL="http://127.0.0.1:18000/v1/health"
MAX_RETRIES=10
RETRY_DELAY=2

attempt=1
while [ "$attempt" -le "$MAX_RETRIES" ]; do
    if curl -fsS --max-time 5 "$HEALTH_URL" >/dev/null 2>&1; then
        echo "Coordinator proxy healthy: $HEALTH_URL"
        exit 0
    fi
    echo "Attempt $attempt/$MAX_RETRIES: Coordinator proxy not ready yet, waiting ${RETRY_DELAY}s..."
    sleep $RETRY_DELAY
    attempt=$((attempt + 1))
done

echo "Coordinator proxy health check FAILED: $HEALTH_URL" >&2
exit 1
151
apps/coordinator-api/scripts/geo_load_balancer.py
Executable file
151
apps/coordinator-api/scripts/geo_load_balancer.py
Executable file
@@ -0,0 +1,151 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Geographic Load Balancer for AITBC Marketplace
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import aiohttp
|
||||
from aiohttp import web
|
||||
import json
|
||||
from datetime import datetime
|
||||
import os
|
||||
|
||||
# Regional endpoints configuration.
# Each region maps to a backend URL plus a static routing weight consumed
# by the weighted random pick in GeoLoadBalancer.get_healthy_region().
# 'healthy' is mutated in place by GeoLoadBalancer.health_check().
# NOTE(review): several regions intentionally share the same backend URL —
# confirm this fan-in is deliberate and not a copy/paste leftover.
regions = {
    'us-east': {'url': 'http://127.0.0.1:18000', 'weight': 3, 'healthy': True, 'edge_node': 'aitbc-edge-primary'},
    'us-west': {'url': 'http://127.0.0.1:18001', 'weight': 2, 'healthy': True, 'edge_node': 'aitbc1-edge-secondary'},
    'eu-central': {'url': 'http://127.0.0.1:8006', 'weight': 2, 'healthy': True, 'edge_node': 'localhost'},
    'eu-west': {'url': 'http://127.0.0.1:18000', 'weight': 1, 'healthy': True, 'edge_node': 'aitbc-edge-primary'},
    'ap-southeast': {'url': 'http://127.0.0.1:18001', 'weight': 2, 'healthy': True, 'edge_node': 'aitbc1-edge-secondary'},
    'ap-northeast': {'url': 'http://127.0.0.1:8006', 'weight': 1, 'healthy': True, 'edge_node': 'localhost'}
}
class GeoLoadBalancer:
    """Weighted-random reverse proxy over the module-level ``regions`` table."""

    def __init__(self) -> None:
        # current_region is unused by the selection logic below (picks are
        # random, not round-robin); kept as-is for interface stability.
        self.current_region = 0
        # Intended probe period in seconds.  No background task consumes it
        # here — probes run on demand via the /health handler.
        self.health_check_interval = 30

    async def health_check(self, region_config):
        """Probe one region's /health/live endpoint and record the result.

        Mutates ``region_config`` in place: sets 'healthy' and 'last_check',
        plus 'error' on failure.  Never raises.
        """
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(f"{region_config['url']}/health/live", timeout=5) as response:
                    region_config['healthy'] = response.status == 200
                    region_config['last_check'] = datetime.now().isoformat()
        except Exception as e:
            region_config['healthy'] = False
            region_config['last_check'] = datetime.now().isoformat()
            region_config['error'] = str(e)

    async def get_healthy_region(self):
        """Pick a healthy region by weighted random choice.

        Returns ``(name, config)``, or ``(None, None)`` when no region is
        healthy.  When all weights are zero (or the loop falls through),
        the first healthy region is returned.
        """
        healthy_regions = [(name, config) for name, config in regions.items() if config['healthy']]
        if not healthy_regions:
            return None, None

        # Simple weighted round-robin
        total_weight = sum(config['weight'] for _, config in healthy_regions)
        if total_weight == 0:
            return healthy_regions[0]

        import random
        rand = random.randint(1, total_weight)
        current_weight = 0

        for name, config in healthy_regions:
            current_weight += config['weight']
            if rand <= current_weight:
                return name, config

        return healthy_regions[0]

    async def proxy_request(self, request):
        """Forward ``request`` to a healthy region and relay the response.

        Returns 503 when no region is healthy, 502 when forwarding fails;
        otherwise mirrors the backend response and tags it with the
        X-Region / X-Backend-Url routing headers.
        """
        region_name, region_config = await self.get_healthy_region()
        if not region_config:
            return web.json_response({'error': 'No healthy regions available'}, status=503)

        try:
            # Forward request to selected region
            target_url = f"{region_config['url']}{request.path_qs}"

            async with aiohttp.ClientSession() as session:
                # Prepare headers (remove host header)
                headers = dict(request.headers)
                headers.pop('Host', None)

                async with session.request(
                    method=request.method,
                    url=target_url,
                    headers=headers,
                    data=await request.read()
                ) as response:
                    # Read response
                    body = await response.read()

                    # Create response
                    # NOTE(review): backend headers (including hop-by-hop
                    # ones such as Transfer-Encoding/Content-Length) are
                    # copied verbatim — verify this does not conflict with
                    # aiohttp's own framing of `body`.
                    resp = web.Response(
                        body=body,
                        status=response.status,
                        headers=dict(response.headers)
                    )

                    # Add routing headers
                    resp.headers['X-Region'] = region_name
                    resp.headers['X-Backend-Url'] = region_config['url']

                    return resp

        except Exception as e:
            return web.json_response({
                'error': 'Proxy error',
                'message': str(e),
                'region': region_name
            }, status=502)
async def handle_all_requests(request):
    """Catch-all handler: delegate every request to the app's balancer."""
    return await request.app['balancer'].proxy_request(request)
async def health_check_handler(request):
    """Probe every region concurrently, then report the full regions table."""
    balancer = request.app['balancer']

    # Refresh all regions' health in parallel before answering.
    await asyncio.gather(*(balancer.health_check(cfg) for cfg in regions.values()))

    return web.json_response({
        'status': 'healthy',
        'load_balancer': 'geographic',
        'regions': regions,
        'timestamp': datetime.now().isoformat()
    })
async def status_handler(request):
    """Summarise region health without issuing new probes.

    Fix: drops the unused ``balancer = request.app['balancer']`` local —
    this handler only reads the module-level ``regions`` table.
    """
    healthy_count = sum(1 for config in regions.values() if config['healthy'])

    # Per-region digest: health flag, routing weight and last probe time.
    summary = {}
    for name, config in regions.items():
        summary[name] = {
            'healthy': config['healthy'],
            'weight': config['weight'],
            'last_check': config.get('last_check'),
        }

    return web.json_response({
        'total_regions': len(regions),
        'healthy_regions': healthy_count,
        'health_ratio': healthy_count / len(regions),
        'current_time': datetime.now().isoformat(),
        'regions': summary
    })
async def create_app():
    """Build and return the aiohttp application.

    Fix: routes are now registered most-specific first.  The previous
    revision added the catch-all ``'/{path:.*}'`` route *before* /health
    and /status; aiohttp resolves routes in registration order, so the two
    management endpoints were shadowed by the proxy and never reachable.
    """
    app = web.Application()
    balancer = GeoLoadBalancer()
    app['balancer'] = balancer

    # Management endpoints first, then the catch-all proxy route.
    app.router.add_get('/health', health_check_handler)
    app.router.add_get('/status', status_handler)
    app.router.add_route('*', '/{path:.*}', handle_all_requests)

    return app
if __name__ == '__main__':
    # Hand the coroutine straight to run_app, which creates and owns the
    # event loop.  The previous ``asyncio.run(create_app())`` built the app
    # inside a loop that was immediately closed before run_app started a
    # second one — run_app accepts a coroutine for exactly this reason.
    web.run_app(create_app(), host='127.0.0.1', port=8080)
@@ -1,32 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Adaptive Learning Service
|
||||
After=network.target aitbc-coordinator-api.service
|
||||
Wants=aitbc-coordinator-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debian
|
||||
Group=debian
|
||||
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.adaptive_learning_app:app --host 127.0.0.1 --port 8005
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=5
|
||||
PrivateTmp=true
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-adaptive-learning
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,38 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Advanced AI Service - Enhanced AI Capabilities
|
||||
After=network.target
|
||||
Wants=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=aitbc
|
||||
Group=aitbc
|
||||
WorkingDirectory=/opt/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/opt/aitbc/.venv/bin
|
||||
Environment=PYTHONPATH=/opt/aitbc/apps/coordinator-api/src
|
||||
ExecStart=/opt/aitbc/.venv/bin/python -m app.services.advanced_ai_service
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-advanced-ai
|
||||
|
||||
# Security settings
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/opt/aitbc/logs /opt/aitbc/data
|
||||
|
||||
# Resource limits
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
|
||||
# GPU access (if available)
|
||||
DeviceAllow=/dev/nvidia0 rw
|
||||
DeviceAllow=/dev/nvidiactl rw
|
||||
DeviceAllow=/dev/nvidia-uvm rw
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,38 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Enterprise API Gateway - Multi-tenant API Management
|
||||
After=network.target
|
||||
Wants=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=aitbc
|
||||
Group=aitbc
|
||||
WorkingDirectory=/opt/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/opt/aitbc/.venv/bin
|
||||
Environment=PYTHONPATH=/opt/aitbc/apps/coordinator-api/src
|
||||
ExecStart=/opt/aitbc/.venv/bin/python -m app.services.enterprise_api_gateway
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-enterprise-api
|
||||
|
||||
# Security settings
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/opt/aitbc/logs /opt/aitbc/data
|
||||
|
||||
# Resource limits
|
||||
LimitNOFILE=65536
|
||||
LimitNPROC=4096
|
||||
|
||||
# Performance settings
|
||||
Nice=-5
|
||||
IOSchedulingClass=best-effort
|
||||
IOSchedulingPriority=0
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,37 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC GPU Multi-Modal Processing Service
|
||||
After=network.target aitbc-coordinator-api.service
|
||||
Wants=aitbc-coordinator-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debian
|
||||
Group=debian
|
||||
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
Environment=CUDA_VISIBLE_DEVICES=0
|
||||
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.gpu_multimodal_app:app --host 127.0.0.1 --port 8003
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=5
|
||||
PrivateTmp=true
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-gpu-multimodal
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
|
||||
|
||||
# GPU Access
|
||||
DeviceAllow=/dev/nvidia0 rwm
|
||||
DevicePolicy=auto
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,32 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Enhanced Marketplace Service
|
||||
After=network.target aitbc-coordinator-api.service
|
||||
Wants=aitbc-coordinator-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=oib
|
||||
Group=oib
|
||||
WorkingDirectory=/home/oib/windsurf/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/home/oib/windsurf/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ExecStart=/home/oib/windsurf/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.routers.marketplace_enhanced_app:app --host 127.0.0.1 --port 8006
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=5
|
||||
PrivateTmp=true
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-marketplace-enhanced
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/home/oib/windsurf/aitbc/apps/coordinator-api
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,32 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Modality Optimization Service
|
||||
After=network.target aitbc-coordinator-api.service
|
||||
Wants=aitbc-coordinator-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debian
|
||||
Group=debian
|
||||
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.modality_optimization_app:app --host 127.0.0.1 --port 8004
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=5
|
||||
PrivateTmp=true
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-modality-optimization
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,32 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC Multi-Modal Agent Service
|
||||
After=network.target aitbc-coordinator-api.service
|
||||
Wants=aitbc-coordinator-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debian
|
||||
Group=debian
|
||||
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.multimodal_app:app --host 127.0.0.1 --port 8002
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=5
|
||||
PrivateTmp=true
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-multimodal
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,32 +0,0 @@
|
||||
[Unit]
|
||||
Description=AITBC OpenClaw Enhanced Service
|
||||
After=network.target aitbc-coordinator-api.service
|
||||
Wants=aitbc-coordinator-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debian
|
||||
Group=debian
|
||||
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.routers.openclaw_enhanced_app:app --host 127.0.0.1 --port 8007
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
KillMode=mixed
|
||||
TimeoutStopSec=5
|
||||
PrivateTmp=true
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
|
||||
# Logging
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=aitbc-openclaw-enhanced
|
||||
|
||||
# Security
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
248
apps/marketplace-web/scripts/deploy_edge_node.py
Executable file
248
apps/marketplace-web/scripts/deploy_edge_node.py
Executable file
@@ -0,0 +1,248 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Edge Node Deployment Script for AITBC Marketplace
|
||||
Deploys edge node configuration and services
|
||||
"""
|
||||
|
||||
import yaml
|
||||
import subprocess
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
def load_config(config_file):
    """Read *config_file* (YAML) and return its parsed contents as a dict."""
    with open(config_file, 'r') as stream:
        parsed = yaml.safe_load(stream)
    return parsed
|
||||
|
||||
def deploy_redis_cache(config):
    """Ensure a Redis server is installed and running, then apply cache settings.

    Probes Redis with PING; starts (or installs via apt, then starts) the
    server when it is not responding, and finally pushes the memory/eviction
    settings from the ``caching`` section of the config. Individual CONFIG SET
    failures are reported but do not abort the deployment.

    Args:
        config: Parsed edge-node configuration; reads
            ``edge_node_config.node_id`` and ``edge_node_config.caching``.
    """
    node_id = config['edge_node_config']['node_id']
    print(f"🔧 Deploying Redis cache for {node_id}")

    # Check if Redis is running
    try:
        result = subprocess.run(['redis-cli', 'ping'], capture_output=True, text=True)
        if result.stdout.strip() == 'PONG':
            print("✅ Redis is already running")
        else:
            print("⚠️ Redis not responding, attempting to start...")
            # Start Redis if not running
            subprocess.run(['sudo', 'systemctl', 'start', 'redis-server'], check=True)
            print("✅ Redis started")
    except FileNotFoundError:
        # redis-cli is missing entirely: install the server package first.
        print("❌ Redis not installed, installing...")
        subprocess.run(['sudo', 'apt-get', 'update'], check=True)
        subprocess.run(['sudo', 'apt-get', 'install', '-y', 'redis-server'], check=True)
        subprocess.run(['sudo', 'systemctl', 'start', 'redis-server'], check=True)
        print("✅ Redis installed and started")

    # Configure Redis
    redis_config = config['edge_node_config']['caching']

    # Build argv lists directly instead of splitting formatted strings —
    # safer if a value ever contains whitespace.
    # NOTE(review): Redis's `timeout` directive is the *client idle* timeout,
    # not a key TTL — confirm cache_ttl_seconds is really meant here.
    redis_commands = [
        ['CONFIG', 'SET', 'maxmemory', f"{redis_config['max_memory_mb']}mb"],
        ['CONFIG', 'SET', 'maxmemory-policy', 'allkeys-lru'],
        ['CONFIG', 'SET', 'timeout', str(redis_config['cache_ttl_seconds'])],
    ]

    for cmd in redis_commands:
        try:
            subprocess.run(['redis-cli', *cmd], check=True, capture_output=True)
        except subprocess.CalledProcessError:
            print(f"⚠️ Could not set Redis config: {' '.join(cmd)}")
|
||||
|
||||
def deploy_monitoring(config):
    """Install and start a systemd-based monitoring agent for this edge node.

    Writes a health-check shell script to /tmp/aitbc-monitoring and a systemd
    unit to /etc/systemd/system, then reloads systemd and enables/starts the
    unit. Requires root privileges (the unit-file write is not sudo-wrapped).

    Args:
        config: Parsed edge-node configuration; reads
            ``edge_node_config.node_id`` and the first entry of
            ``edge_node_config.services`` (its ``port``).
    """
    node_id = config['edge_node_config']['node_id']
    print(f"📊 Deploying monitoring for {node_id}")

    # Create monitoring directory
    os.makedirs('/tmp/aitbc-monitoring', exist_ok=True)

    # Health-check script.
    # NOTE(review): the generated status.json starts with a brace-wrapped
    # line and then appends key=value lines, so it is not valid JSON —
    # confirm what its consumer expects before relying on the format.
    monitoring_script = f"""#!/bin/bash
# Monitoring script for {node_id}
echo "{{{{'timestamp': '$(date -Iseconds)', 'node_id': '{node_id}', 'status': 'monitoring'}}}}" > /tmp/aitbc-monitoring/status.json

# Check marketplace API health
curl -s http://localhost:{config['edge_node_config']['services'][0]['port']}/health/live > /dev/null
if [ $? -eq 0 ]; then
echo "marketplace_healthy=true" >> /tmp/aitbc-monitoring/status.json
else
echo "marketplace_healthy=false" >> /tmp/aitbc-monitoring/status.json
fi

# Check Redis health
redis-cli ping > /dev/null
if [ $? -eq 0 ]; then
echo "redis_healthy=true" >> /tmp/aitbc-monitoring/status.json
else
echo "redis_healthy=false" >> /tmp/aitbc-monitoring/status.json
fi
"""

    with open('/tmp/aitbc-monitoring/monitor.sh', 'w') as f:
        f.write(monitoring_script)

    os.chmod('/tmp/aitbc-monitoring/monitor.sh', 0o755)

    # systemd unit for the monitor. The script runs once and exits;
    # Restart=always + RestartSec=30 is what turns it into a 30s poller.
    monitoring_service = f"""[Unit]
Description=AITBC Edge Node Monitoring - {node_id}
After=network.target

[Service]
Type=simple
User=root
ExecStart=/tmp/aitbc-monitoring/monitor.sh
Restart=always
RestartSec=30

[Install]
WantedBy=multi-user.target
"""

    service_file = f"/etc/systemd/system/aitbc-edge-monitoring-{node_id}.service"

    # NOTE(review): this write itself needs root, unlike the sudo-wrapped
    # systemctl calls below — confirm the script always runs as root.
    with open(service_file, 'w') as f:
        f.write(monitoring_service)

    # Enable and start monitoring service
    unit_name = f'aitbc-edge-monitoring-{node_id}.service'
    subprocess.run(['sudo', 'systemctl', 'daemon-reload'], check=True)
    subprocess.run(['sudo', 'systemctl', 'enable', unit_name], check=True)
    subprocess.run(['sudo', 'systemctl', 'start', unit_name], check=True)

    print("✅ Monitoring agent deployed")
|
||||
|
||||
def optimize_network(config):
    """Apply kernel TCP tuning (via sudo sysctl) for edge-node traffic.

    Failures on individual parameters are reported and skipped, so this
    still works on kernels lacking e.g. the BBR congestion-control module.

    Args:
        config: Parsed edge-node configuration; reads only
            ``edge_node_config.node_id`` (for logging).
    """
    print(f"🌐 Optimizing network for {config['edge_node_config']['node_id']}")

    # Larger socket buffers, BBR congestion control, deeper device backlog.
    # (The original also read config['edge_node_config']['network'] into an
    # unused local; removed — the values below are fixed.)
    tcp_params = {
        'net.core.rmem_max': '16777216',
        'net.core.wmem_max': '16777216',
        'net.ipv4.tcp_rmem': '4096 87380 16777216',
        'net.ipv4.tcp_wmem': '4096 65536 16777216',
        'net.ipv4.tcp_congestion_control': 'bbr',
        'net.core.netdev_max_backlog': '5000'
    }

    for param, value in tcp_params.items():
        try:
            subprocess.run(['sudo', 'sysctl', '-w', f'{param}={value}'],
                           check=True, capture_output=True)
            print(f"✅ Set {param}={value}")
        except subprocess.CalledProcessError:
            print(f"⚠️ Could not set {param}")
|
||||
|
||||
def deploy_edge_services(config):
    """Write a JSON snapshot of this node's service configuration to /tmp."""
    edge = config['edge_node_config']
    print(f"🚀 Deploying edge services for {edge['node_id']}")

    # Snapshot of what was deployed, stamped with the deployment time.
    snapshot = {
        'node_id': edge['node_id'],
        'region': edge['region'],
        'services': edge['services'],
        'performance_targets': edge['performance_targets'],
        'deployed_at': datetime.now().isoformat(),
    }

    target = f'/tmp/aitbc-edge-{edge["node_id"]}-config.json'
    with open(target, 'w') as out:
        json.dump(snapshot, out, indent=2)

    print("✅ Edge services configuration saved")
|
||||
|
||||
def validate_deployment(config):
    """Probe each deployed component and report its health.

    Args:
        config: Parsed edge-node configuration; reads
            ``edge_node_config.node_id`` and the first service's ``port``.

    Returns:
        dict mapping component name ('marketplace_api', 'redis',
        'monitoring') to a status string: 'healthy'/'unhealthy', a
        systemctl state, or 'error: ...' when the probe itself failed.
    """
    node_id = config['edge_node_config']['node_id']
    print(f"✅ Validating deployment for {node_id}")

    validation_results = {}

    # Marketplace API liveness via curl; curl exits 0 only on success.
    try:
        response = subprocess.run(
            ['curl', '-s', f'http://localhost:{config["edge_node_config"]["services"][0]["port"]}/health/live'],
            capture_output=True, text=True, timeout=10)
        # BUG FIX: subprocess.CompletedProcess has no .status_code attribute;
        # the resulting AttributeError was swallowed by the except below, so
        # this check always reported an error. Use .returncode instead.
        if response.returncode == 0:
            validation_results['marketplace_api'] = 'healthy'
        else:
            validation_results['marketplace_api'] = 'unhealthy'
    except Exception as e:
        validation_results['marketplace_api'] = f'error: {str(e)}'

    # Redis liveness via PING.
    try:
        result = subprocess.run(['redis-cli', 'ping'], capture_output=True, text=True, timeout=5)
        if result.stdout.strip() == 'PONG':
            validation_results['redis'] = 'healthy'
        else:
            validation_results['redis'] = 'unhealthy'
    except Exception as e:
        validation_results['redis'] = f'error: {str(e)}'

    # Monitoring unit state exactly as systemd reports it (e.g. 'active').
    try:
        result = subprocess.run(
            ['systemctl', 'is-active', f'aitbc-edge-monitoring-{node_id}.service'],
            capture_output=True, text=True, timeout=5)
        validation_results['monitoring'] = result.stdout.strip()
    except Exception as e:
        validation_results['monitoring'] = f'error: {str(e)}'

    print(f"📊 Validation Results:")
    for service, status in validation_results.items():
        print(f"  {service}: {status}")

    return validation_results
|
||||
|
||||
def main():
    """CLI entry point: deploy an edge node from a YAML configuration file."""
    if len(sys.argv) != 2:
        print("Usage: python deploy_edge_node.py <config_file>")
        sys.exit(1)

    config_file = sys.argv[1]

    if not os.path.exists(config_file):
        print(f"❌ Configuration file {config_file} not found")
        sys.exit(1)

    try:
        config = load_config(config_file)
        edge = config['edge_node_config']

        print(f"🚀 Deploying edge node: {edge['node_id']}")
        print(f"📍 Region: {edge['region']}")
        print(f"🌍 Location: {edge['location']}")

        # Roll out each component in order, then validate the result.
        for step in (deploy_redis_cache, deploy_monitoring,
                     optimize_network, deploy_edge_services):
            step(config)

        validation_results = validate_deployment(config)

        # Persist a deployment record next to the service configuration.
        deployment_status = {
            'node_id': edge['node_id'],
            'deployment_time': datetime.now().isoformat(),
            'validation_results': validation_results,
            'status': 'completed',
        }

        with open(f'/tmp/aitbc-edge-{edge["node_id"]}-deployment.json', 'w') as out:
            json.dump(deployment_status, out, indent=2)

        print(f"✅ Edge node deployment completed for {edge['node_id']}")

    except Exception as e:
        print(f"❌ Deployment failed: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user