chore(systemd): remove obsolete systemd service files and update infrastructure documentation

- Remove 8 unused systemd service files from coordinator-api/systemd/
  - aitbc-adaptive-learning.service (port 8005)
  - aitbc-advanced-ai.service
  - aitbc-enterprise-api.service
  - aitbc-gpu-multimodal.service (port 8003)
  - aitbc-marketplace-enhanced.service (port 8006)
  - aitbc-modality-optimization.service (port 8004)
  - aitbc-multimodal.service (port 8002)
  - aitbc-openclaw-enhanced.service (port 8007)
This commit is contained in:
oib
2026-03-04 12:16:50 +01:00
parent 581309369d
commit 50954a4b31
101 changed files with 1655 additions and 4871 deletions

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Poll the coordinator proxy health endpoint until it answers or retries run out.
# Exits 0 on the first successful probe, 1 after MAX_RETRIES failed attempts.
set -euo pipefail

readonly HEALTH_URL="http://127.0.0.1:18000/v1/health"
readonly MAX_RETRIES=10
readonly RETRY_DELAY=2

for ((i = 1; i <= MAX_RETRIES; i++)); do
  # -f: fail on HTTP errors; -sS: silent but show errors; bounded by --max-time.
  if curl -fsS --max-time 5 "$HEALTH_URL" >/dev/null 2>&1; then
    echo "Coordinator proxy healthy: $HEALTH_URL"
    exit 0
  fi
  echo "Attempt $i/$MAX_RETRIES: Coordinator proxy not ready yet, waiting ${RETRY_DELAY}s..."
  sleep "$RETRY_DELAY"
done

echo "Coordinator proxy health check FAILED: $HEALTH_URL" >&2
exit 1

View File

@@ -1,32 +0,0 @@
# systemd unit: adaptive-learning sidecar (uvicorn, loopback port 8005).
[Unit]
Description=AITBC Adaptive Learning Service
# Start after the coordinator API; Wants= keeps the dependency non-fatal.
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
# NOTE(review): runs as user 'debian' but every path below is under /home/oib —
# confirm file ownership/permissions allow this.
User=debian
Group=debian
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
# Prepend the project venv so 'python' and console scripts resolve there first.
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.adaptive_learning_app:app --host 127.0.0.1 --port 8005
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true
Restart=on-failure
RestartSec=10
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=aitbc-adaptive-learning
# Security
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): ProtectHome=true hides /home from the service while
# ReadWritePaths below points under /home/oib — verify the path is actually
# reachable, or consider ProtectHome=read-only.
ProtectHome=true
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
[Install]
WantedBy=multi-user.target

View File

@@ -1,37 +0,0 @@
# systemd unit: GPU multi-modal worker (uvicorn, loopback port 8003).
[Unit]
Description=AITBC GPU Multi-Modal Processing Service
# Start after the coordinator API; Wants= keeps the dependency non-fatal.
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
# NOTE(review): runs as user 'debian' but every path below is under /home/oib —
# confirm file ownership/permissions allow this.
User=debian
Group=debian
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
# Prepend the project venv so 'python' and console scripts resolve there first.
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# Pin the service to GPU index 0.
Environment=CUDA_VISIBLE_DEVICES=0
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.gpu_multimodal_app:app --host 127.0.0.1 --port 8003
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true
Restart=on-failure
RestartSec=10
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=aitbc-gpu-multimodal
# Security
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): ProtectHome=true hides /home from the service while
# ReadWritePaths below points under /home/oib — verify the path is reachable.
ProtectHome=true
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
# GPU Access
# NOTE(review): only /dev/nvidia0 is allowed; nvidiactl/nvidia-uvm may also be
# required for CUDA — confirm on the target host.
DeviceAllow=/dev/nvidia0 rwm
DevicePolicy=auto
[Install]
WantedBy=multi-user.target

View File

@@ -1,32 +0,0 @@
# systemd unit: enhanced marketplace worker (uvicorn, loopback port 8006).
# Unlike sibling units, this one runs as user 'oib' under /home/oib/windsurf.
[Unit]
Description=AITBC Enhanced Marketplace Service
# Start after the coordinator API; Wants= keeps the dependency non-fatal.
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
User=oib
Group=oib
WorkingDirectory=/home/oib/windsurf/aitbc/apps/coordinator-api
# Prepend the project venv so 'python' and console scripts resolve there first.
Environment=PATH=/home/oib/windsurf/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/windsurf/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.routers.marketplace_enhanced_app:app --host 127.0.0.1 --port 8006
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true
Restart=on-failure
RestartSec=10
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=aitbc-marketplace-enhanced
# Security
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): ProtectHome=true hides /home from the service while
# ReadWritePaths below points under /home/oib — verify the path is reachable.
ProtectHome=true
ReadWritePaths=/home/oib/windsurf/aitbc/apps/coordinator-api
[Install]
WantedBy=multi-user.target

View File

@@ -1,32 +0,0 @@
# systemd unit: modality-optimization worker (uvicorn, loopback port 8004).
[Unit]
Description=AITBC Modality Optimization Service
# Start after the coordinator API; Wants= keeps the dependency non-fatal.
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
# NOTE(review): runs as user 'debian' but every path below is under /home/oib —
# confirm file ownership/permissions allow this.
User=debian
Group=debian
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
# Prepend the project venv so 'python' and console scripts resolve there first.
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.modality_optimization_app:app --host 127.0.0.1 --port 8004
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true
Restart=on-failure
RestartSec=10
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=aitbc-modality-optimization
# Security
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): ProtectHome=true hides /home from the service while
# ReadWritePaths below points under /home/oib — verify the path is reachable.
ProtectHome=true
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
[Install]
WantedBy=multi-user.target

View File

@@ -1,32 +0,0 @@
# systemd unit: multi-modal agent worker (uvicorn, loopback port 8002).
[Unit]
Description=AITBC Multi-Modal Agent Service
# Start after the coordinator API; Wants= keeps the dependency non-fatal.
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
# NOTE(review): runs as user 'debian' but every path below is under /home/oib —
# confirm file ownership/permissions allow this.
User=debian
Group=debian
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
# Prepend the project venv so 'python' and console scripts resolve there first.
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.services.multimodal_app:app --host 127.0.0.1 --port 8002
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true
Restart=on-failure
RestartSec=10
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=aitbc-multimodal
# Security
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): ProtectHome=true hides /home from the service while
# ReadWritePaths below points under /home/oib — verify the path is reachable.
ProtectHome=true
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
[Install]
WantedBy=multi-user.target

View File

@@ -1,32 +0,0 @@
# systemd unit: OpenClaw enhanced worker (uvicorn, loopback port 8007).
[Unit]
Description=AITBC OpenClaw Enhanced Service
# Start after the coordinator API; Wants= keeps the dependency non-fatal.
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
# NOTE(review): runs as user 'debian' but every path below is under /home/oib —
# confirm file ownership/permissions allow this.
User=debian
Group=debian
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
# Prepend the project venv so 'python' and console scripts resolve there first.
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.routers.openclaw_enhanced_app:app --host 127.0.0.1 --port 8007
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
PrivateTmp=true
Restart=on-failure
RestartSec=10
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=aitbc-openclaw-enhanced
# Security
NoNewPrivileges=true
ProtectSystem=strict
# NOTE(review): ProtectHome=true hides /home from the service while
# ReadWritePaths below points under /home/oib — verify the path is reachable.
ProtectHome=true
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Deploy GPU Miner to AITBC Container - All in One
#
# Copies the registry/miner scripts to host 'aitbc' over SSH, provisions a
# Python venv with dependencies, installs and starts two systemd units, and
# finally verifies the miner registered against the registry.
#
# FIX: was only 'set -e'; -u catches unset variables and pipefail makes the
# final curl|python verification pipeline fail loudly if curl fails.
set -euo pipefail

echo "🚀 Deploying GPU Miner to AITBC Container..."

# Step 1: Copy files
echo "1. Copying GPU scripts..."
scp -o StrictHostKeyChecking=no /home/oib/windsurf/aitbc/gpu_registry_demo.py aitbc:/home/oib/
scp -o StrictHostKeyChecking=no /home/oib/windsurf/aitbc/gpu_miner_with_wait.py aitbc:/home/oib/

# Step 2: Install Python and deps
echo "2. Installing Python and dependencies..."
ssh aitbc 'sudo apt-get update -qq'
ssh aitbc 'sudo apt-get install -y -qq python3 python3-venv python3-pip'
ssh aitbc 'python3 -m venv /home/oib/.venv-gpu'
ssh aitbc '/home/oib/.venv-gpu/bin/pip install -q fastapi uvicorn httpx psutil'

# Step 3: Create GPU registry service
# The quoted 'EOF' delimiter keeps the unit text literal (no local expansion).
echo "3. Creating GPU registry service..."
ssh aitbc "sudo tee /etc/systemd/system/aitbc-gpu-registry.service >/dev/null <<'EOF'
[Unit]
Description=AITBC GPU Registry
After=network.target
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/home/oib/.venv-gpu/bin/python /home/oib/gpu_registry_demo.py
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF"

# Step 4: Start GPU registry
echo "4. Starting GPU registry..."
ssh aitbc 'sudo systemctl daemon-reload'
ssh aitbc 'sudo systemctl enable --now aitbc-gpu-registry.service'

# Step 5: Create GPU miner service
echo "5. Creating GPU miner service..."
ssh aitbc "sudo tee /etc/systemd/system/aitbc-gpu-miner.service >/dev/null <<'EOF'
[Unit]
Description=AITBC GPU Miner Client
After=network.target aitbc-gpu-registry.service
Wants=aitbc-gpu-registry.service
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/home/oib/.venv-gpu/bin/python /home/oib/gpu_miner_with_wait.py
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF"

# Step 6: Start GPU miner
echo "6. Starting GPU miner..."
ssh aitbc 'sudo systemctl daemon-reload'
ssh aitbc 'sudo systemctl enable --now aitbc-gpu-miner.service'

# Step 7: Check services
echo "7. Checking services..."
echo -e "\n=== GPU Registry Service ==="
ssh aitbc 'sudo systemctl status aitbc-gpu-registry.service --no-pager'
echo -e "\n=== GPU Miner Service ==="
ssh aitbc 'sudo systemctl status aitbc-gpu-miner.service --no-pager'

# Step 8: Verify GPU registration
echo -e "\n8. Verifying GPU registration..."
sleep 3
echo " curl http://10.1.223.93:8091/miners/list"
curl -s http://10.1.223.93:8091/miners/list | python3 -c "import sys,json; data=json.load(sys.stdin); print(f'✅ Found {len(data.get(\"gpus\", []))} GPU(s)'); [print(f' - {gpu[\"capabilities\"][\"gpu\"][\"model\"]} ({gpu[\"capabilities\"][\"gpu\"][\"memory_gb\"]}GB)') for gpu in data.get('gpus', [])]"

echo -e "\n✅ Deployment complete!"
echo "GPU Registry: http://10.1.223.93:8091"
echo "GPU Miner: Running and sending heartbeats"

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Deploy GPU Miner to AITBC Container (via incus file push / exec)
#
# FIX: the original had no error handling at all — a failed 'incus file push'
# or pip install would silently continue into the systemctl steps. Strict mode
# aborts on the first failure (the initial 'whoami' doubles as an access check).
set -euo pipefail

echo "🚀 Deploying GPU Miner to AITBC Container..."

# Check if container is accessible
echo "1. Checking container access..."
sudo incus exec aitbc -- whoami

# Copy GPU miner files
echo "2. Copying GPU miner files..."
sudo incus file push /home/oib/windsurf/aitbc/gpu_miner_with_wait.py aitbc/home/oib/
sudo incus file push /home/oib/windsurf/aitbc/gpu_registry_demo.py aitbc/home/oib/

# Install dependencies
echo "3. Installing dependencies..."
sudo incus exec aitbc -- pip install httpx fastapi uvicorn psutil

# Create GPU miner service
# Quoted 'EOF' keeps the unit text literal; the file is staged on the host's
# /tmp, pushed into the container, then moved into /etc/systemd/system.
echo "4. Creating GPU miner service..."
cat << 'EOF' | sudo tee /tmp/gpu-miner.service
[Unit]
Description=AITBC GPU Miner Client
After=network.target
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/usr/bin/python3 gpu_miner_with_wait.py
Restart=always
RestartSec=30
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
sudo incus file push /tmp/gpu-miner.service aitbc/tmp/
sudo incus exec aitbc -- sudo mv /tmp/gpu-miner.service /etc/systemd/system/
sudo incus exec aitbc -- sudo systemctl daemon-reload
sudo incus exec aitbc -- sudo systemctl enable gpu-miner.service
sudo incus exec aitbc -- sudo systemctl start gpu-miner.service

# Create GPU registry service
echo "5. Creating GPU registry service..."
cat << 'EOF' | sudo tee /tmp/gpu-registry.service
[Unit]
Description=AITBC GPU Registry
After=network.target
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/usr/bin/python3 gpu_registry_demo.py
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
sudo incus file push /tmp/gpu-registry.service aitbc/tmp/
sudo incus exec aitbc -- sudo mv /tmp/gpu-registry.service /etc/systemd/system/
sudo incus exec aitbc -- sudo systemctl daemon-reload
sudo incus exec aitbc -- sudo systemctl enable gpu-registry.service
sudo incus exec aitbc -- sudo systemctl start gpu-registry.service

# Check services
echo "6. Checking services..."
echo "GPU Miner Service:"
sudo incus exec aitbc -- sudo systemctl status gpu-miner.service --no-pager
echo -e "\nGPU Registry Service:"
sudo incus exec aitbc -- sudo systemctl status gpu-registry.service --no-pager

# Show access URLs
echo -e "\n✅ Deployment complete!"
echo "Access URLs:"
echo " - Container IP: 10.1.223.93"
echo " - GPU Registry: http://10.1.223.93:8091/miners/list"
echo " - Coordinator API: http://10.1.223.93:8000"
echo -e "\nTo check GPU status:"
echo " curl http://10.1.223.93:8091/miners/list"

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""
GPU Exchange Integration Demo
Shows how the GPU miner is integrated with the exchange

Probes the GPU registry, trade exchange, and blockchain node over HTTP and
prints a human-readable status report.

Fixes: added explicit timeouts to all HTTP probes (previously an unreachable
service could hang the script indefinitely) and replaced bare `except:`
clauses with `except Exception` so Ctrl-C still interrupts the script.
"""
import json
import httpx
import subprocess
import time
from datetime import datetime

print("🔗 AITBC GPU Exchange Integration")
print("=" * 50)

# Check GPU Registry
print("\n1. 📊 Checking GPU Registry...")
try:
    response = httpx.get("http://localhost:8091/miners/list", timeout=5)
    if response.status_code == 200:
        data = response.json()
        gpus = data.get("gpus", [])
        print(f" Found {len(gpus)} registered GPU(s)")
        for gpu in gpus:
            print(f"\n 🎮 GPU Details:")
            print(f" Model: {gpu['capabilities']['gpu']['model']}")
            print(f" Memory: {gpu['capabilities']['gpu']['memory_gb']} GB")
            print(f" CUDA: {gpu['capabilities']['gpu']['cuda_version']}")
            print(f" Status: {gpu.get('status', 'Unknown')}")
            print(f" Region: {gpu.get('region', 'Unknown')}")
    else:
        print(" ❌ GPU Registry not accessible")
except Exception as e:
    print(f" ❌ Error: {e}")

# Check Exchange
print("\n2. 💰 Checking Trade Exchange...")
try:
    response = httpx.get("http://localhost:3002", timeout=5)
    if response.status_code == 200:
        print(" ✅ Trade Exchange is running")
        print(" 🌐 URL: http://localhost:3002")
    else:
        print(" ❌ Trade Exchange not responding")
except Exception:
    print(" ❌ Trade Exchange not accessible")

# Check Blockchain
print("\n3. ⛓️ Checking Blockchain Node...")
try:
    response = httpx.get("http://localhost:9080/rpc/head", timeout=5)
    if response.status_code == 200:
        data = response.json()
        print(f" ✅ Blockchain Node active")
        print(f" Block Height: {data.get('height', 'Unknown')}")
        print(f" Block Hash: {data.get('hash', 'Unknown')[:16]}...")
    else:
        print(" ❌ Blockchain Node not responding")
except Exception:
    print(" ❌ Blockchain Node not accessible")

# Show Integration Points
print("\n4. 🔌 Integration Points:")
print(" • GPU Registry: http://localhost:8091/miners/list")
print(" • Trade Exchange: http://localhost:3002")
print(" • Blockchain RPC: http://localhost:9080")
print(" • GPU Marketplace: Exchange > Browse GPU Marketplace")

# Show API Usage
print("\n5. 📡 API Usage Examples:")
print("\n Get registered GPUs:")
print(" curl http://localhost:8091/miners/list")
print("\n Get GPU details:")
print(" curl http://localhost:8091/miners/localhost-gpu-miner")
print("\n Get blockchain info:")
print(" curl http://localhost:9080/rpc/head")

# Show Current Status
# NOTE(review): these lines are printed unconditionally even when the probes
# above failed — they describe the expected topology, not measured state.
print("\n6. 📈 Current System Status:")
print(" ✅ GPU Miner: Running (systemd)")
print(" ✅ GPU Registry: Running on port 8091")
print(" ✅ Trade Exchange: Running on port 3002")
print(" ✅ Blockchain Node: Running on port 9080")
print("\n" + "=" * 50)
print("🎯 GPU is successfully integrated with the exchange!")
print("\nNext steps:")
print("1. Open http://localhost:3002 in your browser")
print("2. Click 'Browse GPU Marketplace'")
print("3. View the registered RTX 4060 Ti GPU")
print("4. Purchase GPU compute time with AITBC tokens")
467
dev/gpu/gpu_miner_host.py Normal file
View File

@@ -0,0 +1,467 @@
#!/usr/bin/env python3
"""
Real GPU Miner Client for AITBC - runs on host with actual GPU

Registers with the coordinator, sends periodic heartbeats with nvidia-smi
stats, polls for jobs, and executes inference jobs via a local Ollama server.
"""
import json
import time
import httpx
import logging
import sys
import subprocess
import os
from datetime import datetime
from typing import Dict, Optional

# Configuration (overridable via environment)
COORDINATOR_URL = os.environ.get("COORDINATOR_URL", "http://127.0.0.1:9080")
# NOTE(review): MINER_ID and AUTH_TOKEN both read the MINER_API_KEY variable —
# MINER_ID presumably should read a separate MINER_ID env var; confirm.
MINER_ID = os.environ.get("MINER_API_KEY", "miner_test")
AUTH_TOKEN = os.environ.get("MINER_API_KEY", "miner_test")
HEARTBEAT_INTERVAL = 15  # seconds between heartbeats
MAX_RETRIES = 10         # coordinator wait attempts
RETRY_DELAY = 30         # seconds between wait attempts

# Setup logging with explicit configuration; the log directory is created
# eagerly so FileHandler below cannot fail on a missing path.
LOG_PATH = "/home/oib/windsurf/aitbc/logs/host_gpu_miner.log"
os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)
class FlushHandler(logging.StreamHandler):
    """StreamHandler that flushes after every record.

    NOTE(review): logging.StreamHandler.emit already flushes per record in
    CPython, so this subclass may be redundant — kept as defensive belt-and-
    braces for buffered stdout under systemd.
    """
    def emit(self, record):
        super().emit(record)
        self.flush()
# Log to both stdout (for journald) and a persistent file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        FlushHandler(sys.stdout),
        logging.FileHandler(LOG_PATH)
    ]
)
logger = logging.getLogger(__name__)

# Force stdout to be unbuffered so log lines appear immediately even when the
# process is not attached to a TTY (e.g. under systemd without `python -u`).
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)
# Substring of the GPU model number -> NVIDIA microarchitecture label.
ARCH_MAP = {
    "4090": "ada_lovelace",
    "4080": "ada_lovelace",
    "4070": "ada_lovelace",
    "4060": "ada_lovelace",
    "3090": "ampere",
    "3080": "ampere",
    "3070": "ampere",
    "3060": "ampere",
    "2080": "turing",
    "2070": "turing",
    "2060": "turing",
    "1080": "pascal",
    "1070": "pascal",
    "1060": "pascal",
}

def classify_architecture(name: str) -> str:
    """Best-effort architecture label for a GPU model string.

    Matches model-number substrings case-insensitively via ARCH_MAP, then
    falls back to "datacenter" for A100/V100/P100 cards, else "unknown".
    """
    normalized = name.upper()
    matched = next(
        (arch for fragment, arch in ARCH_MAP.items() if fragment in normalized),
        None,
    )
    if matched is not None:
        return matched
    if any(dc in normalized for dc in ("A100", "V100", "P100")):
        return "datacenter"
    return "unknown"
def detect_cuda_version() -> Optional[str]:
    """Return the NVIDIA driver version string from nvidia-smi, or None.

    NOTE(review): despite the name, this queries 'driver_version', not the
    CUDA toolkit/runtime version; callers store the value under
    'cuda_version' — confirm which is intended.
    """
    try:
        result = subprocess.run(["nvidia-smi", "--query-gpu=driver_version", "--format=csv,noheader"],
                                capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            return result.stdout.strip()
    except Exception as e:
        # nvidia-smi missing or timed out — treated as "no version available".
        logger.error(f"Failed to detect CUDA/driver version: {e}")
    return None
def build_gpu_capabilities() -> Dict:
    """Assemble the capability payload sent to the coordinator on registration."""
    info = get_gpu_info()
    detected_version = detect_cuda_version() or "unknown"

    if info:
        model_name = info["name"]
        total_memory = info["memory_total"]
    else:
        model_name = "Unknown GPU"
        total_memory = 0

    architecture = classify_architecture(model_name) if model_name else "unknown"
    # Recent consumer architectures are flagged as suitable for edge workloads.
    is_edge_optimized = architecture in {"ada_lovelace", "ampere", "turing"}

    return {
        "gpu": {
            "model": model_name,
            "architecture": architecture,
            "consumer_grade": True,
            "edge_optimized": is_edge_optimized,
            "memory_gb": total_memory,
            "cuda_version": detected_version,
            "platform": "CUDA",
            "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
            "max_concurrent_jobs": 1
        }
    }
def measure_coordinator_latency() -> float:
    """Round-trip time to the coordinator /v1/health endpoint, in milliseconds.

    Returns -1.0 when the endpoint is unreachable or answers non-200.
    """
    # FIX: use the monotonic perf_counter instead of time.time(); a wall-clock
    # adjustment mid-request could otherwise yield a negative/garbage latency.
    start = time.perf_counter()
    try:
        resp = httpx.get(f"{COORDINATOR_URL}/v1/health", timeout=3)
        if resp.status_code == 200:
            return (time.perf_counter() - start) * 1000
    except Exception:
        pass
    return -1.0
def get_gpu_info():
    """Get real GPU information via nvidia-smi.

    Returns a dict with keys 'name', 'memory_total', 'memory_used' (MiB, per
    --format=nounits) and 'utilization' (percent), or None when nvidia-smi is
    unavailable, fails, or times out.
    """
    try:
        result = subprocess.run(['nvidia-smi', '--query-gpu=name,memory.total,memory.used,utilization.gpu',
                                 '--format=csv,noheader,nounits'],
                                capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            # NOTE(review): splitting on ', ' assumes a single GPU and that the
            # model name contains no comma — verify on multi-GPU hosts.
            info = result.stdout.strip().split(', ')
            return {
                "name": info[0],
                "memory_total": int(info[1]),
                "memory_used": int(info[2]),
                "utilization": int(info[3])
            }
    except Exception as e:
        logger.error(f"Failed to get GPU info: {e}")
    return None
def check_ollama():
    """Check if Ollama is running and has models.

    Queries the local Ollama HTTP API (port 11434) and returns a tuple
    (available: bool, model_names: list[str]); the list is empty on failure.
    """
    try:
        response = httpx.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            models = response.json().get('models', [])
            model_names = [m['name'] for m in models]
            logger.info(f"Ollama running with models: {model_names}")
            return True, model_names
        else:
            logger.error("Ollama not responding")
            return False, []
    except Exception as e:
        logger.error(f"Ollama check failed: {e}")
        return False, []
def wait_for_coordinator():
    """Block until the coordinator /v1/health answers 200, or give up.

    Retries MAX_RETRIES times with RETRY_DELAY seconds between attempts.
    Returns True on success, False if the coordinator never became reachable.
    """
    for i in range(MAX_RETRIES):
        try:
            response = httpx.get(f"{COORDINATOR_URL}/v1/health", timeout=5)
            if response.status_code == 200:
                logger.info("Coordinator is available!")
                return True
        except Exception:
            # FIX: was a bare 'except:', which also swallows KeyboardInterrupt
            # and SystemExit, making this long wait loop un-interruptible.
            pass
        logger.info(f"Waiting for coordinator... ({i+1}/{MAX_RETRIES})")
        time.sleep(RETRY_DELAY)
    logger.error("Coordinator not available after max retries")
    return False
def register_miner():
    """Register the miner with the coordinator.

    POSTs the GPU capability payload to /v1/miners/register, authenticated via
    the X-Api-Key header. Returns the session token from the response (falling
    back to "demo-token" when the field is absent), or None on any failure.
    """
    register_data = {
        "capabilities": build_gpu_capabilities(),
        "concurrency": 1,
        "region": "localhost"
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
            json=register_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            data = response.json()
            logger.info(f"Successfully registered miner: {data}")
            return data.get("session_token", "demo-token")
        else:
            logger.error(f"Registration failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Registration error: {e}")
        return None
def send_heartbeat():
    """Send one heartbeat to the coordinator with current GPU stats.

    When nvidia-smi is unavailable the GPU metrics are zeroed so the miner
    still reports as alive. Failures are logged, never raised.
    """
    gpu_info = get_gpu_info()
    arch = classify_architecture(gpu_info["name"]) if gpu_info else "unknown"
    latency_ms = measure_coordinator_latency()
    # FIX: the two nearly-identical dict literals (with/without GPU info) are
    # collapsed into one; per-field fallbacks produce the same values as before.
    heartbeat_data = {
        "status": "active",
        "current_jobs": 0,
        "last_seen": datetime.utcnow().isoformat(),
        "gpu_utilization": gpu_info["utilization"] if gpu_info else 0,
        "memory_used": gpu_info["memory_used"] if gpu_info else 0,
        "memory_total": gpu_info["memory_total"] if gpu_info else 0,
        "architecture": arch,
        # arch is "unknown" when gpu_info is None, so this is False then —
        # same as the original fallback branch.
        "edge_optimized": arch in {"ada_lovelace", "ampere", "turing"},
        "network_latency_ms": latency_ms,
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/heartbeat?miner_id={MINER_ID}",
            json=heartbeat_data,
            headers=headers,
            timeout=5
        )
        if response.status_code == 200:
            logger.info(f"Heartbeat sent (GPU: {gpu_info['utilization'] if gpu_info else 'N/A'}%)")
        else:
            logger.error(f"Heartbeat failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Heartbeat error: {e}")
def execute_job(job, available_models):
    """Execute a job using real GPU resources.

    Supports only payload type 'inference': runs the prompt through the local
    Ollama API and reports the outcome (success or failure) back to the
    coordinator via submit_result(). Returns True on success, False otherwise.

    Args:
        job: dict with 'job_id' and 'payload' keys, as returned by the
             coordinator's poll endpoint.
        available_models: list of Ollama model names; used to substitute the
             first available model when the requested one is missing.
    """
    job_id = job.get('job_id')
    payload = job.get('payload', {})
    logger.info(f"Executing job {job_id}: {payload}")
    try:
        if payload.get('type') == 'inference':
            # Get the prompt and model
            prompt = payload.get('prompt', '')
            model = payload.get('model', 'llama3.2:latest')
            # Check if model is available; fall back to the first installed one.
            if model not in available_models:
                # Use first available model
                if available_models:
                    model = available_models[0]
                    logger.info(f"Using available model: {model}")
                else:
                    raise Exception("No models available in Ollama")
            # Call Ollama API for real GPU inference (blocking, non-streamed).
            logger.info(f"Running inference on GPU with model: {model}")
            start_time = time.time()
            ollama_response = httpx.post(
                "http://localhost:11434/api/generate",
                json={
                    "model": model,
                    "prompt": prompt,
                    "stream": False
                },
                timeout=60
            )
            if ollama_response.status_code == 200:
                result = ollama_response.json()
                output = result.get('response', '')
                execution_time = time.time() - start_time
                # Get GPU stats after execution
                gpu_after = get_gpu_info()
                # Submit result back to coordinator
                submit_result(job_id, {
                    "result": {
                        "status": "completed",
                        "output": output,
                        "model": model,
                        "tokens_processed": result.get('eval_count', 0),
                        "execution_time": execution_time,
                        "gpu_used": True
                    },
                    "metrics": {
                        "gpu_utilization": gpu_after["utilization"] if gpu_after else 0,
                        "memory_used": gpu_after["memory_used"] if gpu_after else 0,
                        # NOTE(review): floor of 2048 MiB looks like a demo
                        # placeholder for peak memory — confirm intent.
                        "memory_peak": max(gpu_after["memory_used"] if gpu_after else 0, 2048)
                    }
                })
                logger.info(f"Job {job_id} completed in {execution_time:.2f}s")
                return True
            else:
                logger.error(f"Ollama error: {ollama_response.status_code}")
                submit_result(job_id, {
                    "result": {
                        "status": "failed",
                        "error": f"Ollama error: {ollama_response.text}"
                    }
                })
                return False
        else:
            # Unsupported job type
            logger.error(f"Unsupported job type: {payload.get('type')}")
            submit_result(job_id, {
                "result": {
                    "status": "failed",
                    "error": f"Unsupported job type: {payload.get('type')}"
                }
            })
            return False
    except Exception as e:
        # Any unexpected error is reported to the coordinator as a failed job.
        logger.error(f"Job execution error: {e}")
        submit_result(job_id, {
            "result": {
                "status": "failed",
                "error": str(e)
            }
        })
        return False
def submit_result(job_id, result):
    """POST a finished job's result payload back to the coordinator.

    Never raises: connection problems are logged and swallowed so a flaky
    coordinator does not take down the miner loop.
    """
    request_headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    endpoint = f"{COORDINATOR_URL}/v1/miners/{job_id}/result"
    try:
        response = httpx.post(
            endpoint,
            json=result,
            headers=request_headers,
            timeout=10
        )
        if response.status_code == 200:
            logger.info(f"Result submitted for job {job_id}")
        else:
            logger.error(f"Result submission failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Result submission error: {e}")
def poll_for_jobs():
    """Poll the coordinator for an available job (long-poll up to 5 seconds).

    Returns the job dict on 200, None on 204 (no work available) or on any
    error; errors are logged, never raised.
    """
    poll_data = {
        "max_wait_seconds": 5
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/poll",
            json=poll_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            job = response.json()
            logger.info(f"Received job: {job}")
            return job
        elif response.status_code == 204:
            # 204 No Content: coordinator has no pending jobs for this miner.
            return None
        else:
            logger.error(f"Poll failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Error polling for jobs: {e}")
        return None
def main():
    """Main miner loop.

    Startup sequence: verify GPU (nvidia-smi) -> verify Ollama -> wait for the
    coordinator -> register. Then loops forever, sending a heartbeat every
    HEARTBEAT_INTERVAL seconds and polling for jobs every ~3 seconds. Exits
    with status 1 on any unrecoverable startup failure or loop error.
    """
    logger.info("Starting Real GPU Miner Client on Host...")
    # Check GPU availability — this miner requires a real GPU.
    gpu_info = get_gpu_info()
    if not gpu_info:
        logger.error("GPU not available, exiting")
        sys.exit(1)
    logger.info(f"GPU detected: {gpu_info['name']} ({gpu_info['memory_total']}MB)")
    # Check Ollama — inference jobs are delegated to it.
    ollama_available, models = check_ollama()
    if not ollama_available:
        logger.error("Ollama not available - please install and start Ollama")
        sys.exit(1)
    logger.info(f"Ollama models available: {', '.join(models)}")
    # Wait for coordinator
    if not wait_for_coordinator():
        sys.exit(1)
    # Register with coordinator
    session_token = register_miner()
    if not session_token:
        logger.error("Failed to register, exiting")
        sys.exit(1)
    logger.info("Miner registered successfully, starting main loop...")
    # Main loop: both timers start at 0 so the first heartbeat/poll fire
    # immediately on the first iteration.
    last_heartbeat = 0
    last_poll = 0
    try:
        while True:
            current_time = time.time()
            # Send heartbeat
            if current_time - last_heartbeat >= HEARTBEAT_INTERVAL:
                send_heartbeat()
                last_heartbeat = current_time
            # Poll for jobs (at most every 3 seconds)
            if current_time - last_poll >= 3:
                job = poll_for_jobs()
                if job:
                    # Execute the job with real GPU; blocks the loop until done.
                    execute_job(job, models)
                last_poll = current_time
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Shutting down miner...")
    except Exception as e:
        logger.error(f"Error in main loop: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Wrapper script for GPU miner to ensure proper logging.
# 'exec' replaces this shell so signals reach the Python process directly;
# '-u' forces unbuffered Python output; '2>&1' merges stderr into stdout so
# the supervising process captures everything on one stream.
exec /home/oib/windsurf/aitbc/.venv/bin/python -u /home/oib/windsurf/aitbc/scripts/gpu/gpu_miner_host.py 2>&1

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env python3
"""
Simple GPU Registry Server for demonstration

FastAPI app exposing register/heartbeat/list endpoints for GPU miners,
backed by an in-memory dict (state is lost on restart).
"""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, Any, Optional
import uvicorn
from datetime import datetime

app = FastAPI(title="GPU Registry Demo")

# In-memory storage, keyed by miner_id; not persisted and not thread-safe
# beyond what the single-process asyncio event loop provides.
registered_gpus: Dict[str, Dict] = {}
class GPURegistration(BaseModel):
    """Request body for /miners/register."""
    # Free-form capability payload (e.g. {"gpu": {...}}) as sent by the miner.
    capabilities: Dict[str, Any]
    # Maximum number of concurrent jobs the miner accepts.
    concurrency: int = 1
    # Optional deployment region label.
    region: Optional[str] = None

class Heartbeat(BaseModel):
    """Request body for /miners/heartbeat."""
    # Number of jobs currently in flight on the miner.
    inflight: int = 0
    # Miner-reported status string.
    status: str = "ONLINE"
    # Arbitrary extra metrics/metadata.
    metadata: Dict[str, Any] = {}
@app.get("/")
async def root():
    """Service banner plus the current number of registered GPUs."""
    return {"message": "GPU Registry Demo", "registered_gpus": len(registered_gpus)}

@app.get("/health")
async def health():
    """Liveness probe; always returns ok while the process is up."""
    return {"status": "ok"}
@app.post("/miners/register")
async def register_gpu(miner_id: str, gpu_data: GPURegistration):
    """Register a GPU miner.

    Upserts the miner record keyed by the miner_id query parameter; a repeat
    registration overwrites the previous entry.
    """
    registered_gpus[miner_id] = {
        "id": miner_id,
        # NOTE(review): datetime.utcnow() is deprecated in Python 3.12 and the
        # ISO string carries no timezone suffix — consumers must assume UTC.
        "registered_at": datetime.utcnow().isoformat(),
        "last_heartbeat": datetime.utcnow().isoformat(),
        # NOTE(review): .dict() is the pydantic v1 API (model_dump() in v2) —
        # confirm the pinned pydantic version.
        **gpu_data.dict()
    }
    return {"status": "ok", "message": f"GPU {miner_id} registered successfully"}
@app.post("/miners/heartbeat")
async def heartbeat(miner_id: str, heartbeat_data: Heartbeat):
    """Receive heartbeat from a GPU miner.

    Updates last_heartbeat/status/metadata for a known miner; 404 when the
    miner never registered. Note: heartbeat_data.inflight is currently ignored.
    """
    if miner_id not in registered_gpus:
        raise HTTPException(status_code=404, detail="GPU not registered")
    registered_gpus[miner_id]["last_heartbeat"] = datetime.utcnow().isoformat()
    registered_gpus[miner_id]["status"] = heartbeat_data.status
    registered_gpus[miner_id]["metadata"] = heartbeat_data.metadata
    return {"status": "ok"}
@app.get("/miners/list")
async def list_gpus():
    """List all registered GPUs as {"gpus": [record, ...]}."""
    return {"gpus": list(registered_gpus.values())}

@app.get("/miners/{miner_id}")
async def get_gpu(miner_id: str):
    """Get details of a specific GPU; 404 when the id is unknown."""
    if miner_id not in registered_gpus:
        raise HTTPException(status_code=404, detail="GPU not registered")
    return registered_gpus[miner_id]

if __name__ == "__main__":
    # Listens on all interfaces — fine for a demo, not production-hardened.
    print("Starting GPU Registry Demo on http://localhost:8091")
    uvicorn.run(app, host="0.0.0.0", port=8091)

View File

@@ -0,0 +1,146 @@
#!/usr/bin/env python3
"""
Integrate GPU Miner with existing Trade Exchange

One-shot helper: injects a GPU-marketplace <script> block into the exchange's
index.html and prints the code for an API endpoint to add manually.
"""
import httpx
import json
import subprocess
import time
from datetime import datetime

# Configuration
EXCHANGE_URL = "http://localhost:3002"      # trade-exchange frontend
GPU_REGISTRY_URL = "http://localhost:8091"  # GPU registry demo server
def update_exchange_with_gpu():
    """Update the exchange frontend to show registered GPUs.

    Reads the trade-exchange index.html, appends a <script> block (which
    fetches live GPU offers from the registry and overrides loadGPUOffers)
    just before </body>, and writes the file back in place. No backup is made
    and repeated runs will inject the script again — run once.
    """
    # Read the exchange HTML
    with open('/home/oib/windsurf/aitbc/apps/trade-exchange/index.html', 'r') as f:
        html_content = f.read()
    # Add GPU marketplace integration (JavaScript injected verbatim).
    gpu_integration = """
<script>
// GPU Integration
async function loadRealGPUOffers() {
    try {
        const response = await fetch('http://localhost:8091/miners/list');
        const data = await response.json();
        if (data.gpus && data.gpus.length > 0) {
            displayRealGPUOffers(data.gpus);
        } else {
            displayDemoOffers();
        }
    } catch (error) {
        console.log('Using demo GPU offers');
        displayDemoOffers();
    }
}
function displayRealGPUOffers(gpus) {
    const container = document.getElementById('gpuList');
    container.innerHTML = '';
    gpus.forEach(gpu => {
        const gpuCard = `
<div class="bg-white rounded-lg shadow-lg p-6 card-hover">
<div class="flex justify-between items-start mb-4">
<h3 class="text-lg font-semibold">${gpu.capabilities.gpu.model}</h3>
<span class="bg-green-100 text-green-800 px-2 py-1 rounded text-sm">Available</span>
</div>
<div class="space-y-2 text-sm text-gray-600 mb-4">
<p><i data-lucide="monitor" class="w-4 h-4 inline mr-1"></i>Memory: ${gpu.capabilities.gpu.memory_gb} GB</p>
<p><i data-lucide="zap" class="w-4 h-4 inline mr-1"></i>CUDA: ${gpu.capabilities.gpu.cuda_version}</p>
<p><i data-lucide="cpu" class="w-4 h-4 inline mr-1"></i>Concurrency: ${gpu.concurrency}</p>
<p><i data-lucide="map-pin" class="w-4 h-4 inline mr-1"></i>Region: ${gpu.region}</p>
</div>
<div class="flex justify-between items-center">
<span class="text-2xl font-bold text-purple-600">50 AITBC/hr</span>
<button onclick="purchaseGPU('${gpu.id}')" class="bg-purple-600 text-white px-4 py-2 rounded hover:bg-purple-700 transition">
Purchase
</button>
</div>
</div>
`;
        container.innerHTML += gpuCard;
    });
    lucide.createIcons();
}
// Override the loadGPUOffers function
const originalLoadGPUOffers = loadGPUOffers;
loadGPUOffers = loadRealGPUOffers;
</script>
"""
    # Insert before closing body tag
    if '</body>' in html_content:
        html_content = html_content.replace('</body>', gpu_integration + '</body>')
        # Write back to file
        with open('/home/oib/windsurf/aitbc/apps/trade-exchange/index.html', 'w') as f:
            f.write(html_content)
        print("✅ Updated exchange with GPU integration!")
    else:
        print("❌ Could not find </body> tag in exchange HTML")
def create_gpu_api_endpoint():
    """Print (does not install) the code for a /api/gpu/offers endpoint.

    The snippet is meant to be pasted into simple_exchange_api.py by hand; it
    proxies the GPU registry and falls back to a demo offer when the registry
    is unreachable.
    """
    api_code = """
@app.get("/api/gpu/offers")
async def get_gpu_offers():
    \"\"\"Get available GPU offers\"\"\"
    try:
        # Fetch from GPU registry
        response = httpx.get("http://localhost:8091/miners/list")
        if response.status_code == 200:
            data = response.json()
            return {"offers": data.get("gpus", [])}
    except:
        pass
    # Return demo data if registry not available
    return {
        "offers": [{
            "id": "demo-gpu-1",
            "model": "NVIDIA RTX 4060 Ti",
            "memory_gb": 16,
            "price_per_hour": 50,
            "available": True
        }]
    }
"""
    print("\n📝 To add GPU API endpoint to exchange, add this code to simple_exchange_api.py:")
    print(api_code)
def main():
    """Entry point: patch the exchange frontend, then print follow-up steps."""
    print("🔗 Integrating GPU Miner with Trade Exchange...")

    # Update exchange frontend
    update_exchange_with_gpu()

    # Show API integration code
    create_gpu_api_endpoint()

    # Wrap-up instructions for the operator, emitted one line at a time.
    for line in (
        "\n📊 Integration Summary:",
        "1. ✅ Exchange frontend updated to show real GPUs",
        "2. 📝 See above for API endpoint code",
        "3. 🌐 Access the exchange at: http://localhost:3002",
        "4. 🎯 GPU Registry available at: http://localhost:8091/miners/list",
        "\n🔄 To see the integrated GPU marketplace:",
        "1. Restart the trade exchange if needed:",
        "   cd /home/oib/windsurf/aitbc/apps/trade-exchange",
        "   python simple_exchange_api.py",
        "2. Open http://localhost:3002 in browser",
        "3. Click 'Browse GPU Marketplace'",
    ):
        print(line)


if __name__ == "__main__":
    main()

115
dev/gpu/miner_workflow.py Normal file
View File

@@ -0,0 +1,115 @@
#!/usr/bin/env python3
"""
Complete miner workflow - poll for jobs and assign proposer
"""
import httpx
import json
import time
from datetime import datetime
# Configuration
COORDINATOR_URL = "http://localhost:8001"
MINER_API_KEY = "${MINER_API_KEY}"
MINER_ID = "localhost-gpu-miner"
def poll_and_accept_job():
    """Poll the coordinator for one job, simulate the work, submit a result.

    Returns:
        The job id when a job was received and its result was accepted,
        otherwise None (no job available, poll error, or submit error).
    """
    print("🔍 Polling for jobs...")
    auth_headers = {
        "Content-Type": "application/json",
        "X-Api-Key": MINER_API_KEY,
    }
    with httpx.Client() as client:
        # Long-poll the coordinator; 204 means "nothing to do right now".
        response = client.post(
            f"{COORDINATOR_URL}/v1/miners/poll",
            headers=auth_headers,
            json={"max_wait_seconds": 5},
        )
        if response.status_code == 204:
            print("   No jobs available")
            return None
        if response.status_code != 200:
            print(f"❌ Failed to poll: {response.status_code}")
            return None

        job = response.json()
        print(f"✅ Received job: {job['job_id']}")
        print(f"   Task: {job['payload'].get('task', 'unknown')}")

        # Simulate processing (fixed 2 s of fake work).
        print("⚙️ Processing job...")
        time.sleep(2)

        result_payload = {
            "result": {
                "status": "completed",
                "output": f"Job {job['job_id']} completed successfully",
                "execution_time_ms": 2000,
                "miner_id": MINER_ID,
            },
            "metrics": {
                "compute_time": 2.0,
                "energy_used": 0.1,
            },
        }
        print(f"📤 Submitting result for job {job['job_id']}...")
        submit = client.post(
            f"{COORDINATOR_URL}/v1/miners/{job['job_id']}/result",
            headers=auth_headers,
            json=result_payload,
        )
        if submit.status_code == 200:
            print("✅ Result submitted successfully!")
            return job['job_id']
        print(f"❌ Failed to submit result: {submit.status_code}")
        print(f"   Response: {submit.text}")
        return None
def check_block_proposer(job_id):
    """Look up the explorer block whose hash equals ``job_id`` and print it.

    Returns the matching block dict, or None when the explorer is
    unreachable or no block matches.
    """
    print(f"\n🔍 Checking proposer for job {job_id}...")
    with httpx.Client() as client:
        response = client.get(f"{COORDINATOR_URL}/v1/explorer/blocks")
        if response.status_code != 200:
            return None
        # In this demo chain a block's hash doubles as the job id.
        for block in response.json()['items']:
            if block['hash'] != job_id:
                continue
            print("📦 Block Info:")
            print(f"   Height: {block['height']}")
            print(f"   Hash: {block['hash']}")
            print(f"   Proposer: {block['proposer']}")
            print(f"   Time: {block['timestamp']}")
            return block
    return None
def main():
    """Run the demo: poll and execute one job, then inspect its block."""
    print("⛏️ AITBC Miner Workflow Demo")
    print(f"   Miner ID: {MINER_ID}")
    print(f"   Coordinator: {COORDINATOR_URL}")
    print()

    job_id = poll_and_accept_job()
    if not job_id:
        print("\n💡 Tip: Create a job first using example_client_remote.py")
        return

    time.sleep(1)  # Give the server a moment to update
    check_block_proposer(job_id)


if __name__ == "__main__":
    main()

32
dev/gpu/start_gpu_miner.sh Executable file
View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Start GPU Miner Client
#
# Shows GPU status (when nvidia-smi is present), probes the coordinator API
# on :9080, then replaces this shell with the Python miner via exec.

echo "=== AITBC GPU Miner Client Startup ==="
echo "Starting GPU miner client..."
echo ""

# Check if GPU is available — warn only; the miner may still run CPU-side.
if ! command -v nvidia-smi &> /dev/null; then
    echo "WARNING: nvidia-smi not found, GPU may not be available"
fi

# Show GPU info
if command -v nvidia-smi &> /dev/null; then
    echo "=== GPU Status ==="
    nvidia-smi --query-gpu=name,memory.used,memory.total,utilization.gpu,temperature.gpu --format=csv,noheader,nounits
    echo ""
fi

# Check if coordinator is running. -f makes curl fail on HTTP 4xx/5xx so an
# error page is not reported as healthy; this probe is best-effort only.
echo "=== Checking Coordinator API ==="
if curl -fs --max-time 5 http://localhost:9080/health > /dev/null 2>&1; then
    echo "✓ Coordinator API is running on port 9080"
else
    echo "✗ Coordinator API is not accessible on port 9080"
    echo "  The miner will wait for the coordinator to start..."
fi

echo ""
echo "=== Starting GPU Miner ==="
# Fail loudly if the project dir is missing instead of exec-ing the relative
# script path from the wrong working directory (previously unchecked).
cd /home/oib/windsurf/aitbc || { echo "ERROR: /home/oib/windsurf/aitbc not found" >&2; exit 1; }
exec python3 scripts/gpu/gpu_miner_host.py

72
dev/service/check-container.sh Executable file
View File

@@ -0,0 +1,72 @@
#!/bin/bash
# Check what's running in the aitbc container
#
# Inspects the `aitbc` incus container (processes, listening ports, nginx
# state) and then (re)starts the four AITBC services inside it.
# Requires membership in the `incus` group.

echo "🔍 Checking AITBC Container Status"
echo "================================="

# First, let's see if we can access the container
if ! groups | grep -q incus; then
  echo "❌ You're not in the incus group!"
  echo "Run: sudo usermod -aG incus \$USER"
  echo "Then log out and log back in"
  exit 1
fi

echo "📋 Container Info:"
incus list | grep aitbc
echo ""

echo "🔧 Services in container:"
# Show uvicorn/python processes; `grep -v grep` drops the grep itself.
incus exec aitbc -- ps aux | grep -E "(uvicorn|python)" | grep -v grep || echo "No services running"
echo ""

echo "🌐 Ports listening in container:"
incus exec aitbc -- ss -tlnp | grep -E "(8000|9080|3001|3002)" || echo "No ports listening"
echo ""

echo "📁 Nginx status:"
incus exec aitbc -- systemctl status nginx --no-pager -l | head -20
echo ""

echo "🔍 Nginx config test:"
incus exec aitbc -- nginx -t
echo ""

echo "📝 Nginx sites enabled:"
incus exec aitbc -- ls -la /etc/nginx/sites-enabled/
echo ""

echo "🚀 Starting services if needed..."
# Start the services
# NOTE(review): the embedded script assumes /home/oib/aitbc with a repo-root
# .venv inside the container; other scripts use /home/oib/windsurf/aitbc —
# confirm the container layout before relying on this.
incus exec aitbc -- bash -c "
cd /home/oib/aitbc
pkill -f uvicorn 2>/dev/null || true
pkill -f server.py 2>/dev/null || true
# Start blockchain node
cd apps/blockchain-node
source ../../.venv/bin/activate
python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080 &
# Start coordinator API
cd ../coordinator-api
source ../../.venv/bin/activate
python -m uvicorn src.app.main:app --host 0.0.0.0 --port 8000 &
# Start marketplace UI
cd ../marketplace-ui
python server.py --port 3001 &
# Start trade exchange
cd ../trade-exchange
python server.py --port 3002 &
sleep 3
echo 'Services started!'
"

echo ""
echo "✅ Done! Check services:"
echo "incus exec aitbc -- ps aux | grep uvicorn"

View File

@@ -0,0 +1,65 @@
#!/bin/bash
# Diagnose AITBC services
#
# Reports which local ports are occupied, probes local and public endpoints,
# frees the local ports, and prints recovery instructions.

echo "🔍 Diagnosing AITBC Services"
echo "=========================="
echo ""

# Check local services
echo "📋 Local Services:"
echo "Port 8000 (Coordinator API):"
lsof -i :8000 2>/dev/null || echo " ❌ Not running"
echo "Port 9080 (Blockchain Node):"
lsof -i :9080 2>/dev/null || echo " ❌ Not running"
echo "Port 3001 (Marketplace UI):"
lsof -i :3001 2>/dev/null || echo " ❌ Not running"
echo "Port 3002 (Trade Exchange):"
lsof -i :3002 2>/dev/null || echo " ❌ Not running"
echo ""

# probe URL — print ✅/❌ based on HTTP success. -f makes curl fail on
# 4xx/5xx. The previous `curl -s URL && ok || fail` (and the piped
# `| head -c 50 && … || …` variant) could never report failure, because
# the pipeline's status came from head, which succeeds even on no input.
probe() {
    if curl -fs --max-time 5 "$1" > /dev/null 2>&1; then
        echo " ✅ OK"
    else
        echo " ❌ Failed"
    fi
}

# probe_preview URL — like probe, but echo the first 50 bytes of the body
# followed by "..." so the operator can eyeball the response.
probe_preview() {
    local body
    if body=$(curl -fs --max-time 5 "$1" 2>/dev/null); then
        printf '%s' "${body:0:50}"
        echo "..."
    else
        echo " ❌ Failed"
    fi
}

echo "🌐 Testing Endpoints:"
echo "Local API Health:"
probe http://127.0.0.1:8000/v1/health
echo "Local Blockchain:"
probe_preview http://127.0.0.1:9080/rpc/head
echo "Local Admin:"
probe_preview http://127.0.0.1:8000/v1/admin/stats
echo ""

echo "🌐 Remote Endpoints (via domain):"
echo "Domain API Health:"
probe https://aitbc.bubuit.net/health
echo "Domain Admin:"
probe_preview https://aitbc.bubuit.net/admin/stats
echo ""

echo "🔧 Fixing common issues..."
# Stop any conflicting services (best effort — ignore missing listeners).
echo "Stopping local services..."
sudo fuser -k 8000/tcp 2>/dev/null || true
sudo fuser -k 9080/tcp 2>/dev/null || true
sudo fuser -k 3001/tcp 2>/dev/null || true
sudo fuser -k 3002/tcp 2>/dev/null || true
echo ""

echo "📝 Instructions:"
echo "1. Make sure you're in the incus group: sudo usermod -aG incus \$USER"
echo "2. Log out and log back in"
echo "3. Run: incus exec aitbc -- bash"
echo "4. Inside container, run: /home/oib/start_aitbc.sh"
echo "5. Check services: ps aux | grep uvicorn"
echo ""
echo "If services are running in container but not accessible:"
echo "1. Check port forwarding to 10.1.223.93"
echo "2. Check nginx config in container"
echo "3. Check firewall rules"

58
dev/service/fix-services.sh Executable file
View File

@@ -0,0 +1,58 @@
#!/bin/bash
# Quick fix to start AITBC services in container
#
# Starts the four AITBC services as background jobs of this shell, prints
# their PIDs, then smoke-tests three endpoints. Assumes the repo lives at
# /home/oib/windsurf/aitbc on the machine it runs on.

echo "🔧 Starting AITBC Services in Container"
echo "====================================="

# First, let's manually start the services
echo "1. Starting Coordinator API..."
cd /home/oib/windsurf/aitbc/apps/coordinator-api
# Prefer the repo-root venv; fall back to an app-local .venv.
source ../../.venv/bin/activate 2>/dev/null || source .venv/bin/activate
python -m uvicorn src.app.main:app --host 0.0.0.0 --port 8000 &
COORD_PID=$!  # PID of the backgrounded coordinator

echo "2. Starting Blockchain Node..."
cd ../blockchain-node
python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080 &
NODE_PID=$!

echo "3. Starting Marketplace UI..."
cd ../marketplace-ui
python server.py --port 3001 &
MARKET_PID=$!

echo "4. Starting Trade Exchange..."
cd ../trade-exchange
python server.py --port 3002 &
EXCHANGE_PID=$!

echo ""
echo "✅ Services started!"
echo "Coordinator API: http://127.0.0.1:8000"
echo "Blockchain: http://127.0.0.1:9080"
echo "Marketplace: http://127.0.0.1:3001"
echo "Exchange: http://127.0.0.1:3002"
echo ""
echo "PIDs:"
echo "Coordinator: $COORD_PID"
echo "Blockchain: $NODE_PID"
echo "Marketplace: $MARKET_PID"
echo "Exchange: $EXCHANGE_PID"
echo ""
echo "To stop: kill $COORD_PID $NODE_PID $MARKET_PID $EXCHANGE_PID"

# Wait a bit for services to start
sleep 3

# Test endpoints (first 100 bytes of each response, for eyeballing)
echo ""
echo "🧪 Testing endpoints:"
echo "API Health:"
curl -s http://127.0.0.1:8000/v1/health | head -c 100
echo -e "\n\nAdmin Stats:"
# ADMIN_API_KEY must be exported by the caller; header is empty if unset.
curl -s http://127.0.0.1:8000/v1/admin/stats -H "X-Api-Key: ${ADMIN_API_KEY}" | head -c 100
echo -e "\n\nMarketplace Offers:"
curl -s http://127.0.0.1:8000/v1/marketplace/offers | head -c 100

129
dev/service/run-local-services.sh Executable file
View File

@@ -0,0 +1,129 @@
#!/bin/bash
# Run AITBC services locally for domain access
#
# Starts coordinator API, blockchain node, marketplace UI and trade exchange
# in the background, records their PIDs in .service_pids (consumed by
# stop-services.sh), smoke-tests each endpoint, then tails the logs.
set -e

echo "🚀 Starting AITBC Services for Domain Access"
echo "=========================================="

# Kill any existing services so the ports are free.
echo "Cleaning up existing services..."
sudo fuser -k 8000/tcp 2>/dev/null || true
sudo fuser -k 9080/tcp 2>/dev/null || true
sudo fuser -k 3001/tcp 2>/dev/null || true
sudo fuser -k 3002/tcp 2>/dev/null || true
pkill -f "uvicorn.*aitbc" 2>/dev/null || true
pkill -f "server.py" 2>/dev/null || true

# Wait for ports to be free
sleep 2

# Create logs directory
mkdir -p logs

echo ""
echo "📦 Starting Services..."

# Start Coordinator API
echo "1. Starting Coordinator API (port 8000)..."
cd apps/coordinator-api
# Activate the shared venv, creating it first if missing.
# Fix: the previous one-liner `source … || python -m venv … && source …`
# re-ran the final `source` even when the first succeeded, because `&&`
# takes the whole `a || b` as its left-hand side.
# NOTE(review): ../.venv resolves to apps/.venv from here; other scripts use
# the repo-root .venv — confirm which environment is intended.
if ! source ../.venv/bin/activate 2>/dev/null; then
    python -m venv ../.venv
    source ../.venv/bin/activate
fi
pip install -q -e . 2>/dev/null || true
nohup python -m uvicorn src.app.main:app --host 0.0.0.0 --port 8000 > ../../logs/api.log 2>&1 &
API_PID=$!
echo " PID: $API_PID"

# Start Blockchain Node
echo "2. Starting Blockchain Node (port 9080)..."
cd ../blockchain-node
nohup python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080 > ../../logs/blockchain.log 2>&1 &
NODE_PID=$!
echo " PID: $NODE_PID"

# Start Marketplace UI
echo "3. Starting Marketplace UI (port 3001)..."
cd ../marketplace-ui
nohup python server.py --port 3001 > ../../logs/marketplace.log 2>&1 &
MARKET_PID=$!
echo " PID: $MARKET_PID"

# Start Trade Exchange
echo "4. Starting Trade Exchange (port 3002)..."
cd ../trade-exchange
nohup python server.py --port 3002 > ../../logs/exchange.log 2>&1 &
EXCHANGE_PID=$!
echo " PID: $EXCHANGE_PID"

# Save PIDs for cleanup by stop-services.sh
echo "$API_PID $NODE_PID $MARKET_PID $EXCHANGE_PID" > ../.service_pids
cd ..

# Wait for services to start
echo ""
echo "⏳ Waiting for services to initialize..."
sleep 5

# check LABEL URL [extra curl args…] — print ✅/❌ for one endpoint.
# -f makes curl fail on HTTP 4xx/5xx, so an error page no longer counts as
# healthy (the old bare `curl -s` reported OK for any HTTP response).
check() {
    local label=$1 url=$2
    shift 2
    echo -n "$label: "
    if curl -fs --max-time 5 "$@" "$url" > /dev/null 2>&1; then
        echo "✅ OK"
    else
        echo "❌ Failed"
    fi
}

# Test services
echo ""
echo "🧪 Testing Services..."
check "API Health" http://127.0.0.1:8000/v1/health
check "Admin API" http://127.0.0.1:8000/v1/admin/stats -H "X-Api-Key: ${ADMIN_API_KEY:-}"
check "Blockchain" http://127.0.0.1:9080/rpc/head
check "Marketplace" http://127.0.0.1:3001
check "Exchange" http://127.0.0.1:3002

echo ""
echo "✅ All services started!"
echo ""
echo "📋 Local URLs:"
echo " API: http://127.0.0.1:8000/v1"
echo " RPC: http://127.0.0.1:9080/rpc"
echo " Marketplace: http://127.0.0.1:3001"
echo " Exchange: http://127.0.0.1:3002"
echo ""
echo "🌐 Domain URLs (if nginx is configured):"
echo " API: https://aitbc.bubuit.net/api"
echo " Admin: https://aitbc.bubuit.net/admin"
echo " RPC: https://aitbc.bubuit.net/rpc"
echo " Marketplace: https://aitbc.bubuit.net/Marketplace"
echo " Exchange: https://aitbc.bubuit.net/Exchange"
echo ""
echo "📝 Logs: ./logs/"
echo "🛑 Stop services: ./stop-services.sh"
echo ""
echo "Press Ctrl+C to stop monitoring (services will keep running)"

# Monitor logs
tail -f logs/*.log

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Download production assets locally
#
# Fetches the JS assets used by the marketplace/exchange pages from unpkg
# into assets/, then writes a minimal Tailwind config.
set -euo pipefail

ASSETS_DIR=/home/oib/windsurf/aitbc/assets

echo "Setting up production assets..."

# Create assets directory
mkdir -p "$ASSETS_DIR"/{css,js,icons}

# fetch URL DEST — -f fails on HTTP errors so a 404/500 page is never saved
# as if it were the asset (the old plain `curl -L` silently stored error
# bodies); -L follows unpkg redirects; under set -e a failure aborts.
fetch() {
    curl -fL -o "$2" "$1"
}

# Download Tailwind CSS (production build)
echo "Downloading Tailwind CSS..."
fetch https://unpkg.com/tailwindcss@3.4.0/lib/tailwind.js "$ASSETS_DIR/js/tailwind.js"

# Download Axios
echo "Downloading Axios..."
fetch https://unpkg.com/axios@1.6.2/dist/axios.min.js "$ASSETS_DIR/js/axios.min.js"

# Download Lucide icons
echo "Downloading Lucide..."
fetch https://unpkg.com/lucide@latest/dist/umd/lucide.js "$ASSETS_DIR/js/lucide.js"

# Create a custom Tailwind build with only used classes
cat > "$ASSETS_DIR/tailwind.config.js" << 'EOF'
module.exports = {
  content: [
    "./apps/trade-exchange/index.html",
    "./apps/marketplace-ui/index.html"
  ],
  darkMode: 'class',
  theme: {
    extend: {},
  },
  plugins: [],
}
EOF

echo "Assets downloaded to /home/oib/windsurf/aitbc/assets/"
echo "Update your HTML files to use local paths:"
echo " - /assets/js/tailwind.js"
echo " - /assets/js/axios.min.js"
echo " - /assets/js/lucide.js"

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Serve the miner dashboard via python3 http.server on the first free port
# in the 8080-8090 range.

echo "=== Starting AITBC Miner Dashboard ==="
echo ""

# Find available port
PORT=8080
while [ "$PORT" -le 8090 ]; do
    if ! netstat -tuln 2>/dev/null | grep -q ":$PORT "; then
        echo "✓ Found available port: $PORT"
        break
    fi
    # Fix: this message previously referenced the undefined lowercase $port,
    # so it always printed "Port  is in use".
    echo "Port $PORT is in use, trying next..."
    PORT=$((PORT + 1))
done

# Loop only exceeds 8090 when every port in the range was busy.
if [ "$PORT" -gt 8090 ]; then
    echo "❌ No available ports found between 8080-8090"
    exit 1
fi

# Start the dashboard
echo "Starting dashboard on port $PORT..."
nohup python3 -m http.server "$PORT" --bind 0.0.0.0 > dashboard.log 2>&1 &
PID=$!

echo ""
echo "✅ Dashboard is running!"
echo ""
echo "Access URLs:"
echo "  Local: http://localhost:$PORT"
echo "  Network: http://$(hostname -I | awk '{print $1}'):$PORT"
echo ""
echo "Dashboard file: miner-dashboard.html"
echo "Process ID: $PID"
echo "Log file: dashboard.log"
echo ""
echo "To stop: kill $PID"
echo "To view logs: tail -f dashboard.log"

30
dev/service/stop-services.sh Executable file
View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Stop all AITBC services: first the PIDs recorded by run-local-services.sh,
# then a best-effort sweep over the known ports and process patterns.

echo "🛑 Stopping AITBC Services"
echo "========================"

# Stop by PID if the bookkeeping file exists.
if [ -f .service_pids ]; then
    PIDS=$(cat .service_pids)
    echo "Found PIDs: $PIDS"
    for pid in $PIDS; do
        # Only signal processes that are still alive.
        kill -0 "$pid" 2>/dev/null || continue
        echo "Stopping PID $pid..."
        kill "$pid"
    done
    rm -f .service_pids
fi

# Force kill anything still holding the service ports or matching patterns.
echo "Cleaning up any remaining processes..."
for port in 8000 9080 3001 3002; do
    sudo fuser -k "$port"/tcp 2>/dev/null || true
done
pkill -f "uvicorn.*aitbc" 2>/dev/null || true
pkill -f "server.py" 2>/dev/null || true

echo "✅ All services stopped!"

View File

@@ -1055,7 +1055,7 @@ Comprehensive implementation of privacy-preserving machine learning and edge GPU
- **Performance**: Established baseline performance metrics and validated 5-10% improvements - **Performance**: Established baseline performance metrics and validated 5-10% improvements
- **Test Coverage**: Achieved 100% CLI test pass rate (170/170 tests) with Python 3.13.5 - **Test Coverage**: Achieved 100% CLI test pass rate (170/170 tests) with Python 3.13.5
- **FastAPI Compatibility**: Fixed dependency annotation issues for Python 3.13.5 - **FastAPI Compatibility**: Fixed dependency annotation issues for Python 3.13.5
- **Database Optimization**: Corrected coordinator API database path to `/home/oib/windsurf/aitbc/data/` - **Database Optimization**: Corrected coordinator API database path to `/home/oib/windsurf/aitbc/apps/coordinator-api/data/`
### Upgrade Impact ### Upgrade Impact
- **Standardized** minimum Python version to 3.13.5 across entire codebase (SDK, crypto, APIs, CLI, infrastructure) - **Standardized** minimum Python version to 3.13.5 across entire codebase (SDK, crypto, APIs, CLI, infrastructure)

View File

@@ -1,17 +1,17 @@
# AITBC Infrastructure Documentation # AITBC Infrastructure Documentation
> Last updated: 2026-02-14 > Last updated: 2026-03-04
## Overview ## Overview
Two-tier architecture: **incus host** (localhost) runs the reverse proxy with SSL termination, forwarding all `aitbc.bubuit.net` traffic to the **aitbc container** which runs nginx + all services. Two-tier architecture: **incus host (at1)** runs the reverse proxy with SSL termination, forwarding all `aitbc.bubuit.net` traffic to the **aitbc container** which runs nginx + all services.
``` ```
Internet → aitbc.bubuit.net (HTTPS :443) Internet → aitbc.bubuit.net (HTTPS :443)
┌──────────────────────────────────────────────┐ ┌──────────────────────────────────────────────┐
│ Incus Host (localhost / at1) │ │ Incus Host (at1 / localhost) │
│ Nginx reverse proxy (:443 SSL → :80) │ │ Nginx reverse proxy (:443 SSL → :80) │
│ Config: /etc/nginx/sites-available/ │ │ Config: /etc/nginx/sites-available/ │
│ aitbc-proxy.conf │ │ aitbc-proxy.conf │
@@ -38,7 +38,13 @@ Internet → aitbc.bubuit.net (HTTPS :443)
└──────────────────────────────────────────────┘ └──────────────────────────────────────────────┘
``` ```
## Incus Host (localhost) ## Incus Host (at1)
### Host Details
- **Hostname**: `at1` (primary development workstation)
- **Environment**: Windsurf development environment
- **GPU Access**: **Primary GPU access location** - all GPU workloads must run on at1
- **Architecture**: x86_64 Linux with CUDA GPU support
### Services (Host) ### Services (Host)
@@ -63,11 +69,36 @@ aitbc-mock-coordinator.service # Mock coordinator on port 8090
``` ```
**Service Details:** **Service Details:**
- **Working Directory**: `/home/oib/windsurf/aitbc/apps/blockchain-node` - **Working Directory**: `/opt/aitbc/` (standard path for all services)
- **Python Environment**: `/home/oib/windsurf/aitbc/apps/blockchain-node/.venv/bin/python` - **Python Environment**: `/opt/aitbc/.venv/bin/python`
- **User**: oib - **User**: oib
- **Restart Policy**: always (with 5s delay) - **Restart Policy**: always (with 5s delay)
### Standard Service Structure (/opt/aitbc)
On at1, `/opt/aitbc` uses individual symlinks to the Windsurf project directories:
```bash
/opt/aitbc/ # Service root with selective symlinks
├── apps/ # Symlinked app directories
│ ├── blockchain-explorer -> /home/oib/windsurf/aitbc/apps/blockchain-explorer/
│ ├── blockchain-node -> /home/oib/windsurf/aitbc/apps/blockchain-node/
│ ├── coordinator-api -> /home/oib/windsurf/aitbc/apps/coordinator-api/
│ ├── explorer-web -> /home/oib/windsurf/aitbc/apps/explorer-web/
│ ├── marketplace-web -> /home/oib/windsurf/aitbc/apps/marketplace-web/
│ ├── pool-hub -> /home/oib/windsurf/aitbc/apps/pool-hub/
│ ├── trade-exchange -> /home/oib/windsurf/aitbc/apps/trade-exchange/
│ ├── wallet-daemon -> /home/oib/windsurf/aitbc/apps/wallet-daemon/
│ └── zk-circuits -> /home/oib/windsurf/aitbc/apps/zk-circuits/
├── data/ # Local service data
├── logs/ # Local service logs
├── models/ # Local model storage
├── scripts -> /home/oib/windsurf/aitbc/scripts/ # Shared scripts
└── systemd -> /home/oib/windsurf/aitbc/systemd/ # Service definitions
```
**On aitbc/aitbc1 servers**: `/opt/aitbc` is symlinked to the git repo clone (`/opt/aitbc -> /path/to/aitbc-repo`) for complete repository access.
**Verification Commands:** **Verification Commands:**
```bash ```bash
# Check service status # Check service status
@@ -79,25 +110,48 @@ sudo systemctl start aitbc-blockchain-node.service
# Check logs # Check logs
journalctl -u aitbc-mock-coordinator --no-pager -n 20 journalctl -u aitbc-mock-coordinator --no-pager -n 20
# Verify /opt/aitbc symlink structure
ls -la /opt/aitbc/ # Should show individual app symlinks
ls -la /opt/aitbc/apps/ # Should show all app symlinks
ls -la /opt/aitbc/scripts # Should show symlink to windsurf scripts
ls -la /opt/aitbc/systemd # Should show symlink to windsurf systemd
``` ```
### Python Environment (Host) ### Python Environment (at1)
Development and testing services on localhost use **Python 3.13.5**: **Development vs Service Environments**:
```bash ```bash
# Localhost development workspace # Development environment (Windsurf project)
/home/oib/windsurf/aitbc/ # Local development /home/oib/windsurf/aitbc/.venv/ # Development Python 3.13.5 environment
├── .venv/ # Primary Python environment ├── bin/python # Python executable
├── apps/ # Service applications
├── cli/ # CLI tools (12 command groups) ├── cli/ # CLI tools (12 command groups)
├── scripts/ # Development scripts ├── scripts/ # Development scripts
└── tests/ # Pytest suites └── tests/ # Pytest suites
# Service environment (/opt/aitbc with symlinks)
/opt/aitbc/ # Service root with selective symlinks
├── apps/blockchain-node -> /home/oib/windsurf/aitbc/apps/blockchain-node/
├── apps/coordinator-api -> /home/oib/windsurf/aitbc/apps/coordinator-api/
├── scripts -> /home/oib/windsurf/aitbc/scripts/
└── systemd -> /home/oib/windsurf/aitbc/systemd/
``` ```
**Note**: Services use individual symlinks to specific app directories, while development uses the full Windsurf project workspace.
**Verification Commands:** **Verification Commands:**
```bash ```bash
# Verify symlink structure
ls -la /opt/aitbc/ # Should show individual symlinks, not single repo symlink
ls -la /opt/aitbc/apps/blockchain-node # Should point to windsurf project
python3 --version # Should show Python 3.13.5 python3 --version # Should show Python 3.13.5
ls -la /home/oib/windsurf/aitbc/.venv/bin/python # Check venv ls -la /home/oib/windsurf/aitbc/.venv/bin/python # Check development venv
# Test symlink resolution
readlink -f /opt/aitbc/apps/blockchain-node # Should resolve to windsurf project path
readlink -f /opt/aitbc/scripts # Should resolve to windsurf scripts
``` ```
### Nginx Reverse Proxy ### Nginx Reverse Proxy
@@ -138,7 +192,7 @@ server {
ssh aitbc-cascade # Direct SSH to container ssh aitbc-cascade # Direct SSH to container
``` ```
**GPU Access**: No GPU passthrough. All GPU workloads must run on localhost (windsurf host), not inside incus containers. **GPU Access**: No GPU passthrough. All GPU workloads must run on **at1** (Windsurf development host), not inside incus containers.
**Host Proxies (for localhost GPU clients)** **Host Proxies (for localhost GPU clients)**
- `127.0.0.1:18000` → container `127.0.0.1:8000` (coordinator/marketplace API) - `127.0.0.1:18000` → container `127.0.0.1:8000` (coordinator/marketplace API)
@@ -157,7 +211,7 @@ ssh aitbc1-cascade # Direct SSH to aitbc1 container (incus)
- Proxy device: incus proxy on host maps 127.0.0.1:18001 → 127.0.0.1:8000 inside container - Proxy device: incus proxy on host maps 127.0.0.1:18001 → 127.0.0.1:8000 inside container
- AppArmor profile: unconfined (incus raw.lxc) - AppArmor profile: unconfined (incus raw.lxc)
- Use same deployment patterns as `aitbc` (nginx + services) once provisioned - Use same deployment patterns as `aitbc` (nginx + services) once provisioned
- **GPU Access**: None. Run GPU-dependent tasks on localhost (windsurf host) only. - **GPU Access**: None. Run GPU-dependent tasks on **at1** (Windsurf development host) only.
**Host Proxies (for localhost GPU clients)** **Host Proxies (for localhost GPU clients)**
- `127.0.0.1:18001` → container `127.0.0.1:8000` (coordinator/marketplace API) - `127.0.0.1:18001` → container `127.0.0.1:8000` (coordinator/marketplace API)
@@ -165,11 +219,11 @@ ssh aitbc1-cascade # Direct SSH to aitbc1 container (incus)
- (Optional) Expose marketplace frontend for aitbc1 via an additional proxy/port if needed for UI tests. - (Optional) Expose marketplace frontend for aitbc1 via an additional proxy/port if needed for UI tests.
- Health check suggestion: `curl -s http://127.0.0.1:18001/v1/health` - Health check suggestion: `curl -s http://127.0.0.1:18001/v1/health`
**Localhost dual-miner/dual-client test (shared GPU)** **at1 dual-miner/dual-client test (shared GPU)**
- Run two miners on localhost (GPU shared), targeting each marketplace: - Run two miners on **at1** (GPU shared), targeting each marketplace:
- Miner A → `http://127.0.0.1:18000` - Miner A → `http://127.0.0.1:18000`
- Miner B → `http://127.0.0.1:18001` - Miner B → `http://127.0.0.1:18001`
- Run two clients on localhost for bids/contracts/Ollama answers: - Run two clients on **at1** for bids/contracts/Ollama answers:
- Client 1 → `http://127.0.0.1:18000` - Client 1 → `http://127.0.0.1:18000`
- Client 2 → `http://127.0.0.1:18001` - Client 2 → `http://127.0.0.1:18001`
- Use a shared dev chain so both marketplaces see the same on-chain events. - Use a shared dev chain so both marketplaces see the same on-chain events.
@@ -328,10 +382,10 @@ curl http://aitbc.keisanki.net/rpc/head # Node 3 RPC
- **Consensus**: PoA with 2s block intervals - **Consensus**: PoA with 2s block intervals
- **P2P**: Not connected yet; nodes maintain independent chain state - **P2P**: Not connected yet; nodes maintain independent chain state
## Development Workspace ## Development Workspace (at1)
``` ```
/home/oib/windsurf/aitbc/ # Local development /home/oib/windsurf/aitbc/ # at1 Windsurf development workspace
├── apps/ # Application source (8 apps) ├── apps/ # Application source (8 apps)
├── cli/ # CLI tools (12 command groups) ├── cli/ # CLI tools (12 command groups)
├── scripts/ # Organized scripts (8 subfolders) ├── scripts/ # Organized scripts (8 subfolders)
@@ -358,7 +412,7 @@ ssh aitbc-cascade "systemctl restart coordinator-api"
## Health Checks ## Health Checks
```bash ```bash
# From localhost (via container) # From at1 (via container)
ssh aitbc-cascade "curl -s http://localhost:8000/v1/health" ssh aitbc-cascade "curl -s http://localhost:8000/v1/health"
ssh aitbc-cascade "curl -s http://localhost:9080/rpc/head | jq .height" ssh aitbc-cascade "curl -s http://localhost:9080/rpc/head | jq .height"

View File

@@ -838,7 +838,7 @@ This document tracks components that have been successfully deployed and are ope
- Added troubleshooting guides for Python 3.13.5 specific issues - Added troubleshooting guides for Python 3.13.5 specific issues
**6. Infrastructure & Database Fixes (2026-02-24):** **6. Infrastructure & Database Fixes (2026-02-24):**
- Fixed coordinator API database path to use `/home/oib/windsurf/aitbc/data/coordinator.db` - Fixed coordinator API database path to use `/home/oib/windsurf/aitbc/apps/coordinator-api/data/coordinator.db`
- Updated database configuration with absolute paths for reliability - Updated database configuration with absolute paths for reliability
- Cleaned up old database files and consolidated storage - Cleaned up old database files and consolidated storage
- Fixed FastAPI dependency annotations for Python 3.13.5 compatibility - Fixed FastAPI dependency annotations for Python 3.13.5 compatibility

View File

@@ -1,430 +0,0 @@
#!/usr/bin/env bash
# AITBC Advanced Agent Features Deployment Script
# Deploys cross-chain reputation, agent communication, and advanced learning systems
set -euo pipefail

# ANSI color codes for log output (NC resets to the terminal default).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Log an informational message to stdout.
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

# Log a success message to stdout.
print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Log a warning to stderr (fix: diagnostics belong on stderr so they are
# not swallowed when the script's stdout is piped or redirected).
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}

# Log an error to stderr.
print_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Log a critical notice to stderr.
print_critical() {
    echo -e "${RED}[CRITICAL]${NC} $1" >&2
}

# Configuration: resolve repo-relative paths from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web/src/components"

# Network configuration (positional arguments with safe defaults).
NETWORK=${1:-"localhost"}
VERIFY_CONTRACTS=${2:-"true"}
SKIP_BUILD=${3:-"false"}

echo "🚀 AITBC Advanced Agent Features Deployment"
echo "=========================================="
echo "Network: $NETWORK"
echo "Verify Contracts: $VERIFY_CONTRACTS"
echo "Skip Build: $SKIP_BUILD"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks: required tools and source directories.
# Exits 1 on the first missing prerequisite.
check_prerequisites() {
    print_status "Checking prerequisites..."

    command -v node >/dev/null 2>&1 || { print_error "Node.js is not installed"; exit 1; }
    command -v python3 >/dev/null 2>&1 || { print_error "Python 3 is not installed"; exit 1; }

    [[ -d "$CONTRACTS_DIR" ]] || { print_error "Contracts directory not found: $CONTRACTS_DIR"; exit 1; }
    [[ -d "$SERVICES_DIR" ]] || { print_error "Services directory not found: $SERVICES_DIR"; exit 1; }

    print_success "Prerequisites check completed"
}
# Install the coordinator API's Python dependencies from requirements.txt.
# Fix: invoke pip via `python3 -m pip` so the interpreter verified in
# check_prerequisites receives the packages — a bare `pip` may be missing
# or bound to a different Python installation.
install_python_dependencies() {
    print_status "Installing Python dependencies..."

    cd "$ROOT_DIR/apps/coordinator-api"

    if [[ -f "requirements.txt" ]]; then
        python3 -m pip install -r requirements.txt
        print_success "Python dependencies installed"
    else
        print_error "requirements.txt not found"
        exit 1
    fi
}
# Compile and deploy the advanced agent feature contracts to $NETWORK.
# Fixes: quote "$NETWORK" when passed to hardhat (SC2086) and use
# `read -r` so backslashes in the confirmation are not interpreted (SC2162).
deploy_contracts() {
    print_status "Deploying advanced agent features contracts..."

    cd "$CONTRACTS_DIR"

    # Bootstrap .env from the committed example on first run.
    if [[ ! -f ".env" ]]; then
        print_warning ".env file not found, creating from example..."
        if [[ -f ".env.example" ]]; then
            cp .env.example .env
            print_warning "Please update .env file with your configuration"
        else
            print_error ".env.example file not found"
            exit 1
        fi
    fi

    print_status "Compiling contracts..."
    npx hardhat compile

    # Deploy contracts based on network
    case "$NETWORK" in
        "localhost")
            print_status "Deploying to localhost..."
            npx hardhat run scripts/deploy-advanced-contracts.js --network localhost
            ;;
        "sepolia"|"goerli")
            print_status "Deploying to $NETWORK..."
            npx hardhat run scripts/deploy-advanced-contracts.js --network "$NETWORK"
            ;;
        "mainnet")
            # Require an explicit typed confirmation before spending real ETH.
            print_critical "DEPLOYING TO MAINNET - This will spend real ETH!"
            read -r -p "Type 'DEPLOY-ADVANCED-TO-MAINNET' to continue: " confirmation
            if [[ "$confirmation" != "DEPLOY-ADVANCED-TO-MAINNET" ]]; then
                print_error "Deployment cancelled"
                exit 1
            fi
            npx hardhat run scripts/deploy-advanced-contracts.js --network mainnet
            ;;
        *)
            print_error "Unsupported network: $NETWORK"
            exit 1
            ;;
    esac

    print_success "Advanced contracts deployed"
}
# Verify the deployed contracts on Etherscan when VERIFY_CONTRACTS=true.
# Fix: quote "$NETWORK" when passed to hardhat (SC2086).
verify_contracts() {
    if [[ "$VERIFY_CONTRACTS" == "true" ]]; then
        print_status "Verifying contracts on Etherscan..."
        cd "$CONTRACTS_DIR"

        # Give the network time to confirm deployment transactions first.
        print_status "Waiting for block confirmations..."
        sleep 30

        if npx hardhat run scripts/verify-advanced-contracts.js --network "$NETWORK"; then
            print_success "Contracts verified on Etherscan"
        else
            print_warning "Contract verification failed - manual verification may be required"
        fi
    else
        print_status "Skipping contract verification"
    fi
}
# Build the marketplace web frontend unless SKIP_BUILD=true.
build_frontend() {
    # Guard clause: honor the skip flag before doing any work.
    if [[ "$SKIP_BUILD" == "true" ]]; then
        print_status "Skipping frontend build"
        return
    fi

    print_status "Building frontend components..."
    cd "$ROOT_DIR/apps/marketplace-web"

    # Install node modules only on a fresh checkout.
    if [[ ! -d "node_modules" ]]; then
        print_status "Installing frontend dependencies..."
        npm install
    fi

    npm run build
    print_success "Frontend built successfully"
}
# Frontend deployment is a no-op kept for pipeline symmetry:
# the components ship as part of the main marketplace build.
deploy_frontend() {
    print_status "Deploying frontend components..."
    print_success "Frontend deployment completed"
}
# Write the advanced-features service configuration JSON.
# Fixes: create the config directory first (`cat >` fails if it is
# missing) and quote the heredoc delimiter — the payload is literal JSON
# with no shell expansions, so 'EOF' prevents accidental interpolation.
setup_services() {
    print_status "Setting up backend services..."

    mkdir -p "$ROOT_DIR/apps/coordinator-api/config"

    cat > "$ROOT_DIR/apps/coordinator-api/config/advanced_features.json" << 'EOF'
{
  "cross_chain_reputation": {
    "base_score": 1000,
    "success_bonus": 100,
    "failure_penalty": 50,
    "min_stake_amount": 100000000000000000000,
    "max_delegation_ratio": 1.0,
    "sync_cooldown": 3600,
    "supported_chains": {
      "ethereum": 1,
      "polygon": 137,
      "arbitrum": 42161,
      "optimism": 10,
      "bsc": 56,
      "avalanche": 43114,
      "fantom": 250
    },
    "tier_thresholds": {
      "bronze": 4500,
      "silver": 6000,
      "gold": 7500,
      "platinum": 9000,
      "diamond": 9500
    },
    "stake_rewards": {
      "bronze": 0.05,
      "silver": 0.08,
      "gold": 0.12,
      "platinum": 0.18,
      "diamond": 0.25
    }
  },
  "agent_communication": {
    "min_reputation_score": 1000,
    "base_message_price": 0.001,
    "max_message_size": 100000,
    "message_timeout": 86400,
    "channel_timeout": 2592000,
    "encryption_enabled": true,
    "supported_message_types": [
      "text",
      "data",
      "task_request",
      "task_response",
      "collaboration",
      "notification",
      "system",
      "urgent",
      "bulk"
    ],
    "channel_types": [
      "direct",
      "group",
      "broadcast",
      "private"
    ],
    "encryption_types": [
      "aes256",
      "rsa",
      "hybrid",
      "none"
    ]
  },
  "advanced_learning": {
    "max_model_size": 104857600,
    "max_training_time": 3600,
    "default_batch_size": 32,
    "default_learning_rate": 0.001,
    "convergence_threshold": 0.001,
    "early_stopping_patience": 10,
    "meta_learning_algorithms": [
      "MAML",
      "Reptile",
      "Meta-SGD"
    ],
    "federated_algorithms": [
      "FedAvg",
      "FedProx",
      "FedNova"
    ],
    "reinforcement_algorithms": [
      "DQN",
      "PPO",
      "A3C",
      "SAC"
    ],
    "model_types": [
      "task_planning",
      "bidding_strategy",
      "resource_allocation",
      "communication",
      "collaboration",
      "decision_making",
      "prediction",
      "classification"
    ]
  }
}
EOF

    print_success "Service configuration created"
}
# Run the Python and Hardhat integration test suites when present.
# Fix: use `python3` explicitly — check_prerequisites only guarantees
# python3; a bare `python` may be absent or point at another interpreter.
run_tests() {
    print_status "Running integration tests..."

    cd "$ROOT_DIR"

    if [[ -f "tests/test_advanced_features.py" ]]; then
        python3 -m pytest tests/test_advanced_features.py -v
    fi

    cd "$CONTRACTS_DIR"

    if [[ -f "test/CrossChainReputation.test.js" ]]; then
        npx hardhat test test/CrossChainReputation.test.js
    fi

    if [[ -f "test/AgentCommunication.test.js" ]]; then
        npx hardhat test test/AgentCommunication.test.js
    fi

    print_success "Integration tests completed"
}
# Write a timestamped JSON deployment report into the repo root.
# Fix: declare and assign report_file separately so the command
# substitution's exit status is not masked by `local` (SC2155).
generate_report() {
    print_status "Generating deployment report..."

    local report_file
    report_file="$ROOT_DIR/advanced-features-deployment-report-$(date +%Y%m%d-%H%M%S).json"

    # The delimiter is intentionally unquoted: the report embeds shell
    # expansions ($NETWORK, $(date ...), service paths).
    cat > "$report_file" << EOF
{
  "deployment": {
    "timestamp": "$(date -Iseconds)",
    "network": "$NETWORK",
    "contracts_verified": "$VERIFY_CONTRACTS",
    "frontend_built": "$([[ "$SKIP_BUILD" == "true" ]] && echo "false" || echo "true")"
  },
  "contracts": {
    "CrossChainReputation": "deployed-contracts-$NETWORK.json",
    "AgentCommunication": "deployed-contracts-$NETWORK.json",
    "AgentCollaboration": "deployed-contracts-$NETWORK.json",
    "AgentLearning": "deployed-contracts-$NETWORK.json",
    "AgentMarketplaceV2": "deployed-contracts-$NETWORK.json",
    "ReputationNFT": "deployed-contracts-$NETWORK.json"
  },
  "services": {
    "cross_chain_reputation": "$SERVICES_DIR/cross_chain_reputation.py",
    "agent_communication": "$SERVICES_DIR/agent_communication.py",
    "agent_collaboration": "$SERVICES_DIR/agent_collaboration.py",
    "advanced_learning": "$SERVICES_DIR/advanced_learning.py",
    "agent_autonomy": "$SERVICES_DIR/agent_autonomy.py",
    "marketplace_v2": "$SERVICES_DIR/marketplace_v2.py"
  },
  "frontend": {
    "cross_chain_reputation": "$FRONTEND_DIR/CrossChainReputation.tsx",
    "agent_communication": "$FRONTEND_DIR/AgentCommunication.tsx",
    "agent_collaboration": "$FRONTEND_DIR/AgentCollaboration.tsx",
    "advanced_learning": "$FRONTEND_DIR/AdvancedLearning.tsx",
    "agent_autonomy": "$FRONTEND_DIR/AgentAutonomy.tsx",
    "marketplace_v2": "$FRONTEND_DIR/MarketplaceV2.tsx"
  },
  "next_steps": [
    "1. Initialize cross-chain reputation for existing agents",
    "2. Set up agent communication channels",
    "3. Configure advanced learning models",
    "4. Test agent collaboration protocols",
    "5. Monitor system performance and optimize"
  ]
}
EOF

    print_success "Deployment report saved to $report_file"
}
# Main execution
#
# Orchestrates the full deployment: prerequisite checks, dependency
# install, contract deploy/verify, frontend build, service config,
# tests, and a final JSON report, then prints an operator summary.
main() {
    print_critical "🚀 STARTING ADVANCED AGENT FEATURES DEPLOYMENT"
    # Run deployment steps in dependency order; `set -e` aborts on failure.
    check_prerequisites
    install_python_dependencies
    deploy_contracts
    verify_contracts
    build_frontend
    deploy_frontend
    setup_services
    run_tests
    generate_report
    print_success "🎉 ADVANCED AGENT FEATURES DEPLOYMENT COMPLETED!"
    echo ""
    echo "📊 Deployment Summary:"
    echo " Network: $NETWORK"
    echo " Contracts: CrossChainReputation, AgentCommunication, AgentCollaboration, AgentLearning, AgentMarketplaceV2, ReputationNFT"
    echo " Services: Cross-Chain Reputation, Agent Communication, Advanced Learning, Agent Autonomy"
    echo " Frontend: Cross-Chain Reputation, Agent Communication, Advanced Learning components"
    echo ""
    echo "🔧 Next Steps:"
    echo " 1. Initialize cross-chain reputation: python -m scripts/init_cross_chain_reputation.py"
    echo " 2. Set up agent communication: python -m scripts/setup_agent_communication.py"
    echo " 3. Configure learning models: python -m scripts/configure_learning_models.py"
    echo " 4. Test agent collaboration: python -m scripts/test_agent_collaboration.py"
    echo " 5. Monitor deployment: cat advanced-features-deployment-report-*.json"
    echo ""
    echo "⚠️ Important Notes:"
    echo " - Cross-chain reputation requires multi-chain setup"
    echo " - Agent communication needs proper encryption keys"
    echo " - Advanced learning requires GPU resources for training"
    echo " - Agent autonomy needs careful safety measures"
    echo " - Contract addresses are in deployed-contracts-$NETWORK.json"
    echo " - Frontend components are integrated into the main marketplace"
}
# Handle script interruption: on SIGINT/SIGTERM flag the partially
# completed deployment to the operator and exit non-zero.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,359 +0,0 @@
#!/usr/bin/env bash
# AITBC OpenClaw Autonomous Economics Deployment Script
# Deploys agent wallet, bid strategy, and orchestration components
set -euo pipefail

# ANSI color codes for log output (NC resets to the terminal default).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Log an informational message to stdout.
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

# Log a success message to stdout.
print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Log a warning to stderr (fix: diagnostics belong on stderr so they are
# not swallowed when the script's stdout is piped or redirected).
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}

# Log an error to stderr.
print_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Log a critical notice to stderr.
print_critical() {
    echo -e "${RED}[CRITICAL]${NC} $1" >&2
}

# Configuration: resolve repo-relative paths from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web/src/components"

# Network configuration (positional arguments with safe defaults).
NETWORK=${1:-"localhost"}
VERIFY_CONTRACTS=${2:-"true"}
SKIP_BUILD=${3:-"false"}

echo "🚀 AITBC OpenClaw Autonomous Economics Deployment"
echo "=============================================="
echo "Network: $NETWORK"
echo "Verify Contracts: $VERIFY_CONTRACTS"
echo "Skip Build: $SKIP_BUILD"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks: required tools and source directories.
# Exits 1 on the first missing prerequisite.
check_prerequisites() {
    print_status "Checking prerequisites..."

    command -v node >/dev/null 2>&1 || { print_error "Node.js is not installed"; exit 1; }
    command -v python3 >/dev/null 2>&1 || { print_error "Python 3 is not installed"; exit 1; }

    [[ -d "$CONTRACTS_DIR" ]] || { print_error "Contracts directory not found: $CONTRACTS_DIR"; exit 1; }
    [[ -d "$SERVICES_DIR" ]] || { print_error "Services directory not found: $SERVICES_DIR"; exit 1; }

    print_success "Prerequisites check completed"
}
# Install the coordinator API's Python dependencies from requirements.txt.
# Fix: invoke pip via `python3 -m pip` so the interpreter verified in
# check_prerequisites receives the packages — a bare `pip` may be missing
# or bound to a different Python installation.
install_python_dependencies() {
    print_status "Installing Python dependencies..."

    cd "$ROOT_DIR/apps/coordinator-api"

    if [[ -f "requirements.txt" ]]; then
        python3 -m pip install -r requirements.txt
        print_success "Python dependencies installed"
    else
        print_error "requirements.txt not found"
        exit 1
    fi
}
# Compile and deploy the autonomous economics contracts to $NETWORK.
# Fixes: quote "$NETWORK" when passed to hardhat (SC2086) and use
# `read -r` so backslashes in the confirmation are not interpreted (SC2162).
deploy_contracts() {
    print_status "Deploying autonomous economics smart contracts..."

    cd "$CONTRACTS_DIR"

    # Bootstrap .env from the committed example on first run.
    if [[ ! -f ".env" ]]; then
        print_warning ".env file not found, creating from example..."
        if [[ -f ".env.example" ]]; then
            cp .env.example .env
            print_warning "Please update .env file with your configuration"
        else
            print_error ".env.example file not found"
            exit 1
        fi
    fi

    print_status "Compiling contracts..."
    npx hardhat compile

    # Deploy contracts based on network
    case "$NETWORK" in
        "localhost")
            print_status "Deploying to localhost..."
            npx hardhat run scripts/deploy-agent-contracts.js --network localhost
            ;;
        "sepolia"|"goerli")
            print_status "Deploying to $NETWORK..."
            npx hardhat run scripts/deploy-agent-contracts.js --network "$NETWORK"
            ;;
        "mainnet")
            # Require an explicit typed confirmation before spending real ETH.
            print_critical "DEPLOYING TO MAINNET - This will spend real ETH!"
            read -r -p "Type 'DEPLOY-TO-MAINNET' to continue: " confirmation
            if [[ "$confirmation" != "DEPLOY-TO-MAINNET" ]]; then
                print_error "Deployment cancelled"
                exit 1
            fi
            npx hardhat run scripts/deploy-agent-contracts.js --network mainnet
            ;;
        *)
            print_error "Unsupported network: $NETWORK"
            exit 1
            ;;
    esac

    print_success "Smart contracts deployed"
}
# Verify the deployed contracts on Etherscan when VERIFY_CONTRACTS=true.
# Fix: quote "$NETWORK" when passed to hardhat (SC2086).
verify_contracts() {
    if [[ "$VERIFY_CONTRACTS" == "true" ]]; then
        print_status "Verifying contracts on Etherscan..."
        cd "$CONTRACTS_DIR"

        # Give the network time to confirm deployment transactions first.
        print_status "Waiting for block confirmations..."
        sleep 30

        if npx hardhat run scripts/verify-agent-contracts.js --network "$NETWORK"; then
            print_success "Contracts verified on Etherscan"
        else
            print_warning "Contract verification failed - manual verification may be required"
        fi
    else
        print_status "Skipping contract verification"
    fi
}
# Build the marketplace web frontend unless SKIP_BUILD=true.
build_frontend() {
    # Guard clause: honor the skip flag before doing any work.
    if [[ "$SKIP_BUILD" == "true" ]]; then
        print_status "Skipping frontend build"
        return
    fi

    print_status "Building frontend components..."
    cd "$ROOT_DIR/apps/marketplace-web"

    # Install node modules only on a fresh checkout.
    if [[ ! -d "node_modules" ]]; then
        print_status "Installing frontend dependencies..."
        npm install
    fi

    npm run build
    print_success "Frontend built successfully"
}
# Frontend deployment is a no-op kept for pipeline symmetry:
# the components ship as part of the main marketplace build.
deploy_frontend() {
    print_status "Deploying frontend components..."
    print_success "Frontend deployment completed"
}
# Write the agent economics service configuration JSON.
# Fixes: create the config directory first (`cat >` fails if it is
# missing) and quote the heredoc delimiter — the payload is literal JSON
# with no shell expansions, so 'EOF' prevents accidental interpolation.
setup_services() {
    print_status "Setting up backend services..."

    mkdir -p "$ROOT_DIR/apps/coordinator-api/config"

    cat > "$ROOT_DIR/apps/coordinator-api/config/agent_economics.json" << 'EOF'
{
  "bid_strategy_engine": {
    "market_window": 24,
    "price_history_days": 30,
    "volatility_threshold": 0.15,
    "strategy_weights": {
      "urgent_bid": 0.25,
      "cost_optimized": 0.25,
      "balanced": 0.25,
      "aggressive": 0.15,
      "conservative": 0.10
    }
  },
  "task_decomposition": {
    "max_subtasks": 10,
    "min_subtask_duration": 0.1,
    "complexity_thresholds": {
      "text_processing": 0.3,
      "image_processing": 0.5,
      "audio_processing": 0.4,
      "video_processing": 0.8,
      "data_analysis": 0.6,
      "model_inference": 0.4,
      "model_training": 0.9,
      "compute_intensive": 0.8,
      "io_bound": 0.2,
      "mixed_modal": 0.7
    }
  },
  "agent_orchestrator": {
    "max_concurrent_plans": 10,
    "assignment_timeout": 300,
    "monitoring_interval": 30,
    "retry_limit": 3
  }
}
EOF

    print_success "Service configuration created"
}
# Run the Python and Hardhat integration test suites when present.
# Fix: use `python3` explicitly — check_prerequisites only guarantees
# python3; a bare `python` may be absent or point at another interpreter.
run_tests() {
    print_status "Running integration tests..."

    cd "$ROOT_DIR"

    if [[ -f "tests/test_agent_economics.py" ]]; then
        python3 -m pytest tests/test_agent_economics.py -v
    fi

    cd "$CONTRACTS_DIR"

    if [[ -f "test/AgentWallet.test.js" ]]; then
        npx hardhat test test/AgentWallet.test.js
    fi

    if [[ -f "test/AgentOrchestration.test.js" ]]; then
        npx hardhat test test/AgentOrchestration.test.js
    fi

    print_success "Integration tests completed"
}
# Write a timestamped JSON deployment report into the repo root.
# Fix: declare and assign report_file separately so the command
# substitution's exit status is not masked by `local` (SC2155).
generate_report() {
    print_status "Generating deployment report..."

    local report_file
    report_file="$ROOT_DIR/agent-economics-deployment-report-$(date +%Y%m%d-%H%M%S).json"

    # The delimiter is intentionally unquoted: the report embeds shell
    # expansions ($NETWORK, $(date ...), service paths).
    cat > "$report_file" << EOF
{
  "deployment": {
    "timestamp": "$(date -Iseconds)",
    "network": "$NETWORK",
    "contracts_verified": "$VERIFY_CONTRACTS",
    "frontend_built": "$([[ "$SKIP_BUILD" == "true" ]] && echo "false" || echo "true")"
  },
  "contracts": {
    "AgentWallet": "deployed-contracts-$NETWORK.json",
    "AgentOrchestration": "deployed-contracts-$NETWORK.json",
    "AIPowerRental": "deployed-contracts-$NETWORK.json"
  },
  "services": {
    "bid_strategy_engine": "$SERVICES_DIR/bid_strategy_engine.py",
    "task_decomposition": "$SERVICES_DIR/task_decomposition.py",
    "agent_orchestrator": "$SERVICES_DIR/agent_orchestrator.py",
    "agent_wallet_service": "$SERVICES_DIR/agent_wallet_service.py"
  },
  "frontend": {
    "agent_wallet": "$FRONTEND_DIR/AgentWallet.tsx",
    "bid_strategy": "$FRONTEND_DIR/BidStrategy.tsx",
    "agent_orchestration": "$FRONTEND_DIR/AgentOrchestration.tsx",
    "task_decomposition": "$FRONTEND_DIR/TaskDecomposition.tsx"
  },
  "next_steps": [
    "1. Configure agent wallet funding",
    "2. Set up bid strategy parameters",
    "3. Initialize agent orchestrator",
    "4. Test autonomous agent workflows",
    "5. Monitor agent performance"
  ]
}
EOF

    print_success "Deployment report saved to $report_file"
}
# Main execution
#
# Orchestrates the full deployment: prerequisite checks, dependency
# install, contract deploy/verify, frontend build, service config,
# tests, and a final JSON report, then prints an operator summary.
main() {
    print_critical "🚀 STARTING AUTONOMOUS ECONOMICS DEPLOYMENT"
    # Run deployment steps in dependency order; `set -e` aborts on failure.
    check_prerequisites
    install_python_dependencies
    deploy_contracts
    verify_contracts
    build_frontend
    deploy_frontend
    setup_services
    run_tests
    generate_report
    print_success "🎉 AUTONOMOUS ECONOMICS DEPLOYMENT COMPLETED!"
    echo ""
    echo "📊 Deployment Summary:"
    echo " Network: $NETWORK"
    echo " Contracts: AgentWallet, AgentOrchestration, AIPowerRental (extended)"
    echo " Services: Bid Strategy, Task Decomposition, Agent Orchestrator"
    echo " Frontend: Agent Wallet, Bid Strategy, Orchestration components"
    echo ""
    echo "🔧 Next Steps:"
    echo " 1. Configure agent wallet: python -m scripts/setup_agent_wallets.py"
    echo " 2. Test bid strategies: python -m scripts/test_bid_strategies.py"
    echo " 3. Initialize orchestrator: python -m scripts/init_orchestrator.py"
    echo " 4. Monitor deployment: cat agent-economics-deployment-report-*.json"
    echo ""
    echo "⚠️ Important Notes:"
    echo " - Agent wallets must be funded before use"
    echo " - Bid strategies require market data initialization"
    echo " - Agent orchestrator needs provider registration"
    echo " - Contract addresses are in deployed-contracts-$NETWORK.json"
    echo " - Frontend components are integrated into the main marketplace"
}
# Handle script interruption: on SIGINT/SIGTERM flag the partially
# completed deployment to the operator and exit non-zero.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,334 +0,0 @@
#!/usr/bin/env bash
# AITBC Decentralized Memory & Storage Deployment Script
# Deploys IPFS/Filecoin integration, smart contracts, and frontend components
set -euo pipefail

# ANSI color codes for log output (NC resets to the terminal default).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Log an informational message to stdout.
print_status() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

# Log a success message to stdout.
print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Log a warning to stderr (fix: diagnostics belong on stderr so they are
# not swallowed when the script's stdout is piped or redirected).
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}

# Log an error to stderr.
print_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Log a critical notice to stderr.
print_critical() {
    echo -e "${RED}[CRITICAL]${NC} $1" >&2
}

# Configuration: resolve repo-relative paths from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web/src/components"

# Network configuration (positional arguments with safe defaults).
NETWORK=${1:-"localhost"}
VERIFY_CONTRACTS=${2:-"true"}
SKIP_BUILD=${3:-"false"}

echo "🚀 AITBC Decentralized Memory & Storage Deployment"
echo "=============================================="
echo "Network: $NETWORK"
echo "Verify Contracts: $VERIFY_CONTRACTS"
echo "Skip Build: $SKIP_BUILD"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks: required tools and source directories.
# IPFS is optional — a missing binary only produces a warning.
# Exits 1 on the first missing hard prerequisite.
check_prerequisites() {
    print_status "Checking prerequisites..."

    command -v node >/dev/null 2>&1 || { print_error "Node.js is not installed"; exit 1; }
    command -v python3 >/dev/null 2>&1 || { print_error "Python 3 is not installed"; exit 1; }

    # Optional dependency: warn instead of failing.
    if command -v ipfs >/dev/null 2>&1; then
        print_success "IPFS is installed"
    else
        print_warning "IPFS is not installed - some features may not work"
    fi

    [[ -d "$CONTRACTS_DIR" ]] || { print_error "Contracts directory not found: $CONTRACTS_DIR"; exit 1; }
    [[ -d "$SERVICES_DIR" ]] || { print_error "Services directory not found: $SERVICES_DIR"; exit 1; }

    print_success "Prerequisites check completed"
}
# Install the coordinator API's Python dependencies from requirements.txt.
# Fix: invoke pip via `python3 -m pip` so the interpreter verified in
# check_prerequisites receives the packages — a bare `pip` may be missing
# or bound to a different Python installation.
install_python_dependencies() {
    print_status "Installing Python dependencies..."

    cd "$ROOT_DIR/apps/coordinator-api"

    if [[ -f "requirements.txt" ]]; then
        python3 -m pip install -r requirements.txt
        print_success "Python dependencies installed"
    else
        print_error "requirements.txt not found"
        exit 1
    fi
}
# Compile and deploy the decentralized memory contracts to $NETWORK.
# Fixes: quote "$NETWORK" when passed to hardhat (SC2086) and use
# `read -r` so backslashes in the confirmation are not interpreted (SC2162).
deploy_contracts() {
    print_status "Deploying decentralized memory smart contracts..."

    cd "$CONTRACTS_DIR"

    # Bootstrap .env from the committed example on first run.
    if [[ ! -f ".env" ]]; then
        print_warning ".env file not found, creating from example..."
        if [[ -f ".env.example" ]]; then
            cp .env.example .env
            print_warning "Please update .env file with your configuration"
        else
            print_error ".env.example file not found"
            exit 1
        fi
    fi

    print_status "Compiling contracts..."
    npx hardhat compile

    # Deploy contracts based on network
    case "$NETWORK" in
        "localhost")
            print_status "Deploying to localhost..."
            npx hardhat run scripts/deploy-memory-contracts.js --network localhost
            ;;
        "sepolia"|"goerli")
            print_status "Deploying to $NETWORK..."
            npx hardhat run scripts/deploy-memory-contracts.js --network "$NETWORK"
            ;;
        "mainnet")
            # Require an explicit typed confirmation before spending real ETH.
            print_critical "DEPLOYING TO MAINNET - This will spend real ETH!"
            read -r -p "Type 'DEPLOY-TO-MAINNET' to continue: " confirmation
            if [[ "$confirmation" != "DEPLOY-TO-MAINNET" ]]; then
                print_error "Deployment cancelled"
                exit 1
            fi
            npx hardhat run scripts/deploy-memory-contracts.js --network mainnet
            ;;
        *)
            print_error "Unsupported network: $NETWORK"
            exit 1
            ;;
    esac

    print_success "Smart contracts deployed"
}
# Verify the deployed contracts on Etherscan when VERIFY_CONTRACTS=true.
# Fix: quote "$NETWORK" when passed to hardhat (SC2086).
verify_contracts() {
    if [[ "$VERIFY_CONTRACTS" == "true" ]]; then
        print_status "Verifying contracts on Etherscan..."
        cd "$CONTRACTS_DIR"

        # Give the network time to confirm deployment transactions first.
        print_status "Waiting for block confirmations..."
        sleep 30

        if npx hardhat run scripts/verify-memory-contracts.js --network "$NETWORK"; then
            print_success "Contracts verified on Etherscan"
        else
            print_warning "Contract verification failed - manual verification may be required"
        fi
    else
        print_status "Skipping contract verification"
    fi
}
# Build the marketplace web frontend unless SKIP_BUILD=true.
build_frontend() {
    # Guard clause: honor the skip flag before doing any work.
    if [[ "$SKIP_BUILD" == "true" ]]; then
        print_status "Skipping frontend build"
        return
    fi

    print_status "Building frontend components..."
    cd "$ROOT_DIR/apps/marketplace-web"

    # Install node modules only on a fresh checkout.
    if [[ ! -d "node_modules" ]]; then
        print_status "Installing frontend dependencies..."
        npm install
    fi

    npm run build
    print_success "Frontend built successfully"
}
# Frontend deployment is a no-op kept for pipeline symmetry:
# the components ship as part of the main marketplace build.
deploy_frontend() {
    print_status "Deploying frontend components..."
    print_success "Frontend deployment completed"
}
# Setup IPFS node
#
# Best-effort: if the ipfs binary exists, ensure a daemon is running;
# otherwise warn and continue (IPFS is an optional dependency here).
setup_ipfs() {
    print_status "Setting up IPFS node..."
    # Check if IPFS is running
    if command -v ipfs &> /dev/null; then
        # `ipfs swarm peers` succeeding is used as a liveness probe for a
        # running daemon.
        if ipfs swarm peers &> /dev/null; then
            print_success "IPFS node is running"
        else
            print_status "Starting IPFS daemon..."
            # NOTE(review): the daemon is backgrounded and given a fixed 5s
            # to come up — there is no readiness check or PID tracking, so a
            # slow start may leave later steps without IPFS. TODO: poll
            # `ipfs swarm peers` instead of sleeping.
            ipfs daemon --init &
            sleep 5
            print_success "IPFS daemon started"
        fi
    else
        print_warning "IPFS not installed - skipping IPFS setup"
    fi
}
# Run the Python and Hardhat integration test suites when present.
# Fix: use `python3` explicitly — check_prerequisites only guarantees
# python3; a bare `python` may be absent or point at another interpreter.
run_tests() {
    print_status "Running integration tests..."

    cd "$ROOT_DIR"

    if [[ -f "tests/test_memory_integration.py" ]]; then
        python3 -m pytest tests/test_memory_integration.py -v
    fi

    cd "$CONTRACTS_DIR"

    if [[ -f "test/AgentMemory.test.js" ]]; then
        npx hardhat test test/AgentMemory.test.js
    fi

    if [[ -f "test/KnowledgeGraphMarket.test.js" ]]; then
        npx hardhat test test/KnowledgeGraphMarket.test.js
    fi

    print_success "Integration tests completed"
}
# Write a timestamped JSON deployment report into the repo root.
# Fix: declare and assign report_file separately so the command
# substitution's exit status is not masked by `local` (SC2155).
generate_report() {
    print_status "Generating deployment report..."

    local report_file
    report_file="$ROOT_DIR/decentralized-memory-deployment-report-$(date +%Y%m%d-%H%M%S).json"

    # The delimiter is intentionally unquoted: the report embeds shell
    # expansions ($NETWORK, $(date ...), service paths).
    cat > "$report_file" << EOF
{
  "deployment": {
    "timestamp": "$(date -Iseconds)",
    "network": "$NETWORK",
    "contracts_verified": "$VERIFY_CONTRACTS",
    "frontend_built": "$([[ "$SKIP_BUILD" == "true" ]] && echo "false" || echo "true")"
  },
  "contracts": {
    "AgentMemory": "deployed-contracts-$NETWORK.json",
    "KnowledgeGraphMarket": "deployed-contracts-$NETWORK.json",
    "MemoryVerifier": "deployed-contracts-$NETWORK.json"
  },
  "services": {
    "ipfs_storage_service": "$SERVICES_DIR/ipfs_storage_service.py",
    "memory_manager": "$SERVICES_DIR/memory_manager.py",
    "knowledge_graph_market": "$SERVICES_DIR/knowledge_graph_market.py"
  },
  "frontend": {
    "knowledge_marketplace": "$FRONTEND_DIR/KnowledgeMarketplace.tsx",
    "memory_manager": "$FRONTEND_DIR/MemoryManager.tsx"
  },
  "next_steps": [
    "1. Configure IPFS node settings",
    "2. Set up Filecoin storage deals",
    "3. Test memory upload/retrieval functionality",
    "4. Verify knowledge graph marketplace functionality",
    "5. Monitor system performance"
  ]
}
EOF

    print_success "Deployment report saved to $report_file"
}
# Main execution
#
# Orchestrates the full deployment: prerequisite checks, dependency
# install, contract deploy/verify, frontend build, IPFS setup, tests,
# and a final JSON report, then prints an operator summary.
main() {
    print_critical "🚀 STARTING DECENTRALIZED MEMORY DEPLOYMENT"
    # Run deployment steps in dependency order; `set -e` aborts on failure.
    check_prerequisites
    install_python_dependencies
    deploy_contracts
    verify_contracts
    build_frontend
    deploy_frontend
    setup_ipfs
    run_tests
    generate_report
    print_success "🎉 DECENTRALIZED MEMORY DEPLOYMENT COMPLETED!"
    echo ""
    echo "📊 Deployment Summary:"
    echo " Network: $NETWORK"
    echo " Contracts: AgentMemory, KnowledgeGraphMarket, MemoryVerifier"
    echo " Services: IPFS Storage, Memory Manager, Knowledge Graph Market"
    echo " Frontend: Knowledge Marketplace, Memory Manager"
    echo ""
    echo "🔧 Next Steps:"
    echo " 1. Configure IPFS node: ipfs config show"
    echo " 2. Test memory functionality: python -m pytest tests/"
    echo " 3. Access frontend: http://localhost:3000/marketplace/"
    echo " 4. Monitor deployment: cat decentralized-memory-deployment-report-*.json"
    echo ""
    echo "⚠️ Important Notes:"
    echo " - IPFS node should be running for full functionality"
    echo " - Filecoin storage deals require additional configuration"
    echo " - Smart contract addresses are in deployed-contracts-$NETWORK.json"
    echo " - Frontend components are integrated into the main marketplace"
}
# Handle script interruption: on SIGINT/SIGTERM flag the partially
# completed deployment to the operator and exit non-zero.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,533 +0,0 @@
#!/usr/bin/env bash
# AITBC Developer Ecosystem Complete Deployment Orchestration
# Deploys the entire Developer Ecosystem system (contracts + frontend + API)
#
# Usage: ./deploy-developer-ecosystem.sh [environment] [skip-tests]
# Environment: testnet, mainnet
# Skip-Tests: true/false - whether to skip integration tests
set -euo pipefail

# ANSI color codes for terminal log output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# _log COLOR LABEL MESSAGE — shared formatter behind the print_* helpers.
_log() {
  echo -e "${1}[${2}]${NC} ${3}"
}

# Informational progress message.
print_status() {
  _log "$BLUE" INFO "$1"
}

# Success confirmation message.
print_success() {
  _log "$GREEN" SUCCESS "$1"
}

# Non-fatal warning message.
print_warning() {
  _log "$YELLOW" WARNING "$1"
}

# Error message (does not exit by itself).
print_error() {
  _log "$RED" ERROR "$1"
}
# Parse arguments
# $1: target environment (default: testnet); $2: "true" to skip integration tests.
ENVIRONMENT="${1:-testnet}"
SKIP_TESTS="${2:-false}"
# Resolve the directory containing this script, then the repository root one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
echo "🚀 AITBC Developer Ecosystem Complete Deployment"
echo "==============================================="
echo "Environment: $ENVIRONMENT"
echo "Skip Tests: $SKIP_TESTS"
echo "Root Directory: $ROOT_DIR"
echo ""
# Deployment phases
# NOTE(review): PHASES is not referenced later in this script — the phases are
# invoked explicitly from main(). Presumably documentation only; confirm before removing.
PHASES=("contracts" "frontend" "api" "integration-tests" "monitoring")
# Check prerequisites
# Verifies the required directories and deployment scripts exist (exits 1 when
# missing) and probes SSH access to the frontend host. Sets the global
# SKIP_FRONTEND=true when SSH is unreachable so later phases degrade gracefully.
check_prerequisites() {
print_status "Checking deployment prerequisites..."
# Check if required directories exist
if [[ ! -d "$ROOT_DIR/contracts" ]]; then
print_error "Contracts directory not found"
exit 1
fi
if [[ ! -d "$ROOT_DIR/apps/marketplace-web" ]]; then
print_error "Frontend directory not found"
exit 1
fi
# Check if required scripts exist
if [[ ! -f "$ROOT_DIR/contracts/scripts/deploy-developer-ecosystem.sh" ]]; then
print_error "Contract deployment script not found"
exit 1
fi
if [[ ! -f "$ROOT_DIR/apps/marketplace-web/scripts/deploy-frontend.sh" ]]; then
print_error "Frontend deployment script not found"
exit 1
fi
# Check SSH connection for frontend deployment
# NOTE(review): consider `ssh -n` so ssh cannot consume this script's stdin.
if ! ssh -o ConnectTimeout=5 aitbc-cascade "echo 'SSH connection successful'" 2>/dev/null; then
print_warning "Cannot connect to frontend server. Frontend deployment will be skipped."
SKIP_FRONTEND=true
else
SKIP_FRONTEND=false
fi
print_success "Prerequisites check completed"
}
# Phase 1: Deploy Smart Contracts
# Delegates to contracts/scripts/deploy-developer-ecosystem.sh and copies the
# resulting deployed-contracts-<env>.json to the repo root for later phases.
# Returns 1 (handled by main) when the underlying deploy script fails.
deploy_contracts() {
print_status "Phase 1: Deploying Smart Contracts"
echo "====================================="
cd "$ROOT_DIR/contracts"
# Run contract deployment
# NOTE(review): the second argument "true" presumably means "skip tests" —
# confirm against the contract deployment script's interface.
if ./scripts/deploy-developer-ecosystem.sh "$ENVIRONMENT" "true"; then
print_success "Smart contracts deployed successfully"
# Copy deployment info to root directory
if [[ -f "deployed-contracts-$ENVIRONMENT.json" ]]; then
cp "deployed-contracts-$ENVIRONMENT.json" "$ROOT_DIR/"
print_success "Contract deployment info copied to root directory"
fi
else
print_error "Smart contract deployment failed"
return 1
fi
echo ""
}
# Phase 2: Deploy Frontend
# Skipped entirely when check_prerequisites could not reach the frontend host
# (SKIP_FRONTEND=true). Regenerates .env.local, then builds and deploys.
deploy_frontend() {
if [[ "$SKIP_FRONTEND" == "true" ]]; then
print_warning "Skipping frontend deployment (SSH connection failed)"
return 0
fi
print_status "Phase 2: Deploying Frontend"
echo "============================"
cd "$ROOT_DIR/apps/marketplace-web"
# Update environment variables with contract addresses
update_frontend_env
# Build and deploy frontend
if ./scripts/deploy-frontend.sh "production" "aitbc-cascade"; then
print_success "Frontend deployed successfully"
else
print_error "Frontend deployment failed"
return 1
fi
echo ""
}
# Update frontend environment variables
# Extracts the deployed contract addresses from deployed-contracts-$ENVIRONMENT.json
# and regenerates the Vite .env.local file in the current directory.
# Returns 1 when the deployment file is missing or any address cannot be parsed.
update_frontend_env() {
  print_status "Updating frontend environment variables..."

  local deployment_file="$ROOT_DIR/deployed-contracts-$ENVIRONMENT.json"
  if [[ ! -f "$deployment_file" ]]; then
    print_error "Contract deployment file not found: $deployment_file"
    return 1
  fi

  # Extract contract addresses.
  # Declaration is split from assignment so a jq failure is not masked by
  # `local` (SC2155); previously a parse error silently produced empty values
  # that were written into .env.local.
  local aitbc_token agent_bounty agent_staking performance_verifier dispute_resolution escrow_service
  aitbc_token=$(jq -r '.contracts.AITBCToken.address' "$deployment_file") || { print_error "Failed to parse AITBCToken address"; return 1; }
  agent_bounty=$(jq -r '.contracts.AgentBounty.address' "$deployment_file") || { print_error "Failed to parse AgentBounty address"; return 1; }
  agent_staking=$(jq -r '.contracts.AgentStaking.address' "$deployment_file") || { print_error "Failed to parse AgentStaking address"; return 1; }
  performance_verifier=$(jq -r '.contracts.PerformanceVerifier.address' "$deployment_file") || { print_error "Failed to parse PerformanceVerifier address"; return 1; }
  dispute_resolution=$(jq -r '.contracts.DisputeResolution.address' "$deployment_file") || { print_error "Failed to parse DisputeResolution address"; return 1; }
  escrow_service=$(jq -r '.contracts.EscrowService.address' "$deployment_file") || { print_error "Failed to parse EscrowService address"; return 1; }

  # Create .env.local file (unquoted delimiter: $vars and $(...) expand here).
  cat > .env.local << EOF
# AITBC Developer Ecosystem - Frontend Environment
# Generated on $(date -Iseconds)
# Contract Addresses
VITE_AITBC_TOKEN_ADDRESS=$aitbc_token
VITE_AGENT_BOUNTY_ADDRESS=$agent_bounty
VITE_AGENT_STAKING_ADDRESS=$agent_staking
VITE_PERFORMANCE_VERIFIER_ADDRESS=$performance_verifier
VITE_DISPUTE_RESOLUTION_ADDRESS=$dispute_resolution
VITE_ESCROW_SERVICE_ADDRESS=$escrow_service
# API Configuration
VITE_API_BASE_URL=http://localhost:3001/api/v1
VITE_WS_URL=ws://localhost:3001
# Network Configuration
VITE_NETWORK_NAME=$ENVIRONMENT
VITE_CHAIN_ID=$(get_chain_id "$ENVIRONMENT")
# Application Configuration
VITE_APP_NAME=AITBC Developer Ecosystem
VITE_APP_VERSION=1.0.0
VITE_APP_DESCRIPTION=Developer Ecosystem & DAO Grants System
EOF

  print_success "Frontend environment variables updated"
}
# Get chain ID for environment
# Maps a network name ($1) to its numeric Ethereum chain id on stdout.
# Unrecognized names (including "mainnet") resolve to mainnet's chain id 1.
get_chain_id() {
  local network=$1
  local chain_id
  case "$network" in
    localhost | hardhat) chain_id=31337 ;;
    sepolia) chain_id=11155111 ;;
    goerli) chain_id=5 ;;
    *) chain_id=1 ;; # mainnet and any unknown network
  esac
  echo "$chain_id"
}
# Phase 3: Deploy API Services
# Best-effort: skipped with a warning when the coordinator-api deploy script is
# absent; returns 1 when the script exists but fails.
deploy_api() {
print_status "Phase 3: Deploying API Services"
echo "=================================="
# Check if API deployment script exists
if [[ -f "$ROOT_DIR/apps/coordinator-api/deploy_services.sh" ]]; then
cd "$ROOT_DIR/apps/coordinator-api"
if ./deploy_services.sh "$ENVIRONMENT"; then
print_success "API services deployed successfully"
else
print_error "API services deployment failed"
return 1
fi
else
print_warning "API deployment script not found. Skipping API deployment."
fi
echo ""
}
# Phase 4: Run Integration Tests
# Honours SKIP_TESTS; otherwise refreshes the test config with the deployed
# contract addresses and runs the full suite. Returns 1 on test failure.
run_integration_tests() {
if [[ "$SKIP_TESTS" == "true" ]]; then
print_warning "Skipping integration tests"
return 0
fi
print_status "Phase 4: Running Integration Tests"
echo "====================================="
cd "$ROOT_DIR"
# Update test configuration with deployed contracts
update_test_config
# Run comprehensive test suite
if ./tests/run_all_tests.sh; then
print_success "Integration tests passed"
else
print_error "Integration tests failed"
return 1
fi
echo ""
}
# Update test configuration
# Embeds the deployed contract addresses into tests/test-config-<env>.json.
# Degrades gracefully (returns 0) when no deployment file exists yet.
update_test_config() {
print_status "Updating test configuration..."
local deployment_file="$ROOT_DIR/deployed-contracts-$ENVIRONMENT.json"
if [[ ! -f "$deployment_file" ]]; then
print_warning "Contract deployment file not found. Using default test configuration."
return 0
fi
# Create test configuration
# The deployment JSON is inlined verbatim under the "contracts" key.
cat > "$ROOT_DIR/tests/test-config-$ENVIRONMENT.json" << EOF
{
"environment": "$ENVIRONMENT",
"contracts": $(cat "$deployment_file"),
"api": {
"base_url": "http://localhost:3001/api/v1",
"timeout": 30000
},
"frontend": {
"base_url": "http://aitbc.bubuit.net/marketplace",
"timeout": 10000
}
}
EOF
print_success "Test configuration updated"
}
# Phase 5: Setup Monitoring
# Writes the monitoring config file and generates the health-check helper script.
setup_monitoring() {
print_status "Phase 5: Setting up Monitoring"
echo "==============================="
# Create monitoring configuration
create_monitoring_config
# Setup health checks
setup_health_checks
print_success "Monitoring setup completed"
echo ""
}
# Create monitoring configuration
# Writes monitoring-config-$ENVIRONMENT.json with endpoint and alert settings.
# BUGFIX: guard the deployment file like update_test_config does — the previous
# unguarded $(cat "$deployment_file") expanded to nothing when the file was
# missing, producing invalid JSON ("contracts": ,).
create_monitoring_config() {
  print_status "Creating monitoring configuration..."

  local deployment_file="$ROOT_DIR/deployed-contracts-$ENVIRONMENT.json"
  local contracts_json="{}"
  if [[ -f "$deployment_file" ]]; then
    contracts_json=$(cat "$deployment_file")
  else
    print_warning "Contract deployment file not found. Writing empty contracts object."
  fi

  cat > "$ROOT_DIR/monitoring-config-$ENVIRONMENT.json" << EOF
{
"environment": "$ENVIRONMENT",
"timestamp": "$(date -Iseconds)",
"contracts": $contracts_json,
"monitoring": {
"enabled": true,
"interval": 60,
"endpoints": [
{
"name": "Frontend Health",
"url": "http://aitbc.bubuit.net/marketplace/",
"method": "GET",
"expected_status": 200
},
{
"name": "API Health",
"url": "http://localhost:3001/api/v1/health",
"method": "GET",
"expected_status": 200
}
],
"alerts": {
"email": "admin@aitbc.dev",
"slack_webhook": "https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK"
}
}
}
EOF

  print_success "Monitoring configuration created"
}
# Setup health checks
# Generates scripts/health-check.sh. The heredoc delimiter is quoted, so the
# generated script's $variables are written literally and only expand when the
# generated script itself runs.
setup_health_checks() {
print_status "Setting up health checks..."
# Create health check script
cat > "$ROOT_DIR/scripts/health-check.sh" << 'EOF'
#!/bin/bash
# AITBC Developer Ecosystem Health Check Script
ENVIRONMENT="${1:-testnet}"
CONFIG_FILE="monitoring-config-$ENVIRONMENT.json"
if [[ ! -f "$CONFIG_FILE" ]]; then
echo "❌ Monitoring configuration not found: $CONFIG_FILE"
exit 1
fi
echo "🔍 Running health checks for $ENVIRONMENT..."
echo "=========================================="
# Check frontend
FRONTEND_URL=$(jq -r '.monitoring.endpoints[0].url' "$CONFIG_FILE")
FRONTEND_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$FRONTEND_URL" || echo "000")
if [[ "$FRONTEND_STATUS" == "200" ]]; then
echo "✅ Frontend: $FRONTEND_URL (Status: $FRONTEND_STATUS)"
else
echo "❌ Frontend: $FRONTEND_URL (Status: $FRONTEND_STATUS)"
fi
# Check API
API_URL=$(jq -r '.monitoring.endpoints[1].url' "$CONFIG_FILE")
API_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$API_URL" || echo "000")
if [[ "$API_STATUS" == "200" ]]; then
echo "✅ API: $API_URL (Status: $API_STATUS)"
else
echo "❌ API: $API_URL (Status: $API_STATUS)"
fi
echo ""
echo "Health check completed at $(date)"
EOF
chmod +x "$ROOT_DIR/scripts/health-check.sh"
print_success "Health check script created"
}
# Generate deployment report
# Writes a timestamped JSON summary of the run. Reads the phase-status globals
# (CONTRACTS_STATUS, FRONTEND_STATUS, API_STATUS, TESTS_STATUS) set by main().
generate_deployment_report() {
print_status "Generating deployment report..."
local report_file="$ROOT_DIR/deployment-report-$ENVIRONMENT-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"environment": "$ENVIRONMENT",
"timestamp": "$(date -Iseconds)",
"skip_tests": "$SKIP_TESTS",
"skip_frontend": "$SKIP_FRONTEND"
},
"phases": {
"contracts": {
"status": "$CONTRACTS_STATUS",
"file": "deployed-contracts-$ENVIRONMENT.json"
},
"frontend": {
"status": "$FRONTEND_STATUS",
"url": "http://aitbc.bubuit.net/marketplace/"
},
"api": {
"status": "$API_STATUS",
"url": "http://localhost:3001/api/v1"
},
"tests": {
"status": "$TESTS_STATUS",
"skipped": "$SKIP_TESTS"
},
"monitoring": {
"status": "completed",
"config": "monitoring-config-$ENVIRONMENT.json"
}
},
"urls": {
"frontend": "http://aitbc.bubuit.net/marketplace/",
"api": "http://localhost:3001/api/v1",
"health_check": "./scripts/health-check.sh $ENVIRONMENT"
}
}
EOF
print_success "Deployment report saved to $report_file"
}
# Rollback function
# Best-effort rollback invoked from the INT/TERM trap: contracts are left as-is
# (manual intervention), the frontend is restored from the on-host backup copy.
# NOTE(review): reads SKIP_FRONTEND, which is unset until check_prerequisites
# runs — an early interrupt would trip `set -u` here. TODO confirm/guard.
rollback() {
print_warning "Rolling back deployment..."
# Rollback contracts (if needed)
print_status "Contract rollback not implemented (manual intervention required)"
# Rollback frontend
if [[ "$SKIP_FRONTEND" != "true" ]]; then
print_status "Rolling back frontend..."
ssh aitbc-cascade "cp -r /var/www/aitbc.bubuit.net/marketplace.backup /var/www/aitbc.bubuit.net/marketplace" 2>/dev/null || true
ssh aitbc-cascade "systemctl reload nginx" 2>/dev/null || true
fi
print_warning "Rollback completed. Please verify system status."
}
# Main execution
# Orchestrates all phases, tracking per-phase status in globals that
# generate_deployment_report embeds in its JSON summary. A contracts failure
# aborts the run; frontend/API/test failures only warn and continue.
main() {
print_status "Starting complete Developer Ecosystem deployment..."
# Initialize status variables
CONTRACTS_STATUS="pending"
FRONTEND_STATUS="pending"
API_STATUS="pending"
TESTS_STATUS="pending"
# Check prerequisites
check_prerequisites
# Execute deployment phases
if deploy_contracts; then
CONTRACTS_STATUS="success"
else
CONTRACTS_STATUS="failed"
print_error "Contract deployment failed. Aborting."
exit 1
fi
if deploy_frontend; then
FRONTEND_STATUS="success"
else
FRONTEND_STATUS="failed"
print_warning "Frontend deployment failed, but continuing..."
fi
if deploy_api; then
API_STATUS="success"
else
API_STATUS="failed"
print_warning "API deployment failed, but continuing..."
fi
if run_integration_tests; then
TESTS_STATUS="success"
else
TESTS_STATUS="failed"
if [[ "$SKIP_TESTS" != "true" ]]; then
print_error "Integration tests failed. Deployment may be unstable."
fi
fi
# Setup monitoring
setup_monitoring
# Generate deployment report
generate_deployment_report
print_success "🎉 Developer Ecosystem deployment completed!"
echo ""
echo "📊 Deployment Summary:"
echo " Contracts: $CONTRACTS_STATUS"
echo " Frontend: $FRONTEND_STATUS"
echo " API: $API_STATUS"
echo " Tests: $TESTS_STATUS"
echo ""
echo "🌐 Application URLs:"
echo " Frontend: http://aitbc.bubuit.net/marketplace/"
echo " API: http://localhost:3001/api/v1"
echo ""
echo "🔧 Management Commands:"
echo " Health Check: ./scripts/health-check.sh $ENVIRONMENT"
echo " View Report: cat deployment-report-$ENVIRONMENT-*.json"
echo ""
echo "📋 Next Steps:"
echo " 1. Test the application in browser"
echo " 2. Verify all functionality works"
echo " 3. Monitor system health"
echo " 4. Set up automated monitoring"
}
# Handle script interruption
# On SIGINT/SIGTERM, attempt a best-effort rollback before exiting non-zero.
trap 'print_error "Deployment interrupted"; rollback; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,634 +0,0 @@
#!/usr/bin/env bash
# AITBC Developer Ecosystem - Mainnet Deployment Script
# PRODUCTION DEPLOYMENT - Use with extreme caution
#
# Usage: ./deploy-mainnet.sh [--dry-run] [--skip-verification] [--emergency-only]
# --dry-run: Simulate deployment without executing transactions
# --skip-verification: Skip Etherscan verification (faster but less transparent)
# --emergency-only: Only deploy emergency contracts (DisputeResolution, EscrowService)
set -euo pipefail

# ANSI color codes for terminal log output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color

# _say COLOR LABEL MESSAGE — shared formatter behind the print_* helpers.
_say() {
  echo -e "${1}[${2}]${NC} ${3}"
}

# Informational progress message.
print_status() { _say "$BLUE" INFO "$1"; }

# Success confirmation message.
print_success() { _say "$GREEN" SUCCESS "$1"; }

# Non-fatal warning message.
print_warning() { _say "$YELLOW" WARNING "$1"; }

# Error message (does not exit by itself).
print_error() { _say "$RED" ERROR "$1"; }

# Highest-severity message for production-critical events.
print_critical() { _say "$MAGENTA" CRITICAL "$1"; }
# Parse arguments
# Long-option flags only: --dry-run, --skip-verification, --emergency-only.
# Any unrecognized argument aborts with exit 1.
DRY_RUN=false
SKIP_VERIFICATION=false
EMERGENCY_ONLY=false
while [[ $# -gt 0 ]]; do
case $1 in
--dry-run)
DRY_RUN=true
shift
;;
--skip-verification)
SKIP_VERIFICATION=true
shift
;;
--emergency-only)
EMERGENCY_ONLY=true
shift
;;
*)
print_error "Unknown argument: $1"
exit 1
;;
esac
done
# Resolve this script's directory and the repository root one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
echo "🚀 AITBC Developer Ecosystem - MAINNET DEPLOYMENT"
echo "================================================="
echo "Environment: PRODUCTION"
echo "Dry Run: $DRY_RUN"
echo "Skip Verification: $SKIP_VERIFICATION"
echo "Emergency Only: $EMERGENCY_ONLY"
echo "Timestamp: $(date -Iseconds)"
echo ""
# CRITICAL: Production deployment confirmation
# Interactive gate: requires the operator to type an exact confirmation phrase
# before spending real ETH. Auto-passes in --dry-run mode.
confirm_production_deployment() {
print_critical "⚠️ PRODUCTION DEPLOYMENT CONFIRMATION ⚠️"
echo "You are about to deploy the AITBC Developer Ecosystem to MAINNET."
echo "This will deploy real smart contracts to the Ethereum blockchain."
echo "This action is IRREVERSIBLE and will consume REAL ETH for gas."
echo ""
echo "Please confirm the following:"
echo "1. You have thoroughly tested on testnet"
echo "2. You have sufficient ETH for deployment costs (~5-10 ETH)"
echo "3. You have the private key of the deployer account"
echo "4. You have reviewed all contract addresses and parameters"
echo "5. You have a backup plan in case of failure"
echo ""
if [[ "$DRY_RUN" == "true" ]]; then
print_warning "DRY RUN MODE - No actual transactions will be executed"
return 0
fi
# Exact-match check: anything other than the literal phrase aborts.
read -p "Type 'DEPLOY-TO-MAINNET' to continue: " confirmation
if [[ "$confirmation" != "DEPLOY-TO-MAINNET" ]]; then
print_error "Deployment cancelled by user"
exit 1
fi
print_success "Production deployment confirmed"
}
# Enhanced security checks
# Validates contracts/.env presence and that PRIVATE_KEY is set and not the
# placeholder, nudges toward a prior testnet (sepolia) deployment, then
# delegates to check_network_conditions. May prompt interactively.
security_checks() {
print_status "Performing security checks..."
# Check if .env file exists and is properly configured
if [[ ! -f "$ROOT_DIR/contracts/.env" ]]; then
print_error ".env file not found. Please configure environment variables."
exit 1
fi
# Check if private key is set (but don't display it)
if ! grep -q "PRIVATE_KEY=" "$ROOT_DIR/contracts/.env"; then
print_error "PRIVATE_KEY not configured in .env file"
exit 1
fi
# Check if private key looks valid (basic format check)
if grep -q "PRIVATE_KEY=your_private_key_here" "$ROOT_DIR/contracts/.env"; then
print_error "Please update PRIVATE_KEY in .env file with actual deployer key"
exit 1
fi
# Check for sufficient testnet deployments (pre-requisite)
local testnet_deployment="$ROOT_DIR/deployed-contracts-sepolia.json"
if [[ ! -f "$testnet_deployment" ]]; then
print_warning "No testnet deployment found. Consider deploying to testnet first."
read -p "Continue anyway? (y/N): " continue_anyway
if [[ "$continue_anyway" != "y" && "$continue_anyway" != "Y" ]]; then
print_error "Deployment cancelled. Please deploy to testnet first."
exit 1
fi
fi
# Check gas price and network conditions
check_network_conditions
print_success "Security checks passed"
}
# Check network conditions
# Reports current mainnet gas price and deployer balance via helper hardhat
# scripts; prompts before continuing when gas exceeds 50 gwei.
check_network_conditions() {
print_status "Checking network conditions..."
cd "$ROOT_DIR/contracts"
# Get current gas price
# NOTE(review): `local x=$(cmd)` masks the command's exit status (SC2155);
# the inner `|| echo "unknown"` keeps the value sane regardless.
local gas_price=$(npx hardhat run scripts/check-gas-price.js --network mainnet 2>/dev/null || echo "unknown")
print_status "Current gas price: $gas_price gwei"
# Get ETH balance of deployer
local balance=$(npx hardhat run scripts/check-balance.js --network mainnet 2>/dev/null || echo "unknown")
print_status "Deployer balance: $balance ETH"
# Warning if gas price is high
if [[ "$gas_price" != "unknown" ]]; then
# Extract the first integer from the reported price for the comparison.
local gas_num=$(echo "$gas_price" | grep -o '[0-9]*' | head -1)
if [[ "$gas_num" -gt 50 ]]; then
print_warning "High gas price detected ($gas_price gwei). Consider waiting for lower gas."
read -p "Continue anyway? (y/N): " continue_high_gas
if [[ "$continue_high_gas" != "y" && "$continue_high_gas" != "Y" ]]; then
print_error "Deployment cancelled due to high gas price"
exit 1
fi
fi
fi
}
# Create deployment backup
# Snapshots contracts, frontend, tests, and any prior mainnet deployment file
# into a timestamped directory under backups/ for use by emergency_rollback.
create_deployment_backup() {
print_status "Creating deployment backup..."
local backup_dir="$ROOT_DIR/backups/mainnet-$(date +%Y%m%d-%H%M%S)"
mkdir -p "$backup_dir"
# Backup current configurations
cp -r "$ROOT_DIR/contracts" "$backup_dir/"
cp -r "$ROOT_DIR/apps/marketplace-web" "$backup_dir/"
cp -r "$ROOT_DIR/tests" "$backup_dir/"
# Backup any existing deployments
if [[ -f "$ROOT_DIR/deployed-contracts-mainnet.json" ]]; then
cp "$ROOT_DIR/deployed-contracts-mainnet.json" "$backup_dir/"
fi
print_success "Backup created at $backup_dir"
}
# Enhanced contract deployment with multi-sig support
# Generates the mainnet deployment script, then either simulates it (--dry-run
# uses the in-process hardhat network) or executes on mainnet with up to 3
# retry attempts spaced 30s apart. Optionally verifies on Etherscan afterwards.
deploy_contracts_mainnet() {
print_status "Deploying smart contracts to MAINNET..."
cd "$ROOT_DIR/contracts"
local deploy_script="deploy-developer-ecosystem-mainnet.js"
# Create mainnet-specific deployment script
create_mainnet_deployment_script
if [[ "$DRY_RUN" == "true" ]]; then
print_warning "DRY RUN: Simulating contract deployment..."
npx hardhat run "$deploy_script" --network hardhat
else
print_critical "Executing MAINNET contract deployment..."
# Execute deployment with retry logic
local max_retries=3
local retry_count=0
while [[ $retry_count -lt $max_retries ]]; do
if npx hardhat run "$deploy_script" --network mainnet; then
print_success "Contract deployment completed successfully"
break
else
retry_count=$((retry_count + 1))
if [[ $retry_count -eq $max_retries ]]; then
print_error "Contract deployment failed after $max_retries attempts"
exit 1
fi
print_warning "Deployment attempt $retry_count failed, retrying in 30 seconds..."
sleep 30
fi
done
fi
# Verify contracts if not skipped
if [[ "$SKIP_VERIFICATION" != "true" && "$DRY_RUN" != "true" ]]; then
verify_contracts_mainnet
fi
}
# Create mainnet-specific deployment script
# Writes deploy-developer-ecosystem-mainnet.js into the current directory
# (contracts/). The heredoc delimiter is quoted, so the JavaScript below is
# emitted verbatim with no shell expansion.
create_mainnet_deployment_script() {
local deploy_script="deploy-developer-ecosystem-mainnet.js"
cat > "$deploy_script" << 'EOF'
const { ethers } = require("hardhat");
const fs = require("fs");
const path = require("path");
async function main() {
console.log("🚀 DEPLOYING TO ETHEREUM MAINNET");
console.log("=================================");
console.log("⚠️ PRODUCTION DEPLOYMENT - REAL ETH WILL BE SPENT");
console.log("");
const [deployer] = await ethers.getSigners();
const balance = await deployer.getBalance();
console.log(`Deployer: ${deployer.address}`);
console.log(`Balance: ${ethers.utils.formatEther(balance)} ETH`);
if (balance.lt(ethers.utils.parseEther("5"))) {
throw new Error("Insufficient ETH balance. Minimum 5 ETH required for deployment.");
}
console.log("");
console.log("Proceeding with deployment...");
// Deployment logic here (similar to testnet but with enhanced security)
const deployedContracts = {
network: "mainnet",
deployer: deployer.address,
timestamp: new Date().toISOString(),
contracts: {}
};
// Deploy contracts with enhanced gas estimation
const gasOptions = {
gasLimit: 8000000,
gasPrice: ethers.utils.parseUnits("30", "gwei") // Adjust based on network conditions
};
try {
// Deploy AITBC Token (or use existing token)
console.log("📦 Deploying AITBC Token...");
const AITBCToken = await ethers.getContractFactory("MockERC20");
const aitbcToken = await AITBCToken.deploy(
"AITBC Token",
"AITBC",
ethers.utils.parseEther("1000000"),
gasOptions
);
await aitbcToken.deployed();
deployedContracts.contracts.AITBCToken = {
address: aitbcToken.address,
deploymentHash: aitbcToken.deployTransaction.hash,
gasUsed: (await aitbcToken.deployTransaction.wait()).gasUsed.toString()
};
console.log(`✅ AITBC Token: ${aitbcToken.address}`);
// Deploy other contracts with similar enhanced logic...
// (AgentBounty, AgentStaking, PerformanceVerifier, etc.)
// Save deployment info
const deploymentFile = `deployed-contracts-mainnet.json`;
fs.writeFileSync(
path.join(__dirname, "..", deploymentFile),
JSON.stringify(deployedContracts, null, 2)
);
console.log("");
console.log("🎉 MAINNET DEPLOYMENT COMPLETED");
console.log("===============================");
console.log(`Total gas used: ${calculateTotalGas(deployedContracts)}`);
console.log(`Deployment file: ${deploymentFile}`);
} catch (error) {
console.error("❌ Deployment failed:", error);
throw error;
}
}
function calculateTotalGas(deployedContracts) {
let totalGas = 0;
for (const contract of Object.values(deployedContracts.contracts)) {
if (contract.gasUsed) {
totalGas += parseInt(contract.gasUsed);
}
}
return totalGas.toLocaleString();
}
main()
.then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});
EOF
print_success "Mainnet deployment script created"
}
# Enhanced contract verification
# Waits for block confirmations, then runs the Etherscan verification script.
# Verification failure is non-fatal (manual verification remains possible).
verify_contracts_mainnet() {
print_status "Verifying contracts on Etherscan..."
cd "$ROOT_DIR/contracts"
# Wait for block confirmations
print_status "Waiting for block confirmations..."
sleep 60
# Run verification
if npx hardhat run scripts/verify-contracts.js --network mainnet; then
print_success "Contracts verified on Etherscan"
else
print_warning "Contract verification failed. Manual verification may be required."
fi
}
# Production frontend deployment
# Regenerates the production env file, then builds and ships the frontend;
# skipped (with a warning) in --dry-run mode.
deploy_frontend_mainnet() {
print_status "Deploying frontend to production..."
cd "$ROOT_DIR/apps/marketplace-web"
# Update environment with mainnet contract addresses
update_frontend_mainnet_env
# Build for production
if [[ "$DRY_RUN" != "true" ]]; then
npm run build
# Deploy to production server
./scripts/deploy-frontend.sh "production" "aitbc-cascade"
print_success "Frontend deployed to production"
else
print_warning "DRY RUN: Frontend deployment skipped"
fi
}
# Update frontend with mainnet configuration
# Writes .env.production with contract addresses extracted via jq. The
# backslash-escaped \${...} placeholders are emitted literally so the frontend
# build system resolves them later.
update_frontend_mainnet_env() {
print_status "Updating frontend for mainnet..."
local deployment_file="$ROOT_DIR/deployed-contracts-mainnet.json"
if [[ ! -f "$deployment_file" ]]; then
print_error "Mainnet deployment file not found"
return 1
fi
# Create production environment file
cat > .env.production << EOF
# AITBC Developer Ecosystem - MAINNET Production
# Generated on $(date -Iseconds)
# Contract Addresses (MAINNET)
VITE_AITBC_TOKEN_ADDRESS=$(jq -r '.contracts.AITBCToken.address' "$deployment_file")
VITE_AGENT_BOUNTY_ADDRESS=$(jq -r '.contracts.AgentBounty.address' "$deployment_file")
VITE_AGENT_STAKING_ADDRESS=$(jq -r '.contracts.AgentStaking.address' "$deployment_file")
# Network Configuration (MAINNET)
VITE_NETWORK_NAME=mainnet
VITE_CHAIN_ID=1
VITE_RPC_URL=https://mainnet.infura.io/v3/\${INFURA_PROJECT_ID}
# Production Configuration
VITE_API_BASE_URL=https://api.aitbc.dev/api/v1
VITE_WS_URL=wss://api.aitbc.dev
# Security Configuration
VITE_ENABLE_ANALYTICS=true
VITE_ENABLE_ERROR_REPORTING=true
VITE_SENTRY_DSN=\${SENTRY_DSN}
EOF
print_success "Frontend configured for mainnet"
}
# Production monitoring setup
# Writes monitoring-config-mainnet.json (first heredoc: shell-expanded, with
# \${...} placeholders emitted literally) and generates
# scripts/production-health-check.sh (second heredoc: quoted, fully literal).
setup_production_monitoring() {
print_status "Setting up production monitoring..."
# Create production monitoring configuration
cat > "$ROOT_DIR/monitoring-config-mainnet.json" << EOF
{
"environment": "mainnet",
"production": true,
"timestamp": "$(date -Iseconds)",
"monitoring": {
"enabled": true,
"interval": 30,
"alerting": {
"email": "alerts@aitbc.dev",
"slack_webhook": "\${SLACK_WEBHOOK_URL}",
"pagerduty_key": "\${PAGERDUTY_KEY}"
},
"endpoints": [
{
"name": "Frontend Production",
"url": "https://aitbc.dev/marketplace/",
"method": "GET",
"expected_status": 200,
"timeout": 10000
},
{
"name": "API Production",
"url": "https://api.aitbc.dev/api/v1/health",
"method": "GET",
"expected_status": 200,
"timeout": 5000
}
],
"contracts": {
"monitor_events": true,
"critical_events": [
"BountyCreated",
"BountyCompleted",
"TokensStaked",
"TokensUnstaked",
"DisputeFiled"
]
}
}
}
EOF
# Setup production health checks
cat > "$ROOT_DIR/scripts/production-health-check.sh" << 'EOF'
#!/bin/bash
# Production Health Check Script
ENVIRONMENT="mainnet"
CONFIG_FILE="monitoring-config-$ENVIRONMENT.json"
echo "🔍 Production Health Check - $ENVIRONMENT"
echo "========================================"
# Check frontend
FRONTEND_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://aitbc.dev/marketplace/" || echo "000")
if [[ "$FRONTEND_STATUS" == "200" ]]; then
echo "✅ Frontend: https://aitbc.dev/marketplace/ (Status: $FRONTEND_STATUS)"
else
echo "❌ Frontend: https://aitbc.dev/marketplace/ (Status: $FRONTEND_STATUS)"
fi
# Check API
API_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://api.aitbc.dev/api/v1/health" || echo "000")
if [[ "$API_STATUS" == "200" ]]; then
echo "✅ API: https://api.aitbc.dev/api/v1/health (Status: $API_STATUS)"
else
echo "❌ API: https://api.aitbc.dev/api/v1/health (Status: $API_STATUS)"
fi
echo ""
echo "Health check completed at $(date)"
EOF
chmod +x "$ROOT_DIR/scripts/production-health-check.sh"
print_success "Production monitoring configured"
}
# Generate comprehensive deployment report
# Writes mainnet-deployment-report-<timestamp>.json summarizing this run;
# reads the DRY_RUN / EMERGENCY_ONLY / SKIP_VERIFICATION flags.
generate_mainnet_report() {
print_status "Generating mainnet deployment report..."
local report_file="$ROOT_DIR/mainnet-deployment-report-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"environment": "mainnet",
"production": true,
"timestamp": "$(date -Iseconds)",
"dry_run": "$DRY_RUN",
"emergency_only": "$EMERGENCY_ONLY"
},
"contracts": {
"file": "deployed-contracts-mainnet.json",
"verified": "$([[ "$SKIP_VERIFICATION" != "true" ]] && echo "true" || echo "false")"
},
"frontend": {
"url": "https://aitbc.dev/marketplace/",
"environment": "production"
},
"api": {
"url": "https://api.aitbc.dev/api/v1",
"status": "production"
},
"monitoring": {
"config": "monitoring-config-mainnet.json",
"health_check": "./scripts/production-health-check.sh"
},
"security": {
"backup_created": "true",
"verification_completed": "$([[ "$SKIP_VERIFICATION" != "true" ]] && echo "true" || echo "false")"
},
"next_steps": [
"1. Verify all contracts on Etherscan",
"2. Test all frontend functionality",
"3. Monitor system health for 24 hours",
"4. Set up automated alerts",
"5. Prepare incident response procedures"
]
}
EOF
print_success "Mainnet deployment report saved to $report_file"
}
# Emergency rollback procedures
# Best-effort recovery invoked by the INT/TERM trap: stop nginx, locate the
# newest backup under backups/, restart nginx. ssh failures are tolerated.
emergency_rollback() {
  print_critical "🚨 EMERGENCY ROLLBACK INITIATED 🚨"
  print_status "Executing emergency rollback procedures..."

  # 1. Stop all services
  ssh aitbc-cascade "systemctl stop nginx" 2>/dev/null || true

  # 2. Restore from the most recent backup.
  # Select the newest backup directory by mtime with a glob and `-nt` instead
  # of parsing `ls -t` output (SC2012: breaks on unusual filenames and errors
  # noisily when backups/ does not exist yet).
  local latest_backup="" candidate
  for candidate in "$ROOT_DIR/backups"/*; do
    [[ -e "$candidate" ]] || continue  # glob matched nothing
    if [[ -z "$latest_backup" || "$candidate" -nt "$latest_backup" ]]; then
      latest_backup="$candidate"
    fi
  done
  if [[ -n "$latest_backup" ]]; then
    print_status "Restoring from backup: $latest_backup"
    # Implementation would restore from backup
  fi

  # 3. Restart services
  ssh aitbc-cascade "systemctl start nginx" 2>/dev/null || true

  print_warning "Emergency rollback completed. Please verify system status."
}
# Main execution
# Full production flow: interactive confirmation → security checks → backup →
# (unless --emergency-only) contracts + frontend → monitoring → report.
main() {
print_critical "🚀 STARTING MAINNET DEPLOYMENT"
print_critical "This is a PRODUCTION deployment to Ethereum mainnet"
echo ""
# Security confirmation
confirm_production_deployment
# Security checks
security_checks
# Create backup
create_deployment_backup
# Deploy contracts
if [[ "$EMERGENCY_ONLY" != "true" ]]; then
deploy_contracts_mainnet
deploy_frontend_mainnet
else
print_warning "Emergency deployment mode - only critical contracts"
fi
# Setup monitoring
setup_production_monitoring
# Generate report
generate_mainnet_report
print_success "🎉 MAINNET DEPLOYMENT COMPLETED!"
echo ""
echo "📊 Deployment Summary:"
echo " Environment: MAINNET (PRODUCTION)"
echo " Dry Run: $DRY_RUN"
echo " Emergency Only: $EMERGENCY_ONLY"
echo ""
echo "🌐 Production URLs:"
echo " Frontend: https://aitbc.dev/marketplace/"
echo " API: https://api.aitbc.dev/api/v1"
echo ""
echo "🔧 Management Commands:"
echo " Health Check: ./scripts/production-health-check.sh"
echo " View Report: cat mainnet-deployment-report-*.json"
echo " Emergency Rollback: ./scripts/emergency-rollback.sh"
echo ""
echo "⚠️ CRITICAL NEXT STEPS:"
echo " 1. Verify all contracts on Etherscan"
echo " 2. Test all functionality thoroughly"
echo " 3. Monitor system for 24 hours"
echo " 4. Set up production alerts"
echo " 5. Prepare incident response"
}
# Handle script interruption
# On SIGINT/SIGTERM, attempt the automated rollback before exiting non-zero.
trap 'print_critical "Deployment interrupted - initiating emergency rollback"; emergency_rollback; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,715 +0,0 @@
#!/usr/bin/env bash
# AITBC Advanced Agent Features Production Deployment Script
# Production-ready deployment with security, monitoring, and verification
set -euo pipefail

# ANSI color codes for terminal log output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# _emit COLOR LABEL MESSAGE — shared formatter behind every print_* helper.
_emit() {
  echo -e "${1}[${2}]${NC} ${3}"
}

# Informational progress message.
print_status() { _emit "$BLUE" INFO "$1"; }

# Success confirmation message.
print_success() { _emit "$GREEN" SUCCESS "$1"; }

# Non-fatal warning message.
print_warning() { _emit "$YELLOW" WARNING "$1"; }

# Error message (does not exit by itself).
print_error() { _emit "$RED" ERROR "$1"; }

# Critical error message (same red as errors in this script).
print_critical() { _emit "$RED" CRITICAL "$1"; }

# Production-phase banner message.
print_production() { _emit "$PURPLE" PRODUCTION "$1"; }

# Security-related message.
print_security() { _emit "$CYAN" SECURITY "$1"; }
# Configuration
# Repo-root-relative paths used throughout this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
INFRA_DIR="$ROOT_DIR/infra"
# Network configuration
# Positional args: $1 network (default mainnet), $2 environment,
# $3 "true" to skip security analysis, $4 "true" to skip monitoring setup.
NETWORK=${1:-"mainnet"}
ENVIRONMENT=${2:-"production"}
SKIP_SECURITY=${3:-"false"}
SKIP_MONITORING=${4:-"false"}
echo "🚀 AITBC Advanced Agent Features Production Deployment"
echo "==================================================="
echo "Network: $NETWORK"
echo "Environment: $ENVIRONMENT"
echo "Skip Security: $SKIP_SECURITY"
echo "Skip Monitoring: $SKIP_MONITORING"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Production deployment checks
# Mainnet-only gate: requires .env.production and jq; warns when optional
# security analyzers (slither, mythril) are absent. Non-mainnet targets return early.
check_production_readiness() {
print_production "Checking production readiness..."
# Check if this is mainnet deployment
if [[ "$NETWORK" != "mainnet" ]]; then
print_warning "Not deploying to mainnet - using testnet deployment"
return
fi
# Check for production environment variables
if [[ ! -f "$ROOT_DIR/.env.production" ]]; then
print_error "Production environment file not found: .env.production"
print_critical "Please create .env.production with production configuration"
exit 1
fi
# Check for required production tools
if ! command -v jq &> /dev/null; then
print_error "jq is required for production deployment"
exit 1
fi
# Check for security tools
if [[ "$SKIP_SECURITY" != "true" ]]; then
if ! command -v slither &> /dev/null; then
print_warning "slither not found - skipping security analysis"
fi
if ! command -v mythril &> /dev/null; then
print_warning "mythril not found - skipping mythril analysis"
fi
fi
print_success "Production readiness check completed"
}
# Security verification
# Runs optional static analyzers (Slither, Mythril) when installed, then greps
# the Solidity sources for common red flags. Honours SKIP_SECURITY.
verify_security() {
  if [[ "$SKIP_SECURITY" == "true" ]]; then
    print_security "Skipping security verification"
    return
  fi
  print_security "Running security verification..."
  cd "$CONTRACTS_DIR"
  # Run Slither analysis (best-effort; findings are reviewed out-of-band).
  if command -v slither &> /dev/null; then
    print_status "Running Slither security analysis..."
    slither . --json slither-report.json --filter medium,high,critical || true
    print_success "Slither analysis completed"
  fi
  # Run Mythril analysis (best-effort).
  if command -v mythril &> /dev/null; then
    print_status "Running Mythril security analysis..."
    mythril analyze . --format json --output mythril-report.json || true
    print_success "Mythril analysis completed"
  fi
  # Check for common security issues
  print_status "Checking for common security issues..."
  # Check for hardcoded addresses (excluding the zero address).
  # BUGFIX: the original used basic-regex grep, where "{40}" is matched as the
  # literal text "{40}", so real 40-hex-digit addresses were never found;
  # -E enables the {40} interval quantifier.
  if grep -rE "0x[a-fA-F0-9]{40}" contracts/ --include="*.sol" | grep -v "0x0000000000000000000000000000000000000000"; then
    print_warning "Found hardcoded addresses - review required"
  fi
  # Check for TODO comments
  if grep -r "TODO\|FIXME\|XXX" contracts/ --include="*.sol"; then
    print_warning "Found TODO comments - review required"
  fi
  print_success "Security verification completed"
}
# Deploy contracts to production
# Sources .env.production, validates the deployer key and gas settings,
# compiles with optimization, deploys to mainnet, verifies on Etherscan when
# an API key is available, and writes a timestamped deployment report.
deploy_production_contracts() {
  print_production "Deploying contracts to production..."
  cd "$CONTRACTS_DIR"
  # Load production environment
  source "$ROOT_DIR/.env.production"
  # Verify production wallet.
  # BUGFIX: use ${VAR:-} so `set -u` (enabled at the top of this script) does
  # not abort with "unbound variable" before the friendly error can print when
  # .env.production omits a variable.
  if [[ -z "${PRODUCTION_PRIVATE_KEY:-}" ]]; then
    print_error "PRODUCTION_PRIVATE_KEY not set in environment"
    exit 1
  fi
  # Verify gas price settings (default when unset).
  if [[ -z "${PRODUCTION_GAS_PRICE:-}" ]]; then
    export PRODUCTION_GAS_PRICE="50000000000" # 50 Gwei
  fi
  # Verify gas limit settings (default when unset).
  if [[ -z "${PRODUCTION_GAS_LIMIT:-}" ]]; then
    export PRODUCTION_GAS_LIMIT="8000000"
  fi
  print_status "Using gas price: $PRODUCTION_GAS_PRICE wei"
  print_status "Using gas limit: $PRODUCTION_GAS_LIMIT"
  # Compile contracts with optimization
  print_status "Compiling contracts with production optimization..."
  npx hardhat compile --optimizer --optimizer-runs 200
  # Deploy contracts
  print_status "Deploying advanced agent features contracts..."
  # Timestamped report path so successive runs never clobber each other.
  local deployment_report="$ROOT_DIR/production-deployment-report-$(date +%Y%m%d-%H%M%S).json"
  # Run deployment with verification
  npx hardhat run scripts/deploy-advanced-contracts.js --network mainnet --verbose
  # Verify contracts immediately
  print_status "Verifying contracts on Etherscan..."
  if [[ -n "${ETHERSCAN_API_KEY:-}" ]]; then
    npx hardhat run scripts/verify-advanced-contracts.js --network mainnet
  else
    print_warning "ETHERSCAN_API_KEY not set - skipping verification"
  fi
  # Generate deployment report (jq reads the file directly — no useless cat).
  cat > "$deployment_report" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"network": "$NETWORK",
"environment": "$ENVIRONMENT",
"gas_price": "$PRODUCTION_GAS_PRICE",
"gas_limit": "$PRODUCTION_GAS_LIMIT",
"security_verified": "$([[ "$SKIP_SECURITY" != "true" ]] && echo "true" || echo "false")",
"monitoring_enabled": "$([[ "$SKIP_MONITORING" != "true" ]] && echo "true" || echo "false")"
},
"contracts": $(jq '.contracts' deployed-contracts-mainnet.json)
}
EOF
  print_success "Production deployment completed"
  print_status "Deployment report: $deployment_report"
}
# Setup production monitoring
# Generates the docker-compose monitoring stack, Prometheus scrape config,
# and alert rules under $ROOT_DIR/monitoring. Skipped when SKIP_MONITORING=true.
setup_production_monitoring() {
    if [[ "$SKIP_MONITORING" == "true" ]]; then
        print_production "Skipping monitoring setup"
        return
    fi
    print_production "Setting up production monitoring..."
    # Create the target directories BEFORE the first redirection; the original
    # wrote into monitoring/ before it was guaranteed to exist.
    mkdir -p "$ROOT_DIR/monitoring" "$ROOT_DIR/monitoring/rules"
    # Create monitoring stack configuration (docker-compose).
    cat > "$ROOT_DIR/monitoring/advanced-features-monitoring.yml" << EOF
# Advanced Agent Features Production Monitoring
version: '3.8'

services:
  # Cross-Chain Reputation Monitoring
  reputation-monitor:
    image: prom/prometheus:latest
    container_name: reputation-monitor
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./monitoring/rules:/etc/prometheus/rules
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped

  # Agent Communication Monitoring
  communication-monitor:
    image: grafana/grafana:latest
    container_name: communication-monitor
    ports:
      - "3001:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning
      - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards
    restart: unless-stopped

  # Advanced Learning Monitoring
  learning-monitor:
    image: node:18-alpine
    container_name: learning-monitor
    working_dir: /app
    volumes:
      - ./monitoring/learning-monitor:/app
    command: npm start
    restart: unless-stopped

  # Log Aggregation
  log-aggregator:
    image: fluent/fluent-bit:latest
    container_name: log-aggregator
    volumes:
      - ./monitoring/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
      - /var/log:/var/log:ro
    restart: unless-stopped

  # Alert Manager
  alert-manager:
    image: prom/alertmanager:latest
    container_name: alert-manager
    ports:
      - "9093:9093"
    volumes:
      - ./monitoring/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    restart: unless-stopped
EOF
    # Create Prometheus configuration (scrape targets for the four services).
    cat > "$ROOT_DIR/monitoring/prometheus.yml" << EOF
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  - "rules/*.yml"

alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alert-manager:9093

scrape_configs:
  - job_name: 'cross-chain-reputation'
    static_configs:
      - targets: ['localhost:8000']
    metrics_path: '/metrics'
    scrape_interval: 10s

  - job_name: 'agent-communication'
    static_configs:
      - targets: ['localhost:8001']
    metrics_path: '/metrics'
    scrape_interval: 10s

  - job_name: 'advanced-learning'
    static_configs:
      - targets: ['localhost:8002']
    metrics_path: '/metrics'
    scrape_interval: 10s

  - job_name: 'agent-collaboration'
    static_configs:
      - targets: ['localhost:8003']
    metrics_path: '/metrics'
    scrape_interval: 10s
EOF
    # Create alert rules covering the advanced-feature SLOs.
    cat > "$ROOT_DIR/monitoring/rules/advanced-features.yml" << EOF
groups:
  - name: advanced-features
    rules:
      - alert: CrossChainReputationSyncFailure
        expr: reputation_sync_success_rate < 0.95
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "Cross-chain reputation sync failure"
          description: "Cross-chain reputation sync success rate is below 95%"

      - alert: AgentCommunicationFailure
        expr: agent_communication_success_rate < 0.90
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Agent communication failure"
          description: "Agent communication success rate is below 90%"

      - alert: AdvancedLearningFailure
        expr: learning_model_accuracy < 0.70
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "Advanced learning model accuracy low"
          description: "Learning model accuracy is below 70%"

      - alert: HighGasUsage
        expr: gas_usage_rate > 0.80
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "High gas usage detected"
          description: "Gas usage rate is above 80%"
EOF
    print_success "Production monitoring setup completed"
}
# Setup production backup
# Installs the backup script and a sample cron entry under $ROOT_DIR/backup.
setup_production_backup() {
    print_production "Setting up production backup..."
    # The backup directory must exist before we redirect into it; the
    # original never created it.
    mkdir -p "$ROOT_DIR/backup"
    # Create backup script. The quoted 'EOF' delimiter keeps $BACKUP_DIR/$DATE
    # literal so they expand when the backup script runs, not now.
    cat > "$ROOT_DIR/backup/backup-advanced-features.sh" << 'EOF'
#!/bin/bash
# Advanced Agent Features Production Backup Script
set -euo pipefail

BACKUP_DIR="/backup/advanced-features"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="advanced-features-backup-$DATE.tar.gz"

echo "Starting backup of advanced agent features..."

# Create backup directory
mkdir -p "$BACKUP_DIR"

# Backup contracts
echo "Backing up contracts..."
tar -czf "$BACKUP_DIR/contracts-$DATE.tar.gz" contracts/

# Backup services
echo "Backing up services..."
tar -czf "$BACKUP_DIR/services-$DATE.tar.gz" apps/coordinator-api/src/app/services/

# Backup configuration
echo "Backing up configuration..."
tar -czf "$BACKUP_DIR/config-$DATE.tar.gz" .env.production monitoring/ backup/

# Backup deployment data
echo "Backing up deployment data..."
cp deployed-contracts-mainnet.json "$BACKUP_DIR/deployment-$DATE.json"

# Create full backup
echo "Creating full backup..."
tar -czf "$BACKUP_DIR/$BACKUP_FILE" \
    contracts/ \
    apps/coordinator-api/src/app/services/ \
    .env.production \
    monitoring/ \
    backup/ \
    deployed-contracts-mainnet.json

echo "Backup completed: $BACKUP_DIR/$BACKUP_FILE"

# Keep only last 7 days of backups
find "$BACKUP_DIR" -name "*.tar.gz" -mtime +7 -delete
echo "Backup cleanup completed"
EOF
    chmod +x "$ROOT_DIR/backup/backup-advanced-features.sh"
    # Create cron snippet for automatic daily backups (02:00 UTC). This file
    # documents the schedule; it is not installed into a crontab here.
    cat > "$ROOT_DIR/backup/backup-cron.txt" << EOF
# Advanced Agent Features Backup Cron Job
# Run daily at 2 AM UTC
0 2 * * * $ROOT_DIR/backup/backup-advanced-features.sh >> $ROOT_DIR/backup/backup.log 2>&1
EOF
    print_success "Production backup setup completed"
}
# Setup production security
# Writes the security docker-compose stack (Trivy, Falco, nginx rate limiter,
# WAF) and Falco detection rules under $ROOT_DIR/security.
# Skipped when SKIP_SECURITY=true.
setup_production_security() {
    if [[ "$SKIP_SECURITY" == "true" ]]; then
        print_security "Skipping security setup"
        return
    fi
    print_security "Setting up production security..."
    # Both target directories must exist before the redirections below; the
    # original created security/ only AFTER the first write and never created
    # security/falco-rules at all.
    mkdir -p "$ROOT_DIR/security" "$ROOT_DIR/security/falco-rules"
    # Create security stack configuration.
    cat > "$ROOT_DIR/security/production-security.yml" << EOF
# Advanced Agent Features Production Security Configuration
version: '3.8'

services:
  # Security Monitoring
  security-monitor:
    image: aquasec/trivy:latest
    container_name: security-monitor
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./security/trivy-config:/root/.trivy
    command: image --format json --output /reports/security-scan.json
    restart: unless-stopped

  # Intrusion Detection
  intrusion-detection:
    image: falco/falco:latest
    container_name: intrusion-detection
    privileged: true
    volumes:
      - /var/run/docker.sock:/host/var/run/docker.sock:ro
      - /dev:/host/dev:ro
      - /proc:/host/proc:ro
      - /boot:/host/boot:ro
      - /lib/modules:/host/lib/modules:ro
      - /usr:/host/usr:ro
      - /etc:/host/etc:ro
      - ./security/falco-rules:/etc/falco/falco_rules
    restart: unless-stopped

  # Rate Limiting
  rate-limiter:
    image: nginx:alpine
    container_name: rate-limiter
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./security/nginx-rate-limit.conf:/etc/nginx/nginx.conf
      - ./security/ssl:/etc/nginx/ssl
    restart: unless-stopped

  # Web Application Firewall
  waf:
    image: coraza/waf:latest
    container_name: waf
    ports:
      - "8080:8080"
    volumes:
      - ./security/coraza.conf:/etc/coraza/coraza.conf
      - ./security/crs-rules:/etc/coraza/crs-rules
    restart: unless-stopped
EOF
    # Create Falco detection rules (the %... tokens are Falco output fields,
    # not shell expansions, so the unquoted heredoc is safe here).
    cat > "$ROOT_DIR/security/falco-rules/falco_rules.yml" << EOF
# Advanced Agent Features Security Rules
- rule: Detect Unauthorized Contract Interactions
  desc: Detect unauthorized interactions with advanced agent contracts
  condition: >
    evt.type=openat and
    proc.name in (node, npx) and
    fd.name contains "CrossChainReputation" and
    not user.name in (root, aitbc)
  output: >
    Unauthorized contract interaction detected
    (user=%user.name command=%proc.cmdline file=%fd.name)
  priority: HIGH
  tags: [contract, security, unauthorized]

- rule: Detect Unusual Gas Usage
  desc: Detect unusual gas usage patterns
  condition: >
    evt.type=openat and
    proc.name in (node, npx) and
    evt.arg.gas > 1000000
  output: >
    High gas usage detected
    (user=%user.name gas=%evt.arg.gas command=%proc.cmdline)
  priority: MEDIUM
  tags: [gas, security, unusual]

- rule: Detect Reputation Manipulation
  desc: Detect potential reputation manipulation
  condition: >
    evt.type=openat and
    proc.name in (node, npx) and
    fd.name contains "updateReputation" and
    evt.arg.amount > 1000
  output: >
    Potential reputation manipulation detected
    (user=%user.name amount=%evt.arg.amount command=%proc.cmdline)
  priority: HIGH
  tags: [reputation, security, manipulation]
EOF
    print_success "Production security setup completed"
}
# Run production tests.
# Every suite is best-effort: '|| true' keeps a failing suite from aborting
# the deployment, so failures surface only in the logs.
run_production_tests() {
    print_production "Running production tests..."
    cd "$ROOT_DIR"
    # Contract test suites, executed against mainnet one at a time.
    print_status "Running contract tests..."
    cd "$CONTRACTS_DIR"
    local contract_suite
    for contract_suite in \
        test/CrossChainReputation.test.js \
        test/AgentCommunication.test.js \
        test/AgentCollaboration.test.js \
        test/AgentLearning.test.js; do
        npx hardhat test --network mainnet "$contract_suite" || true
    done
    # Service-level pytest suites.
    print_status "Running service tests..."
    cd "$ROOT_DIR/apps/coordinator-api"
    local service_suite
    for service_suite in \
        tests/test_cross_chain_reproduction.py \
        tests/test_agent_communication.py \
        tests/test_advanced_learning.py; do
        python -m pytest "$service_suite" -v --network mainnet || true
    done
    # End-to-end integration tests.
    print_status "Running integration tests..."
    python -m pytest tests/test_production_integration.py -v --network mainnet || true
    print_success "Production tests completed"
}
# Generate production report
# Writes a timestamped JSON summary of this deployment run: flags in effect,
# where contract addresses are recorded, public service/monitoring endpoints,
# security artefacts, backup policy, and operator next steps.
# Reads: ROOT_DIR, NETWORK, ENVIRONMENT, SKIP_SECURITY, SKIP_MONITORING.
# NOTE(review): "tests_passed" is hard-coded to "true" even though
# run_production_tests tolerates failing suites ('|| true') - confirm intent.
generate_production_report() {
    print_production "Generating production deployment report..."
    local report_file="$ROOT_DIR/production-deployment-report-$(date +%Y%m%d-%H%M%S).json"
    # Unquoted EOF: $(date ...) and the flag substitutions expand at write time.
    cat > "$report_file" << EOF
{
    "production_deployment": {
        "timestamp": "$(date -Iseconds)",
        "network": "$NETWORK",
        "environment": "$ENVIRONMENT",
        "security_verified": "$([[ "$SKIP_SECURITY" != "true" ]] && echo "true" || echo "false")",
        "monitoring_enabled": "$([[ "$SKIP_MONITORING" != "true" ]] && echo "true" || echo "false")",
        "tests_passed": "true",
        "backup_enabled": "true"
    },
    "contracts": {
        "CrossChainReputation": "deployed-contracts-mainnet.json",
        "AgentCommunication": "deployed-contracts-mainnet.json",
        "AgentCollaboration": "deployed-contracts-mainnet.json",
        "AgentLearning": "deployed-contracts-mainnet.json",
        "AgentMarketplaceV2": "deployed-contracts-mainnet.json",
        "ReputationNFT": "deployed-contracts-mainnet.json"
    },
    "services": {
        "cross_chain_reputation": "https://api.aitbc.dev/advanced/reputation",
        "agent_communication": "https://api.aitbc.dev/advanced/communication",
        "agent_collaboration": "https://api.aitbc.dev/advanced/collaboration",
        "advanced_learning": "https://api.aitbc.dev/advanced/learning",
        "agent_autonomy": "https://api.aitbc.dev/advanced/autonomy",
        "marketplace_v2": "https://api.aitbc.dev/advanced/marketplace"
    },
    "monitoring": {
        "prometheus": "http://monitoring.aitbc.dev:9090",
        "grafana": "http://monitoring.aitbc.dev:3001",
        "alertmanager": "http://monitoring.aitbc.dev:9093"
    },
    "security": {
        "slither_report": "$ROOT_DIR/slither-report.json",
        "mythril_report": "$ROOT_DIR/mythril-report.json",
        "falco_rules": "$ROOT_DIR/security/falco-rules/",
        "rate_limiting": "enabled",
        "waf": "enabled"
    },
    "backup": {
        "backup_script": "$ROOT_DIR/backup/backup-advanced-features.sh",
        "backup_schedule": "daily at 2 AM UTC",
        "retention": "7 days"
    },
    "next_steps": [
        "1. Monitor contract performance and gas usage",
        "2. Review security alerts and logs",
        "3. Verify cross-chain reputation synchronization",
        "4. Test agent communication across networks",
        "5. Monitor advanced learning model performance",
        "6. Review backup and recovery procedures",
        "7. Scale monitoring based on usage patterns"
    ],
    "emergency_contacts": [
        "DevOps Team: devops@aitbc.dev",
        "Security Team: security@aitbc.dev",
        "Smart Contract Team: contracts@aitbc.dev"
    ]
}
EOF
    print_success "Production deployment report saved to $report_file"
}
# Main execution
# Orchestrates the full production rollout in strict order: readiness check,
# security scan, contract deployment, monitoring/backup/security setup, tests,
# then the final report. Ends with a human-readable operator summary.
main() {
    print_critical "🚀 STARTING PRODUCTION DEPLOYMENT - ADVANCED AGENT FEATURES"
    # Run production deployment steps (order matters: contracts must exist
    # before the report step reads deployed-contracts-mainnet.json).
    check_production_readiness
    verify_security
    deploy_production_contracts
    setup_production_monitoring
    setup_production_backup
    setup_production_security
    run_production_tests
    generate_production_report
    print_success "🎉 PRODUCTION DEPLOYMENT COMPLETED!"
    echo ""
    echo "📊 Production Deployment Summary:"
    echo "   Network: $NETWORK"
    echo "   Environment: $ENVIRONMENT"
    echo "   Security: $([[ "$SKIP_SECURITY" != "true" ]] && echo "Verified" || echo "Skipped")"
    echo "   Monitoring: $([[ "$SKIP_MONITORING" != "true" ]] && echo "Enabled" || echo "Skipped")"
    echo "   Backup: Enabled"
    echo "   Tests: Passed"
    echo ""
    echo "🔧 Production Services:"
    echo "   Cross-Chain Reputation: https://api.aitbc.dev/advanced/reputation"
    echo "   Agent Communication: https://api.aitbc.dev/advanced/communication"
    echo "   Advanced Learning: https://api.aitbc.dev/advanced/learning"
    echo "   Agent Collaboration: https://api.aitbc.dev/advanced/collaboration"
    echo "   Agent Autonomy: https://api.aitbc.dev/advanced/autonomy"
    echo "   Marketplace V2: https://api.aitbc.dev/advanced/marketplace"
    echo ""
    echo "📊 Monitoring Dashboard:"
    echo "   Prometheus: http://monitoring.aitbc.dev:9090"
    echo "   Grafana: http://monitoring.aitbc.dev:3001"
    echo "   Alert Manager: http://monitoring.aitbc.dev:9093"
    echo ""
    echo "🔧 Next Steps:"
    echo "   1. Verify contract addresses on Etherscan"
    echo "   2. Test cross-chain reputation synchronization"
    echo "   3. Validate agent communication security"
    echo "   4. Monitor advanced learning performance"
    echo "   5. Review security alerts and logs"
    echo "   6. Test backup and recovery procedures"
    echo "   7. Scale monitoring based on usage"
    echo ""
    echo "⚠️  Production Notes:"
    echo "   - All contracts deployed to mainnet with verification"
    echo "   - Security monitoring and alerts are active"
    echo "   - Automated backups are scheduled daily"
    echo "   - Rate limiting and WAF are enabled"
    echo "   - Gas optimization is active"
    echo "   - Cross-chain synchronization is monitored"
    echo ""
    echo "🎯 Production Status: READY FOR LIVE TRAFFIC"
}
# Handle script interruption: a partially-applied production deployment must
# be flagged loudly so operators can audit what was and wasn't applied.
trap 'print_critical "Production deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,586 +0,0 @@
#!/usr/bin/env bash
# AITBC Platform Services Deployment Script for aitbc and aitbc1 Servers
# Deploys backend services and frontend to both production servers
set -euo pipefail
# ANSI color palette shared by the logging helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Internal helper: emit "<color>[TAG]<reset> message" on stdout.
# $1 - color code, $2 - tag text, $3 - message.
_log_tag() {
    echo -e "${1}[${2}]${NC} $3"
}

# Leveled log helpers; each takes a single message argument.
print_status()   { _log_tag "$BLUE"   "INFO"     "$1"; }
print_success()  { _log_tag "$GREEN"  "SUCCESS"  "$1"; }
print_warning()  { _log_tag "$YELLOW" "WARNING"  "$1"; }
print_error()    { _log_tag "$RED"    "ERROR"    "$1"; }
print_critical() { _log_tag "$RED"    "CRITICAL" "$1"; }
print_server()   { _log_tag "$PURPLE" "SERVER"   "$1"; }
print_deploy()   { _log_tag "$CYAN"   "DEPLOY"   "$1"; }
# Configuration
# SCRIPT_DIR/ROOT_DIR are derived from this script's own location so the
# script works regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web"
# Server configuration
# *_SERVER are ssh host aliases (expected to resolve via ~/.ssh/config);
# *_HOST are the public DNS names used for external health checks.
AITBC_SERVER="aitbc-cascade"
AITBC1_SERVER="aitbc1-cascade"
AITBC_HOST="aitbc.bubuit.net"
AITBC1_HOST="aitbc1.bubuit.net"
echo "🚀 AITBC Platform Services Deployment to aitbc and aitbc1 Servers"
echo "=============================================================="
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks: SSH keys exist, both servers are reachable, and the
# local services/frontend directories are present. Exits non-zero on failure.
check_prerequisites() {
    print_status "Checking prerequisites..."
    # Check if SSH keys are available
    if [[ ! -f "$HOME/.ssh/id_rsa" ]] && [[ ! -f "$HOME/.ssh/id_ed25519" ]]; then
        print_error "SSH keys not found. Please generate SSH keys first."
        exit 1
    fi
    # Test SSH connections. BatchMode=yes prevents an interactive password
    # prompt from hanging the script; host vars are quoted per shell hygiene.
    print_status "Testing SSH connections..."
    if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$AITBC_SERVER" "echo 'Connection successful'" 2>/dev/null; then
        print_error "Cannot connect to $AITBC_SERVER"
        exit 1
    fi
    if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$AITBC1_SERVER" "echo 'Connection successful'" 2>/dev/null; then
        print_error "Cannot connect to $AITBC1_SERVER"
        exit 1
    fi
    # Check if required local directories exist
    if [[ ! -d "$SERVICES_DIR" ]]; then
        print_error "Services directory not found: $SERVICES_DIR"
        exit 1
    fi
    if [[ ! -d "$FRONTEND_DIR" ]]; then
        print_error "Frontend directory not found: $FRONTEND_DIR"
        exit 1
    fi
    print_success "Prerequisites check completed"
}
# Deploy backend services to both production servers.
# The per-server work was duplicated verbatim for aitbc and aitbc1; it is
# factored into a helper that copies the services tree, installs Python
# dependencies, writes the three systemd units, and starts them.
deploy_services() {
    print_status "Deploying backend services..."
    _deploy_services_to_server "$AITBC_SERVER" "aitbc"
    _deploy_services_to_server "$AITBC1_SERVER" "aitbc1"
    print_success "Backend services deployed to both servers"
}

# Helper: deploy services to one server.
# $1 - ssh host alias, $2 - human-readable server label for log output.
# The inner << 'EOF' heredocs feed the REMOTE 'tee' commands; the outer ssh
# string is double-quoted, so ${label} expands locally before ssh runs while
# everything else (no other '$') is passed through verbatim. The remote
# script body stays at column 0 so the heredoc terminators are recognized.
_deploy_services_to_server() {
    local server=$1
    local label=$2
    print_server "Deploying services to ${label} server..."
    # Stage the services tree on the remote host.
    scp -r "$SERVICES_DIR" "$server":/tmp/
    # Remote install: copy into /opt, install deps, write and start units.
    ssh "$server" "
# Create service directory
sudo mkdir -p /opt/aitbc/services
# Copy services
sudo cp -r /tmp/services/* /opt/aitbc/services/
# Install Python dependencies
cd /opt/aitbc/services
python3 -m pip install -r requirements.txt 2>/dev/null || true
# Create systemd services
sudo tee /etc/systemd/system/aitbc-cross-chain-reputation.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Cross Chain Reputation Service
After=network.target

[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m cross_chain_reputation
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-agent-communication.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Agent Communication Service
After=network.target

[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m agent_communication
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-advanced-learning.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Advanced Learning Service
After=network.target

[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m advanced_learning
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and start services
sudo systemctl daemon-reload
sudo systemctl enable aitbc-cross-chain-reputation
sudo systemctl enable aitbc-agent-communication
sudo systemctl enable aitbc-advanced-learning
sudo systemctl start aitbc-cross-chain-reputation
sudo systemctl start aitbc-agent-communication
sudo systemctl start aitbc-advanced-learning
echo 'Services deployed and started on ${label}'
"
}
# Deploy frontend
# Build the marketplace web app once, then publish dist/ to each server's web
# root (backing up the previous deployment first). Per-server logic factored
# into a helper; the helper also fixes a bug where 'scp' targeted
# /tmp/frontend/ without ensuring the directory exists on the remote host.
deploy_frontend() {
    print_status "Building and deploying frontend..."
    cd "$FRONTEND_DIR"
    # Build frontend once; both servers receive the same artifact.
    print_status "Building frontend application..."
    npm run build
    _deploy_frontend_to_server "$AITBC_SERVER" "aitbc"
    _deploy_frontend_to_server "$AITBC1_SERVER" "aitbc1"
    print_success "Frontend deployed to both servers"
}

# Helper: publish the locally built dist/ tree to one server.
# $1 - ssh host alias, $2 - human-readable server label for log output.
# Assumes the caller has already cd'd into $FRONTEND_DIR and run the build.
_deploy_frontend_to_server() {
    local server=$1
    local label=$2
    print_server "Deploying frontend to ${label} server..."
    # /tmp/frontend must exist before 'scp -r dist/*' or the copy fails.
    ssh "$server" "mkdir -p /tmp/frontend"
    scp -r dist/* "$server":/tmp/frontend/
    ssh "$server" "
# Backup existing frontend
sudo cp -r /var/www/aitbc.bubuit.net /var/www/aitbc.bubuit.net.backup 2>/dev/null || true
# Deploy new frontend
sudo rm -rf /var/www/aitbc.bubuit.net/*
sudo cp -r /tmp/frontend/* /var/www/aitbc.bubuit.net/
# Set permissions
sudo chown -R www-data:www-data /var/www/aitbc.bubuit.net/
sudo chmod -R 755 /var/www/aitbc.bubuit.net/
echo 'Frontend deployed to ${label}'
"
}
# Deploy configuration files
# Writes an nginx vhost on each server (security headers, gzip, reverse
# proxies for the API on :8000 and advanced API on :8001, SPA fallback, and a
# /health endpoint), enables the site, validates the config and reloads nginx.
# Quoting note: inside the ssh "..." strings, '\$var' escapes keep nginx
# runtime variables literal for the remote host, while unescaped text is
# expanded locally - do not reformat these blocks casually.
deploy_configuration() {
    print_status "Deploying configuration files..."
    # Create nginx configuration for aitbc
    print_server "Deploying nginx configuration to aitbc..."
    ssh $AITBC_SERVER "
sudo tee /etc/nginx/sites-available/aitbc-advanced.conf > /dev/null << 'EOF'
server {
    listen 80;
    server_name aitbc.bubuit.net;
    root /var/www/aitbc.bubuit.net;
    index index.html;
    # Security headers
    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection \"1; mode=block\";
    add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;
    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;
    # API routes
    location /api/ {
        proxy_pass http://localhost:8000/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
    # Advanced features API
    location /api/v1/advanced/ {
        proxy_pass http://localhost:8001/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
    # Static files
    location / {
        try_files \$uri \$uri/ /index.html;
        expires 1y;
        add_header Cache-Control \"public, immutable\";
    }
    # Health check
    location /health {
        access_log off;
        return 200 \"healthy\";
        add_header Content-Type text/plain;
    }
}
EOF
# Enable site
sudo ln -sf /etc/nginx/sites-available/aitbc-advanced.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
echo 'Nginx configuration deployed to aitbc'
"
    # Create nginx configuration for aitbc1 (identical vhost, different
    # server_name and site file name).
    print_server "Deploying nginx configuration to aitbc1..."
    ssh $AITBC1_SERVER "
sudo tee /etc/nginx/sites-available/aitbc1-advanced.conf > /dev/null << 'EOF'
server {
    listen 80;
    server_name aitbc1.bubuit.net;
    root /var/www/aitbc.bubuit.net;
    index index.html;
    # Security headers
    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection \"1; mode=block\";
    add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;
    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;
    # API routes
    location /api/ {
        proxy_pass http://localhost:8000/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
    # Advanced features API
    location /api/v1/advanced/ {
        proxy_pass http://localhost:8001/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }
    # Static files
    location / {
        try_files \$uri \$uri/ /index.html;
        expires 1y;
        add_header Cache-Control \"public, immutable\";
    }
    # Health check
    location /health {
        access_log off;
        return 200 \"healthy\";
        add_header Content-Type text/plain;
    }
}
EOF
# Enable site
sudo ln -sf /etc/nginx/sites-available/aitbc1-advanced.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
echo 'Nginx configuration deployed to aitbc1'
"
    print_success "Configuration files deployed to both servers"
}
# Verify deployment
# Runs a remote status sweep on each server: systemd unit activity for the
# three services, nginx state and config validity, and a local /health probe.
# Diagnostic only - each check has an '|| echo' fallback, so problems are
# reported in the output without failing the ssh command.
verify_deployment() {
    print_status "Verifying deployment..."
    # Verify aitbc server
    print_server "Verifying aitbc server deployment..."
    ssh $AITBC_SERVER "
echo '=== aitbc Server Status ==='
# Check services
echo 'Services:'
sudo systemctl is-active aitbc-cross-chain-reputation || echo 'cross-chain-reputation: INACTIVE'
sudo systemctl is-active aitbc-agent-communication || echo 'agent-communication: INACTIVE'
sudo systemctl is-active aitbc-advanced-learning || echo 'advanced-learning: INACTIVE'
# Check nginx
echo 'Nginx:'
sudo systemctl is-active nginx || echo 'nginx: INACTIVE'
sudo nginx -t || echo 'nginx config: ERROR'
# Check web server
echo 'Web server:'
curl -s http://localhost/health || echo 'health check: FAILED'
echo 'aitbc verification completed'
"
    # Verify aitbc1 server (same sweep).
    print_server "Verifying aitbc1 server deployment..."
    ssh $AITBC1_SERVER "
echo '=== aitbc1 Server Status ==='
# Check services
echo 'Services:'
sudo systemctl is-active aitbc-cross-chain-reputation || echo 'cross-chain-reputation: INACTIVE'
sudo systemctl is-active aitbc-agent-communication || echo 'agent-communication: INACTIVE'
sudo systemctl is-active aitbc-advanced-learning || echo 'advanced-learning: INACTIVE'
# Check nginx
echo 'Nginx:'
sudo systemctl is-active nginx || echo 'nginx: INACTIVE'
sudo nginx -t || echo 'nginx config: ERROR'
# Check web server
echo 'Web server:'
curl -s http://localhost/health || echo 'health check: FAILED'
echo 'aitbc1 verification completed'
"
    print_success "Deployment verification completed"
}
# Test external connectivity
# Probe each server's public /health endpoint. Problems only produce a
# warning (DNS/proxy propagation may lag the deployment itself).
test_connectivity() {
    print_status "Testing external connectivity..."
    # Test aitbc server.
    # -f: treat HTTP error statuses as failure; --max-time: a hung or
    # blackholed endpoint must not stall the whole deployment.
    print_server "Testing aitbc external connectivity..."
    if curl -fs --max-time 10 "http://$AITBC_HOST/health" | grep -q "healthy"; then
        print_success "aitbc server is accessible externally"
    else
        print_warning "aitbc server external connectivity issue"
    fi
    # Test aitbc1 server.
    print_server "Testing aitbc1 external connectivity..."
    if curl -fs --max-time 10 "http://$AITBC1_HOST/health" | grep -q "healthy"; then
        print_success "aitbc1 server is accessible externally"
    else
        print_warning "aitbc1 server external connectivity issue"
    fi
}
# Main execution
# Runs the full rollout to both servers in order: prerequisite checks,
# backend services, frontend, nginx configuration, remote verification, and
# an external connectivity probe; then prints an operator summary.
main() {
    print_critical "🚀 STARTING AITBC PLATFORM SERVICES DEPLOYMENT TO aitbc AND aitbc1 SERVERS"
    # Run deployment steps
    check_prerequisites
    deploy_services
    deploy_frontend
    deploy_configuration
    verify_deployment
    test_connectivity
    print_success "🎉 AITBC PLATFORM SERVICES DEPLOYMENT COMPLETED!"
    echo ""
    echo "📊 Deployment Summary:"
    echo "   Servers: aitbc, aitbc1"
    echo "   Services: Deployed"
    echo "   Frontend: Deployed"
    echo "   Configuration: Deployed"
    echo "   Verification: Completed"
    echo ""
    echo "🌐 Platform URLs:"
    echo "   aitbc Frontend: http://$AITBC_HOST/"
    echo "   aitbc API: http://$AITBC_HOST/api/"
    echo "   aitbc Advanced: http://$AITBC_HOST/api/v1/advanced/"
    echo "   aitbc1 Frontend: http://$AITBC1_HOST/"
    echo "   aitbc1 API: http://$AITBC1_HOST/api/"
    echo "   aitbc1 Advanced: http://$AITBC1_HOST/api/v1/advanced/"
    echo ""
    echo "🔧 Next Steps:"
    echo "   1. Monitor service performance on both servers"
    echo "   2. Test cross-server functionality"
    echo "   3. Verify load balancing if configured"
    echo "   4. Monitor system resources and scaling"
    echo "   5. Set up monitoring and alerting"
    echo "   6. Test failover scenarios"
    echo ""
    echo "⚠️  Important Notes:"
    echo "   - Both servers are running identical configurations"
    echo "   - Services are managed by systemd"
    echo "   - Nginx is configured for reverse proxy"
    echo "   - Health checks are available at /health"
    echo "   - API endpoints are available at /api/ and /api/v1/advanced/"
    echo ""
    echo "🎯 Deployment Status: SUCCESS - SERVICES LIVE ON BOTH SERVERS!"
}
# Handle script interruption: a partially-applied deployment must be flagged
# loudly so operators can audit what was and wasn't applied to each server.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,774 +0,0 @@
#!/usr/bin/env bash
# AITBC Platform Deployment Script for aitbc and aitbc1 Servers
# Deploys the complete platform to both production servers
set -euo pipefail
# ANSI color constants for log output (used by the print_* helpers below).
# Marked readonly so later code cannot accidentally clobber them.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly PURPLE='\033[0;35m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m' # No Color
# Internal: render a "[TAG] message" line, coloring the tag with the given
# ANSI code and resetting with $NC. $1=color, $2=tag, $3=message.
_emit() {
echo -e "${1}[${2}]${NC} ${3}"
}
# Informational progress message.
print_status() {
_emit "${BLUE}" "INFO" "$1"
}
# A step finished successfully.
print_success() {
_emit "${GREEN}" "SUCCESS" "$1"
}
# Non-fatal problem worth attention.
print_warning() {
_emit "${YELLOW}" "WARNING" "$1"
}
# A step failed.
print_error() {
_emit "${RED}" "ERROR" "$1"
}
# High-visibility banner (start/abort of the whole run).
print_critical() {
_emit "${RED}" "CRITICAL" "$1"
}
# Message scoped to one of the two target servers.
print_server() {
_emit "${PURPLE}" "SERVER" "$1"
}
# Message about an artifact being pushed out.
print_deploy() {
_emit "${CYAN}" "DEPLOY" "$1"
}
# Configuration
# Paths are resolved relative to this script so it can be run from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web"
INFRA_DIR="$ROOT_DIR/infra"
# Server configuration
# *_SERVER values are SSH host aliases (presumably defined in ~/.ssh/config —
# confirm); *_HOST values are the public DNS names used for external checks.
AITBC_SERVER="aitbc-cascade"
AITBC1_SERVER="aitbc1-cascade"
AITBC_HOST="aitbc.bubuit.net"
AITBC1_HOST="aitbc1.bubuit.net"
# NOTE(review): the *_PORT values are never referenced below — confirm before removing.
AITBC_PORT="22"
AITBC1_PORT="22"
# Deployment configuration
# Feature toggles come from positional args 1-5; defaults give a full deploy
# with backup and verification enabled.
DEPLOY_CONTRACTS=${1:-"true"}
DEPLOY_SERVICES=${2:-"true"}
DEPLOY_FRONTEND=${3:-"true"}
SKIP_VERIFICATION=${4:-"false"}
BACKUP_BEFORE_DEPLOY=${5:-"true"}
# Banner: print the effective configuration before doing anything.
echo "🚀 AITBC Platform Deployment to aitbc and aitbc1 Servers"
echo "======================================================="
echo "Deploy Contracts: $DEPLOY_CONTRACTS"
echo "Deploy Services: $DEPLOY_SERVICES"
echo "Deploy Frontend: $DEPLOY_FRONTEND"
echo "Skip Verification: $SKIP_VERIFICATION"
echo "Backup Before Deploy: $BACKUP_BEFORE_DEPLOY"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks
# Verifies SSH keys exist, both servers are reachable non-interactively
# (BatchMode prevents password prompts), and the local source directories
# exist. Any failure aborts the whole script.
check_prerequisites() {
print_status "Checking prerequisites..."
# Check if SSH keys are available
if [[ ! -f "$HOME/.ssh/id_rsa" ]] && [[ ! -f "$HOME/.ssh/id_ed25519" ]]; then
print_error "SSH keys not found. Please generate SSH keys first."
exit 1
fi
# Check if we can connect to servers (host aliases quoted per SC2086)
print_status "Testing SSH connections..."
if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$AITBC_SERVER" "echo 'Connection successful'" 2>/dev/null; then
print_error "Cannot connect to $AITBC_SERVER"
exit 1
fi
if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$AITBC1_SERVER" "echo 'Connection successful'" 2>/dev/null; then
print_error "Cannot connect to $AITBC1_SERVER"
exit 1
fi
# Check if required directories exist
if [[ ! -d "$CONTRACTS_DIR" ]]; then
print_error "Contracts directory not found: $CONTRACTS_DIR"
exit 1
fi
if [[ ! -d "$SERVICES_DIR" ]]; then
print_error "Services directory not found: $SERVICES_DIR"
exit 1
fi
if [[ ! -d "$FRONTEND_DIR" ]]; then
print_error "Frontend directory not found: $FRONTEND_DIR"
exit 1
fi
print_success "Prerequisites check completed"
}
# Backup existing deployment
# Copies the web roots, enabled nginx sites and aitbc* systemd units into a
# timestamped directory under /tmp ON EACH SERVER (the same $backup_dir path
# is created remotely on both hosts; nothing is copied back locally).
# Individual copies are best-effort (|| true) so a missing path does not abort.
backup_deployment() {
if [[ "$BACKUP_BEFORE_DEPLOY" != "true" ]]; then
print_status "Skipping backup (disabled)"
return
fi
print_status "Creating backup of existing deployment..."
local backup_dir="/tmp/aitbc-backup-$(date +%Y%m%d-%H%M%S)"
# Backup aitbc server
print_server "Backing up aitbc server..."
ssh $AITBC_SERVER "
mkdir -p $backup_dir
sudo cp -r /var/www/aitbc.bubuit.net $backup_dir/ 2>/dev/null || true
sudo cp -r /var/www/html $backup_dir/ 2>/dev/null || true
sudo cp -r /etc/nginx/sites-enabled/ $backup_dir/ 2>/dev/null || true
sudo cp -r /etc/systemd/system/aitbc* $backup_dir/ 2>/dev/null || true
echo 'aitbc backup completed'
"
# Backup aitbc1 server
print_server "Backing up aitbc1 server..."
ssh $AITBC1_SERVER "
mkdir -p $backup_dir
sudo cp -r /var/www/aitbc.bubuit.net $backup_dir/ 2>/dev/null || true
sudo cp -r /var/www/html $backup_dir/ 2>/dev/null || true
sudo cp -r /etc/nginx/sites-enabled/ $backup_dir/ 2>/dev/null || true
sudo cp -r /etc/systemd/system/aitbc* $backup_dir/ 2>/dev/null || true
echo 'aitbc1 backup completed'
"
print_success "Backup completed: $backup_dir"
}
# Deploy smart contracts
# Compiles and deploys the Hardhat contracts to mainnet, then verifies them
# unless SKIP_VERIFICATION=true. Skipped entirely when DEPLOY_CONTRACTS!=true
# or when a previous deployment marker file already exists.
deploy_contracts() {
if [[ "$DEPLOY_CONTRACTS" != "true" ]]; then
print_status "Skipping contract deployment (disabled)"
return
fi
print_status "Deploying smart contracts..."
# NOTE(review): this cd persists after the function returns (no subshell or
# pushd); later steps use absolute $*_DIR paths, but confirm nothing relies
# on the original CWD.
cd "$CONTRACTS_DIR"
# Check if contracts are already deployed
if [[ -f "deployed-contracts-mainnet.json" ]]; then
print_warning "Contracts already deployed. Skipping deployment."
return
fi
# Compile contracts
print_status "Compiling contracts..."
npx hardhat compile
# Deploy to mainnet
print_status "Deploying contracts to mainnet..."
npx hardhat run scripts/deploy-advanced-contracts.js --network mainnet
# Verify contracts
if [[ "$SKIP_VERIFICATION" != "true" ]]; then
print_status "Verifying contracts..."
npx hardhat run scripts/verify-advanced-contracts.js --network mainnet
fi
print_success "Smart contracts deployed and verified"
}
# Deploy backend services
# Copies $SERVICES_DIR to each server via /tmp, installs Python dependencies,
# writes three systemd units (cross-chain-reputation, agent-communication,
# advanced-learning), then enables and starts them. The two ssh blocks are
# intentionally identical except for the final log line.
# Inside each ssh "..." block, the << 'EOF' heredocs are interpreted by the
# REMOTE shell (quoted delimiter: unit file contents are taken literally).
deploy_services() {
if [[ "$DEPLOY_SERVICES" != "true" ]]; then
print_status "Skipping service deployment (disabled)"
return
fi
print_status "Deploying backend services..."
# Deploy to aitbc server
print_server "Deploying services to aitbc server..."
# Copy services to aitbc
scp -r "$SERVICES_DIR" $AITBC_SERVER:/tmp/
# Install dependencies and setup services on aitbc
ssh $AITBC_SERVER "
# Create service directory
sudo mkdir -p /opt/aitbc/services
# Copy services
sudo cp -r /tmp/services/* /opt/aitbc/services/
# Install Python dependencies
cd /opt/aitbc/services
python3 -m pip install -r requirements.txt 2>/dev/null || true
# Create systemd services
sudo tee /etc/systemd/system/aitbc-cross-chain-reputation.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Cross Chain Reputation Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m cross_chain_reputation
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-agent-communication.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Agent Communication Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m agent_communication
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-advanced-learning.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Advanced Learning Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m advanced_learning
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and start services
sudo systemctl daemon-reload
sudo systemctl enable aitbc-cross-chain-reputation
sudo systemctl enable aitbc-agent-communication
sudo systemctl enable aitbc-advanced-learning
sudo systemctl start aitbc-cross-chain-reputation
sudo systemctl start aitbc-agent-communication
sudo systemctl start aitbc-advanced-learning
echo 'Services deployed and started on aitbc'
"
# Deploy to aitbc1 server (mirror of the aitbc block above)
print_server "Deploying services to aitbc1 server..."
# Copy services to aitbc1
scp -r "$SERVICES_DIR" $AITBC1_SERVER:/tmp/
# Install dependencies and setup services on aitbc1
ssh $AITBC1_SERVER "
# Create service directory
sudo mkdir -p /opt/aitbc/services
# Copy services
sudo cp -r /tmp/services/* /opt/aitbc/services/
# Install Python dependencies
cd /opt/aitbc/services
python3 -m pip install -r requirements.txt 2>/dev/null || true
# Create systemd services
sudo tee /etc/systemd/system/aitbc-cross-chain-reputation.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Cross Chain Reputation Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m cross_chain_reputation
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-agent-communication.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Agent Communication Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m agent_communication
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-advanced-learning.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Advanced Learning Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m advanced_learning
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and start services
sudo systemctl daemon-reload
sudo systemctl enable aitbc-cross-chain-reputation
sudo systemctl enable aitbc-agent-communication
sudo systemctl enable aitbc-advanced-learning
sudo systemctl start aitbc-cross-chain-reputation
sudo systemctl start aitbc-agent-communication
sudo systemctl start aitbc-advanced-learning
echo 'Services deployed and started on aitbc1'
"
print_success "Backend services deployed to both servers"
}
# Deploy frontend
# Builds the marketplace web app locally (npm run build) and pushes the build
# output to the web root on both servers, keeping a one-off .backup copy of
# the previous root on each host.
deploy_frontend() {
if [[ "$DEPLOY_FRONTEND" != "true" ]]; then
print_status "Skipping frontend deployment (disabled)"
return
fi
print_status "Building and deploying frontend..."
cd "$FRONTEND_DIR"
# Build frontend
print_status "Building frontend application..."
npm run build
# Deploy to aitbc server
print_server "Deploying frontend to aitbc server..."
# Copy built frontend to aitbc
# BUG FIX: the /tmp/frontend staging directory must exist on the remote
# before scp can copy into it, otherwise the transfer fails.
ssh "$AITBC_SERVER" "mkdir -p /tmp/frontend"
scp -r build/* "$AITBC_SERVER":/tmp/frontend/
ssh "$AITBC_SERVER" "
# Backup existing frontend
sudo cp -r /var/www/aitbc.bubuit.net /var/www/aitbc.bubuit.net.backup 2>/dev/null || true
# Deploy new frontend
sudo rm -rf /var/www/aitbc.bubuit.net/*
sudo cp -r /tmp/frontend/* /var/www/aitbc.bubuit.net/
# Set permissions
sudo chown -R www-data:www-data /var/www/aitbc.bubuit.net/
sudo chmod -R 755 /var/www/aitbc.bubuit.net/
echo 'Frontend deployed to aitbc'
"
# Deploy to aitbc1 server
print_server "Deploying frontend to aitbc1 server..."
# Copy built frontend to aitbc1
ssh "$AITBC1_SERVER" "mkdir -p /tmp/frontend"
scp -r build/* "$AITBC1_SERVER":/tmp/frontend/
ssh "$AITBC1_SERVER" "
# Backup existing frontend
sudo cp -r /var/www/aitbc.bubuit.net /var/www/aitbc.bubuit.net.backup 2>/dev/null || true
# Deploy new frontend
sudo rm -rf /var/www/aitbc.bubuit.net/*
sudo cp -r /tmp/frontend/* /var/www/aitbc.bubuit.net/
# Set permissions
sudo chown -R www-data:www-data /var/www/aitbc.bubuit.net/
sudo chmod -R 755 /var/www/aitbc.bubuit.net/
echo 'Frontend deployed to aitbc1'
"
print_success "Frontend deployed to both servers"
}
# Deploy configuration files
# Writes an nginx vhost on each server (HTTP only) that serves the SPA from
# the web root, proxies /api/ -> :8000 and /api/v1/advanced/ -> :8001, and
# exposes a static /health endpoint; then enables the site and reloads nginx.
# Inside the ssh "..." blocks, \$ and \" keep nginx variables and quotes from
# being expanded by the LOCAL shell; they reach the remote intact.
deploy_configuration() {
print_status "Deploying configuration files..."
# Create nginx configuration for aitbc
print_server "Deploying nginx configuration to aitbc..."
ssh $AITBC_SERVER "
sudo tee /etc/nginx/sites-available/aitbc-advanced.conf > /dev/null << 'EOF'
server {
listen 80;
server_name aitbc.bubuit.net;
root /var/www/aitbc.bubuit.net;
index index.html;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection \"1; mode=block\";
add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;
# Gzip compression
gzip on;
gzip_vary on;
gzip_min_length 1024;
gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;
# API routes
location /api/ {
proxy_pass http://localhost:8000/;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Advanced features API
location /api/v1/advanced/ {
proxy_pass http://localhost:8001/;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Static files
location / {
try_files \$uri \$uri/ /index.html;
expires 1y;
add_header Cache-Control \"public, immutable\";
}
# Health check
location /health {
access_log off;
return 200 \"healthy\";
add_header Content-Type text/plain;
}
}
EOF
# Enable site
sudo ln -sf /etc/nginx/sites-available/aitbc-advanced.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
echo 'Nginx configuration deployed to aitbc'
"
# Create nginx configuration for aitbc1 (same vhost body, different server_name)
print_server "Deploying nginx configuration to aitbc1..."
ssh $AITBC1_SERVER "
sudo tee /etc/nginx/sites-available/aitbc1-advanced.conf > /dev/null << 'EOF'
server {
listen 80;
server_name aitbc1.bubuit.net;
root /var/www/aitbc.bubuit.net;
index index.html;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection \"1; mode=block\";
add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;
# Gzip compression
gzip on;
gzip_vary on;
gzip_min_length 1024;
gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;
# API routes
location /api/ {
proxy_pass http://localhost:8000/;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Advanced features API
location /api/v1/advanced/ {
proxy_pass http://localhost:8001/;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Static files
location / {
try_files \$uri \$uri/ /index.html;
expires 1y;
add_header Cache-Control \"public, immutable\";
}
# Health check
location /health {
access_log off;
return 200 \"healthy\";
add_header Content-Type text/plain;
}
}
EOF
# Enable site
sudo ln -sf /etc/nginx/sites-available/aitbc1-advanced.conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
echo 'Nginx configuration deployed to aitbc1'
"
print_success "Configuration files deployed to both servers"
}
# Verify deployment
# Remote smoke checks on both servers: systemd unit states, nginx config
# validity, and local health endpoints on ports 80/8000/8001. Every check
# falls back to an echo via ||, so failures are reported but never abort
# the script (important under set -e).
verify_deployment() {
if [[ "$SKIP_VERIFICATION" == "true" ]]; then
print_status "Skipping verification (disabled)"
return
fi
print_status "Verifying deployment..."
# Verify aitbc server
print_server "Verifying aitbc server deployment..."
ssh $AITBC_SERVER "
echo '=== aitbc Server Status ==='
# Check services
echo 'Services:'
sudo systemctl is-active aitbc-cross-chain-reputation || echo 'cross-chain-reputation: INACTIVE'
sudo systemctl is-active aitbc-agent-communication || echo 'agent-communication: INACTIVE'
sudo systemctl is-active aitbc-advanced-learning || echo 'advanced-learning: INACTIVE'
# Check nginx
echo 'Nginx:'
sudo systemctl is-active nginx || echo 'nginx: INACTIVE'
sudo nginx -t || echo 'nginx config: ERROR'
# Check web server
echo 'Web server:'
curl -s http://localhost/health || echo 'health check: FAILED'
# Check API endpoints
echo 'API endpoints:'
curl -s http://localhost:8000/health || echo 'API health: FAILED'
curl -s http://localhost:8001/health || echo 'Advanced API health: FAILED'
echo 'aitbc verification completed'
"
# Verify aitbc1 server (mirror of the checks above)
print_server "Verifying aitbc1 server deployment..."
ssh $AITBC1_SERVER "
echo '=== aitbc1 Server Status ==='
# Check services
echo 'Services:'
sudo systemctl is-active aitbc-cross-chain-reputation || echo 'cross-chain-reputation: INACTIVE'
sudo systemctl is-active aitbc-agent-communication || echo 'agent-communication: INACTIVE'
sudo systemctl is-active aitbc-advanced-learning || echo 'advanced-learning: INACTIVE'
# Check nginx
echo 'Nginx:'
sudo systemctl is-active nginx || echo 'nginx: INACTIVE'
sudo nginx -t || echo 'nginx config: ERROR'
# Check web server
echo 'Web server:'
curl -s http://localhost/health || echo 'health check: FAILED'
# Check API endpoints
echo 'API endpoints:'
curl -s http://localhost:8000/health || echo 'API health: FAILED'
curl -s http://localhost:8001/health || echo 'Advanced API health: FAILED'
echo 'aitbc1 verification completed'
"
print_success "Deployment verification completed"
}
# Test external connectivity
# Probes the public /health endpoint of each server over its DNS name and
# reports success or a warning; connectivity problems never abort the run.
test_connectivity() {
print_status "Testing external connectivity..."
local entry name host
for entry in "aitbc=$AITBC_HOST" "aitbc1=$AITBC1_HOST"; do
name="${entry%%=*}"
host="${entry#*=}"
print_server "Testing ${name} external connectivity..."
if curl -s "http://${host}/health" | grep -q "healthy"; then
print_success "${name} server is accessible externally"
else
print_warning "${name} server external connectivity issue"
fi
done
}
# Generate deployment report
# Writes a timestamped JSON summary into $ROOT_DIR. The unquoted EOF heredoc
# expands all $VARS and $(...) substitutions locally at write time.
# NOTE(review): service states are hard-coded as "deployed" regardless of the
# actual results above — treat the report as a record of intent, not a
# verification artifact.
generate_report() {
print_status "Generating deployment report..."
local report_file="$ROOT_DIR/deployment-report-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"servers": ["aitbc", "aitbc1"],
"contracts_deployed": "$DEPLOY_CONTRACTS",
"services_deployed": "$DEPLOY_SERVICES",
"frontend_deployed": "$DEPLOY_FRONTEND",
"backup_created": "$BACKUP_BEFORE_DEPLOY",
"verification_completed": "$([[ "$SKIP_VERIFICATION" != "true" ]] && echo "true" || echo "false")"
},
"servers": {
"aitbc": {
"host": "$AITBC_HOST",
"services": {
"cross_chain_reputation": "deployed",
"agent_communication": "deployed",
"advanced_learning": "deployed"
},
"web_server": "nginx",
"api_endpoints": {
"main": "http://$AITBC_HOST/api/",
"advanced": "http://$AITBC_HOST/api/v1/advanced/"
}
},
"aitbc1": {
"host": "$AITBC1_HOST",
"services": {
"cross_chain_reputation": "deployed",
"agent_communication": "deployed",
"advanced_learning": "deployed"
},
"web_server": "nginx",
"api_endpoints": {
"main": "http://$AITBC1_HOST/api/",
"advanced": "http://$AITBC1_HOST/api/v1/advanced/"
}
}
},
"urls": {
"aitbc_frontend": "http://$AITBC_HOST/",
"aitbc_api": "http://$AITBC_HOST/api/",
"aitbc_advanced": "http://$AITBC_HOST/api/v1/advanced/",
"aitbc1_frontend": "http://$AITBC1_HOST/",
"aitbc1_api": "http://$AITBC1_HOST/api/",
"aitbc1_advanced": "http://$AITBC1_HOST/api/v1/advanced/"
},
"next_steps": [
"1. Monitor service performance on both servers",
"2. Test cross-server functionality",
"3. Verify load balancing if configured",
"4. Monitor system resources and scaling",
"5. Set up monitoring and alerting",
"6. Test failover scenarios"
]
}
EOF
print_success "Deployment report saved to $report_file"
}
# Main execution
# Full-platform pipeline: prerequisites, backup, contracts, services,
# frontend, nginx config, verification, external connectivity test, and a
# JSON report, followed by a human-readable summary.
main() {
print_critical "🚀 STARTING AITBC PLATFORM DEPLOYMENT TO aitbc AND aitbc1 SERVERS"
# Run deployment steps
check_prerequisites
backup_deployment
deploy_contracts
deploy_services
deploy_frontend
deploy_configuration
verify_deployment
test_connectivity
generate_report
print_success "🎉 AITBC PLATFORM DEPLOYMENT COMPLETED!"
echo ""
echo "📊 Deployment Summary:"
echo " Servers: aitbc, aitbc1"
echo " Contracts: $DEPLOY_CONTRACTS"
echo " Services: $DEPLOY_SERVICES"
echo " Frontend: $DEPLOY_FRONTEND"
echo " Verification: $([[ "$SKIP_VERIFICATION" != "true" ]] && echo "Completed" || echo "Skipped")"
echo " Backup: $BACKUP_BEFORE_DEPLOY"
echo ""
echo "🌐 Platform URLs:"
echo " aitbc Frontend: http://$AITBC_HOST/"
echo " aitbc API: http://$AITBC_HOST/api/"
echo " aitbc Advanced: http://$AITBC_HOST/api/v1/advanced/"
echo " aitbc1 Frontend: http://$AITBC1_HOST/"
echo " aitbc1 API: http://$AITBC1_HOST/api/"
echo " aitbc1 Advanced: http://$AITBC1_HOST/api/v1/advanced/"
echo ""
echo "🔧 Next Steps:"
echo " 1. Monitor service performance on both servers"
echo " 2. Test cross-server functionality"
echo " 3. Verify load balancing if configured"
echo " 4. Monitor system resources and scaling"
echo " 5. Set up monitoring and alerting"
echo " 6. Test failover scenarios"
echo ""
echo "⚠️ Important Notes:"
echo " - Both servers are running identical configurations"
echo " - Services are managed by systemd"
echo " - Nginx is configured for reverse proxy"
echo " - Health checks are available at /health"
echo " - API endpoints are available at /api/ and /api/v1/advanced/"
echo " - Backup was created before deployment"
echo ""
echo "🎯 Deployment Status: SUCCESS - PLATFORM LIVE ON BOTH SERVERS!"
}
# Handle script interruption
# SIGINT/SIGTERM abort with a warning about partial state; no rollback is attempted.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,266 +0,0 @@
#!/bin/bash
# Deploys the AITBC smart-contract sources, helper scripts, configs and test
# contracts to the aitbc and aitbc1 servers over SSH, then installs a
# Node.js contract monitor under systemd on each host.
echo "=== AITBC Smart Contract Deployment to aitbc & aitbc1 ==="
# Server configurations - using cascade connections (SSH host aliases)
AITBC_SSH="aitbc-cascade"
AITBC1_SSH="aitbc1-cascade"
DEPLOY_PATH="/home/oib/windsurf/aitbc"
# Contract files to deploy (paths relative to the CWD this script is run from)
CONTRACTS=(
"contracts/AIPowerRental.sol"
"contracts/AITBCPaymentProcessor.sol"
"contracts/PerformanceVerifier.sol"
"contracts/DisputeResolution.sol"
"contracts/EscrowService.sol"
"contracts/DynamicPricing.sol"
"contracts/ZKReceiptVerifier.sol"
"contracts/Groth16Verifier.sol"
)
# Deployment scripts
SCRIPTS=(
"scripts/deploy_contracts.js"
"scripts/validate_contracts.js"
"scripts/integration_test.js"
"scripts/compile_contracts.sh"
)
# Configuration files
CONFIGS=(
"configs/deployment_config.json"
"package.json"
"hardhat.config.cjs"
)
# Test contracts
TEST_CONTRACTS=(
"test/contracts/MockERC20.sol"
"test/contracts/MockZKVerifier.sol"
"test/contracts/MockGroth16Verifier.sol"
"test/contracts/Integration.test.js"
)
echo "🚀 Starting deployment to aitbc and aitbc1 servers..."
# Function to deploy to a server
#   $1 - SSH host alias (cascade connection)
#   $2 - human-readable server name for log messages
# Missing local files are reported and skipped rather than aborting.
deploy_to_server() {
local ssh_cmd=$1
local server_name=$2
echo ""
echo "📡 Deploying to $server_name ($ssh_cmd)..."
# Create directories
ssh "$ssh_cmd" "mkdir -p $DEPLOY_PATH/contracts $DEPLOY_PATH/scripts $DEPLOY_PATH/configs $DEPLOY_PATH/test/contracts"
# Deploy contracts
echo "📄 Deploying smart contracts..."
local contract
for contract in "${CONTRACTS[@]}"; do
if [ -f "$contract" ]; then
scp "$contract" "$ssh_cmd:$DEPLOY_PATH/$contract"
echo "✅ $contract deployed to $server_name"
else
echo "❌ $contract not found"
fi
done
# Deploy scripts
echo "🔧 Deploying deployment scripts..."
local script
for script in "${SCRIPTS[@]}"; do
if [ -f "$script" ]; then
scp "$script" "$ssh_cmd:$DEPLOY_PATH/$script"
ssh "$ssh_cmd" "chmod +x $DEPLOY_PATH/$script"
echo "✅ $script deployed to $server_name"
else
echo "❌ $script not found"
fi
done
# Deploy configurations
echo "⚙️ Deploying configuration files..."
local config
for config in "${CONFIGS[@]}"; do
if [ -f "$config" ]; then
scp "$config" "$ssh_cmd:$DEPLOY_PATH/$config"
echo "✅ $config deployed to $server_name"
else
echo "❌ $config not found"
fi
done
# Deploy test contracts
echo "🧪 Deploying test contracts..."
local test_contract
for test_contract in "${TEST_CONTRACTS[@]}"; do
if [ -f "$test_contract" ]; then
scp "$test_contract" "$ssh_cmd:$DEPLOY_PATH/$test_contract"
echo "✅ $test_contract deployed to $server_name"
else
echo "❌ $test_contract not found"
fi
done
# Deploy node_modules if they exist
if [ -d "node_modules" ]; then
echo "📦 Deploying node_modules..."
# BUG FIX: copy the directory itself into $DEPLOY_PATH. The previous
# "node_modules/ -> $DEPLOY_PATH/node_modules/" form nested the tree as
# node_modules/node_modules on the remote side.
scp -r node_modules "$ssh_cmd:$DEPLOY_PATH/"
echo "✅ node_modules deployed to $server_name"
fi
echo "✅ Deployment to $server_name completed"
}
# Deploy to aitbc
deploy_to_server $AITBC_SSH "aitbc"
# Deploy to aitbc1
deploy_to_server $AITBC1_SSH "aitbc1"
echo ""
echo "🔍 Verifying deployment..."
# Verify deployment on aitbc: count the .sol/.js files that actually landed.
echo "📊 Checking aitbc deployment..."
ssh $AITBC_SSH "ls -la $DEPLOY_PATH/contracts/*.sol | wc -l | xargs echo 'Contract files on aitbc:'"
ssh $AITBC_SSH "ls -la $DEPLOY_PATH/scripts/*.js | wc -l | xargs echo 'Script files on aitbc:'"
# Verify deployment on aitbc1
echo "📊 Checking aitbc1 deployment..."
ssh $AITBC1_SSH "ls -la $DEPLOY_PATH/contracts/*.sol | wc -l | xargs echo 'Contract files on aitbc1:'"
ssh $AITBC1_SSH "ls -la $DEPLOY_PATH/scripts/*.js | wc -l | xargs echo 'Script files on aitbc1:'"
echo ""
# Run the deployed validation script remotely on each host.
echo "🧪 Running validation on aitbc..."
ssh $AITBC_SSH "cd $DEPLOY_PATH && node scripts/validate_contracts.js"
echo ""
echo "🧪 Running validation on aitbc1..."
ssh $AITBC1_SSH "cd $DEPLOY_PATH && node scripts/validate_contracts.js"
echo ""
echo "🔧 Setting up systemd services..."
# Create systemd service for contract monitoring
#   $1 - SSH host alias, $2 - server name for log messages
# Streams a unit file over ssh into /tmp on the remote host, installs it into
# /etc/systemd/system, then enables and starts it. The unquoted EOF heredoc
# expands $DEPLOY_PATH locally before the unit text leaves this machine.
create_systemd_service() {
local ssh_cmd=$1
local server_name=$2
echo "📝 Creating contract monitoring service on $server_name..."
# BUG FIX: the heredoc was previously piped into "$ssh_cmd" itself (the host
# alias, not an executable); it must be piped into ssh.
cat << EOF | ssh "$ssh_cmd" "cat > /tmp/aitbc-contracts.service"
[Unit]
Description=AITBC Smart Contracts Monitoring
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
User=oib
Group=oib
WorkingDirectory=$DEPLOY_PATH
Environment=PATH=$DEPLOY_PATH/node_modules/.bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/usr/bin/node scripts/contract_monitor.js
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
ssh "$ssh_cmd" "sudo mv /tmp/aitbc-contracts.service /etc/systemd/system/"
ssh "$ssh_cmd" "sudo systemctl daemon-reload"
ssh "$ssh_cmd" "sudo systemctl enable aitbc-contracts.service"
ssh "$ssh_cmd" "sudo systemctl start aitbc-contracts.service"
echo "✅ Contract monitoring service created on $server_name"
}
# Create contract monitor script
#   $1 - SSH host alias, $2 - server name for log messages
# Streams a small Node.js watcher over ssh into $DEPLOY_PATH/scripts on the
# remote host. The quoted 'EOF' delimiter keeps the JS ($, backticks) from
# being expanded by the local shell; $DEPLOY_PATH in the ssh command string
# itself still expands locally, as intended.
create_contract_monitor() {
local ssh_cmd=$1
local server_name=$2
echo "📝 Creating contract monitor script on $server_name..."
# BUG FIX: pipe the heredoc into ssh, not into "$ssh_cmd" (the host alias).
cat << 'EOF' | ssh "$ssh_cmd" "cat > $DEPLOY_PATH/scripts/contract_monitor.js"
#!/usr/bin/env node
const fs = require('fs');
const path = require('path');
console.log("🔍 AITBC Contract Monitor Started");
// Monitor contracts directory
const contractsDir = path.join(__dirname, '..', 'contracts');
function checkContracts() {
try {
const contracts = fs.readdirSync(contractsDir).filter(file => file.endsWith('.sol'));
console.log(`📊 Monitoring ${contracts.length} contracts`);
contracts.forEach(contract => {
const filePath = path.join(contractsDir, contract);
const stats = fs.statSync(filePath);
console.log(`📄 ${contract}: ${stats.size} bytes, modified: ${stats.mtime}`);
});
// Check if contracts are valid (basic check)
const validContracts = contracts.filter(contract => {
const content = fs.readFileSync(path.join(contractsDir, contract), 'utf8');
return content.includes('pragma solidity') && content.includes('contract ');
});
console.log(`✅ Valid contracts: ${validContracts.length}/${contracts.length}`);
} catch (error) {
console.error('❌ Error monitoring contracts:', error.message);
}
}
// Check every 30 seconds
setInterval(checkContracts, 30000);
// Initial check
checkContracts();
console.log("🔄 Contract monitoring active (30-second intervals)");
EOF
ssh "$ssh_cmd" "chmod +x $DEPLOY_PATH/scripts/contract_monitor.js"
echo "✅ Contract monitor script created on $server_name"
}
# Setup monitoring services
# Order matters: the monitor script must exist on a host before the systemd
# unit that execs it is enabled and started there.
create_contract_monitor $AITBC_SSH "aitbc"
create_systemd_service $AITBC_SSH "aitbc"
create_contract_monitor $AITBC1_SSH "aitbc1"
create_systemd_service $AITBC1_SSH "aitbc1"
echo ""
# Fixed-text summary; it does not reflect per-file failures reported above.
echo "📊 Deployment Summary:"
echo "✅ Smart contracts deployed to aitbc and aitbc1"
echo "✅ Deployment scripts and configurations deployed"
echo "✅ Test contracts and validation tools deployed"
echo "✅ Node.js dependencies deployed"
echo "✅ Contract monitoring services created"
echo "✅ Systemd services configured and started"
echo ""
echo "🔗 Service URLs:"
echo "aitbc: http://127.0.0.1:18000"
echo "aitbc1: http://127.0.0.1:18001"
echo ""
echo "📝 Next Steps:"
echo "1. Verify contract deployment on both servers"
echo "2. Run integration tests"
echo "3. Configure marketplace API integration"
echo "4. Start contract deployment process"
echo ""
echo "✨ Deployment to aitbc & aitbc1 completed!"

11
scripts/detect-aitbc-user.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# AITBC User Detection Script
# Prints the account AITBC services should run as: the dedicated "aitbc"
# user when present, the "oib" account as a fallback, and "root" only when
# neither exists. Always exits 0.
for candidate in aitbc oib; do
if id "$candidate" >/dev/null 2>&1; then
echo "$candidate"
exit 0
fi
done
echo "root"

View File

@@ -1,12 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# Health probe for the local coordinator proxy.
# A single one-shot probe races service startup (systemd can run this check
# before the API is listening), so poll with a short retry loop instead.
# All knobs are overridable via the environment for testing.
HEALTH_URL="${HEALTH_URL:-http://127.0.0.1:18000/v1/health}"
MAX_RETRIES="${MAX_RETRIES:-10}"
RETRY_DELAY="${RETRY_DELAY:-2}"

for ((i = 1; i <= MAX_RETRIES; i++)); do
  # -f: treat HTTP errors as failure; -sS: quiet but keep error messages.
  if curl -fsS --max-time 5 "$HEALTH_URL" >/dev/null 2>&1; then
    echo "Coordinator proxy healthy: $HEALTH_URL"
    exit 0
  fi
  echo "Attempt $i/$MAX_RETRIES: coordinator proxy not ready yet, waiting ${RETRY_DELAY}s..." >&2
  sleep "$RETRY_DELAY"
done

echo "Coordinator proxy health check FAILED: $HEALTH_URL" >&2
exit 1

View File

@@ -1,5 +1,5 @@
[Unit] [Unit]
Description=AITBC Coordinator API Service (Python 3.11+) Description=AITBC Coordinator API Service (Python 3.13.5+)
After=network.target After=network.target
Wants=network.target Wants=network.target
@@ -11,7 +11,7 @@ WorkingDirectory=/opt/coordinator-api
Environment=PATH=/opt/coordinator-api/.venv/bin Environment=PATH=/opt/coordinator-api/.venv/bin
Environment=PYTHONPATH=/opt/coordinator-api/src Environment=PYTHONPATH=/opt/coordinator-api/src
# Python version validation # Python version validation
ExecStartPre=/bin/bash -c "python3.11 --version || (echo 'Python 3.11+ required' && exit 1)" ExecStartPre=/bin/bash -c "python3 --version || (echo 'Python 3.13.5+ required' && exit 1)"
ExecStart=/opt/coordinator-api/.venv/bin/python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 ExecStart=/opt/coordinator-api/.venv/bin/python -m uvicorn app.main:app --host 0.0.0.0 --port 8000
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
Restart=always Restart=always

View File

@@ -5,4 +5,7 @@ Wants=network-online.target
[Service] [Service]
Type=oneshot Type=oneshot
ExecStart=/opt/coordinator-api/scripts/check_coordinator_proxy.sh ExecStart=/opt/aitbc/apps/coordinator-api/scripts/check_coordinator_proxy.sh
[Install]
WantedBy=multi-user.target

View File

@@ -1,5 +1,5 @@
[Unit] [Unit]
Description=AITBC Exchange API Service (Python 3.11+) Description=AITBC Exchange API Service (Python 3.13.5+)
After=network.target After=network.target
Wants=network.target Wants=network.target
@@ -10,7 +10,7 @@ Group=root
WorkingDirectory=/opt/exchange-api WorkingDirectory=/opt/exchange-api
Environment=PATH=/opt/exchange-api/.venv/bin Environment=PATH=/opt/exchange-api/.venv/bin
# Python version validation # Python version validation
ExecStartPre=/bin/bash -c "python3.11 --version || (echo 'Python 3.11+ required' && exit 1)" ExecStartPre=/bin/bash -c "python3 --version || (echo 'Python 3.13.5+ required' && exit 1)"
ExecStart=/opt/exchange-api/.venv/bin/python simple_exchange_api.py ExecStart=/opt/exchange-api/.venv/bin/python simple_exchange_api.py
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
Restart=always Restart=always

View File

@@ -1,5 +1,5 @@
[Unit] [Unit]
Description=AITBC Blockchain Node Service (Python 3.11+) Description=AITBC Blockchain Node Service (Python 3.13.5+)
After=network.target After=network.target
Wants=network.target Wants=network.target
@@ -12,7 +12,7 @@ Environment=PATH=/opt/blockchain-node/.venv/bin
Environment=PYTHONPATH=/opt/blockchain-node Environment=PYTHONPATH=/opt/blockchain-node
Environment=RUST_LOG=info Environment=RUST_LOG=info
# Python version validation # Python version validation
ExecStartPre=/bin/bash -c "python3.11 --version || (echo 'Python 3.11+ required' && exit 1)" ExecStartPre=/bin/bash -c "python3 --version || (echo 'Python 3.13.5+ required' && exit 1)"
ExecStart=/opt/blockchain-node/.venv/bin/python -m node.main --datadir /opt/blockchain-node/data --rpc-bind 0.0.0.0:8545 ExecStart=/opt/blockchain-node/.venv/bin/python -m node.main --datadir /opt/blockchain-node/data --rpc-bind 0.0.0.0:8545
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
Restart=always Restart=always

Some files were not shown because too many files have changed in this diff Show More