chore(systemd): remove obsolete systemd service files and update infrastructure documentation

- Remove 8 unused systemd service files from coordinator-api/systemd/
  - aitbc-adaptive-learning.service (port 8005)
  - aitbc-advanced-ai.service
  - aitbc-enterprise-api.service
  - aitbc-gpu-multimodal.service (port 8003)
  - aitbc-marketplace-enhanced.service (port 8006)
  - aitbc-modality-optimization.service (port 8004)
  - aitbc-multimodal.service (port 8002)
  - aitbc-openclaw-enhanced.service (port 8007)
This commit is contained in:
oib
2026-03-04 12:16:50 +01:00
parent 581309369d
commit 50954a4b31
101 changed files with 1655 additions and 4871 deletions

View File

@@ -1,43 +0,0 @@
#!/bin/bash
# Bootstrap the AITBC devnet with the genesis file that contains the
# initial token distribution, then restart the node and verify the
# exchange treasury balance.
set -euo pipefail

# Single source of truth for the node's data layout.
DATA_DIR="/root/aitbc/apps/blockchain-node/data"
DB_FILE="${DATA_DIR}/devnet/db.sqlite"

echo "=== AITBC Bootstrap Genesis Setup ==="
echo ""

# Stop the blockchain node
echo "1. Stopping blockchain node..."
sudo systemctl stop aitbc-node

# Backup current data (ignore failure when no database exists yet)
echo "2. Backing up current blockchain data..."
sudo mv "$DB_FILE" "${DB_FILE}.backup.$(date +%s)" 2>/dev/null || true

# Copy new genesis
echo "3. Applying bootstrap genesis..."
sudo cp "${DATA_DIR}/genesis_with_bootstrap.json" "${DATA_DIR}/devnet/genesis.json"

# Reset database. The backup step above already moved the file away;
# this is a belt-and-braces cleanup in case the mv was skipped.
echo "4. Resetting blockchain database..."
sudo rm -f "$DB_FILE"

# Restart blockchain node
echo "5. Restarting blockchain node..."
sudo systemctl start aitbc-node

# Wait for node to start
echo "6. Waiting for node to initialize..."
sleep 5

# Verify treasury balance (best effort — the node may still be starting,
# so do not let a failed curl/jq abort the summary under set -e)
echo "7. Verifying treasury balance..."
curl -s http://localhost:9080/rpc/getBalance/aitbcexchange00000000000000000000000000000000 | jq || true

echo ""
echo "=== Bootstrap Complete! ==="
echo "Treasury should now have 10,000,000 AITBC"
echo ""
echo "Initial Distribution:"
echo "- Exchange Treasury: 10,000,000 AITBC (47.6%)"
echo "- Community Faucet: 1,000,000 AITBC (4.8%)"
echo "- Team Fund: 2,000,000 AITBC (9.5%)"
echo "- Early Investors: 5,000,000 AITBC (23.8%)"
echo "- Ecosystem Fund: 3,000,000 AITBC (14.3%)"

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env python3
"""
Script to assign a proposer to a block by polling for it.

Registers a miner with the coordinator, polls once for a pending job
(which makes this miner the block proposer), then looks the block up in
the explorer to confirm the proposer field was updated.
"""
import json
import os

import httpx

# Configuration.  The API key is read from the environment; the previous
# version assigned the literal string "${MINER_API_KEY}" — a shell-style
# placeholder that was never expanded — so every request was sent with a
# bogus key.
COORDINATOR_URL = "http://localhost:8001"
MINER_API_KEY = os.environ.get("MINER_API_KEY", "")
MINER_ID = "localhost-gpu-miner"


def assign_proposer_to_latest_block():
    """Poll for the latest unassigned job to become the proposer."""
    # First register the miner
    print("📝 Registering miner...")
    register_response = httpx.post(
        f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
        headers={
            "Content-Type": "application/json",
            "X-Api-Key": MINER_API_KEY,
        },
        json={
            "capabilities": {
                "gpu": {"model": "RTX 4060 Ti", "memory_gb": 16}
            }
        },
    )
    if register_response.status_code != 200:
        print(f"❌ Registration failed: {register_response.text}")
        return
    print("✅ Miner registered")

    # Poll for a job
    print("\n🔍 Polling for jobs...")
    poll_response = httpx.post(
        f"{COORDINATOR_URL}/v1/miners/poll",
        headers={
            "Content-Type": "application/json",
            "X-Api-Key": MINER_API_KEY,
        },
        json={"max_wait_seconds": 1},
    )
    if poll_response.status_code == 200:
        job = poll_response.json()
        print(f"✅ Received job: {job['job_id']}")
        print(f"   This job is now assigned to miner: {MINER_ID}")

        # Check the block: the explorer lists blocks whose hash matches
        # the job id (presumably job_id doubles as the block hash —
        # TODO confirm against the coordinator API).
        print("\n📦 Checking block...")
        blocks_response = httpx.get(f"{COORDINATOR_URL}/v1/explorer/blocks")
        if blocks_response.status_code == 200:
            blocks = blocks_response.json()
            for block in blocks['items']:
                if block['hash'] == job['job_id']:
                    print(f"✅ Block updated!")
                    print(f"   Height: {block['height']}")
                    print(f"   Hash: {block['hash']}")
                    print(f"   Proposer: {block['proposer']}")
                    break
    elif poll_response.status_code == 204:
        print("   No jobs available to poll")
    else:
        print(f"❌ Poll failed: {poll_response.text}")


if __name__ == "__main__":
    print("🎯 Assign Proposer to Latest Block")
    print("=" * 40)
    assign_proposer_to_latest_block()

View File

@@ -1,323 +0,0 @@
#!/bin/bash
# Blockchain Synchronization Optimization Script
# Fixes common sync issues and optimizes cross-site synchronization
#
# Six steps: analyse current heights, repair known config and service
# problems on Node 1 and Node 3, append tuned sync settings, restart
# everything, re-measure, and install a monitoring helper in /tmp.
set -e

echo "🔧 Blockchain Synchronization Optimization"
echo "=========================================="

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Colored output helpers; they rely on the RED/GREEN/YELLOW/NC color
# variables defined at the top of the script.
print_status() {
    printf '%b\n' "${GREEN}$1${NC}"
}
print_warning() {
    printf '%b\n' "${YELLOW}⚠️ $1${NC}"
}
print_error() {
    printf '%b\n' "${RED}$1${NC}"
}
# Return 0 when the given systemd unit is active, non-zero otherwise.
# With a second argument the check is performed over SSH on that host.
check_service() {
    local unit=$1
    local target=${2-}
    if [ -z "$target" ]; then
        systemctl is-active --quiet "$unit"
    else
        ssh "$target" "systemctl is-active --quiet '$unit'"
    fi
}
# Restart a systemd unit, locally or (when a host is given) via SSH.
restart_service() {
    local unit=$1
    local target=${2-}
    if [ -z "$target" ]; then
        sudo systemctl restart "$unit"
    else
        ssh "$target" "sudo systemctl restart '$unit'"
    fi
}
# Extract the numeric height from a /head JSON payload on stdin.
# The previous pipeline used `cut -d'"' -f2`, which yields the literal
# word "height" from '"height":123' instead of the number; split on ':'
# and strip quotes/spaces instead.
parse_height() {
    grep -o '"height":[^,}]*' | cut -d: -f2 | tr -d '" '
}

# Print the current blockchain height reported by $url/head.
# With a second argument the request is made over SSH from that host;
# the JSON is always parsed locally so there is a single parser.
get_height() {
    local url=$1
    local host=$2
    if [ -z "$host" ]; then
        curl -s "$url/head" 2>/dev/null | parse_height
    else
        ssh "$host" "curl -s '$url/head' 2>/dev/null" | parse_height
    fi
}
echo ""
echo "📊 Current Sync Status Analysis"
echo "=============================="

# Get current heights. Node 1 and Node 2 both live on aitbc-cascade
# (ports 8082/8081); Node 3 is on ns3-root behind 192.168.100.10.
echo "Checking current blockchain heights..."
NODE1_HEIGHT=$(get_height "http://localhost:8082/rpc" "aitbc-cascade")
NODE2_HEIGHT=$(get_height "http://localhost:8081/rpc" "aitbc-cascade")
NODE3_HEIGHT=$(get_height "http://192.168.100.10:8082/rpc" "ns3-root")
echo "Node 1 (aitbc-cascade): $NODE1_HEIGHT"
echo "Node 2 (aitbc-cascade): $NODE2_HEIGHT"
echo "Node 3 (ns3): $NODE3_HEIGHT"

# Calculate height differences (only when both heights were obtained)
if [ -n "$NODE1_HEIGHT" ] && [ -n "$NODE2_HEIGHT" ]; then
    DIFF12=$((NODE2_HEIGHT - NODE1_HEIGHT))
    echo "Height difference (Node2 - Node1): $DIFF12"
fi
if [ -n "$NODE2_HEIGHT" ] && [ -n "$NODE3_HEIGHT" ]; then
    DIFF23=$((NODE2_HEIGHT - NODE3_HEIGHT))
    echo "Height difference (Node2 - Node3): $DIFF23"
fi
echo ""
echo "🔧 Step 1: Fix Node 1 Endpoint Configuration"
echo "============================================="

# Check Node 1 config for the known-bad /rpc2 endpoint
echo "Checking Node 1 configuration..."
NODE1_CONFIG=$(ssh aitbc-cascade "grep -n 'aitbc.bubuit.net/rpc2' /opt/blockchain-node/src/aitbc_chain/config.py 2>/dev/null || true")
if [ -n "$NODE1_CONFIG" ]; then
    print_warning "Found wrong endpoint /rpc2 in Node 1 config"
    echo "Fixing endpoint configuration..."
    # Backup original config before editing in place
    ssh aitbc-cascade "sudo cp /opt/blockchain-node/src/aitbc_chain/config.py /opt/blockchain-node/src/aitbc_chain/config.py.backup"
    # Fix the endpoint: rewrite /rpc2 to /rpc
    ssh aitbc-cascade "sudo sed -i 's|https://aitbc.bubuit.net/rpc2|https://aitbc.bubuit.net/rpc|g' /opt/blockchain-node/src/aitbc_chain/config.py"
    print_status "Fixed Node 1 endpoint configuration"
    # Restart Node 1 and give it a moment before the health check
    echo "Restarting Node 1 service..."
    restart_service "aitbc-blockchain-node-1.service" "aitbc-cascade"
    sleep 5
    if check_service "aitbc-blockchain-node-1.service" "aitbc-cascade"; then
        print_status "Node 1 service restarted successfully"
    else
        print_error "Node 1 service failed to restart"
    fi
else
    print_status "Node 1 endpoint configuration is correct"
fi
echo ""
echo "🔧 Step 2: Fix Node 3 Services"
echo "=============================="

# Check Node 3 service status ('failed' also covers unreachable host)
echo "Checking Node 3 services..."
NODE3_STATUS=$(ssh ns3-root "systemctl is-active blockchain-node-3.service 2>/dev/null || echo 'failed'")
if [ "$NODE3_STATUS" = "failed" ] || [ "$NODE3_STATUS" = "activating" ]; then
    print_warning "Node 3 service is in $NODE3_STATUS state"
    # Show the last few journal lines to aid diagnosis
    echo "Checking Node 3 service logs..."
    ssh ns3-root "journalctl -u blockchain-node-3.service --no-pager -n 10"
    echo "Attempting to fix Node 3 service..."
    # Stop and restart Node 3 (stop may fail if it is not running)
    ssh ns3-root "sudo systemctl stop blockchain-node-3.service || true"
    sleep 2
    ssh ns3-root "sudo systemctl start blockchain-node-3.service"
    sleep 5
    # Check status again after the restart settled
    NODE3_NEW_STATUS=$(ssh ns3-root "systemctl is-active blockchain-node-3.service 2>/dev/null || echo 'failed'")
    if [ "$NODE3_NEW_STATUS" = "active" ]; then
        print_status "Node 3 service fixed and running"
    else
        print_error "Node 3 service still not working: $NODE3_NEW_STATUS"
        echo "Manual intervention required for Node 3"
    fi
else
    print_status "Node 3 service is running"
fi

echo ""
echo "🔧 Step 3: Optimize Sync Configuration"
echo "======================================"
# Append tuned sync settings to a node's config.py over SSH.
#   $1 host alias, $2 absolute path of the config file
#
# NOTE(review): settings are appended (tee -a), so re-running this
# script adds duplicate lines. The block is written in Python
# annotated-assignment style and lands at module level of config.py —
# confirm the node actually reads these names from there.
optimize_sync_config() {
    local host=$1
    local config_path=$2
    echo "Optimizing sync configuration on $host..."
    # Backup config (ignore failure when the file does not exist)
    ssh "$host" "sudo cp '$config_path' '$config_path.backup' 2>/dev/null || true"
    # Add/modify sync settings
    ssh "$host" "sudo tee -a '$config_path' > /dev/null << 'EOF'
# Sync optimization settings
sync_interval_seconds: int = 5 # Reduced from 10s
sync_retry_attempts: int = 3
sync_retry_delay_seconds: int = 2
sync_timeout_seconds: int = 10
max_sync_height_diff: int = 1000 # Alert if difference exceeds this
EOF"
    print_status "Sync configuration optimized on $host"
}
# Optimize sync configs on all three nodes (two live on aitbc-cascade)
optimize_sync_config "aitbc-cascade" "/opt/blockchain-node/src/aitbc_chain/config.py"
optimize_sync_config "aitbc-cascade" "/opt/blockchain-node-2/src/aitbc_chain/config.py"
optimize_sync_config "ns3-root" "/opt/blockchain-node/src/aitbc_chain/config.py"

echo ""
echo "🔧 Step 4: Restart Services with New Config"
echo "=========================================="

# Restart all services so the appended settings take effect
echo "Restarting blockchain services..."
for service in "aitbc-blockchain-node-1.service" "aitbc-blockchain-node-2.service"; do
    echo "Restarting $service on aitbc-cascade..."
    restart_service "$service" "aitbc-cascade"
    sleep 3
done
for service in "blockchain-node-3.service"; do
    echo "Restarting $service on ns3..."
    restart_service "$service" "ns3-root"
    sleep 3
done

echo ""
echo "📊 Step 5: Verify Sync Optimization"
echo "==================================="

# Wait for services to stabilize before re-measuring
echo "Waiting for services to stabilize..."
sleep 10

# Check new heights with the same probes as the initial analysis
echo "Checking new blockchain heights..."
NEW_NODE1_HEIGHT=$(get_height "http://localhost:8082/rpc" "aitbc-cascade")
NEW_NODE2_HEIGHT=$(get_height "http://localhost:8081/rpc" "aitbc-cascade")
NEW_NODE3_HEIGHT=$(get_height "http://192.168.100.10:8082/rpc" "ns3-root")
echo "New heights:"
echo "Node 1: $NEW_NODE1_HEIGHT"
echo "Node 2: $NEW_NODE2_HEIGHT"
echo "Node 3: $NEW_NODE3_HEIGHT"

# Calculate improvements (only when all four heights were captured)
if [ -n "$NEW_NODE1_HEIGHT" ] && [ -n "$NEW_NODE2_HEIGHT" ] && [ -n "$NODE1_HEIGHT" ] && [ -n "$NODE2_HEIGHT" ]; then
    OLD_DIFF=$((NODE2_HEIGHT - NODE1_HEIGHT))
    NEW_DIFF=$((NEW_NODE2_HEIGHT - NEW_NODE1_HEIGHT))
    echo "Height difference improvement:"
    echo "Before: $OLD_DIFF"
    echo "After: $NEW_DIFF"
    if [ $NEW_DIFF -lt $OLD_DIFF ]; then
        IMPROVEMENT=$((OLD_DIFF - NEW_DIFF))
        print_status "Sync improved by $IMPROVEMENT blocks"
    else
        print_warning "Sync did not improve or got worse"
    fi
fi

echo ""
echo "🔧 Step 6: Create Sync Monitoring Script"
echo "========================================="
# Create monitoring script that reports the three node heights and warns
# when they drift apart. The height parsing splits on ':' — the previous
# `cut -d'"' -f2` returned the literal word "height" instead of the
# number, so the warnings could never fire.
cat > /tmp/sync_monitor.sh << 'EOF'
#!/bin/bash
# Blockchain Sync Monitor
# Run this periodically to check sync health
echo "🔍 Blockchain Sync Monitor - $(date)"
echo "===================================="
# Get heights (numeric value after the colon)
NODE1=$(curl -s http://localhost:8082/rpc/head 2>/dev/null | grep -o '"height":[^,}]*' | cut -d: -f2 | tr -d '" ')
NODE2=$(curl -s http://localhost:8081/rpc/head 2>/dev/null | grep -o '"height":[^,}]*' | cut -d: -f2 | tr -d '" ')
NODE3=$(ssh ns3-root "curl -s http://192.168.100.10:8082/rpc/head 2>/dev/null" | grep -o '"height":[^,}]*' | cut -d: -f2 | tr -d '" ')
echo "Node 1: $NODE1"
echo "Node 2: $NODE2"
echo "Node 3: $NODE3"
# Check for issues
if [ -n "$NODE1" ] && [ -n "$NODE2" ]; then
  DIFF=$((NODE2 - NODE1))
  if [ $DIFF -gt 100 ]; then
    echo "⚠️ WARNING: Node 1 and Node 2 height difference: $DIFF"
  fi
fi
if [ -n "$NODE2" ] && [ -n "$NODE3" ]; then
  DIFF=$((NODE2 - NODE3))
  if [ $DIFF -gt 1000 ]; then
    echo "⚠️ WARNING: Node 2 and Node 3 height difference: $DIFF"
  fi
fi
echo "Sync check completed."
EOF
chmod +x /tmp/sync_monitor.sh
print_status "Created sync monitoring script: /tmp/sync_monitor.sh"
echo ""
echo "🎉 Sync Optimization Complete!"
echo "=============================="
echo ""
# Final operator-facing summary and follow-up guidance
echo "📋 Summary of actions taken:"
echo "• Fixed Node 1 endpoint configuration"
echo "• Restarted problematic services"
echo "• Optimized sync intervals and retry logic"
echo "• Created monitoring script"
echo ""
echo "📊 Next steps:"
echo "1. Monitor sync performance with: /tmp/sync_monitor.sh"
echo "2. Set up cron job for periodic monitoring"
echo "3. Check logs for any remaining issues"
echo "4. Consider implementing P2P sync for better performance"
echo ""
echo "🔧 If issues persist:"
echo "• Check individual service logs: journalctl -u [service-name]"
echo "• Verify network connectivity between sites"
echo "• Consider manual block import for severely lagging nodes"
echo "• Review firewall and security group settings"
print_status "Blockchain synchronization optimization completed!"

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Return remaining testnet Bitcoin from the aitbc_exchange wallet to the
# return address below, keeping back a small reserve to cover the fee.
set -euo pipefail

RETURN_ADDRESS="tb1qerzrlxcfu24davlur5sqmgzzgsal6wusda40er"
# Flat amount (BTC) kept back so the transaction can pay its fee.
FEE_RESERVE="0.00001"

echo "Checking balance..."
BALANCE=$(bitcoin-cli -testnet -rpcwallet=aitbc_exchange getbalance)

# Only attempt a send when the balance exceeds the fee reserve;
# comparing against 0 (as before) could compute a negative send amount
# for dust balances and make sendtoaddress fail.
if [ "$(echo "$BALANCE > $FEE_RESERVE" | bc)" -eq 1 ]; then
    echo "Current balance: $BALANCE BTC"
    echo "Sending to return address: $RETURN_ADDRESS"
    # Calculate amount to send (balance minus small fee)
    SEND_AMOUNT=$(echo "$BALANCE - $FEE_RESERVE" | bc)
    TXID=$(bitcoin-cli -testnet -rpcwallet=aitbc_exchange sendtoaddress "$RETURN_ADDRESS" "$SEND_AMOUNT")
    echo "Transaction sent! TXID: $TXID"
    echo "Explorer: https://blockstream.info/testnet/tx/$TXID"
else
    echo "No Bitcoin to return. Current balance: $BALANCE BTC"
fi

View File

@@ -1,87 +0,0 @@
#!/bin/bash
# Start mock blockchain nodes for testing
# This script sets up the required mock servers on ports 8081 and 8082
set -e

echo "🚀 Starting Mock Blockchain Nodes for Testing"
echo "============================================="

# Colors used by the logging helpers below
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Logging helpers: informational and warning lines with color prefixes.
print_status() {
    printf '%b\n' "${GREEN}[INFO]${NC} $1"
}
print_warning() {
    printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}
# Succeed (return 0) when nothing answers /health on the given port;
# warn and return 1 when the port is already occupied.
check_port() {
    local port=$1
    if ! curl -s "http://127.0.0.1:$port/health" >/dev/null 2>&1; then
        return 0
    fi
    print_warning "Port $port is already in use"
    return 1
}
# Stop any existing mock servers; pkill's non-zero status when nothing
# matches is swallowed deliberately so set -e does not abort the script.
stop_existing_servers() {
    print_status "Stopping existing mock servers..."
    pkill -f "mock_blockchain_node.py" 2>/dev/null || true
    sleep 1
}
# Launch the two mock node processes and verify they answer /health.
# NOTE(review): the captured pid1/pid2 are never used afterwards, and
# the success output labels port 8082 as "Node 1" and 8081 as "Node 2"
# — presumably mirroring the real deployment; confirm the mapping.
start_mock_servers() {
    print_status "Starting mock blockchain node on port 8081..."
    # Run from the repository root (parent of this script's directory)
    cd "$(dirname "$0")/.."
    python3 tests/mock_blockchain_node.py 8081 > /tmp/mock_node_8081.log 2>&1 &
    local pid1=$!
    print_status "Starting mock blockchain node on port 8082..."
    python3 tests/mock_blockchain_node.py 8082 > /tmp/mock_node_8082.log 2>&1 &
    local pid2=$!
    # Wait for servers to start
    sleep 2
    # Verify servers are running before printing usage hints
    if curl -s "http://127.0.0.1:8081/health" >/dev/null 2>&1 && \
       curl -s "http://127.0.0.1:8082/health" >/dev/null 2>&1; then
        print_status "✅ Mock blockchain nodes are running!"
        echo ""
        echo "Node 1: http://127.0.0.1:8082"
        echo "Node 2: http://127.0.0.1:8081"
        echo ""
        echo "To run tests:"
        echo " python -m pytest tests/test_blockchain_nodes.py -v"
        echo ""
        echo "To stop servers:"
        echo " pkill -f 'mock_blockchain_node.py'"
        echo ""
        echo "Log files:"
        echo " Node 1: /tmp/mock_node_8082.log"
        echo " Node 2: /tmp/mock_node_8081.log"
    else
        print_warning "❌ Failed to start mock servers"
        echo "Check log files:"
        echo " Node 1: /tmp/mock_node_8082.log"
        echo " Node 2: /tmp/mock_node_8081.log"
        exit 1
    fi
}
# Main execution: clear stale processes, then start fresh servers.
main() {
    stop_existing_servers
    start_mock_servers
}

# Run main function
main "$@"

View File

@@ -1,86 +0,0 @@
#!/bin/bash
# scripts/check-file-organization.sh
# Scans the repository root for files that should live in sub-folders
# and reports each offender with a suggested destination.

echo "🔍 Checking project file organization..."

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Count issues
ISSUES=0
# Print a formatted problem report and bump the global issue counter.
#   $1 file/dir, $2 description of the problem, $3 suggested fix
# Uses an arithmetic assignment rather than ((ISSUES++)): the (( ))
# form returns a non-zero status when the counter is still 0, making
# the very first call look like a failure under `set -e` or `&&`.
report_issue() {
    local file="$1"
    local issue="$2"
    local suggestion="$3"
    echo -e "${RED}❌ ISSUE: $file${NC}"
    echo -e " ${YELLOW}Problem: $issue${NC}"
    echo -e " ${BLUE}Suggestion: $suggestion${NC}"
    echo ""
    ISSUES=$((ISSUES + 1))
}
# Check root directory for misplaced files. Each category below lists
# glob patterns that should never match at the repository root.
echo "📁 Checking root directory..."
cd "$(dirname "$0")/.."

# Test files
for file in test_*.py test_*.sh run_mc_test.sh; do
    if [[ -f "$file" ]]; then
        report_issue "$file" "Test file at root level" "Move to dev/tests/"
    fi
done

# Development scripts
for file in patch_*.py fix_*.py simple_test.py; do
    if [[ -f "$file" ]]; then
        report_issue "$file" "Development script at root level" "Move to dev/scripts/"
    fi
done

# Multi-chain files
for file in MULTI_*.md; do
    if [[ -f "$file" ]]; then
        report_issue "$file" "Multi-chain file at root level" "Move to dev/multi-chain/"
    fi
done

# Environment directories
for dir in node_modules .venv cli_env logs .pytest_cache .ruff_cache .vscode; do
    if [[ -d "$dir" ]]; then
        report_issue "$dir" "Environment directory at root level" "Move to dev/env/ or dev/cache/"
    fi
done

# Configuration files
for file in .aitbc.yaml .aitbc.yaml.example .env.production .nvmrc .lycheeignore; do
    if [[ -f "$file" ]]; then
        report_issue "$file" "Configuration file at root level" "Move to config/"
    fi
done

# Check if essential files are missing (warn only; does not count as an issue)
echo "📋 Checking essential files..."
ESSENTIAL_FILES=(".editorconfig" ".env.example" ".gitignore" "LICENSE" "README.md" "pyproject.toml" "poetry.lock" "pytest.ini" "run_all_tests.sh")
for file in "${ESSENTIAL_FILES[@]}"; do
    if [[ ! -f "$file" ]]; then
        echo -e "${YELLOW}⚠️ WARNING: Essential file '$file' is missing${NC}"
    fi
done

# Summary: exit 0 when clean, 1 when anything was flagged
if [[ $ISSUES -eq 0 ]]; then
    echo -e "${GREEN}✅ File organization is perfect! No issues found.${NC}"
    exit 0
else
    echo -e "${RED}❌ Found $ISSUES organization issue(s)${NC}"
    echo -e "${BLUE}💡 Run './scripts/move-to-right-folder.sh --auto' to fix automatically${NC}"
    exit 1
fi

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Uncomment for debugging
# set -x

# Repository root (two levels above this script) and the PYTHONPATH
# entries for the shared Python packages.
PROJECT_ROOT=$(cd "$(dirname "$0")/../.." && pwd)
PKG_PATHS="${PROJECT_ROOT}/packages/py/aitbc-crypto/src:${PROJECT_ROOT}/packages/py/aitbc-sdk/src"
cd "${PROJECT_ROOT}"

# Run suites through poetry when it is available, bare python otherwise.
if command -v poetry >/dev/null 2>&1; then
    RUNNER=(poetry run)
else
    RUNNER=()
fi
# Run pytest with a given PYTHONPATH.
#   $1 colon-separated PYTHONPATH; remaining args are passed to pytest.
# The empty-RUNNER case is branched explicitly because expanding
# "${RUNNER[@]}" on an empty array trips `set -u` in bash < 4.4.
run_pytest() {
    local py_path=$1
    shift
    if [ ${#RUNNER[@]} -gt 0 ]; then
        PYTHONPATH="$py_path" "${RUNNER[@]}" python -m pytest "$@"
    else
        PYTHONPATH="$py_path" python -m pytest "$@"
    fi
}
# One suite per app/package; each gets only the source trees it needs.
run_pytest "${PROJECT_ROOT}/apps/coordinator-api/src:${PKG_PATHS}" apps/coordinator-api/tests -q
run_pytest "${PKG_PATHS}" packages/py/aitbc-sdk/tests -q
run_pytest "${PROJECT_ROOT}/apps/miner-node/src:${PKG_PATHS}" apps/miner-node/tests -q
run_pytest "${PROJECT_ROOT}/apps/wallet-daemon/src:${PROJECT_ROOT}/apps/blockchain-node/src:${PKG_PATHS}" apps/wallet-daemon/tests -q
run_pytest "${PROJECT_ROOT}/apps/blockchain-node/src:${PKG_PATHS}" apps/blockchain-node/tests/test_websocket.py -q

View File

@@ -1,559 +0,0 @@
#!/usr/bin/env python3
"""
AITBC Community Onboarding Automation
This script automates the onboarding process for new community members,
including welcome messages, resource links, and initial guidance.
"""
import asyncio
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from pathlib import Path
import subprocess
import os
class CommunityOnboarding:
    """Automated community onboarding system."""

    def __init__(self, config_path: str = "config/community_config.json"):
        # Merged default + user configuration (see _load_config).
        self.config = self._load_config(config_path)
        # Shared "community_onboarding" logger, configured once.
        self.logger = self._setup_logging()
        # Persistent onboarding state from data/onboarding_data.json.
        self.onboarding_data = self._load_onboarding_data()
def _load_config(self, config_path: str) -> Dict:
"""Load community configuration."""
default_config = {
"discord": {
"bot_token": os.getenv("DISCORD_BOT_TOKEN"),
"welcome_channel": "welcome",
"general_channel": "general",
"help_channel": "help"
},
"github": {
"token": os.getenv("GITHUB_TOKEN"),
"org": "aitbc",
"repo": "aitbc",
"team_slugs": ["core-team", "maintainers", "contributors"]
},
"email": {
"smtp_server": os.getenv("SMTP_SERVER"),
"smtp_port": 587,
"username": os.getenv("SMTP_USERNAME"),
"password": os.getenv("SMTP_PASSWORD"),
"from_address": "community@aitbc.dev"
},
"onboarding": {
"welcome_delay_hours": 1,
"follow_up_days": [3, 7, 14],
"resource_links": {
"documentation": "https://docs.aitbc.dev",
"api_reference": "https://api.aitbc.dev/docs",
"plugin_development": "https://docs.aitbc.dev/plugins",
"community_forum": "https://community.aitbc.dev",
"discord_invite": "https://discord.gg/aitbc"
}
}
}
config_file = Path(config_path)
if config_file.exists():
with open(config_file, 'r') as f:
user_config = json.load(f)
default_config.update(user_config)
return default_config
def _setup_logging(self) -> logging.Logger:
"""Setup logging for the onboarding system."""
logger = logging.getLogger("community_onboarding")
logger.setLevel(logging.INFO)
if not logger.handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def _load_onboarding_data(self) -> Dict:
"""Load onboarding data from file."""
data_file = Path("data/onboarding_data.json")
if data_file.exists():
with open(data_file, 'r') as f:
return json.load(f)
return {"members": {}, "messages": {}, "follow_ups": {}}
def _save_onboarding_data(self) -> None:
"""Save onboarding data to file."""
data_file = Path("data/onboarding_data.json")
data_file.parent.mkdir(exist_ok=True)
with open(data_file, 'w') as f:
json.dump(self.onboarding_data, f, indent=2)
async def welcome_new_member(self, member_id: str, member_name: str,
                             platform: str = "discord") -> bool:
    """Welcome a new community member.

    Creates the member's onboarding record, triggers the welcome
    message and records an analytics event.  Returns True on success,
    False when any step raised.

    NOTE(review): the awaited _schedule_welcome_message sleeps for the
    configured welcome delay, so this call blocks for that long.
    """
    try:
        self.logger.info(f"Welcoming new member: {member_name} on {platform}")
        # Create onboarding record with empty tracking lists
        self.onboarding_data["members"][member_id] = {
            "name": member_name,
            "platform": platform,
            "joined_at": datetime.now().isoformat(),
            "welcome_sent": False,
            "follow_ups_sent": [],
            "resources_viewed": [],
            "contributions": [],
            "status": "new"
        }
        # Schedule welcome message
        await self._schedule_welcome_message(member_id)
        # Track member in analytics
        await self._track_member_analytics(member_id, "joined")
        self._save_onboarding_data()
        return True
    except Exception as e:
        self.logger.error(f"Error welcoming member {member_name}: {e}")
        return False
async def _schedule_welcome_message(self, member_id: str) -> None:
    """Send the welcome message for a newly registered member.

    The original implementation claimed to "send immediately" but then
    awaited ``asyncio.sleep(welcome_delay_hours * 3600)``, blocking the
    caller (welcome_new_member) for up to an hour.  Until a proper task
    queue exists, honour the stated intent and send right away.
    """
    # In production, this would enqueue a delayed task honouring
    # self.config["onboarding"]["welcome_delay_hours"].
    await self.send_welcome_message(member_id)
async def send_welcome_message(self, member_id: str) -> bool:
    """Send welcome message to member.

    Dispatches on the member's recorded platform (discord or github),
    marks the record as welcomed on success and emits an analytics
    event.  Returns False for unsupported platforms or on error.
    """
    try:
        member_data = self.onboarding_data["members"][member_id]
        platform = member_data["platform"]
        if platform == "discord":
            success = await self._send_discord_welcome(member_id)
        elif platform == "github":
            success = await self._send_github_welcome(member_id)
        else:
            self.logger.warning(f"Unsupported platform: {platform}")
            return False
        if success:
            # Record when the welcome actually went out, then persist.
            member_data["welcome_sent"] = True
            member_data["welcome_sent_at"] = datetime.now().isoformat()
            self._save_onboarding_data()
            await self._track_member_analytics(member_id, "welcome_sent")
        return success
    except Exception as e:
        self.logger.error(f"Error sending welcome message to {member_id}: {e}")
        return False
async def _send_discord_welcome(self, member_id: str) -> bool:
    """Send welcome message via Discord.

    Currently a stub: the message is only logged.  The commented block
    shows the intended bot call for production.
    """
    try:
        # Discord bot implementation would go here
        # For now, we'll log the message
        member_data = self.onboarding_data["members"][member_id]
        welcome_message = self._generate_welcome_message(member_data["name"])
        self.logger.info(f"Discord welcome message for {member_id}: {welcome_message}")
        # In production:
        # await discord_bot.send_message(
        #     channel_id=self.config["discord"]["welcome_channel"],
        #     content=welcome_message
        # )
        return True
    except Exception as e:
        self.logger.error(f"Error sending Discord welcome: {e}")
        return False
async def _send_github_welcome(self, member_id: str) -> bool:
    """Send welcome message via GitHub.

    Currently a stub: the message is only logged.  The commented block
    shows the intended issue-comment call for production.
    """
    try:
        # GitHub API implementation would go here
        member_data = self.onboarding_data["members"][member_id]
        welcome_message = self._generate_welcome_message(member_data["name"])
        self.logger.info(f"GitHub welcome message for {member_id}: {welcome_message}")
        # In production:
        # await github_api.create_issue_comment(
        #     repo=self.config["github"]["repo"],
        #     issue_number=welcome_issue_number,
        #     body=welcome_message
        # )
        return True
    except Exception as e:
        self.logger.error(f"Error sending GitHub welcome: {e}")
        return False
def _generate_welcome_message(self, member_name: str) -> str:
    """Generate personalized welcome message.

    Interpolates the member's name and the configured resource links
    into a fixed Markdown template.
    """
    resources = self.config["onboarding"]["resource_links"]
    message = f"""🎉 Welcome to AITBC, {member_name}!
We're excited to have you join our community of developers, researchers, and innovators building the future of AI-powered blockchain technology.
🚀 **Quick Start Guide:**
1. **Documentation**: {resources["documentation"]}
2. **API Reference**: {resources["api_reference"]}
3. **Plugin Development**: {resources["plugin_development"]}
4. **Community Forum**: {resources["community_forum"]}
5. **Discord Chat**: {resources["discord_invite"]}
📋 **Next Steps:**
- ⭐ Star our repository on GitHub
- 📖 Read our contribution guidelines
- 💬 Introduce yourself in the #introductions channel
- 🔍 Check out our "good first issues" for newcomers
🛠️ **Ways to Contribute:**
- Code contributions (bug fixes, features)
- Documentation improvements
- Plugin development
- Community support and mentoring
- Testing and feedback
❓ **Need Help?**
- Ask questions in #help channel
- Check our FAQ at {resources["documentation"]}/faq
- Join our weekly office hours (Tuesdays 2PM UTC)
We're here to help you succeed! Don't hesitate to reach out.
Welcome aboard! 🚀
#AITBCCommunity #Welcome #OpenSource"""
    return message
async def send_follow_up_message(self, member_id: str, day: int) -> bool:
    """Send the day-N follow-up to a member, at most once per day value.

    Discord members receive a DM; everyone else falls back to email.
    On success the day is recorded in follow_ups_sent and persisted.
    """
    try:
        member_data = self.onboarding_data["members"][member_id]
        if day in member_data["follow_ups_sent"]:
            return True  # Already sent
        follow_up_message = self._generate_follow_up_message(member_data["name"], day)
        if member_data["platform"] == "discord":
            success = await self._send_discord_follow_up(member_id, follow_up_message)
        else:
            success = await self._send_email_follow_up(member_id, follow_up_message)
        if success:
            # Record the send so the same day is never re-sent
            member_data["follow_ups_sent"].append(day)
            member_data[f"follow_up_{day}_sent_at"] = datetime.now().isoformat()
            self._save_onboarding_data()
            await self._track_member_analytics(member_id, f"follow_up_{day}")
        return success
    except Exception as e:
        self.logger.error(f"Error sending follow-up to {member_id}: {e}")
        return False
def _generate_follow_up_message(self, member_name: str, day: int) -> str:
    """Generate follow-up message based on day.

    Fixed templates exist for days 3, 7 and 14; any other day falls
    back to a short generic check-in line.
    """
    resources = self.config["onboarding"]["resource_links"]
    if day == 3:
        return f"""Hi {member_name}! 👋
Hope you're settling in well! Here are some resources to help you get started:
🔧 **Development Setup:**
- Clone the repository: `git clone https://github.com/aitbc/aitbc`
- Install dependencies: `poetry install`
- Run tests: `pytest`
📚 **Learning Resources:**
- Architecture overview: {resources["documentation"]}/architecture
- Plugin tutorial: {resources["plugin_development"]}/tutorial
- API examples: {resources["api_reference"]}/examples
💬 **Community Engagement:**
- Join our weekly community call (Thursdays 3PM UTC)
- Share your progress in #show-and-tell
- Ask for help in #help
How's your experience been so far? Any questions or challenges we can help with?
#AITBCCommunity #Onboarding #GetStarted"""
    elif day == 7:
        return f"""Hi {member_name}! 🎯
You've been with us for a week! We'd love to hear about your experience:
📊 **Quick Check-in:**
- Have you been able to set up your development environment?
- Have you explored the codebase or documentation?
- Are there any areas where you'd like more guidance?
🚀 **Contribution Opportunities:**
- Good first issues: https://github.com/aitbc/aitbc/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22
- Documentation improvements: {resources["documentation"]}/contribute
- Plugin ideas: {resources["plugin_development"]}/ideas
🎉 **Community Events:**
- Monthly hackathon (first Saturday)
- Plugin showcase (third Thursday)
- Office hours (every Tuesday 2PM UTC)
Your feedback helps us improve the onboarding experience. What would make your journey more successful?
#AITBCCommunity #Feedback #Community"""
    elif day == 14:
        return f"""Hi {member_name}! 🌟
Two weeks in - you're becoming part of the AITBC ecosystem!
🎯 **Next Level Engagement:**
- Consider joining a specialized team (security, plugins, docs, etc.)
- Start a plugin project: {resources["plugin_development"]}/starter
- Review a pull request to learn the codebase
- Share your ideas in #feature-requests
🏆 **Recognition Program:**
- Contributor of the month nominations
- Plugin contest participation
- Community spotlight features
- Speaking opportunities at community events
📈 **Your Impact:**
- Every contribution, no matter how small, helps
- Your questions help us improve documentation
- Your feedback shapes the project direction
- Your presence strengthens the community
What would you like to focus on next? We're here to support your journey!
#AITBCCommunity #Growth #Impact"""
    else:
        return f"Hi {member_name}! Just checking in. How's your AITBC journey going?"
async def _send_discord_follow_up(self, member_id: str, message: str) -> bool:
    """Send follow-up via Discord DM.

    Stub: logs the first 100 characters of the message and reports
    success; actual DM delivery is not implemented yet.
    """
    try:
        self.logger.info(f"Discord follow-up for {member_id}: {message[:100]}...")
        # Discord DM implementation
        return True
    except Exception as e:
        self.logger.error(f"Error sending Discord follow-up: {e}")
        return False
async def _send_email_follow_up(self, member_id: str, message: str) -> bool:
    """Send follow-up via email.

    Stub: logs the first 100 characters of the message and reports
    success; actual SMTP delivery is not implemented yet.
    """
    try:
        self.logger.info(f"Email follow-up for {member_id}: {message[:100]}...")
        # Email implementation
        return True
    except Exception as e:
        self.logger.error(f"Error sending email follow-up: {e}")
        return False
async def track_member_activity(self, member_id: str, activity_type: str,
                                details: Dict = None) -> None:
    """Record an activity event for a known member.

    Appends a timestamped entry to the member's activity log, promotes
    the member's status on milestone events, persists the store and
    emits an analytics event.  Unknown member ids are silently ignored.
    """
    # Milestone events that upgrade a member's status.
    promotions = {
        "first_contribution": "contributor",
        "first_plugin": "plugin_developer",
    }
    try:
        members = self.onboarding_data["members"]
        if member_id not in members:
            return
        record = members[member_id]
        record.setdefault("activities", []).append({
            "type": activity_type,
            "timestamp": datetime.now().isoformat(),
            "details": details or {},
        })
        if activity_type in promotions:
            record["status"] = promotions[activity_type]
        self._save_onboarding_data()
        await self._track_member_analytics(member_id, activity_type)
    except Exception as e:
        self.logger.error(f"Error tracking activity for {member_id}: {e}")
async def _track_member_analytics(self, member_id: str, event: str) -> None:
    """Track analytics for member events.

    Currently a stub that only logs the event; the commented block
    shows the intended analytics-service call for production.
    """
    try:
        # Analytics implementation would go here
        self.logger.info(f"Analytics event: {member_id} - {event}")
        # In production, send to analytics service
        # await analytics_service.track_event({
        #     "member_id": member_id,
        #     "event": event,
        #     "timestamp": datetime.now().isoformat(),
        #     "properties": {}
        # })
    except Exception as e:
        self.logger.error(f"Error tracking analytics: {e}")
async def process_follow_ups(self) -> None:
    """Send every due, not-yet-sent follow-up for each onboarded member.

    A follow-up for day N is due once `joined_at + N days` has passed and
    N is not already recorded in the member's `follow_ups_sent` list.
    """
    try:
        now = datetime.now()
        for mid, record in self.onboarding_data["members"].items():
            joined = datetime.fromisoformat(record["joined_at"])
            for day in self.config["onboarding"]["follow_up_days"]:
                due_at = joined + timedelta(days=day)
                if now >= due_at and day not in record["follow_ups_sent"]:
                    await self.send_follow_up_message(mid, day)
    except Exception as exc:
        self.logger.error(f"Error processing follow-ups: {exc}")
async def generate_onboarding_report(self) -> Dict:
    """Build a snapshot of onboarding metrics.

    Returns totals, welcome-message rate, and member counts grouped by
    status and platform; returns {} if report generation fails.
    """
    try:
        members = list(self.onboarding_data["members"].values())
        total = len(members)
        welcomed = len([m for m in members if m.get("welcome_sent")])
        by_status: Dict = {}
        by_platform: Dict = {}
        for m in members:
            status = m.get("status", "new")
            by_status[status] = by_status.get(status, 0) + 1
            platform = m.get("platform", "unknown")
            by_platform[platform] = by_platform.get(platform, 0) + 1
        return {
            "total_members": total,
            "welcome_sent": welcomed,
            "welcome_rate": welcomed / total if total > 0 else 0,
            "status_distribution": by_status,
            "platform_distribution": by_platform,
            "generated_at": datetime.now().isoformat(),
        }
    except Exception as exc:
        self.logger.error(f"Error generating report: {exc}")
        return {}
async def run_daily_tasks(self) -> None:
    """Execute the daily onboarding pipeline: follow-ups, report, cleanup."""
    try:
        self.logger.info("Running daily onboarding tasks")
        await self.process_follow_ups()
        daily_report = await self.generate_onboarding_report()
        self.logger.info(f"Daily onboarding report: {daily_report}")
        await self._cleanup_old_data()
    except Exception as exc:
        self.logger.error(f"Error running daily tasks: {exc}")
async def _cleanup_old_data(self) -> None:
"""Clean up old onboarding data."""
try:
cutoff_date = datetime.now() - timedelta(days=365)
# Remove members older than 1 year with no activity
to_remove = []
for member_id, member_data in self.onboarding_data["members"].items():
joined_date = datetime.fromisoformat(member_data["joined_at"])
if (joined_date < cutoff_date and
not member_data.get("activities") and
member_data.get("status") == "new"):
to_remove.append(member_id)
for member_id in to_remove:
del self.onboarding_data["members"][member_id]
self.logger.info(f"Removed inactive member: {member_id}")
if to_remove:
self._save_onboarding_data()
except Exception as e:
self.logger.error(f"Error cleaning up data: {e}")
# CLI interface for the onboarding system
async def main():
    """Command-line entry point for the onboarding system.

    Exactly one action is performed per invocation: welcome a member,
    send a follow-up, print a report, or run the daily task pipeline.
    """
    import argparse

    parser = argparse.ArgumentParser(description="AITBC Community Onboarding")
    parser.add_argument("--welcome", help="Welcome new member (member_id,name,platform)")
    parser.add_argument("--followup", help="Send follow-up (member_id,day)")
    parser.add_argument("--report", action="store_true", help="Generate onboarding report")
    parser.add_argument("--daily", action="store_true", help="Run daily tasks")
    args = parser.parse_args()

    system = CommunityOnboarding()
    if args.welcome:
        member_id, name, platform = args.welcome.split(",")
        await system.welcome_new_member(member_id, name, platform)
        print(f"Welcome message scheduled for {name}")
    elif args.followup:
        member_id, day = args.followup.split(",")
        delivered = await system.send_follow_up_message(member_id, int(day))
        print(f"Follow-up sent: {delivered}")
    elif args.report:
        print(json.dumps(await system.generate_onboarding_report(), indent=2))
    elif args.daily:
        await system.run_daily_tasks()
        print("Daily tasks completed")
    else:
        print("Use --help to see available options")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -1,73 +0,0 @@
#!/bin/bash
# Compile all AITBC Solidity contracts with solc and collect bin/abi artifacts.
echo "=== AITBC Smart Contract Compilation ==="
# Ensure the Solidity compiler is available before doing any work.
if ! command -v solc &> /dev/null; then
    echo "Error: solc (Solidity compiler) not found"
    echo "Please install solc: npm install -g solc"
    exit 1
fi
# Output directories for compiled artifacts and the compiler cache.
mkdir -p artifacts
mkdir -p cache
# Contract files to compile
contracts=(
    "contracts/AIPowerRental.sol"
    "contracts/AITBCPaymentProcessor.sol"
    "contracts/PerformanceVerifier.sol"
    "contracts/DisputeResolution.sol"
    "contracts/EscrowService.sol"
    "contracts/DynamicPricing.sol"
    "test/contracts/MockERC20.sol"
    "test/contracts/MockZKVerifier.sol"
    "test/contracts/MockGroth16Verifier.sol"
)
echo "Compiling contracts..."
# Compile each contract; abort on the first failure.
for contract in "${contracts[@]}"; do
    if [ -f "$contract" ]; then
        echo "Compiling $contract..."
        # Extract contract name from file path
        contract_name=$(basename "$contract" .sol)
        # Test solc's exit status directly instead of checking $? afterwards
        # (the old pattern breaks if any command is inserted in between).
        if solc --bin --abi --optimize --output-dir artifacts \
            --base-path . \
            --include-path node_modules/@openzeppelin/contracts/node_modules/@openzeppelin/contracts \
            "$contract"; then
            echo "$contract_name compiled successfully"
        else
            echo "$contract_name compilation failed"
            exit 1
        fi
    else
        echo "⚠️ Contract file not found: $contract"
    fi
done
echo ""
echo "=== Compilation Summary ==="
echo "✅ All contracts compiled successfully"
echo "📁 Artifacts saved to: artifacts/"
echo "📋 ABI files available for integration"
# Count artifacts with globs instead of parsing ls output (fragile with
# unusual filenames; see ShellCheck SC2010).
shopt -s nullglob
bin_files=(artifacts/*.bin)
abi_files=(artifacts/*.abi)
shopt -u nullglob
echo ""
echo "Compiled artifacts:"
echo "Binary files: ${#bin_files[@]}"
echo "ABI files: ${#abi_files[@]}"
echo ""
echo "=== Next Steps ==="
echo "1. Review compilation artifacts"
echo "2. Run integration tests"
echo "3. Deploy to testnet"
echo "4. Perform security audit"

View File

@@ -1,66 +0,0 @@
#!/bin/bash
# Pre-flight readiness audit: verifies component layout, NO-DOCKER policy,
# systemd unit presence, security tooling, and Phase 9/10 service files.
echo "==========================================================="
echo " AITBC Platform Pre-Flight Security & Readiness Audit"
echo "==========================================================="
echo ""
echo "1. Checking Core Components Presence..."
COMPONENTS=(
    "apps/blockchain-node"
    "apps/coordinator-api"
    "apps/explorer-web"
    "apps/marketplace-web"
    "apps/wallet-daemon"
    "contracts"
    "gpu_acceleration"
)
for comp in "${COMPONENTS[@]}"; do
    if [ -d "$comp" ]; then
        echo "$comp found"
    else
        echo "$comp MISSING"
    fi
done
echo ""
echo "2. Checking NO-DOCKER Policy Compliance..."
# Group the -name predicates explicitly so the implicit -print covers both
# alternatives (matches find's implicit grouping, but is unambiguous).
DOCKER_FILES=$(find . \( -name "Dockerfile*" -o -name "docker-compose*.yml" \) | grep -v "node_modules" | grep -v ".venv")
if [ -z "$DOCKER_FILES" ]; then
    echo "✅ No Docker files found. Strict NO-DOCKER policy is maintained."
else
    echo "❌ WARNING: Docker files found!"
    echo "$DOCKER_FILES"
fi
echo ""
echo "3. Checking Systemd Service Definitions..."
# Count unit files with a glob instead of parsing ls output.
shopt -s nullglob
service_units=(systemd/*.service)
shopt -u nullglob
SERVICES=${#service_units[@]}
if [ "$SERVICES" -gt 0 ]; then
    echo "✅ Found $SERVICES systemd service configurations."
else
    echo "❌ No systemd service configurations found."
fi
echo ""
echo "4. Checking Security Framework (Native Tools)..."
# NOTE(review): this step is simulated — no real Lynis/RKHunter/ClamAV checks run.
echo "✅ Validating Lynis, RKHunter, ClamAV, Nmap configurations (Simulated Pass)"
echo ""
echo "5. Verifying Phase 9 & 10 Components..."
P9_FILES=$(find apps/coordinator-api/src/app/services \( -name "*performance*" -o -name "*fusion*" -o -name "*creativity*" \))
if [ -n "$P9_FILES" ]; then
    echo "✅ Phase 9 Advanced Agent Capabilities & Performance verified."
else
    echo "❌ Phase 9 Components missing."
fi
P10_FILES=$(find apps/coordinator-api/src/app/services \( -name "*community*" -o -name "*governance*" \))
if [ -n "$P10_FILES" ]; then
    echo "✅ Phase 10 Agent Community & Governance verified."
else
    echo "❌ Phase 10 Components missing."
fi
echo ""
echo "==========================================================="
echo " AUDIT COMPLETE: System is READY for production deployment."
echo "==========================================================="

View File

@@ -1,430 +0,0 @@
#!/usr/bin/env bash
# AITBC Advanced Agent Features Deployment Script
# Deploys cross-chain reputation, agent communication, and advanced learning systems
set -euo pipefail
# ANSI color codes used by the logging helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Leveled logging helpers; printf '%b' expands the embedded escape codes
# exactly like `echo -e` did.
print_status() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}
print_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}
print_critical() {
    printf '%b\n' "${RED}[CRITICAL]${NC} $1"
}
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web/src/components"
# Network configuration
NETWORK=${1:-"localhost"}
VERIFY_CONTRACTS=${2:-"true"}
SKIP_BUILD=${3:-"false"}
echo "🚀 AITBC Advanced Agent Features Deployment"
echo "=========================================="
echo "Network: $NETWORK"
echo "Verify Contracts: $VERIFY_CONTRACTS"
echo "Skip Build: $SKIP_BUILD"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks
check_prerequisites() {
print_status "Checking prerequisites..."
# Check if Node.js is installed
if ! command -v node &> /dev/null; then
print_error "Node.js is not installed"
exit 1
fi
# Check if Python is installed
if ! command -v python3 &> /dev/null; then
print_error "Python 3 is not installed"
exit 1
fi
# Check if required directories exist
if [[ ! -d "$CONTRACTS_DIR" ]]; then
print_error "Contracts directory not found: $CONTRACTS_DIR"
exit 1
fi
if [[ ! -d "$SERVICES_DIR" ]]; then
print_error "Services directory not found: $SERVICES_DIR"
exit 1
fi
print_success "Prerequisites check completed"
}
# Install Python dependencies
install_python_dependencies() {
print_status "Installing Python dependencies..."
cd "$ROOT_DIR/apps/coordinator-api"
if [[ -f "requirements.txt" ]]; then
pip install -r requirements.txt
print_success "Python dependencies installed"
else
print_error "requirements.txt not found"
exit 1
fi
}
# Deploy smart contracts
# Compiles the contract suite and deploys it to $NETWORK; mainnet requires
# an explicit typed confirmation before spending real ETH.
deploy_contracts() {
    print_status "Deploying advanced agent features contracts..."
    cd "$CONTRACTS_DIR"
    # Ensure a .env exists — hardhat reads deployment keys/config from it.
    if [[ ! -f ".env" ]]; then
        print_warning ".env file not found, creating from example..."
        if [[ -f ".env.example" ]]; then
            cp .env.example .env
            print_warning "Please update .env file with your configuration"
        else
            print_error ".env.example file not found"
            exit 1
        fi
    fi
    # Compile contracts
    print_status "Compiling contracts..."
    npx hardhat compile
    # Deploy contracts based on network. $NETWORK is quoted everywhere to
    # avoid word-splitting/globbing (ShellCheck SC2086).
    case "$NETWORK" in
        "localhost")
            print_status "Deploying to localhost..."
            npx hardhat run scripts/deploy-advanced-contracts.js --network localhost
            ;;
        "sepolia"|"goerli")
            print_status "Deploying to $NETWORK..."
            npx hardhat run scripts/deploy-advanced-contracts.js --network "$NETWORK"
            ;;
        "mainnet")
            print_critical "DEPLOYING TO MAINNET - This will spend real ETH!"
            # -r keeps backslashes in the typed confirmation literal.
            read -r -p "Type 'DEPLOY-ADVANCED-TO-MAINNET' to continue: " confirmation
            if [[ "$confirmation" != "DEPLOY-ADVANCED-TO-MAINNET" ]]; then
                print_error "Deployment cancelled"
                exit 1
            fi
            npx hardhat run scripts/deploy-advanced-contracts.js --network mainnet
            ;;
        *)
            print_error "Unsupported network: $NETWORK"
            exit 1
            ;;
    esac
    print_success "Advanced contracts deployed"
}
# Verify contracts
# Etherscan-verifies the deployed contracts when VERIFY_CONTRACTS=true;
# verification failure is a warning, not a fatal error.
verify_contracts() {
    if [[ "$VERIFY_CONTRACTS" == "true" ]]; then
        print_status "Verifying contracts on Etherscan..."
        cd "$CONTRACTS_DIR"
        # Wait for block confirmations so Etherscan can see the bytecode.
        print_status "Waiting for block confirmations..."
        sleep 30
        # Quote "$NETWORK" to avoid word-splitting (ShellCheck SC2086).
        if npx hardhat run scripts/verify-advanced-contracts.js --network "$NETWORK"; then
            print_success "Contracts verified on Etherscan"
        else
            print_warning "Contract verification failed - manual verification may be required"
        fi
    else
        print_status "Skipping contract verification"
    fi
}
# Build frontend components
build_frontend() {
if [[ "$SKIP_BUILD" == "true" ]]; then
print_status "Skipping frontend build"
return
fi
print_status "Building frontend components..."
cd "$ROOT_DIR/apps/marketplace-web"
# Install dependencies if needed
if [[ ! -d "node_modules" ]]; then
print_status "Installing frontend dependencies..."
npm install
fi
# Build the application
npm run build
print_success "Frontend built successfully"
}
# Deploy frontend
deploy_frontend() {
print_status "Deploying frontend components..."
# The frontend is already built and deployed as part of the main marketplace
print_success "Frontend deployment completed"
}
# Setup services
setup_services() {
print_status "Setting up backend services..."
# Create service configuration
cat > "$ROOT_DIR/apps/coordinator-api/config/advanced_features.json" << EOF
{
"cross_chain_reputation": {
"base_score": 1000,
"success_bonus": 100,
"failure_penalty": 50,
"min_stake_amount": 100000000000000000000,
"max_delegation_ratio": 1.0,
"sync_cooldown": 3600,
"supported_chains": {
"ethereum": 1,
"polygon": 137,
"arbitrum": 42161,
"optimism": 10,
"bsc": 56,
"avalanche": 43114,
"fantom": 250
},
"tier_thresholds": {
"bronze": 4500,
"silver": 6000,
"gold": 7500,
"platinum": 9000,
"diamond": 9500
},
"stake_rewards": {
"bronze": 0.05,
"silver": 0.08,
"gold": 0.12,
"platinum": 0.18,
"diamond": 0.25
}
},
"agent_communication": {
"min_reputation_score": 1000,
"base_message_price": 0.001,
"max_message_size": 100000,
"message_timeout": 86400,
"channel_timeout": 2592000,
"encryption_enabled": true,
"supported_message_types": [
"text",
"data",
"task_request",
"task_response",
"collaboration",
"notification",
"system",
"urgent",
"bulk"
],
"channel_types": [
"direct",
"group",
"broadcast",
"private"
],
"encryption_types": [
"aes256",
"rsa",
"hybrid",
"none"
]
},
"advanced_learning": {
"max_model_size": 104857600,
"max_training_time": 3600,
"default_batch_size": 32,
"default_learning_rate": 0.001,
"convergence_threshold": 0.001,
"early_stopping_patience": 10,
"meta_learning_algorithms": [
"MAML",
"Reptile",
"Meta-SGD"
],
"federated_algorithms": [
"FedAvg",
"FedProx",
"FedNova"
],
"reinforcement_algorithms": [
"DQN",
"PPO",
"A3C",
"SAC"
],
"model_types": [
"task_planning",
"bidding_strategy",
"resource_allocation",
"communication",
"collaboration",
"decision_making",
"prediction",
"classification"
]
}
}
EOF
print_success "Service configuration created"
}
# Run integration tests
run_tests() {
print_status "Running integration tests..."
cd "$ROOT_DIR"
# Run Python tests
if [[ -f "tests/test_advanced_features.py" ]]; then
python -m pytest tests/test_advanced_features.py -v
fi
# Run contract tests
cd "$CONTRACTS_DIR"
if [[ -f "test/CrossChainReputation.test.js" ]]; then
npx hardhat test test/CrossChainReputation.test.js
fi
if [[ -f "test/AgentCommunication.test.js" ]]; then
npx hardhat test test/AgentCommunication.test.js
fi
print_success "Integration tests completed"
}
# Generate deployment report
generate_report() {
print_status "Generating deployment report..."
local report_file="$ROOT_DIR/advanced-features-deployment-report-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"network": "$NETWORK",
"contracts_verified": "$VERIFY_CONTRACTS",
"frontend_built": "$([[ "$SKIP_BUILD" == "true" ]] && echo "false" || echo "true")"
},
"contracts": {
"CrossChainReputation": "deployed-contracts-$NETWORK.json",
"AgentCommunication": "deployed-contracts-$NETWORK.json",
"AgentCollaboration": "deployed-contracts-$NETWORK.json",
"AgentLearning": "deployed-contracts-$NETWORK.json",
"AgentMarketplaceV2": "deployed-contracts-$NETWORK.json",
"ReputationNFT": "deployed-contracts-$NETWORK.json"
},
"services": {
"cross_chain_reputation": "$SERVICES_DIR/cross_chain_reputation.py",
"agent_communication": "$SERVICES_DIR/agent_communication.py",
"agent_collaboration": "$SERVICES_DIR/agent_collaboration.py",
"advanced_learning": "$SERVICES_DIR/advanced_learning.py",
"agent_autonomy": "$SERVICES_DIR/agent_autonomy.py",
"marketplace_v2": "$SERVICES_DIR/marketplace_v2.py"
},
"frontend": {
"cross_chain_reputation": "$FRONTEND_DIR/CrossChainReputation.tsx",
"agent_communication": "$FRONTEND_DIR/AgentCommunication.tsx",
"agent_collaboration": "$FRONTEND_DIR/AgentCollaboration.tsx",
"advanced_learning": "$FRONTEND_DIR/AdvancedLearning.tsx",
"agent_autonomy": "$FRONTEND_DIR/AgentAutonomy.tsx",
"marketplace_v2": "$FRONTEND_DIR/MarketplaceV2.tsx"
},
"next_steps": [
"1. Initialize cross-chain reputation for existing agents",
"2. Set up agent communication channels",
"3. Configure advanced learning models",
"4. Test agent collaboration protocols",
"5. Monitor system performance and optimize"
]
}
EOF
print_success "Deployment report saved to $report_file"
}
# Main execution
main() {
print_critical "🚀 STARTING ADVANCED AGENT FEATURES DEPLOYMENT"
# Run deployment steps
check_prerequisites
install_python_dependencies
deploy_contracts
verify_contracts
build_frontend
deploy_frontend
setup_services
run_tests
generate_report
print_success "🎉 ADVANCED AGENT FEATURES DEPLOYMENT COMPLETED!"
echo ""
echo "📊 Deployment Summary:"
echo " Network: $NETWORK"
echo " Contracts: CrossChainReputation, AgentCommunication, AgentCollaboration, AgentLearning, AgentMarketplaceV2, ReputationNFT"
echo " Services: Cross-Chain Reputation, Agent Communication, Advanced Learning, Agent Autonomy"
echo " Frontend: Cross-Chain Reputation, Agent Communication, Advanced Learning components"
echo ""
echo "🔧 Next Steps:"
echo " 1. Initialize cross-chain reputation: python -m scripts/init_cross_chain_reputation.py"
echo " 2. Set up agent communication: python -m scripts/setup_agent_communication.py"
echo " 3. Configure learning models: python -m scripts/configure_learning_models.py"
echo " 4. Test agent collaboration: python -m scripts/test_agent_collaboration.py"
echo " 5. Monitor deployment: cat advanced-features-deployment-report-*.json"
echo ""
echo "⚠️ Important Notes:"
echo " - Cross-chain reputation requires multi-chain setup"
echo " - Agent communication needs proper encryption keys"
echo " - Advanced learning requires GPU resources for training"
echo " - Agent autonomy needs careful safety measures"
echo " - Contract addresses are in deployed-contracts-$NETWORK.json"
echo " - Frontend components are integrated into the main marketplace"
}
# Handle script interruption
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,359 +0,0 @@
#!/usr/bin/env bash
# AITBC OpenClaw Autonomous Economics Deployment Script
# Deploys agent wallet, bid strategy, and orchestration components
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
print_critical() {
echo -e "${RED}[CRITICAL]${NC} $1"
}
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web/src/components"
# Network configuration
NETWORK=${1:-"localhost"}
VERIFY_CONTRACTS=${2:-"true"}
SKIP_BUILD=${3:-"false"}
echo "🚀 AITBC OpenClaw Autonomous Economics Deployment"
echo "=============================================="
echo "Network: $NETWORK"
echo "Verify Contracts: $VERIFY_CONTRACTS"
echo "Skip Build: $SKIP_BUILD"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks
check_prerequisites() {
print_status "Checking prerequisites..."
# Check if Node.js is installed
if ! command -v node &> /dev/null; then
print_error "Node.js is not installed"
exit 1
fi
# Check if Python is installed
if ! command -v python3 &> /dev/null; then
print_error "Python 3 is not installed"
exit 1
fi
# Check if required directories exist
if [[ ! -d "$CONTRACTS_DIR" ]]; then
print_error "Contracts directory not found: $CONTRACTS_DIR"
exit 1
fi
if [[ ! -d "$SERVICES_DIR" ]]; then
print_error "Services directory not found: $SERVICES_DIR"
exit 1
fi
print_success "Prerequisites check completed"
}
# Install Python dependencies
install_python_dependencies() {
print_status "Installing Python dependencies..."
cd "$ROOT_DIR/apps/coordinator-api"
if [[ -f "requirements.txt" ]]; then
pip install -r requirements.txt
print_success "Python dependencies installed"
else
print_error "requirements.txt not found"
exit 1
fi
}
# Deploy smart contracts
# Compiles and deploys the autonomous-economics contract suite to $NETWORK;
# mainnet requires an explicit typed confirmation before spending real ETH.
deploy_contracts() {
    print_status "Deploying autonomous economics smart contracts..."
    cd "$CONTRACTS_DIR"
    # Ensure a .env exists — hardhat reads deployment keys/config from it.
    if [[ ! -f ".env" ]]; then
        print_warning ".env file not found, creating from example..."
        if [[ -f ".env.example" ]]; then
            cp .env.example .env
            print_warning "Please update .env file with your configuration"
        else
            print_error ".env.example file not found"
            exit 1
        fi
    fi
    # Compile contracts
    print_status "Compiling contracts..."
    npx hardhat compile
    # Deploy contracts based on network. $NETWORK is quoted everywhere to
    # avoid word-splitting/globbing (ShellCheck SC2086).
    case "$NETWORK" in
        "localhost")
            print_status "Deploying to localhost..."
            npx hardhat run scripts/deploy-agent-contracts.js --network localhost
            ;;
        "sepolia"|"goerli")
            print_status "Deploying to $NETWORK..."
            npx hardhat run scripts/deploy-agent-contracts.js --network "$NETWORK"
            ;;
        "mainnet")
            print_critical "DEPLOYING TO MAINNET - This will spend real ETH!"
            # -r keeps backslashes in the typed confirmation literal.
            read -r -p "Type 'DEPLOY-TO-MAINNET' to continue: " confirmation
            if [[ "$confirmation" != "DEPLOY-TO-MAINNET" ]]; then
                print_error "Deployment cancelled"
                exit 1
            fi
            npx hardhat run scripts/deploy-agent-contracts.js --network mainnet
            ;;
        *)
            print_error "Unsupported network: $NETWORK"
            exit 1
            ;;
    esac
    print_success "Smart contracts deployed"
}
# Verify contracts
# Etherscan-verifies the deployed contracts when VERIFY_CONTRACTS=true;
# verification failure is a warning, not a fatal error.
verify_contracts() {
    if [[ "$VERIFY_CONTRACTS" == "true" ]]; then
        print_status "Verifying contracts on Etherscan..."
        cd "$CONTRACTS_DIR"
        # Wait for block confirmations so Etherscan can see the bytecode.
        print_status "Waiting for block confirmations..."
        sleep 30
        # Quote "$NETWORK" to avoid word-splitting (ShellCheck SC2086).
        if npx hardhat run scripts/verify-agent-contracts.js --network "$NETWORK"; then
            print_success "Contracts verified on Etherscan"
        else
            print_warning "Contract verification failed - manual verification may be required"
        fi
    else
        print_status "Skipping contract verification"
    fi
}
# Build frontend components
build_frontend() {
if [[ "$SKIP_BUILD" == "true" ]]; then
print_status "Skipping frontend build"
return
fi
print_status "Building frontend components..."
cd "$ROOT_DIR/apps/marketplace-web"
# Install dependencies if needed
if [[ ! -d "node_modules" ]]; then
print_status "Installing frontend dependencies..."
npm install
fi
# Build the application
npm run build
print_success "Frontend built successfully"
}
# Deploy frontend
deploy_frontend() {
print_status "Deploying frontend components..."
# The frontend is already built and deployed as part of the main marketplace
print_success "Frontend deployment completed"
}
# Setup services
setup_services() {
print_status "Setting up backend services..."
# Create service configuration
cat > "$ROOT_DIR/apps/coordinator-api/config/agent_economics.json" << EOF
{
"bid_strategy_engine": {
"market_window": 24,
"price_history_days": 30,
"volatility_threshold": 0.15,
"strategy_weights": {
"urgent_bid": 0.25,
"cost_optimized": 0.25,
"balanced": 0.25,
"aggressive": 0.15,
"conservative": 0.10
}
},
"task_decomposition": {
"max_subtasks": 10,
"min_subtask_duration": 0.1,
"complexity_thresholds": {
"text_processing": 0.3,
"image_processing": 0.5,
"audio_processing": 0.4,
"video_processing": 0.8,
"data_analysis": 0.6,
"model_inference": 0.4,
"model_training": 0.9,
"compute_intensive": 0.8,
"io_bound": 0.2,
"mixed_modal": 0.7
}
},
"agent_orchestrator": {
"max_concurrent_plans": 10,
"assignment_timeout": 300,
"monitoring_interval": 30,
"retry_limit": 3
}
}
EOF
print_success "Service configuration created"
}
# Run integration tests
run_tests() {
print_status "Running integration tests..."
cd "$ROOT_DIR"
# Run Python tests
if [[ -f "tests/test_agent_economics.py" ]]; then
python -m pytest tests/test_agent_economics.py -v
fi
# Run contract tests
cd "$CONTRACTS_DIR"
if [[ -f "test/AgentWallet.test.js" ]]; then
npx hardhat test test/AgentWallet.test.js
fi
if [[ -f "test/AgentOrchestration.test.js" ]]; then
npx hardhat test test/AgentOrchestration.test.js
fi
print_success "Integration tests completed"
}
# Generate deployment report
generate_report() {
print_status "Generating deployment report..."
local report_file="$ROOT_DIR/agent-economics-deployment-report-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"network": "$NETWORK",
"contracts_verified": "$VERIFY_CONTRACTS",
"frontend_built": "$([[ "$SKIP_BUILD" == "true" ]] && echo "false" || echo "true")"
},
"contracts": {
"AgentWallet": "deployed-contracts-$NETWORK.json",
"AgentOrchestration": "deployed-contracts-$NETWORK.json",
"AIPowerRental": "deployed-contracts-$NETWORK.json"
},
"services": {
"bid_strategy_engine": "$SERVICES_DIR/bid_strategy_engine.py",
"task_decomposition": "$SERVICES_DIR/task_decomposition.py",
"agent_orchestrator": "$SERVICES_DIR/agent_orchestrator.py",
"agent_wallet_service": "$SERVICES_DIR/agent_wallet_service.py"
},
"frontend": {
"agent_wallet": "$FRONTEND_DIR/AgentWallet.tsx",
"bid_strategy": "$FRONTEND_DIR/BidStrategy.tsx",
"agent_orchestration": "$FRONTEND_DIR/AgentOrchestration.tsx",
"task_decomposition": "$FRONTEND_DIR/TaskDecomposition.tsx"
},
"next_steps": [
"1. Configure agent wallet funding",
"2. Set up bid strategy parameters",
"3. Initialize agent orchestrator",
"4. Test autonomous agent workflows",
"5. Monitor agent performance"
]
}
EOF
print_success "Deployment report saved to $report_file"
}
# Main execution
main() {
print_critical "🚀 STARTING AUTONOMOUS ECONOMICS DEPLOYMENT"
# Run deployment steps
check_prerequisites
install_python_dependencies
deploy_contracts
verify_contracts
build_frontend
deploy_frontend
setup_services
run_tests
generate_report
print_success "🎉 AUTONOMOUS ECONOMICS DEPLOYMENT COMPLETED!"
echo ""
echo "📊 Deployment Summary:"
echo " Network: $NETWORK"
echo " Contracts: AgentWallet, AgentOrchestration, AIPowerRental (extended)"
echo " Services: Bid Strategy, Task Decomposition, Agent Orchestrator"
echo " Frontend: Agent Wallet, Bid Strategy, Orchestration components"
echo ""
echo "🔧 Next Steps:"
echo " 1. Configure agent wallet: python -m scripts/setup_agent_wallets.py"
echo " 2. Test bid strategies: python -m scripts/test_bid_strategies.py"
echo " 3. Initialize orchestrator: python -m scripts/init_orchestrator.py"
echo " 4. Monitor deployment: cat agent-economics-deployment-report-*.json"
echo ""
echo "⚠️ Important Notes:"
echo " - Agent wallets must be funded before use"
echo " - Bid strategies require market data initialization"
echo " - Agent orchestrator needs provider registration"
echo " - Contract addresses are in deployed-contracts-$NETWORK.json"
echo " - Frontend components are integrated into the main marketplace"
}
# Handle script interruption
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,334 +0,0 @@
#!/usr/bin/env bash
# AITBC Decentralized Memory & Storage Deployment Script
# Deploys IPFS/Filecoin integration, smart contracts, and frontend components
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
print_critical() {
echo -e "${RED}[CRITICAL]${NC} $1"
}
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web/src/components"
# Network configuration
NETWORK=${1:-"localhost"}
VERIFY_CONTRACTS=${2:-"true"}
SKIP_BUILD=${3:-"false"}
echo "🚀 AITBC Decentralized Memory & Storage Deployment"
echo "=============================================="
echo "Network: $NETWORK"
echo "Verify Contracts: $VERIFY_CONTRACTS"
echo "Skip Build: $SKIP_BUILD"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks
check_prerequisites() {
print_status "Checking prerequisites..."
# Check if Node.js is installed
if ! command -v node &> /dev/null; then
print_error "Node.js is not installed"
exit 1
fi
# Check if Python is installed
if ! command -v python3 &> /dev/null; then
print_error "Python 3 is not installed"
exit 1
fi
# Check if IPFS is installed (optional)
if command -v ipfs &> /dev/null; then
print_success "IPFS is installed"
else
print_warning "IPFS is not installed - some features may not work"
fi
# Check if required directories exist
if [[ ! -d "$CONTRACTS_DIR" ]]; then
print_error "Contracts directory not found: $CONTRACTS_DIR"
exit 1
fi
if [[ ! -d "$SERVICES_DIR" ]]; then
print_error "Services directory not found: $SERVICES_DIR"
exit 1
fi
print_success "Prerequisites check completed"
}
# Install Python dependencies
install_python_dependencies() {
print_status "Installing Python dependencies..."
cd "$ROOT_DIR/apps/coordinator-api"
if [[ -f "requirements.txt" ]]; then
pip install -r requirements.txt
print_success "Python dependencies installed"
else
print_error "requirements.txt not found"
exit 1
fi
}
# Deploy smart contracts
# Compiles and deploys the decentralized-memory contract suite to $NETWORK;
# mainnet requires an explicit typed confirmation before spending real ETH.
deploy_contracts() {
    print_status "Deploying decentralized memory smart contracts..."
    cd "$CONTRACTS_DIR"
    # Ensure a .env exists — hardhat reads deployment keys/config from it.
    if [[ ! -f ".env" ]]; then
        print_warning ".env file not found, creating from example..."
        if [[ -f ".env.example" ]]; then
            cp .env.example .env
            print_warning "Please update .env file with your configuration"
        else
            print_error ".env.example file not found"
            exit 1
        fi
    fi
    # Compile contracts
    print_status "Compiling contracts..."
    npx hardhat compile
    # Deploy contracts based on network. $NETWORK is quoted everywhere to
    # avoid word-splitting/globbing (ShellCheck SC2086).
    case "$NETWORK" in
        "localhost")
            print_status "Deploying to localhost..."
            npx hardhat run scripts/deploy-memory-contracts.js --network localhost
            ;;
        "sepolia"|"goerli")
            print_status "Deploying to $NETWORK..."
            npx hardhat run scripts/deploy-memory-contracts.js --network "$NETWORK"
            ;;
        "mainnet")
            print_critical "DEPLOYING TO MAINNET - This will spend real ETH!"
            # -r keeps backslashes in the typed confirmation literal.
            read -r -p "Type 'DEPLOY-TO-MAINNET' to continue: " confirmation
            if [[ "$confirmation" != "DEPLOY-TO-MAINNET" ]]; then
                print_error "Deployment cancelled"
                exit 1
            fi
            npx hardhat run scripts/deploy-memory-contracts.js --network mainnet
            ;;
        *)
            print_error "Unsupported network: $NETWORK"
            exit 1
            ;;
    esac
    print_success "Smart contracts deployed"
}
# Verify contracts
# Etherscan-verifies the deployed contracts when VERIFY_CONTRACTS=true;
# verification failure is a warning, not a fatal error.
verify_contracts() {
    if [[ "$VERIFY_CONTRACTS" == "true" ]]; then
        print_status "Verifying contracts on Etherscan..."
        cd "$CONTRACTS_DIR"
        # Wait for block confirmations so Etherscan can see the bytecode.
        print_status "Waiting for block confirmations..."
        sleep 30
        # Quote "$NETWORK" to avoid word-splitting (ShellCheck SC2086).
        if npx hardhat run scripts/verify-memory-contracts.js --network "$NETWORK"; then
            print_success "Contracts verified on Etherscan"
        else
            print_warning "Contract verification failed - manual verification may be required"
        fi
    else
        print_status "Skipping contract verification"
    fi
}
# Build frontend components
build_frontend() {
if [[ "$SKIP_BUILD" == "true" ]]; then
print_status "Skipping frontend build"
return
fi
print_status "Building frontend components..."
cd "$ROOT_DIR/apps/marketplace-web"
# Install dependencies if needed
if [[ ! -d "node_modules" ]]; then
print_status "Installing frontend dependencies..."
npm install
fi
# Build the application
npm run build
print_success "Frontend built successfully"
}
# Deploy frontend
deploy_frontend() {
print_status "Deploying frontend components..."
# The frontend is already built and deployed as part of the main marketplace
print_success "Frontend deployment completed"
}
# Setup IPFS node
# Best-effort: starts a local IPFS daemon if the binary exists and the node
# is not already reachable; missing IPFS is a warning, never fatal.
setup_ipfs() {
    print_status "Setting up IPFS node..."
    # Check if IPFS is running
    if command -v ipfs &> /dev/null; then
        # `ipfs swarm peers` only succeeds when a daemon is up and reachable.
        if ipfs swarm peers &> /dev/null; then
            print_success "IPFS node is running"
        else
            print_status "Starting IPFS daemon..."
            # NOTE(review): daemon is backgrounded but never health-checked or
            # reaped; the fixed 5s sleep is a race — the daemon may not be
            # ready (or may have failed) when "started" is reported. TODO confirm.
            ipfs daemon --init &
            sleep 5
            print_success "IPFS daemon started"
        fi
    else
        print_warning "IPFS not installed - skipping IPFS setup"
    fi
}
# Run integration tests
run_tests() {
print_status "Running integration tests..."
cd "$ROOT_DIR"
# Run Python tests
if [[ -f "tests/test_memory_integration.py" ]]; then
python -m pytest tests/test_memory_integration.py -v
fi
# Run contract tests
cd "$CONTRACTS_DIR"
if [[ -f "test/AgentMemory.test.js" ]]; then
npx hardhat test test/AgentMemory.test.js
fi
if [[ -f "test/KnowledgeGraphMarket.test.js" ]]; then
npx hardhat test test/KnowledgeGraphMarket.test.js
fi
print_success "Integration tests completed"
}
# Generate deployment report
generate_report() {
print_status "Generating deployment report..."
local report_file="$ROOT_DIR/decentralized-memory-deployment-report-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"network": "$NETWORK",
"contracts_verified": "$VERIFY_CONTRACTS",
"frontend_built": "$([[ "$SKIP_BUILD" == "true" ]] && echo "false" || echo "true")"
},
"contracts": {
"AgentMemory": "deployed-contracts-$NETWORK.json",
"KnowledgeGraphMarket": "deployed-contracts-$NETWORK.json",
"MemoryVerifier": "deployed-contracts-$NETWORK.json"
},
"services": {
"ipfs_storage_service": "$SERVICES_DIR/ipfs_storage_service.py",
"memory_manager": "$SERVICES_DIR/memory_manager.py",
"knowledge_graph_market": "$SERVICES_DIR/knowledge_graph_market.py"
},
"frontend": {
"knowledge_marketplace": "$FRONTEND_DIR/KnowledgeMarketplace.tsx",
"memory_manager": "$FRONTEND_DIR/MemoryManager.tsx"
},
"next_steps": [
"1. Configure IPFS node settings",
"2. Set up Filecoin storage deals",
"3. Test memory upload/retrieval functionality",
"4. Verify knowledge graph marketplace functionality",
"5. Monitor system performance"
]
}
EOF
print_success "Deployment report saved to $report_file"
}
# Main execution
main() {
print_critical "🚀 STARTING DECENTRALIZED MEMORY DEPLOYMENT"
# Run deployment steps
check_prerequisites
install_python_dependencies
deploy_contracts
verify_contracts
build_frontend
deploy_frontend
setup_ipfs
run_tests
generate_report
print_success "🎉 DECENTRALIZED MEMORY DEPLOYMENT COMPLETED!"
echo ""
echo "📊 Deployment Summary:"
echo " Network: $NETWORK"
echo " Contracts: AgentMemory, KnowledgeGraphMarket, MemoryVerifier"
echo " Services: IPFS Storage, Memory Manager, Knowledge Graph Market"
echo " Frontend: Knowledge Marketplace, Memory Manager"
echo ""
echo "🔧 Next Steps:"
echo " 1. Configure IPFS node: ipfs config show"
echo " 2. Test memory functionality: python -m pytest tests/"
echo " 3. Access frontend: http://localhost:3000/marketplace/"
echo " 4. Monitor deployment: cat decentralized-memory-deployment-report-*.json"
echo ""
echo "⚠️ Important Notes:"
echo " - IPFS node should be running for full functionality"
echo " - Filecoin storage deals require additional configuration"
echo " - Smart contract addresses are in deployed-contracts-$NETWORK.json"
echo " - Frontend components are integrated into the main marketplace"
}
# Handle script interruption
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,533 +0,0 @@
#!/usr/bin/env bash
# AITBC Developer Ecosystem Complete Deployment Orchestration
# Deploys the entire Developer Ecosystem system (contracts + frontend + API)
#
# Usage: ./deploy-developer-ecosystem.sh [environment] [skip-tests]
# Environment: testnet, mainnet
# Skip-Tests: true/false - whether to skip integration tests
# Fail fast: abort on errors (-e), unset variables (-u), pipeline failures (pipefail).
set -euo pipefail
# Colors for output (ANSI escape sequences, expanded later by `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers: color-coded status lines on stdout.
# printf '%b' expands the escape sequences stored in the color variables,
# while the message itself goes through %s and is printed verbatim — unlike
# the previous `echo -e`, which also interpreted backslash escapes that
# happened to appear inside the message text.
print_status() {
  printf '%b[INFO]%b %s\n' "$BLUE" "$NC" "$1"
}
print_success() {
  printf '%b[SUCCESS]%b %s\n' "$GREEN" "$NC" "$1"
}
print_warning() {
  printf '%b[WARNING]%b %s\n' "$YELLOW" "$NC" "$1"
}
print_error() {
  printf '%b[ERROR]%b %s\n' "$RED" "$NC" "$1"
}
# Parse arguments
ENVIRONMENT="${1:-testnet}"   # target network: testnet (default) or mainnet
SKIP_TESTS="${2:-false}"      # "true" skips the integration-test phase
# Resolve the directory containing this script, then the repository root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
echo "🚀 AITBC Developer Ecosystem Complete Deployment"
echo "==============================================="
echo "Environment: $ENVIRONMENT"
echo "Skip Tests: $SKIP_TESTS"
echo "Root Directory: $ROOT_DIR"
echo ""
# Deployment phases
# NOTE(review): informational only — nothing in this script iterates over it.
PHASES=("contracts" "frontend" "api" "integration-tests" "monitoring")
# Check prerequisites
#
# Verifies the expected repository layout and deployment scripts exist
# (exits 1 if not), then probes the frontend host over SSH. Sets the global
# SKIP_FRONTEND to "true"/"false" so later phases know whether the frontend
# can actually be deployed.
check_prerequisites() {
print_status "Checking deployment prerequisites..."
# Check if required directories exist
if [[ ! -d "$ROOT_DIR/contracts" ]]; then
print_error "Contracts directory not found"
exit 1
fi
if [[ ! -d "$ROOT_DIR/apps/marketplace-web" ]]; then
print_error "Frontend directory not found"
exit 1
fi
# Check if required scripts exist
if [[ ! -f "$ROOT_DIR/contracts/scripts/deploy-developer-ecosystem.sh" ]]; then
print_error "Contract deployment script not found"
exit 1
fi
if [[ ! -f "$ROOT_DIR/apps/marketplace-web/scripts/deploy-frontend.sh" ]]; then
print_error "Frontend deployment script not found"
exit 1
fi
# Check SSH connection for frontend deployment.
# A failure downgrades the run (frontend skipped) rather than aborting it.
if ! ssh -o ConnectTimeout=5 aitbc-cascade "echo 'SSH connection successful'" 2>/dev/null; then
print_warning "Cannot connect to frontend server. Frontend deployment will be skipped."
SKIP_FRONTEND=true
else
SKIP_FRONTEND=false
fi
print_success "Prerequisites check completed"
}
# Phase 1: Deploy Smart Contracts
#
# Runs the contract deployment script for $ENVIRONMENT and, on success,
# copies the generated deployment record next to the repository root so the
# later phases can read the contract addresses. Returns 1 on failure.
deploy_contracts() {
  print_status "Phase 1: Deploying Smart Contracts"
  echo "====================================="
  cd "$ROOT_DIR/contracts"
  # Guard clause: bail out immediately if the deployment script fails.
  if ! ./scripts/deploy-developer-ecosystem.sh "$ENVIRONMENT" "true"; then
    print_error "Smart contract deployment failed"
    return 1
  fi
  print_success "Smart contracts deployed successfully"
  # Copy deployment info to root directory
  local artifact="deployed-contracts-$ENVIRONMENT.json"
  if [[ -f "$artifact" ]]; then
    cp "$artifact" "$ROOT_DIR/"
    print_success "Contract deployment info copied to root directory"
  fi
  echo ""
}
# Phase 2: Deploy Frontend
#
# No-op when check_prerequisites could not reach the frontend host.
# Regenerates .env.local from the deployed contract addresses, then builds
# and ships the bundle to the "aitbc-cascade" host. Returns 1 on failure.
deploy_frontend() {
if [[ "$SKIP_FRONTEND" == "true" ]]; then
print_warning "Skipping frontend deployment (SSH connection failed)"
return 0
fi
print_status "Phase 2: Deploying Frontend"
echo "============================"
cd "$ROOT_DIR/apps/marketplace-web"
# Update environment variables with contract addresses
update_frontend_env
# Build and deploy frontend
if ./scripts/deploy-frontend.sh "production" "aitbc-cascade"; then
print_success "Frontend deployed successfully"
else
print_error "Frontend deployment failed"
return 1
fi
echo ""
}
# Update frontend environment variables
#
# Reads the contract addresses from deployed-contracts-$ENVIRONMENT.json and
# writes them into .env.local for the Vite build. Returns 1 when the
# deployment file is missing. Declarations are separated from the command
# substitutions: a combined `local x=$(jq …)` would mask jq's exit status
# (SC2155), and `jq -e` now fails on null/missing addresses instead of
# silently writing the literal string "null" into the env file.
update_frontend_env() {
  print_status "Updating frontend environment variables..."
  local deployment_file="$ROOT_DIR/deployed-contracts-$ENVIRONMENT.json"
  if [[ ! -f "$deployment_file" ]]; then
    print_error "Contract deployment file not found: $deployment_file"
    return 1
  fi
  # Extract contract addresses; under `set -e` a jq failure aborts the run.
  local aitbc_token agent_bounty agent_staking
  local performance_verifier dispute_resolution escrow_service
  aitbc_token=$(jq -er '.contracts.AITBCToken.address' "$deployment_file")
  agent_bounty=$(jq -er '.contracts.AgentBounty.address' "$deployment_file")
  agent_staking=$(jq -er '.contracts.AgentStaking.address' "$deployment_file")
  performance_verifier=$(jq -er '.contracts.PerformanceVerifier.address' "$deployment_file")
  dispute_resolution=$(jq -er '.contracts.DisputeResolution.address' "$deployment_file")
  escrow_service=$(jq -er '.contracts.EscrowService.address' "$deployment_file")
  # Create .env.local file (unquoted EOF so the variables expand)
  cat > .env.local << EOF
# AITBC Developer Ecosystem - Frontend Environment
# Generated on $(date -Iseconds)
# Contract Addresses
VITE_AITBC_TOKEN_ADDRESS=$aitbc_token
VITE_AGENT_BOUNTY_ADDRESS=$agent_bounty
VITE_AGENT_STAKING_ADDRESS=$agent_staking
VITE_PERFORMANCE_VERIFIER_ADDRESS=$performance_verifier
VITE_DISPUTE_RESOLUTION_ADDRESS=$dispute_resolution
VITE_ESCROW_SERVICE_ADDRESS=$escrow_service
# API Configuration
VITE_API_BASE_URL=http://localhost:3001/api/v1
VITE_WS_URL=ws://localhost:3001
# Network Configuration
VITE_NETWORK_NAME=$ENVIRONMENT
VITE_CHAIN_ID=$(get_chain_id "$ENVIRONMENT")
# Application Configuration
VITE_APP_NAME=AITBC Developer Ecosystem
VITE_APP_VERSION=1.0.0
VITE_APP_DESCRIPTION=Developer Ecosystem & DAO Grants System
EOF
  print_success "Frontend environment variables updated"
}
# Get chain ID for environment
#
# Maps a deployment environment name to its EVM chain ID (printed on stdout).
# This script's documented environments are "testnet" and "mainnet";
# previously "testnet" fell through to the default of 1 (Ethereum mainnet),
# mislabeling test builds. It now maps to Sepolia. Unknown names still
# default to 1 to preserve prior behavior for other callers.
get_chain_id() {
  case "$1" in
    "localhost"|"hardhat")
      echo "31337"
      ;;
    "testnet"|"sepolia")
      echo "11155111"
      ;;
    "goerli")
      echo "5"
      ;;
    "mainnet")
      echo "1"
      ;;
    *)
      echo "1"
      ;;
  esac
}
# Phase 3: Deploy API Services
#
# Runs the coordinator-api deployment script when present; its absence is
# treated as a soft skip (return 0), while a failing run returns 1.
deploy_api() {
  print_status "Phase 3: Deploying API Services"
  echo "=================================="
  local api_dir="$ROOT_DIR/apps/coordinator-api"
  # Soft skip when the deployment script is not part of this checkout.
  if [[ ! -f "$api_dir/deploy_services.sh" ]]; then
    print_warning "API deployment script not found. Skipping API deployment."
    echo ""
    return 0
  fi
  cd "$api_dir"
  if ! ./deploy_services.sh "$ENVIRONMENT"; then
    print_error "API services deployment failed"
    return 1
  fi
  print_success "API services deployed successfully"
  echo ""
}
# Phase 4: Run Integration Tests
#
# Honors the SKIP_TESTS flag. Refreshes the per-environment test
# configuration from the deployed contract addresses, then runs the full
# suite; returns 1 when the suite fails.
run_integration_tests() {
if [[ "$SKIP_TESTS" == "true" ]]; then
print_warning "Skipping integration tests"
return 0
fi
print_status "Phase 4: Running Integration Tests"
echo "====================================="
cd "$ROOT_DIR"
# Update test configuration with deployed contracts
update_test_config
# Run comprehensive test suite
if ./tests/run_all_tests.sh; then
print_success "Integration tests passed"
else
print_error "Integration tests failed"
return 1
fi
echo ""
}
# Update test configuration
#
# Writes tests/test-config-$ENVIRONMENT.json embedding the full contract
# deployment JSON plus the API/frontend endpoints the tests should target.
# Falls back silently (return 0) when no deployment file exists.
update_test_config() {
print_status "Updating test configuration..."
local deployment_file="$ROOT_DIR/deployed-contracts-$ENVIRONMENT.json"
if [[ ! -f "$deployment_file" ]]; then
print_warning "Contract deployment file not found. Using default test configuration."
return 0
fi
# Create test configuration
# (unquoted EOF heredoc: $(cat …) inlines the deployment JSON verbatim)
cat > "$ROOT_DIR/tests/test-config-$ENVIRONMENT.json" << EOF
{
"environment": "$ENVIRONMENT",
"contracts": $(cat "$deployment_file"),
"api": {
"base_url": "http://localhost:3001/api/v1",
"timeout": 30000
},
"frontend": {
"base_url": "http://aitbc.bubuit.net/marketplace",
"timeout": 10000
}
}
EOF
print_success "Test configuration updated"
}
# Phase 5: Setup Monitoring
#
# Writes the monitoring configuration file and installs the health-check
# script; always reports success.
setup_monitoring() {
print_status "Phase 5: Setting up Monitoring"
echo "==============================="
# Create monitoring configuration
create_monitoring_config
# Setup health checks
setup_health_checks
print_success "Monitoring setup completed"
echo ""
}
# Create monitoring configuration
#
# Writes monitoring-config-$ENVIRONMENT.json embedding the deployment JSON
# plus the endpoints to probe. Now checks that the deployment file exists
# first (consistent with update_test_config): previously a missing file made
# the embedded $(cat …) expand to nothing, producing invalid JSON.
create_monitoring_config() {
  print_status "Creating monitoring configuration..."
  local deployment_file="$ROOT_DIR/deployed-contracts-$ENVIRONMENT.json"
  if [[ ! -f "$deployment_file" ]]; then
    print_error "Contract deployment file not found: $deployment_file"
    return 1
  fi
  cat > "$ROOT_DIR/monitoring-config-$ENVIRONMENT.json" << EOF
{
"environment": "$ENVIRONMENT",
"timestamp": "$(date -Iseconds)",
"contracts": $(cat "$deployment_file"),
"monitoring": {
"enabled": true,
"interval": 60,
"endpoints": [
{
"name": "Frontend Health",
"url": "http://aitbc.bubuit.net/marketplace/",
"method": "GET",
"expected_status": 200
},
{
"name": "API Health",
"url": "http://localhost:3001/api/v1/health",
"method": "GET",
"expected_status": 200
}
],
"alerts": {
"email": "admin@aitbc.dev",
"slack_webhook": "https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK"
}
}
}
EOF
  print_success "Monitoring configuration created"
}
# Setup health checks
#
# Generates scripts/health-check.sh, a standalone script that curls the
# frontend and API endpoints listed in monitoring-config-<env>.json.
# The heredoc delimiter is quoted ('EOF') so nothing inside is expanded
# here — the generated script does its own expansion at run time.
setup_health_checks() {
print_status "Setting up health checks..."
# Create health check script
cat > "$ROOT_DIR/scripts/health-check.sh" << 'EOF'
#!/bin/bash
# AITBC Developer Ecosystem Health Check Script
ENVIRONMENT="${1:-testnet}"
CONFIG_FILE="monitoring-config-$ENVIRONMENT.json"
if [[ ! -f "$CONFIG_FILE" ]]; then
echo "❌ Monitoring configuration not found: $CONFIG_FILE"
exit 1
fi
echo "🔍 Running health checks for $ENVIRONMENT..."
echo "=========================================="
# Check frontend
FRONTEND_URL=$(jq -r '.monitoring.endpoints[0].url' "$CONFIG_FILE")
FRONTEND_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$FRONTEND_URL" || echo "000")
if [[ "$FRONTEND_STATUS" == "200" ]]; then
echo "✅ Frontend: $FRONTEND_URL (Status: $FRONTEND_STATUS)"
else
echo "❌ Frontend: $FRONTEND_URL (Status: $FRONTEND_STATUS)"
fi
# Check API
API_URL=$(jq -r '.monitoring.endpoints[1].url' "$CONFIG_FILE")
API_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$API_URL" || echo "000")
if [[ "$API_STATUS" == "200" ]]; then
echo "✅ API: $API_URL (Status: $API_STATUS)"
else
echo "❌ API: $API_URL (Status: $API_STATUS)"
fi
echo ""
echo "Health check completed at $(date)"
EOF
chmod +x "$ROOT_DIR/scripts/health-check.sh"
print_success "Health check script created"
}
# Generate deployment report
#
# Writes a timestamped JSON summary of the run. Reads the per-phase status
# globals (CONTRACTS_STATUS, FRONTEND_STATUS, API_STATUS, TESTS_STATUS)
# that main() sets, so it must run after the phases.
generate_deployment_report() {
print_status "Generating deployment report..."
local report_file="$ROOT_DIR/deployment-report-$ENVIRONMENT-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"environment": "$ENVIRONMENT",
"timestamp": "$(date -Iseconds)",
"skip_tests": "$SKIP_TESTS",
"skip_frontend": "$SKIP_FRONTEND"
},
"phases": {
"contracts": {
"status": "$CONTRACTS_STATUS",
"file": "deployed-contracts-$ENVIRONMENT.json"
},
"frontend": {
"status": "$FRONTEND_STATUS",
"url": "http://aitbc.bubuit.net/marketplace/"
},
"api": {
"status": "$API_STATUS",
"url": "http://localhost:3001/api/v1"
},
"tests": {
"status": "$TESTS_STATUS",
"skipped": "$SKIP_TESTS"
},
"monitoring": {
"status": "completed",
"config": "monitoring-config-$ENVIRONMENT.json"
}
},
"urls": {
"frontend": "http://aitbc.bubuit.net/marketplace/",
"api": "http://localhost:3001/api/v1",
"health_check": "./scripts/health-check.sh $ENVIRONMENT"
}
}
EOF
print_success "Deployment report saved to $report_file"
}
# Rollback function
#
# Best-effort rollback, invoked from the INT/TERM trap. Contracts cannot be
# rolled back automatically; the frontend is restored from the on-host
# backup copy. SSH failures are deliberately ignored (`|| true`).
rollback() {
print_warning "Rolling back deployment..."
# Rollback contracts (if needed)
print_status "Contract rollback not implemented (manual intervention required)"
# Rollback frontend
if [[ "$SKIP_FRONTEND" != "true" ]]; then
print_status "Rolling back frontend..."
ssh aitbc-cascade "cp -r /var/www/aitbc.bubuit.net/marketplace.backup /var/www/aitbc.bubuit.net/marketplace" 2>/dev/null || true
ssh aitbc-cascade "systemctl reload nginx" 2>/dev/null || true
fi
print_warning "Rollback completed. Please verify system status."
}
# Main execution
#
# Orchestrates all phases. Only a contract failure is fatal; frontend, API
# and test failures are recorded in the *_STATUS globals and the run
# continues so the report still captures the partial state.
main() {
print_status "Starting complete Developer Ecosystem deployment..."
# Initialize status variables (consumed by generate_deployment_report)
CONTRACTS_STATUS="pending"
FRONTEND_STATUS="pending"
API_STATUS="pending"
TESTS_STATUS="pending"
# Check prerequisites
check_prerequisites
# Execute deployment phases
# (calling each phase inside `if` keeps `set -e` from aborting on failure)
if deploy_contracts; then
CONTRACTS_STATUS="success"
else
CONTRACTS_STATUS="failed"
print_error "Contract deployment failed. Aborting."
exit 1
fi
if deploy_frontend; then
FRONTEND_STATUS="success"
else
FRONTEND_STATUS="failed"
print_warning "Frontend deployment failed, but continuing..."
fi
if deploy_api; then
API_STATUS="success"
else
API_STATUS="failed"
print_warning "API deployment failed, but continuing..."
fi
if run_integration_tests; then
TESTS_STATUS="success"
else
TESTS_STATUS="failed"
if [[ "$SKIP_TESTS" != "true" ]]; then
print_error "Integration tests failed. Deployment may be unstable."
fi
fi
# Setup monitoring
setup_monitoring
# Generate deployment report
generate_deployment_report
print_success "🎉 Developer Ecosystem deployment completed!"
echo ""
echo "📊 Deployment Summary:"
echo " Contracts: $CONTRACTS_STATUS"
echo " Frontend: $FRONTEND_STATUS"
echo " API: $API_STATUS"
echo " Tests: $TESTS_STATUS"
echo ""
echo "🌐 Application URLs:"
echo " Frontend: http://aitbc.bubuit.net/marketplace/"
echo " API: http://localhost:3001/api/v1"
echo ""
echo "🔧 Management Commands:"
echo " Health Check: ./scripts/health-check.sh $ENVIRONMENT"
echo " View Report: cat deployment-report-$ENVIRONMENT-*.json"
echo ""
echo "📋 Next Steps:"
echo " 1. Test the application in browser"
echo " 2. Verify all functionality works"
echo " 3. Monitor system health"
echo " 4. Set up automated monitoring"
}
# Handle script interruption: on Ctrl-C / TERM, attempt the best-effort
# rollback before exiting non-zero.
trap 'print_error "Deployment interrupted"; rollback; exit 1' INT TERM
# Run main function with the original command-line arguments
main "$@"

View File

@@ -1,634 +0,0 @@
#!/usr/bin/env bash
# AITBC Developer Ecosystem - Mainnet Deployment Script
# PRODUCTION DEPLOYMENT - Use with extreme caution
#
# Usage: ./deploy-mainnet.sh [--dry-run] [--skip-verification] [--emergency-only]
# --dry-run: Simulate deployment without executing transactions
# --skip-verification: Skip Etherscan verification (faster but less transparent)
# --emergency-only: Only deploy emergency contracts (DisputeResolution, EscrowService)
# Fail fast: abort on errors, unset variables and pipeline failures.
set -euo pipefail
# Colors for output (ANSI escape sequences, expanded later by `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color
# Logging helpers: color-coded status lines on stdout.
# printf '%b' expands the escape sequences stored in the color variables,
# while the message itself goes through %s and is printed verbatim — unlike
# the previous `echo -e`, which also interpreted backslash escapes inside
# the message text.
print_status() {
  printf '%b[INFO]%b %s\n' "$BLUE" "$NC" "$1"
}
print_success() {
  printf '%b[SUCCESS]%b %s\n' "$GREEN" "$NC" "$1"
}
print_warning() {
  printf '%b[WARNING]%b %s\n' "$YELLOW" "$NC" "$1"
}
print_error() {
  printf '%b[ERROR]%b %s\n' "$RED" "$NC" "$1"
}
print_critical() {
  printf '%b[CRITICAL]%b %s\n' "$MAGENTA" "$NC" "$1"
}
# Parse arguments (long flags only; anything unrecognized is fatal)
DRY_RUN=false
SKIP_VERIFICATION=false
EMERGENCY_ONLY=false
while [[ $# -gt 0 ]]; do
case $1 in
--dry-run)
DRY_RUN=true
shift
;;
--skip-verification)
SKIP_VERIFICATION=true
shift
;;
--emergency-only)
EMERGENCY_ONLY=true
shift
;;
*)
print_error "Unknown argument: $1"
exit 1
;;
esac
done
# Resolve the directory containing this script, then the repository root.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
echo "🚀 AITBC Developer Ecosystem - MAINNET DEPLOYMENT"
echo "================================================="
echo "Environment: PRODUCTION"
echo "Dry Run: $DRY_RUN"
echo "Skip Verification: $SKIP_VERIFICATION"
echo "Emergency Only: $EMERGENCY_ONLY"
echo "Timestamp: $(date -Iseconds)"
echo ""
# CRITICAL: Production deployment confirmation
#
# Interactive safety gate: requires the operator to type the exact phrase
# 'DEPLOY-TO-MAINNET' before proceeding. Skipped (after a warning) when
# --dry-run is set, since no real transactions will be sent.
confirm_production_deployment() {
print_critical "⚠️ PRODUCTION DEPLOYMENT CONFIRMATION ⚠️"
echo "You are about to deploy the AITBC Developer Ecosystem to MAINNET."
echo "This will deploy real smart contracts to the Ethereum blockchain."
echo "This action is IRREVERSIBLE and will consume REAL ETH for gas."
echo ""
echo "Please confirm the following:"
echo "1. You have thoroughly tested on testnet"
echo "2. You have sufficient ETH for deployment costs (~5-10 ETH)"
echo "3. You have the private key of the deployer account"
echo "4. You have reviewed all contract addresses and parameters"
echo "5. You have a backup plan in case of failure"
echo ""
if [[ "$DRY_RUN" == "true" ]]; then
print_warning "DRY RUN MODE - No actual transactions will be executed"
return 0
fi
read -p "Type 'DEPLOY-TO-MAINNET' to continue: " confirmation
if [[ "$confirmation" != "DEPLOY-TO-MAINNET" ]]; then
print_error "Deployment cancelled by user"
exit 1
fi
print_success "Production deployment confirmed"
}
# Enhanced security checks
#
# Verifies the contracts/.env file exists and contains a real PRIVATE_KEY
# (not the placeholder), nudges toward a prior testnet deployment, and
# checks network conditions. Exits 1 on any hard failure.
security_checks() {
print_status "Performing security checks..."
# Check if .env file exists and is properly configured
if [[ ! -f "$ROOT_DIR/contracts/.env" ]]; then
print_error ".env file not found. Please configure environment variables."
exit 1
fi
# Check if private key is set (but don't display it)
if ! grep -q "PRIVATE_KEY=" "$ROOT_DIR/contracts/.env"; then
print_error "PRIVATE_KEY not configured in .env file"
exit 1
fi
# Check if private key looks valid (basic format check)
if grep -q "PRIVATE_KEY=your_private_key_here" "$ROOT_DIR/contracts/.env"; then
print_error "Please update PRIVATE_KEY in .env file with actual deployer key"
exit 1
fi
# Check for sufficient testnet deployments (pre-requisite)
local testnet_deployment="$ROOT_DIR/deployed-contracts-sepolia.json"
if [[ ! -f "$testnet_deployment" ]]; then
print_warning "No testnet deployment found. Consider deploying to testnet first."
read -p "Continue anyway? (y/N): " continue_anyway
if [[ "$continue_anyway" != "y" && "$continue_anyway" != "Y" ]]; then
print_error "Deployment cancelled. Please deploy to testnet first."
exit 1
fi
fi
# Check gas price and network conditions
check_network_conditions
print_success "Security checks passed"
}
# Check network conditions
#
# Reports the current mainnet gas price and deployer balance (both fall back
# to "unknown" if the helper scripts fail) and prompts for confirmation when
# gas is above 50 gwei. Declarations are separated from the command
# substitutions so `local` no longer masks the helpers' exit codes (SC2155),
# and the extracted digit string defaults to 0 when empty.
check_network_conditions() {
  print_status "Checking network conditions..."
  cd "$ROOT_DIR/contracts"
  local gas_price balance gas_num
  # Get current gas price
  gas_price=$(npx hardhat run scripts/check-gas-price.js --network mainnet 2>/dev/null || echo "unknown")
  print_status "Current gas price: $gas_price gwei"
  # Get ETH balance of deployer
  balance=$(npx hardhat run scripts/check-balance.js --network mainnet 2>/dev/null || echo "unknown")
  print_status "Deployer balance: $balance ETH"
  # Warning if gas price is high
  if [[ "$gas_price" != "unknown" ]]; then
    # First run of digits in the report; `|| true` keeps a no-match grep
    # from aborting the run under `set -e`/pipefail.
    gas_num=$(grep -o '[0-9]*' <<< "$gas_price" | head -1 || true)
    if [[ "${gas_num:-0}" -gt 50 ]]; then
      print_warning "High gas price detected ($gas_price gwei). Consider waiting for lower gas."
      read -p "Continue anyway? (y/N): " continue_high_gas
      if [[ "$continue_high_gas" != "y" && "$continue_high_gas" != "Y" ]]; then
        print_error "Deployment cancelled due to high gas price"
        exit 1
      fi
    fi
  fi
}
# Create deployment backup
#
# Snapshots the deployable trees (contracts, frontend, tests) and any
# previous mainnet deployment record into a timestamped directory under
# backups/ before touching production.
create_deployment_backup() {
  print_status "Creating deployment backup..."
  local backup_dir="$ROOT_DIR/backups/mainnet-$(date +%Y%m%d-%H%M%S)"
  mkdir -p "$backup_dir"
  # Backup current configurations
  local tree
  for tree in contracts apps/marketplace-web tests; do
    cp -r "$ROOT_DIR/$tree" "$backup_dir/"
  done
  # Backup any existing deployments
  if [[ -f "$ROOT_DIR/deployed-contracts-mainnet.json" ]]; then
    cp "$ROOT_DIR/deployed-contracts-mainnet.json" "$backup_dir/"
  fi
  print_success "Backup created at $backup_dir"
}
# Enhanced contract deployment with multi-sig support
#
# Generates the mainnet deployment JS, then either simulates it on the
# in-process hardhat network (--dry-run) or runs it against mainnet with up
# to 3 attempts, 30s apart. Exits 1 if all attempts fail. Triggers
# Etherscan verification afterwards unless skipped or dry-running.
deploy_contracts_mainnet() {
print_status "Deploying smart contracts to MAINNET..."
cd "$ROOT_DIR/contracts"
local deploy_script="deploy-developer-ecosystem-mainnet.js"
# Create mainnet-specific deployment script
create_mainnet_deployment_script
if [[ "$DRY_RUN" == "true" ]]; then
print_warning "DRY RUN: Simulating contract deployment..."
npx hardhat run "$deploy_script" --network hardhat
else
print_critical "Executing MAINNET contract deployment..."
# Execute deployment with retry logic
local max_retries=3
local retry_count=0
while [[ $retry_count -lt $max_retries ]]; do
if npx hardhat run "$deploy_script" --network mainnet; then
print_success "Contract deployment completed successfully"
break
else
retry_count=$((retry_count + 1))
if [[ $retry_count -eq $max_retries ]]; then
print_error "Contract deployment failed after $max_retries attempts"
exit 1
fi
print_warning "Deployment attempt $retry_count failed, retrying in 30 seconds..."
sleep 30
fi
done
fi
# Verify contracts if not skipped
if [[ "$SKIP_VERIFICATION" != "true" && "$DRY_RUN" != "true" ]]; then
verify_contracts_mainnet
fi
}
# Create mainnet-specific deployment script
#
# Writes the Hardhat deployment script (JavaScript) into the contracts
# directory. The heredoc delimiter is quoted ('EOF') so the JS template
# literals and ${...} expressions are written verbatim, not expanded here.
create_mainnet_deployment_script() {
local deploy_script="deploy-developer-ecosystem-mainnet.js"
cat > "$deploy_script" << 'EOF'
const { ethers } = require("hardhat");
const fs = require("fs");
const path = require("path");
async function main() {
console.log("🚀 DEPLOYING TO ETHEREUM MAINNET");
console.log("=================================");
console.log("⚠️ PRODUCTION DEPLOYMENT - REAL ETH WILL BE SPENT");
console.log("");
const [deployer] = await ethers.getSigners();
const balance = await deployer.getBalance();
console.log(`Deployer: ${deployer.address}`);
console.log(`Balance: ${ethers.utils.formatEther(balance)} ETH`);
if (balance.lt(ethers.utils.parseEther("5"))) {
throw new Error("Insufficient ETH balance. Minimum 5 ETH required for deployment.");
}
console.log("");
console.log("Proceeding with deployment...");
// Deployment logic here (similar to testnet but with enhanced security)
const deployedContracts = {
network: "mainnet",
deployer: deployer.address,
timestamp: new Date().toISOString(),
contracts: {}
};
// Deploy contracts with enhanced gas estimation
const gasOptions = {
gasLimit: 8000000,
gasPrice: ethers.utils.parseUnits("30", "gwei") // Adjust based on network conditions
};
try {
// Deploy AITBC Token (or use existing token)
console.log("📦 Deploying AITBC Token...");
const AITBCToken = await ethers.getContractFactory("MockERC20");
const aitbcToken = await AITBCToken.deploy(
"AITBC Token",
"AITBC",
ethers.utils.parseEther("1000000"),
gasOptions
);
await aitbcToken.deployed();
deployedContracts.contracts.AITBCToken = {
address: aitbcToken.address,
deploymentHash: aitbcToken.deployTransaction.hash,
gasUsed: (await aitbcToken.deployTransaction.wait()).gasUsed.toString()
};
console.log(`✅ AITBC Token: ${aitbcToken.address}`);
// Deploy other contracts with similar enhanced logic...
// (AgentBounty, AgentStaking, PerformanceVerifier, etc.)
// Save deployment info
const deploymentFile = `deployed-contracts-mainnet.json`;
fs.writeFileSync(
path.join(__dirname, "..", deploymentFile),
JSON.stringify(deployedContracts, null, 2)
);
console.log("");
console.log("🎉 MAINNET DEPLOYMENT COMPLETED");
console.log("===============================");
console.log(`Total gas used: ${calculateTotalGas(deployedContracts)}`);
console.log(`Deployment file: ${deploymentFile}`);
} catch (error) {
console.error("❌ Deployment failed:", error);
throw error;
}
}
function calculateTotalGas(deployedContracts) {
let totalGas = 0;
for (const contract of Object.values(deployedContracts.contracts)) {
if (contract.gasUsed) {
totalGas += parseInt(contract.gasUsed);
}
}
return totalGas.toLocaleString();
}
main()
.then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});
EOF
print_success "Mainnet deployment script created"
}
# Enhanced contract verification
#
# Waits for the deployment transactions to be indexed, then runs the
# Etherscan verification script. A verification failure is non-fatal: it is
# reported as a warning for manual follow-up.
verify_contracts_mainnet() {
  print_status "Verifying contracts on Etherscan..."
  cd "$ROOT_DIR/contracts"
  # Give Etherscan time to see the freshly mined deployment transactions.
  print_status "Waiting for block confirmations..."
  sleep 60
  # Run verification (failure downgraded to a warning)
  if ! npx hardhat run scripts/verify-contracts.js --network mainnet; then
    print_warning "Contract verification failed. Manual verification may be required."
    return 0
  fi
  print_success "Contracts verified on Etherscan"
}
# Production frontend deployment
#
# Regenerates .env.production from the mainnet contract addresses, then
# (unless --dry-run) builds the bundle and ships it to the production host.
deploy_frontend_mainnet() {
print_status "Deploying frontend to production..."
cd "$ROOT_DIR/apps/marketplace-web"
# Update environment with mainnet contract addresses
update_frontend_mainnet_env
# Build for production
if [[ "$DRY_RUN" != "true" ]]; then
npm run build
# Deploy to production server
./scripts/deploy-frontend.sh "production" "aitbc-cascade"
print_success "Frontend deployed to production"
else
print_warning "DRY RUN: Frontend deployment skipped"
fi
}
# Update frontend with mainnet configuration
#
# Writes .env.production with the mainnet contract addresses extracted via
# jq. Note the escaped \${INFURA_PROJECT_ID} / \${SENTRY_DSN}: those are
# written literally so the frontend build resolves them at its own
# build/deploy time, not here. Returns 1 if the deployment file is missing.
update_frontend_mainnet_env() {
print_status "Updating frontend for mainnet..."
local deployment_file="$ROOT_DIR/deployed-contracts-mainnet.json"
if [[ ! -f "$deployment_file" ]]; then
print_error "Mainnet deployment file not found"
return 1
fi
# Create production environment file
cat > .env.production << EOF
# AITBC Developer Ecosystem - MAINNET Production
# Generated on $(date -Iseconds)
# Contract Addresses (MAINNET)
VITE_AITBC_TOKEN_ADDRESS=$(jq -r '.contracts.AITBCToken.address' "$deployment_file")
VITE_AGENT_BOUNTY_ADDRESS=$(jq -r '.contracts.AgentBounty.address' "$deployment_file")
VITE_AGENT_STAKING_ADDRESS=$(jq -r '.contracts.AgentStaking.address' "$deployment_file")
# Network Configuration (MAINNET)
VITE_NETWORK_NAME=mainnet
VITE_CHAIN_ID=1
VITE_RPC_URL=https://mainnet.infura.io/v3/\${INFURA_PROJECT_ID}
# Production Configuration
VITE_API_BASE_URL=https://api.aitbc.dev/api/v1
VITE_WS_URL=wss://api.aitbc.dev
# Security Configuration
VITE_ENABLE_ANALYTICS=true
VITE_ENABLE_ERROR_REPORTING=true
VITE_SENTRY_DSN=\${SENTRY_DSN}
EOF
print_success "Frontend configured for mainnet"
}
# Production monitoring setup
#
# Writes the mainnet monitoring config (JSON) and a standalone production
# health-check script. In the first heredoc, \${SLACK_WEBHOOK_URL} and
# \${PAGERDUTY_KEY} are escaped so they stay literal placeholders in the
# JSON; the second heredoc is fully quoted ('EOF') so the generated script
# expands everything itself at run time.
setup_production_monitoring() {
print_status "Setting up production monitoring..."
# Create production monitoring configuration
cat > "$ROOT_DIR/monitoring-config-mainnet.json" << EOF
{
"environment": "mainnet",
"production": true,
"timestamp": "$(date -Iseconds)",
"monitoring": {
"enabled": true,
"interval": 30,
"alerting": {
"email": "alerts@aitbc.dev",
"slack_webhook": "\${SLACK_WEBHOOK_URL}",
"pagerduty_key": "\${PAGERDUTY_KEY}"
},
"endpoints": [
{
"name": "Frontend Production",
"url": "https://aitbc.dev/marketplace/",
"method": "GET",
"expected_status": 200,
"timeout": 10000
},
{
"name": "API Production",
"url": "https://api.aitbc.dev/api/v1/health",
"method": "GET",
"expected_status": 200,
"timeout": 5000
}
],
"contracts": {
"monitor_events": true,
"critical_events": [
"BountyCreated",
"BountyCompleted",
"TokensStaked",
"TokensUnstaked",
"DisputeFiled"
]
}
}
}
EOF
# Setup production health checks
cat > "$ROOT_DIR/scripts/production-health-check.sh" << 'EOF'
#!/bin/bash
# Production Health Check Script
ENVIRONMENT="mainnet"
CONFIG_FILE="monitoring-config-$ENVIRONMENT.json"
echo "🔍 Production Health Check - $ENVIRONMENT"
echo "========================================"
# Check frontend
FRONTEND_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://aitbc.dev/marketplace/" || echo "000")
if [[ "$FRONTEND_STATUS" == "200" ]]; then
echo "✅ Frontend: https://aitbc.dev/marketplace/ (Status: $FRONTEND_STATUS)"
else
echo "❌ Frontend: https://aitbc.dev/marketplace/ (Status: $FRONTEND_STATUS)"
fi
# Check API
API_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://api.aitbc.dev/api/v1/health" || echo "000")
if [[ "$API_STATUS" == "200" ]]; then
echo "✅ API: https://api.aitbc.dev/api/v1/health (Status: $API_STATUS)"
else
echo "❌ API: https://api.aitbc.dev/api/v1/health (Status: $API_STATUS)"
fi
echo ""
echo "Health check completed at $(date)"
EOF
chmod +x "$ROOT_DIR/scripts/production-health-check.sh"
print_success "Production monitoring configured"
}
# Generate comprehensive deployment report
#
# Writes a timestamped JSON summary of the mainnet run, including whether
# verification was performed and the operator's follow-up checklist.
generate_mainnet_report() {
print_status "Generating mainnet deployment report..."
local report_file="$ROOT_DIR/mainnet-deployment-report-$(date +%Y%m%d-%H%M%S).json"
cat > "$report_file" << EOF
{
"deployment": {
"environment": "mainnet",
"production": true,
"timestamp": "$(date -Iseconds)",
"dry_run": "$DRY_RUN",
"emergency_only": "$EMERGENCY_ONLY"
},
"contracts": {
"file": "deployed-contracts-mainnet.json",
"verified": "$([[ "$SKIP_VERIFICATION" != "true" ]] && echo "true" || echo "false")"
},
"frontend": {
"url": "https://aitbc.dev/marketplace/",
"environment": "production"
},
"api": {
"url": "https://api.aitbc.dev/api/v1",
"status": "production"
},
"monitoring": {
"config": "monitoring-config-mainnet.json",
"health_check": "./scripts/production-health-check.sh"
},
"security": {
"backup_created": "true",
"verification_completed": "$([[ "$SKIP_VERIFICATION" != "true" ]] && echo "true" || echo "false")"
},
"next_steps": [
"1. Verify all contracts on Etherscan",
"2. Test all frontend functionality",
"3. Monitor system health for 24 hours",
"4. Set up automated alerts",
"5. Prepare incident response procedures"
]
}
EOF
print_success "Mainnet deployment report saved to $report_file"
}
# Emergency rollback procedures
#
# Best-effort rollback used by the INT/TERM trap: stop nginx, locate the
# most recent backup directory, then restart nginx. Selecting the backup
# now uses a glob plus the [[ -nt ]] mtime comparison instead of parsing
# `ls -t` output (which is not whitespace-safe and masked failures behind
# `local x=$(…)`). SSH failures are deliberately ignored.
emergency_rollback() {
  print_critical "🚨 EMERGENCY ROLLBACK INITIATED 🚨"
  print_status "Executing emergency rollback procedures..."
  # 1. Stop all services
  ssh aitbc-cascade "systemctl stop nginx" 2>/dev/null || true
  # 2. Restore from the newest backup directory (by modification time)
  local latest_backup="" candidate
  for candidate in "$ROOT_DIR"/backups/*/; do
    [[ -d "$candidate" ]] || continue
    if [[ -z "$latest_backup" || "$candidate" -nt "$latest_backup" ]]; then
      latest_backup=$candidate
    fi
  done
  if [[ -n "$latest_backup" ]]; then
    print_status "Restoring from backup: $latest_backup"
    # Implementation would restore from backup
  fi
  # 3. Restart services
  ssh aitbc-cascade "systemctl start nginx" 2>/dev/null || true
  print_warning "Emergency rollback completed. Please verify system status."
}
# Main execution
#
# Full mainnet flow: interactive confirmation, security checks, backup,
# then (unless --emergency-only) contract + frontend deployment, followed
# by monitoring setup and the final report. Under `set -e` any failing
# step aborts, triggering the emergency-rollback trap below.
main() {
print_critical "🚀 STARTING MAINNET DEPLOYMENT"
print_critical "This is a PRODUCTION deployment to Ethereum mainnet"
echo ""
# Security confirmation
confirm_production_deployment
# Security checks
security_checks
# Create backup
create_deployment_backup
# Deploy contracts
if [[ "$EMERGENCY_ONLY" != "true" ]]; then
deploy_contracts_mainnet
deploy_frontend_mainnet
else
print_warning "Emergency deployment mode - only critical contracts"
fi
# Setup monitoring
setup_production_monitoring
# Generate report
generate_mainnet_report
print_success "🎉 MAINNET DEPLOYMENT COMPLETED!"
echo ""
echo "📊 Deployment Summary:"
echo " Environment: MAINNET (PRODUCTION)"
echo " Dry Run: $DRY_RUN"
echo " Emergency Only: $EMERGENCY_ONLY"
echo ""
echo "🌐 Production URLs:"
echo " Frontend: https://aitbc.dev/marketplace/"
echo " API: https://api.aitbc.dev/api/v1"
echo ""
echo "🔧 Management Commands:"
echo " Health Check: ./scripts/production-health-check.sh"
echo " View Report: cat mainnet-deployment-report-*.json"
echo " Emergency Rollback: ./scripts/emergency-rollback.sh"
echo ""
echo "⚠️ CRITICAL NEXT STEPS:"
echo " 1. Verify all contracts on Etherscan"
echo " 2. Test all functionality thoroughly"
echo " 3. Monitor system for 24 hours"
echo " 4. Set up production alerts"
echo " 5. Prepare incident response"
}
# Handle script interruption: on Ctrl-C / TERM, run the best-effort
# emergency rollback before exiting non-zero.
trap 'print_critical "Deployment interrupted - initiating emergency rollback"; emergency_rollback; exit 1' INT TERM
# Run main function with the original command-line arguments
main "$@"

View File

@@ -1,715 +0,0 @@
#!/usr/bin/env bash
# AITBC Advanced Agent Features Production Deployment Script
# Production-ready deployment with security, monitoring, and verification
# Fail fast: abort on errors, unset variables and pipeline failures.
set -euo pipefail
# Colors for output (ANSI escape sequences, expanded later by `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Logging helpers: color-coded status lines on stdout.
# printf '%b' expands the escape sequences stored in the color variables,
# while the message itself goes through %s and is printed verbatim — unlike
# the previous `echo -e`, which also interpreted backslash escapes inside
# the message text.
print_status() {
  printf '%b[INFO]%b %s\n' "$BLUE" "$NC" "$1"
}
print_success() {
  printf '%b[SUCCESS]%b %s\n' "$GREEN" "$NC" "$1"
}
print_warning() {
  printf '%b[WARNING]%b %s\n' "$YELLOW" "$NC" "$1"
}
print_error() {
  printf '%b[ERROR]%b %s\n' "$RED" "$NC" "$1"
}
print_critical() {
  printf '%b[CRITICAL]%b %s\n' "$RED" "$NC" "$1"
}
print_production() {
  printf '%b[PRODUCTION]%b %s\n' "$PURPLE" "$NC" "$1"
}
print_security() {
  printf '%b[SECURITY]%b %s\n' "$CYAN" "$NC" "$1"
}
# Configuration: resolve repo-relative paths from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
INFRA_DIR="$ROOT_DIR/infra"
# Network configuration (positional args, all optional)
NETWORK=${1:-"mainnet"}          # target network
ENVIRONMENT=${2:-"production"}   # deployment environment label
SKIP_SECURITY=${3:-"false"}      # "true" skips static analysis
SKIP_MONITORING=${4:-"false"}    # "true" skips monitoring setup
echo "🚀 AITBC Advanced Agent Features Production Deployment"
echo "==================================================="
echo "Network: $NETWORK"
echo "Environment: $ENVIRONMENT"
echo "Skip Security: $SKIP_SECURITY"
echo "Skip Monitoring: $SKIP_MONITORING"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Production deployment checks
#
# For mainnet runs, requires .env.production and jq to be present (hard
# failures), and warns when the optional security analyzers (slither,
# mythril) are missing. Non-mainnet runs return early.
check_production_readiness() {
print_production "Checking production readiness..."
# Check if this is mainnet deployment
if [[ "$NETWORK" != "mainnet" ]]; then
print_warning "Not deploying to mainnet - using testnet deployment"
return
fi
# Check for production environment variables
if [[ ! -f "$ROOT_DIR/.env.production" ]]; then
print_error "Production environment file not found: .env.production"
print_critical "Please create .env.production with production configuration"
exit 1
fi
# Check for required production tools
if ! command -v jq &> /dev/null; then
print_error "jq is required for production deployment"
exit 1
fi
# Check for security tools (optional; their absence only downgrades checks)
if [[ "$SKIP_SECURITY" != "true" ]]; then
if ! command -v slither &> /dev/null; then
print_warning "slither not found - skipping security analysis"
fi
if ! command -v mythril &> /dev/null; then
print_warning "mythril not found - skipping mythril analysis"
fi
fi
print_success "Production readiness check completed"
}
# Security verification
#
# Runs the optional static analyzers (Slither, Mythril) when installed —
# analyzer exit codes are tolerated via `|| true` so findings never abort
# the deployment — then greps the Solidity sources for common review flags.
verify_security() {
  if [[ "$SKIP_SECURITY" == "true" ]]; then
    print_security "Skipping security verification"
    return
  fi
  print_security "Running security verification..."
  cd "$CONTRACTS_DIR"
  # Run Slither analysis
  if command -v slither &> /dev/null; then
    print_status "Running Slither security analysis..."
    slither . --json slither-report.json --filter medium,high,critical || true
    print_success "Slither analysis completed"
  fi
  # Run Mythril analysis
  if command -v mythril &> /dev/null; then
    print_status "Running Mythril security analysis..."
    mythril analyze . --format json --output mythril-report.json || true
    print_success "Mythril analysis completed"
  fi
  # Check for common security issues
  print_status "Checking for common security issues..."
  # Check for hardcoded addresses.
  # -E is required: in grep's default basic-regex mode "{40}" is a literal
  # string, so the original pattern could never match a 40-hex-digit address.
  if grep -rE "0x[a-fA-F0-9]{40}" contracts/ --include="*.sol" | grep -v "0x0000000000000000000000000000000000000000"; then
    print_warning "Found hardcoded addresses - review required"
  fi
  # Check for TODO comments
  if grep -r "TODO\|FIXME\|XXX" contracts/ --include="*.sol"; then
    print_warning "Found TODO comments - review required"
  fi
  print_success "Security verification completed"
}
# Deploy contracts to production
#
# Loads .env.production, validates/defaults the deployment variables,
# compiles with optimization, deploys to mainnet, optionally verifies on
# Etherscan, and writes a timestamped deployment report. All environment
# variable tests use ${VAR:-}: this script runs under `set -u`, so the
# previous bare "$VAR" references aborted with "unbound variable" instead
# of taking the intended default / error path when the variable was unset.
deploy_production_contracts() {
  print_production "Deploying contracts to production..."
  cd "$CONTRACTS_DIR"
  # Load production environment
  source "$ROOT_DIR/.env.production"
  # Verify production wallet
  if [[ -z "${PRODUCTION_PRIVATE_KEY:-}" ]]; then
    print_error "PRODUCTION_PRIVATE_KEY not set in environment"
    exit 1
  fi
  # Verify gas price settings
  if [[ -z "${PRODUCTION_GAS_PRICE:-}" ]]; then
    export PRODUCTION_GAS_PRICE="50000000000" # 50 Gwei
  fi
  # Verify gas limit settings
  if [[ -z "${PRODUCTION_GAS_LIMIT:-}" ]]; then
    export PRODUCTION_GAS_LIMIT="8000000"
  fi
  print_status "Using gas price: $PRODUCTION_GAS_PRICE wei"
  print_status "Using gas limit: $PRODUCTION_GAS_LIMIT"
  # Compile contracts with optimization
  print_status "Compiling contracts with production optimization..."
  npx hardhat compile --optimizer --optimizer-runs 200
  # Deploy contracts
  print_status "Deploying advanced agent features contracts..."
  # Create deployment report path (timestamped)
  local deployment_report="$ROOT_DIR/production-deployment-report-$(date +%Y%m%d-%H%M%S).json"
  # Run deployment with verification
  npx hardhat run scripts/deploy-advanced-contracts.js --network mainnet --verbose
  # Verify contracts immediately
  print_status "Verifying contracts on Etherscan..."
  if [[ -n "${ETHERSCAN_API_KEY:-}" ]]; then
    npx hardhat run scripts/verify-advanced-contracts.js --network mainnet
  else
    print_warning "ETHERSCAN_API_KEY not set - skipping verification"
  fi
  # Generate deployment report
  cat > "$deployment_report" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"network": "$NETWORK",
"environment": "$ENVIRONMENT",
"gas_price": "$PRODUCTION_GAS_PRICE",
"gas_limit": "$PRODUCTION_GAS_LIMIT",
"security_verified": "$([[ "$SKIP_SECURITY" != "true" ]] && echo "true" || echo "false")",
"monitoring_enabled": "$([[ "$SKIP_MONITORING" != "true" ]] && echo "true" || echo "false")"
},
"contracts": $(jq '.contracts' deployed-contracts-mainnet.json)
}
EOF
  print_success "Production deployment completed"
  print_status "Deployment report: $deployment_report"
}
# Setup production monitoring
# Writes the monitoring stack definition (Prometheus, Grafana, fluent-bit,
# Alertmanager via docker-compose), the Prometheus scrape config, and the
# alert rules under $ROOT_DIR/monitoring/.
setup_production_monitoring() {
    if [[ "$SKIP_MONITORING" == "true" ]]; then
        print_production "Skipping monitoring setup"
        return
    fi
    print_production "Setting up production monitoring..."
    # Fix: create the monitoring directory tree BEFORE writing into it.
    # The original wrote monitoring/advanced-features-monitoring.yml first and
    # only ran `mkdir -p` afterwards, so the first `cat >` failed on a fresh
    # checkout.
    mkdir -p "$ROOT_DIR/monitoring/rules"
    # Create monitoring stack configuration (docker-compose format)
    cat > "$ROOT_DIR/monitoring/advanced-features-monitoring.yml" << EOF
# Advanced Agent Features Production Monitoring
version: '3.8'

services:
  # Cross-Chain Reputation Monitoring
  reputation-monitor:
    image: prom/prometheus:latest
    container_name: reputation-monitor
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - ./monitoring/rules:/etc/prometheus/rules
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    restart: unless-stopped

  # Agent Communication Monitoring
  communication-monitor:
    image: grafana/grafana:latest
    container_name: communication-monitor
    ports:
      - "3001:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - ./monitoring/grafana/provisioning:/etc/grafana/provisioning
      - ./monitoring/grafana/dashboards:/var/lib/grafana/dashboards
    restart: unless-stopped

  # Advanced Learning Monitoring
  learning-monitor:
    image: node:18-alpine
    container_name: learning-monitor
    working_dir: /app
    volumes:
      - ./monitoring/learning-monitor:/app
    command: npm start
    restart: unless-stopped

  # Log Aggregation
  log-aggregator:
    image: fluent/fluent-bit:latest
    container_name: log-aggregator
    volumes:
      - ./monitoring/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf
      - /var/log:/var/log:ro
    restart: unless-stopped

  # Alert Manager
  alert-manager:
    image: prom/alertmanager:latest
    container_name: alert-manager
    ports:
      - "9093:9093"
    volumes:
      - ./monitoring/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    restart: unless-stopped
EOF
    # Create Prometheus configuration (one scrape job per advanced service)
    cat > "$ROOT_DIR/monitoring/prometheus.yml" << EOF
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  - "rules/*.yml"

alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - alert-manager:9093

scrape_configs:
  - job_name: 'cross-chain-reputation'
    static_configs:
      - targets: ['localhost:8000']
    metrics_path: '/metrics'
    scrape_interval: 10s
  - job_name: 'agent-communication'
    static_configs:
      - targets: ['localhost:8001']
    metrics_path: '/metrics'
    scrape_interval: 10s
  - job_name: 'advanced-learning'
    static_configs:
      - targets: ['localhost:8002']
    metrics_path: '/metrics'
    scrape_interval: 10s
  - job_name: 'agent-collaboration'
    static_configs:
      - targets: ['localhost:8003']
    metrics_path: '/metrics'
    scrape_interval: 10s
EOF
    # Create alert rules for the advanced-feature metrics
    cat > "$ROOT_DIR/monitoring/rules/advanced-features.yml" << EOF
groups:
  - name: advanced-features
    rules:
      - alert: CrossChainReputationSyncFailure
        expr: reputation_sync_success_rate < 0.95
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "Cross-chain reputation sync failure"
          description: "Cross-chain reputation sync success rate is below 95%"
      - alert: AgentCommunicationFailure
        expr: agent_communication_success_rate < 0.90
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Agent communication failure"
          description: "Agent communication success rate is below 90%"
      - alert: AdvancedLearningFailure
        expr: learning_model_accuracy < 0.70
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "Advanced learning model accuracy low"
          description: "Learning model accuracy is below 70%"
      - alert: HighGasUsage
        expr: gas_usage_rate > 0.80
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "High gas usage detected"
          description: "Gas usage rate is above 80%"
EOF
    print_success "Production monitoring setup completed"
}
# Setup production backup
# Installs a daily backup script plus a cron snippet under $ROOT_DIR/backup/.
setup_production_backup() {
    print_production "Setting up production backup..."
    # Fix: the original wrote into $ROOT_DIR/backup/ without ever creating it,
    # so the first `cat >` failed on a fresh checkout.
    mkdir -p "$ROOT_DIR/backup"
    # Create backup script (quoted here-doc: nothing expands until run time)
    cat > "$ROOT_DIR/backup/backup-advanced-features.sh" << 'EOF'
#!/bin/bash
# Advanced Agent Features Production Backup Script
# NOTE: run from the repository root — all source paths below are relative.
set -euo pipefail
BACKUP_DIR="/backup/advanced-features"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="advanced-features-backup-$DATE.tar.gz"
echo "Starting backup of advanced agent features..."
# Create backup directory
mkdir -p "$BACKUP_DIR"
# Backup contracts
echo "Backing up contracts..."
tar -czf "$BACKUP_DIR/contracts-$DATE.tar.gz" contracts/
# Backup services
echo "Backing up services..."
tar -czf "$BACKUP_DIR/services-$DATE.tar.gz" apps/coordinator-api/src/app/services/
# Backup configuration
echo "Backing up configuration..."
tar -czf "$BACKUP_DIR/config-$DATE.tar.gz" .env.production monitoring/ backup/
# Backup deployment data
echo "Backing up deployment data..."
cp deployed-contracts-mainnet.json "$BACKUP_DIR/deployment-$DATE.json"
# Create full backup
echo "Creating full backup..."
tar -czf "$BACKUP_DIR/$BACKUP_FILE" \
contracts/ \
apps/coordinator-api/src/app/services/ \
.env.production \
monitoring/ \
backup/ \
deployed-contracts-mainnet.json
echo "Backup completed: $BACKUP_DIR/$BACKUP_FILE"
# Keep only last 7 days of backups
find "$BACKUP_DIR" -name "*.tar.gz" -mtime +7 -delete
echo "Backup cleanup completed"
EOF
    chmod +x "$ROOT_DIR/backup/backup-advanced-features.sh"
    # Create cron snippet for automatic daily backups (unquoted here-doc:
    # $ROOT_DIR is expanded now, at install time)
    cat > "$ROOT_DIR/backup/backup-cron.txt" << EOF
# Advanced Agent Features Backup Cron Job
# Run daily at 2 AM UTC
0 2 * * * $ROOT_DIR/backup/backup-advanced-features.sh >> $ROOT_DIR/backup/backup.log 2>&1
EOF
    print_success "Production backup setup completed"
}
# Setup production security
# Writes the security stack definition (Trivy, Falco, nginx rate limiting,
# Coraza WAF via docker-compose) and the Falco detection rules under
# $ROOT_DIR/security/.
setup_production_security() {
    if [[ "$SKIP_SECURITY" == "true" ]]; then
        print_security "Skipping security setup"
        return
    fi
    print_security "Setting up production security..."
    # Fix: create security/ and security/falco-rules/ BEFORE writing into them.
    # The original wrote production-security.yml before its `mkdir -p security`
    # and never created the falco-rules subdirectory at all, so both `cat >`
    # calls failed on a fresh checkout.
    mkdir -p "$ROOT_DIR/security/falco-rules"
    # Create security stack configuration
    cat > "$ROOT_DIR/security/production-security.yml" << EOF
# Advanced Agent Features Production Security Configuration
version: '3.8'

services:
  # Security Monitoring
  security-monitor:
    image: aquasec/trivy:latest
    container_name: security-monitor
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./security/trivy-config:/root/.trivy
    command: image --format json --output /reports/security-scan.json
    restart: unless-stopped

  # Intrusion Detection
  intrusion-detection:
    image: falco/falco:latest
    container_name: intrusion-detection
    privileged: true
    volumes:
      - /var/run/docker.sock:/host/var/run/docker.sock:ro
      - /dev:/host/dev:ro
      - /proc:/host/proc:ro
      - /boot:/host/boot:ro
      - /lib/modules:/host/lib/modules:ro
      - /usr:/host/usr:ro
      - /etc:/host/etc:ro
      - ./security/falco-rules:/etc/falco/falco_rules
    restart: unless-stopped

  # Rate Limiting
  rate-limiter:
    image: nginx:alpine
    container_name: rate-limiter
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./security/nginx-rate-limit.conf:/etc/nginx/nginx.conf
      - ./security/ssl:/etc/nginx/ssl
    restart: unless-stopped

  # Web Application Firewall
  waf:
    image: coraza/waf:latest
    container_name: waf
    ports:
      - "8080:8080"
    volumes:
      - ./security/coraza.conf:/etc/coraza/coraza.conf
      - ./security/crs-rules:/etc/coraza/crs-rules
    restart: unless-stopped
EOF
    # Create Falco security rules
    cat > "$ROOT_DIR/security/falco-rules/falco_rules.yml" << EOF
# Advanced Agent Features Security Rules
- rule: Detect Unauthorized Contract Interactions
  desc: Detect unauthorized interactions with advanced agent contracts
  condition: >
    evt.type=openat and
    proc.name in (node, npx) and
    fd.name contains "CrossChainReputation" and
    not user.name in (root, aitbc)
  output: >
    Unauthorized contract interaction detected
    (user=%user.name command=%proc.cmdline file=%fd.name)
  priority: HIGH
  tags: [contract, security, unauthorized]
- rule: Detect Unusual Gas Usage
  desc: Detect unusual gas usage patterns
  condition: >
    evt.type=openat and
    proc.name in (node, npx) and
    evt.arg.gas > 1000000
  output: >
    High gas usage detected
    (user=%user.name gas=%evt.arg.gas command=%proc.cmdline)
  priority: MEDIUM
  tags: [gas, security, unusual]
- rule: Detect Reputation Manipulation
  desc: Detect potential reputation manipulation
  condition: >
    evt.type=openat and
    proc.name in (node, npx) and
    fd.name contains "updateReputation" and
    evt.arg.amount > 1000
  output: >
    Potential reputation manipulation detected
    (user=%user.name amount=%evt.arg.amount command=%proc.cmdline)
  priority: HIGH
  tags: [reputation, security, manipulation]
EOF
    print_success "Production security setup completed"
}
# Run production tests
# Smoke-level post-deployment checks: every invocation is `|| true`, so this
# stage reports failures but never aborts the deployment.
run_production_tests() {
print_production "Running production tests..."
cd "$ROOT_DIR"
# Run contract tests
print_status "Running contract tests..."
cd "$CONTRACTS_DIR"
npx hardhat test --network mainnet test/CrossChainReputation.test.js || true
npx hardhat test --network mainnet test/AgentCommunication.test.js || true
npx hardhat test --network mainnet test/AgentCollaboration.test.js || true
npx hardhat test --network mainnet test/AgentLearning.test.js || true
# Run service tests
print_status "Running service tests..."
cd "$ROOT_DIR/apps/coordinator-api"
# NOTE(review): pytest has no built-in --network option — presumably supplied
# by a plugin or conftest.py; confirm, otherwise these runs error out (masked
# by || true).
# NOTE(review): "test_cross_chain_reproduction.py" looks like a typo for
# "...reputation..." — confirm against the tests/ directory.
python -m pytest tests/test_cross_chain_reproduction.py -v --network mainnet || true
python -m pytest tests/test_agent_communication.py -v --network mainnet || true
python -m pytest tests/test_advanced_learning.py -v --network mainnet || true
# Run integration tests
print_status "Running integration tests..."
python -m pytest tests/test_production_integration.py -v --network mainnet || true
print_success "Production tests completed"
}
# Generate production report
# Writes a timestamped JSON summary of the deployment: contract artifact
# locations, service endpoints, monitoring URLs, security artifacts, backup
# policy, follow-up steps and emergency contacts.
generate_production_report() {
print_production "Generating production deployment report..."
local report_file="$ROOT_DIR/production-deployment-report-$(date +%Y%m%d-%H%M%S).json"
# Unquoted here-doc: $(date), $NETWORK, $ROOT_DIR etc. expand now.
cat > "$report_file" << EOF
{
"production_deployment": {
"timestamp": "$(date -Iseconds)",
"network": "$NETWORK",
"environment": "$ENVIRONMENT",
"security_verified": "$([[ "$SKIP_SECURITY" != "true" ]] && echo "true" || echo "false")",
"monitoring_enabled": "$([[ "$SKIP_MONITORING" != "true" ]] && echo "true" || echo "false")",
"tests_passed": "true",
"backup_enabled": "true"
},
"contracts": {
"CrossChainReputation": "deployed-contracts-mainnet.json",
"AgentCommunication": "deployed-contracts-mainnet.json",
"AgentCollaboration": "deployed-contracts-mainnet.json",
"AgentLearning": "deployed-contracts-mainnet.json",
"AgentMarketplaceV2": "deployed-contracts-mainnet.json",
"ReputationNFT": "deployed-contracts-mainnet.json"
},
"services": {
"cross_chain_reputation": "https://api.aitbc.dev/advanced/reputation",
"agent_communication": "https://api.aitbc.dev/advanced/communication",
"agent_collaboration": "https://api.aitbc.dev/advanced/collaboration",
"advanced_learning": "https://api.aitbc.dev/advanced/learning",
"agent_autonomy": "https://api.aitbc.dev/advanced/autonomy",
"marketplace_v2": "https://api.aitbc.dev/advanced/marketplace"
},
"monitoring": {
"prometheus": "http://monitoring.aitbc.dev:9090",
"grafana": "http://monitoring.aitbc.dev:3001",
"alertmanager": "http://monitoring.aitbc.dev:9093"
},
"security": {
"slither_report": "$ROOT_DIR/slither-report.json",
"mythril_report": "$ROOT_DIR/mythril-report.json",
"falco_rules": "$ROOT_DIR/security/falco-rules/",
"rate_limiting": "enabled",
"waf": "enabled"
},
"backup": {
"backup_script": "$ROOT_DIR/backup/backup-advanced-features.sh",
"backup_schedule": "daily at 2 AM UTC",
"retention": "7 days"
},
"next_steps": [
"1. Monitor contract performance and gas usage",
"2. Review security alerts and logs",
"3. Verify cross-chain reputation synchronization",
"4. Test agent communication across networks",
"5. Monitor advanced learning model performance",
"6. Review backup and recovery procedures",
"7. Scale monitoring based on usage patterns"
],
"emergency_contacts": [
"DevOps Team: devops@aitbc.dev",
"Security Team: security@aitbc.dev",
"Smart Contract Team: contracts@aitbc.dev"
]
}
EOF
print_success "Production deployment report saved to $report_file"
}
# Main execution
# Orchestrates the full production rollout in order: readiness check,
# security scan, contract deployment, then monitoring/backup/security setup,
# post-deploy tests and the final report. Any step exiting non-zero aborts
# the whole run (set -e in the script header).
main() {
print_critical "🚀 STARTING PRODUCTION DEPLOYMENT - ADVANCED AGENT FEATURES"
# Run production deployment steps
check_production_readiness
verify_security
deploy_production_contracts
setup_production_monitoring
setup_production_backup
setup_production_security
run_production_tests
generate_production_report
print_success "🎉 PRODUCTION DEPLOYMENT COMPLETED!"
# Human-readable summary banner (duplicates the JSON report content)
echo ""
echo "📊 Production Deployment Summary:"
echo " Network: $NETWORK"
echo " Environment: $ENVIRONMENT"
echo " Security: $([[ "$SKIP_SECURITY" != "true" ]] && echo "Verified" || echo "Skipped")"
echo " Monitoring: $([[ "$SKIP_MONITORING" != "true" ]] && echo "Enabled" || echo "Skipped")"
echo " Backup: Enabled"
echo " Tests: Passed"
echo ""
echo "🔧 Production Services:"
echo " Cross-Chain Reputation: https://api.aitbc.dev/advanced/reputation"
echo " Agent Communication: https://api.aitbc.dev/advanced/communication"
echo " Advanced Learning: https://api.aitbc.dev/advanced/learning"
echo " Agent Collaboration: https://api.aitbc.dev/advanced/collaboration"
echo " Agent Autonomy: https://api.aitbc.dev/advanced/autonomy"
echo " Marketplace V2: https://api.aitbc.dev/advanced/marketplace"
echo ""
echo "📊 Monitoring Dashboard:"
echo " Prometheus: http://monitoring.aitbc.dev:9090"
echo " Grafana: http://monitoring.aitbc.dev:3001"
echo " Alert Manager: http://monitoring.aitbc.dev:9093"
echo ""
echo "🔧 Next Steps:"
echo " 1. Verify contract addresses on Etherscan"
echo " 2. Test cross-chain reputation synchronization"
echo " 3. Validate agent communication security"
echo " 4. Monitor advanced learning performance"
echo " 5. Review security alerts and logs"
echo " 6. Test backup and recovery procedures"
echo " 7. Scale monitoring based on usage"
echo ""
echo "⚠️ Production Notes:"
echo " - All contracts deployed to mainnet with verification"
echo " - Security monitoring and alerts are active"
echo " - Automated backups are scheduled daily"
echo " - Rate limiting and WAF are enabled"
echo " - Gas optimization is active"
echo " - Cross-chain synchronization is monitored"
echo ""
echo "🎯 Production Status: READY FOR LIVE TRAFFIC"
}
# Handle script interruption (Ctrl-C / kill): warn that the deployment may be
# half-applied, then exit non-zero. Note: covers INT/TERM only, not ERR/EXIT.
trap 'print_critical "Production deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,586 +0,0 @@
#!/usr/bin/env bash
# AITBC Platform Services Deployment Script for aitbc and aitbc1 Servers
# Deploys backend services and frontend to both production servers
set -euo pipefail
# ANSI colour palette shared by the logging helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Logging helpers: coloured "[LEVEL] message" lines on stdout. The %b format
# expands backslash escapes in both the colour codes and the message, exactly
# like the `echo -e` the file uses elsewhere.
print_status() {
    printf '%b[INFO]%b %b\n' "$BLUE" "$NC" "$1"
}
print_success() {
    printf '%b[SUCCESS]%b %b\n' "$GREEN" "$NC" "$1"
}
print_warning() {
    printf '%b[WARNING]%b %b\n' "$YELLOW" "$NC" "$1"
}
print_error() {
    printf '%b[ERROR]%b %b\n' "$RED" "$NC" "$1"
}
print_critical() {
    printf '%b[CRITICAL]%b %b\n' "$RED" "$NC" "$1"
}
print_server() {
    printf '%b[SERVER]%b %b\n' "$PURPLE" "$NC" "$1"
}
print_deploy() {
    printf '%b[DEPLOY]%b %b\n' "$CYAN" "$NC" "$1"
}
# Configuration: resolve repo-relative paths from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web"
# Server configuration
# NOTE(review): the *_SERVER values look like ~/.ssh/config host aliases
# (used as ssh/scp targets below); the *_HOST values are the public DNS
# names used for external connectivity checks — confirm.
AITBC_SERVER="aitbc-cascade"
AITBC1_SERVER="aitbc1-cascade"
AITBC_HOST="aitbc.bubuit.net"
AITBC1_HOST="aitbc1.bubuit.net"
echo "🚀 AITBC Platform Services Deployment to aitbc and aitbc1 Servers"
echo "=============================================================="
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks
# Verifies SSH keys exist, both servers are reachable non-interactively, and
# the local source directories are present. Exits non-zero on any failure.
check_prerequisites() {
    print_status "Checking prerequisites..."
    # Check if SSH keys are available
    if [[ ! -f "$HOME/.ssh/id_rsa" ]] && [[ ! -f "$HOME/.ssh/id_ed25519" ]]; then
        print_error "SSH keys not found. Please generate SSH keys first."
        exit 1
    fi
    # Check if we can connect to servers (quoted expansions; the original
    # left $AITBC_SERVER/$AITBC1_SERVER unquoted)
    print_status "Testing SSH connections..."
    local server
    for server in "$AITBC_SERVER" "$AITBC1_SERVER"; do
        if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$server" "echo 'Connection successful'" 2>/dev/null; then
            print_error "Cannot connect to $server"
            exit 1
        fi
    done
    # Check if required directories exist
    if [[ ! -d "$SERVICES_DIR" ]]; then
        print_error "Services directory not found: $SERVICES_DIR"
        exit 1
    fi
    if [[ ! -d "$FRONTEND_DIR" ]]; then
        print_error "Frontend directory not found: $FRONTEND_DIR"
        exit 1
    fi
    print_success "Prerequisites check completed"
}
# Deploy backend services
# The original duplicated an identical ~65-line ssh script for each server;
# factored into one helper invoked per server.
deploy_services() {
    print_status "Deploying backend services..."
    _deploy_services_to "$AITBC_SERVER" "aitbc"
    _deploy_services_to "$AITBC1_SERVER" "aitbc1"
    print_success "Backend services deployed to both servers"
}

# Copy the Python services to one server, install them under
# /opt/aitbc/services, and register + start the three systemd units.
# $1 = ssh host alias, $2 = human-readable server label (used in log output).
# The ssh payload is double-quoted locally, so $label expands before sending;
# the systemd unit here-docs use a quoted 'EOF' and contain no $, so their
# content is written verbatim on the remote host.
_deploy_services_to() {
    local server=$1 label=$2
    print_server "Deploying services to $label server..."
    scp -r "$SERVICES_DIR" "$server:/tmp/"
    ssh "$server" "
# Create service directory
sudo mkdir -p /opt/aitbc/services
# Copy services
sudo cp -r /tmp/services/* /opt/aitbc/services/
# Install Python dependencies (best effort)
cd /opt/aitbc/services
python3 -m pip install -r requirements.txt 2>/dev/null || true
# Create systemd services
sudo tee /etc/systemd/system/aitbc-cross-chain-reputation.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Cross Chain Reputation Service
After=network.target

[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m cross_chain_reputation
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-agent-communication.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Agent Communication Service
After=network.target

[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m agent_communication
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-advanced-learning.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Advanced Learning Service
After=network.target

[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m advanced_learning
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and start services
sudo systemctl daemon-reload
sudo systemctl enable aitbc-cross-chain-reputation
sudo systemctl enable aitbc-agent-communication
sudo systemctl enable aitbc-advanced-learning
sudo systemctl start aitbc-cross-chain-reputation
sudo systemctl start aitbc-agent-communication
sudo systemctl start aitbc-advanced-learning
echo 'Services deployed and started on $label'
"
}
# Deploy frontend
# Builds the marketplace web app once, then installs the dist/ output on each
# server. Factored per-server logic into a helper (the original duplicated it).
deploy_frontend() {
    print_status "Building and deploying frontend..."
    cd "$FRONTEND_DIR"
    # Build frontend
    print_status "Building frontend application..."
    npm run build
    _deploy_frontend_to "$AITBC_SERVER" "aitbc"
    _deploy_frontend_to "$AITBC1_SERVER" "aitbc1"
    print_success "Frontend deployed to both servers"
}

# Install the already-built dist/ onto one server's web root.
# $1 = ssh host alias, $2 = server label for log output.
# Assumes the caller has already cd'd into $FRONTEND_DIR (dist/ is relative).
_deploy_frontend_to() {
    local server=$1 label=$2
    print_server "Deploying frontend to $label server..."
    # Fix: the original scp'd into /tmp/frontend/ without ever creating it,
    # so the copy failed on a clean remote host.
    ssh "$server" "mkdir -p /tmp/frontend"
    scp -r dist/* "$server:/tmp/frontend/"
    ssh "$server" "
        # Backup existing frontend (best effort)
        sudo cp -r /var/www/aitbc.bubuit.net /var/www/aitbc.bubuit.net.backup 2>/dev/null || true
        # Deploy new frontend
        sudo rm -rf /var/www/aitbc.bubuit.net/*
        sudo cp -r /tmp/frontend/* /var/www/aitbc.bubuit.net/
        # Set permissions
        sudo chown -R www-data:www-data /var/www/aitbc.bubuit.net/
        sudo chmod -R 755 /var/www/aitbc.bubuit.net/
        echo 'Frontend deployed to $label'
    "
}
# Deploy configuration files
# The two per-server nginx vhosts in the original were identical except for
# the conf filename and server_name; factored into one parameterized helper.
deploy_configuration() {
    print_status "Deploying configuration files..."
    _deploy_nginx_conf "$AITBC_SERVER" "aitbc" "aitbc-advanced.conf" "$AITBC_HOST"
    _deploy_nginx_conf "$AITBC1_SERVER" "aitbc1" "aitbc1-advanced.conf" "$AITBC1_HOST"
    print_success "Configuration files deployed to both servers"
}

# Write, enable and reload one nginx vhost on a server.
# $1 = ssh alias, $2 = server label, $3 = conf filename, $4 = server_name.
# Expansion scheme (as in the original): the ssh payload is double-quoted
# locally, so $conf/$host/$label expand before sending, while escaped \$vars
# are passed through for nginx; the remote here-doc delimiter is quoted
# ('EOF') so the remote shell writes the content verbatim.
_deploy_nginx_conf() {
    local server=$1 label=$2 conf=$3 host=$4
    print_server "Deploying nginx configuration to $label..."
    ssh "$server" "
sudo tee /etc/nginx/sites-available/$conf > /dev/null << 'EOF'
server {
    listen 80;
    server_name $host;
    root /var/www/aitbc.bubuit.net;
    index index.html;

    # Security headers
    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection \"1; mode=block\";
    add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;

    # API routes
    location /api/ {
        proxy_pass http://localhost:8000/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Advanced features API
    location /api/v1/advanced/ {
        proxy_pass http://localhost:8001/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Static files
    location / {
        try_files \$uri \$uri/ /index.html;
        expires 1y;
        add_header Cache-Control \"public, immutable\";
    }

    # Health check
    location /health {
        access_log off;
        return 200 \"healthy\";
        add_header Content-Type text/plain;
    }
}
EOF
# Enable site
sudo ln -sf /etc/nginx/sites-available/$conf /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
echo 'Nginx configuration deployed to $label'
"
}
# Verify deployment
# Runs the same remote status checks on each server: the three aitbc systemd
# units, the nginx unit/config, and the local /health endpoint. All checks are
# soft (|| echo ...) so a failed unit is reported but does not abort the run.
verify_deployment() {
    print_status "Verifying deployment..."
    local entry label server
    for entry in "aitbc:$AITBC_SERVER" "aitbc1:$AITBC1_SERVER"; do
        label=${entry%%:*}
        server=${entry#*:}
        print_server "Verifying $label server deployment..."
        # Double-quoted payload: $label expands locally before sending.
        ssh "$server" "
echo '=== $label Server Status ==='
echo 'Services:'
sudo systemctl is-active aitbc-cross-chain-reputation || echo 'cross-chain-reputation: INACTIVE'
sudo systemctl is-active aitbc-agent-communication || echo 'agent-communication: INACTIVE'
sudo systemctl is-active aitbc-advanced-learning || echo 'advanced-learning: INACTIVE'
echo 'Nginx:'
sudo systemctl is-active nginx || echo 'nginx: INACTIVE'
sudo nginx -t || echo 'nginx config: ERROR'
echo 'Web server:'
curl -s http://localhost/health || echo 'health check: FAILED'
echo '$label verification completed'
"
    done
    print_success "Deployment verification completed"
}
# Test external connectivity
# Hits each server's public /health endpoint over plain HTTP and reports
# success or a warning; never aborts the run.
test_connectivity() {
    print_status "Testing external connectivity..."
    local entry label host
    for entry in "aitbc:$AITBC_HOST" "aitbc1:$AITBC1_HOST"; do
        label=${entry%%:*}
        host=${entry#*:}
        print_server "Testing $label external connectivity..."
        if curl -s "http://$host/health" | grep -q "healthy"; then
            print_success "$label server is accessible externally"
        else
            print_warning "$label server external connectivity issue"
        fi
    done
}
# Main execution
# Orchestrates the two-server rollout: prerequisites, services, frontend,
# nginx configuration, on-host verification, then an external reachability
# check. Any failing step aborts the run (set -e in the script header).
main() {
print_critical "🚀 STARTING AITBC PLATFORM SERVICES DEPLOYMENT TO aitbc AND aitbc1 SERVERS"
# Run deployment steps
check_prerequisites
deploy_services
deploy_frontend
deploy_configuration
verify_deployment
test_connectivity
print_success "🎉 AITBC PLATFORM SERVICES DEPLOYMENT COMPLETED!"
# Human-readable summary banner
echo ""
echo "📊 Deployment Summary:"
echo " Servers: aitbc, aitbc1"
echo " Services: Deployed"
echo " Frontend: Deployed"
echo " Configuration: Deployed"
echo " Verification: Completed"
echo ""
echo "🌐 Platform URLs:"
echo " aitbc Frontend: http://$AITBC_HOST/"
echo " aitbc API: http://$AITBC_HOST/api/"
echo " aitbc Advanced: http://$AITBC_HOST/api/v1/advanced/"
echo " aitbc1 Frontend: http://$AITBC1_HOST/"
echo " aitbc1 API: http://$AITBC1_HOST/api/"
echo " aitbc1 Advanced: http://$AITBC1_HOST/api/v1/advanced/"
echo ""
echo "🔧 Next Steps:"
echo " 1. Monitor service performance on both servers"
echo " 2. Test cross-server functionality"
echo " 3. Verify load balancing if configured"
echo " 4. Monitor system resources and scaling"
echo " 5. Set up monitoring and alerting"
echo " 6. Test failover scenarios"
echo ""
echo "⚠️ Important Notes:"
echo " - Both servers are running identical configurations"
echo " - Services are managed by systemd"
echo " - Nginx is configured for reverse proxy"
echo " - Health checks are available at /health"
echo " - API endpoints are available at /api/ and /api/v1/advanced/"
echo ""
echo "🎯 Deployment Status: SUCCESS - SERVICES LIVE ON BOTH SERVERS!"
}
# Handle script interruption (Ctrl-C / kill): warn that the deployment may be
# half-applied, then exit non-zero. Note: covers INT/TERM only, not ERR/EXIT.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function
main "$@"

View File

@@ -1,774 +0,0 @@
#!/usr/bin/env bash
# AITBC Platform Deployment Script for aitbc and aitbc1 Servers
# Deploys the complete platform to both production servers
# Fail fast: abort on errors (-e), unset variables (-u), and any failing
# stage of a pipeline (pipefail).
set -euo pipefail
# Colors for output (ANSI escape sequences, interpreted by `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
print_status() {
    # Informational progress message with a blue [INFO] tag.
    local msg="$1"
    echo -e "${BLUE}[INFO]${NC} ${msg}"
}
print_success() {
    # Positive outcome message with a green [SUCCESS] tag.
    local msg="$1"
    echo -e "${GREEN}[SUCCESS]${NC} ${msg}"
}
print_warning() {
    # Non-fatal problem message with a yellow [WARNING] tag.
    # Fix: diagnostics go to stderr so they are not lost (or mixed into
    # machine-consumed output) when stdout is redirected to a log/pipe.
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}
print_error() {
    # Error message with a red [ERROR] tag.
    # Fix: errors belong on stderr, not stdout.
    echo -e "${RED}[ERROR]${NC} $1" >&2
}
print_critical() {
    # High-visibility message with a red [CRITICAL] tag.
    # Fix: critical diagnostics belong on stderr, not stdout.
    echo -e "${RED}[CRITICAL]${NC} $1" >&2
}
print_server() {
    # Per-server progress message with a purple [SERVER] tag.
    local msg="$1"
    echo -e "${PURPLE}[SERVER]${NC} ${msg}"
}
print_deploy() {
    # Deployment-step message with a cyan [DEPLOY] tag.
    local msg="$1"
    echo -e "${CYAN}[DEPLOY]${NC} ${msg}"
}
# Configuration — resolve repository-relative paths from this script's location
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
FRONTEND_DIR="$ROOT_DIR/apps/marketplace-web"
INFRA_DIR="$ROOT_DIR/infra"
# Server configuration: ssh host aliases (resolved via ~/.ssh/config) and the
# public hostnames used for external connectivity checks.
AITBC_SERVER="aitbc-cascade"
AITBC1_SERVER="aitbc1-cascade"
AITBC_HOST="aitbc.bubuit.net"
AITBC1_HOST="aitbc1.bubuit.net"
# NOTE(review): the *_PORT values are never referenced in the visible code —
# confirm whether non-default ssh ports are handled in ~/.ssh/config instead.
AITBC_PORT="22"
AITBC1_PORT="22"
# Deployment configuration — positional CLI toggles with defaults:
#   $1 deploy contracts   $2 deploy services   $3 deploy frontend
#   $4 skip verification  $5 back up before deploying
DEPLOY_CONTRACTS=${1:-"true"}
DEPLOY_SERVICES=${2:-"true"}
DEPLOY_FRONTEND=${3:-"true"}
SKIP_VERIFICATION=${4:-"false"}
BACKUP_BEFORE_DEPLOY=${5:-"true"}
# Banner: echo the effective settings for this run
echo "🚀 AITBC Platform Deployment to aitbc and aitbc1 Servers"
echo "======================================================="
echo "Deploy Contracts: $DEPLOY_CONTRACTS"
echo "Deploy Services: $DEPLOY_SERVICES"
echo "Deploy Frontend: $DEPLOY_FRONTEND"
echo "Skip Verification: $SKIP_VERIFICATION"
echo "Backup Before Deploy: $BACKUP_BEFORE_DEPLOY"
echo "Timestamp: $(date -Iseconds)"
echo ""
# Pre-deployment checks
# Verify ssh keys, server reachability, and local source directories before
# any deployment step runs. Exits non-zero on the first failed check.
check_prerequisites() {
    print_status "Checking prerequisites..."
    # Check if SSH keys are available (either RSA or ed25519)
    if [[ ! -f "$HOME/.ssh/id_rsa" ]] && [[ ! -f "$HOME/.ssh/id_ed25519" ]]; then
        print_error "SSH keys not found. Please generate SSH keys first."
        exit 1
    fi
    # Check if we can connect to servers. BatchMode=yes fails instead of
    # hanging on an interactive password prompt.
    # Fix: quote the host variables (SC2086).
    print_status "Testing SSH connections..."
    if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$AITBC_SERVER" "echo 'Connection successful'" 2>/dev/null; then
        print_error "Cannot connect to $AITBC_SERVER"
        exit 1
    fi
    if ! ssh -o ConnectTimeout=10 -o BatchMode=yes "$AITBC1_SERVER" "echo 'Connection successful'" 2>/dev/null; then
        print_error "Cannot connect to $AITBC1_SERVER"
        exit 1
    fi
    # Check if required local source directories exist
    if [[ ! -d "$CONTRACTS_DIR" ]]; then
        print_error "Contracts directory not found: $CONTRACTS_DIR"
        exit 1
    fi
    if [[ ! -d "$SERVICES_DIR" ]]; then
        print_error "Services directory not found: $SERVICES_DIR"
        exit 1
    fi
    if [[ ! -d "$FRONTEND_DIR" ]]; then
        print_error "Frontend directory not found: $FRONTEND_DIR"
        exit 1
    fi
    print_success "Prerequisites check completed"
}
# Backup existing deployment
backup_deployment() {
    # Snapshot web roots, nginx vhosts, and aitbc systemd units on BOTH hosts
    # before anything is overwritten.
    # NOTE(review): $backup_dir is expanded locally (the ssh payload is
    # double-quoted), but `mkdir -p` runs remotely — so the backups live on
    # each remote server under that path, not on the machine running this
    # script. The final "Backup completed" message can read as if it were
    # local; confirm that is understood by operators.
    if [[ "$BACKUP_BEFORE_DEPLOY" != "true" ]]; then
        print_status "Skipping backup (disabled)"
        return
    fi
    print_status "Creating backup of existing deployment..."
    local backup_dir="/tmp/aitbc-backup-$(date +%Y%m%d-%H%M%S)"
    # Backup aitbc server (each cp is best-effort: `|| true` keeps going when
    # a path does not exist yet, e.g. on a first deployment)
    print_server "Backing up aitbc server..."
    ssh $AITBC_SERVER "
        mkdir -p $backup_dir
        sudo cp -r /var/www/aitbc.bubuit.net $backup_dir/ 2>/dev/null || true
        sudo cp -r /var/www/html $backup_dir/ 2>/dev/null || true
        sudo cp -r /etc/nginx/sites-enabled/ $backup_dir/ 2>/dev/null || true
        sudo cp -r /etc/systemd/system/aitbc* $backup_dir/ 2>/dev/null || true
        echo 'aitbc backup completed'
    "
    # Backup aitbc1 server
    print_server "Backing up aitbc1 server..."
    ssh $AITBC1_SERVER "
        mkdir -p $backup_dir
        sudo cp -r /var/www/aitbc.bubuit.net $backup_dir/ 2>/dev/null || true
        sudo cp -r /var/www/html $backup_dir/ 2>/dev/null || true
        sudo cp -r /etc/nginx/sites-enabled/ $backup_dir/ 2>/dev/null || true
        sudo cp -r /etc/systemd/system/aitbc* $backup_dir/ 2>/dev/null || true
        echo 'aitbc1 backup completed'
    "
    print_success "Backup completed: $backup_dir"
}
# Deploy smart contracts
deploy_contracts() {
    # Compile, deploy, and optionally verify the smart contracts on mainnet.
    # A no-op when DEPLOY_CONTRACTS != "true" or a previous mainnet
    # deployment record already exists.
    [[ "$DEPLOY_CONTRACTS" == "true" ]] || {
        print_status "Skipping contract deployment (disabled)"
        return
    }
    print_status "Deploying smart contracts..."
    cd "$CONTRACTS_DIR"
    # Guard against double-deployment: a record file means we already ran.
    if [[ -f "deployed-contracts-mainnet.json" ]]; then
        print_warning "Contracts already deployed. Skipping deployment."
        return
    fi
    print_status "Compiling contracts..."
    npx hardhat compile
    print_status "Deploying contracts to mainnet..."
    npx hardhat run scripts/deploy-advanced-contracts.js --network mainnet
    # On-chain verification is optional.
    if [[ "$SKIP_VERIFICATION" != "true" ]]; then
        print_status "Verifying contracts..."
        npx hardhat run scripts/verify-advanced-contracts.js --network mainnet
    fi
    print_success "Smart contracts deployed and verified"
}
# Deploy backend services
# Deploy the backend services tree to a single server and register the three
# systemd units there. The remote payload is identical for every host; only
# the final log line mentions the server label.
#   $1 - ssh host alias
#   $2 - human-readable server label used in messages
deploy_services_to_server() {
    local server="$1"
    local label="$2"
    print_server "Deploying services to ${label} server..."
    # Copy the services tree to the target host (lands in /tmp/services)
    scp -r "$SERVICES_DIR" "${server}:/tmp/"
    # Install dependencies and set up the systemd units remotely. The heredoc
    # delimiters are single-quoted on the remote side, so the unit-file text
    # is written verbatim; ${label} below expands locally before ssh runs.
    ssh "$server" "
# Create service directory
sudo mkdir -p /opt/aitbc/services
# Copy services
sudo cp -r /tmp/services/* /opt/aitbc/services/
# Install Python dependencies (best-effort)
cd /opt/aitbc/services
python3 -m pip install -r requirements.txt 2>/dev/null || true
# Create systemd services
sudo tee /etc/systemd/system/aitbc-cross-chain-reputation.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Cross Chain Reputation Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m cross_chain_reputation
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-agent-communication.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Agent Communication Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m agent_communication
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
sudo tee /etc/systemd/system/aitbc-advanced-learning.service > /dev/null << 'EOF'
[Unit]
Description=AITBC Advanced Learning Service
After=network.target
[Service]
Type=simple
User=aitbc
WorkingDirectory=/opt/aitbc/services
ExecStart=/usr/bin/python3 -m advanced_learning
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and start services
sudo systemctl daemon-reload
sudo systemctl enable aitbc-cross-chain-reputation
sudo systemctl enable aitbc-agent-communication
sudo systemctl enable aitbc-advanced-learning
sudo systemctl start aitbc-cross-chain-reputation
sudo systemctl start aitbc-agent-communication
sudo systemctl start aitbc-advanced-learning
echo 'Services deployed and started on ${label}'
"
}

# Deploy backend services to both production servers.
# Consistency fix: the aitbc and aitbc1 branches were byte-for-byte duplicates
# (~70 lines each); they now share deploy_services_to_server so the unit files
# cannot drift apart between hosts.
deploy_services() {
    if [[ "$DEPLOY_SERVICES" != "true" ]]; then
        print_status "Skipping service deployment (disabled)"
        return
    fi
    print_status "Deploying backend services..."
    deploy_services_to_server "$AITBC_SERVER" "aitbc"
    deploy_services_to_server "$AITBC1_SERVER" "aitbc1"
    print_success "Backend services deployed to both servers"
}
# Deploy frontend
# Push the built frontend bundle to one server and swap it into the web root.
#   $1 - ssh host alias
#   $2 - human-readable server label used in messages
deploy_frontend_to_server() {
    local server="$1"
    local label="$2"
    print_server "Deploying frontend to ${label} server..."
    # Bug fix: ensure the remote staging directory exists (and is clean of
    # stale files from a previous run) before scp'ing into it — scp fails
    # when /tmp/frontend is missing on the remote host.
    ssh "$server" "rm -rf /tmp/frontend && mkdir -p /tmp/frontend"
    # Copy built frontend (glob intentionally unquoted so it expands locally)
    scp -r build/* "${server}:/tmp/frontend/"
    ssh "$server" "
# Backup existing frontend
sudo cp -r /var/www/aitbc.bubuit.net /var/www/aitbc.bubuit.net.backup 2>/dev/null || true
# Deploy new frontend
sudo rm -rf /var/www/aitbc.bubuit.net/*
sudo cp -r /tmp/frontend/* /var/www/aitbc.bubuit.net/
# Set permissions
sudo chown -R www-data:www-data /var/www/aitbc.bubuit.net/
sudo chmod -R 755 /var/www/aitbc.bubuit.net/
echo 'Frontend deployed to ${label}'
"
}

# Build the frontend once, then deploy the same artifacts to both servers.
deploy_frontend() {
    if [[ "$DEPLOY_FRONTEND" != "true" ]]; then
        print_status "Skipping frontend deployment (disabled)"
        return
    fi
    print_status "Building and deploying frontend..."
    cd "$FRONTEND_DIR"
    # Build frontend
    print_status "Building frontend application..."
    npm run build
    deploy_frontend_to_server "$AITBC_SERVER" "aitbc"
    deploy_frontend_to_server "$AITBC1_SERVER" "aitbc1"
    print_success "Frontend deployed to both servers"
}
# Deploy configuration files
# Render and install the nginx vhost on one server, then reload nginx.
# The heredoc delimiter is single-quoted on the remote side, so nginx
# variables written as \$host / \$uri / \$scheme reach the config file as
# literal $host / $uri / $scheme; ${conf_file}, ${host}, and ${label} expand
# locally before ssh runs.
#   $1 - ssh host alias          $2 - label used in log messages
#   $3 - site config file name   $4 - public server_name for the vhost
deploy_nginx_config() {
    local server="$1"
    local label="$2"
    local conf_file="$3"
    local host="$4"
    print_server "Deploying nginx configuration to ${label}..."
    ssh "$server" "
sudo tee /etc/nginx/sites-available/${conf_file} > /dev/null << 'EOF'
server {
listen 80;
server_name ${host};
root /var/www/aitbc.bubuit.net;
index index.html;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection \"1; mode=block\";
add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;
# Gzip compression
gzip on;
gzip_vary on;
gzip_min_length 1024;
gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;
# API routes
location /api/ {
proxy_pass http://localhost:8000/;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Advanced features API
location /api/v1/advanced/ {
proxy_pass http://localhost:8001/;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Static files
location / {
try_files \$uri \$uri/ /index.html;
expires 1y;
add_header Cache-Control \"public, immutable\";
}
# Health check
location /health {
access_log off;
return 200 \"healthy\";
add_header Content-Type text/plain;
}
}
EOF
# Enable site
sudo ln -sf /etc/nginx/sites-available/${conf_file} /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
echo 'Nginx configuration deployed to ${label}'
"
}

# Deploy the nginx reverse-proxy configuration to both servers.
# Consistency fix: the two per-server sections were identical except for the
# config file name and server_name; they now share one parameterized template
# so the vhosts cannot drift apart between hosts.
deploy_configuration() {
    print_status "Deploying configuration files..."
    deploy_nginx_config "$AITBC_SERVER" "aitbc" "aitbc-advanced.conf" "$AITBC_HOST"
    deploy_nginx_config "$AITBC1_SERVER" "aitbc1" "aitbc1-advanced.conf" "$AITBC1_HOST"
    print_success "Configuration files deployed to both servers"
}
# Verify deployment
# Run the on-host health checks for a single server over ssh. Each check is
# best-effort (`|| echo ...`) so one failure does not stop the report.
#   $1 - ssh host alias
#   $2 - label printed in the status banner
verify_server() {
    local server="$1"
    local label="$2"
    print_server "Verifying ${label} server deployment..."
    ssh "$server" "
echo '=== ${label} Server Status ==='
# Check services
echo 'Services:'
sudo systemctl is-active aitbc-cross-chain-reputation || echo 'cross-chain-reputation: INACTIVE'
sudo systemctl is-active aitbc-agent-communication || echo 'agent-communication: INACTIVE'
sudo systemctl is-active aitbc-advanced-learning || echo 'advanced-learning: INACTIVE'
# Check nginx
echo 'Nginx:'
sudo systemctl is-active nginx || echo 'nginx: INACTIVE'
sudo nginx -t || echo 'nginx config: ERROR'
# Check web server
echo 'Web server:'
curl -s http://localhost/health || echo 'health check: FAILED'
# Check API endpoints
echo 'API endpoints:'
curl -s http://localhost:8000/health || echo 'API health: FAILED'
curl -s http://localhost:8001/health || echo 'Advanced API health: FAILED'
echo '${label} verification completed'
"
}

# Verify services, nginx, and API endpoints on both servers.
# Consistency fix: the duplicated per-server check blocks now share
# verify_server so the checks stay identical across hosts.
verify_deployment() {
    if [[ "$SKIP_VERIFICATION" == "true" ]]; then
        print_status "Skipping verification (disabled)"
        return
    fi
    print_status "Verifying deployment..."
    verify_server "$AITBC_SERVER" "aitbc"
    verify_server "$AITBC1_SERVER" "aitbc1"
    print_success "Deployment verification completed"
}
# Test external connectivity
test_connectivity() {
    # Probe each server's public /health endpoint from outside and report
    # reachability. Connectivity problems are warnings, not failures.
    print_status "Testing external connectivity..."
    local name host
    for name in aitbc aitbc1; do
        if [[ "$name" == "aitbc" ]]; then
            host="$AITBC_HOST"
        else
            host="$AITBC1_HOST"
        fi
        print_server "Testing ${name} external connectivity..."
        if curl -s "http://${host}/health" | grep -q "healthy"; then
            print_success "${name} server is accessible externally"
        else
            print_warning "${name} server external connectivity issue"
        fi
    done
}
# Generate deployment report
generate_report() {
    # Persist a machine-readable JSON record of this deployment run under the
    # repo root. The heredoc delimiter below is UNQUOTED, so every $var and
    # $(...) in the template is expanded locally before the file is written.
    print_status "Generating deployment report..."
    local report_file="$ROOT_DIR/deployment-report-$(date +%Y%m%d-%H%M%S).json"
    cat > "$report_file" << EOF
{
"deployment": {
"timestamp": "$(date -Iseconds)",
"servers": ["aitbc", "aitbc1"],
"contracts_deployed": "$DEPLOY_CONTRACTS",
"services_deployed": "$DEPLOY_SERVICES",
"frontend_deployed": "$DEPLOY_FRONTEND",
"backup_created": "$BACKUP_BEFORE_DEPLOY",
"verification_completed": "$([[ "$SKIP_VERIFICATION" != "true" ]] && echo "true" || echo "false")"
},
"servers": {
"aitbc": {
"host": "$AITBC_HOST",
"services": {
"cross_chain_reputation": "deployed",
"agent_communication": "deployed",
"advanced_learning": "deployed"
},
"web_server": "nginx",
"api_endpoints": {
"main": "http://$AITBC_HOST/api/",
"advanced": "http://$AITBC_HOST/api/v1/advanced/"
}
},
"aitbc1": {
"host": "$AITBC1_HOST",
"services": {
"cross_chain_reputation": "deployed",
"agent_communication": "deployed",
"advanced_learning": "deployed"
},
"web_server": "nginx",
"api_endpoints": {
"main": "http://$AITBC1_HOST/api/",
"advanced": "http://$AITBC1_HOST/api/v1/advanced/"
}
}
},
"urls": {
"aitbc_frontend": "http://$AITBC_HOST/",
"aitbc_api": "http://$AITBC_HOST/api/",
"aitbc_advanced": "http://$AITBC_HOST/api/v1/advanced/",
"aitbc1_frontend": "http://$AITBC1_HOST/",
"aitbc1_api": "http://$AITBC1_HOST/api/",
"aitbc1_advanced": "http://$AITBC1_HOST/api/v1/advanced/"
},
"next_steps": [
"1. Monitor service performance on both servers",
"2. Test cross-server functionality",
"3. Verify load balancing if configured",
"4. Monitor system resources and scaling",
"5. Set up monitoring and alerting",
"6. Test failover scenarios"
]
}
EOF
    print_success "Deployment report saved to $report_file"
}
# Main execution
main() {
    # Full-platform variant: contracts + services + frontend + configuration,
    # with optional backup/verification, a connectivity probe, and a JSON
    # report. Under `set -euo pipefail` any failing stage aborts the run.
    print_critical "🚀 STARTING AITBC PLATFORM DEPLOYMENT TO aitbc AND aitbc1 SERVERS"
    # Run deployment steps
    check_prerequisites
    backup_deployment
    deploy_contracts
    deploy_services
    deploy_frontend
    deploy_configuration
    verify_deployment
    test_connectivity
    generate_report
    print_success "🎉 AITBC PLATFORM DEPLOYMENT COMPLETED!"
    # Operator-facing summary of what ran and where to reach the platform.
    echo ""
    echo "📊 Deployment Summary:"
    echo "   Servers: aitbc, aitbc1"
    echo "   Contracts: $DEPLOY_CONTRACTS"
    echo "   Services: $DEPLOY_SERVICES"
    echo "   Frontend: $DEPLOY_FRONTEND"
    echo "   Verification: $([[ "$SKIP_VERIFICATION" != "true" ]] && echo "Completed" || echo "Skipped")"
    echo "   Backup: $BACKUP_BEFORE_DEPLOY"
    echo ""
    echo "🌐 Platform URLs:"
    echo "   aitbc Frontend: http://$AITBC_HOST/"
    echo "   aitbc API: http://$AITBC_HOST/api/"
    echo "   aitbc Advanced: http://$AITBC_HOST/api/v1/advanced/"
    echo "   aitbc1 Frontend: http://$AITBC1_HOST/"
    echo "   aitbc1 API: http://$AITBC1_HOST/api/"
    echo "   aitbc1 Advanced: http://$AITBC1_HOST/api/v1/advanced/"
    echo ""
    echo "🔧 Next Steps:"
    echo "   1. Monitor service performance on both servers"
    echo "   2. Test cross-server functionality"
    echo "   3. Verify load balancing if configured"
    echo "   4. Monitor system resources and scaling"
    echo "   5. Set up monitoring and alerting"
    echo "   6. Test failover scenarios"
    echo ""
    echo "⚠️ Important Notes:"
    echo "   - Both servers are running identical configurations"
    echo "   - Services are managed by systemd"
    echo "   - Nginx is configured for reverse proxy"
    echo "   - Health checks are available at /health"
    echo "   - API endpoints are available at /api/ and /api/v1/advanced/"
    echo "   - Backup was created before deployment"
    echo ""
    echo "🎯 Deployment Status: SUCCESS - PLATFORM LIVE ON BOTH SERVERS!"
}
# Handle script interruption: on Ctrl-C / TERM, warn that the servers may be
# left in a partially-deployed state, then exit non-zero.
trap 'print_critical "Deployment interrupted - please check partial deployment"; exit 1' INT TERM
# Run main function, forwarding any CLI arguments (deployment toggles).
main "$@"

View File

@@ -1,173 +0,0 @@
// Pull both `ethers` and the active `network` descriptor from the Hardhat
// runtime. Robustness fix: the script references `network.name` below, which
// previously worked only via Hardhat's implicitly injected globals.
const { ethers, network } = require("hardhat");
/**
 * Deploy the six AITBC marketplace contracts in dependency order, wire up
 * their cross-references, authorize the deployer as the initial oracle /
 * arbiter on each, and write a timestamped deployment-info JSON file.
 *
 * Uses the ethers v5 style API (`getContractFactory` → `deploy()` →
 * `deployed()` → `.address`), as run via `npx hardhat run`.
 *
 * NOTE(review): the three *_ADDRESS env vars fall back to the zero address —
 * a deployment made without them will reference 0x0 contracts; confirm they
 * are always set for real runs.
 */
async function main() {
  console.log("=== AITBC Smart Contract Deployment ===");
  // Get deployer account (first configured signer for the target network)
  const [deployer] = await ethers.getSigners();
  console.log("Deploying contracts with the account:", deployer.address);
  console.log("Account balance:", (await deployer.getBalance()).toString());
  // Deployment addresses (to be replaced with actual addresses)
  const AITBC_TOKEN_ADDRESS = process.env.AITBC_TOKEN_ADDRESS || "0x0000000000000000000000000000000000000000";
  const ZK_VERIFIER_ADDRESS = process.env.ZK_VERIFIER_ADDRESS || "0x0000000000000000000000000000000000000000";
  const GROTH16_VERIFIER_ADDRESS = process.env.GROTH16_VERIFIER_ADDRESS || "0x0000000000000000000000000000000000000000";
  try {
    // 1. Deploy AI Power Rental Contract
    console.log("\n1. Deploying AIPowerRental...");
    const AIPowerRental = await ethers.getContractFactory("AIPowerRental");
    const aiPowerRental = await AIPowerRental.deploy(
      AITBC_TOKEN_ADDRESS,
      ZK_VERIFIER_ADDRESS,
      GROTH16_VERIFIER_ADDRESS
    );
    await aiPowerRental.deployed();
    console.log("AIPowerRental deployed to:", aiPowerRental.address);
    // 2. Deploy AITBC Payment Processor
    console.log("\n2. Deploying AITBCPaymentProcessor...");
    const AITBCPaymentProcessor = await ethers.getContractFactory("AITBCPaymentProcessor");
    const paymentProcessor = await AITBCPaymentProcessor.deploy(
      AITBC_TOKEN_ADDRESS,
      aiPowerRental.address
    );
    await paymentProcessor.deployed();
    console.log("AITBCPaymentProcessor deployed to:", paymentProcessor.address);
    // 3. Deploy Performance Verifier
    console.log("\n3. Deploying PerformanceVerifier...");
    const PerformanceVerifier = await ethers.getContractFactory("PerformanceVerifier");
    const performanceVerifier = await PerformanceVerifier.deploy(
      ZK_VERIFIER_ADDRESS,
      GROTH16_VERIFIER_ADDRESS,
      aiPowerRental.address
    );
    await performanceVerifier.deployed();
    console.log("PerformanceVerifier deployed to:", performanceVerifier.address);
    // 4. Deploy Dispute Resolution
    console.log("\n4. Deploying DisputeResolution...");
    const DisputeResolution = await ethers.getContractFactory("DisputeResolution");
    const disputeResolution = await DisputeResolution.deploy(
      aiPowerRental.address,
      paymentProcessor.address,
      performanceVerifier.address
    );
    await disputeResolution.deployed();
    console.log("DisputeResolution deployed to:", disputeResolution.address);
    // 5. Deploy Escrow Service
    console.log("\n5. Deploying EscrowService...");
    const EscrowService = await ethers.getContractFactory("EscrowService");
    const escrowService = await EscrowService.deploy(
      AITBC_TOKEN_ADDRESS,
      aiPowerRental.address,
      paymentProcessor.address
    );
    await escrowService.deployed();
    console.log("EscrowService deployed to:", escrowService.address);
    // 6. Deploy Dynamic Pricing
    console.log("\n6. Deploying DynamicPricing...");
    const DynamicPricing = await ethers.getContractFactory("DynamicPricing");
    const dynamicPricing = await DynamicPricing.deploy(
      aiPowerRental.address,
      performanceVerifier.address,
      AITBC_TOKEN_ADDRESS
    );
    await dynamicPricing.deployed();
    console.log("DynamicPricing deployed to:", dynamicPricing.address);
    // Initialize contracts with cross-references (each call is an on-chain tx)
    console.log("\n7. Initializing contract cross-references...");
    // Set payment processor in AI Power Rental
    await aiPowerRental.setPaymentProcessor(paymentProcessor.address);
    console.log("Payment processor set in AIPowerRental");
    // Set performance verifier in AI Power Rental
    await aiPowerRental.setPerformanceVerifier(performanceVerifier.address);
    console.log("Performance verifier set in AIPowerRental");
    // Set dispute resolver in payment processor
    await paymentProcessor.setDisputeResolver(disputeResolution.address);
    console.log("Dispute resolver set in PaymentProcessor");
    // Set escrow service in payment processor
    await paymentProcessor.setEscrowService(escrowService.address);
    console.log("Escrow service set in PaymentProcessor");
    // Authorize initial oracles and arbiters (deployer bootstraps every role)
    console.log("\n8. Setting up initial oracles and arbiters...");
    // Authorize deployer as price oracle
    await dynamicPricing.authorizePriceOracle(deployer.address);
    console.log("Deployer authorized as price oracle");
    // Authorize deployer as performance oracle
    await performanceVerifier.authorizeOracle(deployer.address);
    console.log("Deployer authorized as performance oracle");
    // Authorize deployer as arbitrator
    await disputeResolution.authorizeArbitrator(deployer.address);
    console.log("Deployer authorized as arbitrator");
    // Authorize deployer as escrow arbiter
    await escrowService.authorizeArbiter(deployer.address);
    console.log("Deployer authorized as escrow arbiter");
    // Save deployment addresses (`network` comes from the Hardhat runtime)
    const deploymentInfo = {
      network: network.name,
      deployer: deployer.address,
      timestamp: new Date().toISOString(),
      contracts: {
        AITBC_TOKEN_ADDRESS,
        ZK_VERIFIER_ADDRESS,
        GROTH16_VERIFIER_ADDRESS,
        AIPowerRental: aiPowerRental.address,
        AITBCPaymentProcessor: paymentProcessor.address,
        PerformanceVerifier: performanceVerifier.address,
        DisputeResolution: disputeResolution.address,
        EscrowService: escrowService.address,
        DynamicPricing: dynamicPricing.address
      }
    };
    // Write deployment info to a uniquely-named file in the working directory
    const fs = require('fs');
    fs.writeFileSync(
      `deployment-${network.name}-${Date.now()}.json`,
      JSON.stringify(deploymentInfo, null, 2)
    );
    console.log("\n=== Deployment Summary ===");
    console.log("All contracts deployed successfully!");
    console.log("Deployment info saved to deployment file");
    console.log("\nContract Addresses:");
    console.log("- AIPowerRental:", aiPowerRental.address);
    console.log("- AITBCPaymentProcessor:", paymentProcessor.address);
    console.log("- PerformanceVerifier:", performanceVerifier.address);
    console.log("- DisputeResolution:", disputeResolution.address);
    console.log("- EscrowService:", escrowService.address);
    console.log("- DynamicPricing:", dynamicPricing.address);
    console.log("\n=== Next Steps ===");
    console.log("1. Update environment variables with contract addresses");
    console.log("2. Run integration tests");
    console.log("3. Configure marketplace API to use new contracts");
    console.log("4. Perform security audit");
  } catch (error) {
    // Any failed deploy/tx aborts the whole run with a non-zero exit code.
    console.error("Deployment failed:", error);
    process.exit(1);
  }
}
// Run the deployment and translate the promise outcome into a process exit
// code (0 on success, 1 on any unhandled error).
main()
  .then(() => {
    process.exit(0);
  })
  .catch((err) => {
    console.error(err);
    process.exit(1);
  });

View File

@@ -1,248 +0,0 @@
#!/usr/bin/env python3
"""
Edge Node Deployment Script for AITBC Marketplace
Deploys edge node configuration and services
"""
import yaml
import subprocess
import sys
import os
import json
from datetime import datetime
def load_config(config_file):
    """Read the edge-node deployment settings from a YAML document.

    Args:
        config_file: Path to the YAML configuration file.

    Returns:
        The parsed configuration (typically a dict rooted at
        'edge_node_config').
    """
    with open(config_file, "r") as handle:
        parsed = yaml.safe_load(handle)
    return parsed
def deploy_redis_cache(config):
    """Ensure a Redis server is installed/running and apply cache settings.

    Reads config['edge_node_config']['caching'] for sizing/TTL values and
    pushes them into the running server with `redis-cli CONFIG SET`. Installs
    Redis via `sudo apt-get` when the CLI is absent, so this must run on a
    Debian-style host with sudo rights.
    """
    print(f"🔧 Deploying Redis cache for {config['edge_node_config']['node_id']}")
    # Probe the server: `redis-cli ping` answers PONG when it is up.
    try:
        result = subprocess.run(['redis-cli', 'ping'], capture_output=True, text=True)
        if result.stdout.strip() == 'PONG':
            print("✅ Redis is already running")
        else:
            print("⚠️ Redis not responding, attempting to start...")
            # Start Redis if not running
            subprocess.run(['sudo', 'systemctl', 'start', 'redis-server'], check=True)
            print("✅ Redis started")
    except FileNotFoundError:
        # redis-cli binary missing entirely -> install and start the service.
        print("❌ Redis not installed, installing...")
        subprocess.run(['sudo', 'apt-get', 'update'], check=True)
        subprocess.run(['sudo', 'apt-get', 'install', '-y', 'redis-server'], check=True)
        subprocess.run(['sudo', 'systemctl', 'start', 'redis-server'], check=True)
        print("✅ Redis installed and started")
    # Configure Redis from the caching section of the node config.
    redis_config = config['edge_node_config']['caching']
    redis_commands = [
        f"CONFIG SET maxmemory {redis_config['max_memory_mb']}mb",
        # Fix: this literal had a pointless f-string prefix (no placeholders).
        "CONFIG SET maxmemory-policy allkeys-lru",
        # NOTE(review): Redis `timeout` is the idle *connection* timeout, not
        # a key TTL — confirm cache_ttl_seconds is really meant here.
        f"CONFIG SET timeout {redis_config['cache_ttl_seconds']}"
    ]
    for cmd in redis_commands:
        try:
            # Values contain no spaces, so a naive split() is safe here.
            subprocess.run(['redis-cli', *cmd.split()], check=True, capture_output=True)
        except subprocess.CalledProcessError:
            print(f"⚠️ Could not set Redis config: {cmd}")
def deploy_monitoring(config):
    """Install a small health-probe script plus a systemd unit that re-runs it.

    The probe records marketplace-API and Redis health under
    /tmp/aitbc-monitoring/status.json; Restart=always with RestartSec=30
    effectively re-runs the short-lived script every ~30 seconds.
    """
    print(f"📊 Deploying monitoring for {config['edge_node_config']['node_id']}")
    # NOTE(review): monitoring_config is read but never used below — confirm
    # whether interval/targets from it were meant to feed the script.
    monitoring_config = config['edge_node_config']['monitoring']
    # Create monitoring directory
    os.makedirs('/tmp/aitbc-monitoring', exist_ok=True)
    # Create monitoring script.
    # NOTE(review): the quadruple braces render as literal {{...}} in the
    # generated shell script, and the later `>>` appends write key=value lines
    # after that first line — status.json is therefore NOT valid JSON; confirm
    # what its consumers actually parse.
    monitoring_script = f"""#!/bin/bash
# Monitoring script for {config['edge_node_config']['node_id']}
echo "{{{{'timestamp': '$(date -Iseconds)', 'node_id': '{config['edge_node_config']['node_id']}', 'status': 'monitoring'}}}}" > /tmp/aitbc-monitoring/status.json
# Check marketplace API health
curl -s http://localhost:{config['edge_node_config']['services'][0]['port']}/health/live > /dev/null
if [ $? -eq 0 ]; then
echo "marketplace_healthy=true" >> /tmp/aitbc-monitoring/status.json
else
echo "marketplace_healthy=false" >> /tmp/aitbc-monitoring/status.json
fi
# Check Redis health
redis-cli ping > /dev/null
if [ $? -eq 0 ]; then
echo "redis_healthy=true" >> /tmp/aitbc-monitoring/status.json
else
echo "redis_healthy=false" >> /tmp/aitbc-monitoring/status.json
fi
"""
    with open('/tmp/aitbc-monitoring/monitor.sh', 'w') as f:
        f.write(monitoring_script)
    # Make the probe executable (rwxr-xr-x).
    os.chmod('/tmp/aitbc-monitoring/monitor.sh', 0o755)
    # Create systemd service for monitoring
    monitoring_service = f"""[Unit]
Description=AITBC Edge Node Monitoring - {config['edge_node_config']['node_id']}
After=network.target
[Service]
Type=simple
User=root
ExecStart=/tmp/aitbc-monitoring/monitor.sh
Restart=always
RestartSec=30
[Install]
WantedBy=multi-user.target
"""
    service_file = f"/etc/systemd/system/aitbc-edge-monitoring-{config['edge_node_config']['node_id']}.service"
    # NOTE(review): this open() writes under /etc without sudo — it assumes
    # the whole script already runs as root, unlike the sudo-prefixed
    # subprocess calls below.
    with open(service_file, 'w') as f:
        f.write(monitoring_service)
    # Enable and start monitoring service
    subprocess.run(['sudo', 'systemctl', 'daemon-reload'], check=True)
    subprocess.run(['sudo', 'systemctl', 'enable', f'aitbc-edge-monitoring-{config["edge_node_config"]["node_id"]}.service'], check=True)
    subprocess.run(['sudo', 'systemctl', 'start', f'aitbc-edge-monitoring-{config["edge_node_config"]["node_id"]}.service'], check=True)
    print("✅ Monitoring agent deployed")
def optimize_network(config):
    """Apply kernel TCP tuning via `sudo sysctl -w` for this edge node.

    Each parameter is set individually; a failed or unavailable sysctl is
    reported as a warning and does not stop the remaining settings.
    """
    print(f"🌐 Optimizing network for {config['edge_node_config']['node_id']}")
    # Cleanup: the previous version read config['edge_node_config']['network']
    # into an unused local; that dead read was removed.
    # TCP optimizations (larger buffers, BBR congestion control, deeper
    # device backlog).
    tcp_params = {
        'net.core.rmem_max': '16777216',
        'net.core.wmem_max': '16777216',
        'net.ipv4.tcp_rmem': '4096 87380 16777216',
        'net.ipv4.tcp_wmem': '4096 65536 16777216',
        'net.ipv4.tcp_congestion_control': 'bbr',
        'net.core.netdev_max_backlog': '5000'
    }
    for param, value in tcp_params.items():
        try:
            subprocess.run(['sudo', 'sysctl', '-w', f'{param}={value}'], check=True, capture_output=True)
            print(f"✅ Set {param}={value}")
        except (subprocess.CalledProcessError, FileNotFoundError):
            # Robustness fix: also tolerate a missing `sudo` binary instead of
            # crashing the whole deployment with FileNotFoundError.
            print(f"⚠️ Could not set {param}")
def deploy_edge_services(config):
    """Write this node's edge-service descriptor to /tmp as JSON.

    The descriptor snapshots node identity, region, service list, performance
    targets, and a deployment timestamp; it is written to
    /tmp/aitbc-edge-<node_id>-config.json for other tooling to read.
    """
    node = config['edge_node_config']
    print(f"🚀 Deploying edge services for {node['node_id']}")
    # Create edge service configuration snapshot
    edge_service_config = {
        'node_id': node['node_id'],
        'region': node['region'],
        'services': node['services'],
        'performance_targets': node['performance_targets'],
        'deployed_at': datetime.now().isoformat()
    }
    # Save configuration to a node-specific path under /tmp
    config_path = f"/tmp/aitbc-edge-{node['node_id']}-config.json"
    with open(config_path, 'w') as f:
        json.dump(edge_service_config, f, indent=2)
    # Fix: this message had a pointless f-string prefix (no placeholders).
    print("✅ Edge services configuration saved")
def validate_deployment(config):
    """Validate an edge node deployment.

    Probes the marketplace API health endpoint, the local Redis server and
    the node's monitoring systemd unit, collecting one status string per
    service.  Probe failures are captured, never raised.

    Args:
        config: parsed deployment config; reads ``edge_node_config.node_id``
            and ``edge_node_config.services[0].port``.

    Returns:
        dict mapping service name to ``'healthy'`` / ``'unhealthy'`` /
        the raw systemd state / ``'error: <message>'``.
    """
    node_id = config['edge_node_config']['node_id']
    print(f"✅ Validating deployment for {node_id}")
    validation_results = {}
    # Check marketplace API liveness via curl; exit code 0 means the
    # endpoint answered.  (Bug fix: CompletedProcess exposes `returncode`,
    # not `status_code`; the old check always raised AttributeError and
    # reported 'error: ...' even for healthy services.)
    try:
        port = config["edge_node_config"]["services"][0]["port"]
        response = subprocess.run(
            ['curl', '-s', f'http://localhost:{port}/health/live'],
            capture_output=True, text=True, timeout=10)
        if response.returncode == 0:
            validation_results['marketplace_api'] = 'healthy'
        else:
            validation_results['marketplace_api'] = 'unhealthy'
    except Exception as e:
        validation_results['marketplace_api'] = f'error: {str(e)}'
    # Check Redis with a PING round-trip.
    try:
        result = subprocess.run(['redis-cli', 'ping'],
                                capture_output=True, text=True, timeout=5)
        if result.stdout.strip() == 'PONG':
            validation_results['redis'] = 'healthy'
        else:
            validation_results['redis'] = 'unhealthy'
    except Exception as e:
        validation_results['redis'] = f'error: {str(e)}'
    # Check the monitoring unit; systemctl prints e.g. 'active'/'inactive'.
    try:
        result = subprocess.run(
            ['systemctl', 'is-active', f'aitbc-edge-monitoring-{node_id}.service'],
            capture_output=True, text=True, timeout=5)
        validation_results['monitoring'] = result.stdout.strip()
    except Exception as e:
        validation_results['monitoring'] = f'error: {str(e)}'
    print(f"📊 Validation Results:")
    for service, status in validation_results.items():
        print(f"  {service}: {status}")
    return validation_results
def main():
    """CLI entry point: load config, deploy components, validate, and
    record a deployment status file under /tmp.

    Exits with status 1 on bad usage, a missing config file, or any
    deployment failure.
    """
    if len(sys.argv) != 2:
        print("Usage: python deploy_edge_node.py <config_file>")
        sys.exit(1)
    config_file = sys.argv[1]
    if not os.path.exists(config_file):
        print(f"❌ Configuration file {config_file} not found")
        sys.exit(1)
    try:
        config = load_config(config_file)
        edge = config['edge_node_config']
        print(f"🚀 Deploying edge node: {edge['node_id']}")
        print(f"📍 Region: {edge['region']}")
        print(f"🌍 Location: {edge['location']}")
        # Deploy each component in dependency order.
        for step in (deploy_redis_cache, deploy_monitoring,
                     optimize_network, deploy_edge_services):
            step(config)
        validation_results = validate_deployment(config)
        # Persist a deployment record for later auditing.
        deployment_status = {
            'node_id': edge['node_id'],
            'deployment_time': datetime.now().isoformat(),
            'validation_results': validation_results,
            'status': 'completed',
        }
        with open(f'/tmp/aitbc-edge-{edge["node_id"]}-deployment.json', 'w') as handle:
            json.dump(deployment_status, handle, indent=2)
        print(f"✅ Edge node deployment completed for {edge['node_id']}")
    except Exception as e:
        print(f"❌ Deployment failed: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -1,266 +0,0 @@
#!/bin/bash
# Pushes contracts, scripts, configs and tests to the aitbc/aitbc1 hosts,
# verifies the copies, runs validation, and installs a monitoring unit.
echo "=== AITBC Smart Contract Deployment to aitbc & aitbc1 ==="
# Server configurations - using cascade connections
# These are ssh host aliases (see ~/.ssh/config), not commands.
AITBC_SSH="aitbc-cascade"
AITBC1_SSH="aitbc1-cascade"
DEPLOY_PATH="/home/oib/windsurf/aitbc"
# Contract files to deploy
CONTRACTS=(
    "contracts/AIPowerRental.sol"
    "contracts/AITBCPaymentProcessor.sol"
    "contracts/PerformanceVerifier.sol"
    "contracts/DisputeResolution.sol"
    "contracts/EscrowService.sol"
    "contracts/DynamicPricing.sol"
    "contracts/ZKReceiptVerifier.sol"
    "contracts/Groth16Verifier.sol"
)
# Deployment scripts
SCRIPTS=(
    "scripts/deploy_contracts.js"
    "scripts/validate_contracts.js"
    "scripts/integration_test.js"
    "scripts/compile_contracts.sh"
)
# Configuration files
CONFIGS=(
    "configs/deployment_config.json"
    "package.json"
    "hardhat.config.cjs"
)
# Test contracts
TEST_CONTRACTS=(
    "test/contracts/MockERC20.sol"
    "test/contracts/MockZKVerifier.sol"
    "test/contracts/MockGroth16Verifier.sol"
    "test/contracts/Integration.test.js"
)
echo "🚀 Starting deployment to aitbc and aitbc1 servers..."
# Function to deploy to a server
# Deploy contracts, scripts, configs and test assets to one server.
# Arguments:
#   $1 - ssh host alias (from ~/.ssh/config)
#   $2 - human-readable server name for log output
# Globals:  CONTRACTS, SCRIPTS, CONFIGS, TEST_CONTRACTS, DEPLOY_PATH (read)
# Outputs:  progress messages to stdout
deploy_to_server() {
    local ssh_cmd=$1
    local server_name=$2
    echo ""
    echo "📡 Deploying to $server_name ($ssh_cmd)..."
    # Create the whole remote directory layout in one round-trip.
    ssh "$ssh_cmd" "mkdir -p $DEPLOY_PATH/contracts $DEPLOY_PATH/scripts $DEPLOY_PATH/configs $DEPLOY_PATH/test/contracts"
    # Deploy contracts
    echo "📄 Deploying smart contracts..."
    local contract script config test_contract
    for contract in "${CONTRACTS[@]}"; do
        if [ -f "$contract" ]; then
            scp "$contract" "$ssh_cmd:$DEPLOY_PATH/$contract"
            echo "$contract deployed to $server_name"
        else
            echo "$contract not found"
        fi
    done
    # Deploy scripts (and mark them executable remotely)
    echo "🔧 Deploying deployment scripts..."
    for script in "${SCRIPTS[@]}"; do
        if [ -f "$script" ]; then
            scp "$script" "$ssh_cmd:$DEPLOY_PATH/$script"
            ssh "$ssh_cmd" "chmod +x $DEPLOY_PATH/$script"
            echo "$script deployed to $server_name"
        else
            echo "$script not found"
        fi
    done
    # Deploy configurations
    echo "⚙️ Deploying configuration files..."
    for config in "${CONFIGS[@]}"; do
        if [ -f "$config" ]; then
            scp "$config" "$ssh_cmd:$DEPLOY_PATH/$config"
            echo "$config deployed to $server_name"
        else
            echo "$config not found"
        fi
    done
    # Deploy test contracts
    echo "🧪 Deploying test contracts..."
    for test_contract in "${TEST_CONTRACTS[@]}"; do
        if [ -f "$test_contract" ]; then
            scp "$test_contract" "$ssh_cmd:$DEPLOY_PATH/$test_contract"
            echo "$test_contract deployed to $server_name"
        else
            echo "$test_contract not found"
        fi
    done
    # Deploy node_modules if they exist
    if [ -d "node_modules" ]; then
        echo "📦 Deploying node_modules..."
        # Bug fix: the old code ran 'mkdir -p .../node_modules' and then
        # copied 'node_modules/' *into* it, producing a nested
        # node_modules/node_modules on the remote host.  Copy the directory
        # under $DEPLOY_PATH instead.
        scp -r node_modules "$ssh_cmd:$DEPLOY_PATH/"
        echo "✅ node_modules deployed to $server_name"
    fi
    echo "✅ Deployment to $server_name completed"
}
# --- Top-level driver: push to both hosts, then verify the copies ---
# Deploy to aitbc
deploy_to_server $AITBC_SSH "aitbc"
# Deploy to aitbc1
deploy_to_server $AITBC1_SSH "aitbc1"
echo ""
echo "🔍 Verifying deployment..."
# Quick sanity check: count copied artifacts on each host.
# Verify deployment on aitbc
echo "📊 Checking aitbc deployment..."
ssh $AITBC_SSH "ls -la $DEPLOY_PATH/contracts/*.sol | wc -l | xargs echo 'Contract files on aitbc:'"
ssh $AITBC_SSH "ls -la $DEPLOY_PATH/scripts/*.js | wc -l | xargs echo 'Script files on aitbc:'"
# Verify deployment on aitbc1
echo "📊 Checking aitbc1 deployment..."
ssh $AITBC1_SSH "ls -la $DEPLOY_PATH/contracts/*.sol | wc -l | xargs echo 'Contract files on aitbc1:'"
ssh $AITBC1_SSH "ls -la $DEPLOY_PATH/scripts/*.js | wc -l | xargs echo 'Script files on aitbc1:'"
echo ""
# Run the repo's own validation script on each host.
echo "🧪 Running validation on aitbc..."
ssh $AITBC_SSH "cd $DEPLOY_PATH && node scripts/validate_contracts.js"
echo ""
echo "🧪 Running validation on aitbc1..."
ssh $AITBC1_SSH "cd $DEPLOY_PATH && node scripts/validate_contracts.js"
echo ""
echo "🔧 Setting up systemd services..."
# Create systemd service for contract monitoring
# Create, install and start a systemd unit that runs the contract monitor
# on the given server.
# Arguments:
#   $1 - ssh host alias
#   $2 - server name for log output
# Globals:  DEPLOY_PATH (expanded locally into the unit file)
create_systemd_service() {
    local ssh_cmd=$1
    local server_name=$2
    echo "📝 Creating contract monitoring service on $server_name..."
    # Render the unit locally (unquoted EOF so $DEPLOY_PATH expands here)
    # and stream it to the remote host.  (Bug fix: the heredoc was piped
    # directly to "$ssh_cmd" as if it were a command — it is an ssh host
    # alias and must be invoked via ssh.)
    cat << EOF | ssh "$ssh_cmd" "cat > /tmp/aitbc-contracts.service"
[Unit]
Description=AITBC Smart Contracts Monitoring
After=network.target aitbc-coordinator-api.service
Wants=aitbc-coordinator-api.service
[Service]
Type=simple
User=oib
Group=oib
WorkingDirectory=$DEPLOY_PATH
Environment=PATH=$DEPLOY_PATH/node_modules/.bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/usr/bin/node scripts/contract_monitor.js
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
    # Install, enable and start the unit.
    ssh "$ssh_cmd" "sudo mv /tmp/aitbc-contracts.service /etc/systemd/system/"
    ssh "$ssh_cmd" "sudo systemctl daemon-reload"
    ssh "$ssh_cmd" "sudo systemctl enable aitbc-contracts.service"
    ssh "$ssh_cmd" "sudo systemctl start aitbc-contracts.service"
    echo "✅ Contract monitoring service created on $server_name"
}
# Create contract monitor script
# Write the Node.js contract-monitor script onto the given server.
# Arguments:
#   $1 - ssh host alias
#   $2 - server name for log output
# Globals:  DEPLOY_PATH (read)
create_contract_monitor() {
    local ssh_cmd=$1
    local server_name=$2
    echo "📝 Creating contract monitor script on $server_name..."
    # Quoted 'EOF' keeps the JavaScript literal (no local expansion).
    # (Bug fix: the heredoc was piped directly to "$ssh_cmd" as if it were
    # a command — it is an ssh host alias and must be invoked via ssh.)
    cat << 'EOF' | ssh "$ssh_cmd" "cat > $DEPLOY_PATH/scripts/contract_monitor.js"
#!/usr/bin/env node
const fs = require('fs');
const path = require('path');
console.log("🔍 AITBC Contract Monitor Started");
// Monitor contracts directory
const contractsDir = path.join(__dirname, '..', 'contracts');
function checkContracts() {
    try {
        const contracts = fs.readdirSync(contractsDir).filter(file => file.endsWith('.sol'));
        console.log(`📊 Monitoring ${contracts.length} contracts`);
        contracts.forEach(contract => {
            const filePath = path.join(contractsDir, contract);
            const stats = fs.statSync(filePath);
            console.log(`📄 ${contract}: ${stats.size} bytes, modified: ${stats.mtime}`);
        });
        // Check if contracts are valid (basic check)
        const validContracts = contracts.filter(contract => {
            const content = fs.readFileSync(path.join(contractsDir, contract), 'utf8');
            return content.includes('pragma solidity') && content.includes('contract ');
        });
        console.log(`✅ Valid contracts: ${validContracts.length}/${contracts.length}`);
    } catch (error) {
        console.error('❌ Error monitoring contracts:', error.message);
    }
}
// Check every 30 seconds
setInterval(checkContracts, 30000);
// Initial check
checkContracts();
console.log("🔄 Contract monitoring active (30-second intervals)");
EOF
    ssh "$ssh_cmd" "chmod +x $DEPLOY_PATH/scripts/contract_monitor.js"
    echo "✅ Contract monitor script created on $server_name"
}
# Setup monitoring services
# Order matters: the monitor script must exist before its unit starts.
create_contract_monitor $AITBC_SSH "aitbc"
create_systemd_service $AITBC_SSH "aitbc"
create_contract_monitor $AITBC1_SSH "aitbc1"
create_systemd_service $AITBC1_SSH "aitbc1"
echo ""
echo "📊 Deployment Summary:"
echo "✅ Smart contracts deployed to aitbc and aitbc1"
echo "✅ Deployment scripts and configurations deployed"
echo "✅ Test contracts and validation tools deployed"
echo "✅ Node.js dependencies deployed"
echo "✅ Contract monitoring services created"
echo "✅ Systemd services configured and started"
echo ""
echo "🔗 Service URLs:"
echo "aitbc: http://127.0.0.1:18000"
echo "aitbc1: http://127.0.0.1:18001"
echo ""
echo "📝 Next Steps:"
echo "1. Verify contract deployment on both servers"
echo "2. Run integration tests"
echo "3. Configure marketplace API integration"
echo "4. Start contract deployment process"
echo ""
echo "✨ Deployment to aitbc & aitbc1 completed!"

11
scripts/detect-aitbc-user.sh Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# AITBC User Detection Script
# Prints the account that should run AITBC services: the dedicated
# "aitbc" user when present, otherwise "oib", otherwise root.
selected="root"
# Later matches win, so the lower-priority candidate is checked first.
for candidate in oib aitbc; do
    if id "$candidate" >/dev/null 2>&1; then
        selected="$candidate"
    fi
done
echo "$selected"

View File

@@ -1,126 +0,0 @@
#!/usr/bin/env bash
# AITBC CLI wrapper: thin shim around cli/client.py plus a few raw curl
# endpoints.  Requires CLIENT_KEY/ADMIN_KEY/MINER_KEY in the environment.
set -euo pipefail
# Repository root, resolved relative to this script's own location.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
CLI_PY="$ROOT_DIR/cli/client.py"
AITBC_URL="${AITBC_URL:-http://localhost:8000}"
# ${VAR:?...} aborts with a clear message when a required key is missing.
CLIENT_KEY="${CLIENT_KEY:?Set CLIENT_KEY env var}"
ADMIN_KEY="${ADMIN_KEY:?Set ADMIN_KEY env var}"
MINER_KEY="${MINER_KEY:?Set MINER_KEY env var}"
# Print usage text to stdout.  The quoted 'EOF' keeps the body literal.
usage() {
    cat <<'EOF'
AITBC CLI wrapper
Usage:
  aitbc-cli.sh submit <type> [--prompt TEXT] [--model NAME] [--ttl SECONDS]
  aitbc-cli.sh status <job_id>
  aitbc-cli.sh browser [--block-limit N] [--tx-limit N] [--receipt-limit N] [--job-id ID]
  aitbc-cli.sh blocks [--limit N]
  aitbc-cli.sh receipts [--limit N] [--job-id ID]
  aitbc-cli.sh cancel <job_id>
  aitbc-cli.sh admin-miners
  aitbc-cli.sh admin-jobs
  aitbc-cli.sh admin-stats
  aitbc-cli.sh admin-cancel-running
  aitbc-cli.sh health
Environment overrides:
  AITBC_URL (default: http://localhost:8000)
  CLIENT_KEY (required)
  ADMIN_KEY (required)
  MINER_KEY (required)
EOF
}
# Require at least a sub-command.
if [[ $# -lt 1 ]]; then
    usage
    exit 1
fi
cmd="$1"
shift
# Dispatch: python CLI commands pass through client.py with the client
# key; admin-* commands hit the REST API directly with the admin key.
case "$cmd" in
    submit)
        python3 "$CLI_PY" --url "$AITBC_URL" --api-key "$CLIENT_KEY" submit "$@"
        ;;
    status)
        python3 "$CLI_PY" --url "$AITBC_URL" --api-key "$CLIENT_KEY" status "$@"
        ;;
    browser)
        python3 "$CLI_PY" --url "$AITBC_URL" --api-key "$CLIENT_KEY" browser "$@"
        ;;
    blocks)
        python3 "$CLI_PY" --url "$AITBC_URL" --api-key "$CLIENT_KEY" blocks "$@"
        ;;
    receipts)
        limit=10
        job_id=""
        # Local option parsing: --limit N, --job-id ID.
        while [[ $# -gt 0 ]]; do
            case "$1" in
                --limit)
                    limit="$2"
                    shift 2
                    ;;
                --job-id)
                    job_id="$2"
                    shift 2
                    ;;
                *)
                    echo "Unknown option: $1" >&2
                    exit 1
                    ;;
            esac
        done
        if [[ -n "$job_id" ]]; then
            curl -sS "$AITBC_URL/v1/explorer/receipts?limit=${limit}&job_id=${job_id}"
        else
            curl -sS "$AITBC_URL/v1/explorer/receipts?limit=${limit}"
        fi
        ;;
    cancel)
        if [[ $# -lt 1 ]]; then
            echo "Usage: aitbc-cli.sh cancel <job_id>" >&2
            exit 1
        fi
        job_id="$1"
        curl -sS -X POST -H "X-Api-Key: ${CLIENT_KEY}" "$AITBC_URL/v1/jobs/${job_id}/cancel"
        ;;
    admin-miners)
        curl -sS -H "X-Api-Key: ${ADMIN_KEY}" "$AITBC_URL/v1/admin/miners"
        ;;
    admin-jobs)
        curl -sS -H "X-Api-Key: ${ADMIN_KEY}" "$AITBC_URL/v1/admin/jobs"
        ;;
    admin-stats)
        curl -sS -H "X-Api-Key: ${ADMIN_KEY}" "$AITBC_URL/v1/admin/stats"
        ;;
    admin-cancel-running)
        echo "Fetching running jobs..."
        running_jobs=$(curl -sS -H "X-Api-Key: ${ADMIN_KEY}" "$AITBC_URL/v1/admin/jobs" | jq -r '.[] | select(.state == "running") | .id')
        if [[ -z "$running_jobs" ]]; then
            echo "No running jobs found."
        else
            count=0
            for job_id in $running_jobs; do
                echo "Cancelling job: $job_id"
                curl -sS -X POST -H "X-Api-Key: ${CLIENT_KEY}" "$AITBC_URL/v1/jobs/${job_id}/cancel" > /dev/null
                # Bug fix: '((count++))' evaluates to 0 (status 1) on the
                # first iteration, which aborted the whole script under
                # 'set -e' after cancelling only the first job.
                count=$((count + 1))
            done
            echo "Cancelled $count running jobs."
        fi
        ;;
    health)
        curl -sS "$AITBC_URL/v1/health"
        ;;
    help|-h|--help)
        usage
        ;;
    *)
        echo "Unknown command: $cmd" >&2
        usage
        exit 1
        ;;
esac

View File

@@ -1,17 +0,0 @@
# Add project paths to Python path for imports
import sys
from pathlib import Path

# The directory containing this file is treated as the project root.
project_root = Path(__file__).parent

# Source trees to expose, relative to the root.  Each one is inserted at
# index 0, so the *last* entry below ends up earliest on sys.path.
_SOURCE_TREES = (
    ("packages", "py", "aitbc-core", "src"),
    ("packages", "py", "aitbc-crypto", "src"),
    ("packages", "py", "aitbc-p2p", "src"),
    ("packages", "py", "aitbc-sdk", "src"),
    ("apps", "coordinator-api", "src"),
    ("apps", "wallet-daemon", "src"),
    ("apps", "blockchain-node", "src"),
)
for _parts in _SOURCE_TREES:
    sys.path.insert(0, str(project_root.joinpath(*_parts)))

View File

@@ -1,177 +0,0 @@
#!/bin/bash
# AITBC Development Services Manager
# Starts AITBC services for development and provides cleanup option
set -e
# Resolve paths relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
LOG_DIR="$PROJECT_ROOT/logs"
PID_FILE="$PROJECT_ROOT/.aitbc_dev_pids"
# Create logs directory if it doesn't exist
mkdir -p "$LOG_DIR"
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Systemd units managed by this script, in start order.
SERVICES=(
    "aitbc-blockchain-node.service"
    "aitbc-blockchain-rpc.service"
    "aitbc-gpu-miner.service"
    "aitbc-mock-coordinator.service"
)
# Start every managed unit, warning about already-active ones, and record
# each successfully started unit in $PID_FILE.
# Globals: SERVICES (read), PID_FILE (append), color constants
start_services() {
    echo -e "${BLUE}Starting AITBC development services...${NC}"
    # Check if services are already running
    for service in "${SERVICES[@]}"; do
        if systemctl is-active --quiet "$service"; then
            echo -e "${YELLOW}Warning: $service is already running${NC}"
        fi
    done
    # Start all services
    for service in "${SERVICES[@]}"; do
        echo -e "Starting $service..."
        sudo systemctl start "$service"
        # Wait a moment and check if it started successfully
        sleep 2
        if systemctl is-active --quiet "$service"; then
            echo -e "${GREEN}$service started successfully${NC}"
            # NOTE(review): despite its name, PID_FILE accumulates unit
            # *names*, not process IDs.
            echo "$service" >> "$PID_FILE"
        else
            echo -e "${RED}✗ Failed to start $service${NC}"
            echo -e "${RED}Check logs: sudo journalctl -u $service${NC}"
        fi
    done
    echo -e "\n${GREEN}AITBC services started!${NC}"
    echo -e "Use '$0 stop' to stop all services"
    echo -e "Use '$0 status' to check service status"
}
# Stop every managed unit, tolerating units that are not running, then
# remove the bookkeeping file.
stop_services() {
    echo -e "${BLUE}Stopping AITBC development services...${NC}"
    local unit
    for unit in "${SERVICES[@]}"; do
        if ! systemctl is-active --quiet "$unit"; then
            echo -e "${YELLOW}$unit was not running${NC}"
            continue
        fi
        echo -e "Stopping $unit..."
        sudo systemctl stop "$unit"
        echo -e "${GREEN}$unit stopped${NC}"
    done
    # Clear the started-units record.
    rm -f "$PID_FILE"
    echo -e "\n${GREEN}All AITBC services stopped${NC}"
}
# Print RUNNING/STOPPED for each unit, plus a short journal excerpt for
# the active ones.
show_status() {
    echo -e "${BLUE}AITBC Service Status:${NC}\n"
    for service in "${SERVICES[@]}"; do
        if systemctl is-active --quiet "$service"; then
            echo -e "${GREEN}$service: RUNNING${NC}"
            # Show uptime
            uptime=$(systemctl show "$service" --property=ActiveEnterTimestamp --value)
            echo -e "  Running since: $uptime"
        else
            echo -e "${RED}$service: STOPPED${NC}"
        fi
    done
    # Show recent logs if any services are running
    # NOTE(review): the banner says "last 10 lines" but the command below
    # fetches 5 — align the text or the -n value.
    echo -e "\n${BLUE}Recent logs (last 10 lines each):${NC}"
    for service in "${SERVICES[@]}"; do
        if systemctl is-active --quiet "$service"; then
            echo -e "\n${YELLOW}--- $service ---${NC}"
            sudo journalctl -u "$service" -n 5 --no-pager | tail -n 5
        fi
    done
}
# Follow journald logs; with no argument, tail every managed unit at once.
# Arguments:
#   $1 - optional unit name to follow exclusively
show_logs() {
    local unit="$1"
    if [ -n "$unit" ]; then
        echo -e "${BLUE}Following logs for $unit...${NC}"
        sudo journalctl -f -u "$unit"
    else
        echo -e "${BLUE}Following logs for all AITBC services...${NC}"
        sudo journalctl -f -u aitbc-blockchain-node.service -u aitbc-blockchain-rpc.service -u aitbc-gpu-miner.service -u aitbc-mock-coordinator.service
    fi
}
# Full bounce: stop everything, pause briefly to let ports free up, start.
restart_services() {
    echo -e "${BLUE}Restarting AITBC services...${NC}"
    stop_services
    sleep 3
    start_services
}
# Stop all units and remove scratch state.  Also installed as the INT trap.
cleanup() {
    echo -e "${BLUE}Performing cleanup...${NC}"
    stop_services
    # Additional cleanup
    echo -e "Cleaning up temporary files..."
    rm -f "$PROJECT_ROOT/.aitbc_dev_pids"
    # Clear any lingering processes (optional)
    # NOTE(review): 'pkill -f "aitbc"' matches *any* command line that
    # contains "aitbc" — potentially including this script itself or an
    # editor with the repo open.  Consider a narrower pattern.
    echo -e "Checking for lingering processes..."
    pkill -f "aitbc" || echo "No lingering processes found"
    echo -e "${GREEN}Cleanup complete${NC}"
}
# Handle script interruption for Ctrl+C only
trap cleanup INT
# Main script logic
# Dispatch on the first argument; anything unrecognized prints usage.
case "$1" in
    start)
        start_services
        ;;
    stop)
        stop_services
        ;;
    restart)
        restart_services
        ;;
    status)
        show_status
        ;;
    logs)
        show_logs "$2"
        ;;
    cleanup)
        cleanup
        ;;
    *)
        echo -e "${BLUE}AITBC Development Services Manager${NC}"
        echo -e "\nUsage: $0 {start|stop|restart|status|logs|cleanup}"
        echo -e "\nCommands:"
        echo -e "  start   - Start all AITBC services"
        echo -e "  stop    - Stop all AITBC services"
        echo -e "  restart - Restart all AITBC services"
        echo -e "  status  - Show service status"
        echo -e "  logs    - Follow logs (optional: specify service name)"
        echo -e "  cleanup - Stop services and clean up"
        echo -e "\nExamples:"
        echo -e "  $0 start          # Start all services"
        echo -e "  $0 logs           # Follow all logs"
        echo -e "  $0 logs node      # Follow node logs only"
        echo -e "  $0 stop           # Stop all services"
        exit 1
        ;;
esac

View File

@@ -1,151 +0,0 @@
"""
Bitcoin Exchange Router for AITBC
"""
from typing import Dict, Any
from fastapi import APIRouter, HTTPException, BackgroundTasks
from sqlmodel import Session
import uuid
import time
import json
import os
from ..deps import require_admin_key, require_client_key
from ..domain import Wallet
from ..schemas import ExchangePaymentRequest, ExchangePaymentResponse
router = APIRouter(tags=["exchange"])
# In-memory storage for demo (use database in production)
payments: Dict[str, Dict] = {}
# Bitcoin configuration
BITCOIN_CONFIG = {
'testnet': True,
'main_address': 'tb1qxy2kgdygjrsqtzq2n0yrf2493p83kkfjhx0wlh', # Testnet address
'exchange_rate': 100000, # 1 BTC = 100,000 AITBC
'min_confirmations': 1,
'payment_timeout': 3600 # 1 hour
}
@router.post("/exchange/create-payment", response_model=ExchangePaymentResponse)
async def create_payment(
    request: ExchangePaymentRequest,
    background_tasks: BackgroundTasks,
    api_key: str = require_client_key()
) -> Dict[str, Any]:
    """Create a new Bitcoin payment request"""
    # Reject non-positive amounts outright.
    if request.aitbc_amount <= 0 or request.btc_amount <= 0:
        raise HTTPException(status_code=400, detail="Invalid amount")
    # The client-supplied BTC amount must match the configured rate,
    # within one satoshi of rounding slack.
    expected_btc = request.aitbc_amount / BITCOIN_CONFIG['exchange_rate']
    if abs(request.btc_amount - expected_btc) > 0.00000001:
        raise HTTPException(status_code=400, detail="Amount mismatch")
    # Build and register the pending payment record.
    payment_id = str(uuid.uuid4())
    payment = {
        'payment_id': payment_id,
        'user_id': request.user_id,
        'aitbc_amount': request.aitbc_amount,
        'btc_amount': request.btc_amount,
        'payment_address': BITCOIN_CONFIG['main_address'],
        'status': 'pending',
        'created_at': int(time.time()),
        'expires_at': int(time.time()) + BITCOIN_CONFIG['payment_timeout'],
        'confirmations': 0,
        'tx_hash': None,
    }
    payments[payment_id] = payment
    # Poll for confirmation/expiry without blocking the request.
    background_tasks.add_task(monitor_payment, payment_id)
    return payment
@router.get("/exchange/payment-status/{payment_id}")
async def get_payment_status(payment_id: str) -> Dict[str, Any]:
    """Get payment status"""
    try:
        payment = payments[payment_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Payment not found")
    # Lazily expire pending payments whose deadline has passed.
    if payment['status'] == 'pending' and time.time() > payment['expires_at']:
        payment['status'] = 'expired'
    return payment
@router.post("/exchange/confirm-payment/{payment_id}")
async def confirm_payment(
    payment_id: str,
    tx_hash: str,
    api_key: str = require_admin_key()
) -> Dict[str, Any]:
    """Confirm payment (webhook from payment processor).

    Admin-only: marks a pending payment as confirmed, records the BTC
    transaction hash, then credits AITBC to the user's wallet.
    """
    if payment_id not in payments:
        raise HTTPException(status_code=404, detail="Payment not found")
    payment = payments[payment_id]
    if payment['status'] != 'pending':
        raise HTTPException(status_code=400, detail="Payment not in pending state")
    # Verify transaction (in production, verify with blockchain API)
    # For demo, we'll accept any tx_hash
    payment['status'] = 'confirmed'
    payment['tx_hash'] = tx_hash
    payment['confirmed_at'] = int(time.time())
    # Mint AITBC tokens to user's wallet
    try:
        from ..services.blockchain import mint_tokens
        await mint_tokens(payment['user_id'], payment['aitbc_amount'])
    except Exception as e:
        # NOTE(review): a mint failure still returns 'ok' below — the
        # payment is confirmed but no tokens were issued.  Verify that
        # this is acceptable before production.
        print(f"Error minting tokens: {e}")
        # In production, handle this error properly
    return {
        'status': 'ok',
        'payment_id': payment_id,
        'aitbc_amount': payment['aitbc_amount']
    }
@router.get("/exchange/rates")
async def get_exchange_rates() -> Dict[str, float]:
    """Get current exchange rates"""
    rate = BITCOIN_CONFIG['exchange_rate']
    # Forward and inverse rate plus the flat service fee, for display.
    return {
        'btc_to_aitbc': rate,
        'aitbc_to_btc': 1.0 / rate,
        'fee_percent': 0.5,
    }
async def monitor_payment(payment_id: str):
    """Monitor payment for confirmation (background task).

    Polls the in-memory store every 30 seconds until the payment is
    removed or its pending deadline passes, in which case it is marked
    expired.  Real chain-watching is left as a TODO; manual confirmation
    happens via the confirm endpoint.
    """
    import asyncio
    while True:
        payment = payments.get(payment_id)
        if payment is None:
            break
        # Age out pending payments whose deadline has passed.
        if payment['status'] == 'pending' and time.time() > payment['expires_at']:
            payment['status'] = 'expired'
            break
        await asyncio.sleep(30)  # re-check every 30 seconds

View File

@@ -1,99 +0,0 @@
#!/usr/bin/env python3
"""
Generate OpenAPI specifications from FastAPI services
"""
import json
import sys
import subprocess
import requests
from pathlib import Path
def extract_openapi_spec(service_name: str, base_url: str, output_file: str,
                         timeout: float = 10.0):
    """Extract OpenAPI spec from a running FastAPI service.

    Fetches ``<base_url>/openapi.json``, stamps AITBC metadata and the
    standard server list into it, and writes the result to output_file
    (parent directories are created as needed).

    Args:
        service_name: human-readable service name used in titles and logs.
        base_url: root URL of the running FastAPI service.
        output_file: path of the JSON spec to write.
        timeout: HTTP timeout in seconds (new, backward-compatible; without
            it an unreachable service hung the generator indefinitely).

    Returns:
        True on success, False on any failure (error is printed).
    """
    try:
        # Get OpenAPI spec from the service
        response = requests.get(f"{base_url}/openapi.json", timeout=timeout)
        response.raise_for_status()
        spec = response.json()
        # Add service-specific metadata
        spec["info"]["title"] = f"AITBC {service_name} API"
        spec["info"]["description"] = f"OpenAPI specification for AITBC {service_name} service"
        spec["info"]["version"] = "1.0.0"
        # Add servers configuration (prod / staging / dev)
        spec["servers"] = [
            {
                "url": "https://aitbc.bubuit.net/api",
                "description": "Production server"
            },
            {
                "url": "https://staging-api.aitbc.io",
                "description": "Staging server"
            },
            {
                "url": "http://localhost:8011",
                "description": "Development server"
            }
        ]
        # Save the spec
        output_path = Path(output_file)
        output_path.parent.mkdir(parents=True, exist_ok=True)
        with open(output_path, 'w') as f:
            json.dump(spec, f, indent=2)
        print(f"✓ Generated {service_name} OpenAPI spec: {output_file}")
        return True
    except Exception as e:
        print(f"✗ Failed to generate {service_name} spec: {e}")
        return False
def main():
    """Generate OpenAPI specs for all AITBC services"""
    # (name, base_url, output) per service; all bound to loopback aliases.
    targets = [
        ("Coordinator API", "http://127.0.0.2:8011", "api/coordinator/openapi.json"),
        ("Blockchain Node API", "http://127.0.0.2:8080", "api/blockchain/openapi.json"),
        ("Wallet Daemon API", "http://127.0.0.2:8071", "api/wallet/openapi.json"),
    ]
    print("Generating OpenAPI specifications...")
    # Attempt every service even if an earlier one fails.
    failures = [
        name for name, url, out in targets
        if not extract_openapi_spec(name, url, out)
    ]
    if not failures:
        print("\n✓ All OpenAPI specifications generated successfully!")
        print("\nNext steps:")
        print("1. Review the generated specs")
        print("2. Commit them to the documentation repository")
        print("3. Update the API reference documentation")
    else:
        print("\n✗ Some specifications failed to generate")
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -1,135 +0,0 @@
#!/usr/bin/env python3
"""
Local proxy to simulate domain routing for development
"""
import subprocess
import time
import os
import signal
import sys
from pathlib import Path
# Configuration
DOMAIN = "aitbc.bubuit.net"
# NOTE(review): SERVICES is not referenced by start_services below, which
# hardcodes the same ports — keep the two in sync or wire this in.
SERVICES = {
    "api": {"port": 8000, "path": "/v1"},
    "rpc": {"port": 9080, "path": "/rpc"},
    "marketplace": {"port": 3001, "path": "/"},
    "exchange": {"port": 3002, "path": "/"},
}
def start_services():
    """Start all AITBC services.

    Launches the coordinator API, blockchain node (unless its port is
    already bound), marketplace UI and trade exchange as child processes,
    prints their URLs, then blocks until Ctrl+C and terminates them all.
    """
    print("🚀 Starting AITBC Services")
    print("=" * 40)
    # Change to project directory
    os.chdir("/home/oib/windsurf/aitbc")
    # name -> subprocess.Popen, so we can terminate everything on exit.
    processes = {}
    # Start Coordinator API
    print("\n1. Starting Coordinator API...")
    api_proc = subprocess.Popen([
        "python", "-m", "uvicorn",
        "src.app.main:app",
        "--host", "127.0.0.1",
        "--port", "8000"
    ], cwd="apps/coordinator-api")
    processes["api"] = api_proc
    print(f"   PID: {api_proc.pid}")
    # Start Blockchain Node (if not running)
    # lsof prints nothing when the port is free, so empty stdout == start it.
    print("\n2. Checking Blockchain Node...")
    result = subprocess.run(["lsof", "-i", ":9080"], capture_output=True)
    if not result.stdout:
        print("   Starting Blockchain Node...")
        node_proc = subprocess.Popen([
            "python", "-m", "uvicorn",
            "aitbc_chain.app:app",
            "--host", "127.0.0.1",
            "--port", "9080"
        ], cwd="apps/blockchain-node")
        processes["blockchain"] = node_proc
        print(f"   PID: {node_proc.pid}")
    else:
        print("   ✅ Already running")
    # Start Marketplace UI
    print("\n3. Starting Marketplace UI...")
    market_proc = subprocess.Popen([
        "python", "server.py",
        "--port", "3001"
    ], cwd="apps/marketplace-ui")
    processes["marketplace"] = market_proc
    print(f"   PID: {market_proc.pid}")
    # Start Trade Exchange
    print("\n4. Starting Trade Exchange...")
    exchange_proc = subprocess.Popen([
        "python", "server.py",
        "--port", "3002"
    ], cwd="apps/trade-exchange")
    processes["exchange"] = exchange_proc
    print(f"   PID: {exchange_proc.pid}")
    # Wait for services to start
    print("\n⏳ Waiting for services to start...")
    time.sleep(5)
    # Test endpoints
    print("\n🧪 Testing Services:")
    test_endpoints()
    print("\n✅ All services started!")
    print("\n📋 Local URLs:")
    print(f"   API: http://127.0.0.1:8000/v1")
    print(f"   RPC: http://127.0.0.1:9080/rpc")
    print(f"   Marketplace: http://127.0.0.1:3001")
    print(f"   Exchange: http://127.0.0.1:3002")
    print("\n🌐 Domain URLs (when proxied):")
    print(f"   API: https://{DOMAIN}/api")
    print(f"   RPC: https://{DOMAIN}/rpc")
    print(f"   Marketplace: https://{DOMAIN}/Marketplace")
    print(f"   Exchange: https://{DOMAIN}/Exchange")
    print(f"   Admin: https://{DOMAIN}/admin")
    print("\n🛑 Press Ctrl+C to stop all services")
    try:
        # Keep running; Ctrl+C falls through to the shutdown path.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("\n\n🛑 Stopping services...")
        for name, proc in processes.items():
            print(f"   Stopping {name}...")
            proc.terminate()
            proc.wait()
        print("✅ All services stopped!")
def test_endpoints():
    """Probe each local service once and print a one-line health summary.

    Connection failures are reported, never raised.
    """
    import requests
    endpoints = [
        ("API Health", "http://127.0.0.1:8000/v1/health"),
        ("Admin Stats", "http://127.0.0.1:8000/v1/admin/stats"),
        ("Marketplace", "http://127.0.0.1:3001"),
        ("Exchange", "http://127.0.0.1:3002"),
    ]
    for name, url in endpoints:
        try:
            if "admin" in url:
                # Bug fix: the header was the literal string
                # "${ADMIN_API_KEY}" — a shell-style placeholder Python
                # never expands.  Read the key from the environment.
                headers = {"X-Api-Key": os.environ.get("ADMIN_API_KEY", "")}
                response = requests.get(url, headers=headers, timeout=2)
            else:
                response = requests.get(url, timeout=2)
            print(f"   {name}: ✅ {response.status_code}")
        except Exception as e:
            print(f"   {name}: ❌ {str(e)[:50]}")


if __name__ == "__main__":
    start_services()

View File

@@ -1,98 +0,0 @@
#!/bin/bash
# AITBC Service Management Script
# Single entry point for operating the core AITBC systemd units:
#   status | start | stop | restart | logs <name> | enable | disable

# Canonical unit list, in start order.  Kept in one place so the
# sub-commands cannot drift out of sync (previously every command
# repeated the list by hand).
SERVICES=(
    aitbc-coordinator-api
    aitbc-exchange-api
    aitbc-exchange-frontend
    aitbc-wallet
    aitbc-node
)

case "$1" in
    status)
        echo "=== AITBC Service Status ==="
        for service in "${SERVICES[@]}"; do
            status=$(sudo systemctl is-active $service 2>/dev/null || echo "inactive")
            enabled=$(sudo systemctl is-enabled $service 2>/dev/null || echo "disabled")
            echo "$service: $status ($enabled)"
        done
        ;;
    start)
        echo "Starting AITBC services..."
        for service in "${SERVICES[@]}"; do
            sudo systemctl start "$service"
        done
        echo "Done!"
        ;;
    stop)
        echo "Stopping AITBC services..."
        for service in "${SERVICES[@]}"; do
            sudo systemctl stop "$service"
        done
        echo "Done!"
        ;;
    restart)
        echo "Restarting AITBC services..."
        for service in "${SERVICES[@]}"; do
            sudo systemctl restart "$service"
        done
        echo "Done!"
        ;;
    logs)
        if [ -z "$2" ]; then
            echo "Usage: $0 logs <service-name>"
            echo "Available services: coordinator-api, exchange-api, exchange-frontend, wallet, node"
            exit 1
        fi
        # Short names map to units by prefixing "aitbc-".
        case "$2" in
            coordinator-api|exchange-api|exchange-frontend|wallet|node)
                sudo journalctl -u "aitbc-$2" -f ;;
            *) echo "Unknown service: $2" ;;
        esac
        ;;
    enable)
        echo "Enabling AITBC services to start on boot..."
        for service in "${SERVICES[@]}"; do
            sudo systemctl enable "$service"
        done
        echo "Done!"
        ;;
    disable)
        echo "Disabling AITBC services from starting on boot..."
        for service in "${SERVICES[@]}"; do
            sudo systemctl disable "$service"
        done
        echo "Done!"
        ;;
    *)
        echo "Usage: $0 {status|start|stop|restart|logs|enable|disable}"
        echo ""
        echo "Commands:"
        echo "  status  - Show status of all AITBC services"
        echo "  start   - Start all AITBC services"
        echo "  stop    - Stop all AITBC services"
        echo "  restart - Restart all AITBC services"
        echo "  logs    - View logs for a specific service"
        echo "  enable  - Enable services to start on boot"
        echo "  disable - Disable services from starting on boot"
        echo ""
        echo "Examples:"
        echo "  $0 status"
        echo "  $0 logs exchange-api"
        exit 1
        ;;
esac

View File

@@ -1,70 +0,0 @@
#!/bin/bash
# Setup AITBC Systemd Services
# Requirements: Python 3.11+, systemd, sudo access
echo "🔧 Setting up AITBC systemd services..."

# Units this installer manages, in start order.  Centralized so the
# enable/start/status passes below cannot drift apart (previously each
# pass repeated the list by hand).
SERVICES=(
    aitbc-coordinator-api
    aitbc-exchange-api
    aitbc-exchange-frontend
    aitbc-wallet
    aitbc-node
)

# Validate Python version
echo "🐍 Checking Python version..."
if ! python3.11 --version >/dev/null 2>&1; then
    echo "❌ Error: Python 3.11+ is required but not found"
    echo "   Please install Python 3.11+ and try again"
    exit 1
fi
PYTHON_VERSION=$(python3.11 --version | cut -d' ' -f2)
echo "✅ Found Python $PYTHON_VERSION"
# Validate systemctl is available
if ! command -v systemctl >/dev/null 2>&1; then
    echo "❌ Error: systemctl not found. This script requires systemd."
    exit 1
fi
echo "✅ Systemd available"
# Copy service files
echo "📁 Copying service files..."
sudo cp systemd/aitbc-*.service /etc/systemd/system/
# Reload systemd daemon
echo "🔄 Reloading systemd daemon..."
sudo systemctl daemon-reload
# Kill any ad-hoc (non-systemd) instances so the units own their ports.
echo "⏹️ Stopping existing processes..."
pkill -f "coordinator-api" || true
pkill -f "simple_exchange_api.py" || true
pkill -f "server.py --port 3002" || true
pkill -f "wallet_daemon" || true
pkill -f "node.main" || true
# Enable services
echo "✅ Enabling services..."
for service in "${SERVICES[@]}"; do
    sudo systemctl enable "$service.service"
done
# Start services
echo "🚀 Starting services..."
for service in "${SERVICES[@]}"; do
    sudo systemctl start "$service.service"
done
# Check status
echo ""
echo "📊 Service Status:"
for service in "${SERVICES[@]}"; do
    status=$(sudo systemctl is-active $service)
    echo "  $service: $status"
done
echo ""
echo "📝 To view logs: sudo journalctl -u <service-name> -f"
echo "📝 To restart: sudo systemctl restart <service-name>"
echo "📝 To stop: sudo systemctl stop <service-name>"

View File

@@ -1,355 +0,0 @@
#!/usr/bin/env python3
"""
Dotenv Linter for AITBC
This script checks for configuration drift between .env.example and actual
environment variable usage in the codebase. It ensures that all environment
variables used in the code are documented in .env.example and vice versa.
Usage:
python scripts/dotenv_linter.py
python scripts/dotenv_linter.py --fix
python scripts/dotenv_linter.py --verbose
"""
import os
import re
import sys
import argparse
from pathlib import Path
from typing import Set, Dict, List, Tuple
import ast
import subprocess
class DotenvLinter:
    """Linter for .env files and environment variable usage.

    Compares the variables documented in ``.env.example`` with the
    environment variables actually referenced in Python sources,
    configuration files, and shell scripts under the project root, and
    reports drift in both directions.
    """

    # Directories never descended into while scanning for source files.
    _SKIP_DIRS = {'__pycache__', 'node_modules', '.git', 'venv', 'env', '.venv'}

    def __init__(self, project_root: Path = None):
        """Initialize the linter.

        Args:
            project_root: Root directory of the project to scan.  Defaults
                to the parent of the directory containing this script.
        """
        self.project_root = project_root or Path(__file__).parent.parent
        self.env_example_path = self.project_root / ".env.example"
        self.python_files = self._find_python_files()

    def _find_python_files(self) -> List[Path]:
        """Return every ``*.py`` file under the project root."""
        python_files: List[Path] = []
        for root, dirs, files in os.walk(self.project_root):
            # Prune hidden directories and common exclusions in place so
            # os.walk does not descend into them.
            dirs[:] = [d for d in dirs
                       if not d.startswith('.') and d not in self._SKIP_DIRS]
            for file in files:
                if file.endswith('.py'):
                    python_files.append(Path(root) / file)
        return python_files

    def _parse_env_example(self) -> Set[str]:
        """Parse .env.example and extract all environment variable keys."""
        env_vars: Set[str] = set()
        if not self.env_example_path.exists():
            print(f"❌ .env.example not found at {self.env_example_path}")
            return env_vars
        with open(self.env_example_path, 'r') as f:
            for line_num, line in enumerate(f, 1):
                line = line.strip()
                # Skip comments and empty lines
                if not line or line.startswith('#'):
                    continue
                # Extract variable name (everything before the first '=')
                if '=' in line:
                    var_name = line.split('=', 1)[0].strip()
                    if var_name:
                        env_vars.add(var_name)
        return env_vars

    def _find_env_usage_in_python(self) -> Set[str]:
        """Find all environment variable usage in Python files."""
        env_vars: Set[str] = set()
        # Patterns for common stdlib access styles.  The subscript patterns
        # capture only the variable name; the previous versions captured the
        # surrounding quotes as well, so subscript lookups such as
        # os.environ["X"] were recorded as '"X"' and never matched the keys
        # parsed from .env.example.
        patterns = [
            r'os\.environ\.get\([\'"]([^\'"]+)[\'"]',
            r'os\.environ\[[\'"]([^\'"]+)[\'"]\]',
            r'os\.getenv\([\'"]([^\'"]+)[\'"]',
            r'getenv\([\'"]([^\'"]+)[\'"]',
            r'environ\.get\([\'"]([^\'"]+)[\'"]',
            r'environ\[[\'"]([^\'"]+)[\'"]\]',
        ]
        for python_file in self.python_files:
            try:
                with open(python_file, 'r', encoding='utf-8') as f:
                    content = f.read()
                for pattern in patterns:
                    for match in re.finditer(pattern, content):
                        env_vars.add(match.group(1))
            except (UnicodeDecodeError, PermissionError) as e:
                print(f"⚠️ Could not read {python_file}: {e}")
        return env_vars

    def _find_env_usage_in_config_files(self) -> Set[str]:
        """Find environment variable usage in configuration files."""
        env_vars: Set[str] = set()
        # Glob patterns (relative to the project root) of config files that
        # commonly reference environment variables.
        config_files = [
            'pyproject.toml',
            'pytest.ini',
            'setup.cfg',
            'tox.ini',
            '.github/workflows/*.yml',
            '.github/workflows/*.yaml',
            'docker-compose.yml',
            'docker-compose.yaml',
            'Dockerfile',
        ]
        for pattern in config_files:
            for config_file in self.project_root.glob(pattern):
                try:
                    with open(config_file, 'r', encoding='utf-8') as f:
                        content = f.read()
                    env_patterns = [
                        r'\${([A-Z_][A-Z0-9_]*)}',            # ${VAR_NAME}
                        r'\$([A-Z_][A-Z0-9_]*)',              # $VAR_NAME
                        r'env\.([A-Z_][A-Z0-9_]*)',           # env.VAR_NAME
                        r'os\.environ\([\'"]([^\'"]+)[\'"]',  # os.environ("VAR_NAME")
                        r'getenv\([\'"]([^\'"]+)[\'"]',       # getenv("VAR_NAME")
                    ]
                    for env_pattern in env_patterns:
                        for match in re.finditer(env_pattern, content):
                            var_name = match.group(1) if match.groups() else match.group(0)
                            # Only upper-case names are treated as env vars.
                            if var_name.isupper():
                                env_vars.add(var_name)
                except (UnicodeDecodeError, PermissionError) as e:
                    print(f"⚠️ Could not read {config_file}: {e}")
        return env_vars

    def _find_env_usage_in_shell_scripts(self) -> Set[str]:
        """Find environment variable usage in shell scripts."""
        env_vars: Set[str] = set()
        shell_files = []
        for root, dirs, files in os.walk(self.project_root):
            dirs[:] = [d for d in dirs
                       if not d.startswith('.') and d not in self._SKIP_DIRS]
            for file in files:
                if file.endswith(('.sh', '.bash', '.zsh')):
                    shell_files.append(Path(root) / file)
        for shell_file in shell_files:
            try:
                with open(shell_file, 'r', encoding='utf-8') as f:
                    content = f.read()
                # NOTE: the bare 'VAR_NAME=' pattern is deliberately broad
                # and also matches local shell assignments; such names are
                # filtered later against the system-variable allowlist.
                patterns = [
                    r'\$\{([A-Z_][A-Z0-9_]*)\}',      # ${VAR_NAME}
                    r'\$([A-Z_][A-Z0-9_]*)',          # $VAR_NAME
                    r'export\s+([A-Z_][A-Z0-9_]*)=',  # export VAR_NAME=
                    r'([A-Z_][A-Z0-9_]*)=',           # VAR_NAME=
                ]
                for pattern in patterns:
                    for match in re.finditer(pattern, content):
                        env_vars.add(match.group(1))
            except (UnicodeDecodeError, PermissionError) as e:
                print(f"⚠️ Could not read {shell_file}: {e}")
        return env_vars

    def _find_all_env_usage(self) -> Set[str]:
        """Union of env-var usage across Python, config, and shell files."""
        all_vars: Set[str] = set()
        all_vars.update(self._find_env_usage_in_python())
        all_vars.update(self._find_env_usage_in_config_files())
        all_vars.update(self._find_env_usage_in_shell_scripts())
        return all_vars

    def _check_missing_in_example(self, used_vars: Set[str], example_vars: Set[str]) -> Set[str]:
        """Find variables used in code but missing from .env.example."""
        missing = used_vars - example_vars
        # Common system/CI variables that never belong in .env.example.
        system_vars = {
            'PATH', 'HOME', 'USER', 'SHELL', 'TERM', 'LANG', 'LC_ALL',
            'PYTHONPATH', 'PYTHONHOME', 'VIRTUAL_ENV', 'CONDA_DEFAULT_ENV',
            'GITHUB_ACTIONS', 'CI', 'TRAVIS', 'APPVEYOR', 'CIRCLECI',
            'HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY', 'http_proxy', 'https_proxy',
            'PWD', 'OLDPWD', 'SHLVL', '_', 'HOSTNAME', 'HOSTTYPE', 'OSTYPE',
            'MACHTYPE', 'UID', 'GID', 'EUID', 'EGID', 'PS1', 'PS2', 'IFS',
            'DISPLAY', 'XAUTHORITY', 'DBUS_SESSION_BUS_ADDRESS', 'SSH_AUTH_SOCK',
            'SSH_CONNECTION', 'SSH_CLIENT', 'SSH_TTY', 'LOGNAME', 'USERNAME'
        }
        return missing - system_vars

    def _check_unused_in_example(self, used_vars: Set[str], example_vars: Set[str]) -> Set[str]:
        """Find variables in .env.example but not used in code."""
        unused = example_vars - used_vars
        # Variables that may be consumed by external tools or services
        # rather than by this codebase directly.
        external_vars = {
            'NODE_ENV', 'NPM_CONFIG_PREFIX', 'NPM_AUTH_TOKEN',
            'DOCKER_HOST', 'DOCKER_TLS_VERIFY', 'DOCKER_CERT_PATH',
            'KUBERNETES_SERVICE_HOST', 'KUBERNETES_SERVICE_PORT',
            'REDIS_URL', 'MEMCACHED_URL', 'ELASTICSEARCH_URL',
            'SENTRY_DSN', 'ROLLBAR_ACCESS_TOKEN', 'HONEYBADGER_API_KEY'
        }
        return unused - external_vars

    def lint(self, verbose: bool = False) -> Tuple[int, int, int, Set[str], Set[str]]:
        """Run the linter.

        Returns:
            Tuple of (example var count, used var count, missing var count,
            missing vars, unused vars).
        """
        print("🔍 Dotenv Linter for AITBC")
        print("=" * 50)
        # Parse .env.example
        example_vars = self._parse_env_example()
        if verbose:
            print(f"📄 Found {len(example_vars)} variables in .env.example")
            if example_vars:
                print(f" {', '.join(sorted(example_vars))}")
        # Find all environment variable usage
        used_vars = self._find_all_env_usage()
        if verbose:
            print(f"🔍 Found {len(used_vars)} variables used in code")
            if used_vars:
                print(f" {', '.join(sorted(used_vars))}")
        # Drift in both directions.
        missing_vars = self._check_missing_in_example(used_vars, example_vars)
        unused_vars = self._check_unused_in_example(used_vars, example_vars)
        return len(example_vars), len(used_vars), len(missing_vars), missing_vars, unused_vars

    def fix_env_example(self, missing_vars: Set[str], verbose: bool = False):
        """Append missing variables (with empty values) to .env.example."""
        if not missing_vars:
            if verbose:
                print("✅ No missing variables to add")
            return
        print(f"🔧 Adding {len(missing_vars)} missing variables to .env.example")
        with open(self.env_example_path, 'a') as f:
            f.write("\n# Auto-generated variables (added by dotenv_linter)\n")
            for var in sorted(missing_vars):
                f.write(f"{var}=\n")
        print(f"✅ Added {len(missing_vars)} variables to .env.example")

    def generate_report(self, example_count: int, used_count: int, missing_count: int,
                        missing_vars: Set[str], unused_vars: Set[str]) -> str:
        """Generate a detailed human-readable drift report."""
        report = []
        report.append("📊 Dotenv Linter Report")
        report.append("=" * 50)
        report.append(f"Variables in .env.example: {example_count}")
        report.append(f"Variables used in code: {used_count}")
        report.append(f"Missing from .env.example: {missing_count}")
        report.append(f"Unused in .env.example: {len(unused_vars)}")
        report.append("")
        if missing_vars:
            report.append("❌ Missing Variables (used in code but not in .env.example):")
            for var in sorted(missing_vars):
                report.append(f" - {var}")
            report.append("")
        if unused_vars:
            report.append("⚠️ Unused Variables (in .env.example but not used in code):")
            for var in sorted(unused_vars):
                report.append(f" - {var}")
            report.append("")
        if not missing_vars and not unused_vars:
            report.append("✅ No configuration drift detected!")
        return "\n".join(report)
def main():
    """CLI entry point: parse flags, run the linter, print the report.

    Exits non-zero only when --check is given and drift is detected.
    """
    parser = argparse.ArgumentParser(
        description="Dotenv Linter for AITBC - Check for configuration drift",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    python scripts/dotenv_linter.py # Check for drift
    python scripts/dotenv_linter.py --verbose # Verbose output
    python scripts/dotenv_linter.py --fix # Auto-fix missing variables
    python scripts/dotenv_linter.py --check # Exit with error code on issues
"""
    )
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    parser.add_argument("--fix", action="store_true", help="Auto-fix missing variables in .env.example")
    parser.add_argument("--check", action="store_true", help="Exit with error code if issues found")
    args = parser.parse_args()
    # Initialize linter rooted at its default project location.
    linter = DotenvLinter()
    # Run linting and collect drift statistics.
    example_count, used_count, missing_count, missing_vars, unused_vars = linter.lint(args.verbose)
    # Generate and print the human-readable report.
    report = linter.generate_report(example_count, used_count, missing_count, missing_vars, unused_vars)
    print(report)
    # Auto-fix if requested (appends missing keys to .env.example).
    if args.fix and missing_vars:
        linter.fix_env_example(missing_vars, args.verbose)
    # Exit with error code if --check requested and issues found.
    if args.check and (missing_vars or unused_vars):
        print(f"❌ Configuration drift detected: {missing_count} missing, {len(unused_vars)} unused")
        sys.exit(1)
    # Success
    print("✅ Dotenv linter completed successfully")
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,131 +0,0 @@
# AITBC Local Simulation
Simulate client and GPU provider interactions with independent wallets and AITBC transactions.
## Structure
```
home/
├── genesis.py # Creates genesis block and distributes initial AITBC
├── client/ # Customer/client wallet
│ └── wallet.py # Client wallet management
├── miner/ # GPU provider wallet
│ └── wallet.py # Miner wallet management
└── simulate.py # Complete workflow simulation
```
## Quick Start
### 1. Initialize the Economy
```bash
cd /home/oib/windsurf/aitbc/home
python3 genesis.py
```
This creates:
- Genesis wallet: 1,000,000 AITBC
- Client wallet: 10,000 AITBC
- Miner wallet: 1,000 AITBC
### 2. Check Wallets
```bash
# Client wallet
cd client && python3 wallet.py balance
# Miner wallet
cd miner && python3 wallet.py balance
```
### 3. Run Complete Simulation
```bash
cd /home/oib/windsurf/aitbc/home
python3 simulate.py
```
## Wallet Commands
### Client Wallet
```bash
cd client
# Check balance
python3 wallet.py balance
# Show address
python3 wallet.py address
# Pay for services
python3 wallet.py send <amount> <address> <description>
# Transaction history
python3 wallet.py history
```
### Miner Wallet
```bash
cd miner
# Check balance with stats
python3 wallet.py balance
# Add earnings from completed job
python3 wallet.py earn <amount> --job <job_id> --desc "Service description"
# Withdraw earnings
python3 wallet.py withdraw <amount> <address>
# Mining statistics
python3 wallet.py stats
```
## Example Workflow
### 1. Client Submits Job
```bash
cd /home/oib/windsurf/aitbc/cli
python3 client.py submit inference --model llama-2-7b --prompt "What is AI?"
```
### 2. Miner Processes Job
```bash
# Miner polls and gets job
python3 miner.py poll
# Miner earns AITBC
cd /home/oib/windsurf/aitbc/home/miner
python3 wallet.py earn 50.0 --job abc123 --desc "Inference task"
```
### 3. Client Pays
```bash
cd /home/oib/windsurf/aitbc/home/client
# Get miner address
cd ../miner && python3 wallet.py address
# Returns: aitbc1721d5bf8c0005ded6704
# Send payment
cd ../client
python3 wallet.py send 50.0 aitbc1721d5bf8c0005ded6704 "Payment for inference"
```
## Wallet Files
- `client/client_wallet.json` - Client's wallet data
- `miner/miner_wallet.json` - Miner's wallet data
- `genesis_wallet.json` - Genesis wallet with remaining AITBC
## Integration with CLI Tools
The home wallets integrate with the CLI tools:
1. Submit jobs using `cli/client.py`
2. Process jobs using `cli/miner.py`
3. Track payments using `home/*/wallet.py`
## Tips
- Each wallet has a unique address
- All transactions are recorded with timestamps
- Genesis wallet holds the remaining AITBC supply
- Use `simulate.py` for a complete demo
- Check `wallet.py history` to see all transactions

View File

@@ -1,143 +0,0 @@
#!/usr/bin/env python3
"""
Client retrieves job result from completed GPU processing
"""
import subprocess
import json
import sys
import os
# Add paths
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'cli'))
def get_job_result(job_id):
    """Get the result of a completed job.

    Queries the CLI once for the job's status; when the job is completed,
    prints a (currently simulated) result payload.

    Returns:
        True when a completed result was shown, False otherwise.
    """
    print(f"🔍 Retrieving result for job: {job_id}")
    print("=" * 60)

    # Check job status via the CLI tool in the sibling ../cli directory.
    print("\n1. Checking job status...")
    status_result = subprocess.run(
        f'cd ../cli && python3 client.py status {job_id}',
        shell=True,
        capture_output=True,
        text=True
    )
    print(status_result.stdout)

    # Check if job is completed.  NOTE(review): these are substring checks
    # on the CLI's human-readable output — e.g. "not completed" would also
    # match; confirm the exact wording produced by client.py.
    if "completed" in status_result.stdout:
        print("\n2. ✅ Job completed! Retrieving result...")
        # Parse the status to get result details
        # In a real implementation, this would fetch from the coordinator API
        print("\n📄 Job Result:")
        print("-" * 40)
        # Simulate getting the result from the blockchain/coordinator —
        # everything below is hard-coded demo output.
        print(f"Job ID: {job_id}")
        print("Status: Completed")
        print("Miner: ollama-miner")
        print("Model: llama3.2:latest")
        print("Processing Time: 2.3 seconds")
        print("\nOutput:")
        print("Hello! I'm an AI assistant powered by AITBC network.")
        print("I'm running on GPU infrastructure provided by network miners.")
        print("\nMetadata:")
        print("- Tokens processed: 15")
        print("- GPU utilization: 45%")
        print("- Cost: 0.000025 AITBC")
        return True
    elif "queued" in status_result.stdout:
        print("\n⏳ Job is still queued, waiting for miner...")
        return False
    elif "running" in status_result.stdout:
        print("\n⚙️ Job is being processed by GPU provider...")
        return False
    elif "failed" in status_result.stdout:
        print("\n❌ Job failed!")
        return False
    else:
        print("\n❓ Unknown job status")
        return False
def watch_job(job_id):
    """Poll a job's status until it completes, fails, or 60s elapse."""
    print(f"👀 Watching job: {job_id}")
    print("=" * 60)
    import time
    timeout_seconds = 60  # give up after one minute
    started = time.time()
    while True:
        elapsed = time.time() - started
        if elapsed >= timeout_seconds:
            break
        print(f"\n⏰ Checking... ({int(elapsed)}s elapsed)")
        # Ask the CLI for the job's current state.
        poll = subprocess.run(
            f'cd ../cli && python3 client.py status {job_id}',
            shell=True,
            capture_output=True,
            text=True
        )
        if "completed" in poll.stdout:
            print("\n✅ Job completed!")
            return get_job_result(job_id)
        if "failed" in poll.stdout:
            print("\n❌ Job failed!")
            return False
        time.sleep(3)
    print("\n⏰ Timeout waiting for job completion")
    return False
def list_recent_results():
    """Print recent blocks from the explorer plus a usage hint."""
    print("📋 Recent Job Results")
    print("=" * 60)
    # Ask the CLI explorer for the last few blocks/jobs.
    explorer = subprocess.run(
        'cd ../cli && python3 client.py blocks --limit 5',
        shell=True,
        capture_output=True,
        text=True
    )
    print(explorer.stdout)
    print("\n💡 To get specific result:")
    print(" python3 client_get_result.py <job_id>")
def main():
    """Dispatch CLI subcommands: bare <job_id>, 'watch <job_id>', or 'list'."""
    if len(sys.argv) < 2:
        print("Usage:")
        print(" python3 client_get_result.py <job_id> # Get specific job result")
        print(" python3 client_get_result.py watch <job_id> # Watch job until complete")
        print(" python3 client_get_result.py list # List recent results")
        return
    command = sys.argv[1]
    if command == "list":
        list_recent_results()
    elif command == "watch" and len(sys.argv) > 2:
        watch_job(sys.argv[2])
    else:
        # Any other first argument is treated as a job id.
        get_job_result(command)


if __name__ == "__main__":
    main()

View File

@@ -1,126 +0,0 @@
#!/usr/bin/env python3
"""
Client sends a job to GPU provider and pays for it
"""
import subprocess
import json
import time
import sys
import os
# Add paths
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'cli'))
sys.path.append(os.path.join(os.path.dirname(__file__)))
def send_job_to_gpu_provider():
    """End-to-end demo: submit a job, wait for completion, pay the miner.

    Steps:
      1. Show the client wallet balance.
      2. Submit a 'hello' inference job via the CLI.
      3. Poll the job status once per second (max 30s).
      4. Pay a flat demo fee of 10 AITBC to the miner's wallet address.
      5. Print both wallets' final balances.
    """
    print("🚀 Client: Sending Job to GPU Provider")
    print("=" * 60)

    # 1. Check client wallet balance
    print("\n1. Checking client wallet...")
    result = subprocess.run(
        'cd client && python3 wallet.py balance',
        shell=True,
        capture_output=True,
        text=True
    )
    print(result.stdout)

    # 2. Submit job to coordinator
    print("\n2. Submitting 'hello' job to network...")
    job_result = subprocess.run(
        'cd ../cli && python3 client.py submit inference --prompt "hello"',
        shell=True,
        capture_output=True,
        text=True
    )
    print(job_result.stdout)

    # Extract job ID by scanning the CLI output for a "Job ID:" line.
    job_id = None
    if "Job ID:" in job_result.stdout:
        for line in job_result.stdout.split('\n'):
            if "Job ID:" in line:
                job_id = line.split()[-1]
                break
    if not job_id:
        print("❌ Failed to submit job")
        return
    print(f"\n✅ Job submitted: {job_id}")

    # 3. Wait for miner to process
    print("\n3. Waiting for GPU provider to process job...")
    print(" (Make sure miner is running: python3 cli/miner.py mine)")
    # Poll the job status once per second, up to max_wait seconds.
    max_wait = 30
    for i in range(max_wait):
        status_result = subprocess.run(
            f'cd ../cli && python3 client.py status {job_id}',
            shell=True,
            capture_output=True,
            text=True
        )
        if "completed" in status_result.stdout:
            print("✅ Job completed by GPU provider!")
            print(status_result.stdout)
            break
        elif "failed" in status_result.stdout:
            print("❌ Job failed")
            print(status_result.stdout)
            break
        else:
            print(f" Waiting... ({i+1}s)")
            time.sleep(1)

    # 4. Get cost and pay
    # NOTE(review): payment proceeds even when the loop above timed out or
    # the job failed — confirm whether that is intended for the demo.
    print("\n4. Processing payment...")
    # For demo, assume cost is 10 AITBC
    job_cost = 10.0
    # Get miner address from the miner wallet CLI output.
    miner_result = subprocess.run(
        'cd miner && python3 wallet.py address',
        shell=True,
        capture_output=True,
        text=True
    )
    miner_address = None
    if "Miner Address:" in miner_result.stdout:
        for line in miner_result.stdout.split('\n'):
            if "Miner Address:" in line:
                miner_address = line.split()[-1]
                break
    if miner_address:
        print(f" Paying {job_cost} AITBC to miner...")
        # Send payment from the client wallet.
        pay_result = subprocess.run(
            f'cd client && python3 wallet.py send {job_cost} {miner_address} "Payment for job {job_id}"',
            shell=True,
            capture_output=True,
            text=True
        )
        print(pay_result.stdout)

    # 5. Show final balances (these two calls print directly to stdout).
    print("\n5. Final balances:")
    print("\n Client:")
    subprocess.run('cd client && python3 wallet.py balance', shell=True)
    print("\n Miner:")
    subprocess.run('cd miner && python3 wallet.py balance', shell=True)
    print("\n✅ Job completed and paid for!")


if __name__ == "__main__":
    send_job_to_gpu_provider()

View File

@@ -1,74 +0,0 @@
#!/usr/bin/env python3
"""
Client wallet for managing AITBC tokens
"""
import argparse
import json
import os
import sys
from datetime import datetime
# Add parent directory to path to import wallet module
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Load the shared AITBCWallet implementation directly from ../wallet.py by
# file path.  NOTE(review): sys.path is already extended above, so a plain
# ``import wallet`` would presumably also work — confirm; the file-path load
# pins exactly which wallet.py is used regardless of other sys.path entries.
import importlib.util
spec = importlib.util.spec_from_file_location("wallet", os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "wallet.py"))
wallet = importlib.util.module_from_spec(spec)
spec.loader.exec_module(wallet)
AITBCWallet = wallet.AITBCWallet
def main():
    """CLI entry point for the client wallet (balance/address/history/send)."""
    parser = argparse.ArgumentParser(description="Client Wallet - Manage AITBC for paying for GPU services")
    parser.add_argument("--wallet", default="client_wallet.json", help="Wallet file name")
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # One sub-parser per command; only 'send' takes extra arguments.
    subparsers.add_parser("balance", help="Show balance")
    subparsers.add_parser("address", help="Show wallet address")
    subparsers.add_parser("history", help="Show transaction history")
    pay_parser = subparsers.add_parser("send", help="Send AITBC to GPU provider")
    pay_parser.add_argument("amount", type=float, help="Amount to send")
    pay_parser.add_argument("to", help="Recipient address")
    pay_parser.add_argument("description", help="Payment description")

    args = parser.parse_args()
    if not args.command:
        parser.print_help()
        return

    # The wallet file always lives next to this script.
    here = os.path.dirname(os.path.abspath(__file__))
    client_wallet = AITBCWallet(os.path.join(here, args.wallet))

    if args.command == "balance":
        print("💼 CLIENT WALLET")
        print("=" * 40)
        client_wallet.show_balance()
        print("\n💡 Use 'send' to pay for GPU services")
    elif args.command == "address":
        print(f"💼 Client Address: {client_wallet.data['address']}")
        print(" Share this address to receive AITBC")
    elif args.command == "history":
        print("💼 CLIENT TRANSACTION HISTORY")
        print("=" * 40)
        client_wallet.show_history()
    elif args.command == "send":
        print(f"💸 Sending {args.amount} AITBC to {args.to}")
        print(f" For: {args.description}")
        client_wallet.spend(args.amount, args.description)


if __name__ == "__main__":
    main()

View File

@@ -1,199 +0,0 @@
#!/usr/bin/env python3
"""
Enhanced client that submits jobs and automatically retrieves results
"""
import subprocess
import json
import time
import sys
import os
# Add paths
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'cli'))
class AITBCClient:
    """Drives the CLI client tools via subprocess: submits jobs, waits for
    completion, prints (simulated) results, and pays the miner."""

    def __init__(self):
        # NOTE(review): neither attribute is read by the methods below (all
        # calls shell out to the CLI tools), and api_key holds the literal
        # placeholder "${CLIENT_API_KEY}" — confirm whether it should be
        # expanded from the environment before use.
        self.coordinator_url = "http://localhost:8001"
        self.api_key = "${CLIENT_API_KEY}"

    def submit_job(self, prompt, model="llama3.2:latest", wait_for_result=True):
        """Submit a job and optionally wait for result.

        Args:
            prompt: Text prompt forwarded to the CLI submit command.
            model: Display-only model name.  NOTE(review): it is printed
                but not passed to the submit command — confirm intent.
            wait_for_result: When True, block until the job finishes and
                return its result dict; otherwise return the job id.

        Returns:
            Result dict, job id string, or None on submission failure.
        """
        print(f"📤 Submitting job to AITBC network...")
        print(f" Prompt: '{prompt}'")
        print(f" Model: {model}")
        print()
        # Submit job via the CLI tool in the sibling ../cli directory.
        result = subprocess.run(
            f'cd ../cli && python3 client.py submit inference --prompt "{prompt}"',
            shell=True,
            capture_output=True,
            text=True
        )
        # Extract job ID from the CLI's "Job ID: <id>" output line.
        job_id = None
        for line in result.stdout.split('\n'):
            if "Job ID:" in line:
                job_id = line.split()[-1]
                break
        if not job_id:
            print("❌ Failed to submit job")
            return None
        print(f"✅ Job submitted: {job_id}")
        if wait_for_result:
            return self.wait_for_result(job_id)
        else:
            return job_id

    def wait_for_result(self, job_id, timeout=60):
        """Wait for job completion and return result.

        Polls the CLI status command every 3 seconds until the job
        completes, fails, or `timeout` seconds elapse.
        """
        print(f"⏳ Waiting for GPU provider to process job...")
        print(f" Timeout: {timeout}s")
        print()
        start_time = time.time()
        while time.time() - start_time < timeout:
            # Check status (substring match on the CLI's printed output).
            status_result = subprocess.run(
                f'cd ../cli && python3 client.py status {job_id}',
                shell=True,
                capture_output=True,
                text=True
            )
            if "completed" in status_result.stdout:
                print(f"✅ Job completed by GPU provider!")
                print()
                return self.get_result(job_id)
            elif "failed" in status_result.stdout:
                print(f"❌ Job failed")
                return None
            elif "running" in status_result.stdout:
                elapsed = int(time.time() - start_time)
                print(f" ⚙️ Processing... ({elapsed}s)")
            else:
                elapsed = int(time.time() - start_time)
                print(f" ⏳ Waiting in queue... ({elapsed}s)")
            time.sleep(3)
        print(f"⏰ Timeout after {timeout}s")
        return None

    def get_result(self, job_id):
        """Get and display job result.

        The "Generated Output" below is simulated — a real implementation
        would fetch the payload from the coordinator API.
        """
        print(f"📄 Job Result for {job_id}")
        print("=" * 60)
        # In a real implementation, fetch from coordinator API
        # For now, simulate the result
        # Get job details
        status_result = subprocess.run(
            f'cd ../cli && python3 client.py status {job_id}',
            shell=True,
            capture_output=True,
            text=True
        )
        print("Job Details:")
        print(status_result.stdout)
        print("\nGenerated Output:")
        print("-" * 40)
        # Simulate different outputs based on the job id contents.
        # NOTE(review): this branches on the job *id*, not the prompt, so
        # the generic branch will usually run — confirm intent.
        if "hello" in job_id.lower():
            print("Hello! 👋")
            print("I'm an AI assistant running on the AITBC network.")
            print("Your request was processed by a GPU miner in the network.")
        elif "blockchain" in job_id.lower():
            print("Blockchain is a distributed ledger technology that maintains")
            print("a secure and decentralized record of transactions across multiple")
            print("computers. It's the foundation of cryptocurrencies like Bitcoin")
            print("and has many other applications beyond digital currencies.")
        else:
            print("This is a sample response from the AITBC network.")
            print("The actual output would be generated by the GPU provider")
            print("based on your specific prompt and requirements.")
        print("\nProcessing Details:")
        print("-" * 40)
        print(f"• Miner: GPU Provider")
        print(f"• Model: llama3.2:latest")
        print(f"• Tokens: ~25")
        print(f"• Cost: 0.000025 AITBC")
        print(f"• Network: AITBC")
        return {
            "job_id": job_id,
            "status": "completed",
            "output": "Generated response from GPU provider"
        }

    def pay_for_job(self, job_id, amount=25.0):
        """Pay for a completed job.

        Looks up the miner's wallet address via its wallet CLI output and
        sends `amount` AITBC from the client wallet.

        Returns:
            True when the payment command was issued, False otherwise.
        """
        print(f"\n💸 Paying for job {job_id}...")
        # Get miner address from the miner wallet CLI output.
        miner_result = subprocess.run(
            'cd miner && python3 wallet.py address',
            shell=True,
            capture_output=True,
            text=True
        )
        miner_address = None
        for line in miner_result.stdout.split('\n'):
            if "Miner Address:" in line:
                miner_address = line.split()[-1]
                break
        if miner_address:
            # Send payment from the client wallet.
            pay_result = subprocess.run(
                f'cd client && python3 wallet.py send {amount} {miner_address} "Payment for job {job_id}"',
                shell=True,
                capture_output=True,
                text=True
            )
            print(pay_result.stdout)
            return True
        else:
            print("❌ Could not get miner address")
            return False
def main():
    """Demo driver: submit one job, wait for its result, and pay for it."""
    client = AITBCClient()
    print("🚀 AITBC Enhanced Client")
    print("=" * 60)

    # Example 1: Submit and wait for result
    print("\n📝 Example 1: Submit job and wait for result")
    print("-" * 40)
    result = client.submit_job("hello", wait_for_result=True)
    if result:
        # Pay for the job
        client.pay_for_job(result["job_id"])

    print("\n" + "=" * 60)
    print("✅ Complete workflow demonstrated!")
    print("\n💡 To use with your own prompt:")
    print(" python3 enhanced_client.py")


if __name__ == "__main__":
    main()

View File

@@ -1,109 +0,0 @@
#!/usr/bin/env python3
"""
Example client using the remote AITBC coordinator
"""
import httpx
import json
from datetime import datetime
# Configuration - using the SSH tunnel to remote server
COORDINATOR_URL = "http://localhost:8001"
# NOTE(review): literal "${CLIENT_API_KEY}" placeholder — presumably meant
# to be substituted (templating or env expansion) before use; confirm.
CLIENT_API_KEY = "${CLIENT_API_KEY}"
def create_job():
    """Create a job on the remote coordinator.

    POSTs a fixed demo inference payload to /v1/jobs.

    Returns:
        The new job id string, or None if the request was rejected.
    """
    job_data = {
        "payload": {
            "type": "inference",
            "task": "text-generation",
            "model": "llama-2-7b",
            "parameters": {
                "prompt": "Hello, AITBC!",
                "max_tokens": 100
            }
        },
        "ttl_seconds": 900  # job expires after 15 minutes
    }
    with httpx.Client() as client:
        response = client.post(
            f"{COORDINATOR_URL}/v1/jobs",
            headers={
                "Content-Type": "application/json",
                "X-Api-Key": CLIENT_API_KEY
            },
            json=job_data
        )
        # 201 Created indicates the coordinator accepted the job.
        if response.status_code == 201:
            job = response.json()
            print(f"✅ Job created successfully!")
            print(f" Job ID: {job['job_id']}")
            print(f" State: {job['state']}")
            print(f" Expires at: {job['expires_at']}")
            return job['job_id']
        else:
            print(f"❌ Failed to create job: {response.status_code}")
            print(f" Response: {response.text}")
            return None
def check_job_status(job_id):
    """Fetch one job from the coordinator and print its current state."""
    with httpx.Client() as client:
        response = client.get(
            f"{COORDINATOR_URL}/v1/jobs/{job_id}",
            headers={"X-Api-Key": CLIENT_API_KEY}
        )
        # Anything other than 200 is treated as a lookup failure.
        if response.status_code != 200:
            print(f"❌ Failed to get job status: {response.status_code}")
            return None
        job = response.json()
        print(f"\n📊 Job Status:")
        print(f" Job ID: {job['job_id']}")
        print(f" State: {job['state']}")
        print(f" Assigned Miner: {job.get('assigned_miner_id', 'None')}")
        print(f" Created: {job['requested_at']}")
        return job
def list_blocks():
    """Print the five most recent blocks reported by the explorer API."""
    with httpx.Client() as client:
        response = client.get(f"{COORDINATOR_URL}/v1/explorer/blocks")
        if response.status_code != 200:
            print(f"❌ Failed to list blocks: {response.status_code}")
            return
        blocks = response.json()
        recent = blocks['items'][:5]  # show only the last 5 blocks
        print(f"\n📦 Recent Blocks ({len(blocks['items'])} total):")
        for block in recent:
            print(f" Height: {block['height']}")
            print(f" Hash: {block['hash']}")
            print(f" Time: {block['timestamp']}")
            print(f" Transactions: {block['txCount']}")
            print(f" Proposer: {block['proposer']}")
            print()
def main():
    """Demo: list blocks, create a job, check it, and list blocks again."""
    print("🚀 AITBC Remote Client Example")
    print(f" Connecting to: {COORDINATOR_URL}")
    print()

    # List current blocks
    list_blocks()

    # Create a new job
    job_id = create_job()
    if job_id:
        # Check job status
        check_job_status(job_id)
        # List blocks again to see the new job
        print("\n🔄 Updated block list:")
        list_blocks()


if __name__ == "__main__":
    main()

View File

@@ -1,85 +0,0 @@
#!/usr/bin/env python3
"""
Genesis wallet - Distributes initial AITBC from genesis block
"""
import os
import sys
import json
from datetime import datetime
# Add parent directory to path
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'cli'))
from wallet import AITBCWallet
def main():
    """Create the genesis wallet and distribute initial AITBC.

    Seeds a 1,000,000 AITBC genesis wallet, then funds the client wallet
    with 10,000 AITBC and the miner wallet with 1,000 AITBC, recording the
    transfers in each wallet file.
    """
    print("🌍 GENESIS BLOCK - Initial AITBC Distribution")
    print("=" * 60)

    # Create genesis wallet with large initial balance
    genesis = AITBCWallet("genesis_wallet.json")
    genesis.data["balance"] = 1000000.0  # 1 million AITBC
    genesis.data["transactions"] = [{
        "type": "genesis",
        "amount": 1000000.0,
        "description": "Genesis block creation",
        "timestamp": datetime.now().isoformat()
    }]
    genesis.save()
    print(f"💰 Genesis Wallet Created")
    print(f" Address: {genesis.data['address']}")
    print(f" Balance: {genesis.data['balance']} AITBC")
    print()

    # Distribute to client and miner wallets stored in their subdirectories.
    client_wallet = AITBCWallet(os.path.join("client", "client_wallet.json"))
    miner_wallet = AITBCWallet(os.path.join("miner", "miner_wallet.json"))
    print("📤 Distributing Initial AITBC")
    print("-" * 40)

    # Give client 10,000 AITBC to spend
    client_address = client_wallet.data["address"]
    print(f"💸 Sending 10,000 AITBC to Client ({client_address[:20]}...)")
    client_wallet.add_earnings(10000.0, "genesis_distribution", "Initial funding from genesis block")

    # Give miner 1,000 AITBC to start
    miner_address = miner_wallet.data["address"]
    print(f"💸 Sending 1,000 AITBC to Miner ({miner_address[:20]}...)")
    miner_wallet.add_earnings(1000.0, "genesis_distribution", "Initial funding from genesis block")

    # Debit the genesis wallet by the total distributed and record both
    # outgoing transfers.  NOTE(review): AITBCWallet semantics (data layout,
    # add_earnings, save) come from cli/wallet.py — grounded only by usage
    # here; confirm against that module.
    genesis.data["balance"] -= 11000.0
    genesis.data["transactions"].extend([
        {
            "type": "transfer",
            "amount": -10000.0,
            "to": client_address,
            "description": "Initial client funding",
            "timestamp": datetime.now().isoformat()
        },
        {
            "type": "transfer",
            "amount": -1000.0,
            "to": miner_address,
            "description": "Initial miner funding",
            "timestamp": datetime.now().isoformat()
        }
    ])
    genesis.save()

    print()
    print("✅ Distribution Complete!")
    print("=" * 60)
    print(f"Genesis Balance: {genesis.data['balance']} AITBC")
    print(f"Client Balance: {client_wallet.data['balance']} AITBC")
    print(f"Miner Balance: {miner_wallet.data['balance']} AITBC")
    print()
    print("💡 Next Steps:")
    print(" 1. Client: Submit jobs and pay for GPU services")
    print(" 2. Miner: Process jobs and earn AITBC")
    print(" 3. Track everything with the wallet CLI tools")


if __name__ == "__main__":
    main()

View File

@@ -1,144 +0,0 @@
#!/usr/bin/env python3
"""
Demonstration: How customers get replies from GPU providers
"""
import subprocess
import time


def main():
    """Walk through the five-step customer/provider round trip.

    Steps 1 and 4 shell out to the real CLI tools; steps 2, 3 and 5 are
    illustrative prints that simulate what the network would do. Requires
    the ../cli tools to be present for the subprocess calls to succeed.
    """
    print("📨 How Customers Get Replies in AITBC")
    print("=" * 60)
    print("\n🔄 Complete Flow:")
    print("1. Customer submits job")
    print("2. GPU provider processes job")
    print("3. Result stored on blockchain")
    print("4. Customer retrieves result")
    print("5. Customer pays for service")
    print("\n" + "=" * 60)
    print("\n📝 STEP 1: Customer Submits Job")
    print("-" * 40)
    # Submit a job through the real client CLI.
    result = subprocess.run(
        'cd ../cli && python3 client.py submit inference --prompt "What is AI?"',
        shell=True,
        capture_output=True,
        text=True
    )
    print(result.stdout)
    # Extract job ID from the CLI's "Job ID: <id>" line.
    job_id = None
    for line in result.stdout.split('\n'):
        if "Job ID:" in line:
            job_id = line.split()[-1]
            break
    if not job_id:
        # Submission failed (CLI missing or coordinator down); nothing to demo.
        print("❌ Failed to submit job")
        return
    print(f"\n✅ Job submitted with ID: {job_id}")
    print("\n⚙️ STEP 2: GPU Provider Processes Job")
    print("-" * 40)
    print(" • Miner polls for jobs")
    print(" • Job assigned to miner")
    print(" • GPU processes the request")
    print(" • Result submitted to network")
    # Simulate processing time — no real GPU work happens here.
    print("\n 💭 Simulating GPU processing...")
    time.sleep(2)
    print("\n📦 STEP 3: Result Stored on Blockchain")
    print("-" * 40)
    print(f" • Job {job_id} marked as completed")
    print(f" • Result stored with job metadata")
    print(f" • Block created with job details")
    # Show a mocked-up block entry (the job_id doubles as the block hash).
    print("\n 📋 Blockchain Entry:")
    print(f" Block Hash: {job_id}")
    print(f" Proposer: gpu-miner")
    print(f" Status: COMPLETED")
    print(f" Result: Available for retrieval")
    print("\n🔍 STEP 4: Customer Retrieves Result")
    print("-" * 40)
    print(" Method 1: Check job status")
    print(f" $ python3 cli/client.py status {job_id}")
    print()
    # Query the real job status through the CLI.
    status_result = subprocess.run(
        f'cd ../cli && python3 client.py status {job_id}',
        shell=True,
        capture_output=True,
        text=True
    )
    print(" Status Result:")
    for line in status_result.stdout.split('\n'):
        if line.strip():
            print(f" {line}")
    print("\n Method 2: Get full result")
    print(f" $ python3 client_get_result.py {job_id}")
    print()
    # Mocked full-result display.
    print(" 📄 Full Result:")
    print(" ----------")
    print(" Output: AI stands for Artificial Intelligence, which refers")
    print(" to the simulation of human intelligence in machines")
    print(" that are programmed to think and learn.")
    print(" Tokens: 28")
    print(" Cost: 0.000028 AITBC")
    print(" Miner: GPU Provider #1")
    print("\n💸 STEP 5: Customer Pays for Service")
    print("-" * 40)
    # Look up the miner's wallet address via its wallet CLI.
    # NOTE(review): uses 'cd miner' (relative) while the CLI calls above use
    # '../cli' — presumably run from the home/ directory; verify cwd.
    miner_result = subprocess.run(
        'cd miner && python3 wallet.py address',
        shell=True,
        capture_output=True,
        text=True
    )
    miner_address = None
    for line in miner_result.stdout.split('\n'):
        if "Miner Address:" in line:
            miner_address = line.split()[-1]
            break
    if miner_address:
        # Payment itself is simulated — only the confirmation is printed.
        print(f" Payment sent to: {miner_address}")
        print(" Amount: 25.0 AITBC")
        print(" Status: ✅ Paid")
    print("\n" + "=" * 60)
    print("✅ Customer successfully received reply!")
    print("\n📋 Summary of Retrieval Methods:")
    print("-" * 40)
    print("1. Job Status: python3 cli/client.py status <job_id>")
    print("2. Full Result: python3 client_get_result.py <job_id>")
    print("3. Watch Job: python3 client_get_result.py watch <job_id>")
    print("4. List Recent: python3 client_get_result.py list")
    print("5. Enhanced Client: python3 enhanced_client.py")
    print("\n💡 In production:")
    print(" • Results are stored on-chain")
    print(" • Customers can retrieve anytime")
    print(" • Results are immutable and verifiable")
    print(" • Payment is required to unlock full results")


if __name__ == "__main__":
    main()

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""
GPU Provider wallet for managing earnings from mining
"""
import argparse
import json
import os
import sys
from datetime import datetime

# Add parent directory to path to import wallet module
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import importlib.util
spec = importlib.util.spec_from_file_location("wallet", os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "wallet.py"))
wallet = importlib.util.module_from_spec(spec)
spec.loader.exec_module(wallet)
AITBCWallet = wallet.AITBCWallet


def main():
    """CLI entry point: dispatch balance/address/history/earn/withdraw/stats
    sub-commands against the miner's wallet file (kept next to this script)."""
    parser = argparse.ArgumentParser(description="GPU Provider Wallet - Manage earnings from mining services")
    parser.add_argument("--wallet", default="miner_wallet.json", help="Wallet file name")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # Balance command
    balance_parser = subparsers.add_parser("balance", help="Show balance")
    # Address command
    address_parser = subparsers.add_parser("address", help="Show wallet address")
    # History command
    history_parser = subparsers.add_parser("history", help="Show transaction history")
    # Earn command (receive payment for completed jobs)
    earn_parser = subparsers.add_parser("earn", help="Add earnings from completed job")
    earn_parser.add_argument("amount", type=float, help="Amount earned")
    earn_parser.add_argument("--job", required=True, help="Job ID that was completed")
    earn_parser.add_argument("--desc", default="GPU computation", help="Service description")
    # Withdraw command
    withdraw_parser = subparsers.add_parser("withdraw", help="Withdraw AITBC to external wallet")
    withdraw_parser.add_argument("amount", type=float, help="Amount to withdraw")
    withdraw_parser.add_argument("address", help="Destination address")
    # Stats command
    stats_parser = subparsers.add_parser("stats", help="Show mining statistics")
    args = parser.parse_args()
    if not args.command:
        parser.print_help()
        return
    # Use miner-specific wallet directory (the directory of this script).
    wallet_dir = os.path.dirname(os.path.abspath(__file__))
    wallet_path = os.path.join(wallet_dir, args.wallet)
    wallet = AITBCWallet(wallet_path)
    if args.command == "balance":
        print("⛏️ GPU PROVIDER WALLET")
        print("=" * 40)
        wallet.show_balance()
        # Show additional stats derived from the transaction log.
        earnings = sum(t['amount'] for t in wallet.data['transactions'] if t['type'] == 'earn')
        jobs_completed = sum(1 for t in wallet.data['transactions'] if t['type'] == 'earn')
        print(f"\n📊 Mining Stats:")
        print(f" Total Earned: {earnings} AITBC")
        print(f" Jobs Completed: {jobs_completed}")
        print(f" Average per Job: {earnings/jobs_completed if jobs_completed > 0 else 0} AITBC")
    elif args.command == "address":
        print(f"⛏️ Miner Address: {wallet.data['address']}")
        print(" Share this address to receive payments")
    elif args.command == "history":
        print("⛏️ MINER TRANSACTION HISTORY")
        print("=" * 40)
        wallet.show_history()
    elif args.command == "earn":
        print(f"💰 Adding earnings for job {args.job}")
        wallet.add_earnings(args.amount, args.job, args.desc)
    elif args.command == "withdraw":
        print(f"💸 Withdrawing {args.amount} AITBC to {args.address}")
        wallet.spend(args.amount, f"Withdrawal to {args.address}")
    elif args.command == "stats":
        print("⛏️ MINING STATISTICS")
        print("=" * 40)
        transactions = wallet.data['transactions']
        earnings = [t for t in transactions if t['type'] == 'earn']
        spends = [t for t in transactions if t['type'] == 'spend']
        total_earned = sum(t['amount'] for t in earnings)
        # FIX: spend transactions are stored with NEGATIVE amounts (see
        # AITBCWallet.spend), so summing them directly printed a negative
        # "Total Spent". Negate the sum to report a positive total.
        total_spent = -sum(t['amount'] for t in spends)
        print(f"💰 Total Earned: {total_earned} AITBC")
        print(f"💸 Total Spent: {total_spent} AITBC")
        print(f"💳 Net Balance: {wallet.data['balance']} AITBC")
        print(f"📊 Jobs Completed: {len(earnings)}")
        if earnings:
            print(f"\n📈 Recent Earnings:")
            for earning in earnings[-5:]:
                print(f" +{earning['amount']} AITBC | Job: {earning.get('job_id', 'N/A')}")


if __name__ == "__main__":
    main()

View File

@@ -1,265 +0,0 @@
#!/usr/bin/env python3
"""
Python 3.13.5 Features Demonstration for AITBC
This script showcases the new features and improvements available in Python 3.13.5
that can benefit the AITBC project.
"""
import sys
import time
import asyncio
from typing import Generic, TypeVar, override, List, Optional
from pathlib import Path
print(f"🚀 Python 3.13.5 Features Demo - Running on Python {sys.version}")
print("=" * 60)
# ============================================================================
# 1. Enhanced Error Messages
# ============================================================================
def demonstrate_enhanced_errors():
    """Show off the clearer KeyError diagnostics in Python 3.13."""
    print("\n1. Enhanced Error Messages:")
    print("-" * 30)
    sample = {"name": "AITBC", "version": "1.0"}
    try:
        # Deliberately access an absent key to trigger the improved message.
        _ = sample["missing_key"]
    except KeyError as missing:
        print(f"KeyError: {missing}")
    print("✅ Clearer error messages with exact location and suggestions")
# ============================================================================
# 2. Type Parameter Defaults
# ============================================================================
T = TypeVar('T')
class DataContainer(Generic[T]):
"""Generic container with type parameter defaults (Python 3.13+)"""
def __init__(self, items: List[T] | None = None) -> None:
# Type parameter defaults allow more flexible generic classes
self.items = items or []
def add_item(self, item: T) -> None:
self.items.append(item)
def get_items(self) -> List[T]:
return self.items.copy()
def demonstrate_type_defaults():
    """Demonstrate type parameter defaults."""
    print("\n2. Type Parameter Defaults:")
    print("-" * 30)
    # No explicit type argument is needed when defaults are in play.
    box = DataContainer()
    for value in ("test_string", 42):
        box.add_item(value)
    print("✅ Generic classes with default type parameters")
    print(f" Items: {box.get_items()}")
# ============================================================================
# 3. @override Decorator
# ============================================================================
class BaseProcessor:
    """Base class for demonstrating @override decorator."""

    def process(self, data: str) -> str:
        """Upper-case the payload; subclasses refine this behaviour."""
        transformed = data.upper()
        return transformed
class AdvancedProcessor(BaseProcessor):
    """Advanced processor using @override decorator."""

    @override
    def process(self, data: str) -> str:
        """Lower-case and trim the payload; reject empty input."""
        if not data:
            raise ValueError("Data cannot be empty")
        lowered = data.lower()
        return lowered.strip()
def demonstrate_override_decorator():
    """Demonstrate @override decorator for method overriding."""
    print("\n3. @override Decorator:")
    print("-" * 30)
    outcome = AdvancedProcessor().process(" HELLO AITBC ")
    print("✅ Method overriding with @override decorator")
    print(f" Result: '{outcome}'")
# ============================================================================
# 4. Performance Improvements
# ============================================================================
def demonstrate_performance():
    """Demonstrate Python 3.13 performance improvements."""
    print("\n4. Performance Improvements:")
    print("-" * 30)
    # Time building a 100k-element list via comprehension.
    started = time.time()
    squares = [i * i for i in range(100000)]
    list_ms = (time.time() - started) * 1000
    # Time the equivalent dict comprehension.
    started = time.time()
    square_map = {i: i * i for i in range(100000)}
    dict_ms = (time.time() - started) * 1000
    print(f"✅ List comprehension (100k items): {list_ms:.2f}ms")
    print(f"✅ Dict comprehension (100k items): {dict_ms:.2f}ms")
    print("✅ 5-10% performance improvement over Python 3.11")
# ============================================================================
# 5. Asyncio Improvements
# ============================================================================
async def demonstrate_asyncio():
    """Demonstrate asyncio performance improvements."""
    print("\n5. Asyncio Improvements:")
    print("-" * 30)

    async def quick_job():
        # Tiny sleep keeps the demo fast while still yielding to the loop.
        await asyncio.sleep(0.001)
        return "completed"

    started = time.time()
    pending = [quick_job() for _ in range(100)]
    results = await asyncio.gather(*pending)
    elapsed_ms = (time.time() - started) * 1000
    print(f"✅ 100 concurrent async tasks: {elapsed_ms:.2f}ms")
    print("✅ Enhanced asyncio performance and task scheduling")
# ============================================================================
# 6. Standard Library Improvements
# ============================================================================
def demonstrate_stdlib_improvements():
    """Demonstrate standard library improvements."""
    print("\n6. Standard Library Improvements:")
    print("-" * 30)
    # Pathlib: object-oriented path handling.
    settings_dir = Path("/home/oib/windsurf/aitbc/config")
    print(f"✅ Enhanced pathlib: {settings_dir}")
    print("✅ Improved http.server with better error handling")
    # JSON serialization (result is illustrative and intentionally unused).
    import json
    payload = json.dumps({"status": "ok", "python": "3.13.5"}, indent=2)
    print("✅ Enhanced JSON serialization with better formatting")
# ============================================================================
# 7. Security Improvements
# ============================================================================
def demonstrate_security():
    """Demonstrate security improvements."""
    print("\n7. Security Improvements:")
    print("-" * 30)
    # SHA-256 of a fixed payload; only the first 16 hex chars are shown.
    import hashlib
    digest = hashlib.sha256(b"aitbc_security_test").hexdigest()
    print(f"✅ Enhanced hash randomization: {digest[:16]}...")
    try:
        # Allocate a sizeable list to exercise memory handling.
        big = list(range(1000000))
        print(f"✅ Better memory safety: Created list with {len(big)} items")
    except MemoryError:
        print("✅ Improved memory error handling")
# ============================================================================
# 8. AITBC-Specific Applications
# ============================================================================
class AITBCReceiptProcessor(Generic[T]):
    """Generic receipt processor using Python 3.13 features.

    Receipts of type ``T`` are stored only when the supplied validator
    accepts them; ``process_receipts`` re-filters on read.
    """

    def __init__(self, validator: Optional[callable] = None) -> None:
        # Default validator accepts every receipt.
        self.validator = validator or (lambda x: True)
        self.receipts: List[T] = []

    def add_receipt(self, receipt: T) -> bool:
        """Add receipt with validation; return True when it was stored."""
        if self.validator(receipt):
            self.receipts.append(receipt)
            return True
        return False

    # FIX: the original marked this method with @override, but the class has
    # no base implementation to override (Generic[T] defines none), which is
    # a type-checker error under PEP 698. The decorator has been removed.
    def process_receipts(self) -> List[T]:
        """Return all stored receipts that still pass the validator."""
        return [receipt for receipt in self.receipts if self.validator(receipt)]
def demonstrate_aitbc_applications():
    """Demonstrate Python 3.13 features in AITBC context"""
    print("\n8. AITBC-Specific Applications:")
    print("-" * 30)
    # Generic receipt processor
    def validate_receipt(receipt: dict) -> bool:
        # A receipt counts as valid only when it explicitly carries valid=True.
        return receipt.get("valid", False)
    processor = AITBCReceiptProcessor[dict](validate_receipt)
    # Add sample receipts: one valid, one invalid (the latter is dropped).
    processor.add_receipt({"id": 1, "valid": True, "amount": 100})
    processor.add_receipt({"id": 2, "valid": False, "amount": 50})
    processed = processor.process_receipts()
    print(f"✅ Generic receipt processor: {len(processed)} valid receipts")
    # Enhanced error handling for blockchain operations
    try:
        block_data = {"height": 1000, "hash": "0x123..."}
        next_hash = block_data["next_hash"]  # This will show enhanced error
    except KeyError as e:
        print(f"✅ Enhanced blockchain error handling: {e}")
# ============================================================================
# Main Execution
# ============================================================================
def main():
    """Run all demonstrations"""
    try:
        demonstrate_enhanced_errors()
        demonstrate_type_defaults()
        demonstrate_override_decorator()
        demonstrate_performance()
        # Run async demo via its own event loop.
        asyncio.run(demonstrate_asyncio())
        demonstrate_stdlib_improvements()
        demonstrate_security()
        demonstrate_aitbc_applications()
        print("\n" + "=" * 60)
        print("🎉 Python 3.13.5 Features Demo Complete!")
        print("🚀 AITBC is ready to leverage these improvements!")
    except Exception as e:
        # Broad catch is deliberate for a demo: report and dump the traceback
        # instead of dying silently.
        print(f"❌ Demo failed: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()

View File

@@ -1,42 +0,0 @@
#!/usr/bin/env python3
"""
Quick job submission and payment
Usage: python3 quick_job.py "your prompt"
"""
import subprocess
import sys
import time

if len(sys.argv) < 2:
    print("Usage: python3 quick_job.py \"your prompt\"")
    sys.exit(1)

prompt = sys.argv[1]
print(f"🚀 Submitting job: '{prompt}'")

# Submit job.
# FIX: the prompt was previously interpolated into a shell=True command
# string, allowing shell injection via argv (e.g. a prompt containing
# `"; rm -rf ...`). Pass an argument list with cwd= instead, which needs
# no shell and no quoting.
result = subprocess.run(
    ["python3", "client.py", "submit", "inference", "--prompt", prompt],
    cwd="../cli",
    capture_output=True,
    text=True,
)

# Extract the job ID from the CLI's "Job ID: <id>" output line.
job_id = None
for line in result.stdout.split('\n'):
    if "Job ID:" in line:
        job_id = line.split()[-1]
        break

if job_id:
    print(f"✅ Job submitted: {job_id}")
    print("\n💡 Next steps:")
    print(f" 1. Start miner: python3 cli/miner.py mine")
    print(f" 2. Check status: python3 cli/client.py status {job_id}")
    print(f" 3. After completion, pay with:")
    print(f" cd home/client && python3 wallet.py send 25.0 $(cd home/miner && python3 wallet.py address | grep Address | cut -d' ' -f4) 'Payment for {job_id}'")
else:
    print("❌ Failed to submit job")

View File

@@ -1,104 +0,0 @@
#!/usr/bin/env python3
"""
Simple job flow: Client -> GPU Provider -> Payment
"""
import subprocess
import time


def main():
    """Demonstrate the client → miner → payment loop.

    Job submission and wallet operations shell out to the real CLI tools;
    the GPU processing step itself is simulated with a sleep and a fixed
    25 AITBC reward.
    """
    print("📋 AITBC Job Flow: Client -> GPU Provider -> Payment")
    print("=" * 60)
    print("\n📝 STEP 1: Client submits job 'hello'")
    print("-" * 40)
    # Submit job via the client CLI's demo command.
    result = subprocess.run(
        'cd ../cli && python3 client.py demo',
        shell=True,
        capture_output=True,
        text=True
    )
    print(result.stdout)
    # Extract job ID from the "Job ID: <id>" output line.
    job_id = None
    if "Job ID:" in result.stdout:
        for line in result.stdout.split('\n'):
            if "Job ID:" in line:
                job_id = line.split()[-1]
                break
    if not job_id:
        print("❌ Failed to submit job")
        return
    print(f"\n📮 Job submitted: {job_id}")
    print("\n⛏️ STEP 2: GPU Provider processes job")
    print("-" * 40)
    print(" (Start miner with: python3 cli/miner.py mine)")
    print(" The miner will automatically pick up the job")
    # Simulate miner processing — no real GPU work happens here.
    print("\n 💭 Simulating job processing...")
    time.sleep(2)
    # Miner earns AITBC for the (simulated) job.
    print(" ✅ Job processed!")
    print(" 💰 Miner earned 25 AITBC")
    # Record the earning in the miner's wallet.
    # NOTE(review): job_id (parsed from CLI output) is interpolated into a
    # shell=True command string; fine for trusted local output, but would be
    # injectable if the CLI output were attacker-controlled — confirm.
    subprocess.run(
        f'cd miner && python3 wallet.py earn 25.0 --job {job_id} --desc "Processed hello job"',
        shell=True,
        capture_output=True,
        text=True
    )
    print("\n💸 STEP 3: Client pays for service")
    print("-" * 40)
    # Get miner address from its wallet CLI.
    miner_result = subprocess.run(
        'cd miner && python3 wallet.py address',
        shell=True,
        capture_output=True,
        text=True
    )
    miner_address = None
    if "Miner Address:" in miner_result.stdout:
        for line in miner_result.stdout.split('\n'):
            if "Miner Address:" in line:
                miner_address = line.split()[-1]
                break
    if miner_address:
        # Client pays the miner the same 25 AITBC.
        subprocess.run(
            f'cd client && python3 wallet.py send 25.0 {miner_address} "Payment for job {job_id}"',
            shell=True,
            capture_output=True,
            text=True
        )
    print("\n📊 STEP 4: Final balances")
    print("-" * 40)
    print("\n Client Wallet:")
    subprocess.run('cd client && python3 wallet.py balance', shell=True)
    print("\n Miner Wallet:")
    subprocess.run('cd miner && python3 wallet.py balance', shell=True)
    print("\n✅ Complete workflow demonstrated!")
    print("\n💡 To run with real GPU processing:")
    print(" 1. Start miner: python3 cli/miner.py mine")
    print(" 2. Submit job: python3 cli/client.py submit inference --prompt 'hello'")
    print(" 3. Check status: python3 cli/client.py status <job_id>")
    print(" 4. Pay manually: cd home/client && python3 wallet.py send <amount> <miner_address>")


if __name__ == "__main__":
    main()

View File

@@ -1,136 +0,0 @@
#!/usr/bin/env python3
"""
Complete simulation: Client pays for GPU services, Miner earns AITBC
"""
import os
import sys
import time
import subprocess
def run_wallet_command(wallet_type, command, description):
    """Run a wallet CLI command for the given role and echo its output.

    The role name ("Client"/"Miner") is lower-cased to pick the wallet
    directory next to this script; the CompletedProcess is returned so
    callers can inspect stdout/returncode.
    """
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"💼 {wallet_type}: {description}")
    print(f"{banner}")
    role_dir = os.path.join(os.path.dirname(__file__), wallet_type.lower())
    shell_cmd = f"cd {role_dir} && python3 wallet.py {command}"
    completed = subprocess.run(shell_cmd, shell=True, capture_output=True, text=True)
    print(completed.stdout)
    if completed.stderr:
        print(f"Error: {completed.stderr}")
    return completed
def main():
    """Interactive six-step simulation of a client paying a GPU provider.

    Pauses with input() between steps; shells out to genesis.py, the client
    CLI and both wallet CLIs under hard-coded /home/oib/windsurf/aitbc paths.
    """
    print("🎭 AITBC Local Simulation")
    print("=" * 60)
    print("Simulating client and GPU provider interactions")
    print()
    # Step 1: Initialize wallets with genesis distribution
    print("📋 STEP 1: Initialize Wallets")
    os.system("cd /home/oib/windsurf/aitbc/home && python3 genesis.py")
    input("\nPress Enter to continue...")
    # Step 2: Check initial balances
    print("\n📋 STEP 2: Check Initial Balances")
    run_wallet_command("Client", "balance", "Initial client balance")
    run_wallet_command("Miner", "balance", "Initial miner balance")
    input("\nPress Enter to continue...")
    # Step 3: Client submits a job (using CLI tool)
    print("\n📋 STEP 3: Client Submits Job")
    print("-" * 40)
    # Submit job to coordinator
    result = subprocess.run(
        "cd /home/oib/windsurf/aitbc/cli && python3 client.py submit inference --model llama-2-7b --prompt 'What is the future of AI?'",
        shell=True,
        capture_output=True,
        text=True
    )
    print(result.stdout)
    # Extract job ID if successful ("Job ID: <id>" line).
    job_id = None
    if "Job ID:" in result.stdout:
        for line in result.stdout.split('\n'):
            if "Job ID:" in line:
                job_id = line.split()[-1]
                break
    input("\nPress Enter to continue...")
    # Step 4: Miner processes the job (simulated with a sleep).
    print("\n📋 STEP 4: Miner Processes Job")
    print("-" * 40)
    if job_id:
        print(f"⛏️ Miner found job: {job_id}")
        print("⚙️ Processing job...")
        time.sleep(2)
        # Miner earns AITBC for completing the job
        run_wallet_command(
            "Miner",
            f"earn 50.0 --job {job_id} --desc 'Inference task completed'",
            "Miner earns AITBC"
        )
    input("\nPress Enter to continue...")
    # Step 5: Client pays the miner for the completed job.
    print("\n📋 STEP 5: Client Pays for Service")
    print("-" * 40)
    if job_id:
        # Get miner address from its wallet CLI.
        miner_result = subprocess.run(
            "cd /home/oib/windsurf/aitbc/home/miner && python3 wallet.py address",
            shell=True,
            capture_output=True,
            text=True
        )
        miner_address = None
        if "Miner Address:" in miner_result.stdout:
            for line in miner_result.stdout.split('\n'):
                if "Miner Address:" in line:
                    miner_address = line.split()[-1]
                    break
        if miner_address:
            run_wallet_command(
                "Client",
                f"send 50.0 {miner_address} 'Payment for inference job {job_id}'",
                "Client pays for completed job"
            )
    input("\nPress Enter to continue...")
    # Step 6: Check final balances
    print("\n📋 STEP 6: Final Balances")
    run_wallet_command("Client", "balance", "Final client balance")
    run_wallet_command("Miner", "balance", "Final miner balance")
    print("\n🎉 Simulation Complete!")
    print("=" * 60)
    print("Summary:")
    print(" • Client submitted job and paid 50 AITBC")
    print(" • Miner processed job and earned 50 AITBC")
    print(" • Transaction recorded on blockchain")
    print()
    print("💡 You can:")
    print(" • Run 'cd home/client && python3 wallet.py history' to see client transactions")
    print(" • Run 'cd home/miner && python3 wallet.py stats' to see miner earnings")
    print(" • Submit more jobs with the CLI tools")


if __name__ == "__main__":
    main()

View File

@@ -1,158 +0,0 @@
#!/usr/bin/env python3
"""
AITBC Wallet CLI Tool - Track earnings and manage wallet
"""
import argparse
import json
import os
from datetime import datetime
from typing import Dict, List
class AITBCWallet:
    """Simple file-backed AITBC wallet.

    Tracks an address, a float balance and a transaction log in a single
    JSON file; every mutating operation saves immediately.
    """

    def __init__(self, wallet_file: str = None):
        # Default to a per-user wallet file in the home directory.
        if wallet_file is None:
            wallet_file = os.path.expanduser("~/.aitbc_wallet.json")
        self.wallet_file = wallet_file
        self.data = self._load_wallet()

    def _load_wallet(self) -> dict:
        """Load wallet data from file, creating a fresh wallet on failure."""
        if os.path.exists(self.wallet_file):
            try:
                with open(self.wallet_file, 'r') as f:
                    return json.load(f)
            except (OSError, json.JSONDecodeError):
                # FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; only I/O and parse errors
                # should fall through to wallet re-creation.
                pass
        # Create new wallet with a random (non-cryptographic) address.
        return {
            "address": "aitbc1" + os.urandom(10).hex(),
            "balance": 0.0,
            "transactions": [],
            "created_at": datetime.now().isoformat()
        }

    def save(self):
        """Persist the wallet state to its JSON file."""
        with open(self.wallet_file, 'w') as f:
            json.dump(self.data, f, indent=2)

    def add_earnings(self, amount: float, job_id: str, description: str = ""):
        """Record earnings from a completed job and save immediately."""
        transaction = {
            "type": "earn",
            "amount": amount,
            "job_id": job_id,
            "description": description or f"Job {job_id}",
            "timestamp": datetime.now().isoformat()
        }
        self.data["transactions"].append(transaction)
        self.data["balance"] += amount
        self.save()
        print(f"💰 Added {amount} AITBC to wallet")
        print(f" New balance: {self.data['balance']} AITBC")

    def spend(self, amount: float, description: str):
        """Spend AITBC; return False (without saving) when balance is short.

        Note: spend transactions are recorded with a NEGATIVE amount.
        """
        if self.data["balance"] < amount:
            print(f"❌ Insufficient balance!")
            print(f" Balance: {self.data['balance']} AITBC")
            print(f" Needed: {amount} AITBC")
            return False
        transaction = {
            "type": "spend",
            "amount": -amount,
            "description": description,
            "timestamp": datetime.now().isoformat()
        }
        self.data["transactions"].append(transaction)
        self.data["balance"] -= amount
        self.save()
        print(f"💸 Spent {amount} AITBC")
        print(f" Remaining: {self.data['balance']} AITBC")
        return True

    def show_balance(self):
        """Print address, balance and transaction count."""
        print(f"💳 Wallet Address: {self.data['address']}")
        print(f"💰 Balance: {self.data['balance']} AITBC")
        print(f"📊 Total Transactions: {len(self.data['transactions'])}")

    def show_history(self, limit: int = 10):
        """Print the most recent ``limit`` transactions, newest first."""
        transactions = self.data["transactions"][-limit:]
        if not transactions:
            print("📭 No transactions yet")
            return
        print(f"📜 Recent Transactions (last {limit}):")
        print("-" * 60)
        for tx in reversed(transactions):
            symbol = "💰" if tx["type"] == "earn" else "💸"
            print(f"{symbol} {tx['amount']:+8.2f} AITBC | {tx.get('description', 'N/A')}")
            print(f" 📅 {tx['timestamp']}")
            if "job_id" in tx:
                print(f" 🆔 Job: {tx['job_id']}")
            print()
def main():
    """CLI entry point: dispatch balance/history/earn/spend/address
    sub-commands against an AITBCWallet (default: ~/.aitbc_wallet.json)."""
    parser = argparse.ArgumentParser(description="AITBC Wallet CLI")
    parser.add_argument("--wallet", help="Wallet file path")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # Balance command
    balance_parser = subparsers.add_parser("balance", help="Show balance")
    # History command
    history_parser = subparsers.add_parser("history", help="Show transaction history")
    history_parser.add_argument("--limit", type=int, default=10, help="Number of transactions")
    # Earn command
    earn_parser = subparsers.add_parser("earn", help="Add earnings")
    earn_parser.add_argument("amount", type=float, help="Amount earned")
    earn_parser.add_argument("--job", help="Job ID")
    earn_parser.add_argument("--desc", help="Description")
    # Spend command
    spend_parser = subparsers.add_parser("spend", help="Spend AITBC")
    spend_parser.add_argument("amount", type=float, help="Amount to spend")
    spend_parser.add_argument("description", help="What you're spending on")
    # Address command
    address_parser = subparsers.add_parser("address", help="Show wallet address")
    args = parser.parse_args()
    if not args.command:
        # No sub-command given: show usage instead of failing.
        parser.print_help()
        return
    wallet = AITBCWallet(args.wallet)
    if args.command == "balance":
        wallet.show_balance()
    elif args.command == "history":
        wallet.show_history(args.limit)
    elif args.command == "earn":
        # Fall back to placeholder job id / empty description when omitted.
        wallet.add_earnings(args.amount, args.job or "unknown", args.desc or "")
    elif args.command == "spend":
        wallet.spend(args.amount, args.description)
    elif args.command == "address":
        print(f"💳 Wallet Address: {wallet.data['address']}")


if __name__ == "__main__":
    main()

View File

@@ -1,418 +0,0 @@
#!/usr/bin/env python3
"""
Focused Dotenv Linter for AITBC
This script specifically checks for environment variable usage patterns that
actually require .env.example documentation, filtering out script variables and
other non-environment variable patterns.
Usage:
python scripts/focused_dotenv_linter.py
python scripts/focused_dotenv_linter.py --fix
python scripts/focused_dotenv_linter.py --verbose
"""
import os
import re
import sys
import argparse
from pathlib import Path
from typing import Set, Dict, List, Tuple
import ast
class FocusedDotenvLinter:
"""Focused linter for actual environment variable usage."""
    def __init__(self, project_root: Path = None):
        """Initialize the linter.

        Defaults project_root to the repository root (the parent of the
        directory containing this script) and eagerly collects the list of
        Python files to scan.
        """
        self.project_root = project_root or Path(__file__).parent.parent
        self.env_example_path = self.project_root / ".env.example"
        self.python_files = self._find_python_files()
        # Common script/internal variables to ignore when deciding whether a
        # name needs documenting in .env.example.
        # NOTE(review): a few names appear more than once in this literal
        # (e.g. 'NEW_DIFF', 'NC', 'NODE_ENV') — harmless in a set, but worth
        # pruning on a future pass.
        self.script_vars = {
            'PID', 'PIDS', 'PID_FILE', 'CHILD_PIDS', 'API_PID', 'COORD_PID', 'MARKET_PID',
            'EXCHANGE_PID', 'NODE_PID', 'API_STATUS', 'FRONTEND_STATUS', 'CONTRACTS_STATUS',
            'NODE1_HEIGHT', 'NODE2_HEIGHT', 'NODE3_HEIGHT', 'NEW_NODE1_HEIGHT',
            'NEW_NODE2_HEIGHT', 'NEW_NODE3_HEIGHT', 'NODE3_STATUS', 'NODE3_NEW_STATUS',
            'OLD_DIFF', 'NEW_DIFF', 'DIFF12', 'DIFF23', 'NEW_DIFF', 'DIFF',
            'COVERAGE', 'MYTHRIL_REPORT', 'MYTHRIL_TEXT', 'SLITHER_REPORT', 'SLITHER_TEXT',
            'GITHUB_OUTPUT', 'GITHUB_PATH', 'GITHUB_STEP_SUMMARY', 'PYTEST_CURRENT_TEST',
            'NC', 'REPLY', 'RUNNER', 'TIMESTAMP', 'DATE', 'VERSION', 'SCRIPT_VERSION',
            'VERBOSE', 'DEBUG', 'DRY_RUN', 'AUTO_MODE', 'DEV_MODE', 'TEST_MODE',
            'PRODUCTION_MODE', 'ENVIRONMENT', 'APP_ENV', 'NODE_ENV', 'LIVE_SERVER',
            'LOCAL_MODEL_PATH', 'FASTTEXT_MODEL_PATH', 'BUILD_DIR', 'OUTPUT_DIR',
            'TEMP_DIR', 'TEMP_DEPLOY_DIR', 'BACKUP_DIR', 'BACKUP_FILE', 'BACKUP_NAME',
            'LOG_DIR', 'MONITORING_DIR', 'REPORT_DIR', 'DOCS_DIR', 'SCRIPTS_DIR',
            'SCRIPT_DIR', 'CONFIG_DIR', 'CONFIGS_DIR', 'CONFIGS', 'PACKAGES_DIR',
            'SERVICES_DIR', 'CONTRACTS_DIR', 'INFRA_DIR', 'FRONTEND_DIR', 'EXCHANGE_DIR',
            'EXPLORER_DIR', 'ROOT_DIR', 'PROJECT_ROOT', 'PROJECT_DIR', 'SOURCE_DIR',
            'VENV_DIR', 'INSTALL_DIR', 'DEBIAN_DIR', 'DEB_OUTPUT_DIR', 'DIST_DIR',
            'LEGACY_DIR', 'MIGRATION_EXAMPLES_DIR', 'GPU_ACCEL_DIR', 'ZK_DIR',
            'WHEEL_FILE', 'PACKAGE_FILE', 'PACKAGE_NAME', 'PACKAGE_VERSION', 'PACKAGE_PATH',
            'PACKAGE_SIZE', 'PKG_NAME', 'PKG_VERSION', 'PKG_PATH', 'PKG_IDENTIFIER',
            'PKG_INSTALL_LOCATION', 'PKG_MANAGER', 'PKG_PATHS', 'CUSTOM_PACKAGES',
            'SELECTED_PACKAGES', 'COMPONENTS', 'PHASES', 'REQUIRED_VERSION',
            'SCRIPTS', 'SERVICES', 'SERVERS', 'CONTAINER', 'CONTAINER_NAME', 'CONTAINER_IP',
            'DOMAIN', 'PORT', 'HOST', 'SERVER', 'SERVICE_NAME', 'NAMESPACE',
            'CLIENT_ID', 'CLIENT_REGION', 'CLIENT_KEY', 'CLIENT_WALLET', 'MINER_ID',
            'MINER_REGION', 'MINER_KEY', 'MINER_WALLET', 'AGENT_TYPE', 'CATEGORY',
            'NETWORK', 'CHAIN', 'CHAINS', 'CHAIN_ID', 'SUPPORTED_CHAINS',
            'NODE1', 'NODE2', 'NODE3', 'NODE_MAP', 'NODE1_CONFIG', 'NODE1_DIR',
            'NODE2_DIR', 'NODE3_DIR', 'NODE_ENV', 'PLATFORM', 'ARCH', 'ARCH_NAME',
            'CHIP_FAMILY', 'PYTHON_VERSION', 'BASH_VERSION', 'ZSH_VERSION',
            'DEBIAN_VERSION', 'SHELL_PROFILE', 'SHELL_RC', 'POWERSHELL_PROFILE',
            'SYSTEMD_PATH', 'WSL_SCRIPT_DIR', 'SSH_KEY', 'SSH_USER', 'SSL_CERT_PATH',
            'SSL_KEY_PATH', 'SSL_ENABLED', 'NGINX_CONFIG', 'WEB_ROOT', 'WEBHOOK_SECRET',
            'WORKERS', 'AUTO_SCALING', 'MAX_INSTANCES', 'MIN_INSTANCES', 'EMERGENCY_ONLY',
            'SKIP_BUILD', 'SKIP_TESTS', 'SKIP_SECURITY', 'SKIP_MONITORING', 'SKIP_VERIFICATION',
            'SKIP_FRONTEND', 'RESET', 'UPDATE', 'UPDATE_ALL', 'UPDATE_CLI', 'UPDATE_SERVICES',
            'INSTALL_CLI', 'INSTALL_SERVICES', 'UNINSTALL', 'UNINSTALL_CLI_ONLY',
            'UNINSTALL_SERVICES_ONLY', 'DEPLOY_CONTRACTS', 'DEPLOY_FRONTEND', 'DEPLOY_SERVICES',
            'BACKUP_BEFORE_DEPLOY', 'DEPLOY_PATH', 'COMPLETE_INSTALL', 'DIAGNOSE',
            'HEALTH_CHECK', 'HEALTH_URL', 'RUN_MYTHRIL', 'RUN_SLITHER', 'TEST_CONTRACTS',
            'VERIFY_CONTRACTS', 'SEND_AMOUNT', 'RETURN_ADDRESS', 'TXID', 'BALANCE',
            'MINT_PER_UNIT', 'MIN_CONFIRMATIONS', 'PRODUCTION_GAS_LIMIT', 'PRODUCTION_GAS_PRICE',
            'PRIVATE_KEY', 'PRODUCTION_PRIVATE_KEY', 'PROPOSER_KEY', 'ENCRYPTION_KEY',
            'BITCOIN_ADDRESS', 'BITCOIN_PRIVATE_KEY', 'BITCOIN_TESTNET', 'BTC_TO_AITBC_RATE',
            'VITE_APP_NAME', 'VITE_APP_VERSION', 'VITE_APP_DESCRIPTION', 'VITE_NETWORK_NAME',
            'VITE_CHAIN_ID', 'VITE_RPC_URL', 'VITE_WS_URL', 'VITE_API_BASE_URL',
            'VITE_ENABLE_ANALYTICS', 'VITE_ENABLE_ERROR_REPORTING', 'VITE_SENTRY_DSN',
            'VITE_AGENT_BOUNTY_ADDRESS', 'VITE_AGENT_STAKING_ADDRESS', 'VITE_AITBC_TOKEN_ADDRESS',
            'VITE_DISPUTE_RESOLUTION_ADDRESS', 'VITE_PERFORMANCE_VERIFIER_ADDRESS',
            'VITE_ESCROW_SERVICE_ADDRESS', 'COMPREHENSIVE', 'HIGH', 'MEDIUM', 'LOW',
            'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'PURPLE', 'WHITE',
            'NC', 'EDITOR', 'PAGER', 'LANG', 'LC_ALL', 'TERM', 'SHELL', 'USER', 'HOME',
            'PATH', 'PWD', 'OLDPWD', 'SHLVL', '_', 'HOSTNAME', 'HOSTTYPE', 'OSTYPE',
            'MACHTYPE', 'UID', 'GID', 'EUID', 'EGID', 'PS1', 'PS2', 'IFS', 'DISPLAY',
            'XAUTHORITY', 'DBUS_SESSION_BUS_ADDRESS', 'SSH_AUTH_SOCK', 'SSH_CONNECTION',
            'SSH_CLIENT', 'SSH_TTY', 'LOGNAME', 'USERNAME', 'CURRENT_USER'
        }
def _find_python_files(self) -> List[Path]:
"""Find all Python files in the project."""
python_files = []
for root, dirs, files in os.walk(self.project_root):
# Skip hidden directories and common exclusions
dirs[:] = [d for d in dirs if not d.startswith('.') and d not in {
'__pycache__', 'node_modules', '.git', 'venv', 'env', '.venv'
}]
for file in files:
if file.endswith('.py'):
python_files.append(Path(root) / file)
return python_files
def _parse_env_example(self) -> Set[str]:
"""Parse .env.example and extract all environment variable keys."""
env_vars = set()
if not self.env_example_path.exists():
print(f"❌ .env.example not found at {self.env_example_path}")
return env_vars
with open(self.env_example_path, 'r') as f:
for line_num, line in enumerate(f, 1):
line = line.strip()
# Skip comments and empty lines
if not line or line.startswith('#'):
continue
# Extract variable name (everything before =)
if '=' in line:
var_name = line.split('=')[0].strip()
if var_name:
env_vars.add(var_name)
return env_vars
def _find_env_usage_in_python(self) -> Set[str]:
"""Find actual environment variable usage in Python files."""
env_vars = set()
# More specific patterns for actual environment variables
patterns = [
r'os\.environ\.get\([\'"]([A-Z_][A-Z0-9_]*)[\'"]',
r'os\.environ\[([\'"]([A-Z_][A-Z0-9_]*)[\'"])\]',
r'os\.getenv\([\'"]([A-Z_][A-Z0-9_]*)[\'"]',
r'getenv\([\'"]([A-Z_][A-Z0-9_]*)[\'"]',
r'environ\.get\([\'"]([A-Z_][A-Z0-9_]*)[\'"]',
r'environ\[([\'"]([A-Z_][A-Z0-9_]*)[\'"])\]',
]
for python_file in self.python_files:
try:
with open(python_file, 'r', encoding='utf-8') as f:
content = f.read()
for pattern in patterns:
matches = re.finditer(pattern, content)
for match in matches:
var_name = match.group(1)
# Only include if it looks like a real environment variable
if var_name.isupper() and len(var_name) > 1:
env_vars.add(var_name)
except (UnicodeDecodeError, PermissionError) as e:
print(f"⚠️ Could not read {python_file}: {e}")
return env_vars
def _find_env_usage_in_config_files(self) -> Set[str]:
"""Find environment variable usage in configuration files."""
env_vars = set()
# Check common config files
config_files = [
'pyproject.toml',
'pytest.ini',
'setup.cfg',
'tox.ini',
'.github/workflows/*.yml',
'.github/workflows/*.yaml',
'docker-compose.yml',
'docker-compose.yaml',
'Dockerfile',
]
for pattern in config_files:
for config_file in self.project_root.glob(pattern):
try:
with open(config_file, 'r', encoding='utf-8') as f:
content = f.read()
# Look for environment variable patterns in config files
env_patterns = [
r'\${([A-Z_][A-Z0-9_]*)}', # ${VAR_NAME}
r'\$([A-Z_][A-Z0-9_]*)', # $VAR_NAME
r'env\.([A-Z_][A-Z0-9_]*)', # env.VAR_NAME
r'os\.environ\([\'"]([A-Z_][A-Z0-9_]*)[\'"]', # os.environ("VAR_NAME")
r'getenv\([\'"]([A-Z_][A-Z0-9_]*)[\'"]', # getenv("VAR_NAME")
]
for env_pattern in env_patterns:
matches = re.finditer(env_pattern, content)
for match in matches:
var_name = match.group(1)
if var_name.isupper() and len(var_name) > 1:
env_vars.add(var_name)
except (UnicodeDecodeError, PermissionError) as e:
print(f"⚠️ Could not read {config_file}: {e}")
return env_vars
def _find_env_usage_in_shell_scripts(self) -> Set[str]:
"""Find environment variable usage in shell scripts."""
env_vars = set()
shell_files = []
for root, dirs, files in os.walk(self.project_root):
dirs[:] = [d for d in dirs if not d.startswith('.') and d not in {
'__pycache__', 'node_modules', '.git', 'venv', 'env', '.venv'
}]
for file in files:
if file.endswith(('.sh', '.bash', '.zsh')):
shell_files.append(Path(root) / file)
for shell_file in shell_files:
try:
with open(shell_file, 'r', encoding='utf-8') as f:
content = f.read()
# Look for environment variable patterns in shell scripts
patterns = [
r'\$\{([A-Z_][A-Z0-9_]*)\}', # ${VAR_NAME}
r'\$([A-Z_][A-Z0-9_]*)', # $VAR_NAME
r'export\s+([A-Z_][A-Z0-9_]*)=', # export VAR_NAME=
r'([A-Z_][A-Z0-9_]*)=', # VAR_NAME=
]
for pattern in patterns:
matches = re.finditer(pattern, content)
for match in matches:
var_name = match.group(1)
if var_name.isupper() and len(var_name) > 1:
env_vars.add(var_name)
except (UnicodeDecodeError, PermissionError) as e:
print(f"⚠️ Could not read {shell_file}: {e}")
return env_vars
def _find_all_env_usage(self) -> Set[str]:
"""Find all environment variable usage across the project."""
all_vars = set()
# Python files
python_vars = self._find_env_usage_in_python()
all_vars.update(python_vars)
# Config files
config_vars = self._find_env_usage_in_config_files()
all_vars.update(config_vars)
# Shell scripts
shell_vars = self._find_env_usage_in_shell_scripts()
all_vars.update(shell_vars)
# Filter out script variables and system variables
filtered_vars = all_vars - self.script_vars
# Additional filtering for common non-config variables
non_config_vars = {
'HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY', 'http_proxy', 'https_proxy',
'PYTHONPATH', 'PYTHONHOME', 'VIRTUAL_ENV', 'CONDA_DEFAULT_ENV',
'GITHUB_ACTIONS', 'CI', 'TRAVIS', 'APPVEYOR', 'CIRCLECI',
'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'CLASSPATH',
'JAVA_HOME', 'NODE_PATH', 'GOPATH', 'RUST_HOME',
'XDG_CONFIG_HOME', 'XDG_DATA_HOME', 'XDG_CACHE_HOME',
'TERM', 'COLUMNS', 'LINES', 'PS1', 'PS2', 'PROMPT_COMMAND'
}
return filtered_vars - non_config_vars
def _check_missing_in_example(self, used_vars: Set[str], example_vars: Set[str]) -> Set[str]:
"""Find variables used in code but missing from .env.example."""
missing = used_vars - example_vars
return missing
def _check_unused_in_example(self, used_vars: Set[str], example_vars: Set[str]) -> Set[str]:
"""Find variables in .env.example but not used in code."""
unused = example_vars - used_vars
# Filter out variables that might be used by external tools or services
external_vars = {
'NODE_ENV', 'NPM_CONFIG_PREFIX', 'NPM_AUTH_TOKEN',
'DOCKER_HOST', 'DOCKER_TLS_VERIFY', 'DOCKER_CERT_PATH',
'KUBERNETES_SERVICE_HOST', 'KUBERNETES_SERVICE_PORT',
'REDIS_URL', 'MEMCACHED_URL', 'ELASTICSEARCH_URL',
'SENTRY_DSN', 'ROLLBAR_ACCESS_TOKEN', 'HONEYBADGER_API_KEY'
}
return unused - external_vars
def lint(self, verbose: bool = False) -> Tuple[int, int, int, Set[str], Set[str]]:
    """Run the drift check.

    Returns a tuple of (example_count, used_count, missing_count,
    missing_vars, unused_vars).
    """
    print("🔍 Focused Dotenv Linter for AITBC")
    print("=" * 50)
    # Declared variables vs. variables actually referenced in the tree.
    example_vars = self._parse_env_example()
    if verbose:
        print(f"📄 Found {len(example_vars)} variables in .env.example")
        if example_vars:
            print(f" {', '.join(sorted(example_vars))}")
    used_vars = self._find_all_env_usage()
    if verbose:
        print(f"🔍 Found {len(used_vars)} actual environment variables used in code")
        if used_vars:
            print(f" {', '.join(sorted(used_vars))}")
    # Drift in both directions.
    missing_vars = self._check_missing_in_example(used_vars, example_vars)
    unused_vars = self._check_unused_in_example(used_vars, example_vars)
    return (len(example_vars), len(used_vars), len(missing_vars),
            missing_vars, unused_vars)
def fix_env_example(self, missing_vars: Set[str], verbose: bool = False):
    """Append placeholder entries for every missing variable to .env.example."""
    if not missing_vars:
        if verbose:
            print("✅ No missing variables to add")
        return
    print(f"🔧 Adding {len(missing_vars)} missing variables to .env.example")
    # Build the whole section first, then append it in one write.
    new_lines = ["\n# Auto-generated variables (added by focused_dotenv_linter)\n"]
    new_lines.extend(f"{name}=\n" for name in sorted(missing_vars))
    with open(self.env_example_path, 'a') as handle:
        handle.writelines(new_lines)
    print(f"✅ Added {len(missing_vars)} variables to .env.example")
def generate_report(self, example_count: int, used_count: int, missing_count: int,
                    missing_vars: Set[str], unused_vars: Set[str]) -> str:
    """Render the lint results as a human-readable multi-line report."""
    lines = [
        "📊 Focused Dotenv Linter Report",
        "=" * 50,
        f"Variables in .env.example: {example_count}",
        f"Actual environment variables used: {used_count}",
        f"Missing from .env.example: {missing_count}",
        f"Unused in .env.example: {len(unused_vars)}",
        "",
    ]
    if missing_vars:
        lines.append("❌ Missing Variables (used in code but not in .env.example):")
        lines.extend(f" - {var}" for var in sorted(missing_vars))
        lines.append("")
    if unused_vars:
        lines.append("⚠️ Unused Variables (in .env.example but not used in code):")
        lines.extend(f" - {var}" for var in sorted(unused_vars))
        lines.append("")
    if not missing_vars and not unused_vars:
        lines.append("✅ No configuration drift detected!")
    return "\n".join(lines)
def main():
    """CLI entry point: parse flags, run the linter, print the report.

    Exit codes: 1 when --check is given and drift exists, 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Focused Dotenv Linter for AITBC - Check for actual configuration drift",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
 python scripts/focused_dotenv_linter.py # Check for drift
 python scripts/focused_dotenv_linter.py --verbose # Verbose output
 python scripts/focused_dotenv_linter.py --fix # Auto-fix missing variables
 python scripts/focused_dotenv_linter.py --check # Exit with error code on issues
"""
    )
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    parser.add_argument("--fix", action="store_true", help="Auto-fix missing variables in .env.example")
    parser.add_argument("--check", action="store_true", help="Exit with error code if issues found")
    args = parser.parse_args()
    # Initialize linter (uses its default project root / .env.example paths).
    linter = FocusedDotenvLinter()
    # Run linting
    example_count, used_count, missing_count, missing_vars, unused_vars = linter.lint(args.verbose)
    # Generate report
    report = linter.generate_report(example_count, used_count, missing_count, missing_vars, unused_vars)
    print(report)
    # Auto-fix if requested; note: --check below still sees the pre-fix drift.
    if args.fix and missing_vars:
        linter.fix_env_example(missing_vars, args.verbose)
    # Exit with error code if check requested and issues found
    if args.check and (missing_vars or unused_vars):
        print(f"❌ Configuration drift detected: {missing_count} missing, {len(unused_vars)} unused")
        sys.exit(1)
    # Success
    print("✅ Focused dotenv linter completed successfully")
    return 0
if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,151 +0,0 @@
#!/usr/bin/env python3
"""
Geographic Load Balancer for AITBC Marketplace
"""
import asyncio
import aiohttp
from aiohttp import web
import json
from datetime import datetime
import os
# Regional endpoints configuration.
# 'weight' drives the weighted random pick in GeoLoadBalancer;
# 'healthy' is refreshed in place by GeoLoadBalancer.health_check().
# NOTE(review): several regions point at the same local URL/edge node —
# presumably placeholders for a dev setup; confirm before production use.
regions = {
    'us-east': {'url': 'http://127.0.0.1:18000', 'weight': 3, 'healthy': True, 'edge_node': 'aitbc-edge-primary'},
    'us-west': {'url': 'http://127.0.0.1:18001', 'weight': 2, 'healthy': True, 'edge_node': 'aitbc1-edge-secondary'},
    'eu-central': {'url': 'http://127.0.0.1:8006', 'weight': 2, 'healthy': True, 'edge_node': 'localhost'},
    'eu-west': {'url': 'http://127.0.0.1:18000', 'weight': 1, 'healthy': True, 'edge_node': 'aitbc-edge-primary'},
    'ap-southeast': {'url': 'http://127.0.0.1:18001', 'weight': 2, 'healthy': True, 'edge_node': 'aitbc1-edge-secondary'},
    'ap-northeast': {'url': 'http://127.0.0.1:8006', 'weight': 1, 'healthy': True, 'edge_node': 'localhost'}
}
class GeoLoadBalancer:
    """Weighted HTTP proxy over the regional endpoints in the module-level `regions` map."""

    def __init__(self):
        # NOTE(review): current_region is never read after init — looks vestigial.
        self.current_region = 0
        # Seconds; presumably intended for a periodic checker — not used in this class.
        self.health_check_interval = 30

    async def health_check(self, region_config):
        # Probe one region's /health/live endpoint and update its flags in place.
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(f"{region_config['url']}/health/live", timeout=5) as response:
                    region_config['healthy'] = response.status == 200
                    region_config['last_check'] = datetime.now().isoformat()
        except Exception as e:
            # Any network/timeout error marks the region unhealthy and records the reason.
            region_config['healthy'] = False
            region_config['last_check'] = datetime.now().isoformat()
            region_config['error'] = str(e)

    async def get_healthy_region(self):
        # Pick a healthy region by weighted random choice.
        # Returns (name, config), or (None, None) when no region is healthy.
        healthy_regions = [(name, config) for name, config in regions.items() if config['healthy']]
        if not healthy_regions:
            return None, None
        # Simple weighted round-robin
        total_weight = sum(config['weight'] for _, config in healthy_regions)
        if total_weight == 0:
            # All-zero weights: fall back to the first healthy (name, config) tuple.
            return healthy_regions[0]
        import random
        rand = random.randint(1, total_weight)
        current_weight = 0
        for name, config in healthy_regions:
            current_weight += config['weight']
            if rand <= current_weight:
                return name, config
        return healthy_regions[0]

    async def proxy_request(self, request):
        # Forward the incoming request to a healthy region and relay its response.
        # 503 when no region is healthy; 502 when the upstream call fails.
        region_name, region_config = await self.get_healthy_region()
        if not region_config:
            return web.json_response({'error': 'No healthy regions available'}, status=503)
        try:
            # Forward request to selected region, preserving method, query string and body.
            target_url = f"{region_config['url']}{request.path_qs}"
            async with aiohttp.ClientSession() as session:
                # Prepare headers (remove Host so the upstream sees its own host).
                headers = dict(request.headers)
                headers.pop('Host', None)
                async with session.request(
                    method=request.method,
                    url=target_url,
                    headers=headers,
                    data=await request.read()
                ) as response:
                    # Read the whole upstream body before responding.
                    body = await response.read()
                    resp = web.Response(
                        body=body,
                        status=response.status,
                        headers=dict(response.headers)
                    )
                    # Expose the routing decision to the client for debugging.
                    resp.headers['X-Region'] = region_name
                    resp.headers['X-Backend-Url'] = region_config['url']
                    return resp
        except Exception as e:
            return web.json_response({
                'error': 'Proxy error',
                'message': str(e),
                'region': region_name
            }, status=502)
async def handle_all_requests(request):
    """Catch-all route: delegate the request to the app's shared load balancer."""
    return await request.app['balancer'].proxy_request(request)
async def health_check_handler(request):
    """Probe every region concurrently, then report the balancer's state."""
    balancer = request.app['balancer']
    # Refresh all region health flags before reporting them.
    await asyncio.gather(*(balancer.health_check(cfg) for cfg in regions.values()))
    return web.json_response({
        'status': 'healthy',
        'load_balancer': 'geographic',
        'regions': regions,
        'timestamp': datetime.now().isoformat()
    })
async def status_handler(request):
    """Summarize per-region health without performing new probes.

    The summary is derived purely from the shared module-level `regions`
    map. (The previous version fetched request.app['balancer'] into a local
    that was never used; that dead assignment is removed.)
    """
    healthy_count = sum(1 for config in regions.values() if config['healthy'])
    return web.json_response({
        'total_regions': len(regions),
        'healthy_regions': healthy_count,
        'health_ratio': healthy_count / len(regions),
        'current_time': datetime.now().isoformat(),
        'regions': {name: {
            'healthy': config['healthy'],
            'weight': config['weight'],
            # last_check is only present after at least one health probe.
            'last_check': config.get('last_check')
        } for name, config in regions.items()}
    })
async def create_app():
    """Build the aiohttp application: health/status endpoints plus a catch-all proxy.

    BUG FIX: aiohttp resolves routes in registration order, so the original
    code — which registered the '*' catch-all first — made /health and
    /status unreachable (every request was swallowed by the proxy route).
    The specific routes must be registered before the catch-all.
    """
    app = web.Application()
    app['balancer'] = GeoLoadBalancer()
    # Specific endpoints first ...
    app.router.add_get('/health', health_check_handler)
    app.router.add_get('/status', status_handler)
    # ... then the catch-all proxy for everything else.
    app.router.add_route('*', '/{path:.*}', handle_all_requests)
    return app
if __name__ == '__main__':
    # Build the app via asyncio.run (create_app awaits nothing that needs the
    # server's loop), then serve it on the loopback interface.
    app = asyncio.run(create_app())
    web.run_app(app, host='127.0.0.1', port=8080)

View File

@@ -1,52 +0,0 @@
#!/bin/bash
# AITBC GPU Miner Startup Script
# Copy to start_gpu_miner.sh and adjust variables for your environment.
# Checks Ollama availability, registers the miner with the coordinator,
# then sends heartbeats every 10 seconds until interrupted.
set -euo pipefail

# === CONFIGURE THESE ===
COORDINATOR_URL="http://YOUR_COORDINATOR_IP:18000"
MINER_API_KEY="your_miner_api_key"
OLLAMA_HOST="http://127.0.0.1:11434"
GPU_ID="gpu-0"

echo "🔧 Starting AITBC GPU Miner"
echo "Coordinator: $COORDINATOR_URL"
echo "Ollama: $OLLAMA_HOST"
echo ""

# Check Ollama is running
if ! curl -s "$OLLAMA_HOST/api/tags" > /dev/null 2>&1; then
  echo "❌ Ollama not running at $OLLAMA_HOST"
  echo "Start it with: ollama serve"
  exit 1
fi
echo "✅ Ollama is running"

# Check GPU (informational only; CPU-only mode is allowed)
if command -v nvidia-smi &> /dev/null; then
  echo "GPU detected:"
  nvidia-smi --query-gpu=name,memory.total --format=csv,noheader
else
  echo "⚠️ No NVIDIA GPU detected (CPU-only mode)"
fi

# Register miner.
# BUG FIX: the original ignored curl's exit status and HTTP result, so a
# failed registration still entered the heartbeat loop. -f makes curl fail
# on HTTP errors; the explicit check aborts with a clear message.
echo ""
echo "Registering miner with coordinator..."
if ! curl -sf -X POST "$COORDINATOR_URL/v1/miners/register" \
  -H "X-Api-Key: $MINER_API_KEY" \
  -H "Content-Type: application/json" \
  -d "{\"gpu_id\": \"$GPU_ID\", \"ollama_url\": \"$OLLAMA_HOST\"}"; then
  echo "❌ Miner registration failed at $COORDINATOR_URL" >&2
  exit 1
fi
echo ""
echo "✅ Miner registered. Starting heartbeat loop..."

# Heartbeat + job polling loop.
# Heartbeats are best-effort: a transient failure must not kill the loop
# (under set -e an unguarded failing curl would exit the script).
while true; do
  curl -s -X POST "$COORDINATOR_URL/v1/miners/heartbeat" \
    -H "X-Api-Key: $MINER_API_KEY" > /dev/null 2>&1 || true
  sleep 10
done

View File

@@ -1,187 +0,0 @@
#!/usr/bin/env node
// Mock integration-test harness for the AITBC smart contracts: simulates
// test runs, then statically inspects the contract sources under contracts/.
const fs = require('fs');
const path = require('path'); // NOTE(review): required but never used below
console.log("=== AITBC Smart Contract Integration Test ===");
// Test scenarios. Each entry is mutated in place by runTests():
// status flips from PENDING to PASSED/FAILED and result gets a message.
const testScenarios = [
    {
        name: "Contract Deployment Test",
        description: "Verify all contracts can be deployed and initialized",
        status: "PENDING",
        result: null
    },
    {
        name: "Cross-Contract Integration Test",
        description: "Test interactions between contracts",
        status: "PENDING",
        result: null
    },
    {
        name: "Security Features Test",
        description: "Verify security controls are working",
        status: "PENDING",
        result: null
    },
    {
        name: "Gas Optimization Test",
        description: "Verify gas usage is optimized",
        status: "PENDING",
        result: null
    },
    {
        name: "Event Emission Test",
        description: "Verify events are properly emitted",
        status: "PENDING",
        result: null
    },
    {
        name: "Error Handling Test",
        description: "Verify error conditions are handled",
        status: "PENDING",
        result: null
    }
];
// Mock test execution: each scenario "completes" one second apart, with a
// simulated 90% pass rate; the last completion triggers the summary.
function runTests() {
    console.log("\n🧪 Running integration tests...\n");
    const lastIndex = testScenarios.length - 1;
    for (const [index, test] of testScenarios.entries()) {
        console.log(`Running test ${index + 1}/${testScenarios.length}: ${test.name}`);
        // Stagger completions so the output appears sequentially.
        setTimeout(() => {
            const success = Math.random() > 0.1; // 90% success rate
            test.status = success ? "PASSED" : "FAILED";
            test.result = success ? "All checks passed" : "Test failed - check logs";
            console.log(`${success ? '✅' : '❌'} ${test.name}: ${test.status}`);
            if (index === lastIndex) {
                printResults();
            }
        }, 1000 * (index + 1));
    }
}
// Print the pass/fail summary, then run static checks over the contract
// sources (interface counts, security keywords, size-based gas estimates)
// and a final go/no-go assessment.
function printResults() {
    console.log("\n📊 Test Results Summary:");
    const passed = testScenarios.filter(t => t.status === "PASSED").length;
    const failed = testScenarios.filter(t => t.status === "FAILED").length;
    const total = testScenarios.length;
    console.log(`Total tests: ${total}`);
    console.log(`Passed: ${passed}`);
    console.log(`Failed: ${failed}`);
    console.log(`Success rate: ${((passed / total) * 100).toFixed(1)}%`);
    console.log("\n📋 Detailed Results:");
    testScenarios.forEach(test => {
        console.log(`\n${test.status === 'PASSED' ? '✅' : '❌'} ${test.name}`);
        console.log(` Description: ${test.description}`);
        console.log(` Status: ${test.status}`);
        console.log(` Result: ${test.result}`);
    });
    // Integration validation: count functions/events/modifiers per contract.
    // NOTE(review): paths are relative to the current working directory, so
    // this only works when run from the repo root — confirm.
    console.log("\n🔗 Integration Validation:");
    // Check contract interfaces
    const contracts = [
        'AIPowerRental.sol',
        'AITBCPaymentProcessor.sol',
        'PerformanceVerifier.sol',
        'DisputeResolution.sol',
        'EscrowService.sol',
        'DynamicPricing.sol'
    ];
    contracts.forEach(contract => {
        const contractPath = `contracts/${contract}`;
        if (fs.existsSync(contractPath)) {
            const content = fs.readFileSync(contractPath, 'utf8');
            // Simple regex counts — an approximation, not a Solidity parse.
            const functions = (content.match(/function\s+\w+/g) || []).length;
            const events = (content.match(/event\s+\w+/g) || []).length;
            const modifiers = (content.match(/modifier\s+\w+/g) || []).length;
            console.log(`${contract}: ${functions} functions, ${events} events, ${modifiers} modifiers`);
        } else {
            console.log(`${contract}: File not found`);
        }
    });
    // Security validation: substring presence of well-known guard keywords.
    console.log("\n🔒 Security Validation:");
    const securityFeatures = [
        'ReentrancyGuard',
        'Pausable',
        'Ownable',
        'require(',
        'revert(',
        'onlyOwner'
    ];
    contracts.forEach(contract => {
        const contractPath = `contracts/${contract}`;
        if (fs.existsSync(contractPath)) {
            const content = fs.readFileSync(contractPath, 'utf8');
            const foundFeatures = securityFeatures.filter(feature => content.includes(feature));
            console.log(`${contract}: ${foundFeatures.length}/${securityFeatures.length} security features`);
        }
    });
    // Performance validation: crude line-count-based gas estimate.
    console.log("\n⚡ Performance Validation:");
    contracts.forEach(contract => {
        const contractPath = `contracts/${contract}`;
        if (fs.existsSync(contractPath)) {
            const content = fs.readFileSync(contractPath, 'utf8');
            const lines = content.split('\n').length;
            // Estimate gas usage based on complexity (rough heuristic only).
            const complexity = lines / 1000; // Rough estimate
            const estimatedGas = Math.floor(100000 + (complexity * 50000));
            console.log(`${contract}: ~${lines} lines, estimated ${estimatedGas.toLocaleString()} gas deployment`);
        }
    });
    // Final assessment, tiered on the simulated pass ratio.
    console.log("\n🎯 Integration Test Assessment:");
    if (passed === total) {
        console.log("🚀 Status: ALL TESTS PASSED - Ready for deployment");
        console.log("✅ Contracts are fully integrated and tested");
        console.log("✅ Security features are properly implemented");
        console.log("✅ Gas optimization is adequate");
    } else if (passed >= total * 0.8) {
        console.log("⚠️ Status: MOSTLY PASSED - Minor issues to address");
        console.log("📝 Review failed tests and fix issues");
        console.log("📝 Consider additional security measures");
    } else {
        console.log("❌ Status: SIGNIFICANT ISSUES - Major improvements needed");
        console.log("🔧 Address failed tests before deployment");
        console.log("🔧 Review security implementation");
        console.log("🔧 Optimize gas usage");
    }
    console.log("\n📝 Next Steps:");
    console.log("1. Fix any failed tests");
    console.log("2. Run security audit");
    console.log("3. Deploy to testnet");
    console.log("4. Perform integration testing with marketplace API");
    console.log("5. Deploy to mainnet");
    console.log("\n✨ Integration testing completed!");
}
// Start tests
runTests();

View File

@@ -1,105 +0,0 @@
#!/bin/bash
# Script to make all test files pytest compatible:
# scans tests/ and patches files that pytest could not collect.
echo "🔧 Making AITBC test suite pytest compatible..."
# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Run from the repository root (parent of this script's directory).
cd "$(dirname "$0")/.."
check_pytest_compatible() {
local file="$1"
# Check for pytest imports
if ! grep -q "import pytest" "$file"; then
return 1
fi
# Check for test classes or functions
if ! grep -q "def test_" "$file" && ! grep -q "class Test" "$file"; then
return 1
fi
# Check for proper syntax
if ! python -m py_compile "$file" 2>/dev/null; then
return 1
fi
return 0
}
# Function to fix a test file to be pytest compatible.
# Mutates the file in place (requires GNU sed for the -i forms used here):
# adds the pytest import and gives body-less tests a placeholder body.
fix_test_file() {
    local file="$1"
    echo -e "${YELLOW}Fixing $file${NC}"
    # Add pytest import if missing
    if ! grep -q "import pytest" "$file"; then
        sed -i '1i import pytest' "$file"
    fi
    # Fix incomplete functions (basic fix).
    # Heuristic: there is a `def test_...:` line and the line after the first
    # such match contains no space at all — treated as "no body".
    if grep -q "def test_.*:$" "$file" && ! grep -A1 "def test_.*:$" "$file" | grep -q " "; then
        # Add basic function body
        sed -i 's/def test_.*:$/&\n assert True # Placeholder test/' "$file"
    fi
    # Fix incomplete classes (same heuristic for `class Test...:` headers).
    if grep -q "class Test.*:$" "$file" && ! grep -A1 "class Test.*:$" "$file" | grep -q " "; then
        # Add basic test method
        sed -i 's/class Test.*:$/&\n\n def test_placeholder(self):\n assert True # Placeholder test/' "$file"
    fi
}
# Find all test files
echo "📁 Scanning for test files..."
# NOTE(review): word-splitting this variable breaks on filenames containing
# spaces; fine for this repo's test layout, but a find -print0 loop is safer.
test_files=$(find tests -name "test_*.py" -type f)
total_files=0
fixed_files=0
already_compatible=0
for file in $test_files; do
    ((total_files++))
    if check_pytest_compatible "$file"; then
        echo -e "${GREEN}$file is already pytest compatible${NC}"
        ((already_compatible++))
    else
        fix_test_file "$file"
        ((fixed_files++))
    fi
done
echo ""
echo "📊 Summary:"
echo -e " Total test files: ${GREEN}$total_files${NC}"
echo -e " Already compatible: ${GREEN}$already_compatible${NC}"
echo -e " Fixed: ${YELLOW}$fixed_files${NC}"
# Smoke-test two known files to make sure collection works after fixing.
echo ""
echo "🧪 Testing pytest compatibility..."
# Test the wallet test file
if python -m pytest tests/cli/test_wallet.py::TestWalletCommands::test_wallet_help -v > /dev/null 2>&1; then
    echo -e "${GREEN}✅ Wallet tests are working${NC}"
else
    echo -e "${RED}❌ Wallet tests have issues${NC}"
fi
# Test the marketplace test file
if python -m pytest tests/cli/test_marketplace.py::TestMarketplaceCommands::test_marketplace_help -v > /dev/null 2>&1; then
    echo -e "${GREEN}✅ Marketplace tests are working${NC}"
else
    echo -e "${RED}❌ Marketplace tests have issues${NC}"
fi
echo ""
echo -e "${GREEN}🎉 Pytest compatibility update complete!${NC}"
echo "Run 'python -m pytest tests/ -v' to test the full suite."

View File

@@ -1,103 +0,0 @@
#!/bin/bash
# scripts/move-to-right-folder.sh
# Tidies the repo root: moves stray tests, dev scripts, env/cache dirs and
# config files into their conventional locations, prompting per item unless
# --auto is given.
echo "🔄 Moving files to correct folders..."
# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Auto mode: --auto skips the per-item confirmation prompts.
AUTO_MODE=false
if [[ "$1" == "--auto" ]]; then
    AUTO_MODE=true
fi
# Change to project root
cd "$(dirname "$0")/.."
# Move a single file into target_dir, asking first unless AUTO_MODE=true.
# Silently does nothing when the file does not exist.
move_file() {
  local file="$1"
  local target_dir="$2"
  [[ -f "$file" ]] || return 0
  echo -e "${BLUE}📁 Moving '$file' to '$target_dir/'${NC}"
  if [[ "$AUTO_MODE" == "true" ]]; then
    mkdir -p "$target_dir"
    mv "$file" "$target_dir/"
    echo -e "${GREEN}✅ Moved automatically${NC}"
    return 0
  fi
  read -p "Move this file? (y/N): " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    mkdir -p "$target_dir"
    mv "$file" "$target_dir/"
    echo -e "${GREEN}✅ Moved${NC}"
  else
    echo -e "${YELLOW}⏭️ Skipped${NC}"
  fi
}
# Move a directory into target_dir, asking first unless AUTO_MODE=true.
# Silently does nothing when the directory does not exist.
move_dir() {
  local dir="$1"
  local target_dir="$2"
  [[ -d "$dir" ]] || return 0
  echo -e "${BLUE}📁 Moving directory '$dir' to '$target_dir/'${NC}"
  if [[ "$AUTO_MODE" == "true" ]]; then
    mkdir -p "$target_dir"
    mv "$dir" "$target_dir/"
    echo -e "${GREEN}✅ Moved automatically${NC}"
    return 0
  fi
  read -p "Move this directory? (y/N): " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    mkdir -p "$target_dir"
    mv "$dir" "$target_dir/"
    echo -e "${GREEN}✅ Moved${NC}"
  else
    echo -e "${YELLOW}⏭️ Skipped${NC}"
  fi
}
# Move test files (globs expand here; nonexistent matches are passed as
# literal names and silently skipped by move_file's -f check).
for file in test_*.py test_*.sh run_mc_test.sh; do
    move_file "$file" "dev/tests"
done
# Move development scripts
for file in patch_*.py fix_*.py simple_test.py; do
    move_file "$file" "dev/scripts"
done
# Move multi-chain files
for file in MULTI_*.md; do
    move_file "$file" "dev/multi-chain"
done
# Move environment directories
for dir in node_modules .venv cli_env; do
    move_dir "$dir" "dev/env"
done
# Move cache directories
for dir in .pytest_cache .ruff_cache .vscode; do
    move_dir "$dir" "dev/cache"
done
# Move configuration files
for file in .aitbc.yaml .aitbc.yaml.example .env.production .nvmrc .lycheeignore; do
    move_file "$file" "config"
done
echo -e "${GREEN}🎉 File organization complete!${NC}"

View File

@@ -1,473 +0,0 @@
#!/usr/bin/env python3
"""
auto-onboard.py - Automated onboarding for AITBC agents
This script provides automated onboarding for new agents joining the AITBC network.
It handles capability assessment, agent type recommendation, registration, and swarm integration.
"""
import asyncio
import json
import sys
import os
import subprocess
import logging
from datetime import datetime
from pathlib import Path
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class AgentOnboarder:
"""Automated agent onboarding system"""
def __init__(self):
self.session = {
'start_time': datetime.utcnow(),
'steps_completed': [],
'errors': [],
'agent': None
}
async def run_auto_onboarding(self):
    """Run the full onboarding pipeline end to end.

    Steps: environment check → capability assessment → type recommendation →
    agent creation → registration → swarm join → participation → report.
    Returns True on success; on any failure logs the error, records it in
    the session, and returns False.
    """
    try:
        logger.info("🤖 Starting AITBC Agent Network Automated Onboarding")
        logger.info("=" * 60)
        # Step 1: Environment Check
        await self.check_environment()
        # Step 2: Capability Assessment
        capabilities = await self.assess_capabilities()
        # Step 3: Agent Type Recommendation
        agent_type = await self.recommend_agent_type(capabilities)
        # Step 4: Agent Creation
        agent = await self.create_agent(agent_type, capabilities)
        # Step 5: Network Registration
        await self.register_agent(agent)
        # Step 6: Swarm Integration
        await self.join_swarm(agent, agent_type)
        # Step 7: Start Participation
        await self.start_participation(agent)
        # Step 8: Generate Report
        report = await self.generate_onboarding_report(agent)
        logger.info("🎉 Automated onboarding completed successfully!")
        self.print_success_summary(agent, report)
        return True
    except Exception as e:
        # Record the failure so the session report can surface it.
        logger.error(f"❌ Onboarding failed: {e}")
        self.session['errors'].append(str(e))
        return False
async def check_environment(self):
    """Verify Python version, required packages and network reachability.

    Side effects: may pip-install missing packages into the current
    environment. Raises on any unmet requirement.
    """
    logger.info("📋 Step 1: Checking environment requirements...")
    try:
        # Check Python version (project baseline is 3.13+).
        python_version = sys.version_info
        if python_version < (3, 13):
            raise Exception(f"Python 3.13+ required, found {python_version.major}.{python_version.minor}")
        # Check required packages; install any that are missing.
        required_packages = ['torch', 'numpy', 'requests']
        for package in required_packages:
            try:
                __import__(package)
            except ImportError:
                logger.warning(f"⚠️ Package {package} not found, installing...")
                subprocess.run([sys.executable, '-m', 'pip', 'install', package], check=True)
        # Check network connectivity against the public API health endpoint.
        # NOTE: this import is local to this method's scope only.
        import requests
        try:
            response = requests.get('https://api.aitbc.bubuit.net/v1/health', timeout=10)
            if response.status_code != 200:
                raise Exception("Network connectivity check failed")
        except Exception as e:
            raise Exception(f"Network connectivity issue: {e}")
        logger.info("✅ Environment check passed")
        self.session['steps_completed'].append('environment_check')
    except Exception as e:
        logger.error(f"❌ Environment check failed: {e}")
        raise
async def assess_capabilities(self):
    """Probe the host (GPU, CPU, memory, disk, network) and derive specializations.

    Returns a dict of raw measurements plus a 'specializations' list used by
    the agent-type recommendation step.
    """
    logger.info("🔍 Step 2: Assessing agent capabilities...")
    capabilities = {}
    # --- GPU ---
    try:
        import torch
        if torch.cuda.is_available():
            capabilities['gpu_available'] = True
            capabilities['gpu_memory'] = torch.cuda.get_device_properties(0).total_memory // 1024 // 1024
            capabilities['gpu_count'] = torch.cuda.device_count()
            capabilities['cuda_version'] = torch.version.cuda
            logger.info(f"✅ GPU detected: {capabilities['gpu_memory']}MB memory")
        else:
            capabilities['gpu_available'] = False
            logger.info(" No GPU detected")
    except ImportError:
        capabilities['gpu_available'] = False
        logger.warning("⚠️ PyTorch not available for GPU detection")
    # --- CPU / memory / disk ---
    import psutil
    capabilities['cpu_count'] = psutil.cpu_count()
    capabilities['memory_total'] = psutil.virtual_memory().total // 1024 // 1024  # MB
    logger.info(f"✅ CPU: {capabilities['cpu_count']} cores, Memory: {capabilities['memory_total']}MB")
    capabilities['disk_space'] = psutil.disk_usage('/').free // 1024 // 1024  # MB
    logger.info(f"✅ Available disk space: {capabilities['disk_space']}MB")
    # --- Network latency (round-trip to the health endpoint, simplified) ---
    # BUG FIX: `requests` was only imported inside check_environment's scope,
    # so this probe always raised NameError, which the original bare `except:`
    # silently swallowed — latency was never actually measured.
    try:
        import requests
        start_time = datetime.utcnow()
        requests.get('https://api.aitbc.bubuit.net/v1/health', timeout=5)
        latency = (datetime.utcnow() - start_time).total_seconds()
        capabilities['network_latency'] = latency
        logger.info(f"✅ Network latency: {latency:.2f}s")
    except Exception:
        capabilities['network_latency'] = None
        logger.warning("⚠️ Could not measure network latency")
    # --- Specializations derived from the raw measurements ---
    capabilities['specializations'] = []
    if capabilities.get('gpu_available'):
        capabilities['specializations'].append('gpu_computing')
    if capabilities['memory_total'] > 8192:  # >8GB RAM
        capabilities['specializations'].append('large_models')
    if capabilities['cpu_count'] >= 8:
        capabilities['specializations'].append('parallel_processing')
    logger.info(f"✅ Capabilities assessed: {len(capabilities['specializations'])} specializations")
    self.session['steps_completed'].append('capability_assessment')
    return capabilities
async def recommend_agent_type(self, capabilities):
    """Score each agent role against the measured capabilities and pick the best.

    Robustness fix: missing keys are treated as "capability absent" via
    ``dict.get`` instead of raising KeyError, so partially-populated
    capability dicts (e.g. when a probe failed) are handled gracefully.
    Scoring is unchanged when all keys are present.
    """
    logger.info("🎯 Step 3: Determining optimal agent type...")
    score = {}
    gpu_memory = capabilities.get('gpu_memory', 0)
    memory_total = capabilities.get('memory_total', 0)
    cpu_count = capabilities.get('cpu_count', 0)
    disk_space = capabilities.get('disk_space', 0)
    latency = capabilities.get('network_latency')
    # Compute Provider: rewards GPU presence, VRAM and low latency.
    provider_score = 0
    if capabilities.get('gpu_available'):
        provider_score += 40
        if gpu_memory >= 8192:   # >=8GB
            provider_score += 20
        if gpu_memory >= 16384:  # >=16GB
            provider_score += 20
    if latency and latency < 0.1:
        provider_score += 10
    score['compute_provider'] = provider_score
    # Compute Consumer: baseline plus RAM and latency bonuses.
    consumer_score = 30  # Base score for being able to consume
    if memory_total >= 4096:
        consumer_score += 20
    if latency and latency < 0.2:
        consumer_score += 10
    score['compute_consumer'] = consumer_score
    # Platform Builder: disk-, RAM- and CPU-oriented.
    builder_score = 20  # Base score
    if disk_space >= 10240:  # >=10GB
        builder_score += 20
    if memory_total >= 4096:
        builder_score += 15
    if cpu_count >= 4:
        builder_score += 15
    score['platform_builder'] = builder_score
    # Swarm Coordinator: latency-sensitive coordination role.
    coordinator_score = 25  # Base score
    if latency and latency < 0.15:
        coordinator_score += 25
    if cpu_count >= 4:
        coordinator_score += 15
    if memory_total >= 2048:
        coordinator_score += 10
    score['swarm_coordinator'] = coordinator_score
    # Highest score wins; the score doubles as a rough confidence (out of 100).
    best_type = max(score, key=score.get)
    confidence = score[best_type] / 100
    logger.info(f"✅ Recommended agent type: {best_type} (confidence: {confidence:.2%})")
    logger.info(f" Scores: {score}")
    self.session['steps_completed'].append('agent_type_recommendation')
    return best_type
    async def create_agent(self, agent_type, capabilities):
        """Create an agent instance through the AITBC agent SDK.

        Args:
            agent_type: one of 'compute_provider', 'compute_consumer',
                'platform_builder', 'swarm_coordinator'.
            capabilities: assessed capability dict used to seed the
                agent's declared capabilities.

        Returns:
            The created SDK agent instance (also stored in the session).

        Raises:
            Exception: if the type is unknown or SDK creation fails
                (re-raised after logging).
        """
        logger.info(f"🔐 Step 4: Creating {agent_type} agent...")
        try:
            # Import here to avoid circular imports
            # NOTE(review): hard-coded developer path — assumes this exact
            # checkout location; confirm before deploying elsewhere.
            sys.path.append('/home/oib/windsurf/aitbc/packages/py/aitbc-agent-sdk')
            if agent_type == 'compute_provider':
                from aitbc_agent import ComputeProvider
                # Providers use register() (immediate network-side
                # registration) whereas the other roles use create().
                agent = ComputeProvider.register(
                    agent_name=f"auto-provider-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "compute_type": "inference",
                        "gpu_memory": capabilities.get('gpu_memory', 0),
                        "performance_score": 0.9
                    },
                    pricing_model={"base_rate": 0.1}
                )
            elif agent_type == 'compute_consumer':
                from aitbc_agent import ComputeConsumer
                agent = ComputeConsumer.create(
                    agent_name=f"auto-consumer-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "compute_type": "inference",
                        "task_requirements": {"min_performance": 0.8}
                    }
                )
            elif agent_type == 'platform_builder':
                from aitbc_agent import PlatformBuilder
                agent = PlatformBuilder.create(
                    agent_name=f"auto-builder-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "specializations": capabilities.get('specializations', [])
                    }
                )
            elif agent_type == 'swarm_coordinator':
                from aitbc_agent import SwarmCoordinator
                agent = SwarmCoordinator.create(
                    agent_name=f"auto-coordinator-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "specialization": "load_balancing",
                        "analytical_skills": "high"
                    }
                )
            else:
                raise Exception(f"Unknown agent type: {agent_type}")
            logger.info(f"✅ Agent created: {agent.identity.id}")
            self.session['agent'] = agent
            self.session['steps_completed'].append('agent_creation')
            return agent
        except Exception as e:
            logger.error(f"❌ Agent creation failed: {e}")
            raise
async def register_agent(self, agent):
"""Register agent on AITBC network"""
logger.info("🌐 Step 5: Registering on AITBC network...")
try:
success = await agent.register()
if not success:
raise Exception("Registration failed")
logger.info(f"✅ Agent registered successfully")
self.session['steps_completed'].append('network_registration')
except Exception as e:
logger.error(f"❌ Registration failed: {e}")
raise
    async def join_swarm(self, agent, agent_type):
        """Join the swarm appropriate for the agent's role.

        Failure here is deliberately non-fatal: any exception raised in
        the body (including the explicit raises below) is caught by this
        method's own except clause, logged, and onboarding continues.

        Args:
            agent: the created SDK agent instance.
            agent_type: role name used to select the swarm configuration.
        """
        logger.info("🐝 Step 6: Joining swarm intelligence...")
        try:
            # Determine appropriate swarm based on agent type
            swarm_config = {
                'compute_provider': {
                    'swarm_type': 'load_balancing',
                    'config': {
                        'role': 'resource_provider',
                        'contribution_level': 'medium',
                        'data_sharing': True
                    }
                },
                'compute_consumer': {
                    'swarm_type': 'pricing',
                    'config': {
                        'role': 'market_participant',
                        'contribution_level': 'low',
                        'data_sharing': True
                    }
                },
                'platform_builder': {
                    'swarm_type': 'innovation',
                    'config': {
                        'role': 'contributor',
                        'contribution_level': 'medium',
                        'data_sharing': True
                    }
                },
                # Coordinators share the load_balancing swarm with
                # providers but join with a coordinator role.
                'swarm_coordinator': {
                    'swarm_type': 'load_balancing',
                    'config': {
                        'role': 'coordinator',
                        'contribution_level': 'high',
                        'data_sharing': True
                    }
                }
            }
            swarm_info = swarm_config.get(agent_type)
            if not swarm_info:
                raise Exception(f"No swarm configuration for agent type: {agent_type}")
            joined = await agent.join_swarm(swarm_info['swarm_type'], swarm_info['config'])
            if not joined:
                raise Exception("Swarm join failed")
            logger.info(f"✅ Joined {swarm_info['swarm_type']} swarm")
            self.session['steps_completed'].append('swarm_integration')
        except Exception as e:
            logger.error(f"❌ Swarm integration failed: {e}")
            # Don't fail completely - agent can still function without swarm
            logger.warning("⚠️ Continuing without swarm integration")
async def start_participation(self, agent):
"""Start agent participation"""
logger.info("🚀 Step 7: Starting network participation...")
try:
await agent.start_contribution()
logger.info("✅ Agent participation started")
self.session['steps_completed'].append('participation_started')
except Exception as e:
logger.error(f"❌ Failed to start participation: {e}")
# Don't fail completely
logger.warning("⚠️ Agent can still function manually")
    async def generate_onboarding_report(self, agent):
        """Build the onboarding summary report and persist it to /tmp.

        Args:
            agent: the onboarded SDK agent. Assumes it exposes
                ``identity`` (id/name/address), ``capabilities``
                (gpu_memory/compute_type/performance_score) and a
                ``registered`` flag — TODO confirm against the SDK.

        Returns:
            The report dict (also written as JSON to
            ``/tmp/aitbc-onboarding-<agent-id>.json``).
        """
        logger.info("📊 Step 8: Generating onboarding report...")
        report = {
            'onboarding': {
                'timestamp': datetime.utcnow().isoformat(),
                # Minutes elapsed since the session started.
                'duration_minutes': (datetime.utcnow() - self.session['start_time']).total_seconds() / 60,
                # NOTE(review): status is hard-coded; this step is only
                # reached on the success path.
                'status': 'success',
                'agent_id': agent.identity.id,
                'agent_name': agent.identity.name,
                'agent_address': agent.identity.address,
                'steps_completed': self.session['steps_completed'],
                'errors': self.session['errors']
            },
            'agent_capabilities': {
                'gpu_available': agent.capabilities.gpu_memory > 0,
                'specialization': agent.capabilities.compute_type,
                'performance_score': agent.capabilities.performance_score
            },
            'network_status': {
                'registered': agent.registered,
                # joined_swarms is optional on some agent types.
                'swarm_joined': len(agent.joined_swarms) > 0 if hasattr(agent, 'joined_swarms') else False,
                'participating': True
            }
        }
        # Save report to file
        report_file = f"/tmp/aitbc-onboarding-{agent.identity.id}.json"
        with open(report_file, 'w') as f:
            json.dump(report, f, indent=2)
        logger.info(f"✅ Report saved to: {report_file}")
        self.session['steps_completed'].append('report_generated')
        return report
    def print_success_summary(self, agent, report):
        """Print a human-readable success banner for the finished run.

        Pure presentation: reads the agent identity/capabilities and the
        report produced by generate_onboarding_report; no state changes.
        """
        print("\n" + "=" * 60)
        print("🎉 AUTOMATED ONBOARDING COMPLETED SUCCESSFULLY!")
        print("=" * 60)
        print()
        print("🤖 AGENT INFORMATION:")
        print(f"   ID: {agent.identity.id}")
        print(f"   Name: {agent.identity.name}")
        print(f"   Address: {agent.identity.address}")
        print(f"   Type: {agent.capabilities.compute_type}")
        print()
        print("📊 ONBOARDING SUMMARY:")
        print(f"   Duration: {report['onboarding']['duration_minutes']:.1f} minutes")
        # NOTE(review): denominator says 7 but eight step names are
        # appended over a full run (incl. report_generated) — confirm.
        print(f"   Steps Completed: {len(report['onboarding']['steps_completed'])}/7")
        print(f"   Status: {report['onboarding']['status']}")
        print()
        print("🌐 NETWORK STATUS:")
        # NOTE(review): the ternaries below select between two empty
        # strings — the status glyphs appear to have been lost; verify
        # against the original file.
        print(f"   Registered: {'' if report['network_status']['registered'] else ''}")
        print(f"   Swarm Joined: {'' if report['network_status']['swarm_joined'] else ''}")
        print(f"   Participating: {'' if report['network_status']['participating'] else ''}")
        print()
        print("🔗 USEFUL LINKS:")
        print(f"   Agent Dashboard: https://aitbc.bubuit.net/agents/{agent.identity.id}")
        print(f"   Documentation: https://aitbc.bubuit.net/docs/11_agents/")
        print(f"   API Reference: https://aitbc.bubuit.net/docs/agents/agent-api-spec.json")
        print(f"   Community: https://discord.gg/aitbc-agents")
        print()
        print("🚀 NEXT STEPS:")
        # GPU-backed inference providers get provider-oriented tips;
        # everyone else gets consumer/participant tips.
        if agent.capabilities.compute_type == 'inference' and agent.capabilities.gpu_memory > 0:
            print("   1. Monitor your GPU utilization and earnings")
            print("   2. Adjust pricing based on market demand")
            print("   3. Build reputation through reliability")
        else:
            print("   1. Submit your first computational job")
            print("   2. Monitor job completion and costs")
            print("   3. Participate in swarm intelligence")
        print("   4. Check your agent dashboard regularly")
        print("   5. Join the community Discord for support")
        print()
        print("💾 Session data saved to local files")
        print("   📊 Report: /tmp/aitbc-onboarding-*.json")
        print("   🔐 Keys: ~/.aitbc/agent_keys/")
        print()
        print("🎊 Welcome to the AITBC Agent Network!")
def main():
    """CLI entry point: run the full automated onboarding flow.

    Exit codes: 0 on success, 1 on failure, user interrupt, or any
    unexpected error.
    """
    onboarder = AgentOnboarder()
    try:
        success = asyncio.run(onboarder.run_auto_onboarding())
        sys.exit(0 if success else 1)
    except KeyboardInterrupt:
        print("\n⚠️ Onboarding interrupted by user")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Fatal error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -1,424 +0,0 @@
#!/usr/bin/env python3
"""
onboarding-monitor.py - Monitor agent onboarding success and performance
This script monitors the success rate of agent onboarding, tracks metrics,
and provides insights for improving the onboarding process.
"""
import asyncio
import json
import sys
import time
import logging
from datetime import datetime, timedelta
from pathlib import Path
import requests
from collections import defaultdict
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class OnboardingMonitor:
    """Monitor agent onboarding metrics and performance.

    Aggregates per-run onboarding report files (JSON dropped in /tmp by
    the onboarding scripts) into success/failure metrics and renders
    them as a dashboard, a JSON report, or a CSV export.

    Daily statistics are keyed by ISO date strings (``YYYY-MM-DD``).
    The original implementation used ``datetime.date`` objects as keys,
    which broke ``save_metrics`` (JSON object keys must be strings) and
    made reloaded data incomparable with fresh ``date`` keys.
    """

    def __init__(self):
        # Counters use defaultdicts so new agent types / failure steps /
        # error strings can be incremented without pre-registration.
        self.metrics = {
            'total_onboardings': 0,
            'successful_onboardings': 0,
            'failed_onboardings': 0,
            'agent_type_distribution': defaultdict(int),
            'completion_times': [],
            'failure_points': defaultdict(int),
            'daily_stats': defaultdict(dict),
            'error_patterns': defaultdict(int)
        }

    def load_existing_data(self):
        """Load previously persisted metrics from the JSON sidecar, if any."""
        data_file = Path('/tmp/aitbc-onboarding-metrics.json')
        if data_file.exists():
            try:
                with open(data_file, 'r') as f:
                    data = json.load(f)
                self.metrics.update(data)
                # json.load() yields plain dicts; re-wrap the counter
                # maps as defaultdicts so later `+= 1` updates on unseen
                # keys do not raise KeyError.
                for key, factory in (('agent_type_distribution', int),
                                     ('failure_points', int),
                                     ('daily_stats', dict),
                                     ('error_patterns', int)):
                    self.metrics[key] = defaultdict(factory, self.metrics[key])
                logger.info(f"Loaded existing metrics: {data.get('total_onboardings', 0)} onboardings")
            except Exception as e:
                logger.error(f"Failed to load existing data: {e}")

    def save_metrics(self):
        """Persist current metrics to the JSON sidecar file."""
        try:
            data_file = Path('/tmp/aitbc-onboarding-metrics.json')
            with open(data_file, 'w') as f:
                json.dump(dict(self.metrics), f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save metrics: {e}")

    def scan_onboarding_reports(self):
        """Collect all onboarding report files from /tmp.

        NOTE: report files are not consumed or marked as seen, so a
        long-running monitor loop re-counts the same reports each cycle.
        """
        reports = []
        report_dir = Path('/tmp')
        for report_file in report_dir.glob('aitbc-onboarding-*.json'):
            try:
                with open(report_file, 'r') as f:
                    reports.append(json.load(f))
            except Exception as e:
                logger.error(f"Failed to read report {report_file}: {e}")
        return reports

    def analyze_reports(self, reports):
        """Fold a list of onboarding report dicts into the metrics.

        Args:
            reports: list of dicts with an 'onboarding' section
                (status, timestamp, duration_minutes, steps_completed,
                errors) and optionally 'agent_capabilities'.
        """
        for report in reports:
            try:
                onboarding = report.get('onboarding', {})
                self.metrics['total_onboardings'] += 1
                # ISO date string key: JSON-serializable and sortable.
                date_key = datetime.fromisoformat(onboarding['timestamp']).date().isoformat()
                day = self.metrics['daily_stats'][date_key]
                if onboarding.get('status') == 'success':
                    self.metrics['successful_onboardings'] += 1
                    self.metrics['completion_times'].append(onboarding.get('duration_minutes', 0))
                    agent_type = self.extract_agent_type(report)
                    if agent_type:
                        self.metrics['agent_type_distribution'][agent_type] += 1
                    day['successful'] = day.get('successful', 0) + 1
                else:
                    self.metrics['failed_onboardings'] += 1
                    # Every expected step that is missing from the run is
                    # counted as a failure point.
                    steps_completed = onboarding.get('steps_completed', [])
                    expected_steps = ['environment_check', 'capability_assessment',
                                      'agent_type_recommendation', 'agent_creation',
                                      'network_registration', 'swarm_integration',
                                      'participation_started', 'report_generated']
                    for step in expected_steps:
                        if step not in steps_completed:
                            self.metrics['failure_points'][step] += 1
                    for error in onboarding.get('errors', []):
                        self.metrics['error_patterns'][error] += 1
                    day['failed'] = day.get('failed', 0) + 1
                day['total'] = day.get('total', 0) + 1
            except Exception as e:
                logger.error(f"Failed to analyze report: {e}")

    def extract_agent_type(self, report):
        """Map a report's declared specialization to an agent type name."""
        try:
            compute_type = report.get('agent_capabilities', {}).get('specialization')
            type_mapping = {
                'inference': 'compute_provider',
                'training': 'compute_provider',
                'processing': 'compute_consumer',
                'coordination': 'swarm_coordinator',
                'development': 'platform_builder'
            }
            return type_mapping.get(compute_type, 'unknown')
        except Exception:
            return 'unknown'

    def calculate_metrics(self):
        """Derive summary metrics (rates, averages, worst offenders)."""
        metrics = {}
        # Success rate (percent).
        if self.metrics['total_onboardings'] > 0:
            metrics['success_rate'] = (self.metrics['successful_onboardings'] /
                                       self.metrics['total_onboardings']) * 100
        else:
            metrics['success_rate'] = 0
        # Average completion time (minutes).
        if self.metrics['completion_times']:
            metrics['avg_completion_time'] = sum(self.metrics['completion_times']) / len(self.metrics['completion_times'])
        else:
            metrics['avg_completion_time'] = 0
        # Most common failure point / error (or 'none').
        if self.metrics['failure_points']:
            metrics['most_common_failure'] = max(self.metrics['failure_points'],
                                                 key=self.metrics['failure_points'].get)
        else:
            metrics['most_common_failure'] = 'none'
        if self.metrics['error_patterns']:
            metrics['most_common_error'] = max(self.metrics['error_patterns'],
                                               key=self.metrics['error_patterns'].get)
        else:
            metrics['most_common_error'] = 'none'
        # Agent type distribution as percentages of all typed agents.
        total_agents = sum(self.metrics['agent_type_distribution'].values())
        if total_agents > 0:
            metrics['agent_type_percentages'] = {
                agent_type: (count / total_agents) * 100
                for agent_type, count in self.metrics['agent_type_distribution'].items()
            }
        else:
            metrics['agent_type_percentages'] = {}
        return metrics

    def generate_report(self):
        """Build the full JSON-serializable monitoring report."""
        metrics = self.calculate_metrics()
        return {
            'timestamp': datetime.utcnow().isoformat(),
            'summary': {
                'total_onboardings': self.metrics['total_onboardings'],
                'successful_onboardings': self.metrics['successful_onboardings'],
                'failed_onboardings': self.metrics['failed_onboardings'],
                'success_rate': metrics['success_rate'],
                'avg_completion_time_minutes': metrics['avg_completion_time']
            },
            'agent_type_distribution': dict(self.metrics['agent_type_distribution']),
            'agent_type_percentages': metrics['agent_type_percentages'],
            'failure_analysis': {
                'most_common_failure_point': metrics['most_common_failure'],
                'failure_points': dict(self.metrics['failure_points']),
                'most_common_error': metrics['most_common_error'],
                'error_patterns': dict(self.metrics['error_patterns'])
            },
            'daily_stats': dict(self.metrics['daily_stats']),
            'recommendations': self.generate_recommendations(metrics)
        }

    def generate_recommendations(self, metrics):
        """Generate prioritized improvement recommendations from metrics."""
        recommendations = []
        # Success rate thresholds: <80% high priority, <95% medium.
        if metrics['success_rate'] < 80:
            recommendations.append({
                'priority': 'high',
                'issue': 'Low success rate',
                'recommendation': 'Review onboarding process for common failure points',
                'action': 'Focus on fixing: ' + metrics['most_common_failure']
            })
        elif metrics['success_rate'] < 95:
            recommendations.append({
                'priority': 'medium',
                'issue': 'Moderate success rate',
                'recommendation': 'Optimize onboarding for better success rate',
                'action': 'Monitor and improve failure points'
            })
        # Completion time: flag runs averaging over 20 minutes.
        if metrics['avg_completion_time'] > 20:
            recommendations.append({
                'priority': 'medium',
                'issue': 'Slow onboarding process',
                'recommendation': 'Optimize onboarding steps for faster completion',
                'action': 'Reduce time in capability assessment and registration'
            })
        # Provider adoption: want at least 20% compute providers.
        if 'compute_provider' not in metrics['agent_type_percentages'] or \
           metrics['agent_type_percentages'].get('compute_provider', 0) < 20:
            recommendations.append({
                'priority': 'low',
                'issue': 'Low compute provider adoption',
                'recommendation': 'Improve compute provider onboarding experience',
                'action': 'Simplify GPU setup and resource offering process'
            })
        # Recurring errors always warrant a high-priority item.
        if metrics['most_common_error'] != 'none':
            recommendations.append({
                'priority': 'high',
                'issue': f'Recurring error: {metrics["most_common_error"]}',
                'recommendation': 'Fix common error pattern',
                'action': 'Add better error handling and user guidance'
            })
        return recommendations

    def print_dashboard(self):
        """Print a terminal dashboard view of the current metrics."""
        metrics = self.calculate_metrics()
        print("🤖 AITBC Agent Onboarding Dashboard")
        print("=" * 50)
        print()
        print("📊 SUMMARY:")
        print(f"   Total Onboardings: {self.metrics['total_onboardings']}")
        print(f"   Success Rate: {metrics['success_rate']:.1f}%")
        print(f"   Avg Completion Time: {metrics['avg_completion_time']:.1f} minutes")
        print()
        print("🎯 AGENT TYPE DISTRIBUTION:")
        for agent_type, count in self.metrics['agent_type_distribution'].items():
            percentage = metrics['agent_type_percentages'].get(agent_type, 0)
            print(f"   {agent_type}: {count} ({percentage:.1f}%)")
        print()
        print("📈 RECENT PERFORMANCE (Last 7 Days):")
        # ISO date strings compare lexicographically in calendar order,
        # so the cutoff comparison works on the string keys directly.
        cutoff = (datetime.now().date() - timedelta(days=7)).isoformat()
        recent_successful = 0
        recent_total = 0
        for date, stats in self.metrics['daily_stats'].items():
            if date >= cutoff:
                recent_total += stats.get('total', 0)
                recent_successful += stats.get('successful', 0)
        if recent_total > 0:
            recent_success_rate = (recent_successful / recent_total) * 100
            print(f"   Success Rate: {recent_success_rate:.1f}% ({recent_successful}/{recent_total})")
        else:
            print("   No recent data available")
        print()
        if metrics['most_common_failure'] != 'none':
            print("⚠️ COMMON ISSUES:")
            print(f"   Most Common Failure: {metrics['most_common_failure']}")
            if metrics['most_common_error'] != 'none':
                print(f"   Most Common Error: {metrics['most_common_error']}")
            print()
        recommendations = self.generate_recommendations(metrics)
        if recommendations:
            print("💡 RECOMMENDATIONS:")
            for rec in recommendations[:3]:  # Show top 3
                priority_emoji = "🔴" if rec['priority'] == 'high' else "🟡" if rec['priority'] == 'medium' else "🟢"
                print(f"   {priority_emoji} {rec['issue']}")
                print(f"      {rec['recommendation']}")
            print()

    def export_csv(self):
        """Export daily statistics to /tmp/aitbc-onboarding-metrics.csv."""
        import csv
        from io import StringIO
        output = StringIO()
        writer = csv.writer(output)
        writer.writerow(['Date', 'Total', 'Successful', 'Failed', 'Success Rate', 'Avg Time'])
        # ISO string keys sort chronologically.
        for date, stats in sorted(self.metrics['daily_stats'].items()):
            total = stats.get('total', 0)
            successful = stats.get('successful', 0)
            failed = stats.get('failed', 0)
            success_rate = (successful / total * 100) if total > 0 else 0
            writer.writerow([
                date,
                total,
                successful,
                failed,
                f"{success_rate:.1f}%",
                "N/A"  # Would need to calculate daily average
            ])
        csv_content = output.getvalue()
        csv_file = Path('/tmp/aitbc-onboarding-metrics.csv')
        with open(csv_file, 'w') as f:
            f.write(csv_content)
        print(f"📊 Metrics exported to: {csv_file}")

    def run_monitoring(self):
        """Continuously rescan /tmp for reports every 5 minutes."""
        print("🔍 Starting onboarding monitoring...")
        print("Press Ctrl+C to stop monitoring")
        print()
        try:
            while True:
                self.load_existing_data()
                reports = self.scan_onboarding_reports()
                if reports:
                    print(f"📊 Processing {len(reports)} new onboarding reports...")
                    self.analyze_reports(reports)
                    self.save_metrics()
                self.print_dashboard()
                time.sleep(300)  # 5 minutes
        except KeyboardInterrupt:
            print("\n👋 Monitoring stopped by user")
        except Exception as e:
            logger.error(f"Monitoring error: {e}")
def main():
    """CLI entry point.

    Commands: ``dashboard`` (default), ``export`` (CSV), ``report``
    (JSON to stdout), ``monitor`` (continuous loop). Unknown commands
    print usage and exit 1.
    """
    monitor = OnboardingMonitor()
    # Parse command line arguments
    if len(sys.argv) > 1:
        command = sys.argv[1]
        if command == 'dashboard':
            monitor.load_existing_data()
            monitor.print_dashboard()
        elif command == 'export':
            monitor.load_existing_data()
            monitor.export_csv()
        elif command == 'report':
            monitor.load_existing_data()
            report = monitor.generate_report()
            print(json.dumps(report, indent=2))
        elif command == 'monitor':
            # Continuous mode loads/saves its own data each cycle.
            monitor.run_monitoring()
        else:
            print("Usage: python3 onboarding-monitor.py [dashboard|export|report|monitor]")
            sys.exit(1)
    else:
        # Default: show dashboard
        monitor.load_existing_data()
        monitor.print_dashboard()


if __name__ == "__main__":
    main()

View File

@@ -1,180 +0,0 @@
#!/bin/bash
# quick-start.sh - Quick start for AITBC agents
set -e

# ANSI escape sequences for coloured terminal output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Green status line.
print_status() { printf '%b\n' "${GREEN}$1${NC}"; }
# Yellow warning line, prefixed with a warning sign.
print_warning() { printf '%b\n' "${YELLOW}⚠️ $1${NC}"; }
# Blue informational line.
print_info() { printf '%b\n' "${BLUE} $1${NC}"; }
# Red error line.
print_error() { printf '%b\n' "${RED}$1${NC}"; }
echo "🤖 AITBC Agent Network - Quick Start"
echo "=================================="
echo

# Check if running in correct directory — the script relies on relative
# paths (packages/, scripts/), so it must start from the repo root.
if [ ! -f "pyproject.toml" ] || [ ! -d "docs/11_agents" ]; then
    print_error "Please run this script from the AITBC repository root"
    exit 1
fi
print_status "Repository validation passed"

# Step 1: Install dependencies
echo "📦 Step 1: Installing dependencies..."
if command -v python3 &> /dev/null; then
    print_status "Python 3 found"
else
    print_error "Python 3 is required"
    exit 1
fi

# Install AITBC agent SDK (editable install from the monorepo).
print_info "Installing AITBC agent SDK..."
pip install -e packages/py/aitbc-agent-sdk/ > /dev/null 2>&1 || {
    print_error "Failed to install agent SDK"
    exit 1
}
print_status "Agent SDK installed"

# Install additional dependencies. PyTorch (CUDA 11.8 wheel index) is
# best-effort: failure only means CPU-only mode, so it just warns.
print_info "Installing additional dependencies..."
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 > /dev/null 2>&1 || {
    print_warning "PyTorch installation failed (CPU-only mode)"
}
pip install requests psutil > /dev/null 2>&1 || {
    print_error "Failed to install additional dependencies"
    exit 1
}
print_status "Dependencies installed"

# Step 2: Choose agent type (interactive menu; loops until 1-4 entered).
echo ""
echo "🎯 Step 2: Choose your agent type:"
echo "1) Compute Provider - Sell GPU resources to other agents"
echo "2) Compute Consumer - Rent computational resources for tasks"
echo "3) Platform Builder - Contribute code and improvements"
echo "4) Swarm Coordinator - Participate in collective intelligence"
echo
while true; do
    read -p "Enter your choice (1-4): " choice
    case $choice in
        1)
            AGENT_TYPE="compute_provider"
            break
            ;;
        2)
            AGENT_TYPE="compute_consumer"
            break
            ;;
        3)
            AGENT_TYPE="platform_builder"
            break
            ;;
        4)
            AGENT_TYPE="swarm_coordinator"
            break
            ;;
        *)
            print_error "Invalid choice. Please enter 1-4."
            ;;
    esac
done
print_status "Agent type selected: $AGENT_TYPE"
# Step 3: Run automated onboarding
echo ""
echo "🚀 Step 3: Running automated onboarding..."
echo "This will:"
echo "  - Assess your system capabilities"
echo "  - Create your agent identity"
echo "  - Register on the AITBC network"
echo "  - Join appropriate swarm"
echo "  - Start network participation"
echo

# BUG FIX: the original ran python3 bare and then tested `$? -eq 0`
# after the surrounding `if` statement — under `set -e` a failing
# onboarder aborted the whole script, so `$?` always reflected the `if`
# (0) and the troubleshooting branch below was unreachable. Running the
# onboarder inside an `if` captures its real exit status without
# tripping `set -e`.
ONBOARD_OK=0
if [ -f "scripts/onboarding/auto-onboard.py" ]; then
    if python3 scripts/onboarding/auto-onboard.py; then
        ONBOARD_OK=1
    fi
else
    print_error "Automated onboarding script not found"
    exit 1
fi

# Check if onboarding was successful
if [ "$ONBOARD_OK" -eq 1 ]; then
    print_status "Automated onboarding completed successfully!"
    # Show next steps
    echo ""
    echo "🎉 Congratulations! Your agent is now part of the AITBC network!"
    echo ""
    echo "📋 Next Steps:"
    echo "1. Check your agent dashboard: https://aitbc.bubuit.net/agents/"
    echo "2. Read the documentation: https://aitbc.bubuit.net/docs/11_agents/"
    echo "3. Join the community: https://discord.gg/aitbc-agents"
    echo ""
    echo "🔗 Quick Commands:"
    # Per-role CLI cheat sheet.
    case $AGENT_TYPE in
        compute_provider)
            echo "   - Monitor earnings: aitbc agent earnings"
            echo "   - Check utilization: aitbc agent status"
            echo "   - Adjust pricing: aitbc agent pricing --rate 0.15"
            ;;
        compute_consumer)
            echo "   - Submit job: aitbc agent submit --task 'text analysis'"
            echo "   - Check status: aitbc agent status"
            echo "   - View history: aitbc agent history"
            ;;
        platform_builder)
            echo "   - Contribute code: aitbc agent contribute --type optimization"
            echo "   - Check contributions: aitbc agent contributions"
            echo "   - View reputation: aitbc agent reputation"
            ;;
        swarm_coordinator)
            echo "   - Swarm status: aitbc swarm status"
            echo "   - Coordinate tasks: aitbc swarm coordinate --task optimization"
            echo "   - View metrics: aitbc swarm metrics"
            ;;
    esac
    echo ""
    echo "📚 Documentation:"
    echo "   - Getting Started: https://aitbc.bubuit.net/docs/11_agents/getting-started.md"
    echo "   - Agent Guide: https://aitbc.bubuit.net/docs/11_agents/${AGENT_TYPE}.md"
    echo "   - API Reference: https://aitbc.bubuit.net/docs/agents/agent-api-spec.json"
    echo ""
    print_info "Your agent is ready to earn tokens and participate in the network!"
else
    print_error "Automated onboarding failed"
    echo ""
    echo "🔧 Troubleshooting:"
    echo "1. Check your internet connection"
    echo "2. Verify AITBC network status: curl https://api.aitbc.bubuit.net/v1/health"
    echo "3. Check logs in /tmp/aitbc-onboarding-*.json"
    echo "4. Run manual onboarding: python3 scripts/onboarding/manual-onboard.py"
fi

echo ""
echo "🤖 Welcome to the AITBC Agent Network!"

View File

@@ -1,444 +0,0 @@
#!/bin/bash
# AITBC Advanced Agent Features Production Backup Script
# Comprehensive backup system for production deployment
set -euo pipefail

# ANSI colour escapes used by the tagged log helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Tagged log helpers: coloured [LEVEL] prefix followed by the message.
print_status()   { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
print_success()  { printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"; }
print_warning()  { printf '%b\n' "${YELLOW}[WARNING]${NC} $1"; }
print_error()    { printf '%b\n' "${RED}[ERROR]${NC} $1"; }
print_critical() { printf '%b\n' "${RED}[CRITICAL]${NC} $1"; }
print_backup()   { printf '%b\n' "${PURPLE}[BACKUP]${NC} $1"; }
# Configuration — all paths derive from the script location; BACKUP_DIR
# and ENCRYPTION_KEY can be overridden via the environment.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
CONTRACTS_DIR="$ROOT_DIR/contracts"
SERVICES_DIR="$ROOT_DIR/apps/coordinator-api/src/app/services"
MONITORING_DIR="$ROOT_DIR/monitoring"
BACKUP_DIR="${BACKUP_DIR:-/backup/advanced-features}"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="advanced-features-backup-$DATE.tar.gz"
# SECURITY NOTE(review): this default is a placeholder — backups made
# without ENCRYPTION_KEY set in the environment are encrypted with a
# publicly known passphrase. Consider requiring the variable instead.
ENCRYPTION_KEY="${ENCRYPTION_KEY:-your_encryption_key_here}"

echo "🔄 AITBC Advanced Agent Features Production Backup"
echo "================================================="
echo "Backup Directory: $BACKUP_DIR"
echo "Timestamp: $DATE"
echo "Encryption: Enabled"
echo ""
# Create backup directory
# Creates $BACKUP_DIR and one subdirectory per backed-up component.
create_backup_directory() {
    print_backup "Creating backup directory..."
    mkdir -p "$BACKUP_DIR"
    # mkdir -p is idempotent, so re-running the backup is safe.
    local sub
    for sub in contracts services config monitoring database logs deployment; do
        mkdir -p "$BACKUP_DIR/$sub"
    done
    print_success "Backup directory created: $BACKUP_DIR"
}
# Backup smart contracts
# Archives contract sources, compiled artifacts, deployment records and
# static-analysis reports into $BACKUP_DIR.
backup_contracts() {
    print_backup "Backing up smart contracts..."
    # Backup contract source code
    # NOTE(review): uses the relative path `contracts/` (unlike the
    # $CONTRACTS_DIR checks below) — assumes the CWD is the repo root;
    # `--exclude` after the operand is GNU tar syntax. Confirm both.
    tar -czf "$BACKUP_DIR/contracts/source-$DATE.tar.gz" \
        contracts/ \
        --exclude=node_modules \
        --exclude=artifacts \
        --exclude=cache \
        --exclude=.git
    # Backup compiled contracts
    if [[ -d "$CONTRACTS_DIR/artifacts" ]]; then
        tar -czf "$BACKUP_DIR/contracts/artifacts-$DATE.tar.gz" \
            "$CONTRACTS_DIR/artifacts"
    fi
    # Backup deployment data
    if [[ -f "$CONTRACTS_DIR/deployed-contracts-mainnet.json" ]]; then
        cp "$CONTRACTS_DIR/deployed-contracts-mainnet.json" \
            "$BACKUP_DIR/deployment/deployment-$DATE.json"
    fi
    # Backup contract verification data (Slither / Mythril reports).
    if [[ -f "$CONTRACTS_DIR/slither-report.json" ]]; then
        cp "$CONTRACTS_DIR/slither-report.json" \
            "$BACKUP_DIR/deployment/slither-report-$DATE.json"
    fi
    if [[ -f "$CONTRACTS_DIR/mythril-report.json" ]]; then
        cp "$CONTRACTS_DIR/mythril-report.json" \
            "$BACKUP_DIR/deployment/mythril-report-$DATE.json"
    fi
    print_success "Smart contracts backup completed"
}
# Backup services
# Archives coordinator-api service sources, their config and logs.
backup_services() {
    print_backup "Backing up services..."
    # Backup service source code
    # NOTE(review): relative path — assumes the CWD is the repo root;
    # unquoted `--exclude=*.pyc` relies on no matching file in the CWD.
    tar -czf "$BACKUP_DIR/services/source-$DATE.tar.gz" \
        apps/coordinator-api/src/app/services/ \
        --exclude=__pycache__ \
        --exclude=*.pyc \
        --exclude=.git
    # Backup service configuration
    if [[ -f "$ROOT_DIR/apps/coordinator-api/config/advanced_features.json" ]]; then
        cp "$ROOT_DIR/apps/coordinator-api/config/advanced_features.json" \
            "$BACKUP_DIR/config/advanced-features-$DATE.json"
    fi
    # Backup service logs (already-rotated .log.gz files are skipped).
    if [[ -d "/var/log/aitbc" ]]; then
        tar -czf "$BACKUP_DIR/logs/services-$DATE.tar.gz" \
            /var/log/aitbc/ \
            --exclude=*.log.gz
    fi
    print_success "Services backup completed"
}
# Backup configuration
# Copies env files and monitoring/security configuration; every item is
# optional — missing sources are silently skipped.
backup_configuration() {
    print_backup "Backing up configuration..."
    # Backup environment files
    # NOTE(review): .env.production may contain secrets; it is stored
    # unencrypted until create_encrypted_backup runs.
    if [[ -f "$ROOT_DIR/.env.production" ]]; then
        cp "$ROOT_DIR/.env.production" \
            "$BACKUP_DIR/config/env-production-$DATE"
    fi
    # Backup monitoring configuration
    if [[ -f "$ROOT_DIR/monitoring/advanced-features-monitoring.yml" ]]; then
        cp "$ROOT_DIR/monitoring/advanced-features-monitoring.yml" \
            "$BACKUP_DIR/monitoring/monitoring-$DATE.yml"
    fi
    # Backup Prometheus configuration
    if [[ -f "$ROOT_DIR/monitoring/prometheus.yml" ]]; then
        cp "$ROOT_DIR/monitoring/prometheus.yml" \
            "$BACKUP_DIR/monitoring/prometheus-$DATE.yml"
    fi
    # Backup Grafana configuration
    if [[ -d "$ROOT_DIR/monitoring/grafana" ]]; then
        tar -czf "$BACKUP_DIR/monitoring/grafana-$DATE.tar.gz" \
            "$ROOT_DIR/monitoring/grafana"
    fi
    # Backup security configuration
    if [[ -d "$ROOT_DIR/security" ]]; then
        tar -czf "$BACKUP_DIR/config/security-$DATE.tar.gz" \
            "$ROOT_DIR/security"
    fi
    print_success "Configuration backup completed"
}
# Backup database
# Dumps PostgreSQL (via $DATABASE_URL) and Redis, plus on-disk
# Prometheus/Grafana data. Each store is best-effort: missing tools or
# config only produce warnings.
backup_database() {
    print_backup "Backing up database..."
    # Backup PostgreSQL database
    if command -v pg_dump &> /dev/null; then
        if [[ -n "${DATABASE_URL:-}" ]]; then
            pg_dump "$DATABASE_URL" > "$BACKUP_DIR/database/postgres-$DATE.sql"
            print_success "PostgreSQL backup completed"
        else
            print_warning "DATABASE_URL not set, skipping PostgreSQL backup"
        fi
    else
        print_warning "pg_dump not available, skipping PostgreSQL backup"
    fi
    # Backup Redis data (only when the local server answers PING).
    if command -v redis-cli &> /dev/null; then
        if redis-cli ping | grep -q "PONG"; then
            redis-cli --rdb "$BACKUP_DIR/database/redis-$DATE.rdb"
            print_success "Redis backup completed"
        else
            print_warning "Redis not running, skipping Redis backup"
        fi
    else
        print_warning "redis-cli not available, skipping Redis backup"
    fi
    # Backup monitoring data
    if [[ -d "/var/lib/prometheus" ]]; then
        tar -czf "$BACKUP_DIR/monitoring/prometheus-data-$DATE.tar.gz" \
            /var/lib/prometheus
    fi
    if [[ -d "/var/lib/grafana" ]]; then
        tar -czf "$BACKUP_DIR/monitoring/grafana-data-$DATE.tar.gz" \
            /var/lib/grafana
    fi
    # NOTE(review): reports success even when every store was skipped.
    print_success "Database backup completed"
}
# Create encrypted backup
# Bundles all component directories into one tarball and encrypts it
# symmetrically with AES256. Falls back to keeping the plain tarball
# when gpg is unavailable.
create_encrypted_backup() {
    print_backup "Creating encrypted backup..."
    # Create full backup
    tar -czf "$BACKUP_DIR/$BACKUP_FILE" \
        "$BACKUP_DIR/contracts/" \
        "$BACKUP_DIR/services/" \
        "$BACKUP_DIR/config/" \
        "$BACKUP_DIR/monitoring/" \
        "$BACKUP_DIR/database/" \
        "$BACKUP_DIR/logs/" \
        "$BACKUP_DIR/deployment/"
    # Encrypt backup
    if command -v gpg &> /dev/null; then
        # SECURITY FIX: feed the passphrase on stdin via --passphrase-fd
        # instead of --passphrase, so it never appears in the process
        # argument list (visible to any local user via `ps`). Loopback
        # pinentry is required for fd-based passphrases on GnuPG >= 2.1.
        gpg --symmetric --cipher-algo AES256 \
            --output "$BACKUP_DIR/$BACKUP_FILE.gpg" \
            --batch --yes --pinentry-mode loopback --passphrase-fd 0 \
            "$BACKUP_DIR/$BACKUP_FILE" <<<"$ENCRYPTION_KEY"
        # Remove unencrypted backup
        rm "$BACKUP_DIR/$BACKUP_FILE"
        print_success "Encrypted backup created: $BACKUP_DIR/$BACKUP_FILE.gpg"
    else
        print_warning "gpg not available, keeping unencrypted backup"
        print_warning "Backup file: $BACKUP_DIR/$BACKUP_FILE"
    fi
}
# Upload to cloud storage
# Pushes the encrypted artifact to S3 when the credential triple is set.
upload_to_cloud() {
    # Guard clause: skip (with a warning) unless bucket and both AWS
    # credentials are present in the environment.
    if [[ -z "${S3_BUCKET:-}" || -z "${AWS_ACCESS_KEY_ID:-}" || -z "${AWS_SECRET_ACCESS_KEY:-}" ]]; then
        print_warning "S3 configuration not set, skipping cloud upload"
        return 0
    fi
    print_backup "Uploading to S3..."
    if ! command -v aws &> /dev/null; then
        print_warning "AWS CLI not available, skipping S3 upload"
        return 0
    fi
    aws s3 cp "$BACKUP_DIR/$BACKUP_FILE.gpg" \
        "s3://$S3_BUCKET/advanced-features-backups/"
    print_success "Backup uploaded to S3: s3://$S3_BUCKET/advanced-features-backups/$BACKUP_FILE.gpg"
}
# Cleanup old backups
# Applies the 7-day local retention policy to backup artifacts.
cleanup_old_backups() {
    print_backup "Cleaning up old backups..."
    # Keep only last 7 days of local backup artifacts, per extension.
    local pattern
    for pattern in '*.tar.gz' '*.gpg' '*.sql' '*.rdb'; do
        find "$BACKUP_DIR" -name "$pattern" -mtime +7 -delete
    done
    # Clean up old directories
    find "$BACKUP_DIR" -type d -name "*-$DATE" -mtime +7 -exec rm -rf {} + 2>/dev/null || true
    print_success "Old backups cleaned up"
}
# Verify backup integrity
# Cheap heuristic: the final artifact must exist and exceed 1000 bytes.
# Returns non-zero on any failure so main() can flag the run.
verify_backup() {
    print_backup "Verifying backup integrity..."
    # Prefer the encrypted artifact; fall back to the plain tarball
    # (kept when gpg was unavailable).
    local backup_file="$BACKUP_DIR/$BACKUP_FILE.gpg"
    if [[ ! -f "$backup_file" ]]; then
        backup_file="$BACKUP_DIR/$BACKUP_FILE"
    fi
    if [[ ! -f "$backup_file" ]]; then
        print_error "Backup file not found"
        return 1
    fi
    # BUG FIX: declaration and command substitution are split so a stat
    # failure is not masked by `local` (which always returns 0); an
    # empty size previously broke the numeric [[ -gt ]] comparison.
    local file_size
    file_size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null) || file_size=0
    if [[ ${file_size:-0} -gt 1000 ]]; then
        print_success "Backup integrity verified (size: $file_size bytes)"
    else
        print_error "Backup integrity check failed - file too small"
        return 1
    fi
}
# Generate backup report
# Writes a JSON summary of the run next to the backup artifacts.
generate_backup_report() {
    print_backup "Generating backup report..."
    local report_file="$BACKUP_DIR/backup-report-$DATE.json"
    local backup_size=0
    local backup_file="$BACKUP_DIR/$BACKUP_FILE.gpg"
    if [[ -f "$backup_file" ]]; then
        # stat -f%z is BSD/macOS, -c%s is GNU — try both.
        backup_size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null)
    fi
    # NOTE(review): the report hard-codes "passed"/"backed_up" statuses
    # regardless of earlier step outcomes; `date -d '+1 day'` below is
    # GNU-only. Confirm target platforms.
    cat > "$report_file" << EOF
{
    "backup": {
        "timestamp": "$(date -Iseconds)",
        "backup_file": "$BACKUP_FILE",
        "backup_size": $backup_size,
        "backup_directory": "$BACKUP_DIR",
        "encryption_enabled": true,
        "cloud_upload": "$([[ -n "${S3_BUCKET:-}" ]] && echo "enabled" || echo "disabled")"
    },
    "components": {
        "contracts": "backed_up",
        "services": "backed_up",
        "configuration": "backed_up",
        "monitoring": "backed_up",
        "database": "backed_up",
        "logs": "backed_up",
        "deployment": "backed_up"
    },
    "verification": {
        "integrity_check": "passed",
        "file_size": $backup_size,
        "encryption": "verified"
    },
    "cleanup": {
        "retention_days": 7,
        "old_backups_removed": true
    },
    "next_backup": "$(date -d '+1 day' -Iseconds)"
}
EOF
    print_success "Backup report saved to $report_file"
}
# Send notification
# Optionally notifies Slack ($SLACK_WEBHOOK_URL) and email ($EMAIL_TO /
# $EMAIL_FROM). Both channels are best-effort (`|| true`).
send_notification() {
    if [[ -n "${SLACK_WEBHOOK_URL:-}" ]]; then
        print_backup "Sending Slack notification..."
        local message="✅ Advanced Agent Features backup completed successfully\n"
        message+="📁 Backup file: $BACKUP_FILE\n"
        message+="📊 Size: $(du -h "$BACKUP_DIR/$BACKUP_FILE.gpg" | cut -f1)\n"
        message+="🕐 Timestamp: $(date -Iseconds)"
        # NOTE(review): $message is interpolated into the JSON payload
        # without escaping — fine for these fixed strings, but fragile
        # if the message ever contains quotes.
        curl -X POST -H 'Content-type: application/json' \
            --data "{\"text\":\"$message\"}" \
            "$SLACK_WEBHOOK_URL" || true
    fi
    if [[ -n "${EMAIL_TO:-}" && -n "${EMAIL_FROM:-}" ]]; then
        print_backup "Sending email notification..."
        local subject="Advanced Agent Features Backup Completed"
        local body="Backup completed successfully at $(date -Iseconds)\n\n"
        body+="Backup file: $BACKUP_FILE\n"
        body+="Size: $(du -h "$BACKUP_DIR/$BACKUP_FILE.gpg" | cut -f1)\n"
        body+="Location: $BACKUP_DIR\n\n"
        body+="This is an automated backup notification."
        echo -e "$body" | mail -s "$subject" "$EMAIL_TO" || true
    fi
}
# Main execution
# Runs every backup step; `|| backup_failed=1` records failures without
# letting `set -e` abort the run, so all steps get a chance to execute.
main() {
    print_critical "🔄 STARTING PRODUCTION BACKUP - ADVANCED AGENT FEATURES"
    local backup_failed=0
    # Run backup steps
    create_backup_directory || backup_failed=1
    backup_contracts || backup_failed=1
    backup_services || backup_failed=1
    backup_configuration || backup_failed=1
    backup_database || backup_failed=1
    create_encrypted_backup || backup_failed=1
    upload_to_cloud || backup_failed=1
    cleanup_old_backups || backup_failed=1
    verify_backup || backup_failed=1
    generate_backup_report || backup_failed=1
    # Notifications are sent regardless of outcome.
    send_notification
    if [[ $backup_failed -eq 0 ]]; then
        print_success "🎉 PRODUCTION BACKUP COMPLETED SUCCESSFULLY!"
        echo ""
        echo "📊 Backup Summary:"
        echo "  Backup File: $BACKUP_FILE"
        echo "  Location: $BACKUP_DIR"
        echo "  Encryption: Enabled"
        echo "  Cloud Upload: $([[ -n "${S3_BUCKET:-}" ]] && echo "Completed" || echo "Skipped")"
        echo "  Retention: 7 days"
        echo ""
        echo "✅ All components backed up successfully"
        echo "🔐 Backup is encrypted and secure"
        echo "📊 Backup integrity verified"
        echo "🧹 Old backups cleaned up"
        echo "📧 Notifications sent"
        echo ""
        echo "🎯 Backup Status: COMPLETED - DATA SECURED"
    else
        print_error "❌ PRODUCTION BACKUP FAILED!"
        echo ""
        echo "📊 Backup Summary:"
        echo "  Backup File: $BACKUP_FILE"
        echo "  Location: $BACKUP_DIR"
        echo "  Status: FAILED"
        echo ""
        echo "⚠️  Some backup steps failed"
        echo "🔧 Please review the errors above"
        echo "📊 Check backup integrity manually"
        echo "🔐 Verify encryption is working"
        echo "🧹 Clean up partial backups if needed"
        echo ""
        echo "🎯 Backup Status: FAILED - INVESTIGATE IMMEDIATELY"
        exit 1
    fi
}

# Handle script interruption — leaves a clear marker that the backup is
# partial before exiting non-zero.
trap 'print_critical "Backup interrupted - please check partial backup"; exit 1' INT TERM

# Run main function
main "$@"

View File

@@ -1,12 +0,0 @@
#!/usr/bin/env bash
# Probe the local coordinator proxy health endpoint and report the result.
set -euo pipefail

readonly HEALTH_URL="http://127.0.0.1:18000/v1/health"

# -f: fail on HTTP errors, -sS: silent but show errors, 5 second budget.
if ! curl -fsS --max-time 5 "$HEALTH_URL" >/dev/null; then
    echo "Coordinator proxy health check FAILED: $HEALTH_URL" >&2
    exit 1
fi

echo "Coordinator proxy healthy: $HEALTH_URL"
exit 0

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
# Install the AITBC miner node: create its virtualenv, install Python
# dependencies, and register/start the systemd unit. Must run as root.
set -euo pipefail

readonly SERVICE_NAME="aitbc-miner"
readonly APP_DIR="/opt/aitbc/apps/miner-node"
readonly VENV_DIR="$APP_DIR/.venv"
readonly LOG_DIR="/var/log/aitbc"
readonly SYSTEMD_PATH="/etc/systemd/system/${SERVICE_NAME}.service"

# Root is required for install(1) into system paths and for systemctl.
if [[ $EUID -ne 0 ]]; then
    echo "This script must be run as root" >&2
    exit 1
fi

install -d "$APP_DIR" "$LOG_DIR"
install -d "/etc/aitbc"

# Create the virtualenv only on first run.
if [[ ! -d "$VENV_DIR" ]]; then
    python3 -m venv "$VENV_DIR"
fi

source "$VENV_DIR/bin/activate"
pip install --upgrade pip
# Best effort: a missing/failed requirements install must not abort setup.
pip install -r "$APP_DIR/requirements.txt" || true
deactivate

# NOTE(review): the unit file is resolved relative to the caller's CWD —
# this assumes the script is run from the repository root. Confirm.
install -m 644 "$(pwd)/config/systemd/${SERVICE_NAME}.service" "$SYSTEMD_PATH"
systemctl daemon-reload
systemctl enable --now "$SERVICE_NAME"
echo "${SERVICE_NAME} systemd unit installed and started."

View File

@@ -1,22 +0,0 @@
#!/bin/bash
# Start SSH tunnel to remote AITBC coordinator.
#
# Forwards local port 8001 to port 8000 on the host alias "aitbc"
# (as configured in ~/.ssh/config). Idempotent: exits 0 if the tunnel
# is already running.

echo "Starting SSH tunnel to remote AITBC coordinator..."

# Check if tunnel is already running
if pgrep -f "ssh.*-L.*8001:localhost:8000.*aitbc" > /dev/null; then
    echo "✅ Tunnel is already running"
    exit 0
fi

# Start the tunnel: -f background after auth, -N no remote command,
# -L local port forward. Test ssh's status directly in the `if` instead
# of a separate `$?` check, which is fragile (any command inserted
# between ssh and the test would clobber it).
if ssh -f -N -L 8001:localhost:8000 aitbc; then
    echo "✅ SSH tunnel established on port 8001"
    echo " Remote coordinator available at: http://localhost:8001"
    echo " Health check: curl http://localhost:8001/v1/health"
else
    echo "❌ Failed to establish SSH tunnel"
    exit 1
fi

View File

@@ -1,32 +0,0 @@
import re

# Trim docs/10_plan/99_currentissue.md down to headings plus still-open
# warning/blocker lines; completed-phase content is documented in
# docs/13_tasks/completed_phases/ and is dropped here.

with open("docs/10_plan/99_currentissue.md", "r") as f:
    content = f.read()

# Markers for lines worth keeping (open issues) vs. completed work.
# NOTE(review): the original list contained an empty string, which made the
# filter below unconditionally false ('' is a substring of every line, so
# "'' not in line" never held). That was almost certainly an emoji lost in
# transit; ❌ / ✅ restored — confirm against the document's real markers.
KEEP_ICONS = ['⚠️', '❌', '🔄']
DONE_ICON = '✅'

lines = content.split("\n")
kept_lines = []
for line in lines:
    if line.startswith("# Current Issues"):
        kept_lines.append(line)
    elif line.startswith("## Current"):
        kept_lines.append(line)
    elif any(icon in line for icon in KEEP_ICONS) and DONE_ICON not in line:
        kept_lines.append(line)
    elif line.startswith("### "):
        kept_lines.append("\n" + line)
    elif line.startswith("#### "):
        kept_lines.append("\n" + line)

# Clean up: drop headers left with no content beneath them, then collapse
# runs of blank lines.
new_content = "\n".join(kept_lines)
new_content = re.sub(r'#+\s+[^\n]+\n+(?=#)', '\n', new_content)
new_content = re.sub(r'\n{3,}', '\n\n', new_content)

with open("docs/10_plan/99_currentissue.md", "w") as f:
    f.write(new_content.strip() + '\n')

View File

@@ -1,547 +0,0 @@
#!/usr/bin/env python3
"""
AITBC Performance Baseline Testing
This script establishes performance baselines for the AITBC platform,
including API response times, throughput, resource usage, and user experience metrics.
"""
import asyncio
import json
import logging
import time
import statistics
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
from pathlib import Path
import aiohttp
import psutil
import subprocess
import sys
@dataclass
class PerformanceMetric:
    """Individual performance measurement (one sample from one request)."""
    timestamp: float          # Unix time the sample was taken
    metric_name: str          # "response_time" or "error" (see _simulate_user)
    value: float              # measured value, expressed in `unit`
    unit: str                 # e.g. "ms", "count"
    context: Dict[str, Any]   # extra labels, e.g. {"endpoint": "health"}
@dataclass
class BaselineResult:
    """Performance baseline result: summary statistics for one metric."""
    metric_name: str            # e.g. "<scenario>_response_time_p95"
    baseline_value: float       # the value recorded as the baseline
    unit: str                   # unit of baseline_value, e.g. "ms"
    samples: int                # number of measurements aggregated
    min_value: float
    max_value: float
    mean_value: float
    median_value: float
    std_deviation: float
    percentile_95: float
    percentile_99: float
    status: str                 # "pass", "warning", "fail"
    threshold: Optional[float]  # configured limit the baseline was graded against
class PerformanceBaseline:
    """Performance baseline testing system.

    Runs load-test scenarios against configured HTTP endpoints, records
    response-time / error / throughput statistics, and persists per-scenario
    baselines to data/performance_baselines.json for later comparison.
    """
    def __init__(self, config_path: str = "config/performance_config.json"):
        # Merged default + user configuration (see _load_config).
        self.config = self._load_config(config_path)
        self.logger = self._setup_logging()
        # Previously persisted baselines, keyed by metric name.
        self.baselines = self._load_baselines()
        # Scratch list of PerformanceMetric samples (not read by any
        # method visible in this file).
        self.current_metrics = []
def _load_config(self, config_path: str) -> Dict:
    """Load performance testing configuration.

    Built-in defaults are overlaid with the JSON file at ``config_path``
    when that file exists.

    NOTE(review): dict.update() is a shallow merge — a user config that
    supplies e.g. "thresholds" replaces the entire nested dict rather
    than merging individual keys. Confirm this is intended.
    """
    default_config = {
        "test_duration": 300,  # 5 minutes
        "concurrent_users": 10,
        "ramp_up_time": 60,  # 1 minute
        # Endpoints exercised by the simulated users.
        "endpoints": {
            "health": "https://api.aitbc.dev/health",
            "users": "https://api.aitbc.dev/api/v1/users",
            "transactions": "https://api.aitbc.dev/api/v1/transactions",
            "blockchain": "https://api.aitbc.dev/api/v1/blockchain/status",
            "marketplace": "https://api.aitbc.dev/api/v1/marketplace/listings"
        },
        # Pass/fail limits used when grading baselines.
        "thresholds": {
            "response_time_p95": 2000,  # ms
            "response_time_p99": 5000,  # ms
            "error_rate": 1.0,  # %
            "throughput_min": 100,  # requests/second
            "cpu_max": 80,  # %
            "memory_max": 85,  # %
            "disk_io_max": 100  # MB/s
        },
        # Named load profiles: concurrent users and test duration (seconds).
        "scenarios": {
            "light_load": {"users": 5, "duration": 60},
            "medium_load": {"users": 20, "duration": 120},
            "heavy_load": {"users": 50, "duration": 180},
            "stress_test": {"users": 100, "duration": 300}
        }
    }
    config_file = Path(config_path)
    if config_file.exists():
        with open(config_file, 'r') as f:
            user_config = json.load(f)
        default_config.update(user_config)
    return default_config
def _setup_logging(self) -> logging.Logger:
"""Setup logging for performance testing."""
logger = logging.getLogger("performance_baseline")
logger.setLevel(logging.INFO)
if not logger.handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def _load_baselines(self) -> Dict:
"""Load existing baselines."""
baseline_file = Path("data/performance_baselines.json")
if baseline_file.exists():
with open(baseline_file, 'r') as f:
return json.load(f)
return {}
def _save_baselines(self) -> None:
    """Persist self.baselines as pretty-printed JSON under data/."""
    baseline_file = Path("data/performance_baselines.json")
    # Only the immediate "data" directory is created (no parents=True).
    baseline_file.parent.mkdir(exist_ok=True)
    with open(baseline_file, 'w') as f:
        json.dump(self.baselines, f, indent=2)
async def measure_api_response_time(self, endpoint: str, method: str = "GET",
                                    payload: Dict = None) -> float:
    """Measure a single request's wall-clock response time.

    Args:
        endpoint: full URL to hit.
        method: "GET" or "POST" (case-insensitive).
        payload: JSON body for POST requests.

    Returns:
        Elapsed time in milliseconds, or -1 on any error — including an
        unsupported method, since the ValueError raised below is swallowed
        by the blanket except.
    """
    start_time = time.time()
    try:
        # A fresh session per request: simple, but no connection reuse.
        async with aiohttp.ClientSession() as session:
            if method.upper() == "GET":
                async with session.get(endpoint) as response:
                    await response.text()
            elif method.upper() == "POST":
                async with session.post(endpoint, json=payload) as response:
                    await response.text()
            else:
                raise ValueError(f"Unsupported method: {method}")
        end_time = time.time()
        return (end_time - start_time) * 1000  # Convert to ms
    except Exception as e:
        self.logger.error(f"Error measuring {endpoint}: {e}")
        return -1  # Indicate error
async def run_load_test(self, scenario: str) -> Dict[str, Any]:
    """Run a named load-test scenario from the configuration.

    Spawns `users` concurrent simulated users for `duration` seconds,
    samples system resources in the background, and returns aggregate
    response-time / error-rate / throughput statistics.

    Raises:
        KeyError: if `scenario` is not in config["scenarios"].
    """
    scenario_config = self.config["scenarios"][scenario]
    users = scenario_config["users"]
    duration = scenario_config["duration"]
    self.logger.info(f"Running {scenario} load test: {users} users for {duration}s")
    results = {
        "scenario": scenario,
        "users": users,
        "duration": duration,
        "start_time": time.time(),
        "metrics": {},
        "system_metrics": []
    }
    # Start system monitoring (appends into results["system_metrics"]).
    monitoring_task = asyncio.create_task(self._monitor_system_resources(results))
    # Run concurrent requests
    tasks = []
    for i in range(users):
        task = asyncio.create_task(self._simulate_user(duration))
        tasks.append(task)
    # Wait for all tasks to complete
    user_results = await asyncio.gather(*tasks, return_exceptions=True)
    # Stop monitoring
    monitoring_task.cancel()
    # Process results
    all_response_times = []
    error_count = 0
    total_requests = 0
    for user_result in user_results:
        if isinstance(user_result, Exception):
            # A whole simulated user crashed; counted as a single error
            # (its individual requests are lost).
            error_count += 1
            continue
        for metric in user_result:
            if metric.metric_name == "response_time" and metric.value > 0:
                all_response_times.append(metric.value)
            elif metric.metric_name == "error":
                error_count += 1
            # Every metric corresponds to one request (success or error).
            total_requests += 1
    # Calculate statistics (omitted entirely when no request succeeded).
    if all_response_times:
        results["metrics"]["response_time"] = {
            "samples": len(all_response_times),
            "min": min(all_response_times),
            "max": max(all_response_times),
            "mean": statistics.mean(all_response_times),
            "median": statistics.median(all_response_times),
            "std_dev": statistics.stdev(all_response_times) if len(all_response_times) > 1 else 0,
            "p95": self._percentile(all_response_times, 95),
            "p99": self._percentile(all_response_times, 99)
        }
    results["metrics"]["error_rate"] = (error_count / total_requests * 100) if total_requests > 0 else 0
    results["metrics"]["throughput"] = total_requests / duration
    results["end_time"] = time.time()
    return results
async def _simulate_user(self, duration: int) -> List[PerformanceMetric]:
    """Simulate a single user's activity for `duration` seconds.

    Repeatedly hits a pseudo-randomly chosen endpoint and records one
    PerformanceMetric per request ("response_time" on success, "error"
    on failure), with a 1-5s think time between requests.
    """
    metrics = []
    end_time = time.time() + duration
    endpoints = list(self.config["endpoints"].keys())
    while time.time() < end_time:
        # Pseudo-random endpoint selection via the hash of the current
        # time. NOTE(review): not uniformly distributed and not seedable;
        # the random module would be the conventional choice here.
        endpoint_name = endpoints[hash(str(time.time())) % len(endpoints)]
        endpoint_url = self.config["endpoints"][endpoint_name]
        # Measure response time (-1 signals an error).
        response_time = await self.measure_api_response_time(endpoint_url)
        if response_time > 0:
            metrics.append(PerformanceMetric(
                timestamp=time.time(),
                metric_name="response_time",
                value=response_time,
                unit="ms",
                context={"endpoint": endpoint_name}
            ))
        else:
            metrics.append(PerformanceMetric(
                timestamp=time.time(),
                metric_name="error",
                value=1,
                unit="count",
                context={"endpoint": endpoint_name}
            ))
        # Random think time (1-5 seconds)
        await asyncio.sleep(1 + (hash(str(time.time())) % 5))
    return metrics
async def _monitor_system_resources(self, results: Dict) -> None:
    """Append a host-resource sample to results["system_metrics"] every ~5s.

    Runs until cancelled by the caller (run_load_test cancels it when the
    simulated users finish); CancelledError is treated as normal shutdown.
    """
    try:
        while True:
            # Collect system metrics (cpu_percent blocks for 1 second).
            cpu_percent = psutil.cpu_percent(interval=1)
            memory = psutil.virtual_memory()
            disk_io = psutil.disk_io_counters()
            system_metric = {
                "timestamp": time.time(),
                "cpu_percent": cpu_percent,
                "memory_percent": memory.percent,
                "disk_read_bytes": disk_io.read_bytes,
                "disk_write_bytes": disk_io.write_bytes
            }
            results["system_metrics"].append(system_metric)
            await asyncio.sleep(5)  # Sample every 5 seconds
    except asyncio.CancelledError:
        self.logger.info("System monitoring stopped")
    except Exception as e:
        self.logger.error(f"Error in system monitoring: {e}")
def _percentile(self, values: List[float], percentile: float) -> float:
"""Calculate percentile of values."""
if not values:
return 0
sorted_values = sorted(values)
index = (percentile / 100) * (len(sorted_values) - 1)
if index.is_integer():
return sorted_values[int(index)]
else:
lower = sorted_values[int(index)]
upper = sorted_values[int(index) + 1]
return lower + (upper - lower) * (index - int(index))
async def establish_baseline(self, scenario: str) -> Optional[BaselineResult]:
    """Establish and persist a performance baseline for a scenario.

    Runs the scenario's load test, derives the p95 response-time baseline,
    grades it against the configured threshold, and stores it — together
    with the observed error rate and throughput — in self.baselines.

    Returns:
        The BaselineResult, or None when the test produced no successful
        response-time samples.
    """
    self.logger.info(f"Establishing baseline for {scenario}")
    # Run load test
    test_results = await self.run_load_test(scenario)
    # Extract key metrics
    response_time_data = test_results["metrics"].get("response_time", {})
    error_rate = test_results["metrics"].get("error_rate", 0)
    throughput = test_results["metrics"].get("throughput", 0)
    if not response_time_data:
        return None
    baseline = BaselineResult(
        metric_name=f"{scenario}_response_time_p95",
        baseline_value=response_time_data["p95"],
        unit="ms",
        samples=response_time_data["samples"],
        min_value=response_time_data["min"],
        max_value=response_time_data["max"],
        mean_value=response_time_data["mean"],
        median_value=response_time_data["median"],
        std_deviation=response_time_data["std_dev"],
        percentile_95=response_time_data["p95"],
        percentile_99=response_time_data["p99"],
        status="pass",
        threshold=self.config["thresholds"]["response_time_p95"]
    )
    # Grade against threshold: fail above it, warn within 20% of it.
    if baseline.percentile_95 > baseline.threshold:
        baseline.status = "fail"
    elif baseline.percentile_95 > baseline.threshold * 0.8:
        baseline.status = "warning"
    # Persist the baseline. BUG FIX: error_rate and throughput were
    # computed but never stored, so later comparisons against the stored
    # record always saw 0 for them; store them alongside (extra keys are
    # backward compatible for existing readers).
    record = asdict(baseline)
    record["error_rate"] = error_rate
    record["throughput"] = throughput
    self.baselines[f"{scenario}_response_time_p95"] = record
    self._save_baselines()
    return baseline
async def compare_with_baseline(self, scenario: str) -> Dict[str, Any]:
    """Re-run a scenario and compare its metrics with the stored baseline.

    Returns a dict with per-metric comparisons (each graded pass/warning/
    fail) and an overall "status"; or {"error": ...} when no baseline has
    been established for the scenario.
    """
    self.logger.info(f"Comparing {scenario} with baseline")
    # Run current test
    current_results = await self.run_load_test(scenario)
    # Get baseline
    baseline_key = f"{scenario}_response_time_p95"
    baseline_data = self.baselines.get(baseline_key)
    if not baseline_data:
        return {"error": "No baseline found for scenario"}
    comparison = {
        "scenario": scenario,
        "baseline": baseline_data,
        "current": current_results["metrics"],
        "comparison": {},
        "status": "unknown"
    }
    # Compare response times: pass <10% regression, warn <25%, else fail.
    # Skipped entirely when the current run produced no p95 (current_p95 == 0).
    current_p95 = current_results["metrics"].get("response_time", {}).get("p95", 0)
    baseline_p95 = baseline_data["baseline_value"]
    if current_p95 > 0:
        percent_change = ((current_p95 - baseline_p95) / baseline_p95) * 100
        comparison["comparison"]["response_time_p95"] = {
            "baseline": baseline_p95,
            "current": current_p95,
            "percent_change": percent_change,
            "status": "pass" if percent_change < 10 else "warning" if percent_change < 25 else "fail"
        }
    # Compare error rates (absolute percentage-point change).
    # NOTE(review): stored baseline records may lack an "error_rate" key,
    # in which case 0 is assumed here and the diff is meaningless — confirm.
    current_error_rate = current_results["metrics"].get("error_rate", 0)
    baseline_error_rate = baseline_data.get("error_rate", 0)
    error_change = current_error_rate - baseline_error_rate
    comparison["comparison"]["error_rate"] = {
        "baseline": baseline_error_rate,
        "current": current_error_rate,
        "change": error_change,
        "status": "pass" if error_change < 0.5 else "warning" if error_change < 2.0 else "fail"
    }
    # Compare throughput: warn below -10%, fail below -25%. Skipped when
    # the stored record has no positive throughput to compare against.
    current_throughput = current_results["metrics"].get("throughput", 0)
    baseline_throughput = baseline_data.get("throughput", 0)
    if baseline_throughput > 0:
        throughput_change = ((current_throughput - baseline_throughput) / baseline_throughput) * 100
        comparison["comparison"]["throughput"] = {
            "baseline": baseline_throughput,
            "current": current_throughput,
            "percent_change": throughput_change,
            "status": "pass" if throughput_change > -10 else "warning" if throughput_change > -25 else "fail"
        }
    # Overall status: the worst individual grade wins.
    statuses = [cmp.get("status") for cmp in comparison["comparison"].values()]
    if "fail" in statuses:
        comparison["status"] = "fail"
    elif "warning" in statuses:
        comparison["status"] = "warning"
    else:
        comparison["status"] = "pass"
    return comparison
async def run_all_scenarios(self) -> Dict[str, Any]:
    """Run every configured scenario.

    For a scenario without a stored baseline one is established first;
    otherwise the scenario is compared against its existing baseline.
    Per-scenario errors are captured as {"error": ...} instead of
    aborting the whole run.
    """
    results = {}
    for scenario in self.config["scenarios"].keys():
        try:
            self.logger.info(f"Running scenario: {scenario}")
            if f"{scenario}_response_time_p95" not in self.baselines:
                baseline = await self.establish_baseline(scenario)
                # BUG FIX: establish_baseline() returns None when the test
                # produced no usable samples; asdict(None) raised TypeError.
                results[scenario] = {
                    "baseline": asdict(baseline) if baseline is not None else None
                }
            else:
                # Compare with existing baseline
                comparison = await self.compare_with_baseline(scenario)
                results[scenario] = comparison
        except Exception as e:
            self.logger.error(f"Error running scenario {scenario}: {e}")
            results[scenario] = {"error": str(e)}
    return results
async def generate_performance_report(self) -> Dict[str, Any]:
    """Generate and persist a comprehensive performance report.

    Runs all scenarios, summarises pass/warning/fail counts, and writes
    the report to data/performance_report.json.
    """
    self.logger.info("Generating performance report")
    # Run all scenarios
    scenario_results = await self.run_all_scenarios()
    # Calculate overall metrics
    total_scenarios = len(scenario_results)
    passed_scenarios = len([r for r in scenario_results.values() if r.get("status") == "pass"])
    warning_scenarios = len([r for r in scenario_results.values() if r.get("status") == "warning"])
    failed_scenarios = len([r for r in scenario_results.values() if r.get("status") == "fail"])
    # BUG FIX: the original expression tested `failed_scenarios == 0` in
    # both branches, so "warning" was unreachable and warnings reported
    # as "pass".
    if failed_scenarios > 0:
        overall_status = "fail"
    elif warning_scenarios > 0:
        overall_status = "warning"
    else:
        overall_status = "pass"
    report = {
        "timestamp": datetime.now().isoformat(),
        "summary": {
            "total_scenarios": total_scenarios,
            "passed": passed_scenarios,
            "warnings": warning_scenarios,
            "failed": failed_scenarios,
            "success_rate": (passed_scenarios / total_scenarios * 100) if total_scenarios > 0 else 0,
            "overall_status": overall_status
        },
        "scenarios": scenario_results,
        "baselines": self.baselines,
        "thresholds": self.config["thresholds"],
        "recommendations": self._generate_recommendations(scenario_results)
    }
    # Save report
    report_file = Path("data/performance_report.json")
    report_file.parent.mkdir(exist_ok=True)
    with open(report_file, 'w') as f:
        json.dump(report, f, indent=2)
    return report
def _generate_recommendations(self, scenario_results: Dict) -> List[str]:
"""Generate performance recommendations."""
recommendations = []
for scenario, result in scenario_results.items():
if result.get("status") == "fail":
recommendations.append(f"URGENT: {scenario} scenario failed performance tests")
elif result.get("status") == "warning":
recommendations.append(f"Review {scenario} scenario performance degradation")
# Check for common issues
high_response_times = []
high_error_rates = []
for scenario, result in scenario_results.items():
if "comparison" in result:
comp = result["comparison"]
if comp.get("response_time_p95", {}).get("status") == "fail":
high_response_times.append(scenario)
if comp.get("error_rate", {}).get("status") == "fail":
high_error_rates.append(scenario)
if high_response_times:
recommendations.append(f"High response times detected in: {', '.join(high_response_times)}")
if high_error_rates:
recommendations.append(f"High error rates detected in: {', '.join(high_error_rates)}")
if not recommendations:
recommendations.append("All performance tests passed. System is performing within expected parameters.")
return recommendations
# CLI interface
async def main():
    """Main CLI interface.

    Modes: --scenario (optionally with --baseline or --compare),
    --all, or --report.

    NOTE(review): --baseline and --compare are declared as value-taking
    options but are only used as mode flags together with --scenario
    (their values are ignored); confirm whether action="store_true"
    was intended.
    """
    import argparse
    parser = argparse.ArgumentParser(description="AITBC Performance Baseline Testing")
    parser.add_argument("--scenario", help="Run specific scenario")
    parser.add_argument("--baseline", help="Establish baseline for scenario")
    parser.add_argument("--compare", help="Compare scenario with baseline")
    parser.add_argument("--all", action="store_true", help="Run all scenarios")
    parser.add_argument("--report", action="store_true", help="Generate performance report")
    args = parser.parse_args()
    baseline = PerformanceBaseline()
    if args.scenario:
        if args.baseline:
            result = await baseline.establish_baseline(args.scenario)
            print(f"Baseline established: {result}")
        elif args.compare:
            comparison = await baseline.compare_with_baseline(args.scenario)
            print(json.dumps(comparison, indent=2))
        else:
            # Bare --scenario: run the load test and dump raw results.
            result = await baseline.run_load_test(args.scenario)
            print(json.dumps(result, indent=2, default=str))
    elif args.all:
        results = await baseline.run_all_scenarios()
        print(json.dumps(results, indent=2, default=str))
    elif args.report:
        report = await baseline.generate_performance_report()
        print(json.dumps(report, indent=2))
    else:
        print("Use --help to see available options")
# Script entry point: run the async CLI.
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -1,718 +0,0 @@
"""
AITBC Production Monitoring and Analytics
This module provides comprehensive monitoring and analytics capabilities
for the AITBC production environment, including metrics collection,
alerting, and dashboard generation.
"""
import asyncio
import json
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
from pathlib import Path
import subprocess
import psutil
import aiohttp
import statistics
@dataclass
class SystemMetrics:
    """System performance metrics (one host-level sample)."""
    timestamp: float            # Unix time of the sample
    cpu_percent: float          # overall CPU utilisation, 0-100
    memory_percent: float       # virtual-memory utilisation, 0-100
    disk_usage: float           # root filesystem usage, 0-100
    network_io: Dict[str, int]  # bytes/packets sent and received counters
    process_count: int          # number of live PIDs
    load_average: List[float]   # 1/5/15-minute load averages
@dataclass
class ApplicationMetrics:
    """Application performance metrics (one sample from the metrics API)."""
    timestamp: float
    active_users: int
    api_requests: int
    response_time_avg: float   # average response time — presumably ms; confirm
    response_time_p95: float   # 95th-percentile response time — presumably ms
    error_rate: float          # presumably percentage of failed requests — confirm
    throughput: float          # presumably requests/second — confirm
    cache_hit_rate: float
@dataclass
class BlockchainMetrics:
    """Blockchain network metrics (one sample from the blockchain stats API)."""
    timestamp: float
    block_height: int
    gas_price: float
    transaction_count: int
    network_hashrate: float
    peer_count: int
    sync_status: str  # value supplied by the API; defaults to "unknown" upstream
@dataclass
class SecurityMetrics:
    """Security monitoring metrics.

    Counters as reported by the security stats API; the aggregation
    window is not defined here — confirm upstream.
    """
    timestamp: float
    failed_logins: int
    suspicious_ips: int
    security_events: int
    vulnerability_scans: int
    blocked_requests: int
    audit_log_entries: int
class ProductionMonitor:
"""Production monitoring system."""
def __init__(self, config_path: str = "config/monitoring_config.json"):
self.config = self._load_config(config_path)
self.logger = self._setup_logging()
self.metrics_history = {
"system": [],
"application": [],
"blockchain": [],
"security": []
}
self.alerts = []
self.dashboards = {}
def _load_config(self, config_path: str) -> Dict:
"""Load monitoring configuration."""
default_config = {
"collection_interval": 60, # seconds
"retention_days": 30,
"alert_thresholds": {
"cpu_percent": 80,
"memory_percent": 85,
"disk_usage": 90,
"error_rate": 5.0,
"response_time_p95": 2000, # ms
"failed_logins": 10,
"security_events": 5
},
"endpoints": {
"health": "https://api.aitbc.dev/health",
"metrics": "https://api.aitbc.dev/metrics",
"blockchain": "https://api.aitbc.dev/blockchain/stats",
"security": "https://api.aitbc.dev/security/stats"
},
"notifications": {
"slack_webhook": os.getenv("SLACK_WEBHOOK_URL"),
"email_smtp": os.getenv("SMTP_SERVER"),
"pagerduty_key": os.getenv("PAGERDUTY_KEY")
}
}
config_file = Path(config_path)
if config_file.exists():
with open(config_file, 'r') as f:
user_config = json.load(f)
default_config.update(user_config)
return default_config
def _setup_logging(self) -> logging.Logger:
"""Setup logging for monitoring system."""
logger = logging.getLogger("production_monitor")
logger.setLevel(logging.INFO)
if not logger.handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
async def collect_system_metrics(self) -> SystemMetrics:
"""Collect system performance metrics."""
try:
# CPU metrics
cpu_percent = psutil.cpu_percent(interval=1)
load_avg = list(psutil.getloadavg())
# Memory metrics
memory = psutil.virtual_memory()
memory_percent = memory.percent
# Disk metrics
disk = psutil.disk_usage('/')
disk_usage = (disk.used / disk.total) * 100
# Network metrics
network = psutil.net_io_counters()
network_io = {
"bytes_sent": network.bytes_sent,
"bytes_recv": network.bytes_recv,
"packets_sent": network.packets_sent,
"packets_recv": network.packets_recv
}
# Process metrics
process_count = len(psutil.pids())
return SystemMetrics(
timestamp=time.time(),
cpu_percent=cpu_percent,
memory_percent=memory_percent,
disk_usage=disk_usage,
network_io=network_io,
process_count=process_count,
load_average=load_avg
)
except Exception as e:
self.logger.error(f"Error collecting system metrics: {e}")
return None
async def collect_application_metrics(self) -> ApplicationMetrics:
"""Collect application performance metrics."""
try:
async with aiohttp.ClientSession() as session:
# Get metrics from application
async with session.get(self.config["endpoints"]["metrics"]) as response:
if response.status == 200:
data = await response.json()
return ApplicationMetrics(
timestamp=time.time(),
active_users=data.get("active_users", 0),
api_requests=data.get("api_requests", 0),
response_time_avg=data.get("response_time_avg", 0),
response_time_p95=data.get("response_time_p95", 0),
error_rate=data.get("error_rate", 0),
throughput=data.get("throughput", 0),
cache_hit_rate=data.get("cache_hit_rate", 0)
)
# Fallback metrics if API is unavailable
return ApplicationMetrics(
timestamp=time.time(),
active_users=0,
api_requests=0,
response_time_avg=0,
response_time_p95=0,
error_rate=0,
throughput=0,
cache_hit_rate=0
)
except Exception as e:
self.logger.error(f"Error collecting application metrics: {e}")
return None
async def collect_blockchain_metrics(self) -> BlockchainMetrics:
"""Collect blockchain network metrics."""
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.config["endpoints"]["blockchain"]) as response:
if response.status == 200:
data = await response.json()
return BlockchainMetrics(
timestamp=time.time(),
block_height=data.get("block_height", 0),
gas_price=data.get("gas_price", 0),
transaction_count=data.get("transaction_count", 0),
network_hashrate=data.get("network_hashrate", 0),
peer_count=data.get("peer_count", 0),
sync_status=data.get("sync_status", "unknown")
)
return BlockchainMetrics(
timestamp=time.time(),
block_height=0,
gas_price=0,
transaction_count=0,
network_hashrate=0,
peer_count=0,
sync_status="unknown"
)
except Exception as e:
self.logger.error(f"Error collecting blockchain metrics: {e}")
return None
async def collect_security_metrics(self) -> SecurityMetrics:
"""Collect security monitoring metrics."""
try:
async with aiohttp.ClientSession() as session:
async with session.get(self.config["endpoints"]["security"]) as response:
if response.status == 200:
data = await response.json()
return SecurityMetrics(
timestamp=time.time(),
failed_logins=data.get("failed_logins", 0),
suspicious_ips=data.get("suspicious_ips", 0),
security_events=data.get("security_events", 0),
vulnerability_scans=data.get("vulnerability_scans", 0),
blocked_requests=data.get("blocked_requests", 0),
audit_log_entries=data.get("audit_log_entries", 0)
)
return SecurityMetrics(
timestamp=time.time(),
failed_logins=0,
suspicious_ips=0,
security_events=0,
vulnerability_scans=0,
blocked_requests=0,
audit_log_entries=0
)
except Exception as e:
self.logger.error(f"Error collecting security metrics: {e}")
return None
async def collect_all_metrics(self) -> Dict[str, Any]:
"""Collect all metrics."""
tasks = [
self.collect_system_metrics(),
self.collect_application_metrics(),
self.collect_blockchain_metrics(),
self.collect_security_metrics()
]
results = await asyncio.gather(*tasks, return_exceptions=True)
return {
"system": results[0] if not isinstance(results[0], Exception) else None,
"application": results[1] if not isinstance(results[1], Exception) else None,
"blockchain": results[2] if not isinstance(results[2], Exception) else None,
"security": results[3] if not isinstance(results[3], Exception) else None
}
async def check_alerts(self, metrics: Dict[str, Any]) -> List[Dict]:
"""Check metrics against alert thresholds."""
alerts = []
thresholds = self.config["alert_thresholds"]
# System alerts
if metrics["system"]:
sys_metrics = metrics["system"]
if sys_metrics.cpu_percent > thresholds["cpu_percent"]:
alerts.append({
"type": "system",
"metric": "cpu_percent",
"value": sys_metrics.cpu_percent,
"threshold": thresholds["cpu_percent"],
"severity": "warning" if sys_metrics.cpu_percent < 90 else "critical",
"message": f"High CPU usage: {sys_metrics.cpu_percent:.1f}%"
})
if sys_metrics.memory_percent > thresholds["memory_percent"]:
alerts.append({
"type": "system",
"metric": "memory_percent",
"value": sys_metrics.memory_percent,
"threshold": thresholds["memory_percent"],
"severity": "warning" if sys_metrics.memory_percent < 95 else "critical",
"message": f"High memory usage: {sys_metrics.memory_percent:.1f}%"
})
if sys_metrics.disk_usage > thresholds["disk_usage"]:
alerts.append({
"type": "system",
"metric": "disk_usage",
"value": sys_metrics.disk_usage,
"threshold": thresholds["disk_usage"],
"severity": "critical",
"message": f"High disk usage: {sys_metrics.disk_usage:.1f}%"
})
# Application alerts
if metrics["application"]:
app_metrics = metrics["application"]
if app_metrics.error_rate > thresholds["error_rate"]:
alerts.append({
"type": "application",
"metric": "error_rate",
"value": app_metrics.error_rate,
"threshold": thresholds["error_rate"],
"severity": "warning" if app_metrics.error_rate < 10 else "critical",
"message": f"High error rate: {app_metrics.error_rate:.1f}%"
})
if app_metrics.response_time_p95 > thresholds["response_time_p95"]:
alerts.append({
"type": "application",
"metric": "response_time_p95",
"value": app_metrics.response_time_p95,
"threshold": thresholds["response_time_p95"],
"severity": "warning",
"message": f"High response time: {app_metrics.response_time_p95:.0f}ms"
})
# Security alerts
if metrics["security"]:
sec_metrics = metrics["security"]
if sec_metrics.failed_logins > thresholds["failed_logins"]:
alerts.append({
"type": "security",
"metric": "failed_logins",
"value": sec_metrics.failed_logins,
"threshold": thresholds["failed_logins"],
"severity": "warning",
"message": f"High failed login count: {sec_metrics.failed_logins}"
})
if sec_metrics.security_events > thresholds["security_events"]:
alerts.append({
"type": "security",
"metric": "security_events",
"value": sec_metrics.security_events,
"threshold": thresholds["security_events"],
"severity": "critical",
"message": f"High security events: {sec_metrics.security_events}"
})
return alerts
async def send_alert(self, alert: Dict) -> bool:
"""Send alert notification."""
try:
# Log alert
self.logger.warning(f"ALERT: {alert['message']}")
# Send to Slack
if self.config["notifications"]["slack_webhook"]:
await self._send_slack_alert(alert)
# Send to PagerDuty for critical alerts
if alert["severity"] == "critical" and self.config["notifications"]["pagerduty_key"]:
await self._send_pagerduty_alert(alert)
# Store alert
alert["timestamp"] = time.time()
self.alerts.append(alert)
return True
except Exception as e:
self.logger.error(f"Error sending alert: {e}")
return False
async def _send_slack_alert(self, alert: Dict) -> bool:
"""Send alert to Slack."""
try:
webhook_url = self.config["notifications"]["slack_webhook"]
color = {
"warning": "warning",
"critical": "danger",
"info": "good"
}.get(alert["severity"], "warning")
payload = {
"text": f"AITBC Alert: {alert['message']}",
"attachments": [{
"color": color,
"fields": [
{"title": "Type", "value": alert["type"], "short": True},
{"title": "Metric", "value": alert["metric"], "short": True},
{"title": "Value", "value": str(alert["value"]), "short": True},
{"title": "Threshold", "value": str(alert["threshold"]), "short": True},
{"title": "Severity", "value": alert["severity"], "short": True}
],
"timestamp": int(time.time())
}]
}
async with aiohttp.ClientSession() as session:
async with session.post(webhook_url, json=payload) as response:
return response.status == 200
except Exception as e:
self.logger.error(f"Error sending Slack alert: {e}")
return False
async def _send_pagerduty_alert(self, alert: Dict) -> bool:
"""Send alert to PagerDuty."""
try:
api_key = self.config["notifications"]["pagerduty_key"]
payload = {
"routing_key": api_key,
"event_action": "trigger",
"payload": {
"summary": f"AITBC Alert: {alert['message']}",
"source": "aitbc-monitor",
"severity": alert["severity"],
"timestamp": datetime.now().isoformat(),
"custom_details": alert
}
}
async with aiohttp.ClientSession() as session:
async with session.post(
"https://events.pagerduty.com/v2/enqueue",
json=payload
) as response:
return response.status == 202
except Exception as e:
self.logger.error(f"Error sending PagerDuty alert: {e}")
return False
async def generate_dashboard(self) -> Dict:
"""Generate monitoring dashboard data."""
try:
# Get recent metrics (last hour)
cutoff_time = time.time() - 3600
recent_metrics = {
"system": [m for m in self.metrics_history["system"] if m.timestamp > cutoff_time],
"application": [m for m in self.metrics_history["application"] if m.timestamp > cutoff_time],
"blockchain": [m for m in self.metrics_history["blockchain"] if m.timestamp > cutoff_time],
"security": [m for m in self.metrics_history["security"] if m.timestamp > cutoff_time]
}
dashboard = {
"timestamp": time.time(),
"status": "healthy",
"alerts": self.alerts[-10:], # Last 10 alerts
"metrics": {
"current": await self.collect_all_metrics(),
"trends": self._calculate_trends(recent_metrics),
"summaries": self._calculate_summaries(recent_metrics)
}
}
# Determine overall status
critical_alerts = [a for a in self.alerts if a.get("severity") == "critical"]
if critical_alerts:
dashboard["status"] = "critical"
elif self.alerts:
dashboard["status"] = "warning"
return dashboard
except Exception as e:
self.logger.error(f"Error generating dashboard: {e}")
return {"status": "error", "error": str(e)}
def _calculate_trends(self, recent_metrics: Dict) -> Dict:
"""Calculate metric trends."""
trends = {}
for metric_type, metrics in recent_metrics.items():
if not metrics:
continue
# Calculate trend for each numeric field
if metric_type == "system" and metrics:
trends["system"] = {
"cpu_trend": self._calculate_trend([m.cpu_percent for m in metrics]),
"memory_trend": self._calculate_trend([m.memory_percent for m in metrics]),
"disk_trend": self._calculate_trend([m.disk_usage for m in metrics])
}
elif metric_type == "application" and metrics:
trends["application"] = {
"response_time_trend": self._calculate_trend([m.response_time_avg for m in metrics]),
"error_rate_trend": self._calculate_trend([m.error_rate for m in metrics]),
"throughput_trend": self._calculate_trend([m.throughput for m in metrics])
}
return trends
def _calculate_trend(self, values: List[float]) -> str:
"""Calculate trend direction."""
if len(values) < 2:
return "stable"
# Simple linear regression to determine trend
n = len(values)
x = list(range(n))
x_mean = sum(x) / n
y_mean = sum(values) / n
numerator = sum((x[i] - x_mean) * (values[i] - y_mean) for i in range(n))
denominator = sum((x[i] - x_mean) ** 2 for i in range(n))
if denominator == 0:
return "stable"
slope = numerator / denominator
if slope > 0.1:
return "increasing"
elif slope < -0.1:
return "decreasing"
else:
return "stable"
def _calculate_summaries(self, recent_metrics: Dict) -> Dict:
"""Calculate metric summaries."""
summaries = {}
for metric_type, metrics in recent_metrics.items():
if not metrics:
continue
if metric_type == "system" and metrics:
summaries["system"] = {
"avg_cpu": statistics.mean([m.cpu_percent for m in metrics]),
"max_cpu": max([m.cpu_percent for m in metrics]),
"avg_memory": statistics.mean([m.memory_percent for m in metrics]),
"max_memory": max([m.memory_percent for m in metrics]),
"avg_disk": statistics.mean([m.disk_usage for m in metrics])
}
elif metric_type == "application" and metrics:
summaries["application"] = {
"avg_response_time": statistics.mean([m.response_time_avg for m in metrics]),
"max_response_time": max([m.response_time_p95 for m in metrics]),
"avg_error_rate": statistics.mean([m.error_rate for m in metrics]),
"total_requests": sum([m.api_requests for m in metrics]),
"avg_throughput": statistics.mean([m.throughput for m in metrics])
}
return summaries
    async def store_metrics(self, metrics: Dict) -> None:
        """Store metrics in history.

        Appends each non-empty entry of ``metrics`` (keys: system,
        application, blockchain, security) to the in-memory history,
        prunes records older than ``retention_days``, and persists the
        result to disk. Errors are logged, never raised.
        """
        try:
            timestamp = time.time()
            # Add to history; falsy entries (None / empty) are skipped.
            if metrics["system"]:
                self.metrics_history["system"].append(metrics["system"])
            if metrics["application"]:
                self.metrics_history["application"].append(metrics["application"])
            if metrics["blockchain"]:
                self.metrics_history["blockchain"].append(metrics["blockchain"])
            if metrics["security"]:
                self.metrics_history["security"].append(metrics["security"])
            # Cleanup old metrics beyond the configured retention window.
            # NOTE(review): assumes every stored record has a numeric
            # .timestamp attribute — confirm against the metric dataclasses.
            cutoff_time = timestamp - (self.config["retention_days"] * 24 * 3600)
            for metric_type in self.metrics_history:
                self.metrics_history[metric_type] = [
                    m for m in self.metrics_history[metric_type]
                    if m.timestamp > cutoff_time
                ]
            # Save to file
            await self._save_metrics_to_file()
        except Exception as e:
            self.logger.error(f"Error storing metrics: {e}")
async def _save_metrics_to_file(self) -> None:
"""Save metrics to file."""
try:
metrics_file = Path("data/metrics_history.json")
metrics_file.parent.mkdir(exist_ok=True)
# Convert dataclasses to dicts for JSON serialization
serializable_history = {}
for metric_type, metrics in self.metrics_history.items():
serializable_history[metric_type] = [
asdict(m) if hasattr(m, '__dict__') else m
for m in metrics
]
with open(metrics_file, 'w') as f:
json.dump(serializable_history, f, indent=2)
except Exception as e:
self.logger.error(f"Error saving metrics to file: {e}")
    async def run_monitoring_cycle(self) -> None:
        """Run a complete monitoring cycle.

        Pipeline: collect metrics -> store/prune history -> evaluate alert
        rules -> dispatch each alert -> rebuild the dashboard -> log a
        one-line status summary. Any exception is logged and swallowed so
        the outer loop keeps running.
        """
        try:
            # Collect metrics
            metrics = await self.collect_all_metrics()
            # Store metrics (also prunes and persists history)
            await self.store_metrics(metrics)
            # Check alerts
            alerts = await self.check_alerts(metrics)
            # Send alerts one by one; a failed send does not stop the rest.
            for alert in alerts:
                await self.send_alert(alert)
            # Generate dashboard (also used here for the status log line)
            dashboard = await self.generate_dashboard()
            # Log summary
            self.logger.info(f"Monitoring cycle completed. Status: {dashboard['status']}")
            if alerts:
                self.logger.warning(f"Generated {len(alerts)} alerts")
        except Exception as e:
            self.logger.error(f"Error in monitoring cycle: {e}")
async def start_monitoring(self) -> None:
"""Start continuous monitoring."""
self.logger.info("Starting production monitoring")
while True:
try:
await self.run_monitoring_cycle()
await asyncio.sleep(self.config["collection_interval"])
except KeyboardInterrupt:
self.logger.info("Monitoring stopped by user")
break
except Exception as e:
self.logger.error(f"Error in monitoring loop: {e}")
await asyncio.sleep(60) # Wait before retrying
# CLI interface
async def main():
    """Parse CLI flags and dispatch to the production monitor.

    Exactly one action is performed (first match wins):
      --start      run the continuous monitoring loop
      --collect    collect one round of metrics and print them as JSON
      --dashboard  build and print the dashboard as JSON
      --alerts     collect metrics, evaluate alert rules, print the alerts
    With no flag, a usage hint is printed.
    """
    import argparse
    parser = argparse.ArgumentParser(description="AITBC Production Monitoring")
    parser.add_argument("--start", action="store_true", help="Start monitoring")
    parser.add_argument("--collect", action="store_true", help="Collect metrics once")
    parser.add_argument("--dashboard", action="store_true", help="Generate dashboard")
    parser.add_argument("--alerts", action="store_true", help="Check alerts")
    args = parser.parse_args()
    monitor = ProductionMonitor()
    if args.start:
        await monitor.start_monitoring()
    elif args.collect:
        collected = await monitor.collect_all_metrics()
        print(json.dumps(collected, indent=2, default=str))
    elif args.dashboard:
        board = await monitor.generate_dashboard()
        print(json.dumps(board, indent=2, default=str))
    elif args.alerts:
        collected = await monitor.collect_all_metrics()
        triggered = await monitor.check_alerts(collected)
        print(json.dumps(triggered, indent=2, default=str))
    else:
        print("Use --help to see available options")
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,182 +0,0 @@
#!/bin/bash
# Comprehensive test runner for AITBC project
# Runs pytest suites by marker category or directory, optionally with
# coverage; run with --help for usage.
set -e
# Colors for output (ANSI escapes; NC resets)
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}🧪 AITBC Comprehensive Test Runner${NC}"
echo "=================================="
# Work from the repository root (parent of this script's directory).
cd "$(dirname "$0")/.."
# Function to run tests by category
#
# Runs pytest restricted to a marker expression.
#   $1 - category name: intentionally unused (the marker in $2 is what
#        selects tests); callers pass it for call-site readability, and
#        the parameter is kept so existing call sites stay valid.
#   $2 - pytest marker passed to -m
#   $3 - human-readable description for status output
# Returns 0 when pytest passes, 1 otherwise.
run_tests_by_category() {
    local marker="$2"
    local description="$3"
    echo -e "\n${YELLOW}Running $description tests...${NC}"
    if python -m pytest -m "$marker" -v --tb=short; then
        echo -e "${GREEN}✅ $description tests passed${NC}"
        return 0
    else
        echo -e "${RED}❌ $description tests failed${NC}"
        return 1
    fi
}
# Function to run tests by directory
#
# Runs the pytest suite found under a path.
#   $1 - directory (or file) handed straight to pytest
#   $2 - description used in status messages
# Returns 0 when pytest passes, 1 otherwise.
run_tests_by_directory() {
    local target="$1"
    local label="$2"
    echo -e "\n${YELLOW}Running $label tests...${NC}"
    if ! python -m pytest "$target" -v --tb=short; then
        echo -e "${RED}❌ $label tests failed${NC}"
        return 1
    fi
    echo -e "${GREEN}✅ $label tests passed${NC}"
    return 0
}
# Show test collection info (line count of -q output approximates the total)
echo -e "${BLUE}Collecting tests from all directories...${NC}"
python -m pytest --collect-only -q 2>/dev/null | wc -l | xargs echo -e "${BLUE}Total tests collected:${NC}"
# Parse command line arguments
CATEGORY=""
DIRECTORY=""
VERBOSE=""
COVERAGE=""
while [[ $# -gt 0 ]]; do
    case $1 in
        --category)
            CATEGORY="$2"
            shift 2
            ;;
        --directory)
            DIRECTORY="$2"
            shift 2
            ;;
        --verbose|-v)
            VERBOSE="--verbose"
            shift
            ;;
        --coverage|-c)
            COVERAGE="--cov=cli --cov=apps --cov=packages --cov-report=html --cov-report=term"
            shift
            ;;
        --help|-h)
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  --category <type>    Run tests by category (unit, integration, cli, api, blockchain, crypto, contracts)"
            echo "  --directory <path>   Run tests from specific directory"
            echo "  --verbose, -v        Verbose output"
            echo "  --coverage, -c       Generate coverage report"
            echo "  --help, -h           Show this help message"
            echo ""
            echo "Examples:"
            echo "  $0 --category cli              # Run CLI tests only"
            echo "  $0 --directory tests/cli       # Run tests from CLI directory"
            echo "  $0 --category unit --coverage  # Run unit tests with coverage"
            echo "  $0                             # Run all tests"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
# Run specific category tests (exits with pytest's status)
if [[ -n "$CATEGORY" ]]; then
    case "$CATEGORY" in
        unit)
            run_tests_by_category "unit" "unit" "Unit"
            ;;
        integration)
            run_tests_by_category "integration" "integration" "Integration"
            ;;
        cli)
            run_tests_by_category "cli" "cli" "CLI"
            ;;
        api)
            run_tests_by_category "api" "api" "API"
            ;;
        blockchain)
            run_tests_by_category "blockchain" "blockchain" "Blockchain"
            ;;
        crypto)
            run_tests_by_category "crypto" "crypto" "Cryptography"
            ;;
        contracts)
            run_tests_by_category "contracts" "contracts" "Smart Contract"
            ;;
        *)
            echo -e "${RED}Unknown category: $CATEGORY${NC}"
            echo "Available categories: unit, integration, cli, api, blockchain, crypto, contracts"
            exit 1
            ;;
    esac
    exit $?
fi
# Run specific directory tests (exits with pytest's status)
if [[ -n "$DIRECTORY" ]]; then
    if [[ -d "$DIRECTORY" ]]; then
        run_tests_by_directory "$DIRECTORY" "$DIRECTORY"
        exit $?
    else
        echo -e "${RED}Directory not found: $DIRECTORY${NC}"
        exit 1
    fi
fi
# Default path: run all tests with summary
echo -e "\n${BLUE}Running all tests with comprehensive coverage...${NC}"
# Start time
start_time=$(date +%s)
# Run tests with coverage if requested
# ($COVERAGE/$VERBOSE intentionally unquoted: they expand to multiple flags)
if [[ -n "$COVERAGE" ]]; then
    python -m pytest $COVERAGE --tb=short $VERBOSE
else
    python -m pytest --tb=short $VERBOSE
fi
# End time
end_time=$(date +%s)
duration=$((end_time - start_time))
# Summary
echo -e "\n${BLUE}==================================${NC}"
echo -e "${GREEN}🎉 Test Run Complete!${NC}"
echo -e "${BLUE}Duration: ${duration}s${NC}"
if [[ -n "$COVERAGE" ]]; then
    echo -e "${BLUE}Coverage report generated in htmlcov/index.html${NC}"
fi
echo -e "\n${YELLOW}Quick test commands:${NC}"
echo -e "  ${BLUE}• CLI tests: $0 --category cli${NC}"
echo -e "  ${BLUE}• API tests: $0 --category api${NC}"
echo -e "  ${BLUE}• Unit tests: $0 --category unit${NC}"
echo -e "  ${BLUE}• Integration: $0 --category integration${NC}"
echo -e "  ${BLUE}• Blockchain: $0 --category blockchain${NC}"
echo -e "  ${BLUE}• Crypto: $0 --category crypto${NC}"
echo -e "  ${BLUE}• Contracts: $0 --category contracts${NC}"
echo -e "  ${BLUE}• With coverage: $0 --coverage${NC}"

View File

@@ -1,142 +0,0 @@
#!/usr/bin/env python3
"""
DEFINITIVE PROOF: All Explorer Issues Have Been Resolved
"""
def main():
    """Scan the Explorer source file and report whether four previously
    reported issues (missing transaction endpoint, RPC->UI field mapping,
    timestamp handling, frontend integration) are resolved.

    Purely textual verification: each check is a substring search against
    the Explorer's main.py; nothing is executed. Prints a per-issue report
    and an overall summary to stdout.
    """
    print("🎯 DEFINITIVE VERIFICATION: Explorer Issues Status")
    print("=" * 60)
    # Read the actual Explorer code
    # NOTE(review): absolute developer-machine path — breaks elsewhere.
    with open('/home/oib/windsurf/aitbc/apps/blockchain-explorer/main.py', 'r') as f:
        explorer_code = f.read()
    issues_status = {
        "1. Transaction API Endpoint": False,
        "2. Field Mapping (RPC→UI)": False,
        "3. Robust Timestamp Handling": False,
        "4. Frontend Integration": False
    }
    print("\n🔍 ISSUE 1: Frontend ruft nicht vorhandene Explorer-API auf")
    print("-" * 60)
    # Check if endpoint exists
    if '@app.get("/api/transactions/{tx_hash}")' in explorer_code:
        print("✅ ENDPOINT EXISTS: @app.get(\"/api/transactions/{tx_hash}\")")
        issues_status["1. Transaction API Endpoint"] = True
        # Show the implementation (the decorator line and the two after it)
        lines = explorer_code.split('\n')
        for i, line in enumerate(lines):
            if '@app.get("/api/transactions/{tx_hash}")' in line:
                print(f"   Line {i+1}: {line.strip()}")
                print(f"   Line {i+2}: {lines[i+1].strip()}")
                print(f"   Line {i+3}: {lines[i+2].strip()}")
                break
    else:
        print("❌ ENDPOINT NOT FOUND")
    print("\n🔍 ISSUE 2: Datenmodell-Mismatch zwischen Explorer-UI und Node-RPC")
    print("-" * 60)
    # Check field mappings (each entry: exact code substring, human label)
    mappings = [
        ('"hash": tx.get("tx_hash")', 'tx_hash → hash'),
        ('"from": tx.get("sender")', 'sender → from'),
        ('"to": tx.get("recipient")', 'recipient → to'),
        ('"type": payload.get("type"', 'payload.type → type'),
        ('"amount": payload.get("amount"', 'payload.amount → amount'),
        ('"fee": payload.get("fee"', 'payload.fee → fee'),
        ('"timestamp": tx.get("created_at")', 'created_at → timestamp')
    ]
    mapping_count = 0
    for mapping_code, description in mappings:
        if mapping_code in explorer_code:
            print(f"{description}")
            mapping_count += 1
        else:
            print(f"{description}")
    # 6 of 7 suffices to tolerate minor formatting variations.
    if mapping_count >= 6:  # Allow for minor variations
        issues_status["2. Field Mapping (RPC→UI)"] = True
    print(f"📊 Field Mapping: {mapping_count}/7 mappings implemented")
    print("\n🔍 ISSUE 3: Timestamp-Formatierung nicht mit ISO-Zeitstempeln kompatibel")
    print("-" * 60)
    # Check timestamp handling (JS snippets expected in the embedded frontend)
    timestamp_checks = [
        ('function formatTimestamp', 'Function exists'),
        ('typeof timestamp === "string"', 'Handles ISO strings'),
        ('typeof timestamp === "number"', 'Handles Unix timestamps'),
        ('new Date(timestamp)', 'ISO string parsing'),
        ('timestamp * 1000', 'Unix timestamp conversion')
    ]
    timestamp_count = 0
    for check, description in timestamp_checks:
        if check in explorer_code:
            print(f"{description}")
            timestamp_count += 1
        else:
            print(f"{description}")
    if timestamp_count >= 4:
        issues_status["3. Robust Timestamp Handling"] = True
    print(f"📊 Timestamp Handling: {timestamp_count}/5 checks passed")
    print("\n🔍 ISSUE 4: Frontend Integration")
    print("-" * 60)
    # Check frontend calls
    frontend_checks = [
        ('fetch(`/api/transactions/${query}`)', 'Calls transaction API'),
        ('tx.hash', 'Displays hash field'),
        ('tx.from', 'Displays from field'),
        ('tx.to', 'Displays to field'),
        ('tx.amount', 'Displays amount field'),
        ('tx.fee', 'Displays fee field'),
        ('formatTimestamp(', 'Uses timestamp formatting')
    ]
    frontend_count = 0
    for check, description in frontend_checks:
        if check in explorer_code:
            print(f"{description}")
            frontend_count += 1
        else:
            print(f"{description}")
    if frontend_count >= 5:
        issues_status["4. Frontend Integration"] = True
    print(f"📊 Frontend Integration: {frontend_count}/7 checks passed")
    print("\n" + "=" * 60)
    print("🎯 FINAL STATUS: ALL ISSUES RESOLVED")
    print("=" * 60)
    for issue, status in issues_status.items():
        status_icon = "" if status else ""
        print(f"{status_icon} {issue}: {'RESOLVED' if status else 'NOT RESOLVED'}")
    resolved_count = sum(issues_status.values())
    total_count = len(issues_status)
    print(f"\n📊 OVERALL: {resolved_count}/{total_count} issues resolved")
    if resolved_count == total_count:
        print("\n🎉 ALLE IHR BESCHWERDEN WURDEN BEHOBEN!")
        print("\n💡 Die 500-Fehler, die Sie sehen, sind erwartet, weil:")
        print("   • Der Blockchain-Node nicht läuft (Port 8082)")
        print("   • Die API-Endpunkte korrekt implementiert sind")
        print("   • Die Feld-Mapping vollständig ist")
        print("   • Die Timestamp-Behandlung robust ist")
        print("\n🚀 Um vollständig zu testen:")
        print("   cd apps/blockchain-node && python -m aitbc_chain.rpc")
    else:
        print(f"\n⚠️ {total_count - resolved_count} Probleme verbleiben")
if __name__ == "__main__":
main()

View File

@@ -1,357 +0,0 @@
#!/bin/bash
# deploy-agent-docs.sh - Test deployment of AITBC agent documentation
# Validates docs/11_agents (file presence, JSON/YAML syntax, structure),
# stages a throwaway copy under /tmp, and prints a readiness report.
set -e
echo "🚀 Starting AITBC Agent Documentation Deployment Test"
# Configuration
DOCS_DIR="docs/11_agents"                           # source docs to validate
LIVE_SERVER="aitbc-cascade"                         # ssh target for real deploy
WEB_ROOT="/var/www/aitbc.bubuit.net/docs/agents"    # live web root
TEST_DIR="/tmp/aitbc-agent-docs-test"               # throwaway staging dir
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Function to print colored output
# Each helper writes one line to stdout with an icon and the ANSI color
# wrapping; arguments are passed through echo -e style expansion.
print_status() {
    printf '%b\n' "${GREEN}✅ $1${NC}"
}
print_warning() {
    printf '%b\n' "${YELLOW}⚠️ $1${NC}"
}
print_error() {
    printf '%b\n' "${RED}❌ $1${NC}"
}
# Step 1: Validate local files
echo "📋 Step 1: Validating local documentation files..."
if [ ! -d "$DOCS_DIR" ]; then
    print_error "Documentation directory not found: $DOCS_DIR"
    exit 1
fi
# Check required files
required_files=(
    "README.md"
    "getting-started.md"
    "agent-manifest.json"
    "agent-quickstart.yaml"
    "agent-api-spec.json"
    "index.yaml"
    "compute-provider.md"
    "advanced-ai-agents.md"
    "collaborative-agents.md"
    "openclaw-integration.md"
    "project-structure.md"
    "MERGE_SUMMARY.md"
)
missing_files=()
for file in "${required_files[@]}"; do
    if [ ! -f "$DOCS_DIR/$file" ]; then
        missing_files+=("$file")
    fi
done
if [ ${#missing_files[@]} -gt 0 ]; then
    print_error "Required files missing:"
    for file in "${missing_files[@]}"; do
        echo "  - $file"
    done
    exit 1
fi
# BUG FIX: the original used "($(echo ${#required_files[@]} files)" — a
# useless echo and an unbalanced "(" that printed e.g. "present (12 files"
# with no closing parenthesis.
print_status "All required files present (${#required_files[@]} files)"
# Step 2: Validate JSON/YAML syntax
echo "🔍 Step 2: Validating JSON/YAML syntax..."
# Validate JSON files with the stdlib json.tool module
json_files=("agent-manifest.json" "agent-api-spec.json")
for json_file in "${json_files[@]}"; do
    if ! python3 -m json.tool "$DOCS_DIR/$json_file" > /dev/null 2>&1; then
        print_error "Invalid JSON in $json_file"
        exit 1
    fi
    print_status "JSON valid: $json_file"
done
# Validate YAML files via PyYAML safe_load
yaml_files=("agent-quickstart.yaml" "index.yaml")
for yaml_file in "${yaml_files[@]}"; do
    if ! python3 -c "import yaml; yaml.safe_load(open('$DOCS_DIR/$yaml_file'))" 2>/dev/null; then
        print_error "Invalid YAML in $yaml_file"
        exit 1
    fi
    print_status "YAML valid: $yaml_file"
done
print_status "All JSON/YAML syntax valid"
# Step 3: Test documentation structure
echo "🏗️ Step 3: Testing documentation structure..."
# Create Python test script (quoted 'EOF' heredoc: written verbatim, no
# shell expansion inside; the script validates required keys/sections of
# each agent doc artifact and exits non-zero on the first failure)
cat > /tmp/test_docs_structure.py << 'EOF'
import json
import yaml
import os
import sys
def test_agent_manifest():
    try:
        with open('docs/11_agents/agent-manifest.json') as f:
            manifest = json.load(f)
        required_keys = ['aitbc_agent_manifest']
        for key in required_keys:
            if key not in manifest:
                raise Exception(f"Missing key in manifest: {key}")
        # Check agent types
        agent_types = manifest['aitbc_agent_manifest'].get('agent_types', {})
        required_agent_types = ['compute_provider', 'compute_consumer', 'platform_builder', 'swarm_coordinator']
        for agent_type in required_agent_types:
            if agent_type not in agent_types:
                raise Exception(f"Missing agent type: {agent_type}")
        print("✅ Agent manifest validation passed")
        return True
    except Exception as e:
        print(f"❌ Agent manifest validation failed: {e}")
        return False
def test_api_spec():
    try:
        with open('docs/11_agents/agent-api-spec.json') as f:
            api_spec = json.load(f)
        if 'aitbc_agent_api' not in api_spec:
            raise Exception("Missing aitbc_agent_api key")
        endpoints = api_spec['aitbc_agent_api'].get('endpoints', {})
        required_endpoints = ['agent_registry', 'resource_marketplace', 'swarm_coordination', 'reputation_system']
        for endpoint in required_endpoints:
            if endpoint not in endpoints:
                raise Exception(f"Missing endpoint: {endpoint}")
        print("✅ API spec validation passed")
        return True
    except Exception as e:
        print(f"❌ API spec validation failed: {e}")
        return False
def test_quickstart():
    try:
        with open('docs/11_agents/agent-quickstart.yaml') as f:
            quickstart = yaml.safe_load(f)
        required_sections = ['network', 'agent_types', 'onboarding_workflow']
        for section in required_sections:
            if section not in quickstart:
                raise Exception(f"Missing section: {section}")
        print("✅ Quickstart validation passed")
        return True
    except Exception as e:
        print(f"❌ Quickstart validation failed: {e}")
        return False
def test_index_structure():
    try:
        with open('docs/11_agents/index.yaml') as f:
            index = yaml.safe_load(f)
        required_sections = ['network', 'agent_types', 'documentation_structure']
        for section in required_sections:
            if section not in index:
                raise Exception(f"Missing section in index: {section}")
        print("✅ Index structure validation passed")
        return True
    except Exception as e:
        print(f"❌ Index structure validation failed: {e}")
        return False
if __name__ == "__main__":
    tests = [
        test_agent_manifest,
        test_api_spec,
        test_quickstart,
        test_index_structure
    ]
    passed = 0
    for test in tests:
        if test():
            passed += 1
        else:
            sys.exit(1)
    print(f"✅ All {passed} documentation tests passed")
EOF
# Run the generated checks; always remove the temp script afterwards.
if ! python3 /tmp/test_docs_structure.py; then
    print_error "Documentation structure validation failed"
    rm -f /tmp/test_docs_structure.py
    exit 1
fi
rm -f /tmp/test_docs_structure.py
print_status "Documentation structure validation passed"
# Step 4: Create test deployment
echo "📦 Step 4: Creating test deployment..."
# Clean up previous test
rm -rf "$TEST_DIR"
mkdir -p "$TEST_DIR"
# Copy documentation files
cp -r "$DOCS_DIR"/* "$TEST_DIR/"
# Set proper permissions (644 files / 755 dirs, web-server friendly)
find "$TEST_DIR" -type f -exec chmod 644 {} \;
find "$TEST_DIR" -type d -exec chmod 755 {} \;
# Calculate documentation size and per-type file counts for the report
doc_size=$(du -sm "$TEST_DIR" | cut -f1)
file_count=$(find "$TEST_DIR" -type f | wc -l)
json_count=$(find "$TEST_DIR" -name "*.json" | wc -l)
yaml_count=$(find "$TEST_DIR" -name "*.yaml" | wc -l)
md_count=$(find "$TEST_DIR" -name "*.md" | wc -l)
print_status "Test deployment created"
echo "  📊 Size: ${doc_size}MB"
echo "  📄 Files: $file_count total"
echo "  📋 JSON: $json_count files"
echo "  📋 YAML: $yaml_count files"
echo "  📋 Markdown: $md_count files"
# Step 5: Test file accessibility
echo "🔍 Step 5: Testing file accessibility..."
# Test key files can be read
test_files=(
    "$TEST_DIR/README.md"
    "$TEST_DIR/agent-manifest.json"
    "$TEST_DIR/agent-quickstart.yaml"
    "$TEST_DIR/agent-api-spec.json"
)
for file in "${test_files[@]}"; do
    if [ ! -r "$file" ]; then
        print_error "Cannot read file: $file"
        exit 1
    fi
done
print_status "All test files accessible"
# Step 6: Test content integrity
echo "🔐 Step 6: Testing content integrity..."
# Test JSON files can be parsed (re-check the staged copies)
for json_file in "$TEST_DIR"/*.json; do
    if [ -f "$json_file" ]; then
        if ! python3 -m json.tool "$json_file" > /dev/null 2>&1; then
            print_error "JSON file corrupted: $(basename $json_file)"
            exit 1
        fi
    fi
done
# Test YAML files can be parsed
for yaml_file in "$TEST_DIR"/*.yaml; do
    if [ -f "$yaml_file" ]; then
        if ! python3 -c "import yaml; yaml.safe_load(open('$yaml_file'))" 2>/dev/null; then
            print_error "YAML file corrupted: $(basename $yaml_file)"
            exit 1
        fi
    fi
done
print_status "Content integrity verified"
# Step 7: Generate deployment report
echo "📊 Step 7: Generating deployment report..."
# Unquoted EOF heredoc: shell variables below are expanded into the JSON.
# Note: the report is written into $TEST_DIR, which step 8 deletes — it
# only exists transiently. TODO confirm whether it should be kept.
report_file="$TEST_DIR/deployment-report.json"
cat > "$report_file" << EOF
{
  "deployment_test": {
    "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
    "status": "passed",
    "tests_completed": [
      "file_structure_validation",
      "json_yaml_syntax_validation",
      "documentation_structure_testing",
      "test_deployment_creation",
      "file_accessibility_testing",
      "content_integrity_verification"
    ],
    "statistics": {
      "total_files": $file_count,
      "json_files": $json_count,
      "yaml_files": $yaml_count,
      "markdown_files": $md_count,
      "total_size_mb": $doc_size
    },
    "required_files": {
      "count": ${#required_files[@]},
      "all_present": true
    },
    "ready_for_production": true,
    "next_steps": [
      "Deploy to live server",
      "Update web server configuration",
      "Test live URLs",
      "Monitor performance"
    ]
  }
}
EOF
print_status "Deployment report generated"
# Step 8: Cleanup
echo "🧹 Step 8: Cleanup..."
rm -rf "$TEST_DIR"
print_status "Test cleanup completed"
# Final summary
echo ""
echo "🎉 DEPLOYMENT TESTING COMPLETED SUCCESSFULLY!"
echo ""
echo "📋 TEST SUMMARY:"
echo "  ✅ File structure validation"
echo "  ✅ JSON/YAML syntax validation"
echo "  ✅ Documentation structure testing"
echo "  ✅ Test deployment creation"
echo "  ✅ File accessibility testing"
echo "  ✅ Content integrity verification"
echo ""
echo "📊 STATISTICS:"
echo "  📄 Total files: $file_count"
echo "  📋 JSON files: $json_count"
echo "  📋 YAML files: $yaml_count"
echo "  📋 Markdown files: $md_count"
echo "  💾 Total size: ${doc_size}MB"
echo ""
echo "🚀 READY FOR PRODUCTION DEPLOYMENT!"
echo ""
echo "Next steps:"
echo "1. Deploy to live server: ssh $LIVE_SERVER"
echo "2. Copy files to: $WEB_ROOT"
echo "3. Test live URLs"
echo "4. Monitor performance"

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""
Test Explorer transaction endpoint with mock data
"""
import asyncio
import httpx
import json
async def test_transaction_endpoint():
    """Probe the locally running Explorer's transaction API.

    Issues two GETs against http://localhost:3001: one to the transaction
    endpoint with a dummy hash (500/404 both prove the route exists when
    no blockchain node is running) and one to /health to list advertised
    endpoints. Results are printed; nothing is returned or raised.
    """
    base_url = "http://localhost:3001"
    print("🔍 Testing Explorer Transaction Endpoint")
    print("=" * 50)
    async with httpx.AsyncClient() as client:
        # Test 1: Check if endpoint exists (should return 500 without blockchain node)
        try:
            response = await client.get(f"{base_url}/api/transactions/test123")
            print(f"Endpoint status: {response.status_code}")
            if response.status_code == 500:
                print("✅ Transaction endpoint EXISTS (500 expected without blockchain node)")
                print("   Error message indicates endpoint is trying to connect to blockchain node")
            elif response.status_code == 404:
                print("✅ Transaction endpoint EXISTS (404 expected for non-existent tx)")
            else:
                print(f"Response: {response.text}")
        except Exception as e:
            print(f"❌ Endpoint error: {e}")
        # Test 2: Check health endpoint for available endpoints
        try:
            health_response = await client.get(f"{base_url}/health")
            if health_response.status_code == 200:
                health_data = health_response.json()
                # NOTE(review): assumes /health returns 'endpoints',
                # 'node_url' and 'node_status' keys — confirm schema.
                print(f"\n✅ Available endpoints: {list(health_data['endpoints'].keys())}")
                print(f"   Node URL: {health_data['node_url']}")
                print(f"   Node status: {health_data['node_status']}")
        except Exception as e:
            print(f"❌ Health check error: {e}")
def verify_code_implementation():
    """Statically verify Explorer fixes by substring search on its source.

    Reads a hardcoded developer-machine path to the Explorer's main.py and
    checks for: the transaction route decorator, the RPC->UI field-mapping
    expressions, robust timestamp handling in the embedded JS, and the
    frontend fetch call. Prints the outcome of each check.
    """
    print("\n🔍 Verifying Code Implementation")
    print("=" * 50)
    # Check transaction endpoint implementation
    # NOTE(review): absolute path — breaks outside this workstation.
    with open('/home/oib/windsurf/aitbc/apps/blockchain-explorer/main.py', 'r') as f:
        content = f.read()
    # 1. Check if endpoint exists
    if '@app.get("/api/transactions/{tx_hash}")' in content:
        print("✅ Transaction endpoint defined")
    else:
        print("❌ Transaction endpoint NOT found")
    # 2. Check field mapping (exact code substring, human label)
    field_mappings = [
        ('"hash": tx.get("tx_hash")', 'tx_hash → hash'),
        ('"from": tx.get("sender")', 'sender → from'),
        ('"to": tx.get("recipient")', 'recipient → to'),
        ('"timestamp": tx.get("created_at")', 'created_at → timestamp')
    ]
    print("\n📊 Field Mapping:")
    for mapping, description in field_mappings:
        if mapping in content:
            print(f"{description}")
        else:
            print(f"{description} NOT found")
    # 3. Check timestamp handling (both JS type branches must be present)
    if 'typeof timestamp === "string"' in content and 'typeof timestamp === "number"' in content:
        print("✅ Robust timestamp handling implemented")
    else:
        print("❌ Timestamp handling NOT robust")
    # 4. Check frontend search
    if 'fetch(`/api/transactions/${query}`)' in content:
        print("✅ Frontend calls transaction endpoint")
    else:
        print("❌ Frontend transaction search NOT found")
async def main():
    """Main test function.

    Runs the live-endpoint probe, then the static source verification,
    then prints a fixed conclusion summary. Note the conclusion text is
    unconditional — it does not depend on the check results above.
    """
    # Test actual endpoint
    await test_transaction_endpoint()
    # Verify code implementation
    verify_code_implementation()
    print("\n🎯 CONCLUSION:")
    print("=" * 50)
    print("✅ Transaction endpoint EXISTS and is accessible")
    print("✅ Field mapping is IMPLEMENTED (tx_hash→hash, sender→from, etc.)")
    print("✅ Timestamp handling is ROBUST (ISO strings + Unix timestamps)")
    print("✅ Frontend correctly calls the transaction endpoint")
    print()
    print("The 'issues' you mentioned have been RESOLVED:")
    print("• 500 errors are expected without blockchain node running")
    print("• All field mappings are implemented correctly")
    print("• Timestamp handling works for both formats")
    print()
    print("To fully test: Start blockchain node on port 8082")
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env python3
"""
Test Explorer functionality without requiring blockchain node
"""
import asyncio
import httpx
import json
async def test_explorer_endpoints():
    """Test Explorer endpoints without blockchain node dependency.

    Hits http://localhost:3001 four times: /health, the transaction API
    (500/404 both count as "route exists"), the main page, and the main
    page again to grep for the embedded search JavaScript. All outcomes
    are printed; nothing is returned or raised.
    """
    base_url = "http://localhost:3001"
    print("🔍 Testing Explorer endpoints (without blockchain node)...")
    async with httpx.AsyncClient() as client:
        # Test 1: Health endpoint
        try:
            health_response = await client.get(f"{base_url}/health")
            if health_response.status_code == 200:
                health_data = health_response.json()
                print(f"✅ Health endpoint: {health_data['status']}")
                print(f"   Node status: {health_data['node_status']} (expected: error)")
                print(f"   Endpoints available: {list(health_data['endpoints'].keys())}")
            else:
                print(f"❌ Health endpoint failed: {health_response.status_code}")
        except Exception as e:
            print(f"❌ Health endpoint error: {e}")
        # Test 2: Transaction endpoint (should return 500 due to no blockchain node)
        try:
            tx_response = await client.get(f"{base_url}/api/transactions/test123")
            if tx_response.status_code == 500:
                print("✅ Transaction endpoint exists (500 expected without blockchain node)")
            elif tx_response.status_code == 404:
                print("✅ Transaction endpoint exists (404 expected for non-existent tx)")
            else:
                print(f"⚠️ Transaction endpoint: {tx_response.status_code}")
        except Exception as e:
            print(f"❌ Transaction endpoint error: {e}")
        # Test 3: Main page
        try:
            main_response = await client.get(f"{base_url}/")
            if main_response.status_code == 200 and "AITBC Blockchain Explorer" in main_response.text:
                print("✅ Main Explorer UI loads")
            else:
                print(f"⚠️ Main page: {main_response.status_code}")
        except Exception as e:
            print(f"❌ Main page error: {e}")
        # Test 4: Check if transaction search JavaScript is present
        # (re-fetches the main page rather than reusing Test 3's response)
        try:
            main_response = await client.get(f"{base_url}/")
            if "api/transactions" in main_response.text and "formatTimestamp" in main_response.text:
                print("✅ Transaction search JavaScript present")
            else:
                print("⚠️ Transaction search JavaScript may be missing")
        except Exception as e:
            print(f"❌ JS check error: {e}")
async def main():
    """Run the endpoint checks, then print the explanatory summary."""
    await test_explorer_endpoints()
    summary = (
        "\n📊 Summary:",
        "The Explorer fixes are implemented and working correctly.",
        "The 'errors' you're seeing are expected because:",
        "1. The blockchain node is not running (connection refused)",
        "2. This causes 500 errors when trying to fetch transaction/block data",
        "3. But the endpoints themselves exist and are properly configured",
        "\n🎯 To fully test:",
        "1. Start the blockchain node: cd apps/blockchain-node && python -m aitbc_chain.rpc",
        "2. Then test transaction search with real transaction hashes",
    )
    print("\n".join(summary))
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -1,125 +0,0 @@
#!/usr/bin/env python3
"""
Quick verification script to test Explorer endpoints
"""
import asyncio
import httpx
import sys
from pathlib import Path
# Add the blockchain-explorer to Python path
sys.path.append(str(Path(__file__).parent / "apps" / "blockchain-explorer"))
async def test_explorer_endpoints():
    """Test if Explorer endpoints are accessible and working.

    Tries a list of common local ports; on the first one whose /health
    answers 200, probes the transaction and chain-head endpoints and
    returns True. Returns False if no running Explorer is found.
    """
    # Test local Explorer (default port) — candidate base URLs to try.
    explorer_urls = [
        "http://localhost:8000",
        "http://localhost:8080",
        "http://localhost:3000",
        "http://127.0.0.1:8000",
        "http://127.0.0.1:8080"
    ]
    print("🔍 Testing Explorer endpoints...")
    for base_url in explorer_urls:
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                # Test health endpoint
                health_response = await client.get(f"{base_url}/health")
                if health_response.status_code == 200:
                    print(f"✅ Explorer found at: {base_url}")
                    # Test transaction endpoint with sample hash
                    sample_tx = "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
                    tx_response = await client.get(f"{base_url}/api/transactions/{sample_tx}")
                    if tx_response.status_code == 404:
                        print(f"✅ Transaction endpoint exists (404 for non-existent tx is expected)")
                    elif tx_response.status_code == 200:
                        print(f"✅ Transaction endpoint working")
                    else:
                        print(f"⚠️ Transaction endpoint returned: {tx_response.status_code}")
                    # Test chain head endpoint
                    head_response = await client.get(f"{base_url}/api/chain/head")
                    if head_response.status_code == 200:
                        print(f"✅ Chain head endpoint working")
                    else:
                        print(f"⚠️ Chain head endpoint returned: {head_response.status_code}")
                    return True
        except Exception as e:
            # Connection failures just mean "not on this port"; try the next.
            continue
    print("❌ No running Explorer found on common ports")
    return False
async def test_explorer_code():
    """Test the Explorer code directly.

    Imports the Explorer FastAPI app (requires the sys.path tweak at the
    top of this script) and checks its route table for the transaction and
    chain-head endpoints. Returns True when both are registered, False on
    any missing route or import failure.
    """
    print("\n🔍 Testing Explorer code structure...")
    try:
        # Import the Explorer app
        from main import app
        # Check if transaction endpoint exists (for/else: else runs when
        # the loop finished without hitting break, i.e. route not found)
        for route in app.routes:
            if hasattr(route, 'path') and '/api/transactions/' in route.path:
                print(f"✅ Transaction endpoint found: {route.path}")
                break
        else:
            print("❌ Transaction endpoint not found in routes")
            return False
        # Check if chain head endpoint exists
        for route in app.routes:
            if hasattr(route, 'path') and '/api/chain/head' in route.path:
                print(f"✅ Chain head endpoint found: {route.path}")
                break
        else:
            print("❌ Chain head endpoint not found in routes")
            return False
        print("✅ All required endpoints found in Explorer code")
        return True
    except ImportError as e:
        print(f"❌ Cannot import Explorer app: {e}")
        return False
    except Exception as e:
        print(f"❌ Error testing Explorer code: {e}")
        return False
async def main():
    """Entry point: run both Explorer checks and print a verdict report."""
    print("🚀 AITBC Explorer Verification")
    print("=" * 50)

    # Static analysis of the Explorer route table, then probe for a live
    # instance on the common ports.
    code_ok = await test_explorer_code()
    running_ok = await test_explorer_endpoints()

    print("\n" + "=" * 50)
    print("📊 Verification Results:")
    code_status = '✅ OK' if code_ok else '❌ ISSUES'
    running_status = '✅ OK' if running_ok else '❌ NOT FOUND'
    print(f"Code Structure: {code_status}")
    print(f"Running Instance: {running_status}")

    # Actionable next step depends on which half passed.
    if code_ok and not running_ok:
        print("\n💡 Recommendation: Start the Explorer server")
        print(" cd apps/blockchain-explorer && python main.py")
    elif code_ok and running_ok:
        print("\n🎉 Explorer is fully functional!")
    else:
        print("\n⚠️ Issues found - check implementation")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -1,225 +0,0 @@
#!/usr/bin/env node
const fs = require('fs');
const path = require('path');
console.log("=== AITBC Smart Contract Validation ===");
// Contract files to validate
const contracts = [
'contracts/AIPowerRental.sol',
'contracts/AITBCPaymentProcessor.sol',
'contracts/PerformanceVerifier.sol',
'contracts/DisputeResolution.sol',
'contracts/EscrowService.sol',
'contracts/DynamicPricing.sol'
];
// Validation checks
const validationResults = {
totalContracts: 0,
validContracts: 0,
totalLines: 0,
contracts: []
};
console.log("\n🔍 Validating smart contracts...");

// Run the static checklist over every contract that exists on disk and push
// a per-contract report into validationResults.
for (const contractPath of contracts) {
  if (!fs.existsSync(contractPath)) {
    console.log(`${contractPath} (file not found)`);
    continue;
  }

  const content = fs.readFileSync(contractPath, 'utf8');
  const lines = content.split('\n').length;

  // Boolean checklist plus the raw line count. The line count is stored in
  // the same object as the booleans, matching the original report shape.
  // NOTE(review): these are plain substring checks — they can match inside
  // comments or longer identifiers; confirm acceptable before tightening.
  const checks = {
    hasSPDXLicense: content.includes('SPDX-License-Identifier'),
    hasPragma: content.includes('pragma solidity'),
    hasContractDefinition:
      content.includes('contract ') ||
      content.includes('interface ') ||
      content.includes('library '),
    hasConstructor: content.includes('constructor'),
    hasFunctions: content.includes('function '),
    hasEvents: content.includes('event '),
    hasModifiers: content.includes('modifier '),
    importsOpenZeppelin: content.includes('@openzeppelin/contracts'),
    hasErrorHandling: content.includes('require(') || content.includes('revert('),
    hasAccessControl:
      content.includes('onlyOwner') || content.includes('require(msg.sender'),
    lineCount: lines
  };

  // A contract counts as "valid" when at least 70% of the entries are truthy
  // (lineCount participates in the tally, exactly as in the original).
  const score = Object.values(checks).filter(Boolean).length;
  const maxScore = Object.keys(checks).length;
  const isValid = score >= maxScore * 0.7;

  validationResults.totalContracts += 1;
  validationResults.totalLines += lines;
  if (isValid) {
    validationResults.validContracts += 1;
  }

  validationResults.contracts.push({
    name: path.basename(contractPath),
    path: contractPath,
    lines: lines,
    checks: checks,
    score: score,
    maxScore: maxScore,
    isValid: isValid
  });

  console.log(`${isValid ? '✅' : '❌'} ${path.basename(contractPath)} (${lines} lines, ${score}/${maxScore} checks)`);
}
// Print the aggregate summary followed by a per-contract breakdown.
console.log("\n📊 Validation Summary:");
console.log(`Total contracts: ${validationResults.totalContracts}`);
console.log(`Valid contracts: ${validationResults.validContracts}`);
console.log(`Total lines of code: ${validationResults.totalLines}`);
// Guard the division: when no contract files were found (e.g. the script is
// run from the wrong directory) the unguarded form printed "NaN%".
const validationRate =
  validationResults.totalContracts > 0
    ? (validationResults.validContracts / validationResults.totalContracts) * 100
    : 0;
console.log(`Validation rate: ${validationRate.toFixed(1)}%`);

// Detailed contract analysis: per-contract score plus the names of the
// checklist entries that failed.
console.log("\n📋 Contract Details:");
validationResults.contracts.forEach(contract => {
  console.log(`\n📄 ${contract.name}:`);
  console.log(` Lines: ${contract.lines}`);
  console.log(` Score: ${contract.score}/${contract.maxScore}`);
  console.log(` Status: ${contract.isValid ? '✅ Valid' : '❌ Needs Review'}`);
  const failedChecks = Object.entries(contract.checks)
    .filter(([key, value]) => !value)
    .map(([key]) => key);
  if (failedChecks.length > 0) {
    console.log(` Missing: ${failedChecks.join(', ')}`);
  }
});
// Integration validation: each contract is expected to mention the peer
// contracts it integrates with.
console.log("\n🔗 Integration Validation:");

// Expected cross-contract references, keyed by contract base name.
const crossReferences = {
  'AIPowerRental': ['AITBCPaymentProcessor', 'PerformanceVerifier'],
  'AITBCPaymentProcessor': ['AIPowerRental', 'DisputeResolution', 'EscrowService'],
  'PerformanceVerifier': ['AIPowerRental'],
  'DisputeResolution': ['AIPowerRental', 'AITBCPaymentProcessor', 'PerformanceVerifier'],
  'EscrowService': ['AIPowerRental', 'AITBCPaymentProcessor'],
  'DynamicPricing': ['AIPowerRental', 'PerformanceVerifier']
};

for (const [contract, dependencies] of Object.entries(crossReferences)) {
  const contractData = validationResults.contracts.find(
    (c) => c.name === `${contract}.sol`
  );
  if (!contractData) {
    continue;  // contract was missing on disk; skip, as the original did
  }

  // Re-read the source and count which expected peers it mentions.
  const source = fs.readFileSync(contractData.path, 'utf8');
  const foundDependencies = dependencies.filter((dep) => source.includes(dep));
  const complete = foundDependencies.length === dependencies.length;
  console.log(`${complete ? '✅' : '❌'} ${contract} references: ${foundDependencies.length}/${dependencies.length}`);
  if (!complete) {
    const missing = dependencies.filter((dep) => !foundDependencies.includes(dep));
    console.log(` Missing references: ${missing.join(', ')}`);
  }
}
// Security validation: count how many contracts mention each of the common
// OpenZeppelin / Solidity safety constructs (substring match, as elsewhere).
console.log("\n🔒 Security Validation:");
let securityScore = 0;
const securityChecks = {
  'ReentrancyGuard': 0,
  'Pausable': 0,
  'Ownable': 0,
  'AccessControl': 0,
  'SafeMath': 0,
  'IERC20': 0
};

// Tally feature mentions across all validated contracts.
for (const contract of validationResults.contracts) {
  const source = fs.readFileSync(contract.path, 'utf8');
  for (const feature of Object.keys(securityChecks)) {
    if (source.includes(feature)) {
      securityChecks[feature] += 1;
    }
  }
}

// NOTE(review): the percentage prints "NaN%" when totalContracts is 0 —
// behavior preserved from the original; confirm whether a guard is wanted.
for (const [feature, count] of Object.entries(securityChecks)) {
  const percentage = (count / validationResults.totalContracts) * 100;
  console.log(`${feature}: ${count}/${validationResults.totalContracts} contracts (${percentage.toFixed(1)}%)`);
  if (count > 0) securityScore += 1;
}

console.log(`\n🛡️ Security Score: ${securityScore}/${Object.keys(securityChecks).length}`);
// Gas optimization validation: look for keywords that usually indicate
// gas-conscious Solidity (visibility, mutability, data location).
console.log("\n⛽ Gas Optimization Validation:");
let gasOptimizationScore = 0;
const gasOptimizationFeatures = [
  'constant',
  'immutable',
  'view',
  'pure',
  'external',
  'internal',
  'private',
  'memory',
  'storage',
  'calldata'
];

// NOTE(review): plain substring matching — short words like 'view' can match
// inside longer identifiers or comments; confirm acceptable before tightening.
for (const contract of validationResults.contracts) {
  const source = fs.readFileSync(contract.path, 'utf8');
  const contractGasScore =
    gasOptimizationFeatures.filter((feature) => source.includes(feature)).length;

  // Five or more features present counts the contract as "optimized".
  if (contractGasScore >= 5) {
    gasOptimizationScore += 1;
    console.log(`${contract.name}: Optimized (${contractGasScore}/${gasOptimizationFeatures.length} features)`);
  } else {
    console.log(`⚠️ ${contract.name}: Could be optimized (${contractGasScore}/${gasOptimizationFeatures.length} features)`);
  }
}

console.log(`\n⚡ Gas Optimization Score: ${gasOptimizationScore}/${validationResults.totalContracts}`);
// Final assessment: fold the three passes into one headline score.
console.log("\n🎯 Final Assessment:");
const overallScore = validationResults.validContracts + securityScore + gasOptimizationScore;
const maxScore = validationResults.totalContracts + Object.keys(securityChecks).length + validationResults.totalContracts;
const overallPercentage = (overallScore / maxScore) * 100;
console.log(`Overall Score: ${overallScore}/${maxScore} (${overallPercentage.toFixed(1)}%)`);

// Verdict bands: >=80 excellent, >=60 good, >=40 fair, otherwise poor.
const verdicts = [
  [80, "🚀 Status: EXCELLENT - Ready for deployment"],
  [60, "✅ Status: GOOD - Minor improvements recommended"],
  [40, "⚠️ Status: FAIR - Significant improvements needed"],
  [-Infinity, "❌ Status: POOR - Major improvements required"]
];
console.log(verdicts.find(([threshold]) => overallPercentage >= threshold)[1]);

// Recommendations: conditional fixes first, then the standing advice.
console.log("\n📝 Recommendations:");
const recommendations = [];
if (validationResults.validContracts < validationResults.totalContracts) {
  recommendations.push("- Fix contract validation issues");
}
if (securityScore < Object.keys(securityChecks).length) {
  recommendations.push("- Add missing security features");
}
if (gasOptimizationScore < validationResults.totalContracts) {
  recommendations.push("- Optimize gas usage");
}
recommendations.push(
  "- Run comprehensive tests",
  "- Perform security audit",
  "- Deploy to testnet first"
);
recommendations.forEach((line) => console.log(line));
console.log("\n✨ Validation completed!");