chore: remove outdated documentation and reference files
Some checks failed
AITBC CI/CD Pipeline / lint-and-test (3.11) (push) Has been cancelled
AITBC CI/CD Pipeline / lint-and-test (3.12) (push) Has been cancelled
AITBC CI/CD Pipeline / lint-and-test (3.13) (push) Has been cancelled
AITBC CI/CD Pipeline / test-cli (push) Has been cancelled
AITBC CI/CD Pipeline / test-services (push) Has been cancelled
AITBC CI/CD Pipeline / test-production-services (push) Has been cancelled
AITBC CI/CD Pipeline / security-scan (push) Has been cancelled
AITBC CI/CD Pipeline / build (push) Has been cancelled
AITBC CI/CD Pipeline / deploy-staging (push) Has been cancelled
AITBC CI/CD Pipeline / deploy-production (push) Has been cancelled
AITBC CI/CD Pipeline / performance-test (push) Has been cancelled
AITBC CI/CD Pipeline / docs (push) Has been cancelled
AITBC CI/CD Pipeline / release (push) Has been cancelled
AITBC CI/CD Pipeline / notify (push) Has been cancelled
Security Scanning / Bandit Security Scan (apps/coordinator-api/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (cli/aitbc_cli) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-core/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-crypto/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-sdk/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (tests) (push) Has been cancelled
Security Scanning / CodeQL Security Analysis (javascript) (push) Has been cancelled
Security Scanning / CodeQL Security Analysis (python) (push) Has been cancelled
Security Scanning / Dependency Security Scan (push) Has been cancelled
Security Scanning / Container Security Scan (push) Has been cancelled
Security Scanning / OSSF Scorecard (push) Has been cancelled
Security Scanning / Security Summary Report (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.11) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.12) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.13) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-summary (push) Has been cancelled
Some checks failed
AITBC CI/CD Pipeline / lint-and-test (3.11) (push) Has been cancelled
AITBC CI/CD Pipeline / lint-and-test (3.12) (push) Has been cancelled
AITBC CI/CD Pipeline / lint-and-test (3.13) (push) Has been cancelled
AITBC CI/CD Pipeline / test-cli (push) Has been cancelled
AITBC CI/CD Pipeline / test-services (push) Has been cancelled
AITBC CI/CD Pipeline / test-production-services (push) Has been cancelled
AITBC CI/CD Pipeline / security-scan (push) Has been cancelled
AITBC CI/CD Pipeline / build (push) Has been cancelled
AITBC CI/CD Pipeline / deploy-staging (push) Has been cancelled
AITBC CI/CD Pipeline / deploy-production (push) Has been cancelled
AITBC CI/CD Pipeline / performance-test (push) Has been cancelled
AITBC CI/CD Pipeline / docs (push) Has been cancelled
AITBC CI/CD Pipeline / release (push) Has been cancelled
AITBC CI/CD Pipeline / notify (push) Has been cancelled
Security Scanning / Bandit Security Scan (apps/coordinator-api/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (cli/aitbc_cli) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-core/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-crypto/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-sdk/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (tests) (push) Has been cancelled
Security Scanning / CodeQL Security Analysis (javascript) (push) Has been cancelled
Security Scanning / CodeQL Security Analysis (python) (push) Has been cancelled
Security Scanning / Dependency Security Scan (push) Has been cancelled
Security Scanning / Container Security Scan (push) Has been cancelled
Security Scanning / OSSF Scorecard (push) Has been cancelled
Security Scanning / Security Summary Report (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.11) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.12) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.13) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-summary (push) Has been cancelled
- Remove debugging service documentation (DEBUgging_SERVICES.md)
- Remove development logs policy and quick reference guides
- Remove E2E test creation summary
- Remove gift certificate example file
- Remove GitHub pull summary documentation
This commit is contained in:
914
scripts/deployment/complete-agent-protocols.sh
Executable file
914
scripts/deployment/complete-agent-protocols.sh
Executable file
@@ -0,0 +1,914 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# AITBC Agent Protocols Implementation - Part 2
|
||||
# Complete implementation with integration layer and services
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m'
|
||||
|
||||
# Emit an informational message to stdout, prefixed with a green [INFO] tag.
# %b interprets the backslash escapes stored in GREEN/NC (and in the message),
# matching the original `echo -e` behavior.
print_status() {
    printf '%b[INFO]%b %b\n' "${GREEN}" "${NC}" "$1"
}
# Emit a blue section header wrapped in "===" markers.
# %b interprets backslash escapes, matching the original `echo -e` behavior.
print_header() {
    printf '%b=== %b ===%b\n' "${BLUE}" "$1" "${NC}"
}
# Configuration
# PROJECT_ROOT may now be overridden from the environment (useful for testing
# and non-standard installs); the default is the original /opt/aitbc, so
# existing deployments behave identically. Derived paths always follow it.
PROJECT_ROOT="${PROJECT_ROOT:-/opt/aitbc}"
SERVICES_DIR="$PROJECT_ROOT/apps/agent-services"
AGENTS_DIR="$PROJECT_ROOT/apps/agents"
# Complete implementation: drive steps 5-8 of the rollout in order.
# Each step prints its banner and then runs the matching implementation
# function; the step list is kept as a parallel title/function table.
main() {
    print_header "COMPLETING AGENT PROTOCOLS IMPLEMENTATION"

    local -a step_titles=(
        "Step 5: Implementing Integration Layer"
        "Step 6: Creating Agent Services"
        "Step 7: Setting Up Testing Framework"
        "Step 8: Configuring Deployment"
    )
    local -a step_funcs=(
        implement_integration_layer
        create_agent_services
        setup_testing_framework
        configure_deployment
    )

    local idx
    for idx in "${!step_titles[@]}"; do
        print_header "${step_titles[$idx]}"
        "${step_funcs[$idx]}"
    done

    print_header "Agent Protocols Implementation Complete! 🎉"
}
# Implement Integration Layer
# Writes the Python integration module that bridges agents to the running
# AITBC services (coordinator, blockchain RPC, exchange, marketplace).
# Fix: create the destination directory first — the original `cat >` would
# fail with "No such file or directory" on a fresh checkout.
implement_integration_layer() {
    print_status "Implementing integration layer..."

    mkdir -p "$SERVICES_DIR/agent-bridge/src"

    # Quoted 'EOF': the Python below is written verbatim, no shell expansion.
    cat > "$SERVICES_DIR/agent-bridge/src/integration_layer.py" << 'EOF'
#!/usr/bin/env python3
"""
AITBC Agent Integration Layer
Connects agent protocols to existing AITBC services
"""

import asyncio
import aiohttp
import json
from typing import Dict, Any, List, Optional
from datetime import datetime

class AITBCServiceIntegration:
    """Integration layer for AITBC services"""

    def __init__(self):
        self.service_endpoints = {
            "coordinator_api": "http://localhost:8000",
            "blockchain_rpc": "http://localhost:8006",
            "exchange_service": "http://localhost:8001",
            "marketplace": "http://localhost:8014",
            "agent_registry": "http://localhost:8003"
        }
        self.session = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await self.session.close()

    async def get_blockchain_info(self) -> Dict[str, Any]:
        """Get blockchain information"""
        try:
            async with self.session.get(f"{self.service_endpoints['blockchain_rpc']}/health") as response:
                return await response.json()
        except Exception as e:
            return {"error": str(e), "status": "unavailable"}

    async def get_exchange_status(self) -> Dict[str, Any]:
        """Get exchange service status"""
        try:
            async with self.session.get(f"{self.service_endpoints['exchange_service']}/api/health") as response:
                return await response.json()
        except Exception as e:
            return {"error": str(e), "status": "unavailable"}

    async def get_coordinator_status(self) -> Dict[str, Any]:
        """Get coordinator API status"""
        try:
            async with self.session.get(f"{self.service_endpoints['coordinator_api']}/health") as response:
                return await response.json()
        except Exception as e:
            return {"error": str(e), "status": "unavailable"}

    async def submit_transaction(self, transaction_data: Dict[str, Any]) -> Dict[str, Any]:
        """Submit transaction to blockchain"""
        try:
            async with self.session.post(
                f"{self.service_endpoints['blockchain_rpc']}/rpc/submit",
                json=transaction_data
            ) as response:
                return await response.json()
        except Exception as e:
            return {"error": str(e), "status": "failed"}

    async def get_market_data(self, symbol: str = "AITBC/BTC") -> Dict[str, Any]:
        """Get market data from exchange"""
        try:
            async with self.session.get(f"{self.service_endpoints['exchange_service']}/api/market/{symbol}") as response:
                return await response.json()
        except Exception as e:
            return {"error": str(e), "status": "failed"}

    async def register_agent_with_coordinator(self, agent_data: Dict[str, Any]) -> Dict[str, Any]:
        """Register agent with coordinator"""
        try:
            async with self.session.post(
                f"{self.service_endpoints['coordinator_api']}/api/v1/agents/register",
                json=agent_data
            ) as response:
                return await response.json()
        except Exception as e:
            return {"error": str(e), "status": "failed"}

class AgentServiceBridge:
    """Bridge between agents and AITBC services"""

    def __init__(self):
        self.integration = AITBCServiceIntegration()
        self.active_agents = {}

    async def start_agent(self, agent_id: str, agent_config: Dict[str, Any]) -> bool:
        """Start an agent with service integration"""
        try:
            # Register agent with coordinator
            async with self.integration as integration:
                registration_result = await integration.register_agent_with_coordinator({
                    "agent_id": agent_id,
                    "agent_type": agent_config.get("type", "generic"),
                    "capabilities": agent_config.get("capabilities", []),
                    "endpoint": agent_config.get("endpoint", f"http://localhost:{8000 + len(self.active_agents) + 10}")
                })

                if registration_result.get("status") == "ok":
                    self.active_agents[agent_id] = {
                        "config": agent_config,
                        "registration": registration_result,
                        "started_at": datetime.utcnow()
                    }
                    return True
                else:
                    return False
        except Exception as e:
            print(f"Failed to start agent {agent_id}: {e}")
            return False

    async def stop_agent(self, agent_id: str) -> bool:
        """Stop an agent"""
        if agent_id in self.active_agents:
            del self.active_agents[agent_id]
            return True
        return False

    async def get_agent_status(self, agent_id: str) -> Dict[str, Any]:
        """Get agent status with service integration"""
        if agent_id not in self.active_agents:
            return {"status": "not_found"}

        agent_info = self.active_agents[agent_id]

        async with self.integration as integration:
            # Get service statuses
            blockchain_status = await integration.get_blockchain_info()
            exchange_status = await integration.get_exchange_status()
            coordinator_status = await integration.get_coordinator_status()

            return {
                "agent_id": agent_id,
                "status": "active",
                "started_at": agent_info["started_at"].isoformat(),
                "services": {
                    "blockchain": blockchain_status,
                    "exchange": exchange_status,
                    "coordinator": coordinator_status
                }
            }

    async def execute_agent_task(self, agent_id: str, task_data: Dict[str, Any]) -> Dict[str, Any]:
        """Execute agent task with service integration"""
        if agent_id not in self.active_agents:
            return {"status": "error", "message": "Agent not found"}

        task_type = task_data.get("type")

        if task_type == "market_analysis":
            return await self._execute_market_analysis(task_data)
        elif task_type == "trading":
            return await self._execute_trading_task(task_data)
        elif task_type == "compliance_check":
            return await self._execute_compliance_check(task_data)
        else:
            return {"status": "error", "message": f"Unknown task type: {task_type}"}

    async def _execute_market_analysis(self, task_data: Dict[str, Any]) -> Dict[str, Any]:
        """Execute market analysis task"""
        try:
            async with self.integration as integration:
                market_data = await integration.get_market_data(task_data.get("symbol", "AITBC/BTC"))

                # Perform basic analysis
                analysis_result = {
                    "symbol": task_data.get("symbol", "AITBC/BTC"),
                    "market_data": market_data,
                    "analysis": {
                        "trend": "neutral",
                        "volatility": "medium",
                        "recommendation": "hold"
                    },
                    "timestamp": datetime.utcnow().isoformat()
                }

                return {"status": "success", "result": analysis_result}
        except Exception as e:
            return {"status": "error", "message": str(e)}

    async def _execute_trading_task(self, task_data: Dict[str, Any]) -> Dict[str, Any]:
        """Execute trading task"""
        try:
            # Get market data first
            async with self.integration as integration:
                market_data = await integration.get_market_data(task_data.get("symbol", "AITBC/BTC"))

                # Create transaction
                transaction = {
                    "type": "trade",
                    "symbol": task_data.get("symbol", "AITBC/BTC"),
                    "side": task_data.get("side", "buy"),
                    "amount": task_data.get("amount", 0.1),
                    "price": task_data.get("price", market_data.get("price", 0.001))
                }

                # Submit transaction
                tx_result = await integration.submit_transaction(transaction)

                return {"status": "success", "transaction": tx_result}
        except Exception as e:
            return {"status": "error", "message": str(e)}

    async def _execute_compliance_check(self, task_data: Dict[str, Any]) -> Dict[str, Any]:
        """Execute compliance check task"""
        try:
            # Basic compliance check
            compliance_result = {
                "user_id": task_data.get("user_id"),
                "check_type": task_data.get("check_type", "basic"),
                "status": "passed",
                "checks_performed": ["kyc", "aml", "sanctions"],
                "timestamp": datetime.utcnow().isoformat()
            }

            return {"status": "success", "result": compliance_result}
        except Exception as e:
            return {"status": "error", "message": str(e)}
EOF

    print_status "Integration layer implemented"
}
# Create Agent Services
# Writes the trading-agent and compliance-agent Python services under
# $AGENTS_DIR. Fix: create each destination directory first — the original
# `cat >` calls would fail with "No such file or directory" on a fresh tree.
create_agent_services() {
    print_status "Creating agent services..."

    # Trading Agent (quoted 'EOF': written verbatim, no shell expansion)
    mkdir -p "$AGENTS_DIR/trading/src"
    cat > "$AGENTS_DIR/trading/src/trading_agent.py" << 'EOF'
#!/usr/bin/env python3
"""
AITBC Trading Agent
Automated trading agent for AITBC marketplace
"""

import asyncio
import json
import time
from typing import Dict, Any, List
from datetime import datetime
import sys
import os

# Add parent directory to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))

from apps.agent_services.agent_bridge.src.integration_layer import AgentServiceBridge

class TradingAgent:
    """Automated trading agent"""

    def __init__(self, agent_id: str, config: Dict[str, Any]):
        self.agent_id = agent_id
        self.config = config
        self.bridge = AgentServiceBridge()
        self.is_running = False
        self.trading_strategy = config.get("strategy", "basic")
        self.symbols = config.get("symbols", ["AITBC/BTC"])
        self.trade_interval = config.get("trade_interval", 60)  # seconds

    async def start(self) -> bool:
        """Start trading agent"""
        try:
            # Register with service bridge
            success = await self.bridge.start_agent(self.agent_id, {
                "type": "trading",
                "capabilities": ["market_analysis", "trading", "risk_management"],
                "endpoint": f"http://localhost:8005"
            })

            if success:
                self.is_running = True
                print(f"Trading agent {self.agent_id} started successfully")
                return True
            else:
                print(f"Failed to start trading agent {self.agent_id}")
                return False
        except Exception as e:
            print(f"Error starting trading agent: {e}")
            return False

    async def stop(self) -> bool:
        """Stop trading agent"""
        self.is_running = False
        success = await self.bridge.stop_agent(self.agent_id)
        if success:
            print(f"Trading agent {self.agent_id} stopped successfully")
        return success

    async def run_trading_loop(self):
        """Main trading loop"""
        while self.is_running:
            try:
                for symbol in self.symbols:
                    await self._analyze_and_trade(symbol)

                await asyncio.sleep(self.trade_interval)
            except Exception as e:
                print(f"Error in trading loop: {e}")
                await asyncio.sleep(10)  # Wait before retrying

    async def _analyze_and_trade(self, symbol: str) -> None:
        """Analyze market and execute trades"""
        try:
            # Perform market analysis
            analysis_task = {
                "type": "market_analysis",
                "symbol": symbol,
                "strategy": self.trading_strategy
            }

            analysis_result = await self.bridge.execute_agent_task(self.agent_id, analysis_task)

            if analysis_result.get("status") == "success":
                analysis = analysis_result["result"]["analysis"]

                # Make trading decision
                if self._should_trade(analysis):
                    await self._execute_trade(symbol, analysis)
            else:
                print(f"Market analysis failed for {symbol}: {analysis_result}")

        except Exception as e:
            print(f"Error in analyze_and_trade for {symbol}: {e}")

    def _should_trade(self, analysis: Dict[str, Any]) -> bool:
        """Determine if should execute trade"""
        recommendation = analysis.get("recommendation", "hold")
        return recommendation in ["buy", "sell"]

    async def _execute_trade(self, symbol: str, analysis: Dict[str, Any]) -> None:
        """Execute trade based on analysis"""
        try:
            recommendation = analysis.get("recommendation", "hold")

            if recommendation == "buy":
                trade_task = {
                    "type": "trading",
                    "symbol": symbol,
                    "side": "buy",
                    "amount": self.config.get("trade_amount", 0.1),
                    "strategy": self.trading_strategy
                }
            elif recommendation == "sell":
                trade_task = {
                    "type": "trading",
                    "symbol": symbol,
                    "side": "sell",
                    "amount": self.config.get("trade_amount", 0.1),
                    "strategy": self.trading_strategy
                }
            else:
                return

            trade_result = await self.bridge.execute_agent_task(self.agent_id, trade_task)

            if trade_result.get("status") == "success":
                print(f"Trade executed successfully: {trade_result}")
            else:
                print(f"Trade execution failed: {trade_result}")

        except Exception as e:
            print(f"Error executing trade: {e}")

    async def get_status(self) -> Dict[str, Any]:
        """Get agent status"""
        return await self.bridge.get_agent_status(self.agent_id)

# Main execution
async def main():
    """Main trading agent execution"""
    agent_id = "trading-agent-001"
    config = {
        "strategy": "basic",
        "symbols": ["AITBC/BTC"],
        "trade_interval": 30,
        "trade_amount": 0.1
    }

    agent = TradingAgent(agent_id, config)

    # Start agent
    if await agent.start():
        try:
            # Run trading loop
            await agent.run_trading_loop()
        except KeyboardInterrupt:
            print("Shutting down trading agent...")
        finally:
            await agent.stop()
    else:
        print("Failed to start trading agent")

if __name__ == "__main__":
    asyncio.run(main())
EOF

    # Compliance Agent (quoted 'EOF': written verbatim, no shell expansion)
    mkdir -p "$AGENTS_DIR/compliance/src"
    cat > "$AGENTS_DIR/compliance/src/compliance_agent.py" << 'EOF'
#!/usr/bin/env python3
"""
AITBC Compliance Agent
Automated compliance and regulatory monitoring agent
"""

import asyncio
import json
import time
from typing import Dict, Any, List
from datetime import datetime
import sys
import os

# Add parent directory to path
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))

from apps.agent_services.agent_bridge.src.integration_layer import AgentServiceBridge

class ComplianceAgent:
    """Automated compliance agent"""

    def __init__(self, agent_id: str, config: Dict[str, Any]):
        self.agent_id = agent_id
        self.config = config
        self.bridge = AgentServiceBridge()
        self.is_running = False
        self.check_interval = config.get("check_interval", 300)  # 5 minutes
        self.monitored_entities = config.get("monitored_entities", [])

    async def start(self) -> bool:
        """Start compliance agent"""
        try:
            success = await self.bridge.start_agent(self.agent_id, {
                "type": "compliance",
                "capabilities": ["kyc_check", "aml_screening", "regulatory_reporting"],
                "endpoint": f"http://localhost:8006"
            })

            if success:
                self.is_running = True
                print(f"Compliance agent {self.agent_id} started successfully")
                return True
            else:
                print(f"Failed to start compliance agent {self.agent_id}")
                return False
        except Exception as e:
            print(f"Error starting compliance agent: {e}")
            return False

    async def stop(self) -> bool:
        """Stop compliance agent"""
        self.is_running = False
        success = await self.bridge.stop_agent(self.agent_id)
        if success:
            print(f"Compliance agent {self.agent_id} stopped successfully")
        return success

    async def run_compliance_loop(self):
        """Main compliance monitoring loop"""
        while self.is_running:
            try:
                for entity in self.monitored_entities:
                    await self._perform_compliance_check(entity)

                await asyncio.sleep(self.check_interval)
            except Exception as e:
                print(f"Error in compliance loop: {e}")
                await asyncio.sleep(30)  # Wait before retrying

    async def _perform_compliance_check(self, entity_id: str) -> None:
        """Perform compliance check for entity"""
        try:
            compliance_task = {
                "type": "compliance_check",
                "user_id": entity_id,
                "check_type": "full",
                "monitored_activities": ["trading", "transfers", "wallet_creation"]
            }

            result = await self.bridge.execute_agent_task(self.agent_id, compliance_task)

            if result.get("status") == "success":
                compliance_result = result["result"]
                await self._handle_compliance_result(entity_id, compliance_result)
            else:
                print(f"Compliance check failed for {entity_id}: {result}")

        except Exception as e:
            print(f"Error performing compliance check for {entity_id}: {e}")

    async def _handle_compliance_result(self, entity_id: str, result: Dict[str, Any]) -> None:
        """Handle compliance check result"""
        status = result.get("status", "unknown")

        if status == "passed":
            print(f"✅ Compliance check passed for {entity_id}")
        elif status == "failed":
            print(f"❌ Compliance check failed for {entity_id}")
            # Trigger alert or further investigation
            await self._trigger_compliance_alert(entity_id, result)
        else:
            print(f"⚠️ Compliance check inconclusive for {entity_id}")

    async def _trigger_compliance_alert(self, entity_id: str, result: Dict[str, Any]) -> None:
        """Trigger compliance alert"""
        alert_data = {
            "entity_id": entity_id,
            "alert_type": "compliance_failure",
            "severity": "high",
            "details": result,
            "timestamp": datetime.utcnow().isoformat()
        }

        # In a real implementation, this would send to alert system
        print(f"🚨 COMPLIANCE ALERT: {json.dumps(alert_data, indent=2)}")

    async def get_status(self) -> Dict[str, Any]:
        """Get agent status"""
        status = await self.bridge.get_agent_status(self.agent_id)
        status["monitored_entities"] = len(self.monitored_entities)
        status["check_interval"] = self.check_interval
        return status

# Main execution
async def main():
    """Main compliance agent execution"""
    agent_id = "compliance-agent-001"
    config = {
        "check_interval": 60,  # 1 minute for testing
        "monitored_entities": ["user001", "user002", "user003"]
    }

    agent = ComplianceAgent(agent_id, config)

    # Start agent
    if await agent.start():
        try:
            # Run compliance loop
            await agent.run_compliance_loop()
        except KeyboardInterrupt:
            print("Shutting down compliance agent...")
        finally:
            await agent.stop()
    else:
        print("Failed to start compliance agent")

if __name__ == "__main__":
    asyncio.run(main())
EOF

    print_status "Agent services created"
}
# Set up Testing Framework
|
||||
setup_testing_framework() {
|
||||
print_status "Setting up testing framework..."
|
||||
|
||||
cat > "$PROJECT_ROOT/apps/agent-protocols/tests/test_agent_protocols.py" << 'EOF'
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test suite for AITBC Agent Protocols
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import asyncio
|
||||
import json
|
||||
import tempfile
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
# Add parent directory to path
|
||||
import sys
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
|
||||
|
||||
from src.message_protocol import MessageProtocol, MessageTypes, AgentMessageClient
|
||||
from src.task_manager import TaskManager, TaskStatus, TaskPriority
|
||||
|
||||
class TestMessageProtocol(unittest.TestCase):
|
||||
"""Test message protocol functionality"""
|
||||
|
||||
def setUp(self):
|
||||
self.protocol = MessageProtocol()
|
||||
self.sender_id = "agent-001"
|
||||
self.receiver_id = "agent-002"
|
||||
|
||||
def test_message_creation(self):
|
||||
"""Test message creation"""
|
||||
message = self.protocol.create_message(
|
||||
sender_id=self.sender_id,
|
||||
receiver_id=self.receiver_id,
|
||||
message_type=MessageTypes.TASK_ASSIGNMENT,
|
||||
payload={"task": "test_task", "data": "test_data"}
|
||||
)
|
||||
|
||||
self.assertEqual(message["sender_id"], self.sender_id)
|
||||
self.assertEqual(message["receiver_id"], self.receiver_id)
|
||||
self.assertEqual(message["message_type"], MessageTypes.TASK_ASSIGNMENT)
|
||||
self.assertIsNotNone(message["signature"])
|
||||
|
||||
def test_message_verification(self):
|
||||
"""Test message verification"""
|
||||
message = self.protocol.create_message(
|
||||
sender_id=self.sender_id,
|
||||
receiver_id=self.receiver_id,
|
||||
message_type=MessageTypes.TASK_ASSIGNMENT,
|
||||
payload={"task": "test_task"}
|
||||
)
|
||||
|
||||
# Valid message should verify
|
||||
self.assertTrue(self.protocol.verify_message(message))
|
||||
|
||||
# Tampered message should not verify
|
||||
message["payload"] = "tampered"
|
||||
self.assertFalse(self.protocol.verify_message(message))
|
||||
|
||||
def test_message_encryption(self):
|
||||
"""Test message encryption/decryption"""
|
||||
original_payload = {"sensitive": "data", "numbers": [1, 2, 3]}
|
||||
|
||||
message = self.protocol.create_message(
|
||||
sender_id=self.sender_id,
|
||||
receiver_id=self.receiver_id,
|
||||
message_type=MessageTypes.DATA_RESPONSE,
|
||||
payload=original_payload
|
||||
)
|
||||
|
||||
# Decrypt message
|
||||
decrypted = self.protocol.decrypt_message(message)
|
||||
|
||||
self.assertEqual(decrypted["payload"], original_payload)
|
||||
|
||||
def test_message_queueing(self):
|
||||
"""Test message queuing and delivery"""
|
||||
message = self.protocol.create_message(
|
||||
sender_id=self.sender_id,
|
||||
receiver_id=self.receiver_id,
|
||||
message_type=MessageTypes.HEARTBEAT,
|
||||
payload={"status": "active"}
|
||||
)
|
||||
|
||||
# Send message
|
||||
success = self.protocol.send_message(message)
|
||||
self.assertTrue(success)
|
||||
|
||||
# Receive message
|
||||
messages = self.protocol.receive_messages(self.receiver_id)
|
||||
self.assertEqual(len(messages), 1)
|
||||
self.assertEqual(messages[0]["message_type"], MessageTypes.HEARTBEAT)
|
||||
|
||||
class TestTaskManager(unittest.TestCase):
|
||||
"""Test task manager functionality"""
|
||||
|
||||
def setUp(self):
|
||||
self.temp_db = tempfile.NamedTemporaryFile(delete=False)
|
||||
self.temp_db.close()
|
||||
self.task_manager = TaskManager(self.temp_db.name)
|
||||
|
||||
def tearDown(self):
|
||||
os.unlink(self.temp_db.name)
|
||||
|
||||
def test_task_creation(self):
|
||||
"""Test task creation"""
|
||||
task = self.task_manager.create_task(
|
||||
task_type="market_analysis",
|
||||
payload={"symbol": "AITBC/BTC"},
|
||||
required_capabilities=["market_data", "analysis"],
|
||||
priority=TaskPriority.HIGH
|
||||
)
|
||||
|
||||
self.assertIsNotNone(task.id)
|
||||
self.assertEqual(task.task_type, "market_analysis")
|
||||
self.assertEqual(task.status, TaskStatus.PENDING)
|
||||
self.assertEqual(task.priority, TaskPriority.HIGH)
|
||||
|
||||
def test_task_assignment(self):
|
||||
"""Test task assignment"""
|
||||
task = self.task_manager.create_task(
|
||||
task_type="trading",
|
||||
payload={"symbol": "AITBC/BTC", "side": "buy"},
|
||||
required_capabilities=["trading", "market_access"]
|
||||
)
|
||||
|
||||
success = self.task_manager.assign_task(task.id, "agent-001")
|
||||
self.assertTrue(success)
|
||||
|
||||
# Verify assignment
|
||||
updated_task = self.task_manager.get_agent_tasks("agent-001")[0]
|
||||
self.assertEqual(updated_task.id, task.id)
|
||||
self.assertEqual(updated_task.assigned_agent_id, "agent-001")
|
||||
self.assertEqual(updated_task.status, TaskStatus.ASSIGNED)
|
||||
|
||||
def test_task_completion(self):
|
||||
"""Test task completion"""
|
||||
task = self.task_manager.create_task(
|
||||
task_type="compliance_check",
|
||||
payload={"user_id": "user001"},
|
||||
required_capabilities=["compliance"]
|
||||
)
|
||||
|
||||
# Assign and start task
|
||||
self.task_manager.assign_task(task.id, "agent-002")
|
||||
self.task_manager.start_task(task.id)
|
||||
|
||||
# Complete task
|
||||
result = {"status": "passed", "checks": ["kyc", "aml"]}
|
||||
success = self.task_manager.complete_task(task.id, result)
|
||||
self.assertTrue(success)
|
||||
|
||||
# Verify completion
|
||||
completed_task = self.task_manager.get_agent_tasks("agent-002")[0]
|
||||
self.assertEqual(completed_task.status, TaskStatus.COMPLETED)
|
||||
self.assertEqual(completed_task.result, result)
|
||||
|
||||
def test_task_statistics(self):
    """Statistics report counts for every created (still pending) task."""
    task_total = 5
    # Seed the manager with several unassigned tasks.
    for index in range(task_total):
        self.task_manager.create_task(
            task_type=f"task_{index}",
            payload={"index": index},
            required_capabilities=["basic"]
        )

    stats = self.task_manager.get_task_statistics()

    for key in ("task_counts", "agent_statistics"):
        self.assertIn(key, stats)
    self.assertEqual(stats["task_counts"]["pending"], task_total)
|
||||
|
||||
class TestAgentMessageClient(unittest.TestCase):
    """Unit tests for AgentMessageClient send/receive round-trips."""

    def setUp(self):
        # Fresh client per test so message queues do not leak between cases.
        self.client = AgentMessageClient("agent-001", "http://localhost:8003")

    def _assert_single_message(self, messages, expected_type):
        # Exactly one message should be queued, carrying the expected type.
        self.assertEqual(len(messages), 1)
        self.assertEqual(messages[0]["message_type"], expected_type)

    def test_task_assignment_message(self):
        """Sending a task assignment queues one TASK_ASSIGNMENT message."""
        task_data = {"task": "test_task", "parameters": {"param1": "value1"}}
        self.assertTrue(self.client.send_task_assignment("agent-002", task_data))
        self._assert_single_message(
            self.client.receive_messages(), MessageTypes.TASK_ASSIGNMENT)

    def test_coordination_message(self):
        """Sending a coordination message queues one COORDINATION message."""
        coordination_data = {"action": "coordinate", "details": {"target": "goal"}}
        self.assertTrue(
            self.client.send_coordination_message("agent-003", coordination_data))
        self._assert_single_message(
            self.client.get_coordination_messages(), MessageTypes.COORDINATION)
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
EOF
|
||||
|
||||
print_status "Testing framework set up"
|
||||
}
|
||||
|
||||
# Configure Deployment
# Writes the systemd unit files for both agent services and generates the
# standalone deploy script under $PROJECT_ROOT/scripts.
# NOTE(review): writing to /etc/systemd/system requires root — presumably
# this whole script runs as root; confirm before adding sudo here.
configure_deployment() {
    print_status "Configuring deployment..."

    # Create systemd service files (quoted 'EOF' => no expansion inside)
    cat > "/etc/systemd/system/aitbc-agent-registry.service" << 'EOF'
[Unit]
Description=AITBC Agent Registry Service
After=network.target

[Service]
Type=simple
User=aitbc
Group=aitbc
WorkingDirectory=/opt/aitbc/apps/agent-registry/src
Environment=PYTHONPATH=/opt/aitbc
ExecStart=/usr/bin/python3 app.py
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

    # Coordinator is ordered after the registry unit.
    cat > "/etc/systemd/system/aitbc-agent-coordinator.service" << 'EOF'
[Unit]
Description=AITBC Agent Coordinator Service
After=network.target aitbc-agent-registry.service

[Service]
Type=simple
User=aitbc
Group=aitbc
WorkingDirectory=/opt/aitbc/apps/agent-services/agent-coordinator/src
Environment=PYTHONPATH=/opt/aitbc
ExecStart=/usr/bin/python3 coordinator.py
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

    # Create deployment script (content is literal; it runs later, on the host)
    cat > "$PROJECT_ROOT/scripts/deploy-agent-protocols.sh" << 'EOF'
#!/bin/bash
# Deploy AITBC Agent Protocols

set -e

echo "🚀 Deploying AITBC Agent Protocols..."

# Install dependencies
pip3 install fastapi uvicorn pydantic cryptography aiohttp

# Enable and start services
systemctl daemon-reload
systemctl enable aitbc-agent-registry
systemctl enable aitbc-agent-coordinator
systemctl start aitbc-agent-registry
systemctl start aitbc-agent-coordinator

# Wait for services to start
sleep 5

# Check service status
echo "Checking service status..."
systemctl status aitbc-agent-registry --no-pager
systemctl status aitbc-agent-coordinator --no-pager

# Test services
echo "Testing services..."
curl -s http://localhost:8003/api/health || echo "Agent Registry not responding"
curl -s http://localhost:8004/api/health || echo "Agent Coordinator not responding"

echo "✅ Agent Protocols deployment complete!"
EOF

    chmod +x "$PROJECT_ROOT/scripts/deploy-agent-protocols.sh"

    print_status "Deployment configured"
}
|
||||
|
||||
# Entry point: forward all command-line arguments to main.
main "$@"
|
||||
41
scripts/deployment/deploy-agent-protocols-fixed.sh
Executable file
41
scripts/deployment/deploy-agent-protocols-fixed.sh
Executable file
@@ -0,0 +1,41 @@
|
||||
#!/bin/bash
# Deploy AITBC Agent Protocols - Using existing virtual environment
#
# Installs Python dependencies into the pre-existing CLI virtualenv,
# installs the systemd units, then starts and smoke-tests both agent
# services. The systemctl/cp steps require sudo rights.

set -e

echo "🚀 Deploying AITBC Agent Protocols..."

# Use existing virtual environment
VENV_PATH="/opt/aitbc/cli/venv"

# Fail fast with a clear message instead of a confusing "No such file"
# error from the pip invocation below.
if [ ! -x "$VENV_PATH/bin/pip" ]; then
    echo "Error: virtualenv not found at $VENV_PATH" >&2
    exit 1
fi

# Install dependencies in virtual environment (path quoted for safety)
echo "Installing dependencies..."
"$VENV_PATH/bin/pip" install fastapi uvicorn pydantic cryptography aiohttp

# Copy service files
echo "Setting up systemd services..."
sudo cp /opt/aitbc/deployment/agent-protocols/aitbc-agent-registry.service /etc/systemd/system/
sudo cp /opt/aitbc/deployment/agent-protocols/aitbc-agent-coordinator.service /etc/systemd/system/

# Enable and start services
echo "Starting agent services..."
sudo systemctl daemon-reload
sudo systemctl enable aitbc-agent-registry
sudo systemctl enable aitbc-agent-coordinator
sudo systemctl start aitbc-agent-registry
sudo systemctl start aitbc-agent-coordinator

# Wait for services to start
sleep 5

# Check service status. head keeps output short; the pipeline's exit
# status is head's, so a degraded unit does not abort the script here.
echo "Checking service status..."
sudo systemctl status aitbc-agent-registry --no-pager | head -5
sudo systemctl status aitbc-agent-coordinator --no-pager | head -5

# Test services (|| keeps set -e from aborting on an unresponsive service)
echo "Testing services..."
curl -s http://localhost:8003/api/health || echo "Agent Registry not responding"
curl -s http://localhost:8004/api/health || echo "Agent Coordinator not responding"

echo "✅ Agent Protocols deployment complete!"
|
||||
41
scripts/deployment/deploy-agent-protocols-sudo.sh
Executable file
41
scripts/deployment/deploy-agent-protocols-sudo.sh
Executable file
@@ -0,0 +1,41 @@
|
||||
#!/bin/bash
# Deploy AITBC Agent Protocols - With proper permissions
#
# Same flow as the non-sudo variant, but pip also runs under sudo so it
# can write into a root-owned virtualenv.

set -e

echo "🚀 Deploying AITBC Agent Protocols..."

# Use existing virtual environment with sudo
VENV_PATH="/opt/aitbc/cli/venv"

# Fail fast if the expected virtualenv is missing.
if [ ! -x "$VENV_PATH/bin/pip" ]; then
    echo "Error: virtualenv not found at $VENV_PATH" >&2
    exit 1
fi

# Install dependencies in virtual environment (path quoted for safety)
echo "Installing dependencies..."
sudo "$VENV_PATH/bin/pip" install fastapi uvicorn pydantic cryptography aiohttp

# Copy service files
echo "Setting up systemd services..."
sudo cp /opt/aitbc/deployment/agent-protocols/aitbc-agent-registry.service /etc/systemd/system/
sudo cp /opt/aitbc/deployment/agent-protocols/aitbc-agent-coordinator.service /etc/systemd/system/

# Enable and start services
echo "Starting agent services..."
sudo systemctl daemon-reload
sudo systemctl enable aitbc-agent-registry
sudo systemctl enable aitbc-agent-coordinator
sudo systemctl start aitbc-agent-registry
sudo systemctl start aitbc-agent-coordinator

# Wait for services to start
sleep 5

# Check service status (pipeline exit status is head's — non-fatal)
echo "Checking service status..."
sudo systemctl status aitbc-agent-registry --no-pager | head -5
sudo systemctl status aitbc-agent-coordinator --no-pager | head -5

# Test services (|| keeps set -e from aborting on an unresponsive service)
echo "Testing services..."
curl -s http://localhost:8003/api/health || echo "Agent Registry not responding"
curl -s http://localhost:8004/api/health || echo "Agent Coordinator not responding"

echo "✅ Agent Protocols deployment complete!"
|
||||
35
scripts/deployment/deploy-agent-protocols.sh
Executable file
35
scripts/deployment/deploy-agent-protocols.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/bin/bash
# Deploy AITBC Agent Protocols
#
# Installs dependencies system-wide with pip3, installs the systemd
# units, then starts and smoke-tests both agent services.

set -e

echo "🚀 Deploying AITBC Agent Protocols..."

# Install dependencies.
# Fixed: removed "sqlite3" from the package list — sqlite3 ships with the
# Python standard library and is not installable from PyPI, so the old
# command failed and (under set -e) aborted the whole deployment.
pip3 install fastapi uvicorn pydantic cryptography aiohttp

# Copy service files
sudo cp /opt/aitbc/deployment/agent-protocols/aitbc-agent-registry.service /etc/systemd/system/
sudo cp /opt/aitbc/deployment/agent-protocols/aitbc-agent-coordinator.service /etc/systemd/system/

# Enable and start services
sudo systemctl daemon-reload
sudo systemctl enable aitbc-agent-registry
sudo systemctl enable aitbc-agent-coordinator
sudo systemctl start aitbc-agent-registry
sudo systemctl start aitbc-agent-coordinator

# Wait for services to start
sleep 5

# Check service status
echo "Checking service status..."
sudo systemctl status aitbc-agent-registry --no-pager
sudo systemctl status aitbc-agent-coordinator --no-pager

# Test services (|| keeps set -e from aborting on an unresponsive service)
echo "Testing services..."
curl -s http://localhost:8003/api/health || echo "Agent Registry not responding"
curl -s http://localhost:8004/api/health || echo "Agent Coordinator not responding"

echo "✅ Agent Protocols deployment complete!"
|
||||
392
scripts/deployment/deploy.sh
Executable file
392
scripts/deployment/deploy.sh
Executable file
@@ -0,0 +1,392 @@
|
||||
#!/bin/bash

# AITBC Automated Deployment Script
# This script handles automated deployment of AITBC services
#
# Usage: deploy.sh [environment] [version] [region]
#   environment: local|docker|staging|production|rollback|cleanup
#   version:     image tag to build/deploy (default: latest)
#   region:      informational only in this script (default: us-east-1)

set -e

# Configuration (positional parameters with sensible defaults)
ENVIRONMENT=${1:-staging}
VERSION=${2:-latest}
REGION=${3:-us-east-1}
NAMESPACE="aitbc-${ENVIRONMENT}"

# Colors for output (ANSI escapes, rendered via echo -e in the helpers)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
||||
|
||||
# Logging function
log() {
    # Timestamped informational message (blue tag when colors are set).
    local stamp
    stamp=$(date +'%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${BLUE}[${stamp}]${NC} $1"
}
|
||||
|
||||
error() {
    # Print an error diagnostic and abort the script with status 1.
    # Fixed: diagnostics now go to stderr so they no longer pollute
    # captured stdout (e.g. command substitutions around this script).
    echo -e "${RED}[ERROR]${NC} $1" >&2
    exit 1
}
|
||||
|
||||
success() {
    # Green success tag followed by the caller's message.
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}
|
||||
|
||||
warning() {
    # Yellow warning tag followed by the caller's message.
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}
|
||||
|
||||
# Check prerequisites
# Verifies that every external tool this script shells out to exists and
# that Docker and the Kubernetes cluster are reachable. error() exits on
# the first missing prerequisite.
check_prerequisites() {
    log "Checking prerequisites..."

    # Check if required tools are installed (command -v is the portable test)
    command -v docker >/dev/null 2>&1 || error "Docker is not installed"
    command -v docker-compose >/dev/null 2>&1 || error "Docker Compose is not installed"
    command -v kubectl >/dev/null 2>&1 || error "kubectl is not installed"
    command -v helm >/dev/null 2>&1 || error "Helm is not installed"

    # Check if Docker daemon is running
    docker info >/dev/null 2>&1 || error "Docker daemon is not running"

    # Check if kubectl can connect to cluster
    # NOTE(review): kubectl/helm are required even for local/docker deploys;
    # confirm that is intentional.
    kubectl cluster-info >/dev/null 2>&1 || error "Cannot connect to Kubernetes cluster"

    success "Prerequisites check passed"
}
|
||||
|
||||
# Build Docker images
# Builds the CLI image from the repo root, then one image per service
# directory under apps/ that ships its own Dockerfile. All images are
# tagged aitbc/<name>:${VERSION}.
build_images() {
    log "Building Docker images..."

    # Build CLI image
    log "Building CLI image..."
    docker build -t aitbc/cli:${VERSION} -f Dockerfile . || error "Failed to build CLI image"

    # Build service images (trailing / limits the glob to directories)
    for service_dir in apps/*/; do
        if [ -f "$service_dir/Dockerfile" ]; then
            service_name=$(basename "$service_dir")
            log "Building ${service_name} image..."
            docker build -t aitbc/${service_name}:${VERSION} -f "$service_dir/Dockerfile" "$service_dir" || error "Failed to build ${service_name} image"
        fi
    done

    success "All Docker images built successfully"
}
|
||||
|
||||
# Run tests
# Executes the pytest suites in order (unit, integration, security,
# performance); the first failing suite aborts the deployment via error().
# NOTE(review): assumes pytest and the repo test tree are available on the
# machine running this script — confirm for CI runners.
run_tests() {
    log "Running tests..."

    # Run unit tests (with coverage over the CLI package)
    log "Running unit tests..."
    pytest tests/unit/ -v --cov=aitbc_cli --cov-report=term || error "Unit tests failed"

    # Run integration tests
    log "Running integration tests..."
    pytest tests/integration/ -v || error "Integration tests failed"

    # Run security tests
    log "Running security tests..."
    pytest tests/security/ -v || error "Security tests failed"

    # Run performance tests (single lightweight case, not the full bench)
    log "Running performance tests..."
    pytest tests/performance/test_performance_lightweight.py::TestPerformance::test_cli_performance -v || error "Performance tests failed"

    success "All tests passed"
}
|
||||
|
||||
# Deploy to Kubernetes
# Full cluster rollout into ${NAMESPACE}: secrets/configmaps, Postgres and
# Redis via Bitnami Helm charts, then every application deployment in
# dependency order. Manifests are templated with envsubst, so the exported
# VERSION/NAMESPACE/password variables must be set before calling.
deploy_kubernetes() {
    log "Deploying to Kubernetes namespace: ${NAMESPACE}"

    # Create namespace if it doesn't exist (dry-run + apply is idempotent)
    kubectl create namespace ${NAMESPACE} --dry-run=client -o yaml | kubectl apply -f -

    # Apply secrets
    log "Applying secrets..."
    kubectl apply -f k8s/secrets/ -n ${NAMESPACE} || error "Failed to apply secrets"

    # Apply configmaps
    log "Applying configmaps..."
    kubectl apply -f k8s/configmaps/ -n ${NAMESPACE} || error "Failed to apply configmaps"

    # Deploy database (--wait blocks until the chart reports ready)
    log "Deploying database..."
    helm repo add bitnami https://charts.bitnami.com/bitnami
    helm upgrade --install postgres bitnami/postgresql \
        --namespace ${NAMESPACE} \
        --set auth.postgresPassword=${POSTGRES_PASSWORD} \
        --set auth.database=aitbc \
        --set primary.persistence.size=20Gi \
        --set primary.resources.requests.memory=2Gi \
        --set primary.resources.requests.cpu=1000m \
        --wait || error "Failed to deploy database"

    # Deploy Redis
    log "Deploying Redis..."
    helm upgrade --install redis bitnami/redis \
        --namespace ${NAMESPACE} \
        --set auth.password=${REDIS_PASSWORD} \
        --set master.persistence.size=8Gi \
        --set master.resources.requests.memory=512Mi \
        --set master.resources.requests.cpu=500m \
        --wait || error "Failed to deploy Redis"

    # Deploy core services
    log "Deploying core services..."

    # Deploy blockchain services (each waits up to 5 min for rollout)
    for service in blockchain-node consensus-node network-node; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done

    # Deploy coordinator
    log "Deploying coordinator-api..."
    envsubst < k8s/deployments/coordinator-api.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy coordinator-api"
    kubectl rollout status deployment/coordinator-api -n ${NAMESPACE} --timeout=300s || error "Failed to rollout coordinator-api"

    # Deploy production services
    for service in exchange-integration compliance-service trading-engine; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done

    # Deploy plugin ecosystem
    for service in plugin-registry plugin-marketplace plugin-security plugin-analytics; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done

    # Deploy global infrastructure
    for service in global-infrastructure global-ai-agents multi-region-load-balancer; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done

    # Deploy explorer
    log "Deploying explorer..."
    envsubst < k8s/deployments/explorer.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy explorer"
    kubectl rollout status deployment/explorer -n ${NAMESPACE} --timeout=300s || error "Failed to rollout explorer"

    success "Kubernetes deployment completed"
}
|
||||
|
||||
# Deploy with Docker Compose
# Local/dev rollout: exports default credentials, restarts the whole
# compose stack, then verifies each core container reports "Up".
deploy_docker_compose() {
    log "Deploying with Docker Compose..."

    # Set environment variables
    # NOTE(review): these defaults are weak dev-only credentials; override
    # them in any shared environment.
    export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-aitbc123}
    export REDIS_PASSWORD=${REDIS_PASSWORD:-aitbc123}
    export GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}

    # Stop existing services (|| true: a clean host has nothing to stop)
    log "Stopping existing services..."
    docker-compose down || true

    # Start services
    log "Starting services..."
    docker-compose up -d || error "Failed to start services"

    # Wait for services to be healthy (fixed delay, no active polling)
    log "Waiting for services to be healthy..."
    sleep 30

    # Check service health via the compose status column
    for service in postgres redis blockchain-node coordinator-api exchange-integration; do
        log "Checking ${service} health..."
        if ! docker-compose ps ${service} | grep -q "Up"; then
            error "Service ${service} is not running"
        fi
    done

    success "Docker Compose deployment completed"
}
|
||||
|
||||
# Run health checks
# Picks the check strategy by probing for a reachable Kubernetes cluster:
# if kubectl works, checks pods and LoadBalancer /health endpoints in
# ${NAMESPACE}; otherwise falls back to docker-compose status + localhost
# /health probes.
run_health_checks() {
    log "Running health checks..."

    if command -v kubectl >/dev/null 2>&1 && kubectl cluster-info >/dev/null 2>&1; then
        # Kubernetes health checks
        log "Checking Kubernetes deployment health..."

        # Check pod status
        kubectl get pods -n ${NAMESPACE} || error "Failed to get pod status"

        # Check service health (only the first pod of each service is probed)
        services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
        for service in "${services[@]}"; do
            log "Checking ${service} health..."
            kubectl get pods -n ${NAMESPACE} -l app=${service} -o jsonpath='{.items[0].status.phase}' | grep -q "Running" || error "${service} pods are not running"

            # Check service endpoint — skipped silently when the service has
            # no external LoadBalancer IP yet.
            service_url=$(kubectl get svc ${service} -n ${NAMESPACE} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "")
            if [ -n "$service_url" ]; then
                curl -f http://${service_url}/health >/dev/null 2>&1 || error "${service} health check failed"
            fi
        done

    else
        # Docker Compose health checks
        log "Checking Docker Compose deployment health..."

        services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
        for service in "${services[@]}"; do
            log "Checking ${service} health..."
            if ! docker-compose ps ${service} | grep -q "Up"; then
                error "Service ${service} is not running"
            fi

            # Check health endpoint on the host-mapped port
            # NOTE(review): assumes `docker-compose port` prints a single
            # host:port line — verify for services exposing multiple ports.
            port=$(docker-compose port ${service} | cut -d: -f2)
            curl -f http://localhost:${port}/health >/dev/null 2>&1 || error "${service} health check failed"
        done
    fi

    success "All health checks passed"
}
|
||||
|
||||
# Run smoke tests
# Quick end-to-end sanity checks against the compose stack: CLI --help
# inside the container plus /health on the three core HTTP services.
# NOTE(review): uses docker-compose even after a Kubernetes deploy —
# confirm smoke tests are only expected for local/docker environments.
run_smoke_tests() {
    log "Running smoke tests..."

    # Test CLI functionality
    log "Testing CLI functionality..."
    docker-compose exec aitbc-cli python -m aitbc_cli.main --help >/dev/null || error "CLI smoke test failed"

    # Test API endpoints
    log "Testing API endpoints..."

    # Test coordinator API (port resolved from the compose mapping)
    coordinator_port=$(docker-compose port coordinator-api | cut -d: -f2)
    curl -f http://localhost:${coordinator_port}/health >/dev/null || error "Coordinator API smoke test failed"

    # Test exchange API
    exchange_port=$(docker-compose port exchange-integration | cut -d: -f2)
    curl -f http://localhost:${exchange_port}/health >/dev/null || error "Exchange API smoke test failed"

    # Test plugin registry
    plugin_port=$(docker-compose port plugin-registry | cut -d: -f2)
    curl -f http://localhost:${plugin_port}/health >/dev/null || error "Plugin registry smoke test failed"

    success "Smoke tests passed"
}
|
||||
|
||||
# Rollback deployment
# Kubernetes path: `kubectl rollout undo` for each core deployment and
# wait for it to settle. Compose path: tear down the stack and redeploy
# the image tag 'previous'.
rollback() {
    log "Rolling back deployment..."

    if command -v kubectl >/dev/null 2>&1 && kubectl cluster-info >/dev/null 2>&1; then
        # Kubernetes rollback
        log "Rolling back Kubernetes deployment..."

        services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
        for service in "${services[@]}"; do
            log "Rolling back ${service}..."
            kubectl rollout undo deployment/${service} -n ${NAMESPACE} || error "Failed to rollback ${service}"
            kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollback ${service}"
        done

    else
        # Docker Compose rollback
        log "Rolling back Docker Compose deployment..."
        docker-compose down || error "Failed to stop services"

        # Restart with previous version (assuming it's tagged as 'previous')
        # NOTE(review): requires that a 'previous' image tag actually exists
        # locally — confirm the build pipeline maintains it.
        export VERSION=previous
        deploy_docker_compose
    fi

    success "Rollback completed"
}
|
||||
|
||||
# Cleanup
cleanup() {
    # Reclaim disk space by pruning unused Docker images and volumes.
    log "Cleaning up..."

    local resource
    for resource in image volume; do
        # Pruning is best-effort housekeeping; failures are non-fatal.
        docker "$resource" prune -f || true
    done

    success "Cleanup completed"
}
|
||||
|
||||
# Main deployment function
# Dispatches on ${ENVIRONMENT}: full pipeline (prereqs, build, test,
# deploy, health, smoke) for deploy targets, or the standalone
# rollback/cleanup actions. Unknown values abort via error().
main() {
    log "Starting AITBC deployment..."
    log "Environment: ${ENVIRONMENT}"
    log "Version: ${VERSION}"
    log "Region: ${REGION}"

    case "${ENVIRONMENT}" in
        "local"|"docker")
            check_prerequisites
            build_images
            run_tests
            deploy_docker_compose
            run_health_checks
            run_smoke_tests
            ;;
        "staging"|"production")
            check_prerequisites
            build_images
            run_tests
            deploy_kubernetes
            run_health_checks
            run_smoke_tests
            ;;
        "rollback")
            rollback
            ;;
        "cleanup")
            cleanup
            ;;
        *)
            error "Unknown environment: ${ENVIRONMENT}. Use 'local', 'docker', 'staging', 'production', 'rollback', or 'cleanup'"
            ;;
    esac

    success "Deployment completed successfully!"

    # Display deployment information
    log "Deployment Information:"
    log "Environment: ${ENVIRONMENT}"
    log "Version: ${VERSION}"
    log "Namespace: ${NAMESPACE}"

    # Local compose deploys expose fixed host ports; list them for the user.
    if [ "${ENVIRONMENT}" = "docker" ]; then
        log "Services are running on:"
        log "  Coordinator API: http://localhost:8001"
        log "  Exchange Integration: http://localhost:8010"
        log "  Trading Engine: http://localhost:8012"
        log "  Plugin Registry: http://localhost:8013"
        log "  Plugin Marketplace: http://localhost:8014"
        log "  Explorer: http://localhost:8020"
        log "  Grafana: http://localhost:3000 (admin/admin)"
        log "  Prometheus: http://localhost:9090"
    fi
}
|
||||
|
||||
# Handle script interruption (Ctrl-C / kill exits via error())
trap 'error "Script interrupted"' INT TERM

# Export environment variables for envsubst (used to template the k8s
# manifests in deploy_kubernetes).
# NOTE(review): the password defaults are weak dev-only placeholders;
# real environments must supply their own values.
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-aitbc123}
export REDIS_PASSWORD=${REDIS_PASSWORD:-aitbc123}
export GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}
export VERSION=${VERSION}
export NAMESPACE=${NAMESPACE}

# Run main function
main "$@"
|
||||
24
scripts/deployment/deploy/cleanup-deployment.sh
Executable file
24
scripts/deployment/deploy/cleanup-deployment.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/bash

# Clean up failed deployment and prepare for redeployment
# Runs remotely over ssh against the host aliased as "ns3-root": stops the
# blockchain services, removes their install directories and systemd
# units, and reloads systemd. Every step is best-effort (|| true) so a
# partially-deployed host cleans up without erroring out.

echo "🧹 Cleaning up failed deployment..."
echo "=================================="

# Stop any running services
echo "Stopping services..."
ssh ns3-root "systemctl stop blockchain-node blockchain-rpc nginx 2>/dev/null || true"

# Remove old directories
echo "Removing old directories..."
ssh ns3-root "rm -rf /opt/blockchain-node /opt/blockchain-node-src /opt/blockchain-explorer 2>/dev/null || true"

# Remove systemd services
echo "Removing systemd services..."
ssh ns3-root "systemctl disable blockchain-node blockchain-rpc blockchain-explorer 2>/dev/null || true"
ssh ns3-root "rm -f /etc/systemd/system/blockchain-node.service /etc/systemd/system/blockchain-rpc.service /etc/systemd/system/blockchain-explorer.service 2>/dev/null || true"
ssh ns3-root "systemctl daemon-reload"

echo "✅ Cleanup complete!"
echo ""
echo "You can now run: ./scripts/deploy/deploy-all-remote.sh"
|
||||
109
scripts/deployment/deploy/container-deploy.py
Normal file
109
scripts/deployment/deploy/container-deploy.py
Normal file
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Deploy AITBC services to incus container
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import time
|
||||
import sys
|
||||
|
||||
def run_command(cmd, container=None):
    """Run *cmd* through the shell, optionally inside an incus container.

    Args:
        cmd: Shell command line to execute.
        container: Optional incus container name; when given, the command
            is wrapped as ``incus exec <container> -- <cmd>``.

    Returns:
        True if the command exited with status 0, False otherwise.
    """
    if container:
        # NOTE(review): cmd is interpolated straight into a shell=True
        # command line — never pass untrusted input here.
        cmd = f"incus exec {container} -- {cmd}"
    print(f"Running: {cmd}")
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        # Fixed: diagnostics go to stderr so piped stdout stays clean.
        print(f"Error: {result.stderr}", file=sys.stderr)
        return False
    return True
|
||||
|
||||
def deploy_to_container():
    """Deploy the AITBC services into the local incus container 'aitbc'.

    Side effects: kills locally running services, pushes the project tree
    into the container, builds a venv there, writes and runs a startup
    script. The final run_command blocks until the started services exit.
    """
    container = "aitbc"
    # NOTE(review): hard-coded container IP — confirm it matches the
    # container's actual address before relying on the printed URLs.
    container_ip = "10.1.223.93"

    print("🚀 Deploying AITBC services to container...")

    # Stop local services (best-effort: each command tolerates failure)
    print("\n📋 Stopping local services...")
    subprocess.run("sudo fuser -k 8000/tcp 2>/dev/null || true", shell=True)
    subprocess.run("sudo fuser -k 9080/tcp 2>/dev/null || true", shell=True)
    subprocess.run("pkill -f 'marketplace-ui' 2>/dev/null || true", shell=True)
    subprocess.run("pkill -f 'trade-exchange' 2>/dev/null || true", shell=True)

    # Copy project to container (recursive push of the whole checkout)
    print("\n📁 Copying project to container...")
    subprocess.run(f"incus file push -r /home/oib/windsurf/aitbc {container}/home/oib/", shell=True)

    # Setup Python environment in container
    print("\n🐍 Setting up Python environment...")
    run_command("cd /home/oib/aitbc && python3 -m venv .venv", container)
    run_command("cd /home/oib/aitbc && source .venv/bin/activate && pip install fastapi uvicorn httpx sqlmodel", container)

    # Install dependencies (editable installs of both apps)
    # NOTE(review): run_command return values are ignored here, so a failed
    # install does not stop the deployment — consider checking them.
    print("\n📦 Installing dependencies...")
    run_command("cd /home/oib/aitbc/apps/coordinator-api && source ../../.venv/bin/activate && pip install -e .", container)
    run_command("cd /home/oib/aitbc/apps/blockchain-node && source ../../.venv/bin/activate && pip install -e .", container)

    # Create startup script (content is executed inside the container)
    print("\n🔧 Creating startup script...")
    startup_script = """#!/bin/bash
cd /home/oib/aitbc

# Start blockchain node
echo "Starting blockchain node..."
cd apps/blockchain-node
source ../../.venv/bin/activate
python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080 &
NODE_PID=$!

# Start coordinator API
echo "Starting coordinator API..."
cd ../coordinator-api
source ../../.venv/bin/activate
python -m uvicorn src.app.main:app --host 0.0.0.0 --port 8000 &
COORD_PID=$!

# Start marketplace UI
echo "Starting marketplace UI..."
cd ../marketplace-ui
python server.py --port 3001 &
MARKET_PID=$!

# Start trade exchange
echo "Starting trade exchange..."
cd ../trade-exchange
python server.py --port 3002 &
EXCHANGE_PID=$!

echo "Services started!"
echo "Blockchain: http://10.1.223.93:9080"
echo "API: http://10.1.223.93:8000"
echo "Marketplace: http://10.1.223.93:3001"
echo "Exchange: http://10.1.223.93:3002"

# Wait for services
wait $NODE_PID $COORD_PID $MARKET_PID $EXCHANGE_PID
"""

    # Write startup script to container (via a temp file on the host)
    with open('/tmp/start_aitbc.sh', 'w') as f:
        f.write(startup_script)

    subprocess.run("incus file push /tmp/start_aitbc.sh aitbc/home/oib/", shell=True)
    run_command("chmod +x /home/oib/start_aitbc.sh", container)

    # Start services (blocks until the script's `wait` returns)
    print("\n🚀 Starting AITBC services...")
    run_command("/home/oib/start_aitbc.sh", container)

    print(f"\n✅ Services deployed to container!")
    print(f"\n📋 Access URLs:")
    print(f" 🌐 Container IP: {container_ip}")
    print(f" 📊 Marketplace: http://{container_ip}:3001")
    print(f" 💱 Trade Exchange: http://{container_ip}:3002")
    print(f" 🔗 API: http://{container_ip}:8000")
    print(f" ⛓️ Blockchain: http://{container_ip}:9080")
|
||||
# Run the deployment only when executed as a script (not on import).
if __name__ == "__main__":
    deploy_to_container()
|
||||
56
scripts/deployment/deploy/deploy-all-remote.sh
Executable file
56
scripts/deployment/deploy/deploy-all-remote.sh
Executable file
@@ -0,0 +1,56 @@
|
||||
#!/bin/bash

# Deploy blockchain node and explorer by building directly on ns3
# Pushes the deploy scripts and source tree to the "ns3-root" ssh host and
# builds there, avoiding a large artifact upload over a slow uplink.
# NOTE(review): no `set -e` — a failed scp/ssh step does not stop the
# script; confirm that is intentional.

echo "🚀 AITBC Remote Deployment (Build on Server)"
echo "=========================================="
echo "This will build the blockchain node directly on ns3"
echo "to utilize the gigabit connection instead of uploading."
echo ""

# Copy deployment scripts to server
echo "Copying deployment scripts to ns3..."
scp scripts/deploy/deploy-blockchain-remote.sh ns3-root:/opt/
scp scripts/deploy/deploy-explorer-remote.sh ns3-root:/opt/

# Create directories on server first
echo "Creating directories on ns3..."
ssh ns3-root "mkdir -p /opt/blockchain-node-src /opt/blockchain-node"

# Copy blockchain source code to server (excluding data files)
echo "Copying blockchain source code to ns3..."
rsync -av --exclude='data/' --exclude='*.db' --exclude='__pycache__' --exclude='.venv' apps/blockchain-node/ ns3-root:/opt/blockchain-node-src/

# Execute blockchain deployment (build + install happens on the server)
echo ""
echo "Deploying blockchain node..."
ssh ns3-root "cd /opt && cp -r /opt/blockchain-node-src/* /opt/blockchain-node/ && cd /opt/blockchain-node && chmod +x ../deploy-blockchain-remote.sh && ../deploy-blockchain-remote.sh"

# Wait for blockchain to start (fixed delay, no active polling)
echo ""
echo "Waiting 10 seconds for blockchain node to start..."
sleep 10

# Execute explorer deployment on ns3
echo ""
echo "Deploying blockchain explorer..."
ssh ns3-root "cd /opt && ./deploy-explorer-remote.sh"

# Check services (grep keeps only the status/PID lines)
echo ""
echo "Checking service status..."
ssh ns3-root "systemctl status blockchain-node blockchain-rpc nginx --no-pager | grep -E 'Active:|Main PID:'"

echo ""
echo "✅ Deployment complete!"
echo ""
echo "Services:"
echo " - Blockchain Node RPC: http://localhost:8082"
echo " - Blockchain Explorer: http://localhost:3000"
echo ""
echo "External access:"
echo " - Blockchain Node RPC: http://aitbc.keisanki.net:8082"
echo " - Blockchain Explorer: http://aitbc.keisanki.net:3000"
echo ""
echo "The blockchain node will start syncing automatically."
echo "The explorer connects to the local node and displays real-time data."
||||
207
scripts/deployment/deploy/deploy-blockchain-and-explorer.sh
Executable file
207
scripts/deployment/deploy/deploy-blockchain-and-explorer.sh
Executable file
@@ -0,0 +1,207 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy blockchain node and explorer to incus container
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 Deploying Blockchain Node and Explorer"
|
||||
echo "========================================"
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Copy blockchain node to container
|
||||
print_status "Copying blockchain node to container..."
|
||||
ssh ns3-root "rm -rf /opt/blockchain-node 2>/dev/null || true"
|
||||
scp -r apps/blockchain-node ns3-root:/opt/
|
||||
|
||||
# Setup blockchain node in container
|
||||
print_status "Setting up blockchain node..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cd /opt/blockchain-node
|
||||
|
||||
# Create configuration
|
||||
cat > .env << EOL
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain.db
|
||||
RPC_BIND_HOST=0.0.0.0
|
||||
RPC_BIND_PORT=8082
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7070
|
||||
PROPOSER_KEY=proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=memory
|
||||
EOL
|
||||
|
||||
# Create data directory
|
||||
mkdir -p data/devnet
|
||||
|
||||
# Setup Python environment
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
|
||||
# Generate genesis
|
||||
export PYTHONPATH="${PWD}/src:${PWD}/scripts:${PYTHONPATH:-}"
|
||||
python scripts/make_genesis.py --output data/devnet/genesis.json --force
|
||||
EOF
|
||||
|
||||
# Create systemd service for blockchain node
|
||||
print_status "Creating systemd service for blockchain node..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cat > /etc/systemd/system/blockchain-node.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m aitbc_chain.main
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
cat > /etc/systemd/system/blockchain-rpc.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain RPC API
|
||||
After=blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8082
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-node blockchain-rpc
|
||||
EOF
|
||||
|
||||
# Start blockchain node
|
||||
print_status "Starting blockchain node..."
|
||||
ssh ns3-root "systemctl start blockchain-node blockchain-rpc"
|
||||
|
||||
# Wait for node to start
|
||||
print_status "Waiting for blockchain node to start..."
|
||||
sleep 5
|
||||
|
||||
# Check status
|
||||
print_status "Checking blockchain node status..."
|
||||
ssh ns3-root "systemctl status blockchain-node blockchain-rpc --no-pager | grep -E 'Active:|Main PID:'"
|
||||
|
||||
# Copy explorer to container
|
||||
print_status "Copying blockchain explorer to container..."
|
||||
ssh ns3-root "rm -rf /opt/blockchain-explorer 2>/dev/null || true"
|
||||
scp -r apps/blockchain-explorer ns3-root:/opt/
|
||||
|
||||
# Setup explorer in container
|
||||
print_status "Setting up blockchain explorer..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cd /opt/blockchain-explorer
|
||||
|
||||
# Create Python environment
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
EOF
|
||||
|
||||
# Create systemd service for explorer
|
||||
print_status "Creating systemd service for blockchain explorer..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cat > /etc/systemd/system/blockchain-explorer.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Explorer
|
||||
After=blockchain-rpc.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-explorer
|
||||
Environment=PATH=/opt/blockchain-explorer/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
ExecStart=/opt/blockchain-explorer/.venv/bin/python3 main.py
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-explorer
|
||||
EOF
|
||||
|
||||
# Start explorer
|
||||
print_status "Starting blockchain explorer..."
|
||||
ssh ns3-root "systemctl start blockchain-explorer"
|
||||
|
||||
# Wait for explorer to start
|
||||
print_status "Waiting for explorer to start..."
|
||||
sleep 3
|
||||
|
||||
# Setup port forwarding
|
||||
print_status "Setting up port forwarding..."
|
||||
ssh ns3-root << 'EOF'
|
||||
# Clear existing NAT rules
|
||||
iptables -t nat -F PREROUTING 2>/dev/null || true
|
||||
iptables -t nat -F POSTROUTING 2>/dev/null || true
|
||||
|
||||
# Add port forwarding for blockchain RPC
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 8082 -j DNAT --to-destination 192.168.100.10:8082
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 8082 -j MASQUERADE
|
||||
|
||||
# Add port forwarding for explorer
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 3000 -j DNAT --to-destination 192.168.100.10:3000
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 3000 -j MASQUERADE
|
||||
|
||||
# Save rules
|
||||
mkdir -p /etc/iptables
|
||||
iptables-save > /etc/iptables/rules.v4
|
||||
|
||||
# Install iptables-persistent for persistence
|
||||
apt-get update
|
||||
apt-get install -y iptables-persistent
|
||||
EOF
|
||||
|
||||
# Check all services
|
||||
print_status "Checking all services..."
|
||||
ssh ns3-root "systemctl status blockchain-node blockchain-rpc blockchain-explorer --no-pager | grep -E 'Active:|Main PID:'"
|
||||
|
||||
print_success "✅ Deployment complete!"
|
||||
echo ""
|
||||
echo "Services deployed:"
|
||||
echo " - Blockchain Node RPC: http://192.168.100.10:8082"
|
||||
echo " - Blockchain Explorer: http://192.168.100.10:3000"
|
||||
echo ""
|
||||
echo "External access:"
|
||||
echo " - Blockchain Node RPC: http://aitbc.keisanki.net:8082"
|
||||
echo " - Blockchain Explorer: http://aitbc.keisanki.net:3000"
|
||||
echo ""
|
||||
echo "The explorer is connected to the local blockchain node and will display"
|
||||
echo "real-time blockchain data including blocks and transactions."
|
||||
94
scripts/deployment/deploy/deploy-blockchain-explorer.sh
Executable file
94
scripts/deployment/deploy/deploy-blockchain-explorer.sh
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy blockchain explorer to incus container
|
||||
|
||||
set -e
|
||||
|
||||
echo "🔍 Deploying Blockchain Explorer"
|
||||
echo "================================="
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Copy explorer to container
|
||||
print_status "Copying blockchain explorer to container..."
|
||||
ssh ns3-root "rm -rf /opt/blockchain-explorer 2>/dev/null || true"
|
||||
scp -r apps/blockchain-explorer ns3-root:/opt/
|
||||
|
||||
# Setup explorer in container
|
||||
print_status "Setting up blockchain explorer..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cd /opt/blockchain-explorer
|
||||
|
||||
# Create Python environment
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
EOF
|
||||
|
||||
# Create systemd service for explorer
|
||||
print_status "Creating systemd service for blockchain explorer..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cat > /etc/systemd/system/blockchain-explorer.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Explorer
|
||||
After=blockchain-rpc.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-explorer
|
||||
Environment=PATH=/opt/blockchain-explorer/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
ExecStart=/opt/blockchain-explorer/.venv/bin/python3 main.py
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-explorer
|
||||
EOF
|
||||
|
||||
# Start explorer
|
||||
print_status "Starting blockchain explorer..."
|
||||
ssh ns3-root "systemctl start blockchain-explorer"
|
||||
|
||||
# Wait for explorer to start
|
||||
print_status "Waiting for explorer to start..."
|
||||
sleep 3
|
||||
|
||||
# Setup port forwarding for explorer
|
||||
print_status "Setting up port forwarding for explorer..."
|
||||
ssh ns3-root << 'EOF'
|
||||
# Add port forwarding for explorer
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 3000 -j DNAT --to-destination 192.168.100.10:3000
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 3000 -j MASQUERADE
|
||||
|
||||
# Save rules
|
||||
iptables-save > /etc/iptables/rules.v4
|
||||
EOF
|
||||
|
||||
# Check status
|
||||
print_status "Checking blockchain explorer status..."
|
||||
ssh ns3-root "systemctl status blockchain-explorer --no-pager | grep -E 'Active:|Main PID:'"
|
||||
|
||||
print_success "✅ Blockchain explorer deployed!"
|
||||
echo ""
|
||||
echo "Explorer URL: http://192.168.100.10:3000"
|
||||
echo "External URL: http://aitbc.keisanki.net:3000"
|
||||
echo ""
|
||||
echo "The explorer will automatically connect to the local blockchain node."
|
||||
echo "You can view blocks, transactions, and chain statistics."
|
||||
157
scripts/deployment/deploy/deploy-blockchain-remote.sh
Normal file
157
scripts/deployment/deploy/deploy-blockchain-remote.sh
Normal file
@@ -0,0 +1,157 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy blockchain node directly on ns3 server (build in place)
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 Deploying Blockchain Node on ns3 (Build in Place)"
|
||||
echo "====================================================="
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Check if we're on the right server
|
||||
print_status "Checking server..."
|
||||
if [ "$(hostname)" != "ns3" ] && [ "$(hostname)" != "aitbc" ]; then
|
||||
print_warning "This script should be run on ns3 server"
|
||||
echo "Please run: ssh ns3-root"
|
||||
echo "Then: cd /opt && ./deploy-blockchain-remote.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install dependencies if needed
|
||||
print_status "Installing dependencies..."
|
||||
apt-get update
|
||||
apt-get install -y python3 python3-venv python3-pip git curl
|
||||
|
||||
# Create directory
|
||||
print_status "Creating blockchain node directory..."
|
||||
mkdir -p /opt/blockchain-node
|
||||
cd /opt/blockchain-node
|
||||
|
||||
# Check if source code exists
|
||||
if [ ! -d "src" ]; then
|
||||
print_status "Source code not found in /opt/blockchain-node, copying from /opt/blockchain-node-src..."
|
||||
if [ -d "/opt/blockchain-node-src" ]; then
|
||||
cp -r /opt/blockchain-node-src/* .
|
||||
else
|
||||
print_warning "Source code not found. Please ensure it was copied properly."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Setup Python environment
|
||||
print_status "Setting up Python environment..."
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
|
||||
# Create configuration with auto-sync
|
||||
print_status "Creating configuration..."
|
||||
cat > .env << EOL
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain.db
|
||||
RPC_BIND_HOST=0.0.0.0
|
||||
RPC_BIND_PORT=8082
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7070
|
||||
PROPOSER_KEY=proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=memory
|
||||
EOL
|
||||
|
||||
# Create fresh data directory
|
||||
print_status "Creating fresh data directory..."
|
||||
rm -rf data
|
||||
mkdir -p data/devnet
|
||||
|
||||
# Generate fresh genesis
|
||||
print_status "Generating fresh genesis block..."
|
||||
export PYTHONPATH="${PWD}/src:${PWD}/scripts:${PYTHONPATH:-}"
|
||||
python scripts/make_genesis.py --output data/devnet/genesis.json --force
|
||||
|
||||
# Create systemd service for blockchain node
|
||||
print_status "Creating systemd services..."
|
||||
cat > /etc/systemd/system/blockchain-node.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m aitbc_chain.main
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
cat > /etc/systemd/system/blockchain-rpc.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain RPC API
|
||||
After=blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8082
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
# Enable and start services
|
||||
print_status "Starting blockchain node..."
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-node blockchain-rpc
|
||||
systemctl start blockchain-node blockchain-rpc
|
||||
|
||||
# Wait for services to start
|
||||
print_status "Waiting for services to start..."
|
||||
sleep 5
|
||||
|
||||
# Check status
|
||||
print_status "Checking service status..."
|
||||
systemctl status blockchain-node blockchain-rpc --no-pager | head -15
|
||||
|
||||
# Setup port forwarding if in container
|
||||
if [ "$(hostname)" = "aitbc" ]; then
|
||||
print_status "Setting up port forwarding..."
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 8082 -j DNAT --to-destination 192.168.100.10:8082
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 8082 -j MASQUERADE
|
||||
iptables-save > /etc/iptables/rules.v4
|
||||
fi
|
||||
|
||||
print_success "✅ Blockchain node deployed!"
|
||||
echo ""
|
||||
if [ "$(hostname)" = "aitbc" ]; then
|
||||
echo "Node RPC: http://192.168.100.10:8082"
|
||||
echo "External RPC: http://aitbc.keisanki.net:8082"
|
||||
else
|
||||
echo "Node RPC: http://95.216.198.140:8082"
|
||||
echo "External RPC: http://aitbc.keisanki.net:8082"
|
||||
fi
|
||||
echo ""
|
||||
echo "The node will automatically sync on startup."
|
||||
139
scripts/deployment/deploy/deploy-blockchain.sh
Executable file
139
scripts/deployment/deploy/deploy-blockchain.sh
Executable file
@@ -0,0 +1,139 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy blockchain node and explorer to incus container
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 Deploying Blockchain Node and Explorer"
|
||||
echo "========================================"
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Copy blockchain node to container
|
||||
print_status "Copying blockchain node to container..."
|
||||
ssh ns3-root "rm -rf /opt/blockchain-node 2>/dev/null || true"
|
||||
scp -r apps/blockchain-node ns3-root:/opt/
|
||||
|
||||
# Setup blockchain node in container
|
||||
print_status "Setting up blockchain node..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cd /opt/blockchain-node
|
||||
|
||||
# Create configuration
|
||||
cat > .env << EOL
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain.db
|
||||
RPC_BIND_HOST=0.0.0.0
|
||||
RPC_BIND_PORT=8082
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7070
|
||||
PROPOSER_KEY=proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=memory
|
||||
EOL
|
||||
|
||||
# Create data directory
|
||||
mkdir -p data/devnet
|
||||
|
||||
# Setup Python environment
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
|
||||
# Generate genesis
|
||||
export PYTHONPATH="${PWD}/src:${PWD}/scripts:${PYTHONPATH:-}"
|
||||
python scripts/make_genesis.py --output data/devnet/genesis.json --force
|
||||
EOF
|
||||
|
||||
# Create systemd service for blockchain node
|
||||
print_status "Creating systemd service for blockchain node..."
|
||||
ssh ns3-root << 'EOF'
|
||||
cat > /etc/systemd/system/blockchain-node.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m aitbc_chain.main
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
cat > /etc/systemd/system/blockchain-rpc.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain RPC API
|
||||
After=blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8082
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-node blockchain-rpc
|
||||
EOF
|
||||
|
||||
# Start blockchain node
|
||||
print_status "Starting blockchain node..."
|
||||
ssh ns3-root "systemctl start blockchain-node blockchain-rpc"
|
||||
|
||||
# Wait for node to start
|
||||
print_status "Waiting for blockchain node to start..."
|
||||
sleep 5
|
||||
|
||||
# Check status
|
||||
print_status "Checking blockchain node status..."
|
||||
ssh ns3-root "systemctl status blockchain-node blockchain-rpc --no-pager | grep -E 'Active:|Main PID:'"
|
||||
|
||||
# Setup port forwarding
|
||||
print_status "Setting up port forwarding..."
|
||||
ssh ns3-root << 'EOF'
|
||||
# Clear existing rules
|
||||
iptables -t nat -F PREROUTING 2>/dev/null || true
|
||||
iptables -t nat -F POSTROUTING 2>/dev/null || true
|
||||
|
||||
# Add port forwarding for blockchain RPC
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 8082 -j DNAT --to-destination 192.168.100.10:8082
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 8082 -j MASQUERADE
|
||||
|
||||
# Save rules
|
||||
mkdir -p /etc/iptables
|
||||
iptables-save > /etc/iptables/rules.v4
|
||||
EOF
|
||||
|
||||
print_success "✅ Blockchain node deployed!"
|
||||
echo ""
|
||||
echo "Node RPC: http://192.168.100.10:8082"
|
||||
echo "External RPC: http://aitbc.keisanki.net:8082"
|
||||
echo ""
|
||||
echo "Next: Deploying blockchain explorer..."
|
||||
316
scripts/deployment/deploy/deploy-direct.sh
Executable file
316
scripts/deployment/deploy/deploy-direct.sh
Executable file
@@ -0,0 +1,316 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy blockchain node and explorer directly on ns3
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 AITBC Direct Deployment on ns3"
|
||||
echo "================================="
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Check if we're on ns3
|
||||
if [ "$(hostname)" != "ns3" ] && [ "$(hostname)" != "aitbc" ]; then
|
||||
print_warning "This script must be run on ns3 server"
|
||||
echo "Run: ssh ns3-root"
|
||||
echo "Then: cd /opt && ./deploy-direct.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Stop existing services
|
||||
print_status "Stopping existing services..."
|
||||
systemctl stop blockchain-node blockchain-rpc blockchain-explorer nginx 2>/dev/null || true
|
||||
|
||||
# Install dependencies
|
||||
print_status "Installing dependencies..."
|
||||
apt-get update
|
||||
apt-get install -y python3 python3-venv python3-pip git curl nginx
|
||||
|
||||
# Deploy blockchain node
|
||||
print_status "Deploying blockchain node..."
|
||||
cd /opt
|
||||
rm -rf blockchain-node
|
||||
cp -r blockchain-node-src blockchain-node
|
||||
cd blockchain-node
|
||||
|
||||
# Create configuration
|
||||
print_status "Creating configuration..."
|
||||
cat > .env << EOL
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain.db
|
||||
RPC_BIND_HOST=0.0.0.0
|
||||
RPC_BIND_PORT=8082
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7070
|
||||
PROPOSER_KEY=proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=memory
|
||||
EOL
|
||||
|
||||
# Create fresh data directory
|
||||
rm -rf data
|
||||
mkdir -p data/devnet
|
||||
|
||||
# Setup Python environment
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
|
||||
# Generate genesis
|
||||
export PYTHONPATH="${PWD}/src:${PWD}/scripts:${PYTHONPATH:-}"
|
||||
python scripts/make_genesis.py --output data/devnet/genesis.json --force
|
||||
|
||||
# Create systemd services
|
||||
print_status "Creating systemd services..."
|
||||
cat > /etc/systemd/system/blockchain-node.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m aitbc_chain.main
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
cat > /etc/systemd/system/blockchain-rpc.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain RPC API
|
||||
After=blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8082
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
# Start blockchain services
|
||||
print_status "Starting blockchain services..."
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-node blockchain-rpc
|
||||
systemctl start blockchain-node blockchain-rpc
|
||||
|
||||
# Deploy explorer
|
||||
print_status "Deploying blockchain explorer..."
|
||||
cd /opt
|
||||
rm -rf blockchain-explorer
|
||||
mkdir -p blockchain-explorer
|
||||
cd blockchain-explorer
|
||||
|
||||
# Create HTML explorer
|
||||
cat > index.html << 'EOF'
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>AITBC Blockchain Explorer</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
<script src="https://unpkg.com/lucide@latest"></script>
|
||||
</head>
|
||||
<body class="bg-gray-50">
|
||||
<header class="bg-blue-600 text-white shadow-lg">
|
||||
<div class="container mx-auto px-4 py-4">
|
||||
<div class="flex items-center justify-between">
|
||||
<div class="flex items-center space-x-3">
|
||||
<i data-lucide="cube" class="w-8 h-8"></i>
|
||||
<h1 class="text-2xl font-bold">AITBC Blockchain Explorer</h1>
|
||||
</div>
|
||||
<button onclick="refreshData()" class="bg-blue-500 hover:bg-blue-400 px-3 py-1 rounded flex items-center space-x-1">
|
||||
<i data-lucide="refresh-cw" class="w-4 h-4"></i>
|
||||
<span>Refresh</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<main class="container mx-auto px-4 py-8">
|
||||
<div class="grid grid-cols-1 md:grid-cols-3 gap-6 mb-8">
|
||||
<div class="bg-white rounded-lg shadow p-6">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-gray-500 text-sm">Current Height</p>
|
||||
<p class="text-2xl font-bold" id="chain-height">-</p>
|
||||
</div>
|
||||
<i data-lucide="trending-up" class="w-10 h-10 text-green-500"></i>
|
||||
</div>
|
||||
</div>
|
||||
<div class="bg-white rounded-lg shadow p-6">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-gray-500 text-sm">Latest Block</p>
|
||||
<p class="text-lg font-mono" id="latest-hash">-</p>
|
||||
</div>
|
||||
<i data-lucide="hash" class="w-10 h-10 text-blue-500"></i>
|
||||
</div>
|
||||
</div>
|
||||
<div class="bg-white rounded-lg shadow p-6">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-gray-500 text-sm">Node Status</p>
|
||||
<p class="text-lg font-semibold" id="node-status">-</p>
|
||||
</div>
|
||||
<i data-lucide="activity" class="w-10 h-10 text-purple-500"></i>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="bg-white rounded-lg shadow">
|
||||
<div class="px-6 py-4 border-b">
|
||||
<h2 class="text-xl font-semibold flex items-center">
|
||||
<i data-lucide="blocks" class="w-5 h-5 mr-2"></i>
|
||||
Latest Blocks
|
||||
</h2>
|
||||
</div>
|
||||
<div class="p-6">
|
||||
<table class="w-full">
|
||||
<thead>
|
||||
<tr class="text-left text-gray-500 text-sm">
|
||||
<th class="pb-3">Height</th>
|
||||
<th class="pb-3">Hash</th>
|
||||
<th class="pb-3">Timestamp</th>
|
||||
<th class="pb-3">Transactions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="blocks-table">
|
||||
<tr>
|
||||
<td colspan="4" class="text-center py-8 text-gray-500">
|
||||
Loading blocks...
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</main>
|
||||
|
||||
<script>
|
||||
lucide.createIcons();
|
||||
|
||||
const RPC_URL = 'http://localhost:8082';
|
||||
|
||||
async function refreshData() {
|
||||
try {
|
||||
const response = await fetch(`${RPC_URL}/rpc/head`);
|
||||
const head = await response.json();
|
||||
|
||||
document.getElementById('chain-height').textContent = head.height || '-';
|
||||
document.getElementById('latest-hash').textContent = head.hash ? head.hash.substring(0, 16) + '...' : '-';
|
||||
document.getElementById('node-status').innerHTML = '<span class="text-green-500">Online</span>';
|
||||
|
||||
// Load last 10 blocks
|
||||
const tbody = document.getElementById('blocks-table');
|
||||
tbody.innerHTML = '';
|
||||
|
||||
for (let i = 0; i < 10 && head.height - i >= 0; i++) {
|
||||
const blockResponse = await fetch(`${RPC_URL}/rpc/blocks/${head.height - i}`);
|
||||
const block = await blockResponse.json();
|
||||
|
||||
const row = tbody.insertRow();
|
||||
row.innerHTML = `
|
||||
<td class="py-3 font-mono">${block.height}</td>
|
||||
<td class="py-3 font-mono text-sm">${block.hash ? block.hash.substring(0, 16) + '...' : '-'}</td>
|
||||
<td class="py-3 text-sm">${new Date(block.timestamp * 1000).toLocaleString()}</td>
|
||||
<td class="py-3">${block.transactions ? block.transactions.length : 0}</td>
|
||||
`;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
document.getElementById('node-status').innerHTML = '<span class="text-red-500">Error</span>';
|
||||
}
|
||||
}
|
||||
|
||||
refreshData();
|
||||
setInterval(refreshData, 30000);
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
EOF
|
||||
|
||||
# Configure nginx
|
||||
print_status "Configuring nginx..."
|
||||
cat > /etc/nginx/sites-available/blockchain-explorer << EOL
|
||||
server {
|
||||
listen 3000;
|
||||
server_name _;
|
||||
root /opt/blockchain-explorer;
|
||||
index index.html;
|
||||
|
||||
location / {
|
||||
try_files \$uri \$uri/ =404;
|
||||
}
|
||||
}
|
||||
EOL
|
||||
|
||||
ln -sf /etc/nginx/sites-available/blockchain-explorer /etc/nginx/sites-enabled/
|
||||
rm -f /etc/nginx/sites-enabled/default
|
||||
nginx -t
|
||||
systemctl reload nginx
|
||||
|
||||
# Setup port forwarding if in container
|
||||
if [ "$(hostname)" = "aitbc" ]; then
|
||||
print_status "Setting up port forwarding..."
|
||||
iptables -t nat -F PREROUTING 2>/dev/null || true
|
||||
iptables -t nat -F POSTROUTING 2>/dev/null || true
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 8082 -j DNAT --to-destination 192.168.100.10:8082
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 8082 -j MASQUERADE
|
||||
iptables -t nat -A PREROUTING -p tcp --dport 3000 -j DNAT --to-destination 192.168.100.10:3000
|
||||
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 3000 -j MASQUERADE
|
||||
iptables-save > /etc/iptables/rules.v4
|
||||
fi
|
||||
|
||||
# Wait for services to start
|
||||
print_status "Waiting for services to start..."
|
||||
sleep 5
|
||||
|
||||
# Check services
|
||||
print_status "Checking service status..."
|
||||
systemctl status blockchain-node blockchain-rpc nginx --no-pager | grep -E 'Active:|Main PID:'
|
||||
|
||||
print_success "✅ Deployment complete!"
|
||||
echo ""
|
||||
echo "Services:"
|
||||
if [ "$(hostname)" = "aitbc" ]; then
|
||||
echo " - Blockchain Node RPC: http://192.168.100.10:8082"
|
||||
echo " - Blockchain Explorer: http://192.168.100.10:3000"
|
||||
echo ""
|
||||
echo "External access:"
|
||||
echo " - Blockchain Node RPC: http://aitbc.keisanki.net:8082"
|
||||
echo " - Blockchain Explorer: http://aitbc.keisanki.net:3000"
|
||||
else
|
||||
echo " - Blockchain Node RPC: http://localhost:8082"
|
||||
echo " - Blockchain Explorer: http://localhost:3000"
|
||||
echo ""
|
||||
echo "External access:"
|
||||
echo " - Blockchain Node RPC: http://aitbc.keisanki.net:8082"
|
||||
echo " - Blockchain Explorer: http://aitbc.keisanki.net:3000"
|
||||
fi
|
||||
88
scripts/deployment/deploy/deploy-domain.sh
Executable file
88
scripts/deployment/deploy/deploy-domain.sh
Executable file
@@ -0,0 +1,88 @@
|
||||
#!/bin/bash
#
# Deploy AITBC services to the domain https://aitbc.bubuit.net.
# Stops any locally running services, pushes the app into the "aitbc"
# incus container, configures nginx for the domain, rewrites the UI API
# endpoints, and restarts the in-container services.
#
# Requires: incus CLI on the host, sudo rights, a running "aitbc" container.

set -e

DOMAIN="aitbc.bubuit.net"
CONTAINER="aitbc"

echo "🚀 Deploying AITBC services to https://$DOMAIN"
echo ""

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Stop local services (ignore failures: the ports may already be free)
print_status "Stopping local services..."
sudo fuser -k 8000/tcp 2>/dev/null || true
sudo fuser -k 9080/tcp 2>/dev/null || true
sudo fuser -k 3001/tcp 2>/dev/null || true
sudo fuser -k 3002/tcp 2>/dev/null || true

# Deploy to container
print_status "Deploying to container..."
python /home/oib/windsurf/aitbc/container-deploy.py

# Copy nginx config to container
print_status "Configuring nginx for domain..."
incus file push /home/oib/windsurf/aitbc/nginx-aitbc.conf "$CONTAINER"/etc/nginx/sites-available/aitbc

# Enable site
incus exec "$CONTAINER" -- ln -sf /etc/nginx/sites-available/aitbc /etc/nginx/sites-enabled/
incus exec "$CONTAINER" -- rm -f /etc/nginx/sites-enabled/default

# Test nginx config before reloading so a bad config aborts the deploy
incus exec "$CONTAINER" -- nginx -t

# Reload nginx
incus exec "$CONTAINER" -- systemctl reload nginx

# Install SSL certificate (Let's Encrypt) — manual step, printed as a reminder
print_warning "SSL Certificate Setup:"
echo "1. Ensure port 80/443 are forwarded to container IP (10.1.223.93)"
echo "2. Run certbot in container:"
echo "   incus exec $CONTAINER -- certbot --nginx -d $DOMAIN"
echo ""

# Update UIs to use correct API endpoints
print_status "Updating API endpoints..."

# Update marketplace API base URL
incus exec "$CONTAINER" -- sed -i "s|http://127.0.0.1:8000|https://$DOMAIN/api|g" /home/oib/aitbc/apps/marketplace-ui/index.html

# Update exchange API endpoints
incus exec "$CONTAINER" -- sed -i "s|http://127.0.0.1:8000|https://$DOMAIN/api|g" /home/oib/aitbc/apps/trade-exchange/index.html
incus exec "$CONTAINER" -- sed -i "s|http://127.0.0.1:9080|https://$DOMAIN/rpc|g" /home/oib/aitbc/apps/trade-exchange/index.html

# Restart services to apply changes.
# NB: pkill exits non-zero when nothing matches, which would abort the
# whole script under `set -e`, so tolerate "no process found".
print_status "Restarting services..."
incus exec "$CONTAINER" -- pkill -f "server.py" || true
sleep 2
incus exec "$CONTAINER" -- /home/oib/start_aitbc.sh

echo ""
print_status "✅ Deployment complete!"
echo ""
echo "📋 Service URLs:"
echo "   🌐 Domain: https://$DOMAIN"
echo "   📊 Marketplace: https://$DOMAIN/Marketplace"
echo "   💱 Trade Exchange: https://$DOMAIN/Exchange"
echo "   🔗 API: https://$DOMAIN/api"
echo "   ⛓️ Blockchain RPC: https://$DOMAIN/rpc"
echo ""
echo "📝 Next Steps:"
echo "1. Forward ports 80/443 to container IP (10.1.223.93)"
echo "2. Install SSL certificate:"
echo "   incus exec $CONTAINER -- certbot --nginx -d $DOMAIN"
echo "3. Test services at the URLs above"
|
||||
74
scripts/deployment/deploy/deploy-exchange.sh
Executable file
74
scripts/deployment/deploy/deploy-exchange.sh
Executable file
@@ -0,0 +1,74 @@
|
||||
#!/bin/bash
#
# Deploy the AITBC Trade Exchange: copy the updated UI and server files to
# the server over SSH, ensure the shared assets exist, restart the systemd
# unit and smoke-test the endpoint.
#
# Requires: SSH key access to $SERVER.

set -e

SERVER="root@10.1.223.93"
EXCHANGE_DIR="/root/aitbc/apps/trade-exchange"

echo "🚀 Deploying AITBC Trade Exchange"
echo "=================================="
echo "Server: $SERVER"
echo ""

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Test SSH connection early so a broken connection fails fast
print_status "Testing SSH connection..."
ssh "$SERVER" "hostname && ip a show eth0 | grep inet"

# Copy updated files
print_status "Copying updated Exchange files..."
scp /home/oib/windsurf/aitbc/apps/trade-exchange/index.html "$SERVER:$EXCHANGE_DIR/"
scp /home/oib/windsurf/aitbc/apps/trade-exchange/server.py "$SERVER:$EXCHANGE_DIR/"

# Ensure assets are available
print_status "Ensuring assets directory exists..."
ssh "$SERVER" "mkdir -p /var/www/aitbc.bubuit.net/assets"
ssh "$SERVER" "mkdir -p /var/www/aitbc.bubuit.net/assets/css"
ssh "$SERVER" "mkdir -p /var/www/aitbc.bubuit.net/assets/js"

# Copy assets only when the main stylesheet is missing (first deploy)
print_status "Copying assets if needed..."
if ! ssh "$SERVER" "test -f /var/www/aitbc.bubuit.net/assets/css/aitbc.css"; then
    scp -r /home/oib/windsurf/aitbc/assets/* "$SERVER:/var/www/aitbc.bubuit.net/assets/"
fi

# Restart the exchange service
print_status "Restarting Trade Exchange service..."
ssh "$SERVER" "systemctl restart aitbc-exchange"

# Wait for service to start
print_status "Waiting for service to start..."
sleep 5

# Check service status
print_status "Checking service status..."
ssh "$SERVER" "systemctl status aitbc-exchange --no-pager -l | head -10"

# Test the endpoint (first 100 bytes are enough to prove it answers)
print_status "Testing Exchange endpoint..."
ssh "$SERVER" "curl -s http://127.0.0.1:3002/ | head -c 100"
echo ""

echo ""
print_status "✅ Exchange deployment complete!"
echo ""
echo "📋 URLs:"
echo "   🌐 IP: http://10.1.223.93/Exchange"
echo "   🔒 Domain: https://aitbc.bubuit.net/Exchange"
echo ""
echo "🔍 To check logs:"
echo "   ssh $SERVER 'journalctl -u aitbc-exchange -f'"
|
||||
396
scripts/deployment/deploy/deploy-explorer-remote.sh
Normal file
396
scripts/deployment/deploy/deploy-explorer-remote.sh
Normal file
@@ -0,0 +1,396 @@
|
||||
#!/bin/bash
#
# Deploy the blockchain explorer directly on the ns3 server (or inside the
# "aitbc" container). Writes a self-contained static HTML explorer that
# talks to the node RPC on port 8082, serves it with nginx on port 3000,
# and (when run in the container) sets up iptables port forwarding.
#
# Requires: root privileges (apt-get, /etc/nginx, iptables).

set -e

echo "🔍 Deploying Blockchain Explorer on ns3"
echo "======================================"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Used at the end of the script; previously it was called without being
# defined, which made the final step fail with "command not found".
print_success() {
    echo -e "${GREEN}[OK]${NC} $1"
}

# Refuse to run on the wrong machine
if [ "$(hostname)" != "ns3" ] && [ "$(hostname)" != "aitbc" ]; then
    print_warning "This script should be run on ns3 server"
    exit 1
fi

# Create directory
print_status "Creating blockchain explorer directory..."
mkdir -p /opt/blockchain-explorer
cd /opt/blockchain-explorer

# Create a simple HTML-based explorer (no build step needed).
# The heredoc delimiter is quoted, so nothing inside is expanded by the shell.
print_status "Creating web-based explorer..."
cat > index.html << 'EOF'
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AITBC Blockchain Explorer</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <script src="https://unpkg.com/lucide@latest"></script>
    <style>
        .fade-in { animation: fadeIn 0.3s ease-in; }
        @keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
    </style>
</head>
<body class="bg-gray-50">
    <header class="bg-blue-600 text-white shadow-lg">
        <div class="container mx-auto px-4 py-4">
            <div class="flex items-center justify-between">
                <div class="flex items-center space-x-3">
                    <i data-lucide="cube" class="w-8 h-8"></i>
                    <h1 class="text-2xl font-bold">AITBC Blockchain Explorer</h1>
                </div>
                <div class="flex items-center space-x-4">
                    <span class="text-sm">Network: <span class="font-mono bg-blue-700 px-2 py-1 rounded">ait-devnet</span></span>
                    <button onclick="refreshData()" class="bg-blue-500 hover:bg-blue-400 px-3 py-1 rounded flex items-center space-x-1">
                        <i data-lucide="refresh-cw" class="w-4 h-4"></i>
                        <span>Refresh</span>
                    </button>
                </div>
            </div>
        </div>
    </header>

    <main class="container mx-auto px-4 py-8">
        <!-- Chain Stats -->
        <div class="grid grid-cols-1 md:grid-cols-3 gap-6 mb-8">
            <div class="bg-white rounded-lg shadow p-6">
                <div class="flex items-center justify-between">
                    <div>
                        <p class="text-gray-500 text-sm">Current Height</p>
                        <p class="text-2xl font-bold" id="chain-height">-</p>
                    </div>
                    <i data-lucide="trending-up" class="w-10 h-10 text-green-500"></i>
                </div>
            </div>
            <div class="bg-white rounded-lg shadow p-6">
                <div class="flex items-center justify-between">
                    <div>
                        <p class="text-gray-500 text-sm">Latest Block</p>
                        <p class="text-lg font-mono" id="latest-hash">-</p>
                    </div>
                    <i data-lucide="hash" class="w-10 h-10 text-blue-500"></i>
                </div>
            </div>
            <div class="bg-white rounded-lg shadow p-6">
                <div class="flex items-center justify-between">
                    <div>
                        <p class="text-gray-500 text-sm">Node Status</p>
                        <p class="text-lg font-semibold" id="node-status">-</p>
                    </div>
                    <i data-lucide="activity" class="w-10 h-10 text-purple-500"></i>
                </div>
            </div>
        </div>

        <!-- Search -->
        <div class="bg-white rounded-lg shadow p-6 mb-8">
            <div class="flex space-x-4">
                <input type="text" id="search-input" placeholder="Search by block height, hash, or transaction hash"
                    class="flex-1 px-4 py-2 border rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500">
                <button onclick="search()" class="bg-blue-600 text-white px-6 py-2 rounded-lg hover:bg-blue-700">
                    Search
                </button>
            </div>
        </div>

        <!-- Latest Blocks -->
        <div class="bg-white rounded-lg shadow">
            <div class="px-6 py-4 border-b">
                <h2 class="text-xl font-semibold flex items-center">
                    <i data-lucide="blocks" class="w-5 h-5 mr-2"></i>
                    Latest Blocks
                </h2>
            </div>
            <div class="p-6">
                <div class="overflow-x-auto">
                    <table class="w-full">
                        <thead>
                            <tr class="text-left text-gray-500 text-sm">
                                <th class="pb-3">Height</th>
                                <th class="pb-3">Hash</th>
                                <th class="pb-3">Timestamp</th>
                                <th class="pb-3">Transactions</th>
                                <th class="pb-3">Actions</th>
                            </tr>
                        </thead>
                        <tbody id="blocks-table">
                            <tr>
                                <td colspan="5" class="text-center py-8 text-gray-500">
                                    Loading blocks...
                                </td>
                            </tr>
                        </tbody>
                    </table>
                </div>
            </div>
        </div>

        <!-- Block Details Modal -->
        <div id="block-modal" class="fixed inset-0 bg-black bg-opacity-50 hidden z-50">
            <div class="flex items-center justify-center min-h-screen p-4">
                <div class="bg-white rounded-lg max-w-4xl w-full max-h-[90vh] overflow-y-auto">
                    <div class="p-6 border-b">
                        <div class="flex justify-between items-center">
                            <h2 class="text-2xl font-bold">Block Details</h2>
                            <button onclick="closeModal()" class="text-gray-500 hover:text-gray-700">
                                <i data-lucide="x" class="w-6 h-6"></i>
                            </button>
                        </div>
                    </div>
                    <div class="p-6" id="block-details">
                        <!-- Block details will be loaded here -->
                    </div>
                </div>
            </div>
        </div>
    </main>

    <footer class="bg-gray-800 text-white mt-12">
        <div class="container mx-auto px-4 py-6 text-center">
            <p class="text-sm">AITBC Blockchain Explorer - Connected to node at http://localhost:8082</p>
        </div>
    </footer>

    <script>
        // Initialize lucide icons
        lucide.createIcons();

        // RPC URL - change based on environment
        const RPC_URL = window.location.hostname === 'localhost' ?
            'http://localhost:8082' :
            'http://95.216.198.140:8082';

        // Global state
        let currentData = {};

        // Load initial data
        document.addEventListener('DOMContentLoaded', () => {
            refreshData();
        });

        // Refresh all data
        async function refreshData() {
            try {
                await Promise.all([
                    loadChainStats(),
                    loadLatestBlocks()
                ]);
            } catch (error) {
                console.error('Error refreshing data:', error);
                document.getElementById('node-status').innerHTML = '<span class="text-red-500">Error</span>';
            }
        }

        // Load chain statistics
        async function loadChainStats() {
            const response = await fetch(`${RPC_URL}/rpc/head`);
            const data = await response.json();

            document.getElementById('chain-height').textContent = data.height || '-';
            document.getElementById('latest-hash').textContent = data.hash ? data.hash.substring(0, 16) + '...' : '-';
            document.getElementById('node-status').innerHTML = '<span class="text-green-500">Online</span>';

            currentData.head = data;
        }

        // Load latest blocks
        async function loadLatestBlocks() {
            const tbody = document.getElementById('blocks-table');
            tbody.innerHTML = '<tr><td colspan="5" class="text-center py-8 text-gray-500">Loading blocks...</td></tr>';

            const head = await fetch(`${RPC_URL}/rpc/head`).then(r => r.json());
            const blocks = [];

            // Load last 10 blocks
            for (let i = 0; i < 10 && head.height - i >= 0; i++) {
                const block = await fetch(`${RPC_URL}/rpc/blocks/${head.height - i}`).then(r => r.json());
                blocks.push(block);
            }

            tbody.innerHTML = blocks.map(block => `
                <tr class="border-t hover:bg-gray-50">
                    <td class="py-3 font-mono">${block.height}</td>
                    <td class="py-3 font-mono text-sm">${block.hash ? block.hash.substring(0, 16) + '...' : '-'}</td>
                    <td class="py-3 text-sm">${formatTimestamp(block.timestamp)}</td>
                    <td class="py-3">${block.transactions ? block.transactions.length : 0}</td>
                    <td class="py-3">
                        <button onclick="showBlockDetails(${block.height})" class="text-blue-600 hover:text-blue-800">
                            View Details
                        </button>
                    </td>
                </tr>
            `).join('');
        }

        // Show block details
        async function showBlockDetails(height) {
            const block = await fetch(`${RPC_URL}/rpc/blocks/${height}`).then(r => r.json());
            const modal = document.getElementById('block-modal');
            const details = document.getElementById('block-details');

            details.innerHTML = `
                <div class="space-y-6">
                    <div>
                        <h3 class="text-lg font-semibold mb-2">Block Header</h3>
                        <div class="bg-gray-50 rounded p-4 space-y-2">
                            <div class="flex justify-between">
                                <span class="text-gray-600">Height:</span>
                                <span class="font-mono">${block.height}</span>
                            </div>
                            <div class="flex justify-between">
                                <span class="text-gray-600">Hash:</span>
                                <span class="font-mono text-sm">${block.hash || '-'}</span>
                            </div>
                            <div class="flex justify-between">
                                <span class="text-gray-600">Parent Hash:</span>
                                <span class="font-mono text-sm">${block.parent_hash || '-'}</span>
                            </div>
                            <div class="flex justify-between">
                                <span class="text-gray-600">Timestamp:</span>
                                <span>${formatTimestamp(block.timestamp)}</span>
                            </div>
                            <div class="flex justify-between">
                                <span class="text-gray-600">Proposer:</span>
                                <span class="font-mono text-sm">${block.proposer || '-'}</span>
                            </div>
                        </div>
                    </div>

                    ${block.transactions && block.transactions.length > 0 ? `
                    <div>
                        <h3 class="text-lg font-semibold mb-2">Transactions (${block.transactions.length})</h3>
                        <div class="space-y-2">
                            ${block.transactions.map(tx => `
                                <div class="bg-gray-50 rounded p-4">
                                    <div class="flex justify-between mb-2">
                                        <span class="text-gray-600">Hash:</span>
                                        <span class="font-mono text-sm">${tx.hash || '-'}</span>
                                    </div>
                                    <div class="flex justify-between mb-2">
                                        <span class="text-gray-600">Type:</span>
                                        <span>${tx.type || '-'}</span>
                                    </div>
                                    <div class="flex justify-between mb-2">
                                        <span class="text-gray-600">From:</span>
                                        <span class="font-mono text-sm">${tx.sender || '-'}</span>
                                    </div>
                                    <div class="flex justify-between">
                                        <span class="text-gray-600">Fee:</span>
                                        <span>${tx.fee || '0'}</span>
                                    </div>
                                </div>
                            `).join('')}
                        </div>
                    </div>
                    ` : '<p class="text-gray-500">No transactions in this block</p>'}
                </div>
            `;

            modal.classList.remove('hidden');
        }

        // Close modal
        function closeModal() {
            document.getElementById('block-modal').classList.add('hidden');
        }

        // Search functionality
        async function search() {
            const query = document.getElementById('search-input').value.trim();
            if (!query) return;

            // Try block height first (digits only).
            // NB: the pattern must be /^\d+$/ — a doubled backslash would
            // match a literal backslash followed by "d" instead of digits.
            if (/^\d+$/.test(query)) {
                showBlockDetails(parseInt(query));
                return;
            }

            // TODO: Add transaction hash search
            alert('Search by block height is currently supported');
        }

        // Format timestamp
        function formatTimestamp(timestamp) {
            if (!timestamp) return '-';
            return new Date(timestamp * 1000).toLocaleString();
        }

        // Auto-refresh every 30 seconds
        setInterval(refreshData, 30000);
    </script>
</body>
</html>
EOF

# Install a simple web server
print_status "Installing web server..."
apt-get install -y nginx

# Configure nginx to serve the explorer.
# Unquoted delimiter: the \$... escapes keep nginx variables ($uri, $host,
# ...) literal in the written file.
print_status "Configuring nginx..."
cat > /etc/nginx/sites-available/blockchain-explorer << EOL
server {
    listen 3000;
    server_name _;
    root /opt/blockchain-explorer;
    index index.html;

    location / {
        try_files \$uri \$uri/ =404;
    }

    # CORS headers for API access
    location /rpc/ {
        proxy_pass http://localhost:8082;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
    }
}
EOL

# Enable the site
ln -sf /etc/nginx/sites-available/blockchain-explorer /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default

# Test and reload nginx
nginx -t
systemctl reload nginx

# Setup port forwarding if in container
if [ "$(hostname)" = "aitbc" ]; then
    print_status "Setting up port forwarding..."
    iptables -t nat -A PREROUTING -p tcp --dport 3000 -j DNAT --to-destination 192.168.100.10:3000
    iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 3000 -j MASQUERADE
    iptables-save > /etc/iptables/rules.v4
fi

print_status "Checking nginx status..."
systemctl status nginx --no-pager | head -10

print_success "✅ Blockchain explorer deployed!"
echo ""
echo "Explorer URL: http://localhost:3000"
# Both hostnames printed the same external URL, so no branch is needed.
echo "External URL: http://aitbc.keisanki.net:3000"
echo ""
echo "The explorer is a static HTML site served by nginx."
|
||||
66
scripts/deployment/deploy/deploy-explorer.sh
Executable file
66
scripts/deployment/deploy/deploy-explorer.sh
Executable file
@@ -0,0 +1,66 @@
|
||||
#!/bin/bash
#
# Deploy the AITBC Explorer: build the SPA locally, copy the build to the
# server over SSH, and patch the nginx site config so it is served under
# /explorer/.
#
# Requires: npm locally, SSH key access to $SERVER.

set -e

SERVER="root@10.1.223.93"
EXPLORER_DIR="/root/aitbc/apps/explorer-web"
NGINX_CONFIG="/etc/nginx/sites-available/aitbc"

echo "🚀 Deploying AITBC Explorer to Server"
echo "====================================="
echo "Server: $SERVER"
echo ""

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Build the explorer locally first
print_status "Building explorer locally..."
cd /home/oib/windsurf/aitbc/apps/explorer-web
npm run build

# Copy built files to server
print_status "Copying explorer build to server..."
scp -r dist "$SERVER:$EXPLORER_DIR/"

# Update nginx config to include explorer
print_status "Updating nginx configuration..."

# Backup current config
ssh "$SERVER" "cp $NGINX_CONFIG ${NGINX_CONFIG}.backup"

# Add explorer location to nginx config — only when not already present,
# so re-running the deploy does not insert a duplicate block.
# \$uri is escaped so nginx (not the remote shell) sees the variable.
if ! ssh "$SERVER" "grep -q 'location /explorer/' $NGINX_CONFIG"; then
    ssh "$SERVER" "sed -i '/# Health endpoint/i\\
# Explorer\\
location /explorer/ {\\
alias /root/aitbc/apps/explorer-web/dist/;\\
try_files \$uri \$uri/ /explorer/index.html;\\
}\\
\\
# Explorer mock data\\
location /explorer/mock/ {\\
alias /root/aitbc/apps/explorer-web/public/mock/;\\
}\\
' $NGINX_CONFIG"
fi

# Test and reload nginx
print_status "Testing and reloading nginx..."
ssh "$SERVER" "nginx -t && systemctl reload nginx"

print_status "✅ Explorer deployment complete!"
echo ""
echo "📋 Explorer URL:"
echo "   🌐 Explorer: https://aitbc.bubuit.net/explorer/"
echo ""
|
||||
121
scripts/deployment/deploy/deploy-first-node.sh
Executable file
121
scripts/deployment/deploy/deploy-first-node.sh
Executable file
@@ -0,0 +1,121 @@
|
||||
#!/bin/bash
#
# Deploy the first blockchain node: write its .env, generate the genesis
# file, install systemd units for the node process and its RPC API, set up
# the Python virtualenv and start both services.
#
# Requires: sudo rights; the node source checked out at $NODE1_DIR.

set -e

echo "🚀 Deploying First Blockchain Node"
echo "================================="

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

NODE1_DIR="/opt/blockchain-node"

# Create configuration for first node.
# Unquoted delimiter so $(date +%s) expands into the proposer key.
print_status "Creating configuration for first node..."
cat > "$NODE1_DIR/.env" << EOF
CHAIN_ID=ait-devnet
DB_PATH=./data/chain.db
RPC_BIND_HOST=127.0.0.1
RPC_BIND_PORT=8080
P2P_BIND_HOST=0.0.0.0
P2P_BIND_PORT=7070
PROPOSER_KEY=node1_proposer_key_$(date +%s)
MINT_PER_UNIT=1000
COORDINATOR_RATIO=0.05
GOSSIP_BACKEND=http
GOSSIP_BROADCAST_URL=http://127.0.0.1:7071/gossip
EOF

# Create data directory
mkdir -p "$NODE1_DIR/data/devnet"

# Generate genesis file
print_status "Generating genesis file..."
cd "$NODE1_DIR"
export PYTHONPATH="${NODE1_DIR}/src:${NODE1_DIR}/scripts:${PYTHONPATH:-}"
python3 scripts/make_genesis.py --output data/devnet/genesis.json --force

# Create systemd service.
# NB: `sudo cat > file` does NOT work — the redirection is performed by the
# unprivileged shell, not by sudo.  `sudo tee` writes the file as root.
print_status "Creating systemd service..."
sudo tee /etc/systemd/system/blockchain-node.service > /dev/null << EOF
[Unit]
Description=AITBC Blockchain Node 1
After=network.target

[Service]
Type=exec
User=root
WorkingDirectory=$NODE1_DIR
Environment=PATH=$NODE1_DIR/.venv/bin:/usr/local/bin:/usr/bin:/bin
Environment=PYTHONPATH=$NODE1_DIR/src:$NODE1_DIR/scripts
ExecStart=$NODE1_DIR/.venv/bin/python3 -m aitbc_chain.main
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Create RPC API service (same sudo tee pattern as above)
print_status "Creating RPC API service..."
sudo tee /etc/systemd/system/blockchain-rpc.service > /dev/null << EOF
[Unit]
Description=AITBC Blockchain RPC API 1
After=blockchain-node.service

[Service]
Type=exec
User=root
WorkingDirectory=$NODE1_DIR
Environment=PATH=$NODE1_DIR/.venv/bin:/usr/local/bin:/usr/bin:/bin
Environment=PYTHONPATH=$NODE1_DIR/src:$NODE1_DIR/scripts
ExecStart=$NODE1_DIR/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8080
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Setup Python environment if not exists
if [ ! -d "$NODE1_DIR/.venv" ]; then
    print_status "Setting up Python environment..."
    cd "$NODE1_DIR"
    python3 -m venv .venv
    source .venv/bin/activate
    pip install --upgrade pip
    pip install -e .
fi

# Enable and start services
print_status "Enabling and starting services..."
sudo systemctl daemon-reload
sudo systemctl enable blockchain-node blockchain-rpc
sudo systemctl start blockchain-node blockchain-rpc

# Check status.  A failed unit makes `systemctl status` exit non-zero,
# which aborts the script under `set -e` — deliberate: deploy fails loudly.
print_status "Checking service status..."
sudo systemctl status blockchain-node --no-pager -l
sudo systemctl status blockchain-rpc --no-pager -l

echo ""
print_status "✅ First blockchain node deployed!"
echo ""
echo "Node 1 RPC: http://127.0.0.1:8080"
echo "Node 2 RPC: http://127.0.0.1:8081"
echo ""
echo "To check logs:"
echo "  Node 1: sudo journalctl -u blockchain-node -f"
echo "  Node 2: sudo journalctl -u blockchain-node-2 -f"
|
||||
306
scripts/deployment/deploy/deploy-in-container.sh
Executable file
306
scripts/deployment/deploy/deploy-in-container.sh
Executable file
@@ -0,0 +1,306 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy blockchain node and explorer inside the container
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 Deploying Inside Container"
|
||||
echo "============================"
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Check if we're in the container
|
||||
if [ ! -f /proc/1/environ ] || ! grep -q container=lxc /proc/1/environ 2>/dev/null; then
|
||||
if [ "$(hostname)" != "aitbc" ]; then
|
||||
print_warning "This script must be run inside the aitbc container"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Stop existing services
|
||||
print_status "Stopping existing services..."
|
||||
systemctl stop blockchain-node blockchain-rpc nginx 2>/dev/null || true
|
||||
|
||||
# Install dependencies
|
||||
print_status "Installing dependencies..."
|
||||
apt-get update
|
||||
apt-get install -y python3 python3-venv python3-pip git curl nginx
|
||||
|
||||
# Deploy blockchain node
|
||||
print_status "Deploying blockchain node..."
|
||||
cd /opt
|
||||
rm -rf blockchain-node
|
||||
# The source is already in blockchain-node-src, copy it properly
|
||||
cp -r blockchain-node-src blockchain-node
|
||||
cd blockchain-node
|
||||
|
||||
# Check if pyproject.toml exists
|
||||
if [ ! -f pyproject.toml ]; then
|
||||
print_warning "pyproject.toml not found, looking for it..."
|
||||
find . -name "pyproject.toml" -type f
|
||||
# If it's in a subdirectory, move everything up
|
||||
if [ -f blockchain-node-src/pyproject.toml ]; then
|
||||
print_status "Moving files from nested directory..."
|
||||
mv blockchain-node-src/* .
|
||||
rmdir blockchain-node-src
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create configuration
|
||||
print_status "Creating configuration..."
|
||||
cat > .env << EOL
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain.db
|
||||
RPC_BIND_HOST=0.0.0.0
|
||||
RPC_BIND_PORT=8082
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7070
|
||||
PROPOSER_KEY=proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=memory
|
||||
EOL
|
||||
|
||||
# Create fresh data directory
|
||||
rm -rf data
|
||||
mkdir -p data/devnet
|
||||
|
||||
# Setup Python environment
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install --upgrade pip
|
||||
pip install -e .
|
||||
|
||||
# Generate genesis
|
||||
export PYTHONPATH="${PWD}/src:${PWD}/scripts:${PYTHONPATH:-}"
|
||||
python scripts/make_genesis.py --output data/devnet/genesis.json --force
|
||||
|
||||
# Create systemd services
|
||||
print_status "Creating systemd services..."
|
||||
cat > /etc/systemd/system/blockchain-node.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m aitbc_chain.main
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
cat > /etc/systemd/system/blockchain-rpc.service << EOL
|
||||
[Unit]
|
||||
Description=AITBC Blockchain RPC API
|
||||
After=blockchain-node.service
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8082
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOL
|
||||
|
||||
# Start blockchain services
|
||||
print_status "Starting blockchain services..."
|
||||
systemctl daemon-reload
|
||||
systemctl enable blockchain-node blockchain-rpc
|
||||
systemctl start blockchain-node blockchain-rpc
|
||||
|
||||
# Deploy explorer
|
||||
print_status "Deploying blockchain explorer..."
|
||||
cd /opt
|
||||
rm -rf blockchain-explorer
|
||||
mkdir -p blockchain-explorer
|
||||
cd blockchain-explorer
|
||||
|
||||
# Create HTML explorer
|
||||
cat > index.html << 'EOF'
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>AITBC Blockchain Explorer</title>
|
||||
<script src="https://cdn.tailwindcss.com"></script>
|
||||
<script src="https://unpkg.com/lucide@latest"></script>
|
||||
</head>
|
||||
<body class="bg-gray-50">
|
||||
<header class="bg-blue-600 text-white shadow-lg">
|
||||
<div class="container mx-auto px-4 py-4">
|
||||
<div class="flex items-center justify-between">
|
||||
<div class="flex items-center space-x-3">
|
||||
<i data-lucide="cube" class="w-8 h-8"></i>
|
||||
<h1 class="text-2xl font-bold">AITBC Blockchain Explorer</h1>
|
||||
</div>
|
||||
<button onclick="refreshData()" class="bg-blue-500 hover:bg-blue-400 px-3 py-1 rounded flex items-center space-x-1">
|
||||
<i data-lucide="refresh-cw" class="w-4 h-4"></i>
|
||||
<span>Refresh</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<main class="container mx-auto px-4 py-8">
|
||||
<div class="grid grid-cols-1 md:grid-cols-3 gap-6 mb-8">
|
||||
<div class="bg-white rounded-lg shadow p-6">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-gray-500 text-sm">Current Height</p>
|
||||
<p class="text-2xl font-bold" id="chain-height">-</p>
|
||||
</div>
|
||||
<i data-lucide="trending-up" class="w-10 h-10 text-green-500"></i>
|
||||
</div>
|
||||
</div>
|
||||
<div class="bg-white rounded-lg shadow p-6">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-gray-500 text-sm">Latest Block</p>
|
||||
<p class="text-lg font-mono" id="latest-hash">-</p>
|
||||
</div>
|
||||
<i data-lucide="hash" class="w-10 h-10 text-blue-500"></i>
|
||||
</div>
|
||||
</div>
|
||||
<div class="bg-white rounded-lg shadow p-6">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-gray-500 text-sm">Node Status</p>
|
||||
<p class="text-lg font-semibold" id="node-status">-</p>
|
||||
</div>
|
||||
<i data-lucide="activity" class="w-10 h-10 text-purple-500"></i>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="bg-white rounded-lg shadow">
|
||||
<div class="px-6 py-4 border-b">
|
||||
<h2 class="text-xl font-semibold flex items-center">
|
||||
<i data-lucide="blocks" class="w-5 h-5 mr-2"></i>
|
||||
Latest Blocks
|
||||
</h2>
|
||||
</div>
|
||||
<div class="p-6">
|
||||
<table class="w-full">
|
||||
<thead>
|
||||
<tr class="text-left text-gray-500 text-sm">
|
||||
<th class="pb-3">Height</th>
|
||||
<th class="pb-3">Hash</th>
|
||||
<th class="pb-3">Timestamp</th>
|
||||
<th class="pb-3">Transactions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="blocks-table">
|
||||
<tr>
|
||||
<td colspan="4" class="text-center py-8 text-gray-500">
|
||||
Loading blocks...
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</main>
|
||||
|
||||
<script>
|
||||
lucide.createIcons();
|
||||
|
||||
const RPC_URL = 'http://localhost:8082';
|
||||
|
||||
async function refreshData() {
|
||||
try {
|
||||
const response = await fetch(`${RPC_URL}/rpc/head`);
|
||||
const head = await response.json();
|
||||
|
||||
document.getElementById('chain-height').textContent = head.height || '-';
|
||||
document.getElementById('latest-hash').textContent = head.hash ? head.hash.substring(0, 16) + '...' : '-';
|
||||
document.getElementById('node-status').innerHTML = '<span class="text-green-500">Online</span>';
|
||||
|
||||
// Load last 10 blocks
|
||||
const tbody = document.getElementById('blocks-table');
|
||||
tbody.innerHTML = '';
|
||||
|
||||
for (let i = 0; i < 10 && head.height - i >= 0; i++) {
|
||||
const blockResponse = await fetch(`${RPC_URL}/rpc/blocks/${head.height - i}`);
|
||||
const block = await blockResponse.json();
|
||||
|
||||
const row = tbody.insertRow();
|
||||
row.innerHTML = `
|
||||
<td class="py-3 font-mono">${block.height}</td>
|
||||
<td class="py-3 font-mono text-sm">${block.hash ? block.hash.substring(0, 16) + '...' : '-'}</td>
|
||||
<td class="py-3 text-sm">${new Date(block.timestamp * 1000).toLocaleString()}</td>
|
||||
<td class="py-3">${block.transactions ? block.transactions.length : 0}</td>
|
||||
`;
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error:', error);
|
||||
document.getElementById('node-status').innerHTML = '<span class="text-red-500">Error</span>';
|
||||
}
|
||||
}
|
||||
|
||||
refreshData();
|
||||
setInterval(refreshData, 30000);
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
EOF
|
||||
|
||||
# Configure nginx
|
||||
print_status "Configuring nginx..."
|
||||
cat > /etc/nginx/sites-available/blockchain-explorer << EOL
|
||||
server {
|
||||
listen 3000;
|
||||
server_name _;
|
||||
root /opt/blockchain-explorer;
|
||||
index index.html;
|
||||
|
||||
location / {
|
||||
try_files \$uri \$uri/ =404;
|
||||
}
|
||||
}
|
||||
EOL
|
||||
|
||||
ln -sf /etc/nginx/sites-available/blockchain-explorer /etc/nginx/sites-enabled/
|
||||
rm -f /etc/nginx/sites-enabled/default
|
||||
nginx -t
|
||||
systemctl reload nginx
|
||||
|
||||
# Wait for services to start
|
||||
print_status "Waiting for services to start..."
|
||||
sleep 5
|
||||
|
||||
# Check services
|
||||
print_status "Checking service status..."
|
||||
systemctl status blockchain-node blockchain-rpc nginx --no-pager | grep -E 'Active:|Main PID:'
|
||||
|
||||
print_success "✅ Deployment complete in container!"
|
||||
echo ""
|
||||
echo "Services:"
|
||||
echo " - Blockchain Node RPC: http://localhost:8082"
|
||||
echo " - Blockchain Explorer: http://localhost:3000"
|
||||
echo ""
|
||||
echo "These are accessible from the host via port forwarding."
|
||||
56
scripts/deployment/deploy/deploy-modern-explorer.sh
Normal file
56
scripts/deployment/deploy/deploy-modern-explorer.sh
Normal file
@@ -0,0 +1,56 @@
|
||||
#!/bin/bash
#
# Deploy Modern Blockchain Explorer
#
# Copies the static explorer app into /opt/blockchain-explorer and serves it
# via nginx. Expects to run as root on the target host, with the source tree
# already present at /opt/blockchain-node-src.

set -e

echo "🚀 Deploying Modern Blockchain Explorer"
echo "======================================"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# BUG FIX: print_success was invoked at the end of this script but never
# defined, so the final step failed (and aborted under `set -e`).
print_success() {
    echo -e "${GREEN}[OK]${NC} $1"
}

# Stop existing services
print_status "Stopping existing services..."
systemctl stop nginx 2>/dev/null || true

# Create directory (wipe any previous deployment first)
print_status "Creating explorer directory..."
rm -rf /opt/blockchain-explorer
mkdir -p /opt/blockchain-explorer/assets

# Copy files
print_status "Copying explorer files..."
cp -r /opt/blockchain-node-src/apps/blockchain-explorer/* /opt/blockchain-explorer/

# Update nginx configuration (the app ships its own nginx.conf)
print_status "Updating nginx configuration..."
cp /opt/blockchain-explorer/nginx.conf /etc/nginx/sites-available/blockchain-explorer
ln -sf /etc/nginx/sites-available/blockchain-explorer /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default

# Test and start nginx
print_status "Starting nginx..."
nginx -t
systemctl start nginx

print_success "✅ Modern explorer deployed!"
echo ""
echo "Access URLs:"
echo "  - Explorer: http://localhost:3000/"
echo "  - API: http://localhost:3000/api/v1/"
echo ""
echo "Standardized API Endpoints:"
echo "  - GET /api/v1/chain/head"
echo "  - GET /api/v1/chain/blocks?limit=N"
echo "  - GET /api/v1/chain/blocks/{height}"
|
||||
160
scripts/deployment/deploy/deploy-nginx-reverse-proxy.sh
Executable file
160
scripts/deployment/deploy/deploy-nginx-reverse-proxy.sh
Executable file
@@ -0,0 +1,160 @@
|
||||
#!/bin/bash
#
# Deploy nginx reverse proxy for AITBC services.
# This replaces firehol/iptables port forwarding with an nginx reverse proxy.
#
# Runs from the developer machine; all host changes go through the
# `ns3-root` SSH alias, which must exist in ~/.ssh/config.

set -e

echo "🚀 Deploying Nginx Reverse Proxy for AITBC"
echo "=========================================="

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check if we're on the host server (i.e. the ns3-root alias is configured)
if ! grep -q "ns3-root" ~/.ssh/config 2>/dev/null; then
    print_error "ns3-root SSH configuration not found. Please add it to ~/.ssh/config"
    exit 1
fi

# Install nginx on host if not already installed
print_status "Checking nginx installation on host..."
ssh ns3-root "which nginx > /dev/null || (apt-get update && apt-get install -y nginx)"

# Install certbot for SSL certificates
print_status "Checking certbot installation..."
ssh ns3-root "which certbot > /dev/null || (apt-get update && apt-get install -y certbot python3-certbot-nginx)"

# Copy nginx configuration
print_status "Copying nginx configuration..."
scp infra/nginx/nginx-aitbc-reverse-proxy.conf ns3-root:/tmp/aitbc-reverse-proxy.conf

# Backup existing nginx configuration
print_status "Backing up existing nginx configuration..."
ssh ns3-root "mkdir -p /etc/nginx/backup && cp -r /etc/nginx/sites-available/* /etc/nginx/backup/ 2>/dev/null || true"

# Install the new configuration
print_status "Installing nginx reverse proxy configuration..."
ssh ns3-root << 'EOF'
# Remove existing configurations
rm -f /etc/nginx/sites-enabled/default
rm -f /etc/nginx/sites-available/aitbc*

# Copy new configuration
cp /tmp/aitbc-reverse-proxy.conf /etc/nginx/sites-available/aitbc-reverse-proxy.conf

# Create symbolic link
ln -sf /etc/nginx/sites-available/aitbc-reverse-proxy.conf /etc/nginx/sites-enabled/

# Test nginx configuration
nginx -t
EOF

# Check if SSL certificate exists
print_status "Checking SSL certificate..."
if ! ssh ns3-root "test -f /etc/letsencrypt/live/aitbc.keisanki.net/fullchain.pem"; then
    print_warning "SSL certificate not found. Obtaining Let's Encrypt certificate..."

    # BUG FIX: the original ran the ssh heredoc as a bare statement and then
    # tested `$?` in the next statement. Under `set -e` the script would have
    # already exited on failure, so the error branch was dead code. Test the
    # ssh command directly instead.
    if ! ssh ns3-root << 'EOF'
# Stop nginx temporarily (certbot standalone needs ports 80/443)
systemctl stop nginx 2>/dev/null || true

# Obtain certificate
certbot certonly --standalone -d aitbc.keisanki.net -d api.aitbc.keisanki.net -d rpc.aitbc.keisanki.net --email admin@keisanki.net --agree-tos --non-interactive

# Start nginx
systemctl start nginx
EOF
    then
        print_error "Failed to obtain SSL certificate. Please run certbot manually:"
        echo "certbot certonly --standalone -d aitbc.keisanki.net -d api.aitbc.keisanki.net -d rpc.aitbc.keisanki.net"
        exit 1
    fi
fi

# Restart nginx
print_status "Restarting nginx..."
ssh ns3-root "systemctl restart nginx && systemctl enable nginx"

# Remove old iptables rules (optional; each delete is best-effort)
print_warning "Removing old iptables port forwarding rules (if they exist)..."
ssh ns3-root << 'EOF'
# Flush existing NAT rules for AITBC ports
iptables -t nat -D PREROUTING -p tcp --dport 8000 -j DNAT --to-destination 192.168.100.10:8000 2>/dev/null || true
iptables -t nat -D POSTROUTING -p tcp -d 192.168.100.10 --dport 8000 -j MASQUERADE 2>/dev/null || true
iptables -t nat -D PREROUTING -p tcp --dport 8081 -j DNAT --to-destination 192.168.100.10:8081 2>/dev/null || true
iptables -t nat -D POSTROUTING -p tcp -d 192.168.100.10 --dport 8081 -j MASQUERADE 2>/dev/null || true
iptables -t nat -D PREROUTING -p tcp --dport 8082 -j DNAT --to-destination 192.168.100.10:8082 2>/dev/null || true
iptables -t nat -D POSTROUTING -p tcp -d 192.168.100.10 --dport 8082 -j MASQUERADE 2>/dev/null || true
iptables -t nat -D PREROUTING -p tcp --dport 9080 -j DNAT --to-destination 192.168.100.10:9080 2>/dev/null || true
iptables -t nat -D POSTROUTING -p tcp -d 192.168.100.10 --dport 9080 -j MASQUERADE 2>/dev/null || true
iptables -t nat -D PREROUTING -p tcp --dport 3000 -j DNAT --to-destination 192.168.100.10:3000 2>/dev/null || true
iptables -t nat -D POSTROUTING -p tcp -d 192.168.100.10 --dport 3000 -j MASQUERADE 2>/dev/null || true

# Save iptables rules
iptables-save > /etc/iptables/rules.v4 2>/dev/null || true
EOF

# Wait for nginx to start
sleep 2

# Test the configuration (smoke tests; endpoints may legitimately be down)
print_status "Testing reverse proxy configuration..."
echo ""

# Test main domain
if curl -s -o /dev/null -w "%{http_code}" https://aitbc.keisanki.net/health | grep -q "200"; then
    print_status "✅ Main domain (aitbc.keisanki.net) - OK"
else
    print_error "❌ Main domain (aitbc.keisanki.net) - FAILED"
fi

# Test API endpoint
if curl -s -o /dev/null -w "%{http_code}" https://aitbc.keisanki.net/api/health | grep -q "200"; then
    print_status "✅ API endpoint - OK"
else
    print_warning "⚠️  API endpoint - Not responding (service may not be running)"
fi

# Test RPC endpoint
if curl -s -o /dev/null -w "%{http_code}" https://aitbc.keisanki.net/rpc/head | grep -q "200"; then
    print_status "✅ RPC endpoint - OK"
else
    print_warning "⚠️  RPC endpoint - Not responding (blockchain node may not be running)"
fi

echo ""
print_status "🎉 Nginx reverse proxy deployment complete!"
echo ""
echo "Service URLs:"
echo "  • Blockchain Explorer: https://aitbc.keisanki.net"
echo "  • API: https://aitbc.keisanki.net/api/"
echo "  • RPC: https://aitbc.keisanki.net/rpc/"
echo "  • Exchange: https://aitbc.keisanki.net/exchange/"
echo ""
echo "Alternative URLs:"
echo "  • API-only: https://api.aitbc.keisanki.net"
echo "  • RPC-only: https://rpc.aitbc.keisanki.net"
echo ""
echo "Note: Make sure all services are running in the container:"
echo "  • blockchain-explorer.service (port 3000)"
echo "  • coordinator-api.service (port 8000)"
echo "  • blockchain-rpc.service (port 8082)"
echo "  • aitbc-exchange.service (port 9080)"
|
||||
55
scripts/deployment/deploy/deploy-production.sh
Normal file
55
scripts/deployment/deploy/deploy-production.sh
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/bin/bash
#
# Deploy AITBC for Production.
#
# Builds production assets locally, ships them to the `aitbc` SSH host,
# extends the nginx site config, and switches the Exchange/Marketplace pages
# to locally served assets (no CDNs).

# BUG FIX: the original had no error handling, so any failed step
# (scp, ssh, sed) still ended with "✅ Production deployment complete!".
set -e

echo "🚀 Deploying AITBC for Production..."

# 1. Setup production assets
echo "📦 Setting up production assets..."
bash setup-production-assets.sh

# 2. Copy assets to server
echo "📋 Copying assets to server..."
scp -r assets/ aitbc:/var/www/html/

# 3. Update Nginx configuration
# NOTE(review): this *appends* to the site config, so re-running the script
# duplicates the location block — confirm whether idempotency is required.
echo "⚙️ Updating Nginx configuration..."
ssh aitbc "cat >> /etc/nginx/sites-available/aitbc.conf << 'EOF'

# Serve production assets
location /assets/ {
    alias /var/www/html/assets/;
    expires 1y;
    add_header Cache-Control \"public, immutable\";
    add_header X-Content-Type-Options nosniff;

    # Gzip compression
    gzip on;
    gzip_types text/css application/javascript image/svg+xml;
}

# Security headers
add_header Referrer-Policy \"strict-origin-when-cross-origin\" always;
add_header X-Frame-Options \"SAMEORIGIN\" always;
add_header X-Content-Type-Options \"nosniff\" always;
EOF"

# 4. Reload Nginx (nginx -t guards the reload against a broken config)
echo "🔄 Reloading Nginx..."
ssh aitbc "nginx -t && systemctl reload nginx"

# 5. Update Exchange page to use production assets
echo "🔄 Updating Exchange page..."
scp apps/trade-exchange/index.prod.html aitbc:/root/aitbc/apps/trade-exchange/index.html

# 6. Update Marketplace page: rewrite CDN URLs to the locally served copies
echo "🔄 Updating Marketplace page..."
sed -i 's|https://cdn.tailwindcss.com|/assets/js/tailwind.js|g' apps/marketplace-ui/index.html
sed -i 's|https://unpkg.com/axios/dist/axios.min.js|/assets/js/axios.min.js|g' apps/marketplace-ui/index.html
sed -i 's|https://unpkg.com/lucide@latest|/assets/js/lucide.js|g' apps/marketplace-ui/index.html
scp apps/marketplace-ui/index.html aitbc:/root/aitbc/apps/marketplace-ui/

echo "✅ Production deployment complete!"
echo ""
echo "📝 Next steps:"
echo "1. Restart services: ssh aitbc 'systemctl restart aitbc-exchange aitbc-marketplace-ui'"
echo "2. Clear browser cache"
echo "3. Test all pages"
|
||||
18
scripts/deployment/deploy/deploy-remote-build.sh
Normal file
18
scripts/deployment/deploy/deploy-remote-build.sh
Normal file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
#
# Deploy blockchain node by building directly on the ns3 server
# (utilizes the server's fast network connection instead of uploading builds).

# BUG FIX: without fail-fast, a failed scp or remote build still printed
# "Deployment complete!".
set -e

echo "🚀 Remote Blockchain Deployment (Build on Server)"
echo "=============================================="

# Copy deployment script to server
echo "Copying deployment script to ns3..."
scp scripts/deploy/deploy-blockchain-remote.sh ns3-root:/opt/

# Execute deployment on server
echo "Executing deployment on ns3 (utilizing gigabit connection)..."
ssh ns3-root "cd /opt && chmod +x deploy-blockchain-remote.sh && ./deploy-blockchain-remote.sh"

echo ""
echo "Deployment complete!"
echo "The blockchain node was built directly on ns3 using its fast connection."
|
||||
127
scripts/deployment/deploy/deploy-second-node.sh
Executable file
127
scripts/deployment/deploy/deploy-second-node.sh
Executable file
@@ -0,0 +1,127 @@
|
||||
#!/bin/bash
#
# Deploy a second blockchain node on the same server.
#
# Clones /opt/blockchain-node into /opt/blockchain-node-2, gives it its own
# DB/ports (RPC 8081, P2P 7071), and installs two systemd units.

set -e

echo "🚀 Deploying Second Blockchain Node"
echo "=================================="

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# Create directory for second node
print_status "Creating directory for second node..."
NODE2_DIR="/opt/blockchain-node-2"
sudo mkdir -p "$NODE2_DIR"
sudo chown "$USER:$USER" "$NODE2_DIR"

# Copy blockchain node code
print_status "Copying blockchain node code..."
cp -r /opt/blockchain-node/* "$NODE2_DIR/"

# Create configuration for second node (ports offset from node 1)
print_status "Creating configuration for second node..."
cat > "$NODE2_DIR/.env" << EOF
CHAIN_ID=ait-devnet
DB_PATH=./data/chain2.db
RPC_BIND_HOST=127.0.0.1
RPC_BIND_PORT=8081
P2P_BIND_HOST=0.0.0.0
P2P_BIND_PORT=7071
PROPOSER_KEY=node2_proposer_key_$(date +%s)
MINT_PER_UNIT=1000
COORDINATOR_RATIO=0.05
GOSSIP_BACKEND=http
GOSSIP_BROADCAST_URL=http://127.0.0.1:7070/gossip
EOF

# Create data directory
mkdir -p "$NODE2_DIR/data/devnet"

# Generate genesis file (same as first node)
print_status "Generating genesis file..."
cd "$NODE2_DIR"
export PYTHONPATH="${NODE2_DIR}/src:${NODE2_DIR}/scripts:${PYTHONPATH:-}"
python3 scripts/make_genesis.py --output data/devnet/genesis.json --force

# Create systemd service.
# BUG FIX: the original used `sudo cat > /etc/systemd/...`, which elevates
# only `cat` — the redirection is performed by the *unprivileged* shell and
# fails with "Permission denied". `sudo tee` makes the write itself run as
# root.
print_status "Creating systemd service..."
sudo tee /etc/systemd/system/blockchain-node-2.service > /dev/null << EOF
[Unit]
Description=AITBC Blockchain Node 2
After=network.target

[Service]
Type=exec
User=root
WorkingDirectory=$NODE2_DIR
Environment=PATH=$NODE2_DIR/.venv/bin:/usr/local/bin:/usr/bin:/bin
Environment=PYTHONPATH=$NODE2_DIR/src:$NODE2_DIR/scripts
ExecStart=$NODE2_DIR/.venv/bin/python3 -m aitbc_chain.main
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Create RPC API service (same sudo-tee fix as above)
print_status "Creating RPC API service..."
sudo tee /etc/systemd/system/blockchain-rpc-2.service > /dev/null << EOF
[Unit]
Description=AITBC Blockchain RPC API 2
After=blockchain-node-2.service

[Service]
Type=exec
User=root
WorkingDirectory=$NODE2_DIR
Environment=PATH=$NODE2_DIR/.venv/bin:/usr/local/bin:/usr/bin:/bin
Environment=PYTHONPATH=$NODE2_DIR/src:$NODE2_DIR/scripts
ExecStart=$NODE2_DIR/.venv/bin/python3 -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 8081
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Setup Python environment
print_status "Setting up Python environment..."
cd "$NODE2_DIR"
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install -e .

# Enable and start services
print_status "Enabling and starting services..."
sudo systemctl daemon-reload
sudo systemctl enable blockchain-node-2 blockchain-rpc-2
sudo systemctl start blockchain-node-2 blockchain-rpc-2

# Check status
print_status "Checking service status..."
sudo systemctl status blockchain-node-2 --no-pager -l
sudo systemctl status blockchain-rpc-2 --no-pager -l

echo ""
print_status "✅ Second blockchain node deployed!"
echo ""
echo "Node 1 RPC: http://127.0.0.1:8080"
echo "Node 2 RPC: http://127.0.0.1:8081"
echo ""
echo "To check logs:"
echo "  Node 1: sudo journalctl -u blockchain-node -f"
echo "  Node 2: sudo journalctl -u blockchain-node-2 -f"
|
||||
84
scripts/deployment/deploy/deploy-to-aitbc-container.sh
Executable file
84
scripts/deployment/deploy/deploy-to-aitbc-container.sh
Executable file
@@ -0,0 +1,84 @@
|
||||
#!/bin/bash
#
# Deploy blockchain node inside incus container `aitbc`.
#
# Must run on the ns3 host. Pushes the source and a deploy script into the
# container, runs the in-container deployment, then sets up host-side
# iptables port forwarding.

set -e

echo "🚀 AITBC Deployment in Incus Container"
echo "======================================"
echo "This will deploy inside the aitbc container"
echo ""

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

# BUG FIX: print_success was used at the end of the script but never defined,
# so the last step failed (and aborted under `set -e`).
print_success() {
    echo -e "${GREEN}[OK]${NC} $1"
}

# Check if we're on ns3 host
if [ "$(hostname)" != "ns3" ]; then
    print_warning "This script must be run on ns3 host"
    echo "Run: ssh ns3-root"
    exit 1
fi

# Check if container exists and is running
if ! incus list | grep -q "aitbc.*RUNNING"; then
    print_warning "Container aitbc is not running"
    exit 1
fi

# Copy source to container
print_status "Copying source code to container..."
incus exec aitbc -- rm -rf /opt/blockchain-node-src 2>/dev/null || true
incus exec aitbc -- mkdir -p /opt/blockchain-node-src
# Use the source already on the server
incus file push -r /opt/blockchain-node-src/. aitbc/opt/blockchain-node-src/
# Fix the nested directory issue - move everything up one level
incus exec aitbc -- sh -c 'if [ -d /opt/blockchain-node-src/blockchain-node-src ]; then mv /opt/blockchain-node-src/blockchain-node-src/* /opt/blockchain-node-src/ && rmdir /opt/blockchain-node-src/blockchain-node-src; fi'

# Copy deployment script to container
print_status "Copying deployment script to container..."
incus file push /opt/deploy-in-container.sh aitbc/opt/

# Execute deployment inside container
print_status "Deploying inside container..."
incus exec aitbc -- bash /opt/deploy-in-container.sh

# Setup port forwarding on host
# NOTE(review): this flushes ALL NAT PREROUTING/POSTROUTING rules on the
# host, not just AITBC's — confirm nothing else depends on them.
print_status "Setting up port forwarding on host..."
iptables -t nat -F PREROUTING 2>/dev/null || true
iptables -t nat -F POSTROUTING 2>/dev/null || true

# Forward blockchain RPC
iptables -t nat -A PREROUTING -p tcp --dport 8082 -j DNAT --to-destination 192.168.100.10:8082
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 8082 -j MASQUERADE

# Forward explorer
iptables -t nat -A PREROUTING -p tcp --dport 3000 -j DNAT --to-destination 192.168.100.10:3000
iptables -t nat -A POSTROUTING -p tcp -d 192.168.100.10 --dport 3000 -j MASQUERADE

# Save rules
mkdir -p /etc/iptables
iptables-save > /etc/iptables/rules.v4

# Check services
print_status "Checking services in container..."
incus exec aitbc -- systemctl status blockchain-node blockchain-rpc nginx --no-pager | grep -E 'Active:|Main PID:'

print_success "✅ Deployment complete!"
echo ""
echo "Services in container aitbc:"
echo "  - Blockchain Node RPC: http://192.168.100.10:8082"
echo "  - Blockchain Explorer: http://192.168.100.10:3000"
echo ""
echo "External access via ns3:"
echo "  - Blockchain Node RPC: http://aitbc.keisanki.net:8082"
echo "  - Blockchain Explorer: http://aitbc.keisanki.net:3000"
|
||||
253
scripts/deployment/deploy/deploy-to-container.sh
Executable file
253
scripts/deployment/deploy/deploy-to-container.sh
Executable file
@@ -0,0 +1,253 @@
|
||||
#!/bin/bash
#
# AITBC Services Deployment to Incus Container.
# This script deploys all AITBC services to the 'aitbc' container:
# coordinator API (8000), blockchain node (9080), marketplace UI (3001),
# trade exchange (3002), all fronted by nginx inside the container.

set -e

CONTAINER_NAME="aitbc"
CONTAINER_IP="10.1.223.93"
PROJECT_DIR="/home/oib/windsurf/aitbc"

echo "🚀 Deploying AITBC services to container: $CONTAINER_NAME"
echo "Container IP: $CONTAINER_IP"
echo ""

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Stop local services so the container takes over the ports
print_status "Stopping local AITBC services..."
sudo fuser -k 8000/tcp 2>/dev/null || true
sudo fuser -k 9080/tcp 2>/dev/null || true
sudo fuser -k 3001/tcp 2>/dev/null || true
sudo fuser -k 3002/tcp 2>/dev/null || true
pkill -f "aitbc_chain.app" 2>/dev/null || true
pkill -f "marketplace-ui" 2>/dev/null || true
pkill -f "trade-exchange" 2>/dev/null || true

# Copy project to container
print_status "Copying AITBC project to container..."
incus file push -r "$PROJECT_DIR" "$CONTAINER_NAME/home/oib/"

# Setup container environment — the shared venv lives at the repo root,
# i.e. /home/oib/aitbc/.venv.
# (python3 rather than python: minimal container images often ship only
# the python3 binary.)
print_status "Setting up container environment..."
incus exec "$CONTAINER_NAME" -- bash -c "
    cd /home/oib/aitbc
    python3 -m venv .venv
    source .venv/bin/activate
    pip install --upgrade pip
"

# Install dependencies for each service
print_status "Installing dependencies..."

# Coordinator API
# BUG FIX: the venv is created at /home/oib/aitbc/.venv, but the original
# sourced ../.venv from apps/coordinator-api (= apps/.venv, which does not
# exist). The venv is two levels up.
print_status "Installing Coordinator API dependencies..."
incus exec "$CONTAINER_NAME" -- bash -c "
    cd /home/oib/aitbc/apps/coordinator-api
    source ../../.venv/bin/activate
    pip install -e .
    pip install fastapi uvicorn
"

# Blockchain Node (same ../../.venv fix as above)
print_status "Installing Blockchain Node dependencies..."
incus exec "$CONTAINER_NAME" -- bash -c "
    cd /home/oib/aitbc/apps/blockchain-node
    source ../../.venv/bin/activate
    pip install -e .
    pip install fastapi uvicorn
"

# Create systemd service files.
# PATH includes the system dirs (not just the venv) so subprocesses spawned
# by the services can find standard tools — consistent with the other AITBC
# unit files.
print_status "Creating systemd services..."

# Coordinator API service
incus exec "$CONTAINER_NAME" -- tee /etc/systemd/system/aitbc-coordinator.service > /dev/null <<EOF
[Unit]
Description=AITBC Coordinator API
After=network.target

[Service]
Type=exec
User=oib
Group=oib
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
Environment=PATH=/home/oib/aitbc/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=/home/oib/aitbc/.venv/bin/python -m uvicorn src.app.main:app --host 0.0.0.0 --port 8000
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Blockchain Node service
incus exec "$CONTAINER_NAME" -- tee /etc/systemd/system/aitbc-blockchain.service > /dev/null <<EOF
[Unit]
Description=AITBC Blockchain Node
After=network.target

[Service]
Type=exec
User=oib
Group=oib
WorkingDirectory=/home/oib/aitbc/apps/blockchain-node
Environment=PATH=/home/oib/aitbc/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=/home/oib/aitbc/.venv/bin/python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Marketplace UI service
incus exec "$CONTAINER_NAME" -- tee /etc/systemd/system/aitbc-marketplace.service > /dev/null <<EOF
[Unit]
Description=AITBC Marketplace UI
After=network.target

[Service]
Type=exec
User=oib
Group=oib
WorkingDirectory=/home/oib/aitbc/apps/marketplace-ui
Environment=PATH=/home/oib/aitbc/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=/home/oib/aitbc/.venv/bin/python server.py --port 3001
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Trade Exchange service
incus exec "$CONTAINER_NAME" -- tee /etc/systemd/system/aitbc-exchange.service > /dev/null <<EOF
[Unit]
Description=AITBC Trade Exchange
After=network.target

[Service]
Type=exec
User=oib
Group=oib
WorkingDirectory=/home/oib/aitbc/apps/trade-exchange
Environment=PATH=/home/oib/aitbc/.venv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=/home/oib/aitbc/.venv/bin/python server.py --port 3002
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

# Reload systemd and start services
print_status "Starting AITBC services..."
incus exec "$CONTAINER_NAME" -- systemctl daemon-reload
incus exec "$CONTAINER_NAME" -- systemctl enable aitbc-coordinator
incus exec "$CONTAINER_NAME" -- systemctl enable aitbc-blockchain
incus exec "$CONTAINER_NAME" -- systemctl enable aitbc-marketplace
incus exec "$CONTAINER_NAME" -- systemctl enable aitbc-exchange

incus exec "$CONTAINER_NAME" -- systemctl start aitbc-coordinator
incus exec "$CONTAINER_NAME" -- systemctl start aitbc-blockchain
incus exec "$CONTAINER_NAME" -- systemctl start aitbc-marketplace
incus exec "$CONTAINER_NAME" -- systemctl start aitbc-exchange

# Wait for services to start
print_status "Waiting for services to start..."
sleep 10

# Check service status
print_status "Checking service status..."
incus exec "$CONTAINER_NAME" -- systemctl status aitbc-coordinator --no-pager -l
incus exec "$CONTAINER_NAME" -- systemctl status aitbc-blockchain --no-pager -l
incus exec "$CONTAINER_NAME" -- systemctl status aitbc-marketplace --no-pager -l
incus exec "$CONTAINER_NAME" -- systemctl status aitbc-exchange --no-pager -l

# Create nginx configuration for reverse proxy
# (\$var escapes keep nginx variables literal inside this expanding heredoc)
print_status "Setting up Nginx reverse proxy..."
incus exec "$CONTAINER_NAME" -- tee /etc/nginx/sites-available/aitbc > /dev/null <<EOF
server {
    listen 80;
    server_name _;

    # Coordinator API
    location /api/ {
        proxy_pass http://127.0.0.1:8000/v1/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Blockchain RPC
    location /rpc/ {
        proxy_pass http://127.0.0.1:9080/rpc/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Marketplace UI
    location /marketplace/ {
        proxy_pass http://127.0.0.1:3001/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Trade Exchange
    location /exchange/ {
        proxy_pass http://127.0.0.1:3002/;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
    }

    # Default redirect to marketplace
    location / {
        return 301 /marketplace/;
    }
}
EOF

# Enable nginx site
incus exec "$CONTAINER_NAME" -- ln -sf /etc/nginx/sites-available/aitbc /etc/nginx/sites-enabled/
incus exec "$CONTAINER_NAME" -- rm -f /etc/nginx/sites-enabled/default
incus exec "$CONTAINER_NAME" -- nginx -t && incus exec "$CONTAINER_NAME" -- systemctl reload nginx

# Print access information
echo ""
print_status "✅ AITBC services deployed successfully!"
echo ""
echo "📋 Service URLs:"
echo "   🌐 Public IP: $CONTAINER_IP"
echo "   📊 Marketplace: http://$CONTAINER_IP/marketplace/"
echo "   💱 Trade Exchange: http://$CONTAINER_IP/exchange/"
echo "   🔗 API: http://$CONTAINER_IP/api/"
echo "   ⛓️  Blockchain RPC: http://$CONTAINER_IP/rpc/"
echo ""
print_status "To check logs: incus exec $CONTAINER_NAME -- journalctl -u aitbc-coordinator -f"
print_status "To restart services: incus exec $CONTAINER_NAME -- systemctl restart aitbc-*"
|
||||
50
scripts/deployment/deploy/deploy-to-container.sh.example
Normal file
50
scripts/deployment/deploy/deploy-to-container.sh.example
Normal file
@@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
|
||||
# AITBC Services Deployment to Container
|
||||
# Copy to deploy-to-container.sh and adjust variables for your environment
|
||||
|
||||
set -e
|
||||
|
||||
# === CONFIGURE THESE ===
|
||||
CONTAINER_NAME="aitbc"
|
||||
CONTAINER_IP="YOUR_CONTAINER_IP"
|
||||
PROJECT_DIR="/path/to/your/aitbc"
|
||||
SSH_ALIAS="your-ssh-alias" # or user@host
|
||||
|
||||
echo "🚀 Deploying AITBC services to container: $CONTAINER_NAME"
|
||||
echo "Container IP: $CONTAINER_IP"
|
||||
echo ""
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
# 1. Push website
|
||||
echo -e "${YELLOW}Deploying website...${NC}"
|
||||
scp -r "$PROJECT_DIR/website/index.html" "$PROJECT_DIR/website/404.html" \
|
||||
"$SSH_ALIAS:/var/www/html/"
|
||||
scp -r "$PROJECT_DIR/website/docs/"*.html "$SSH_ALIAS:/var/www/html/docs/"
|
||||
scp "$PROJECT_DIR/website/docs/css/docs.css" "$SSH_ALIAS:/var/www/html/docs/css/"
|
||||
scp "$PROJECT_DIR/website/docs/js/theme.js" "$SSH_ALIAS:/var/www/html/docs/js/"
|
||||
echo -e "${GREEN}✓ Website deployed${NC}"
|
||||
|
||||
# 2. Deploy coordinator API
|
||||
echo -e "${YELLOW}Deploying coordinator API...${NC}"
|
||||
ssh "$SSH_ALIAS" "cd /opt/aitbc && pip install -e apps/coordinator-api/"
|
||||
ssh "$SSH_ALIAS" "systemctl restart aitbc-coordinator"
|
||||
echo -e "${GREEN}✓ Coordinator deployed${NC}"
|
||||
|
||||
# 3. Deploy blockchain node
|
||||
echo -e "${YELLOW}Deploying blockchain node...${NC}"
|
||||
ssh "$SSH_ALIAS" "cd /opt/aitbc && pip install -e apps/blockchain-node/"
|
||||
ssh "$SSH_ALIAS" "systemctl restart aitbc-blockchain"
|
||||
echo -e "${GREEN}✓ Blockchain node deployed${NC}"
|
||||
|
||||
# 4. Verify
|
||||
echo ""
|
||||
echo -e "${YELLOW}Verifying services...${NC}"
|
||||
ssh "$SSH_ALIAS" "curl -s http://127.0.0.1:8000/v1/health | head -1"
|
||||
ssh "$SSH_ALIAS" "curl -s http://127.0.0.1:9080/rpc/health | head -1"
|
||||
echo -e "${GREEN}✓ All services running${NC}"
|
||||
241
scripts/deployment/deploy/deploy-to-server.sh
Executable file
241
scripts/deployment/deploy/deploy-to-server.sh
Executable file
@@ -0,0 +1,241 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Deploy AITBC services to the aitbc server (10.1.223.93)
|
||||
|
||||
set -e
|
||||
|
||||
SERVER="root@10.1.223.93"
|
||||
PROJECT_DIR="/root/aitbc"
|
||||
|
||||
echo "🚀 Deploying AITBC to Server"
|
||||
echo "=========================="
|
||||
echo "Server: $SERVER"
|
||||
echo ""
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Test SSH connection
|
||||
print_status "Testing SSH connection..."
|
||||
ssh $SERVER "hostname && ip a show eth0 | grep inet"
|
||||
|
||||
# Copy project to server
|
||||
print_status "Copying project to server..."
|
||||
ssh $SERVER "rm -rf $PROJECT_DIR 2>/dev/null || true"
|
||||
scp -r /home/oib/windsurf/aitbc $SERVER:/root/
|
||||
|
||||
# Setup Python environment
|
||||
print_status "Setting up Python environment..."
|
||||
ssh $SERVER "cd $PROJECT_DIR && python3 -m venv .venv && source .venv/bin/activate && pip install --upgrade pip"
|
||||
|
||||
# Install dependencies
|
||||
print_status "Installing dependencies..."
|
||||
ssh $SERVER "cd $PROJECT_DIR/apps/coordinator-api && source ../../.venv/bin/activate && pip install -e ."
|
||||
ssh $SERVER "cd $PROJECT_DIR/apps/blockchain-node && source ../../.venv/bin/activate && pip install -e ."
|
||||
|
||||
# Create systemd service files
|
||||
print_status "Creating systemd services..."
|
||||
|
||||
# Coordinator API service
|
||||
ssh $SERVER 'cat > /etc/systemd/system/aitbc-coordinator.service << EOF
|
||||
[Unit]
|
||||
Description=AITBC Coordinator API
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/root/aitbc/apps/coordinator-api
|
||||
Environment=PATH=/root/aitbc/.venv/bin
|
||||
ExecStart=/root/aitbc/.venv/bin/python -m uvicorn src.app.main:app --host 0.0.0.0 --port 8000
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF'
|
||||
|
||||
# Blockchain Node service
|
||||
ssh $SERVER 'cat > /etc/systemd/system/aitbc-blockchain.service << EOF
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Node
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/root/aitbc/apps/blockchain-node
|
||||
Environment=PATH=/root/aitbc/.venv/bin
|
||||
ExecStart=/root/aitbc/.venv/bin/python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF'
|
||||
|
||||
# Marketplace UI service
|
||||
ssh $SERVER 'cat > /etc/systemd/system/aitbc-marketplace.service << EOF
|
||||
[Unit]
|
||||
Description=AITBC Marketplace UI
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/root/aitbc/apps/marketplace-ui
|
||||
Environment=PATH=/root/aitbc/.venv/bin
|
||||
ExecStart=/root/aitbc/.venv/bin/python server.py --port 3001
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF'
|
||||
|
||||
# Trade Exchange service
|
||||
ssh $SERVER 'cat > /etc/systemd/system/aitbc-exchange.service << EOF
|
||||
[Unit]
|
||||
Description=AITBC Trade Exchange
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/root/aitbc/apps/trade-exchange
|
||||
Environment=PATH=/root/aitbc/.venv/bin
|
||||
ExecStart=/root/aitbc/.venv/bin/python server.py --port 3002
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF'
|
||||
|
||||
# Install nginx if not installed
|
||||
print_status "Installing nginx..."
|
||||
ssh $SERVER "apt update && apt install -y nginx"
|
||||
|
||||
# Create nginx configuration
|
||||
print_status "Configuring nginx..."
|
||||
ssh $SERVER 'cat > /etc/nginx/sites-available/aitbc << EOF
|
||||
server {
|
||||
listen 80;
|
||||
server_name aitbc.bubuit.net;
|
||||
|
||||
# API routes
|
||||
location /api/ {
|
||||
proxy_pass http://127.0.0.1:8000/v1/;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
}
|
||||
|
||||
# Admin routes
|
||||
location /admin/ {
|
||||
proxy_pass http://127.0.0.1:8000/admin/;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
}
|
||||
|
||||
# Blockchain RPC
|
||||
location /rpc/ {
|
||||
proxy_pass http://127.0.0.1:9080/rpc/;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
}
|
||||
|
||||
# Marketplace UI
|
||||
location /Marketplace {
|
||||
proxy_pass http://127.0.0.1:3001/;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
}
|
||||
|
||||
# Trade Exchange
|
||||
location /Exchange {
|
||||
proxy_pass http://127.0.0.1:3002/;
|
||||
proxy_set_header Host \$host;
|
||||
proxy_set_header X-Real-IP \$remote_addr;
|
||||
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||
}
|
||||
|
||||
# Health endpoint
|
||||
location /health {
|
||||
proxy_pass http://127.0.0.1:8000/v1/health;
|
||||
proxy_set_header Host \$host;
|
||||
}
|
||||
|
||||
# Default redirect
|
||||
location / {
|
||||
return 301 /Marketplace;
|
||||
}
|
||||
}
|
||||
EOF'
|
||||
|
||||
# Enable nginx site
|
||||
ssh $SERVER "ln -sf /etc/nginx/sites-available/aitbc /etc/nginx/sites-enabled/"
|
||||
ssh $SERVER "rm -f /etc/nginx/sites-enabled/default"
|
||||
|
||||
# Test and reload nginx
|
||||
ssh $SERVER "nginx -t && systemctl reload nginx"
|
||||
|
||||
# Start services
|
||||
print_status "Starting AITBC services..."
|
||||
ssh $SERVER "systemctl daemon-reload"
|
||||
ssh $SERVER "systemctl enable aitbc-coordinator aitbc-blockchain aitbc-marketplace aitbc-exchange"
|
||||
ssh $SERVER "systemctl start aitbc-coordinator aitbc-blockchain aitbc-marketplace aitbc-exchange"
|
||||
|
||||
# Wait for services to start
|
||||
print_status "Waiting for services to start..."
|
||||
sleep 10
|
||||
|
||||
# Check service status
|
||||
print_status "Checking service status..."
|
||||
ssh $SERVER "systemctl status aitbc-coordinator --no-pager -l | head -10"
|
||||
ssh $SERVER "systemctl status aitbc-blockchain --no-pager -l | head -10"
|
||||
|
||||
# Test endpoints
|
||||
print_status "Testing endpoints..."
|
||||
ssh $SERVER "curl -s http://127.0.0.1:8000/v1/health | head -c 100"
|
||||
echo ""
|
||||
ssh $SERVER "curl -s http://127.0.0.1:8000/v1/admin/stats -H 'X-Api-Key: ${ADMIN_API_KEY}' | head -c 100"
|
||||
echo ""
|
||||
|
||||
echo ""
|
||||
print_status "✅ Deployment complete!"
|
||||
echo ""
|
||||
echo "📋 Service URLs:"
|
||||
echo " 🌐 Server IP: 10.1.223.93"
|
||||
echo " 📊 Marketplace: http://10.1.223.93/Marketplace"
|
||||
echo " 💱 Trade Exchange: http://10.1.223.93/Exchange"
|
||||
echo " 🔗 API: http://10.1.223.93/api"
|
||||
echo " ⛓️ Blockchain RPC: http://10.1.223.93/rpc"
|
||||
echo ""
|
||||
echo "🔒 Domain URLs (with SSL):"
|
||||
echo " 📊 Marketplace: https://aitbc.bubuit.net/Marketplace"
|
||||
echo " 💱 Trade Exchange: https://aitbc.bubuit.net/Exchange"
|
||||
echo " 🔗 API: https://aitbc.bubuit.net/api"
|
||||
echo " ⛓️ Blockchain RPC: https://aitbc.bubuit.net/rpc"
|
||||
echo ""
|
||||
print_status "To manage services:"
|
||||
echo " ssh aitbc 'systemctl status aitbc-coordinator'"
|
||||
echo " ssh aitbc 'journalctl -u aitbc-coordinator -f'"
|
||||
158
scripts/deployment/deploy/deploy_container_with_miner.py
Normal file
158
scripts/deployment/deploy/deploy_container_with_miner.py
Normal file
@@ -0,0 +1,158 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Deploy AITBC services to incus container with GPU miner integration
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import time
|
||||
import sys
|
||||
|
||||
def run_command(cmd, container=None):
|
||||
"""Run command locally or in container"""
|
||||
if container:
|
||||
cmd = f"incus exec {container} -- {cmd}"
|
||||
print(f"Running: {cmd}")
|
||||
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
|
||||
if result.returncode != 0:
|
||||
print(f"Error: {result.stderr}")
|
||||
return False
|
||||
return True
|
||||
|
||||
def deploy_to_container():
|
||||
container = "aitbc"
|
||||
container_ip = "10.1.223.93"
|
||||
|
||||
print("🚀 Deploying AITBC services to container with GPU miner...")
|
||||
|
||||
# Check if container exists
|
||||
result = subprocess.run("incus list -c n", shell=True, capture_output=True, text=True)
|
||||
if container not in result.stdout:
|
||||
print(f"\n📦 Creating container {container}...")
|
||||
subprocess.run(f"incus launch images:ubuntu/22.04 {container}", shell=True)
|
||||
time.sleep(10)
|
||||
|
||||
# Ensure container is running
|
||||
subprocess.run(f"incus start {container}", shell=True)
|
||||
time.sleep(5)
|
||||
|
||||
# Update and install packages in container
|
||||
print("\n📦 Installing packages in container...")
|
||||
run_command("apt-get update", container)
|
||||
run_command("apt-get install -y python3 python3-pip python3-venv curl", container)
|
||||
|
||||
# Stop local services
|
||||
print("\n📋 Stopping local services...")
|
||||
subprocess.run("sudo fuser -k 8000/tcp 2>/dev/null || true", shell=True)
|
||||
subprocess.run("sudo fuser -k 9080/tcp 2>/dev/null || true", shell=True)
|
||||
subprocess.run("pkill -f 'marketplace-ui' 2>/dev/null || true", shell=True)
|
||||
subprocess.run("pkill -f 'trade-exchange' 2>/dev/null || true", shell=True)
|
||||
|
||||
# Copy project to container
|
||||
print("\n📁 Copying project to container...")
|
||||
subprocess.run(f"incus file push -r /home/oib/windsurf/aitbc {container}/home/oib/", shell=True)
|
||||
|
||||
# Setup Python environment in container
|
||||
print("\n🐍 Setting up Python environment...")
|
||||
run_command("cd /home/oib/aitbc && python3 -m venv .venv", container)
|
||||
run_command("cd /home/oib/aitbc && source .venv/bin/activate && pip install fastapi uvicorn httpx sqlmodel psutil", container)
|
||||
|
||||
# Install dependencies
|
||||
print("\n📦 Installing dependencies...")
|
||||
run_command("cd /home/oib/aitbc/apps/coordinator-api && source ../../.venv/bin/activate && pip install -e .", container)
|
||||
run_command("cd /home/oib/aitbc/apps/blockchain-node && source ../../.venv/bin/activate && pip install -e .", container)
|
||||
|
||||
# Create startup script with GPU miner
|
||||
print("\n🔧 Creating startup script with GPU miner...")
|
||||
startup_script = """#!/bin/bash
|
||||
cd /home/oib/aitbc
|
||||
source .venv/bin/activate
|
||||
|
||||
# Start coordinator API
|
||||
echo "Starting Coordinator API..."
|
||||
cd apps/coordinator-api
|
||||
source ../../.venv/bin/activate
|
||||
python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 &
|
||||
COORD_PID=$!
|
||||
|
||||
# Start blockchain node
|
||||
echo "Starting Blockchain Node..."
|
||||
cd ../../apps/blockchain-node
|
||||
source ../../.venv/bin/activate
|
||||
python -m uvicorn aitbc_chain.app:app --host 0.0.0.0 --port 9080 &
|
||||
BLOCK_PID=$!
|
||||
|
||||
# Start trade exchange
|
||||
echo "Starting Trade Exchange..."
|
||||
cd ../../apps/trade-exchange
|
||||
source ../../.venv/bin/activate
|
||||
python simple_exchange_api.py &
|
||||
EXCHANGE_PID=$!
|
||||
|
||||
# Start GPU registry
|
||||
echo "Starting GPU Registry..."
|
||||
cd ../..
|
||||
python gpu_registry_demo.py &
|
||||
REGISTRY_PID=$!
|
||||
|
||||
# Start GPU miner
|
||||
echo "Starting GPU Miner..."
|
||||
python gpu_miner_with_wait.py &
|
||||
MINER_PID=$!
|
||||
|
||||
echo "All services started!"
|
||||
echo "Coordinator API: http://10.1.223.93:8000"
|
||||
echo "Blockchain RPC: http://10.1.223.93:9080"
|
||||
echo "Trade Exchange: http://10.1.223.93:3002"
|
||||
echo "GPU Registry: http://10.1.223.93:8091"
|
||||
|
||||
# Wait for services
|
||||
wait $COORD_PID $BLOCK_PID $EXCHANGE_PID $REGISTRY_PID $MINER_PID
|
||||
"""
|
||||
|
||||
# Write startup script to container
|
||||
with open('/tmp/startup.sh', 'w') as f:
|
||||
f.write(startup_script)
|
||||
subprocess.run(f"incus file push /tmp/startup.sh {container}/home/oib/aitbc/", shell=True)
|
||||
run_command("chmod +x /home/oib/aitbc/startup.sh", container)
|
||||
|
||||
# Create systemd service
|
||||
print("\n⚙️ Creating systemd service...")
|
||||
service_content = """[Unit]
|
||||
Description=AITBC Services with GPU Miner
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=oib
|
||||
WorkingDirectory=/home/oib/aitbc
|
||||
ExecStart=/home/oib/aitbc/startup.sh
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
"""
|
||||
|
||||
with open('/tmp/aitbc.service', 'w') as f:
|
||||
f.write(service_content)
|
||||
subprocess.run(f"incus file push /tmp/aitbc.service {container}/tmp/", shell=True)
|
||||
run_command("mv /tmp/aitbc.service /etc/systemd/system/", container)
|
||||
run_command("systemctl daemon-reload", container)
|
||||
run_command("systemctl enable aitbc.service", container)
|
||||
run_command("systemctl start aitbc.service", container)
|
||||
|
||||
print("\n✅ Deployment complete!")
|
||||
print(f"\n📊 Service URLs:")
|
||||
print(f" - Coordinator API: http://{container_ip}:8000")
|
||||
print(f" - Blockchain RPC: http://{container_ip}:9080")
|
||||
print(f" - Trade Exchange: http://{container_ip}:3002")
|
||||
print(f" - GPU Registry: http://{container_ip}:8091")
|
||||
print(f"\n🔍 Check GPU status:")
|
||||
print(f" curl http://{container_ip}:8091/miners/list")
|
||||
|
||||
print(f"\n📋 To manage services in container:")
|
||||
print(f" incus exec {container} -- systemctl status aitbc")
|
||||
print(f" incus exec {container} -- journalctl -u aitbc -f")
|
||||
|
||||
if __name__ == "__main__":
|
||||
deploy_to_container()
|
||||
130
scripts/deployment/deploy/deploy_gpu_to_container.py
Normal file
130
scripts/deployment/deploy/deploy_gpu_to_container.py
Normal file
@@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Deploy GPU Miner Integration to AITBC Container
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
def run_in_container(cmd):
|
||||
"""Run command in aitbc container"""
|
||||
full_cmd = f"incus exec aitbc -- {cmd}"
|
||||
print(f"Running: {full_cmd}")
|
||||
result = subprocess.run(full_cmd, shell=True, capture_output=True, text=True)
|
||||
if result.returncode != 0:
|
||||
print(f"Error: {result.stderr}")
|
||||
return False, result.stderr
|
||||
return True, result.stdout
|
||||
|
||||
def deploy_gpu_miner_to_container():
|
||||
print("🚀 Deploying GPU Miner Integration to AITBC Container...")
|
||||
|
||||
# Check container access
|
||||
print("\n1. 🔍 Checking container access...")
|
||||
success, output = run_in_container("whoami")
|
||||
if success:
|
||||
print(f" Container user: {output.strip()}")
|
||||
else:
|
||||
print(" ❌ Cannot access container")
|
||||
return
|
||||
|
||||
# Copy GPU miner files to container
|
||||
print("\n2. 📁 Copying GPU miner files...")
|
||||
files_to_copy = [
|
||||
"gpu_miner_with_wait.py",
|
||||
"gpu_registry_demo.py"
|
||||
]
|
||||
|
||||
for file in files_to_copy:
|
||||
cmd = f"incus file push /home/oib/windsurf/aitbc/{file} aitbc/home/oib/"
|
||||
print(f" Copying {file}...")
|
||||
result = subprocess.run(cmd, shell=True)
|
||||
if result.returncode == 0:
|
||||
print(f" ✅ {file} copied")
|
||||
else:
|
||||
print(f" ❌ Failed to copy {file}")
|
||||
|
||||
# Install dependencies in container
|
||||
print("\n3. 📦 Installing dependencies...")
|
||||
run_in_container("pip install httpx fastapi uvicorn psutil")
|
||||
|
||||
# Create GPU miner service in container
|
||||
print("\n4. ⚙️ Creating GPU miner service...")
|
||||
service_content = """[Unit]
|
||||
Description=AITBC GPU Miner Client
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=oib
|
||||
WorkingDirectory=/home/oib
|
||||
ExecStart=/usr/bin/python3 gpu_miner_with_wait.py
|
||||
Restart=always
|
||||
RestartSec=30
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
"""
|
||||
|
||||
# Write service file to container
|
||||
with open('/tmp/gpu-miner.service', 'w') as f:
|
||||
f.write(service_content)
|
||||
subprocess.run("incus file push /tmp/gpu-miner.service aitbc/tmp/", shell=True)
|
||||
run_in_container("sudo mv /tmp/gpu-miner.service /etc/systemd/system/")
|
||||
run_in_container("sudo systemctl daemon-reload")
|
||||
run_in_container("sudo systemctl enable gpu-miner.service")
|
||||
run_in_container("sudo systemctl start gpu-miner.service")
|
||||
|
||||
# Create GPU registry service in container
|
||||
print("\n5. 🎮 Creating GPU registry service...")
|
||||
registry_service = """[Unit]
|
||||
Description=AITBC GPU Registry
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=oib
|
||||
WorkingDirectory=/home/oib
|
||||
ExecStart=/usr/bin/python3 gpu_registry_demo.py
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
"""
|
||||
|
||||
with open('/tmp/gpu-registry.service', 'w') as f:
|
||||
f.write(registry_service)
|
||||
subprocess.run("incus file push /tmp/gpu-registry.service aitbc/tmp/", shell=True)
|
||||
run_in_container("sudo mv /tmp/gpu-registry.service /etc/systemd/system/")
|
||||
run_in_container("sudo systemctl daemon-reload")
|
||||
run_in_container("sudo systemctl enable gpu-registry.service")
|
||||
run_in_container("sudo systemctl start gpu-registry.service")
|
||||
|
||||
# Check services
|
||||
print("\n6. 📊 Checking services...")
|
||||
success, output = run_in_container("sudo systemctl status gpu-miner.service --no-pager")
|
||||
print(output)
|
||||
|
||||
success, output = run_in_container("sudo systemctl status gpu-registry.service --no-pager")
|
||||
print(output)
|
||||
|
||||
# Update coordinator to include miner endpoints
|
||||
print("\n7. 🔗 Updating coordinator API...")
|
||||
|
||||
print("\n✅ GPU Miner deployed to container!")
|
||||
print("\n📊 Access URLs:")
|
||||
print(" - Container IP: 10.1.223.93")
|
||||
print(" - GPU Registry: http://10.1.223.93:8091/miners/list")
|
||||
print(" - Coordinator API: http://10.1.223.93:8000")
|
||||
|
||||
print("\n🔧 To manage services in container:")
|
||||
print(" incus exec aitbc -- sudo systemctl status gpu-miner")
|
||||
print(" incus exec aitbc -- sudo journalctl -u gpu-miner -f")
|
||||
|
||||
if __name__ == "__main__":
|
||||
deploy_gpu_miner_to_container()
|
||||
113
scripts/deployment/deploy/setup-gossip-relay.sh
Executable file
113
scripts/deployment/deploy/setup-gossip-relay.sh
Executable file
@@ -0,0 +1,113 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Setup gossip relay to connect blockchain nodes
|
||||
|
||||
set -e
|
||||
|
||||
echo "🌐 Setting up Gossip Relay for Blockchain Nodes"
|
||||
echo "=============================================="
|
||||
|
||||
# Colors
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
# Stop existing nodes
|
||||
print_status "Stopping blockchain nodes..."
|
||||
sudo systemctl stop blockchain-node blockchain-node-2 blockchain-rpc blockchain-rpc-2 2>/dev/null || true
|
||||
|
||||
# Update node configurations to use broadcast backend
|
||||
print_status "Updating Node 1 configuration..."
|
||||
sudo cat > /opt/blockchain-node/.env << EOF
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain.db
|
||||
RPC_BIND_HOST=127.0.0.1
|
||||
RPC_BIND_PORT=8082
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7070
|
||||
PROPOSER_KEY=node1_proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=broadcast
|
||||
GOSSIP_BROADCAST_URL=http://127.0.0.1:7070/gossip
|
||||
EOF
|
||||
|
||||
print_status "Updating Node 2 configuration..."
|
||||
sudo cat > /opt/blockchain-node-2/.env << EOF
|
||||
CHAIN_ID=ait-devnet
|
||||
DB_PATH=./data/chain2.db
|
||||
RPC_BIND_HOST=127.0.0.1
|
||||
RPC_BIND_PORT=8081
|
||||
P2P_BIND_HOST=0.0.0.0
|
||||
P2P_BIND_PORT=7071
|
||||
PROPOSER_KEY=node2_proposer_key_$(date +%s)
|
||||
MINT_PER_UNIT=1000
|
||||
COORDINATOR_RATIO=0.05
|
||||
GOSSIP_BACKEND=broadcast
|
||||
GOSSIP_BROADCAST_URL=http://127.0.0.1:7070/gossip
|
||||
EOF
|
||||
|
||||
# Create gossip relay service
|
||||
print_status "Creating gossip relay service..."
|
||||
sudo cat > /etc/systemd/system/blockchain-gossip-relay.service << EOF
|
||||
[Unit]
|
||||
Description=AITBC Blockchain Gossip Relay
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
User=root
|
||||
WorkingDirectory=/opt/blockchain-node
|
||||
Environment=PATH=/opt/blockchain-node/.venv/bin:/usr/local/bin:/usr/bin:/bin
|
||||
Environment=PYTHONPATH=/opt/blockchain-node/src:/opt/blockchain-node/scripts
|
||||
ExecStart=/opt/blockchain-node/.venv/bin/python3 -m aitbc_chain.gossip.relay --port 7070 --host 0.0.0.0
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Enable and start gossip relay
|
||||
print_status "Starting gossip relay..."
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable blockchain-gossip-relay
|
||||
sudo systemctl start blockchain-gossip-relay
|
||||
|
||||
# Wait for relay to start
|
||||
sleep 2
|
||||
|
||||
# Check if relay is running
|
||||
print_status "Checking gossip relay status..."
|
||||
sudo systemctl status blockchain-gossip-relay --no-pager | head -10
|
||||
|
||||
# Restart blockchain nodes
|
||||
print_status "Restarting blockchain nodes with shared gossip..."
|
||||
sudo systemctl start blockchain-node blockchain-node-2 blockchain-rpc blockchain-rpc-2
|
||||
|
||||
# Wait for nodes to start
|
||||
sleep 3
|
||||
|
||||
# Check status
|
||||
print_status "Checking node status..."
|
||||
sudo systemctl status blockchain-node blockchain-node-2 --no-pager | grep -E 'Active:|Main PID:'
|
||||
|
||||
echo ""
|
||||
print_status "✅ Gossip relay setup complete!"
|
||||
echo ""
|
||||
echo "Nodes are now connected via shared gossip backend."
|
||||
echo "They should sync blocks and transactions."
|
||||
echo ""
|
||||
echo "To verify connectivity:"
|
||||
echo " 1. Run: python /opt/test_blockchain_simple.py"
|
||||
echo " 2. Check if heights are converging"
|
||||
echo ""
|
||||
echo "Gossip relay logs: sudo journalctl -u blockchain-gossip-relay -f"
|
||||
40
scripts/deployment/deploy/test-deployment.sh
Executable file
40
scripts/deployment/deploy/test-deployment.sh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Test if blockchain node and explorer are running
|
||||
|
||||
echo "🔍 Testing Blockchain Deployment"
|
||||
echo "==============================="
|
||||
|
||||
# Test blockchain RPC
|
||||
echo "Testing blockchain RPC..."
|
||||
if curl -s http://aitbc.keisanki.net:8082/rpc/head > /dev/null; then
|
||||
echo "✅ Blockchain RPC is accessible"
|
||||
curl -s http://aitbc.keisanki.net:8082/rpc/head | jq '.height'
|
||||
else
|
||||
echo "❌ Blockchain RPC is not accessible"
|
||||
fi
|
||||
|
||||
# Test explorer
|
||||
echo ""
|
||||
echo "Testing blockchain explorer..."
|
||||
if curl -s http://aitbc.keisanki.net:3000 > /dev/null; then
|
||||
echo "✅ Explorer is accessible"
|
||||
else
|
||||
echo "❌ Explorer is not accessible"
|
||||
fi
|
||||
|
||||
# Check services on server
|
||||
echo ""
|
||||
echo "Checking service status on ns3..."
|
||||
ssh ns3-root "systemctl is-active blockchain-node blockchain-rpc nginx" | while read service status; do
|
||||
if [ "$status" = "active" ]; then
|
||||
echo "✅ $service is running"
|
||||
else
|
||||
echo "❌ $service is not running"
|
||||
fi
|
||||
done
|
||||
|
||||
# Check logs if needed
|
||||
echo ""
|
||||
echo "Recent blockchain logs:"
|
||||
ssh ns3-root "journalctl -u blockchain-node -n 5 --no-pager"
|
||||
923
scripts/deployment/implement-agent-protocols.sh
Executable file
923
scripts/deployment/implement-agent-protocols.sh
Executable file
@@ -0,0 +1,923 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# AITBC Agent Protocols Implementation Script
|
||||
# Implements cross-chain agent communication framework
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m'
|
||||
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
print_header() {
|
||||
echo -e "${BLUE}=== $1 ===${NC}"
|
||||
}
|
||||
|
||||
# Configuration
|
||||
PROJECT_ROOT="/opt/aitbc"
|
||||
AGENT_REGISTRY_DIR="$PROJECT_ROOT/apps/agent-registry"
|
||||
AGENT_PROTOCOLS_DIR="$PROJECT_ROOT/apps/agent-protocols"
|
||||
SERVICES_DIR="$PROJECT_ROOT/apps/agent-services"
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
print_header "AITBC Agent Protocols Implementation"
|
||||
echo ""
|
||||
echo "🤖 Implementing Cross-Chain Agent Communication Framework"
|
||||
echo "📊 Based on core planning: READY FOR NEXT PHASE"
|
||||
echo "🎯 Success Probability: 90%+ (infrastructure ready)"
|
||||
echo ""
|
||||
|
||||
# Step 1: Create directory structure
|
||||
print_header "Step 1: Creating Agent Protocols Structure"
|
||||
create_directory_structure
|
||||
|
||||
# Step 2: Implement Agent Registry
|
||||
print_header "Step 2: Implementing Agent Registry"
|
||||
implement_agent_registry
|
||||
|
||||
# Step 3: Implement Message Protocol
|
||||
print_header "Step 3: Implementing Message Protocol"
|
||||
implement_message_protocol
|
||||
|
||||
# Step 4: Create Task Management System
|
||||
print_header "Step 4: Creating Task Management System"
|
||||
create_task_management
|
||||
|
||||
# Step 5: Implement Integration Layer
|
||||
print_header "Step 5: Implementing Integration Layer"
|
||||
implement_integration_layer
|
||||
|
||||
# Step 6: Create Agent Services
|
||||
print_header "Step 6: Creating Agent Services"
|
||||
create_agent_services
|
||||
|
||||
# Step 7: Set up Testing Framework
|
||||
print_header "Step 7: Setting Up Testing Framework"
|
||||
setup_testing_framework
|
||||
|
||||
# Step 8: Configure Deployment
|
||||
print_header "Step 8: Configuring Deployment"
|
||||
configure_deployment
|
||||
|
||||
print_header "Agent Protocols Implementation Complete! 🎉"
|
||||
echo ""
|
||||
echo "✅ Directory structure created"
|
||||
echo "✅ Agent registry implemented"
|
||||
echo "✅ Message protocol implemented"
|
||||
echo "✅ Task management system created"
|
||||
echo "✅ Integration layer implemented"
|
||||
echo "✅ Agent services created"
|
||||
echo "✅ Testing framework set up"
|
||||
echo "✅ Deployment configured"
|
||||
echo ""
|
||||
echo "🚀 Agent Protocols Status: READY FOR TESTING"
|
||||
echo "📊 Next Phase: Advanced AI Trading & Analytics"
|
||||
echo "🎯 Goal: GLOBAL AI POWER MARKETPLACE LEADERSHIP"
|
||||
}
|
||||
|
||||
# Create directory structure
|
||||
create_directory_structure() {
|
||||
print_status "Creating agent protocols directory structure..."
|
||||
|
||||
mkdir -p "$AGENT_REGISTRY_DIR"/{src,tests,config}
|
||||
mkdir -p "$AGENT_PROTOCOLS_DIR"/{src,tests,config}
|
||||
mkdir -p "$SERVICES_DIR"/{agent-coordinator,agent-orchestrator,agent-bridge}
|
||||
mkdir -p "$PROJECT_ROOT/apps/agents"/{trading,compliance,analytics,marketplace}
|
||||
|
||||
print_status "Directory structure created"
|
||||
}
|
||||
|
||||
# Implement Agent Registry
# Writes the agent-registry FastAPI service into $AGENT_REGISTRY_DIR.
# Fixes vs. previous version:
#   - generated sqlite helper now commits (writes were silently rolled back
#     when the connection closed)
#   - requirements.txt no longer lists `sqlite3` (standard library; the
#     pip install of the generated service would fail)
implement_agent_registry() {
    print_status "Implementing agent registry service..."

    # Create agent registry main application
    cat > "$AGENT_REGISTRY_DIR/src/app.py" << 'EOF'
#!/usr/bin/env python3
"""
AITBC Agent Registry Service
Central agent discovery and registration system
"""

from fastapi import FastAPI, HTTPException, Depends
from pydantic import BaseModel
from typing import List, Optional, Dict, Any
import json
import time
import uuid
from datetime import datetime, timedelta
import sqlite3
from contextlib import contextmanager

app = FastAPI(title="AITBC Agent Registry API", version="1.0.0")


# Database setup
def get_db():
    """Open a connection to the registry database with dict-style rows."""
    conn = sqlite3.connect('agent_registry.db')
    conn.row_factory = sqlite3.Row
    return conn


@contextmanager
def get_db_connection():
    """Yield a connection, committing on success and rolling back on error.

    Without the explicit commit, sqlite3 rolls back any pending
    INSERT/UPDATE/DELETE when the connection is closed.
    """
    conn = get_db()
    try:
        yield conn
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()


# Initialize database
def init_db():
    """Create the agents and agent_types tables if they do not exist."""
    with get_db_connection() as conn:
        conn.execute('''
            CREATE TABLE IF NOT EXISTS agents (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                type TEXT NOT NULL,
                capabilities TEXT NOT NULL,
                chain_id TEXT NOT NULL,
                endpoint TEXT NOT NULL,
                status TEXT DEFAULT 'active',
                last_heartbeat TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                metadata TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        conn.execute('''
            CREATE TABLE IF NOT EXISTS agent_types (
                type TEXT PRIMARY KEY,
                description TEXT NOT NULL,
                required_capabilities TEXT NOT NULL
            )
        ''')


# Models
class Agent(BaseModel):
    id: str
    name: str
    type: str
    capabilities: List[str]
    chain_id: str
    endpoint: str
    metadata: Optional[Dict[str, Any]] = {}


class AgentRegistration(BaseModel):
    name: str
    type: str
    capabilities: List[str]
    chain_id: str
    endpoint: str
    metadata: Optional[Dict[str, Any]] = {}


class AgentHeartbeat(BaseModel):
    agent_id: str
    status: str = "active"
    metadata: Optional[Dict[str, Any]] = {}


# API Endpoints
@app.on_event("startup")
async def startup_event():
    init_db()


@app.post("/api/agents/register", response_model=Agent)
async def register_agent(agent: AgentRegistration):
    """Register a new agent"""
    agent_id = str(uuid.uuid4())

    with get_db_connection() as conn:
        conn.execute('''
            INSERT INTO agents (id, name, type, capabilities, chain_id, endpoint, metadata)
            VALUES (?, ?, ?, ?, ?, ?, ?)
        ''', (
            agent_id, agent.name, agent.type,
            json.dumps(agent.capabilities), agent.chain_id,
            agent.endpoint, json.dumps(agent.metadata)
        ))

    return Agent(
        id=agent_id,
        name=agent.name,
        type=agent.type,
        capabilities=agent.capabilities,
        chain_id=agent.chain_id,
        endpoint=agent.endpoint,
        metadata=agent.metadata
    )


@app.get("/api/agents", response_model=List[Agent])
async def list_agents(
    agent_type: Optional[str] = None,
    chain_id: Optional[str] = None,
    capability: Optional[str] = None
):
    """List registered agents with optional filters"""
    with get_db_connection() as conn:
        query = "SELECT * FROM agents WHERE status = 'active'"
        params = []

        if agent_type:
            query += " AND type = ?"
            params.append(agent_type)

        if chain_id:
            query += " AND chain_id = ?"
            params.append(chain_id)

        if capability:
            # Substring match against the JSON-encoded capability list.
            query += " AND capabilities LIKE ?"
            params.append(f'%{capability}%')

        agents = conn.execute(query, params).fetchall()

        return [
            Agent(
                id=agent["id"],
                name=agent["name"],
                type=agent["type"],
                capabilities=json.loads(agent["capabilities"]),
                chain_id=agent["chain_id"],
                endpoint=agent["endpoint"],
                metadata=json.loads(agent["metadata"] or "{}")
            )
            for agent in agents
        ]


@app.post("/api/agents/{agent_id}/heartbeat")
async def agent_heartbeat(agent_id: str, heartbeat: AgentHeartbeat):
    """Update agent heartbeat"""
    with get_db_connection() as conn:
        conn.execute('''
            UPDATE agents
            SET last_heartbeat = CURRENT_TIMESTAMP, status = ?, metadata = ?
            WHERE id = ?
        ''', (heartbeat.status, json.dumps(heartbeat.metadata), agent_id))

    return {"status": "ok", "timestamp": datetime.utcnow()}


@app.get("/api/agents/{agent_id}")
async def get_agent(agent_id: str):
    """Get agent details"""
    with get_db_connection() as conn:
        agent = conn.execute(
            "SELECT * FROM agents WHERE id = ?", (agent_id,)
        ).fetchone()

        if not agent:
            raise HTTPException(status_code=404, detail="Agent not found")

        return Agent(
            id=agent["id"],
            name=agent["name"],
            type=agent["type"],
            capabilities=json.loads(agent["capabilities"]),
            chain_id=agent["chain_id"],
            endpoint=agent["endpoint"],
            metadata=json.loads(agent["metadata"] or "{}")
        )


@app.delete("/api/agents/{agent_id}")
async def unregister_agent(agent_id: str):
    """Unregister an agent"""
    with get_db_connection() as conn:
        conn.execute("DELETE FROM agents WHERE id = ?", (agent_id,))

    return {"status": "ok", "message": "Agent unregistered"}


@app.get("/api/health")
async def health_check():
    """Health check endpoint"""
    return {"status": "ok", "timestamp": datetime.utcnow()}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8003)
EOF

    # Create requirements file.
    # NOTE: sqlite3 ships with the Python standard library and must NOT be
    # listed here — `pip install sqlite3` fails.
    cat > "$AGENT_REGISTRY_DIR/requirements.txt" << 'EOF'
fastapi==0.104.1
uvicorn==0.24.0
pydantic==2.5.0
python-multipart==0.0.6
EOF

    print_status "Agent registry implemented"
}
|
||||
|
||||
# Implement Message Protocol
# Writes the secure agent message protocol module into $AGENT_PROTOCOLS_DIR.
# Fix vs. previous version: the generated AgentMessageClient type-filtered
# getters (get_task_assignments etc.) drained the whole queue and silently
# discarded messages of other types; they now keep unmatched messages in the
# client's local buffer.
implement_message_protocol() {
    print_status "Implementing message protocol..."

    cat > "$AGENT_PROTOCOLS_DIR/src/message_protocol.py" << 'EOF'
#!/usr/bin/env python3
"""
AITBC Agent Message Protocol
Secure cross-chain agent communication
"""

import json
import os
import time
import uuid
import hashlib
import hmac
from typing import Dict, Any, List, Optional
from datetime import datetime
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import base64


class MessageProtocol:
    """Secure message protocol for agent communication.

    Messages are HMAC-signed over their routing fields and their payload is
    encrypted with Fernet. Delivery uses an in-memory per-receiver queue.
    """

    def __init__(self, encryption_key: Optional[str] = None):
        self.encryption_key = encryption_key or self._generate_key()
        self.cipher = Fernet(self.encryption_key)
        # receiver_id -> list of queued (encrypted) messages
        self.message_queue = {}

    def _generate_key(self) -> bytes:
        """Derive a Fernet key from environment-provided secret material.

        SECURITY: the hardcoded fallbacks are for development only; set
        AITBC_AGENT_PROTOCOL_KEY and AITBC_AGENT_PROTOCOL_SALT in production.
        """
        password = os.environ.get('AITBC_AGENT_PROTOCOL_KEY', b"default-key-change-in-production")
        salt = os.environ.get('AITBC_AGENT_PROTOCOL_SALT', b"aitbc-salt-agent-protocol")
        if isinstance(password, str):
            password = password.encode()
        if isinstance(salt, str):
            salt = salt.encode()
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100000,
        )
        return base64.urlsafe_b64encode(kdf.derive(password))

    def create_message(
        self,
        sender_id: str,
        receiver_id: str,
        message_type: str,
        payload: Dict[str, Any],
        chain_id: str = "ait-devnet",
        priority: str = "normal"
    ) -> Dict[str, Any]:
        """Create a signed message with an encrypted payload."""
        message = {
            "id": str(uuid.uuid4()),
            "sender_id": sender_id,
            "receiver_id": receiver_id,
            "message_type": message_type,
            "payload": payload,
            "chain_id": chain_id,
            "priority": priority,
            "timestamp": datetime.utcnow().isoformat(),
            "signature": None
        }

        # Sign routing fields first, then encrypt the payload in place.
        message["signature"] = self._sign_message(message)
        message["payload"] = self._encrypt_data(json.dumps(payload))

        return message

    def _sign_message(self, message: Dict[str, Any]) -> str:
        """Sign the routing fields (not the payload) with HMAC-SHA256."""
        message_data = json.dumps({
            "sender_id": message["sender_id"],
            "receiver_id": message["receiver_id"],
            "message_type": message["message_type"],
            "timestamp": message["timestamp"]
        }, sort_keys=True)

        return hmac.new(
            self.encryption_key,
            message_data.encode(),
            hashlib.sha256
        ).hexdigest()

    def _encrypt_data(self, data: str) -> str:
        """Encrypt data with Fernet.

        NOTE: the Fernet token is already urlsafe-base64 text, so the extra
        b64encode here is redundant but harmless; _decrypt_data mirrors it.
        """
        encrypted_data = self.cipher.encrypt(data.encode())
        return base64.urlsafe_b64encode(encrypted_data).decode()

    def _decrypt_data(self, encrypted_data: str) -> str:
        """Reverse _encrypt_data."""
        encrypted_bytes = base64.urlsafe_b64decode(encrypted_data.encode())
        return self.cipher.decrypt(encrypted_bytes).decode()

    def verify_message(self, message: Dict[str, Any]) -> bool:
        """Verify a message's HMAC signature; False on any failure."""
        try:
            signature = message.get("signature")
            if not signature:
                return False

            message_data = json.dumps({
                "sender_id": message["sender_id"],
                "receiver_id": message["receiver_id"],
                "message_type": message["message_type"],
                "timestamp": message["timestamp"]
            }, sort_keys=True)

            expected_signature = hmac.new(
                self.encryption_key,
                message_data.encode(),
                hashlib.sha256
            ).hexdigest()

            # Constant-time comparison to avoid timing leaks.
            return hmac.compare_digest(signature, expected_signature)

        except Exception:
            return False

    def decrypt_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """Verify the signature and decrypt the payload in place.

        Raises ValueError on bad signature or decryption failure.
        """
        if not self.verify_message(message):
            raise ValueError("Invalid message signature")

        try:
            decrypted_payload = self._decrypt_data(message["payload"])
            message["payload"] = json.loads(decrypted_payload)
            return message
        except Exception as e:
            raise ValueError(f"Failed to decrypt message: {e}")

    def send_message(self, message: Dict[str, Any]) -> bool:
        """Queue a message for its receiver; returns False on failure."""
        try:
            receiver_id = message["receiver_id"]
            self.message_queue.setdefault(receiver_id, []).append(message)
            return True
        except Exception:
            return False

    def receive_messages(self, agent_id: str) -> List[Dict[str, Any]]:
        """Drain, verify and decrypt all queued messages for an agent.

        Messages that fail verification/decryption are dropped.
        """
        messages = self.message_queue.get(agent_id, [])
        self.message_queue[agent_id] = []

        verified_messages = []
        for message in messages:
            try:
                verified_messages.append(self.decrypt_message(message))
            except ValueError:
                continue

        return verified_messages


# Message types
class MessageTypes:
    TASK_ASSIGNMENT = "task_assignment"
    TASK_RESULT = "task_result"
    HEARTBEAT = "heartbeat"
    COORDINATION = "coordination"
    DATA_REQUEST = "data_request"
    DATA_RESPONSE = "data_response"
    ERROR = "error"
    STATUS_UPDATE = "status_update"


# Agent message client
class AgentMessageClient:
    """Client for agent message communication."""

    def __init__(self, agent_id: str, registry_url: str):
        self.agent_id = agent_id
        self.registry_url = registry_url
        self.protocol = MessageProtocol()
        # Local buffer so type-filtered getters do not discard messages of
        # other types pulled from the protocol queue.
        self.received_messages = []

    def send_task_assignment(
        self,
        receiver_id: str,
        task_data: Dict[str, Any],
        chain_id: str = "ait-devnet"
    ) -> bool:
        """Send task assignment to agent"""
        message = self.protocol.create_message(
            sender_id=self.agent_id,
            receiver_id=receiver_id,
            message_type=MessageTypes.TASK_ASSIGNMENT,
            payload=task_data,
            chain_id=chain_id
        )
        return self.protocol.send_message(message)

    def send_task_result(
        self,
        receiver_id: str,
        task_result: Dict[str, Any],
        chain_id: str = "ait-devnet"
    ) -> bool:
        """Send task result to agent"""
        message = self.protocol.create_message(
            sender_id=self.agent_id,
            receiver_id=receiver_id,
            message_type=MessageTypes.TASK_RESULT,
            payload=task_result,
            chain_id=chain_id
        )
        return self.protocol.send_message(message)

    def send_coordination_message(
        self,
        receiver_id: str,
        coordination_data: Dict[str, Any],
        chain_id: str = "ait-devnet"
    ) -> bool:
        """Send coordination message to agent"""
        message = self.protocol.create_message(
            sender_id=self.agent_id,
            receiver_id=receiver_id,
            message_type=MessageTypes.COORDINATION,
            payload=coordination_data,
            chain_id=chain_id
        )
        return self.protocol.send_message(message)

    def _drain_queue(self) -> None:
        """Move all queued messages for this agent into the local buffer."""
        self.received_messages.extend(
            self.protocol.receive_messages(self.agent_id)
        )

    def receive_messages(self) -> List[Dict[str, Any]]:
        """Return (and clear) all buffered and queued messages."""
        self._drain_queue()
        messages, self.received_messages = self.received_messages, []
        return messages

    def _take_messages(self, message_type: str) -> List[Dict[str, Any]]:
        """Return buffered messages of one type, keeping the rest buffered.

        Previously the type-specific getters drained the whole queue and
        silently dropped messages of other types.
        """
        self._drain_queue()
        matched = [m for m in self.received_messages
                   if m["message_type"] == message_type]
        self.received_messages = [m for m in self.received_messages
                                  if m["message_type"] != message_type]
        return matched

    def get_task_assignments(self) -> List[Dict[str, Any]]:
        """Get task assignment messages"""
        return self._take_messages(MessageTypes.TASK_ASSIGNMENT)

    def get_task_results(self) -> List[Dict[str, Any]]:
        """Get task result messages"""
        return self._take_messages(MessageTypes.TASK_RESULT)

    def get_coordination_messages(self) -> List[Dict[str, Any]]:
        """Get coordination messages"""
        return self._take_messages(MessageTypes.COORDINATION)
EOF

    print_status "Message protocol implemented"
}
|
||||
|
||||
# Create Task Management System
# Writes the agent task manager into $SERVICES_DIR/agent-coordinator.
# Fixes vs. previous version in the generated Python:
#   - get_db_connection now commits (sqlite3 rolled back every write when
#     the connection closed, so no task state ever persisted)
#   - assign_task returns False and skips the workload increment when no
#     pending task row actually matched
create_task_management() {
    print_status "Creating task management system..."

    cat > "$SERVICES_DIR/agent-coordinator/src/task_manager.py" << 'EOF'
#!/usr/bin/env python3
"""
AITBC Agent Task Manager
Distributes and coordinates tasks among agents
"""

import json
import time
import uuid
from typing import Dict, List, Any, Optional
from datetime import datetime, timedelta
from enum import Enum
import sqlite3
from contextlib import contextmanager


class TaskStatus(Enum):
    PENDING = "pending"
    ASSIGNED = "assigned"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"


class TaskPriority(Enum):
    LOW = "low"
    NORMAL = "normal"
    HIGH = "high"
    CRITICAL = "critical"


class Task:
    """Agent task representation"""

    def __init__(
        self,
        task_type: str,
        payload: Dict[str, Any],
        required_capabilities: List[str],
        priority: TaskPriority = TaskPriority.NORMAL,
        timeout: int = 300,
        chain_id: str = "ait-devnet"
    ):
        self.id = str(uuid.uuid4())
        self.task_type = task_type
        self.payload = payload
        self.required_capabilities = required_capabilities
        self.priority = priority
        self.timeout = timeout
        self.chain_id = chain_id
        self.status = TaskStatus.PENDING
        self.assigned_agent_id = None
        self.created_at = datetime.utcnow()
        self.assigned_at = None
        self.started_at = None
        self.completed_at = None
        self.result = None
        self.error = None


class TaskManager:
    """Manages agent task distribution and coordination"""

    def __init__(self, db_path: str = "agent_tasks.db"):
        self.db_path = db_path
        self.init_database()

    def init_database(self):
        """Initialize task database"""
        with self.get_db_connection() as conn:
            conn.execute('''
                CREATE TABLE IF NOT EXISTS tasks (
                    id TEXT PRIMARY KEY,
                    task_type TEXT NOT NULL,
                    payload TEXT NOT NULL,
                    required_capabilities TEXT NOT NULL,
                    priority TEXT NOT NULL,
                    timeout INTEGER NOT NULL,
                    chain_id TEXT NOT NULL,
                    status TEXT NOT NULL,
                    assigned_agent_id TEXT,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    assigned_at TIMESTAMP,
                    started_at TIMESTAMP,
                    completed_at TIMESTAMP,
                    result TEXT,
                    error TEXT
                )
            ''')

            conn.execute('''
                CREATE TABLE IF NOT EXISTS agent_workload (
                    agent_id TEXT PRIMARY KEY,
                    current_tasks INTEGER DEFAULT 0,
                    completed_tasks INTEGER DEFAULT 0,
                    failed_tasks INTEGER DEFAULT 0,
                    last_heartbeat TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
            ''')

    @contextmanager
    def get_db_connection(self):
        """Yield a connection, committing on success, rolling back on error.

        Without the explicit commit, sqlite3 rolls back all pending
        INSERT/UPDATE statements when the connection is closed.
        """
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        try:
            yield conn
            conn.commit()
        except Exception:
            conn.rollback()
            raise
        finally:
            conn.close()

    def create_task(
        self,
        task_type: str,
        payload: Dict[str, Any],
        required_capabilities: List[str],
        priority: TaskPriority = TaskPriority.NORMAL,
        timeout: int = 300,
        chain_id: str = "ait-devnet"
    ) -> Task:
        """Create and persist a new task in PENDING state."""
        task = Task(
            task_type=task_type,
            payload=payload,
            required_capabilities=required_capabilities,
            priority=priority,
            timeout=timeout,
            chain_id=chain_id
        )

        with self.get_db_connection() as conn:
            conn.execute('''
                INSERT INTO tasks (
                    id, task_type, payload, required_capabilities,
                    priority, timeout, chain_id, status
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                task.id, task.task_type, json.dumps(task.payload),
                json.dumps(task.required_capabilities), task.priority.value,
                task.timeout, task.chain_id, task.status.value
            ))

        return task

    def assign_task(self, task_id: str, agent_id: str) -> bool:
        """Assign a pending task to an agent.

        Returns False (and leaves the workload untouched) when no pending
        task with the given id exists.
        """
        with self.get_db_connection() as conn:
            cur = conn.execute('''
                UPDATE tasks
                SET status = ?, assigned_agent_id = ?, assigned_at = CURRENT_TIMESTAMP
                WHERE id = ? AND status = ?
            ''', (TaskStatus.ASSIGNED.value, agent_id, task_id, TaskStatus.PENDING.value))

            if cur.rowcount == 0:
                # No pending task matched; do not inflate the agent workload.
                return False

            conn.execute('''
                INSERT OR REPLACE INTO agent_workload (agent_id, current_tasks)
                VALUES (
                    ?,
                    COALESCE((SELECT current_tasks FROM agent_workload WHERE agent_id = ?), 0) + 1
                )
            ''', (agent_id, agent_id))

        return True

    def start_task(self, task_id: str) -> bool:
        """Mark an assigned task as in progress."""
        with self.get_db_connection() as conn:
            conn.execute('''
                UPDATE tasks
                SET status = ?, started_at = CURRENT_TIMESTAMP
                WHERE id = ? AND status = ?
            ''', (TaskStatus.IN_PROGRESS.value, task_id, TaskStatus.ASSIGNED.value))

        return True

    def complete_task(self, task_id: str, result: Dict[str, Any]) -> bool:
        """Complete an in-progress task and credit the assigned agent."""
        with self.get_db_connection() as conn:
            # Get task info for workload update
            task = conn.execute(
                "SELECT assigned_agent_id FROM tasks WHERE id = ?", (task_id,)
            ).fetchone()

            if task and task["assigned_agent_id"]:
                agent_id = task["assigned_agent_id"]

                conn.execute('''
                    UPDATE tasks
                    SET status = ?, completed_at = CURRENT_TIMESTAMP, result = ?
                    WHERE id = ? AND status = ?
                ''', (TaskStatus.COMPLETED.value, json.dumps(result), task_id, TaskStatus.IN_PROGRESS.value))

                conn.execute('''
                    UPDATE agent_workload
                    SET current_tasks = current_tasks - 1,
                        completed_tasks = completed_tasks + 1
                    WHERE agent_id = ?
                ''', (agent_id,))

        return True

    def fail_task(self, task_id: str, error: str) -> bool:
        """Mark an in-progress task as failed and record the error."""
        with self.get_db_connection() as conn:
            # Get task info for workload update
            task = conn.execute(
                "SELECT assigned_agent_id FROM tasks WHERE id = ?", (task_id,)
            ).fetchone()

            if task and task["assigned_agent_id"]:
                agent_id = task["assigned_agent_id"]

                conn.execute('''
                    UPDATE tasks
                    SET status = ?, completed_at = CURRENT_TIMESTAMP, error = ?
                    WHERE id = ? AND status = ?
                ''', (TaskStatus.FAILED.value, error, task_id, TaskStatus.IN_PROGRESS.value))

                conn.execute('''
                    UPDATE agent_workload
                    SET current_tasks = current_tasks - 1,
                        failed_tasks = failed_tasks + 1
                    WHERE agent_id = ?
                ''', (agent_id,))

        return True

    def _row_to_task(self, row) -> Task:
        """Rebuild a Task object from a database row."""
        task = Task(
            task_type=row["task_type"],
            payload=json.loads(row["payload"]),
            required_capabilities=json.loads(row["required_capabilities"]),
            priority=TaskPriority(row["priority"]),
            timeout=row["timeout"],
            chain_id=row["chain_id"]
        )
        task.id = row["id"]
        task.status = TaskStatus(row["status"])
        task.assigned_agent_id = row["assigned_agent_id"]
        task.created_at = datetime.fromisoformat(row["created_at"])
        return task

    def get_pending_tasks(self, limit: int = 100) -> List[Task]:
        """Get pending tasks ordered by priority then age."""
        with self.get_db_connection() as conn:
            rows = conn.execute('''
                SELECT * FROM tasks
                WHERE status = ?
                ORDER BY
                    CASE priority
                        WHEN 'critical' THEN 1
                        WHEN 'high' THEN 2
                        WHEN 'normal' THEN 3
                        WHEN 'low' THEN 4
                    END,
                    created_at ASC
                LIMIT ?
            ''', (TaskStatus.PENDING.value, limit)).fetchall()

            return [self._row_to_task(row) for row in rows]

    def get_agent_tasks(self, agent_id: str) -> List[Task]:
        """Get tasks assigned to a specific agent, newest first."""
        with self.get_db_connection() as conn:
            rows = conn.execute(
                "SELECT * FROM tasks WHERE assigned_agent_id = ? ORDER BY created_at DESC",
                (agent_id,)
            ).fetchall()

            return [self._row_to_task(row) for row in rows]

    def get_task_statistics(self) -> Dict[str, Any]:
        """Get task counts by status and per-agent workload statistics."""
        with self.get_db_connection() as conn:
            status_counts = conn.execute('''
                SELECT status, COUNT(*) as count
                FROM tasks
                GROUP BY status
            ''').fetchall()

            agent_stats = conn.execute('''
                SELECT agent_id, current_tasks, completed_tasks, failed_tasks
                FROM agent_workload
                ORDER BY completed_tasks DESC
            ''').fetchall()

            return {
                "task_counts": {row["status"]: row["count"] for row in status_counts},
                "agent_statistics": [
                    {
                        "agent_id": row["agent_id"],
                        "current_tasks": row["current_tasks"],
                        "completed_tasks": row["completed_tasks"],
                        "failed_tasks": row["failed_tasks"]
                    }
                    for row in agent_stats
                ]
            }
EOF

    print_status "Task management system created"
}
|
||||
|
||||
# Continue with remaining implementation steps...
printf '%s\n' "Implementation continues with integration layer and services..."
|
||||
1436
scripts/deployment/implement-ai-trading-analytics.sh
Executable file
1436
scripts/deployment/implement-ai-trading-analytics.sh
Executable file
File diff suppressed because it is too large
Load Diff
588
scripts/deployment/production-deploy.sh
Executable file
588
scripts/deployment/production-deploy.sh
Executable file
@@ -0,0 +1,588 @@
|
||||
#!/bin/bash

# AITBC Production Deployment Script
# This script handles production deployment with zero-downtime
#
# Usage: production-deploy.sh [VERSION] [REGION]

# Fail on errors AND on failures inside pipelines (e.g. pg_dump | gzip);
# plain `set -e` would let a failing left-hand pipeline stage go unnoticed.
set -eo pipefail

# Production Configuration (constants — do not reassign)
readonly ENVIRONMENT="production"
VERSION=${1:-latest}
REGION=${2:-us-east-1}
readonly VERSION REGION
readonly NAMESPACE="aitbc-prod"
readonly DOMAIN="aitbc.dev"

# Colors for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'
|
||||
|
||||
# Logging helpers.
# Diagnostics (errors, warnings) go to stderr so they survive stdout
# redirection; informational output stays on stdout.
log() {
    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}

error() {
    # Fatal: report to stderr and abort the deployment.
    echo -e "${RED}[ERROR]${NC} $1" >&2
    exit 1
}

success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}
|
||||
|
||||
# Pre-deployment checks: branch guard, full test suite, cluster readiness.
# Calls error() (which exits) on any failure.
pre_deployment_checks() {
    log "Running pre-deployment checks..."

    # Only the production branch may be deployed to production.
    local current_branch
    current_branch=$(git branch --show-current)
    if [ "$current_branch" != "production" ]; then
        error "Must be on production branch to deploy to production"
    fi

    # All test tiers must pass before anything is built or pushed.
    log "Running tests..."
    pytest tests/unit/ -v --tb=short || error "Unit tests failed"
    pytest tests/integration/ -v --tb=short || error "Integration tests failed"
    pytest tests/security/ -v --tb=short || error "Security tests failed"
    pytest tests/performance/test_performance_lightweight.py::TestPerformance::test_cli_performance -v --tb=short || error "Performance tests failed"

    # Cluster must be reachable with Ready nodes; create namespace if absent.
    log "Checking production infrastructure..."
    kubectl get nodes | grep -q "Ready" || error "Production nodes not ready"
    kubectl get namespace "$NAMESPACE" || kubectl create namespace "$NAMESPACE"

    success "Pre-deployment checks passed"
}
|
||||
|
||||
# Backup current deployment: k8s manifests, database dump, app data.
# Sets the global backup_dir (referenced by later rollback tooling —
# assumption from usage here; confirm before making it local).
backup_current_deployment() {
    log "Backing up current deployment..."

    # Timestamped backup directory
    backup_dir="/opt/aitbc/backups/pre-deployment-$(date +%Y%m%d_%H%M%S)"
    mkdir -p "$backup_dir"

    # Backup current Kubernetes configuration
    kubectl get all -n "$NAMESPACE" -o yaml > "$backup_dir/current-deployment.yaml"

    # Backup database. DATABASE_URL must be set; check pg_dump's own status
    # explicitly because gzip's success would otherwise mask a dump failure.
    pg_dump "${DATABASE_URL:?DATABASE_URL must be set}" | gzip > "$backup_dir/database_backup.sql.gz"
    [ "${PIPESTATUS[0]}" -eq 0 ] || error "pg_dump failed during backup"

    # Backup application data from the running coordinator pod
    kubectl exec -n "$NAMESPACE" deployment/coordinator-api -- tar -czf /tmp/app_data_backup.tar.gz /app/data
    kubectl cp "$NAMESPACE/deployment/coordinator-api:/tmp/app_data_backup.tar.gz" "$backup_dir/app_data_backup.tar.gz"

    success "Backup completed: $backup_dir"
}
|
||||
|
||||
# Build production images for the CLI and every app with a Dockerfile,
# then push them to the registry. Aborts via error() on any failure —
# previously the `docker push` results were unchecked, so a failed push
# let the deployment proceed with stale images.
build_production_images() {
    log "Building production images..."

    local service_dir service_name

    # Build CLI image
    docker build -t "aitbc/cli:$VERSION" -f Dockerfile --target production . || error "Failed to build CLI image"

    # Build one image per app directory that ships a Dockerfile
    for service_dir in apps/*/; do
        if [ -f "$service_dir/Dockerfile" ]; then
            service_name=$(basename "$service_dir")
            log "Building $service_name image..."
            docker build -t "aitbc/$service_name:$VERSION" -f "$service_dir/Dockerfile" "$service_dir" || error "Failed to build $service_name image"
        fi
    done

    # Push images to registry — every push is error-checked
    log "Pushing images to registry..."
    docker push "aitbc/cli:$VERSION" || error "Failed to push CLI image"

    for service_dir in apps/*/; do
        if [ -f "$service_dir/Dockerfile" ]; then
            service_name=$(basename "$service_dir")
            docker push "aitbc/$service_name:$VERSION" || error "Failed to push $service_name image"
        fi
    done

    success "Production images built and pushed"
}
|
||||
|
||||
# Deploy database: PostgreSQL (with read replica) and Redis via Helm.
# Requires POSTGRES_PASSWORD and REDIS_PASSWORD in the environment.
# SECURITY NOTE: passing passwords via --set exposes them on the helm argv
# (visible in `ps`); consider --set-file/--values from a protected file.
deploy_database() {
    log "Deploying database..."

    # Deploy PostgreSQL
    helm upgrade --install postgres bitnami/postgresql \
        --namespace "$NAMESPACE" \
        --set auth.postgresPassword="${POSTGRES_PASSWORD:?POSTGRES_PASSWORD must be set}" \
        --set auth.database=aitbc_prod \
        --set primary.persistence.size=100Gi \
        --set primary.resources.requests.memory=8Gi \
        --set primary.resources.requests.cpu=2000m \
        --set primary.resources.limits.memory=16Gi \
        --set primary.resources.limits.cpu=4000m \
        --set readReplicas.replicaCount=1 \
        --set readReplicas.persistence.size=50Gi \
        --wait \
        --timeout 10m || error "Failed to deploy PostgreSQL"

    # Deploy Redis
    helm upgrade --install redis bitnami/redis \
        --namespace "$NAMESPACE" \
        --set auth.password="${REDIS_PASSWORD:?REDIS_PASSWORD must be set}" \
        --set master.persistence.size=20Gi \
        --set master.resources.requests.memory=2Gi \
        --set master.resources.requests.cpu=1000m \
        --set master.resources.limits.memory=4Gi \
        --set master.resources.limits.cpu=2000m \
        --set replica.replicaCount=2 \
        --wait \
        --timeout 5m || error "Failed to deploy Redis"

    success "Database deployed successfully"
}
|
||||
|
||||
# Deploy core blockchain services (2 replicas each, port 8007).
# Fixes vs. previous version: manifests are written to unpredictable
# mktemp files instead of the guessable /tmp/$service-deployment.yaml
# (symlink/clobber risk on shared /tmp), and all expansions are quoted.
deploy_core_services() {
    log "Deploying core services..."

    local service manifest

    for service in blockchain-node consensus-node network-node; do
        log "Deploying $service..."

        # Create deployment manifest in a private temp file.
        manifest=$(mktemp /tmp/aitbc-core-XXXXXX.yaml) || error "mktemp failed"
        cat > "$manifest" << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  replicas: 2
  selector:
    matchLabels:
      app: $service
  template:
    metadata:
      labels:
        app: $service
    spec:
      containers:
      - name: $service
        image: aitbc/$service:$VERSION
        ports:
        - containerPort: 8007
          name: http
        env:
        - name: NODE_ENV
          value: "production"
        - name: DATABASE_URL
          valueFrom:
            secretKeyRef:
              name: aitbc-secrets
              key: database-url
        - name: REDIS_URL
          valueFrom:
            secretKeyRef:
              name: aitbc-secrets
              key: redis-url
        resources:
          requests:
            memory: "2Gi"
            cpu: "1000m"
          limits:
            memory: "4Gi"
            cpu: "2000m"
        livenessProbe:
          httpGet:
            path: /health
            port: 8007
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /health
            port: 8007
          initialDelaySeconds: 5
          periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  selector:
    app: $service
  ports:
  - port: 8007
    targetPort: 8007
  type: ClusterIP
EOF

        # Apply deployment and wait for the rollout to finish.
        kubectl apply -f "$manifest" -n "$NAMESPACE" || error "Failed to deploy $service"
        kubectl rollout status "deployment/$service" -n "$NAMESPACE" --timeout=300s || error "Failed to rollout $service"

        rm -f -- "$manifest"
    done

    success "Core services deployed successfully"
}
|
||||
|
||||
# Deploy application services (3 replicas each; per-service port mapping).
# Fixes vs. previous version: manifests go to unpredictable mktemp files
# instead of guessable /tmp/$service-deployment.yaml names, expansions are
# quoted, and loop variables are local.
deploy_application_services() {
    log "Deploying application services..."

    local services service port manifest
    services=("coordinator-api" "exchange-integration" "compliance-service" "trading-engine" "plugin-registry" "plugin-marketplace" "plugin-security" "plugin-analytics" "global-infrastructure" "global-ai-agents" "multi-region-load-balancer")

    for service in "${services[@]}"; do
        log "Deploying $service..."

        # Determine the service's HTTP port
        case $service in
            "coordinator-api") port=8001 ;;
            "exchange-integration") port=8010 ;;
            "compliance-service") port=8011 ;;
            "trading-engine") port=8012 ;;
            "plugin-registry") port=8013 ;;
            "plugin-marketplace") port=8014 ;;
            "plugin-security") port=8015 ;;
            "plugin-analytics") port=8016 ;;
            "global-infrastructure") port=8017 ;;
            "global-ai-agents") port=8018 ;;
            "multi-region-load-balancer") port=8019 ;;
        esac

        # Create deployment manifest in a private temp file.
        manifest=$(mktemp /tmp/aitbc-app-XXXXXX.yaml) || error "mktemp failed"
        cat > "$manifest" << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $service
  template:
    metadata:
      labels:
        app: $service
    spec:
      containers:
      - name: $service
        image: aitbc/$service:$VERSION
        ports:
        - containerPort: $port
          name: http
        env:
        - name: NODE_ENV
          value: "production"
        - name: DATABASE_URL
          valueFrom:
            secretKeyRef:
              name: aitbc-secrets
              key: database-url
        - name: REDIS_URL
          valueFrom:
            secretKeyRef:
              name: aitbc-secrets
              key: redis-url
        - name: JWT_SECRET
          valueFrom:
            secretKeyRef:
              name: aitbc-secrets
              key: jwt-secret
        - name: ENCRYPTION_KEY
          valueFrom:
            secretKeyRef:
              name: aitbc-secrets
              key: encryption-key
        resources:
          requests:
            memory: "1Gi"
            cpu: "500m"
          limits:
            memory: "2Gi"
            cpu: "1000m"
        livenessProbe:
          httpGet:
            path: /health
            port: $port
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /health
            port: $port
          initialDelaySeconds: 5
          periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  selector:
    app: $service
  ports:
  - port: $port
    targetPort: $port
  type: ClusterIP
EOF

        # Apply deployment and wait for the rollout to finish.
        kubectl apply -f "$manifest" -n "$NAMESPACE" || error "Failed to deploy $service"
        kubectl rollout status "deployment/$service" -n "$NAMESPACE" --timeout=300s || error "Failed to rollout $service"

        rm -f -- "$manifest"
    done

    success "Application services deployed successfully"
}
|
||||
|
||||
# Deploy ingress and load balancer
#
# Renders and applies the nginx Ingress that terminates TLS (via
# cert-manager) and routes api./marketplace./explorer.$DOMAIN to their
# backend Services.
# Globals (read): NAMESPACE, DOMAIN
# Relies on helpers defined elsewhere in this file: log, success, error
# (error is assumed to exit the script).
deploy_ingress() {
    log "Deploying ingress and load balancer..."

    # Create ingress manifest in an unpredictable temp file instead of a
    # fixed world-writable /tmp path.
    local manifest
    manifest=$(mktemp) || error "mktemp failed for ingress manifest"

    # Unquoted EOF: $NAMESPACE and $DOMAIN expand in the YAML.
    cat > "$manifest" << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: aitbc-ingress
  namespace: $NAMESPACE
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
spec:
  tls:
  - hosts:
    - api.$DOMAIN
    - marketplace.$DOMAIN
    - explorer.$DOMAIN
    secretName: aitbc-tls
  rules:
  - host: api.$DOMAIN
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: coordinator-api
            port:
              number: 8001
  - host: marketplace.$DOMAIN
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: plugin-marketplace
            port:
              number: 8014
  - host: explorer.$DOMAIN
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: explorer
            port:
              number: 8020
EOF

    # Apply ingress
    kubectl apply -f "$manifest" -n "$NAMESPACE" || error "Failed to deploy ingress"

    rm -f -- "$manifest"

    success "Ingress deployed successfully"
}
|
||||
|
||||
# Deploy monitoring
#
# Installs/upgrades the kube-prometheus-stack chart (Prometheus + Grafana)
# and loads the Grafana dashboard definitions as a ConfigMap.
# Globals (read): NAMESPACE, GRAFANA_PASSWORD
# Relies on helpers defined elsewhere in this file: log, success, error
# (error is assumed to exit the script).
deploy_monitoring() {
    log "Deploying monitoring stack..."

    # Deploy Prometheus.
    # Quote the password/namespace so whitespace or shell metacharacters in
    # them cannot split the helm argument list.
    # NOTE(review): the password is still visible in 'ps' output while helm
    # runs — consider a values file or --set-file if that matters here.
    helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
        --namespace "$NAMESPACE" \
        --create-namespace \
        --set prometheus.prometheus.spec.retention=30d \
        --set prometheus.prometheus.spec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=50Gi \
        --set grafana.adminPassword="$GRAFANA_PASSWORD" \
        --set grafana.persistence.size=10Gi \
        --set defaultRules.create=true \
        --wait \
        --timeout 10m || error "Failed to deploy monitoring"

    # Import Grafana dashboards
    log "Importing Grafana dashboards..."

    # Create dashboard configmaps (create-or-update via dry-run | apply).
    # NOTE(review): path monitoring/grafana/dashboards/ is relative to the
    # current working directory — confirm callers run this from the repo root.
    kubectl create configmap grafana-dashboards \
        --from-file=monitoring/grafana/dashboards/ \
        -n "$NAMESPACE" \
        --dry-run=client -o yaml | kubectl apply -f -

    success "Monitoring deployed successfully"
}
|
||||
|
||||
# Run post-deployment tests
#
# Smoke-tests the deployment: waits for pods to become ready, probes each
# core API's /health endpoint through a temporary port-forward, then checks
# the public HTTPS endpoints.
# Globals (read): NAMESPACE, DOMAIN
# Relies on helpers defined elsewhere in this file: log, success, error
# (error is assumed to exit the script).
post_deployment_tests() {
    log "Running post-deployment tests..."

    # Wait for all services to be ready.
    # NOTE(review): the selector 'app!=pod' matches every pod carrying an
    # 'app' label whose value is not literally "pod" — confirm this is the
    # intended "all pods" selector.
    kubectl wait --for=condition=ready pod -l app!=pod -n "$NAMESPACE" --timeout=600s

    # Test API endpoints ("name:port" pairs; ports match each service's
    # containerPort in deploy_application_services).
    local endpoints=(
        "coordinator-api:8001"
        "exchange-integration:8010"
        "trading-engine:8012"
        "plugin-registry:8013"
        "plugin-marketplace:8014"
    )

    local service_port service port port_forward_pid
    for service_port in "${endpoints[@]}"; do
        # Split "name:port" with parameter expansion — no echo|cut subshells.
        service=${service_port%%:*}
        port=${service_port##*:}

        log "Testing $service..."

        # Port-forward and test.
        # BUG FIX: the remote port must be the service's own container port,
        # not a hard-coded 8007 — each application service listens on $port
        # (its liveness probe targets $port), so forwarding to 8007 would
        # make every health check below fail.
        kubectl port-forward -n "$NAMESPACE" "deployment/$service" "$port:$port" &
        port_forward_pid=$!

        # Give the port-forward a moment to establish before probing.
        sleep 5

        if curl -f -s "http://localhost:$port/health" > /dev/null; then
            success "$service is healthy"
        else
            error "$service health check failed"
        fi

        # Kill port-forward (it may already have exited; ignore failure).
        kill "$port_forward_pid" 2>/dev/null || true
    done

    # Test external endpoints
    local external_endpoints=(
        "https://api.$DOMAIN/health"
        "https://marketplace.$DOMAIN/api/v1/marketplace/featured"
    )

    local endpoint
    for endpoint in "${external_endpoints[@]}"; do
        log "Testing $endpoint..."

        if curl -f -s "$endpoint" > /dev/null; then
            success "$endpoint is responding"
        else
            error "$endpoint is not responding"
        fi
    done

    success "Post-deployment tests passed"
}
|
||||
|
||||
# Create secrets
#
# Renders the aitbc-secrets Secret from environment variables and applies it
# idempotently (create-or-update via --dry-run=client | kubectl apply).
# Globals (read): DATABASE_URL, REDIS_URL, JWT_SECRET, ENCRYPTION_KEY,
#                 POSTGRES_PASSWORD, REDIS_PASSWORD, NAMESPACE
# Relies on helpers defined elsewhere in this file: log, success.
create_secrets() {
    log "Creating secrets..."

    # Create secret from environment variables.
    # NOTE(review): the literal secret values appear in kubectl's argv and
    # are briefly visible in 'ps' output — prefer --from-file with process
    # substitution if that is a concern in this environment.
    kubectl create secret generic aitbc-secrets \
        --from-literal=database-url="$DATABASE_URL" \
        --from-literal=redis-url="$REDIS_URL" \
        --from-literal=jwt-secret="$JWT_SECRET" \
        --from-literal=encryption-key="$ENCRYPTION_KEY" \
        --from-literal=postgres-password="$POSTGRES_PASSWORD" \
        --from-literal=redis-password="$REDIS_PASSWORD" \
        --namespace "$NAMESPACE" \
        --dry-run=client -o yaml | kubectl apply -f -

    success "Secrets created"
}
|
||||
|
||||
# Main deployment function
#
# Orchestrates the full production rollout in order: prerequisite checks,
# secrets, backup, image builds, database, core services, application
# services, ingress, monitoring, and smoke tests, then prints a summary.
# Each step helper calls error on failure (error is assumed to exit), so
# reaching the end of the sequence implies every step succeeded.
# Globals (read): ENVIRONMENT, VERSION, REGION, DOMAIN, NAMESPACE
main() {
    log "Starting AITBC production deployment..."
    log "Environment: $ENVIRONMENT"
    log "Version: $VERSION"
    log "Region: $REGION"
    log "Domain: $DOMAIN"

    # Check prerequisites: required CLI tools and a reachable cluster.
    command -v kubectl >/dev/null 2>&1 || error "kubectl is not installed"
    command -v helm >/dev/null 2>&1 || error "Helm is not installed"
    kubectl cluster-info >/dev/null 2>&1 || error "Cannot connect to Kubernetes cluster"

    # Run deployment steps (order matters: secrets must exist before the
    # services that mount them; services before ingress/monitoring/tests).
    pre_deployment_checks
    create_secrets
    backup_current_deployment
    build_production_images
    deploy_database
    deploy_core_services
    deploy_application_services
    deploy_ingress
    deploy_monitoring
    post_deployment_tests

    success "Production deployment completed successfully!"

    # Display deployment information
    log "Deployment Information:"
    log "Environment: $ENVIRONMENT"
    log "Version: $VERSION"
    log "Namespace: $NAMESPACE"
    log "Domain: $DOMAIN"
    log ""
    log "Services are available at:"
    log "  API: https://api.$DOMAIN"
    log "  Marketplace: https://marketplace.$DOMAIN"
    log "  Explorer: https://explorer.$DOMAIN"
    log "  Grafana: https://grafana.$DOMAIN"
    log ""
    log "To check deployment status:"
    log "  kubectl get pods -n $NAMESPACE"
    log "  kubectl get services -n $NAMESPACE"
    log ""
    log "To view logs:"
    log "  kubectl logs -f deployment/coordinator-api -n $NAMESPACE"
}
|
||||
|
||||
# Handle script interruption: route Ctrl-C / TERM through the shared error
# helper so an aborted run is reported the same way as a failed step.
trap 'error "Script interrupted"' INT TERM

# Export environment variables so child processes (kubectl, helm, and any
# manifests rendered from them) can see the current values.
# NOTE(review): 'export VAR=${VAR}' exports an empty string when VAR is
# unset — confirm pre_deployment_checks (called from main) validates that
# the required variables are actually set before they are used.
export DATABASE_URL=${DATABASE_URL}
export REDIS_URL=${REDIS_URL}
export JWT_SECRET=${JWT_SECRET}
export ENCRYPTION_KEY=${ENCRYPTION_KEY}
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
export REDIS_PASSWORD=${REDIS_PASSWORD}
export GRAFANA_PASSWORD=${GRAFANA_PASSWORD}
export VERSION=${VERSION}
export NAMESPACE=${NAMESPACE}
export DOMAIN=${DOMAIN}

# Run main function
main "$@"
|
||||
Reference in New Issue
Block a user