Merge gitea/main, preserving release v0.2.2 stability and CLI documentation

This commit is contained in:
AITBC System
2026-03-25 12:58:02 +01:00
230 changed files with 2370 additions and 369 deletions

392
scripts/deployment/deploy.sh Executable file
View File

@@ -0,0 +1,392 @@
#!/bin/bash
# AITBC Automated Deployment Script
# This script handles automated deployment of AITBC services
#
# Usage: deploy.sh [ENVIRONMENT] [VERSION] [REGION]
#   ENVIRONMENT  local|docker|staging|production|rollback|cleanup (default: staging)
#   VERSION      image tag to build and deploy (default: latest)
#   REGION       region label; only logged in this script (default: us-east-1)
set -e
# Configuration — positional parameters with defaults
ENVIRONMENT=${1:-staging}
VERSION=${2:-latest}
REGION=${3:-us-east-1}
NAMESPACE="aitbc-${ENVIRONMENT}"
# Colors for output (ANSI escape sequences, expanded by `echo -e` below)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers — all write to stdout; error() also terminates the script.
# log MESSAGE: timestamped informational line.
log() {
    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}
# error MESSAGE: red error line, then exit 1 (aborts the whole deploy).
error() {
    echo -e "${RED}[ERROR]${NC} $1"
    exit 1
}
# success MESSAGE: green confirmation line.
success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
# warning MESSAGE: yellow warning line (non-fatal).
warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
# Verify every external dependency a deploy needs (CLI tools, Docker daemon,
# cluster connectivity); any missing piece aborts the script via error().
check_prerequisites() {
    log "Checking prerequisites..."
    # Required command-line tooling
    if ! command -v docker >/dev/null 2>&1; then
        error "Docker is not installed"
    fi
    if ! command -v docker-compose >/dev/null 2>&1; then
        error "Docker Compose is not installed"
    fi
    if ! command -v kubectl >/dev/null 2>&1; then
        error "kubectl is not installed"
    fi
    if ! command -v helm >/dev/null 2>&1; then
        error "Helm is not installed"
    fi
    # Live daemon and cluster reachability
    if ! docker info >/dev/null 2>&1; then
        error "Docker daemon is not running"
    fi
    if ! kubectl cluster-info >/dev/null 2>&1; then
        error "Cannot connect to Kubernetes cluster"
    fi
    success "Prerequisites check passed"
}
# Build the CLI image plus one image per apps/<service>/ directory that ships
# a Dockerfile, all tagged with $VERSION. Any failed build aborts via error().
build_images() {
    log "Building Docker images..."
    log "Building CLI image..."
    if ! docker build -t aitbc/cli:${VERSION} -f Dockerfile .; then
        error "Failed to build CLI image"
    fi
    # One image per service directory that provides its own Dockerfile.
    local dir name
    for dir in apps/*/; do
        [ -f "$dir/Dockerfile" ] || continue
        name=$(basename "$dir")
        log "Building ${name} image..."
        docker build -t aitbc/${name}:${VERSION} -f "$dir/Dockerfile" "$dir" \
            || error "Failed to build ${name} image"
    done
    success "All Docker images built successfully"
}
# Run every pytest suite in order: unit (with coverage), integration,
# security, and the lightweight performance check. The first failing suite
# aborts the deploy via error().
run_tests() {
    log "Running tests..."
    log "Running unit tests..."
    if ! pytest tests/unit/ -v --cov=aitbc_cli --cov-report=term; then
        error "Unit tests failed"
    fi
    log "Running integration tests..."
    if ! pytest tests/integration/ -v; then
        error "Integration tests failed"
    fi
    log "Running security tests..."
    if ! pytest tests/security/ -v; then
        error "Security tests failed"
    fi
    log "Running performance tests..."
    if ! pytest tests/performance/test_performance_lightweight.py::TestPerformance::test_cli_performance -v; then
        error "Performance tests failed"
    fi
    success "All tests passed"
}
# Deploy to Kubernetes
# Deploys the full AITBC stack into ${NAMESPACE}: secrets/configmaps, Postgres
# and Redis (Bitnami Helm charts), then every service group in dependency
# order. Service manifests are rendered through envsubst, so they may
# reference the variables exported at the bottom of this script
# (VERSION, NAMESPACE, POSTGRES_PASSWORD, REDIS_PASSWORD, ...).
deploy_kubernetes() {
    log "Deploying to Kubernetes namespace: ${NAMESPACE}"
    # Idempotent namespace creation: render with --dry-run, then apply.
    kubectl create namespace ${NAMESPACE} --dry-run=client -o yaml | kubectl apply -f -
    # Apply secrets
    log "Applying secrets..."
    kubectl apply -f k8s/secrets/ -n ${NAMESPACE} || error "Failed to apply secrets"
    # Apply configmaps
    log "Applying configmaps..."
    kubectl apply -f k8s/configmaps/ -n ${NAMESPACE} || error "Failed to apply configmaps"
    # Deploy PostgreSQL via the Bitnami chart; --wait blocks until ready.
    log "Deploying database..."
    helm repo add bitnami https://charts.bitnami.com/bitnami
    helm upgrade --install postgres bitnami/postgresql \
        --namespace ${NAMESPACE} \
        --set auth.postgresPassword=${POSTGRES_PASSWORD} \
        --set auth.database=aitbc \
        --set primary.persistence.size=20Gi \
        --set primary.resources.requests.memory=2Gi \
        --set primary.resources.requests.cpu=1000m \
        --wait || error "Failed to deploy database"
    # Deploy Redis
    log "Deploying Redis..."
    helm upgrade --install redis bitnami/redis \
        --namespace ${NAMESPACE} \
        --set auth.password=${REDIS_PASSWORD} \
        --set master.persistence.size=8Gi \
        --set master.resources.requests.memory=512Mi \
        --set master.resources.requests.cpu=500m \
        --wait || error "Failed to deploy Redis"
    # Deploy core services
    log "Deploying core services..."
    # Blockchain layer first — later services depend on it.
    for service in blockchain-node consensus-node network-node; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done
    # Coordinator next, then the production-facing services.
    log "Deploying coordinator-api..."
    envsubst < k8s/deployments/coordinator-api.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy coordinator-api"
    kubectl rollout status deployment/coordinator-api -n ${NAMESPACE} --timeout=300s || error "Failed to rollout coordinator-api"
    # Deploy production services
    for service in exchange-integration compliance-service trading-engine; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done
    # Deploy plugin ecosystem
    for service in plugin-registry plugin-marketplace plugin-security plugin-analytics; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done
    # Deploy global infrastructure
    for service in global-infrastructure global-ai-agents multi-region-load-balancer; do
        log "Deploying ${service}..."
        envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
        kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
    done
    # Deploy explorer
    log "Deploying explorer..."
    envsubst < k8s/deployments/explorer.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy explorer"
    kubectl rollout status deployment/explorer -n ${NAMESPACE} --timeout=300s || error "Failed to rollout explorer"
    success "Kubernetes deployment completed"
}
# Bring the stack up with Docker Compose: export credential defaults, recycle
# any running stack, then verify the key containers report "Up".
deploy_docker_compose() {
    log "Deploying with Docker Compose..."
    # Credential defaults for the compose file (dev-grade fallbacks).
    export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-aitbc123}
    export REDIS_PASSWORD=${REDIS_PASSWORD:-aitbc123}
    export GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}
    log "Stopping existing services..."
    docker-compose down || true
    log "Starting services..."
    if ! docker-compose up -d; then
        error "Failed to start services"
    fi
    # Fixed grace period before probing container state.
    log "Waiting for services to be healthy..."
    sleep 30
    local svc
    for svc in postgres redis blockchain-node coordinator-api exchange-integration; do
        log "Checking ${svc} health..."
        docker-compose ps ${svc} | grep -q "Up" || error "Service ${svc} is not running"
    done
    success "Docker Compose deployment completed"
}
# Run health checks
# Verifies the deployed stack: against Kubernetes when a cluster is reachable,
# otherwise against the local Docker Compose stack. error() exits the script
# on the first failing check.
run_health_checks() {
    log "Running health checks..."
    if command -v kubectl >/dev/null 2>&1 && kubectl cluster-info >/dev/null 2>&1; then
        # Kubernetes health checks
        log "Checking Kubernetes deployment health..."
        # Check pod status
        kubectl get pods -n ${NAMESPACE} || error "Failed to get pod status"
        # Check service health
        services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
        for service in "${services[@]}"; do
            log "Checking ${service} health..."
            # Only the FIRST pod for each app label is inspected; additional
            # replicas are not checked.
            kubectl get pods -n ${NAMESPACE} -l app=${service} -o jsonpath='{.items[0].status.phase}' | grep -q "Running" || error "${service} pods are not running"
            # Check service endpoint — skipped when no LoadBalancer IP exists
            # (the jsonpath fails and service_url stays empty).
            service_url=$(kubectl get svc ${service} -n ${NAMESPACE} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "")
            if [ -n "$service_url" ]; then
                curl -f http://${service_url}/health >/dev/null 2>&1 || error "${service} health check failed"
            fi
        done
    else
        # Docker Compose health checks
        log "Checking Docker Compose deployment health..."
        services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
        for service in "${services[@]}"; do
            log "Checking ${service} health..."
            if ! docker-compose ps ${service} | grep -q "Up"; then
                error "Service ${service} is not running"
            fi
            # Check health endpoint
            # NOTE(review): `docker-compose port` expects SERVICE PRIVATE_PORT;
            # called without the port argument, $port is likely empty and the
            # curl below would hit http://localhost:/health — confirm and fix.
            port=$(docker-compose port ${service} | cut -d: -f2)
            curl -f http://localhost:${port}/health >/dev/null 2>&1 || error "${service} health check failed"
        done
    fi
    success "All health checks passed"
}
# Run smoke tests
# Quick end-to-end sanity checks: CLI --help inside the aitbc-cli container,
# plus /health probes on the coordinator, exchange and plugin-registry APIs.
# NOTE(review): main() calls this after BOTH Compose and Kubernetes deploys,
# yet every probe below goes through docker-compose — confirm that is intended.
run_smoke_tests() {
    log "Running smoke tests..."
    # Test CLI functionality
    log "Testing CLI functionality..."
    docker-compose exec aitbc-cli python -m aitbc_cli.main --help >/dev/null || error "CLI smoke test failed"
    # Test API endpoints
    log "Testing API endpoints..."
    # NOTE(review): `docker-compose port` expects SERVICE PRIVATE_PORT; called
    # without the port argument the *_port variables are likely empty — confirm.
    # Test coordinator API
    coordinator_port=$(docker-compose port coordinator-api | cut -d: -f2)
    curl -f http://localhost:${coordinator_port}/health >/dev/null || error "Coordinator API smoke test failed"
    # Test exchange API
    exchange_port=$(docker-compose port exchange-integration | cut -d: -f2)
    curl -f http://localhost:${exchange_port}/health >/dev/null || error "Exchange API smoke test failed"
    # Test plugin registry
    plugin_port=$(docker-compose port plugin-registry | cut -d: -f2)
    curl -f http://localhost:${plugin_port}/health >/dev/null || error "Plugin registry smoke test failed"
    success "Smoke tests passed"
}
# Roll the stack back to the previous release: `kubectl rollout undo` per
# service when a cluster is reachable, otherwise redeploy the Compose stack
# with the image tag 'previous'.
rollback() {
    log "Rolling back deployment..."
    if command -v kubectl >/dev/null 2>&1 && kubectl cluster-info >/dev/null 2>&1; then
        log "Rolling back Kubernetes deployment..."
        local svc
        for svc in coordinator-api exchange-integration trading-engine plugin-registry; do
            log "Rolling back ${svc}..."
            kubectl rollout undo deployment/${svc} -n ${NAMESPACE} || error "Failed to rollback ${svc}"
            kubectl rollout status deployment/${svc} -n ${NAMESPACE} --timeout=300s || error "Failed to rollback ${svc}"
        done
    else
        log "Rolling back Docker Compose deployment..."
        docker-compose down || error "Failed to stop services"
        # Restart with previous version (assuming it's tagged as 'previous').
        export VERSION=previous
        deploy_docker_compose
    fi
    success "Rollback completed"
}
# Reclaim disk space by pruning unused Docker images and volumes.
# Both prunes are best-effort: failures are deliberately ignored.
cleanup() {
    log "Cleaning up..."
    local resource
    for resource in image volume; do
        docker "$resource" prune -f || true
    done
    success "Cleanup completed"
}
# Main deployment function
# Dispatches on $ENVIRONMENT: local/docker → Docker Compose path,
# staging/production → Kubernetes path, plus rollback/cleanup utility modes.
# Prints a summary (and, for Compose deploys, the local endpoint list) on exit.
main() {
    log "Starting AITBC deployment..."
    log "Environment: ${ENVIRONMENT}"
    log "Version: ${VERSION}"
    log "Region: ${REGION}"
    case "${ENVIRONMENT}" in
        "local"|"docker")
            check_prerequisites
            build_images
            run_tests
            deploy_docker_compose
            run_health_checks
            run_smoke_tests
            ;;
        "staging"|"production")
            check_prerequisites
            build_images
            run_tests
            deploy_kubernetes
            run_health_checks
            run_smoke_tests
            ;;
        "rollback")
            rollback
            ;;
        "cleanup")
            cleanup
            ;;
        *)
            error "Unknown environment: ${ENVIRONMENT}. Use 'local', 'docker', 'staging', 'production', 'rollback', or 'cleanup'"
            ;;
    esac
    success "Deployment completed successfully!"
    # Display deployment information
    log "Deployment Information:"
    log "Environment: ${ENVIRONMENT}"
    log "Version: ${VERSION}"
    log "Namespace: ${NAMESPACE}"
    # BUG FIX: 'local' deploys via Docker Compose exactly like 'docker' (see
    # the case arm above), but the endpoint list used to print only for
    # 'docker'. Show it for both Compose environments.
    if [ "${ENVIRONMENT}" = "docker" ] || [ "${ENVIRONMENT}" = "local" ]; then
        log "Services are running on:"
        log "  Coordinator API: http://localhost:8001"
        log "  Exchange Integration: http://localhost:8010"
        log "  Trading Engine: http://localhost:8012"
        log "  Plugin Registry: http://localhost:8013"
        log "  Plugin Marketplace: http://localhost:8014"
        log "  Explorer: http://localhost:8020"
        log "  Grafana: http://localhost:3000 (admin/admin)"
        log "  Prometheus: http://localhost:9090"
    fi
}
# Handle script interruption
trap 'error "Script interrupted"' INT TERM
# Export environment variables for envsubst-rendered manifests and child
# processes.
# NOTE(review): the fallbacks below ("aitbc123", "admin") are weak dev-only
# defaults — real deployments must supply these via the environment.
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-aitbc123}
export REDIS_PASSWORD=${REDIS_PASSWORD:-aitbc123}
export GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}
export VERSION=${VERSION}
export NAMESPACE=${NAMESPACE}
# Run main function
main "$@"

View File

@@ -0,0 +1,588 @@
#!/bin/bash
# AITBC Production Deployment Script
# This script handles production deployment with zero-downtime
#
# Usage: <script> [VERSION] [REGION]
# Requires DATABASE_URL, REDIS_URL, JWT_SECRET, ENCRYPTION_KEY and the
# *_PASSWORD variables in the environment (consumed by create_secrets and
# the Helm releases below).
set -e
# Production Configuration — environment/namespace/domain are fixed here.
ENVIRONMENT="production"
VERSION=${1:-latest}
REGION=${2:-us-east-1}
NAMESPACE="aitbc-prod"
DOMAIN="aitbc.dev"
# Colors for output (ANSI escapes, expanded by `echo -e` in the helpers)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers — log/success/warning write to stdout; error() exits 1.
log() {
    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}
error() {
    echo -e "${RED}[ERROR]${NC} $1"
    exit 1
}
success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
# Gate production deploys: must be on the 'production' git branch, all pytest
# suites must pass, and the cluster must have Ready nodes plus the target
# namespace (created here if absent).
pre_deployment_checks() {
    log "Running pre-deployment checks..."
    local branch
    branch=$(git branch --show-current)
    [ "$branch" = "production" ] || error "Must be on production branch to deploy to production"
    log "Running tests..."
    pytest tests/unit/ -v --tb=short || error "Unit tests failed"
    pytest tests/integration/ -v --tb=short || error "Integration tests failed"
    pytest tests/security/ -v --tb=short || error "Security tests failed"
    pytest tests/performance/test_performance_lightweight.py::TestPerformance::test_cli_performance -v --tb=short || error "Performance tests failed"
    log "Checking production infrastructure..."
    kubectl get nodes | grep -q "Ready" || error "Production nodes not ready"
    # Create the namespace on first deploy; no-op thereafter.
    kubectl get namespace $NAMESPACE || kubectl create namespace $NAMESPACE
    success "Pre-deployment checks passed"
}
# Backup current deployment
# Snapshots the live state before deploying: all k8s objects in the namespace,
# a gzipped pg_dump of the database, and the coordinator's /app/data tree.
backup_current_deployment() {
    log "Backing up current deployment..."
    # Timestamped backup directory
    local backup_dir="/opt/aitbc/backups/pre-deployment-$(date +%Y%m%d_%H%M%S)"
    mkdir -p "$backup_dir" || error "Failed to create backup directory"
    # Backup current configuration
    kubectl get all -n "$NAMESPACE" -o yaml > "$backup_dir/current-deployment.yaml"
    # Backup database (pipeline runs without pipefail — a pg_dump failure is
    # masked by gzip's exit status; DATABASE_URL must be set).
    pg_dump "$DATABASE_URL" | gzip > "$backup_dir/database_backup.sql.gz"
    # Backup application data.
    # BUG FIX: `kubectl cp` does not accept `deployment/NAME` targets (only
    # `kubectl exec` does) — resolve a concrete coordinator-api pod first.
    local pod
    pod=$(kubectl get pods -n "$NAMESPACE" -l app=coordinator-api \
        -o jsonpath='{.items[0].metadata.name}') || error "No coordinator-api pod found for backup"
    kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf /tmp/app_data_backup.tar.gz /app/data
    kubectl cp "$NAMESPACE/$pod:/tmp/app_data_backup.tar.gz" "$backup_dir/app_data_backup.tar.gz"
    success "Backup completed: $backup_dir"
}
# Build every production image (CLI via the 'production' build target, plus
# one image per apps/ service with a Dockerfile), then push the full set to
# the registry. All builds complete before any push starts.
build_production_images() {
    log "Building production images..."
    if ! docker build -t aitbc/cli:$VERSION -f Dockerfile --target production .; then
        error "Failed to build CLI image"
    fi
    local dir name
    for dir in apps/*/; do
        [ -f "$dir/Dockerfile" ] || continue
        name=$(basename "$dir")
        log "Building $name image..."
        docker build -t aitbc/$name:$VERSION -f "$dir/Dockerfile" "$dir" || error "Failed to build $name image"
    done
    # Push phase — under `set -e` a failed push aborts the script.
    log "Pushing images to registry..."
    docker push aitbc/cli:$VERSION
    for dir in apps/*/; do
        [ -f "$dir/Dockerfile" ] || continue
        name=$(basename "$dir")
        docker push aitbc/$name:$VERSION
    done
    success "Production images built and pushed"
}
# Deploy database
# Installs/upgrades production PostgreSQL (with one read replica) and Redis
# (with two replicas) via the Bitnami Helm charts. --wait blocks until each
# release is ready or the timeout elapses.
# NOTE(review): assumes the 'bitnami' Helm repo is already configured on this
# machine — unlike deploy.sh, this script never runs `helm repo add`; confirm.
deploy_database() {
    log "Deploying database..."
    # Deploy PostgreSQL — primary sized for production, plus 1 read replica.
    helm upgrade --install postgres bitnami/postgresql \
        --namespace $NAMESPACE \
        --set auth.postgresPassword=$POSTGRES_PASSWORD \
        --set auth.database=aitbc_prod \
        --set primary.persistence.size=100Gi \
        --set primary.resources.requests.memory=8Gi \
        --set primary.resources.requests.cpu=2000m \
        --set primary.resources.limits.memory=16Gi \
        --set primary.resources.limits.cpu=4000m \
        --set readReplicas.replicaCount=1 \
        --set readReplicas.persistence.size=50Gi \
        --wait \
        --timeout 10m || error "Failed to deploy PostgreSQL"
    # Deploy Redis — master plus 2 replicas.
    helm upgrade --install redis bitnami/redis \
        --namespace $NAMESPACE \
        --set auth.password=$REDIS_PASSWORD \
        --set master.persistence.size=20Gi \
        --set master.resources.requests.memory=2Gi \
        --set master.resources.requests.cpu=1000m \
        --set master.resources.limits.memory=4Gi \
        --set master.resources.limits.cpu=2000m \
        --set replica.replicaCount=2 \
        --wait \
        --timeout 5m || error "Failed to deploy Redis"
    success "Database deployed successfully"
}
# Deploy core services
# Renders and applies a Deployment (2 replicas, port 8007, /health probes)
# plus a ClusterIP Service for each blockchain-layer service. Credentials come
# from the 'aitbc-secrets' Secret created by create_secrets().
# FIX: the heredoc YAML in this copy had lost all indentation, producing
# invalid manifests — restored to standard 2-space Kubernetes YAML nesting.
deploy_core_services() {
    log "Deploying core services..."
    local service manifest
    for service in blockchain-node consensus-node network-node; do
        log "Deploying $service..."
        # Render the manifest to a private temp file (mktemp avoids
        # predictable /tmp names); unquoted EOF so $service/$VERSION expand.
        manifest=$(mktemp) || error "Failed to create temp manifest"
        cat > "$manifest" << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  replicas: 2
  selector:
    matchLabels:
      app: $service
  template:
    metadata:
      labels:
        app: $service
    spec:
      containers:
        - name: $service
          image: aitbc/$service:$VERSION
          ports:
            - containerPort: 8007
              name: http
          env:
            - name: NODE_ENV
              value: "production"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: database-url
            - name: REDIS_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: redis-url
          resources:
            requests:
              memory: "2Gi"
              cpu: "1000m"
            limits:
              memory: "4Gi"
              cpu: "2000m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8007
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8007
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  selector:
    app: $service
  ports:
    - port: 8007
      targetPort: 8007
  type: ClusterIP
EOF
        # Apply deployment, then block until the rollout finishes.
        kubectl apply -f "$manifest" -n $NAMESPACE || error "Failed to deploy $service"
        kubectl rollout status deployment/$service -n $NAMESPACE --timeout=300s || error "Failed to rollout $service"
        rm -f "$manifest"
    done
    success "Core services deployed successfully"
}
# Deploy application services
# Renders and applies a Deployment (3 replicas, per-service port, /health
# probes, secrets-backed env) plus a ClusterIP Service for each application
# service. Ports are assigned per service in the case table below.
# FIX: the heredoc YAML in this copy had lost all indentation, producing
# invalid manifests — restored to standard 2-space Kubernetes YAML nesting.
deploy_application_services() {
    log "Deploying application services..."
    local services=("coordinator-api" "exchange-integration" "compliance-service" "trading-engine" "plugin-registry" "plugin-marketplace" "plugin-security" "plugin-analytics" "global-infrastructure" "global-ai-agents" "multi-region-load-balancer")
    local service port manifest
    for service in "${services[@]}"; do
        log "Deploying $service..."
        # Per-service container/service port.
        case $service in
            "coordinator-api") port=8001 ;;
            "exchange-integration") port=8010 ;;
            "compliance-service") port=8011 ;;
            "trading-engine") port=8012 ;;
            "plugin-registry") port=8013 ;;
            "plugin-marketplace") port=8014 ;;
            "plugin-security") port=8015 ;;
            "plugin-analytics") port=8016 ;;
            "global-infrastructure") port=8017 ;;
            "global-ai-agents") port=8018 ;;
            "multi-region-load-balancer") port=8019 ;;
        esac
        # Render the manifest to a private temp file; unquoted EOF so
        # $service/$port/$VERSION/$NAMESPACE expand.
        manifest=$(mktemp) || error "Failed to create temp manifest"
        cat > "$manifest" << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $service
  template:
    metadata:
      labels:
        app: $service
    spec:
      containers:
        - name: $service
          image: aitbc/$service:$VERSION
          ports:
            - containerPort: $port
              name: http
          env:
            - name: NODE_ENV
              value: "production"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: database-url
            - name: REDIS_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: redis-url
            - name: JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: jwt-secret
            - name: ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: encryption-key
          resources:
            requests:
              memory: "1Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          livenessProbe:
            httpGet:
              path: /health
              port: $port
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: $port
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  selector:
    app: $service
  ports:
    - port: $port
      targetPort: $port
  type: ClusterIP
EOF
        # Apply deployment, then block until the rollout finishes.
        kubectl apply -f "$manifest" -n $NAMESPACE || error "Failed to deploy $service"
        kubectl rollout status deployment/$service -n $NAMESPACE --timeout=300s || error "Failed to rollout $service"
        rm -f "$manifest"
    done
    success "Application services deployed successfully"
}
# Deploy ingress and load balancer
# Publishes the public hosts (api/marketplace/explorer.$DOMAIN) through an
# nginx Ingress with Let's Encrypt TLS (cert-manager) and basic rate limiting.
# FIX: the heredoc YAML in this copy had lost all indentation, producing an
# invalid manifest — restored to standard 2-space Kubernetes YAML nesting.
deploy_ingress() {
    log "Deploying ingress and load balancer..."
    local manifest
    manifest=$(mktemp) || error "Failed to create temp manifest"
    # Unquoted EOF so $NAMESPACE/$DOMAIN expand in the rendered manifest.
    cat > "$manifest" << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: aitbc-ingress
  namespace: $NAMESPACE
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
spec:
  tls:
    - hosts:
        - api.$DOMAIN
        - marketplace.$DOMAIN
        - explorer.$DOMAIN
      secretName: aitbc-tls
  rules:
    - host: api.$DOMAIN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: coordinator-api
                port:
                  number: 8001
    - host: marketplace.$DOMAIN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: plugin-marketplace
                port:
                  number: 8014
    - host: explorer.$DOMAIN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: explorer
                port:
                  number: 8020
EOF
    kubectl apply -f "$manifest" -n $NAMESPACE || error "Failed to deploy ingress"
    rm -f "$manifest"
    success "Ingress deployed successfully"
}
# Deploy monitoring
# Installs kube-prometheus-stack (Prometheus with 30d retention + Grafana +
# default alert rules), then publishes the project's Grafana dashboards as a
# ConfigMap (idempotent via dry-run | apply).
# NOTE(review): assumes the 'prometheus-community' Helm repo was added
# beforehand — this script never runs `helm repo add` for it; confirm.
deploy_monitoring() {
    log "Deploying monitoring stack..."
    # Deploy Prometheus
    helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
        --namespace $NAMESPACE \
        --create-namespace \
        --set prometheus.prometheus.spec.retention=30d \
        --set prometheus.prometheus.spec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=50Gi \
        --set grafana.adminPassword=$GRAFANA_PASSWORD \
        --set grafana.persistence.size=10Gi \
        --set defaultRules.create=true \
        --wait \
        --timeout 10m || error "Failed to deploy monitoring"
    # Import Grafana dashboards
    log "Importing Grafana dashboards..."
    # Create dashboard configmaps
    kubectl create configmap grafana-dashboards \
        --from-file=monitoring/grafana/dashboards/ \
        -n $NAMESPACE \
        --dry-run=client -o yaml | kubectl apply -f -
    success "Monitoring deployed successfully"
}
# Run post-deployment tests
# Waits for pods to become Ready, then probes each application service's
# /health endpoint via a temporary port-forward, and finally checks the
# public HTTPS endpoints. error() exits on the first failure.
post_deployment_tests() {
    log "Running post-deployment tests..."
    # Wait for all services to be ready.
    # NOTE(review): `-l app!=pod` is an odd selector (any pod whose 'app'
    # label is not literally "pod") — confirm it is intentional.
    kubectl wait --for=condition=ready pod -l app!=pod -n $NAMESPACE --timeout=600s
    # Internal service:port pairs to probe.
    local endpoints=(
        "coordinator-api:8001"
        "exchange-integration:8010"
        "trading-engine:8012"
        "plugin-registry:8013"
        "plugin-marketplace:8014"
    )
    local service_port service port port_forward_pid
    for service_port in "${endpoints[@]}"; do
        service=${service_port%%:*}
        port=${service_port##*:}
        log "Testing $service..."
        # BUG FIX: the original forwarded every service to container port
        # 8007, but the application manifests expose each service on its own
        # containerPort (coordinator-api 8001, exchange-integration 8010, …).
        # Forward local $port to the service's actual container port.
        kubectl port-forward -n $NAMESPACE deployment/$service "$port:$port" &
        port_forward_pid=$!
        sleep 5
        if curl -f -s http://localhost:$port/health > /dev/null; then
            success "$service is healthy"
        else
            # NOTE: error() exits immediately, leaving this port-forward
            # running; it dies with the script's terminal session.
            error "$service health check failed"
        fi
        kill $port_forward_pid 2>/dev/null || true
    done
    # Test external endpoints through the public ingress.
    local external_endpoints=(
        "https://api.$DOMAIN/health"
        "https://marketplace.$DOMAIN/api/v1/marketplace/featured"
    )
    local endpoint
    for endpoint in "${external_endpoints[@]}"; do
        log "Testing $endpoint..."
        if curl -f -s "$endpoint" > /dev/null; then
            success "$endpoint is responding"
        else
            error "$endpoint is not responding"
        fi
    done
    success "Post-deployment tests passed"
}
# Materialize runtime credentials from the environment as the 'aitbc-secrets'
# Secret. The dry-run | apply pipeline keeps re-runs idempotent (a plain
# `create` would fail once the secret exists).
create_secrets() {
    log "Creating secrets..."
    local -a literals=(
        --from-literal=database-url="$DATABASE_URL"
        --from-literal=redis-url="$REDIS_URL"
        --from-literal=jwt-secret="$JWT_SECRET"
        --from-literal=encryption-key="$ENCRYPTION_KEY"
        --from-literal=postgres-password="$POSTGRES_PASSWORD"
        --from-literal=redis-password="$REDIS_PASSWORD"
    )
    kubectl create secret generic aitbc-secrets "${literals[@]}" \
        --namespace $NAMESPACE \
        --dry-run=client -o yaml | kubectl apply -f -
    success "Secrets created"
}
# Main deployment function
# Runs the full production pipeline in order: checks → secrets → backup →
# images → database → core services → app services → ingress → monitoring →
# post-deploy tests, then prints where everything is reachable.
main() {
    log "Starting AITBC production deployment..."
    log "Environment: $ENVIRONMENT"
    log "Version: $VERSION"
    log "Region: $REGION"
    log "Domain: $DOMAIN"
    # Check prerequisites
    command -v kubectl >/dev/null 2>&1 || error "kubectl is not installed"
    command -v helm >/dev/null 2>&1 || error "Helm is not installed"
    kubectl cluster-info >/dev/null 2>&1 || error "Cannot connect to Kubernetes cluster"
    # Run deployment steps (each aborts the script via error() on failure).
    pre_deployment_checks
    create_secrets
    backup_current_deployment
    build_production_images
    deploy_database
    deploy_core_services
    deploy_application_services
    deploy_ingress
    deploy_monitoring
    post_deployment_tests
    success "Production deployment completed successfully!"
    # Display deployment information
    log "Deployment Information:"
    log "Environment: $ENVIRONMENT"
    log "Version: $VERSION"
    log "Namespace: $NAMESPACE"
    log "Domain: $DOMAIN"
    log ""
    log "Services are available at:"
    log "  API: https://api.$DOMAIN"
    log "  Marketplace: https://marketplace.$DOMAIN"
    log "  Explorer: https://explorer.$DOMAIN"
    log "  Grafana: https://grafana.$DOMAIN"
    log ""
    log "To check deployment status:"
    log "  kubectl get pods -n $NAMESPACE"
    log "  kubectl get services -n $NAMESPACE"
    log ""
    log "To view logs:"
    log "  kubectl logs -f deployment/coordinator-api -n $NAMESPACE"
}
# Handle script interruption
trap 'error "Script interrupted"' INT TERM
# Re-export the credential/config variables so they are visible to child
# processes (helm, kubectl). No defaults here: unlike deploy.sh, production
# values must come from the caller's environment.
export DATABASE_URL=${DATABASE_URL}
export REDIS_URL=${REDIS_URL}
export JWT_SECRET=${JWT_SECRET}
export ENCRYPTION_KEY=${ENCRYPTION_KEY}
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
export REDIS_PASSWORD=${REDIS_PASSWORD}
export GRAFANA_PASSWORD=${GRAFANA_PASSWORD}
export VERSION=${VERSION}
export NAMESPACE=${NAMESPACE}
export DOMAIN=${DOMAIN}
# Run main function
main "$@"

View File

@@ -4,30 +4,30 @@
case "${1:-help}" in
"start")
echo "Starting AITBC services..."
sudo systemctl start aitbc-coordinator-api.service
sudo systemctl start aitbc-blockchain-node.service
sudo systemctl start aitbc-blockchain-rpc.service
systemctl start aitbc-coordinator-api.service
systemctl start aitbc-blockchain-node.service
systemctl start aitbc-blockchain-rpc.service
echo "Services started"
;;
"stop")
echo "Stopping AITBC services..."
sudo systemctl stop aitbc-coordinator-api.service
sudo systemctl stop aitbc-blockchain-node.service
sudo systemctl stop aitbc-blockchain-rpc.service
systemctl stop aitbc-coordinator-api.service
systemctl stop aitbc-blockchain-node.service
systemctl stop aitbc-blockchain-rpc.service
echo "Services stopped"
;;
"restart")
echo "Restarting AITBC services..."
sudo systemctl restart aitbc-coordinator-api.service
sudo systemctl restart aitbc-blockchain-node.service
sudo systemctl restart aitbc-blockchain-rpc.service
systemctl restart aitbc-coordinator-api.service
systemctl restart aitbc-blockchain-node.service
systemctl restart aitbc-blockchain-rpc.service
echo "Services restarted"
;;
"status")
echo "=== AITBC Services Status ==="
sudo systemctl status aitbc-coordinator-api.service --no-pager
sudo systemctl status aitbc-blockchain-node.service --no-pager
sudo systemctl status aitbc-blockchain-rpc.service --no-pager
systemctl status aitbc-coordinator-api.service --no-pager
systemctl status aitbc-blockchain-node.service --no-pager
systemctl status aitbc-blockchain-rpc.service --no-pager
;;
"logs")
echo "=== AITBC Service Logs ==="

View File

@@ -0,0 +1,32 @@
"""Debug helper: inspect the blockchain node's on-disk mempool.

Initializes the mempool exactly like the main node (same backend, DB path,
size and fee settings from aitbc_chain.config.settings), dumps the raw rows
for the 'ait-mainnet' chain, then drains up to 100 txs and prints them.
NOTE(review): drain() presumably REMOVES the drained transactions from the
mempool — running this against a live node may consume pending txs; confirm.
"""
import sys
from pathlib import Path
import json  # NOTE(review): unused in this script

# Make the node's sources importable from the deployment location.
sys.path.insert(0, str(Path('/opt/aitbc/apps/blockchain-node/src')))
from aitbc_chain.config import settings
from aitbc_chain.mempool import init_mempool, get_mempool

# Use development mempool backend configuration exactly like main node
init_mempool(
    backend=settings.mempool_backend,
    db_path=str(settings.db_path.parent / "mempool.db"),
    max_size=settings.mempool_max_size,
    min_fee=settings.min_fee,
)
mempool = get_mempool()
print(f"Mempool class: {mempool.__class__.__name__}")
# Pokes private attributes (_db_path, _conn) — acceptable for a debug script,
# but brittle against mempool implementation changes.
print(f"Mempool DB path: {mempool._db_path}")
chain_id = 'ait-mainnet'
rows = mempool._conn.execute("SELECT * FROM mempool WHERE chain_id = ?", (chain_id,)).fetchall()
print(f"Found {len(rows)} raw rows in DB")
for r in rows:
    print(r)
txs = mempool.drain(100, 1000000, chain_id)
print(f"Drained {len(txs)} txs")
for tx in txs:
    print(tx)

261
scripts/testing/run_all_tests.sh Executable file
View File

@@ -0,0 +1,261 @@
#!/bin/bash
# Master Test Runner for Multi-Site AITBC Testing
echo "🚀 Multi-Site AITBC Test Suite Master Runner"
echo "=========================================="
echo "Testing localhost, aitbc, and aitbc1 with all CLI features"
echo ""
# Resolve project root (directory containing this script)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): PROJECT_ROOT is identical to SCRIPT_DIR (the second cd is a
# no-op), so despite the name it points at this script's directory, not the
# repository root. The test_scenario_*.sh and home/ lookups below depend on
# that — confirm before "fixing" the name.
PROJECT_ROOT="$(cd "$SCRIPT_DIR" && pwd)"
# Run one scenario script and report its outcome.
#   $1 - human-readable scenario name
#   $2 - path to the scenario's shell script
# Returns the script's exit code, or 1 if the script file is missing.
run_scenario() {
    local name=$1
    local script=$2
    echo ""
    echo "🔧 Running $name"
    echo "================================"
    if [ ! -f "$script" ]; then
        echo "❌ Script not found: $script"
        return 1
    fi
    bash "$script"
    local rc=$?
    if [ $rc -eq 0 ]; then
        echo "$name completed successfully"
    else
        echo "$name failed with exit code $rc"
    fi
    return $rc
}
# Check that the multi-site test environment is usable: aitbc CLI on PATH,
# both marketplace endpoints reachable, Ollama available, SSH access to both
# containers, and the miner1/client1 wallet files present.
# NOTE: only a missing CLI returns non-zero; every other failed probe just
# prints a ❌ line and the function still "passes".
check_prerequisites() {
    echo "🔍 Checking prerequisites..."
    echo "=========================="
    # Check if aitbc CLI is available — the only hard requirement.
    if command -v aitbc &> /dev/null; then
        echo "✅ AITBC CLI found"
        aitbc --version | head -1
    else
        echo "❌ AITBC CLI not found in PATH"
        echo "Please ensure CLI is installed and in PATH"
        return 1
    fi
    # Check if required services are running (informational only).
    echo ""
    echo "🌐 Checking service connectivity..."
    # Check aitbc connectivity
    if curl -s http://127.0.0.1:18000/v1/health &> /dev/null; then
        echo "✅ aitbc marketplace accessible (port 18000)"
    else
        echo "❌ aitbc marketplace not accessible (port 18000)"
    fi
    # Check aitbc1 connectivity
    if curl -s http://127.0.0.1:18001/v1/health &> /dev/null; then
        echo "✅ aitbc1 marketplace accessible (port 18001)"
    else
        echo "❌ aitbc1 marketplace not accessible (port 18001)"
    fi
    # Check Ollama (local GPU inference service)
    if ollama list &> /dev/null; then
        echo "✅ Ollama GPU service available"
        ollama list | head -3
    else
        echo "❌ Ollama GPU service not available"
    fi
    # Check SSH access to containers (host aliases from ~/.ssh/config).
    echo ""
    echo "🏢 Checking container access..."
    if ssh aitbc-cascade "echo 'SSH OK'" &> /dev/null; then
        echo "✅ SSH access to aitbc container"
    else
        echo "❌ SSH access to aitbc container failed"
    fi
    if ssh aitbc1-cascade "echo 'SSH OK'" &> /dev/null; then
        echo "✅ SSH access to aitbc1 container"
    else
        echo "❌ SSH access to aitbc1 container failed"
    fi
    echo ""
    echo "📋 Checking user configurations..."
    # Check miner1 and client1 wallet files (relative to PROJECT_ROOT, which
    # is this script's directory — see header note).
    local home_dir="$PROJECT_ROOT/home"
    if [ -f "$home_dir/miner1/miner_wallet.json" ]; then
        echo "✅ miner1 configuration found"
    else
        echo "❌ miner1 configuration missing"
    fi
    if [ -f "$home_dir/client1/client_wallet.json" ]; then
        echo "✅ client1 configuration found"
    else
        echo "❌ client1 configuration missing"
    fi
    echo ""
    echo "🔧 Prerequisite check complete"
    echo "=============================="
}
# Run every CLI smoke command against both sites and report pass/fail counts.
# Returns the number of FAILED commands (0 == all passed).
run_cli_tests() {
    echo ""
    echo "🔧 Running Comprehensive CLI Tests"
    echo "================================="
    # BUG FIX: the original entries used ':' as the name/command separator,
    # but both the test names ("chain:list:aitbc1") and the commands
    # ("http://127.0.0.1:18000") contain ':', so `IFS=':' read` split at the
    # first colon and eval ran a garbage command ("list:aitbc ..."). Use '|',
    # which appears in neither field.
    local cli_commands=(
        "chain:list|aitbc chain list --node-endpoint http://127.0.0.1:18000"
        "chain:list:aitbc1|aitbc chain list --node-endpoint http://127.0.0.1:18001"
        "analytics:summary:aitbc|aitbc analytics summary --node-endpoint http://127.0.0.1:18000"
        "analytics:summary:aitbc1|aitbc analytics summary --node-endpoint http://127.0.0.1:18001"
        "marketplace:list:aitbc|aitbc marketplace list --marketplace-url http://127.0.0.1:18000"
        "marketplace:list:aitbc1|aitbc marketplace list --marketplace-url http://127.0.0.1:18001"
        "agent_comm:list:aitbc|aitbc agent_comm list --node-endpoint http://127.0.0.1:18000"
        "agent_comm:list:aitbc1|aitbc agent_comm list --node-endpoint http://127.0.0.1:18001"
        "deploy:overview|aitbc deploy overview --format table"
    )
    local passed=0
    local total=0
    local cmd_info test_name command
    for cmd_info in "${cli_commands[@]}"; do
        IFS='|' read -r test_name command <<< "$cmd_info"
        total=$((total + 1))
        echo "Testing: $test_name"
        if eval "$command" &> /dev/null; then
            echo "$test_name - PASSED"
            passed=$((passed + 1))
        else
            echo "$test_name - FAILED"
        fi
    done
    echo ""
    echo "CLI Test Results: $passed/$total passed"
    return $((total - passed))
}
# Print the final pass/fail summary.
#   $1 - total number of scenarios run
#   $2 - number that passed
# Returns 0 when everything passed, 1 otherwise.
generate_report() {
    local total=$1
    local passed=$2
    local failed=$((total - passed))
    echo ""
    echo "📊 FINAL TEST REPORT"
    echo "==================="
    echo "Total Scenarios: $total"
    echo "Passed: $passed"
    echo "Failed: $failed"
    if [ $failed -ne 0 ]; then
        echo ""
        echo "⚠️ SOME TESTS FAILED"
        echo "Please check the failed scenarios and fix issues"
        return 1
    fi
    echo ""
    echo "🎉 ALL TESTS PASSED!"
    echo "Multi-site AITBC ecosystem is fully functional"
    return 0
}
# Main execution
# Runs prerequisites, the CLI smoke tests, the four scenario scripts, and the
# comprehensive python suite, then hands the tallies to generate_report.
main() {
    local scenario_count=0
    local passed_count=0
    # Check prerequisites — abort if the CLI itself is missing.
    if ! check_prerequisites; then
        echo "❌ Prerequisites not met. Exiting."
        exit 1
    fi
    # Run CLI tests first (counts as one scenario).
    echo ""
    if run_cli_tests; then
        echo "✅ All CLI tests passed"
        passed_count=$((passed_count + 1))
    else
        echo "❌ Some CLI tests failed"
    fi
    scenario_count=$((scenario_count + 1))
    # Run scenario tests.
    # BUG FIX: scenario labels contain ':' ("Scenario A: ..."), so the
    # original `IFS=':' read` split on the first colon and handed run_scenario
    # a mangled script path. '|' appears in neither field.
    local scenarios=(
        "Scenario A: Localhost GPU Miner → aitbc Marketplace|$PROJECT_ROOT/test_scenario_a.sh"
        "Scenario B: Localhost GPU Client → aitbc1 Marketplace|$PROJECT_ROOT/test_scenario_b.sh"
        "Scenario C: aitbc Container User Operations|$PROJECT_ROOT/test_scenario_c.sh"
        "Scenario D: aitbc1 Container User Operations|$PROJECT_ROOT/test_scenario_d.sh"
    )
    local scenario_info scenario_name script_path
    for scenario_info in "${scenarios[@]}"; do
        IFS='|' read -r scenario_name script_path <<< "$scenario_info"
        scenario_count=$((scenario_count + 1))
        if run_scenario "$scenario_name" "$script_path"; then
            passed_count=$((passed_count + 1))
        fi
    done
    # Run comprehensive test suite (counts as one scenario).
    echo ""
    echo "🔧 Running Comprehensive Test Suite"
    echo "=================================="
    if python3 "$PROJECT_ROOT/test_multi_site.py"; then
        echo "✅ Comprehensive test suite passed"
        passed_count=$((passed_count + 1))
    else
        echo "❌ Comprehensive test suite failed"
    fi
    scenario_count=$((scenario_count + 1))
    # Generate final report
    generate_report $scenario_count $passed_count
}
# Parse command line arguments
# Sub-modes run a single phase in isolation; any other argument (or none)
# runs the full suite via main.
case "${1:-all}" in
    "prereq")
        check_prerequisites
        ;;
    "cli")
        run_cli_tests
        ;;
    "scenario-a")
        run_scenario "Scenario A" "$PROJECT_ROOT/test_scenario_a.sh"
        ;;
    "scenario-b")
        run_scenario "Scenario B" "$PROJECT_ROOT/test_scenario_b.sh"
        ;;
    "scenario-c")
        run_scenario "Scenario C" "$PROJECT_ROOT/test_scenario_c.sh"
        ;;
    "scenario-d")
        run_scenario "Scenario D" "$PROJECT_ROOT/test_scenario_d.sh"
        ;;
    "comprehensive")
        python3 "$PROJECT_ROOT/test_multi_site.py"
        ;;
    "all"|*)
        main
        ;;
esac

28
scripts/testing/run_test.py Executable file
View File

@@ -0,0 +1,28 @@
"""Exploratory scaffold: how to mock config I/O when invoking the `node` CLI group.

Both patch stacks below are intentionally inert (``pass`` bodies) -- they record
two patching strategies tried while diagnosing why the real
``save_multichain_config`` was still being called (the observed
"Failed to save configuration" implied the unpatched function ran).
Kept as working notes; importing this file performs no CLI invocation.
"""
from unittest.mock import patch, MagicMock

from click.testing import CliRunner
from aitbc_cli.commands.node import node
from aitbc_cli.core.config import MultiChainConfig

runner = CliRunner()

# Strategy 1: patch save_multichain_config at its defining module.
# node.py does `from ..core.config import save_multichain_config`, which
# resolves to `aitbc_cli.core.config` because node.py lives in
# `aitbc_cli.commands`.
with patch('aitbc_cli.commands.node.load_multichain_config') as mock_load:
    with patch('aitbc_cli.commands.node.get_default_node_config') as mock_default:
        with patch('aitbc_cli.commands.node.add_node_config') as mock_add:
            with patch('aitbc_cli.core.config.save_multichain_config') as mock_save:
                # Click catches exceptions and prints the generic
                # "Error adding node: ..." message, which masked the real
                # failure during the earlier run.
                pass

# Strategy 2: skip mocking the save entirely and let the command write to a
# temporary config instead (depends on how node.py loads its configuration;
# an alternative is patching at the sys.modules level for relative imports).
with patch('aitbc_cli.commands.node.load_multichain_config') as mock_load:
    with patch('aitbc_cli.commands.node.get_default_node_config') as mock_default:
        with patch('aitbc_cli.commands.node.add_node_config') as mock_add:
            pass

View File

@@ -0,0 +1,16 @@
import asyncio
from broadcaster import Broadcast


async def main():
    # Smoke-test a pub/sub round-trip against a local Redis broker:
    # connect, subscribe, publish one message, read it back, disconnect.
    bus = Broadcast("redis://localhost:6379")
    await bus.connect()
    print("connected")
    async with bus.subscribe("test") as subscriber:
        print("subscribed")
        await bus.publish("test", "hello")
        async for event in subscriber:
            print("msg:", event.message)
            break  # one message is enough for the smoke test
    await bus.disconnect()


asyncio.run(main())

View File

@@ -0,0 +1,10 @@
import requests

# Smoke-check the local marketplace HTTP endpoints. A bounded timeout keeps
# the probe from hanging forever when the service is down or unresponsive
# (requests.get blocks indefinitely without one).
try:
    response = requests.get('http://127.0.0.1:8000/v1/marketplace/offers', timeout=5)
    print("Offers:", response.status_code)
    response = requests.get('http://127.0.0.1:8000/v1/marketplace/stats', timeout=5)
    print("Stats:", response.status_code)
except Exception as e:
    # Best-effort probe: report connection/timeout problems instead of crashing.
    print("Error:", e)

View File

@@ -0,0 +1,23 @@
"""One-off probe: seed a MarketplaceBid row and exercise get_marketplace_analytics."""
import asyncio
from sqlmodel import Session
from app.services.marketplace_enhanced_simple import EnhancedMarketplaceService
from app.database import engine
from app.domain.marketplace import MarketplaceBid


async def run():
    with Session(engine) as session:
        # Insert a bid so analytics has at least one row
        # (checks "amount" vs "price" field handling in the service).
        bid = MarketplaceBid(provider="prov", capacity=10, price=1.0)
        session.add(bid)
        session.commit()
        service = EnhancedMarketplaceService(session)
        try:
            res = await service.get_marketplace_analytics(period_days=30, metrics=["volume", "revenue"])
            print(res)
        except Exception:
            # Diagnostic probe: print the full traceback rather than crashing.
            import traceback
            traceback.print_exc()


asyncio.run(run())

View File

@@ -0,0 +1,21 @@
import asyncio
from apps.agent_services.agent_bridge.src.integration_layer import AgentServiceBridge


async def main():
    # Register a sample trading agent through the coordinator integration
    # and print whatever the coordinator returns.
    bridge = AgentServiceBridge()
    registration = {
        "name": "test-agent-123",
        "type": "trading",
        "capabilities": ["trade"],
        "chain_id": "ait-mainnet",
        "endpoint": "http://localhost:8005",
        "version": "1.0.0",
        "description": "Test trading agent",
    }
    async with bridge.integration as integration:
        outcome = await integration.register_agent_with_coordinator(registration)
        print(f"Result: {outcome}")


if __name__ == "__main__":
    asyncio.run(main())

2
scripts/testing/test_send.sh Executable file
View File

@@ -0,0 +1,2 @@
#!/usr/bin/env bash
# Smoke test: send 50 tokens from the test wallet to a fixed HD address.
# The file is marked executable, so it needs a shebang; strict mode makes a
# failed `aitbc` invocation surface as a non-zero exit instead of silence.
set -euo pipefail

export AITBC_WALLET="test_wallet"
aitbc wallet send aitbc1my-test-wallet_hd 50

View File

@@ -0,0 +1,22 @@
"""Create a funded test-wallet fixture at /root/.aitbc/wallets/test_wallet.json."""
import json
import pathlib

# Minimal wallet record for CLI testing.
# NOTE(review): field set presumably mirrors the CLI's wallet schema -- confirm
# against the wallet loader before relying on this fixture.
wallet_data = {
    "name": "test_wallet",
    "type": "hd",
    "address": "aitbc1genesis",
    "private_key": "dummy",  # placeholder only, never a real key
    "public_key": "dummy",
    "encrypted": False,
    "transactions": [],
    "balance": 1000000,
}

wallet_dir = pathlib.Path("/root/.aitbc/wallets")
wallet_dir.mkdir(parents=True, exist_ok=True)
wallet_path = wallet_dir / "test_wallet.json"
with open(wallet_path, "w") as f:
    json.dump(wallet_data, f)

Some files were not shown because too many files have changed in this diff Show More