refactor: remove Docker configuration files - transitioning to native deployment

- Remove Dockerfile for CLI multi-stage build
- Remove docker-compose.yml with 20+ service definitions
- Remove containerized deployment infrastructure (blockchain, consensus, network nodes)
- Remove plugin ecosystem services (registry, marketplace, security, analytics)
- Remove global infrastructure and AI agent services
- Remove monitoring stack (Prometheus, Grafana) and nginx reverse proxy
- Remove database services
This commit is contained in:
AITBC System
2026-03-18 20:44:21 +01:00
parent d2cdd39548
commit fe3e8b82e5
35 changed files with 384 additions and 1477 deletions

View File

@@ -1,392 +0,0 @@
#!/bin/bash
# AITBC Automated Deployment Script
# This script handles automated deployment of AITBC services
#
# Usage: <script> [environment] [version] [region]
#   environment: local|docker|staging|production|rollback|cleanup (default: staging)
#   version:     image tag to build/deploy (default: latest)
#   region:      target cloud region (default: us-east-1)
set -e

# Configuration — positional arguments with defaults
ENVIRONMENT=${1:-staging}
VERSION=${2:-latest}
REGION=${3:-us-east-1}
NAMESPACE="aitbc-${ENVIRONMENT}"

# Colors for output (ANSI escape sequences used by the logging helpers)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers. Each takes a single message argument.
# log/success are informational and go to stdout; error/warning are
# diagnostics and are sent to stderr (error additionally aborts the script),
# so stdout stays clean for pipelines and captured output.
log() {
  echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}

# Print a fatal error to stderr and exit with status 1.
error() {
  echo -e "${RED}[ERROR]${NC} $1" >&2
  exit 1
}

success() {
  echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Non-fatal diagnostic on stderr.
warning() {
  echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}
# Verify every tool and connection the deployment needs before doing work.
# Aborts via error() on the first missing prerequisite.
check_prerequisites() {
  local pair tool label
  log "Checking prerequisites..."

  # Required CLI tools, as "command=human-readable name" pairs.
  for pair in "docker=Docker" "docker-compose=Docker Compose" "kubectl=kubectl" "helm=Helm"; do
    tool=${pair%%=*}
    label=${pair#*=}
    command -v "$tool" >/dev/null 2>&1 || error "$label is not installed"
  done

  # The Docker daemon must be up and the Kubernetes cluster reachable.
  docker info >/dev/null 2>&1 || error "Docker daemon is not running"
  kubectl cluster-info >/dev/null 2>&1 || error "Cannot connect to Kubernetes cluster"

  success "Prerequisites check passed"
}
# Build the CLI image plus one image per apps/<service>/ directory that
# ships a Dockerfile, all tagged with the global $VERSION.
# Quotes every expansion so paths/tags with unusual characters cannot
# word-split, and keeps loop variables local to the function.
build_images() {
  local service_dir service_name
  log "Building Docker images..."

  # Build CLI image
  log "Building CLI image..."
  docker build -t "aitbc/cli:${VERSION}" -f Dockerfile . || error "Failed to build CLI image"

  # Build one image per service directory that provides a Dockerfile
  for service_dir in apps/*/; do
    if [ -f "$service_dir/Dockerfile" ]; then
      service_name=$(basename "$service_dir")
      log "Building ${service_name} image..."
      docker build -t "aitbc/${service_name}:${VERSION}" -f "$service_dir/Dockerfile" "$service_dir" || error "Failed to build ${service_name} image"
    fi
  done

  success "All Docker images built successfully"
}
# Execute the full pre-deployment test matrix (unit with coverage,
# integration, security, lightweight performance). Any failing suite
# aborts the deployment via error().
run_tests() {
  log "Running tests..."

  log "Running unit tests..."
  if ! pytest tests/unit/ -v --cov=aitbc_cli --cov-report=term; then
    error "Unit tests failed"
  fi

  log "Running integration tests..."
  if ! pytest tests/integration/ -v; then
    error "Integration tests failed"
  fi

  log "Running security tests..."
  if ! pytest tests/security/ -v; then
    error "Security tests failed"
  fi

  log "Running performance tests..."
  if ! pytest tests/performance/test_performance_lightweight.py::TestPerformance::test_cli_performance -v; then
    error "Performance tests failed"
  fi

  success "All tests passed"
}
# Deploy the full AITBC stack to Kubernetes.
#
# Renders k8s/deployments/*.yaml with envsubst (relies on the exported
# VERSION/NAMESPACE and password variables set at the bottom of this file)
# and waits for each rollout before moving on. PostgreSQL and Redis are
# installed via the Bitnami Helm charts with --wait.
deploy_kubernetes() {
  log "Deploying to Kubernetes namespace: ${NAMESPACE}"

  # Create namespace if it doesn't exist (dry-run + apply is idempotent)
  kubectl create namespace ${NAMESPACE} --dry-run=client -o yaml | kubectl apply -f -

  # Apply secrets
  log "Applying secrets..."
  kubectl apply -f k8s/secrets/ -n ${NAMESPACE} || error "Failed to apply secrets"

  # Apply configmaps
  log "Applying configmaps..."
  kubectl apply -f k8s/configmaps/ -n ${NAMESPACE} || error "Failed to apply configmaps"

  # Deploy PostgreSQL via the Bitnami chart
  log "Deploying database..."
  helm repo add bitnami https://charts.bitnami.com/bitnami
  helm upgrade --install postgres bitnami/postgresql \
    --namespace ${NAMESPACE} \
    --set auth.postgresPassword=${POSTGRES_PASSWORD} \
    --set auth.database=aitbc \
    --set primary.persistence.size=20Gi \
    --set primary.resources.requests.memory=2Gi \
    --set primary.resources.requests.cpu=1000m \
    --wait || error "Failed to deploy database"

  # Deploy Redis
  log "Deploying Redis..."
  helm upgrade --install redis bitnami/redis \
    --namespace ${NAMESPACE} \
    --set auth.password=${REDIS_PASSWORD} \
    --set master.persistence.size=8Gi \
    --set master.resources.requests.memory=512Mi \
    --set master.resources.requests.cpu=500m \
    --wait || error "Failed to deploy Redis"

  # Deploy core services
  log "Deploying core services..."

  # Deploy blockchain services (each waits for its rollout to complete)
  for service in blockchain-node consensus-node network-node; do
    log "Deploying ${service}..."
    envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
    kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
  done

  # Deploy coordinator
  log "Deploying coordinator-api..."
  envsubst < k8s/deployments/coordinator-api.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy coordinator-api"
  kubectl rollout status deployment/coordinator-api -n ${NAMESPACE} --timeout=300s || error "Failed to rollout coordinator-api"

  # Deploy production services
  for service in exchange-integration compliance-service trading-engine; do
    log "Deploying ${service}..."
    envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
    kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
  done

  # Deploy plugin ecosystem
  for service in plugin-registry plugin-marketplace plugin-security plugin-analytics; do
    log "Deploying ${service}..."
    envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
    kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
  done

  # Deploy global infrastructure
  for service in global-infrastructure global-ai-agents multi-region-load-balancer; do
    log "Deploying ${service}..."
    envsubst < k8s/deployments/${service}.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy ${service}"
    kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollout ${service}"
  done

  # Deploy explorer
  log "Deploying explorer..."
  envsubst < k8s/deployments/explorer.yaml | kubectl apply -f - -n ${NAMESPACE} || error "Failed to deploy explorer"
  kubectl rollout status deployment/explorer -n ${NAMESPACE} --timeout=300s || error "Failed to rollout explorer"

  success "Kubernetes deployment completed"
}
# Bring the stack up locally with Docker Compose and verify the key
# containers report "Up" afterwards.
deploy_docker_compose() {
  local svc
  log "Deploying with Docker Compose..."

  # Development defaults; a real environment overrides these before calling.
  export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-aitbc123}
  export REDIS_PASSWORD=${REDIS_PASSWORD:-aitbc123}
  export GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}

  log "Stopping existing services..."
  docker-compose down || true  # tolerate "nothing running yet"

  log "Starting services..."
  docker-compose up -d || error "Failed to start services"

  # Fixed grace period before probing container state.
  log "Waiting for services to be healthy..."
  sleep 30

  local core_services=(postgres redis blockchain-node coordinator-api exchange-integration)
  for svc in "${core_services[@]}"; do
    log "Checking ${svc} health..."
    docker-compose ps "$svc" | grep -q "Up" || error "Service ${svc} is not running"
  done

  success "Docker Compose deployment completed"
}
# Run health checks against whichever backend is active: the Kubernetes
# cluster when one is reachable, otherwise the local Docker Compose stack.
run_health_checks() {
  log "Running health checks..."
  if command -v kubectl >/dev/null 2>&1 && kubectl cluster-info >/dev/null 2>&1; then
    # Kubernetes health checks
    log "Checking Kubernetes deployment health..."
    # Check pod status
    kubectl get pods -n ${NAMESPACE} || error "Failed to get pod status"
    # For each key service: require at least one Running pod, then probe
    # its /health endpoint if a LoadBalancer IP has been assigned.
    services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
    for service in "${services[@]}"; do
      log "Checking ${service} health..."
      kubectl get pods -n ${NAMESPACE} -l app=${service} -o jsonpath='{.items[0].status.phase}' | grep -q "Running" || error "${service} pods are not running"
      # Check service endpoint (empty string when no external IP yet —
      # in that case the HTTP probe is skipped)
      service_url=$(kubectl get svc ${service} -n ${NAMESPACE} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "")
      if [ -n "$service_url" ]; then
        curl -f http://${service_url}/health >/dev/null 2>&1 || error "${service} health check failed"
      fi
    done
  else
    # Docker Compose health checks
    log "Checking Docker Compose deployment health..."
    services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
    for service in "${services[@]}"; do
      log "Checking ${service} health..."
      if ! docker-compose ps ${service} | grep -q "Up"; then
        error "Service ${service} is not running"
      fi
      # Probe /health on the host port docker-compose published for the
      # container ("host:port" → keep the port after the colon)
      port=$(docker-compose port ${service} | cut -d: -f2)
      curl -f http://localhost:${port}/health >/dev/null 2>&1 || error "${service} health check failed"
    done
  fi
  success "All health checks passed"
}
# Post-deploy smoke tests: exercise the CLI entry point, then hit the
# /health endpoint of each key HTTP service via its published host port.
run_smoke_tests() {
  log "Running smoke tests..."

  # Test CLI functionality
  log "Testing CLI functionality..."
  docker-compose exec aitbc-cli python -m aitbc_cli.main --help >/dev/null || error "CLI smoke test failed"

  # Test API endpoints
  log "Testing API endpoints..."
  _smoke_http coordinator-api "Coordinator API"
  _smoke_http exchange-integration "Exchange API"
  _smoke_http plugin-registry "Plugin registry"

  success "Smoke tests passed"
}

# _smoke_http <compose-service> <label>
# Resolve the service's published host port and curl its /health endpoint;
# abort the deployment with "<label> smoke test failed" on any failure.
_smoke_http() {
  local svc=$1 label=$2 port
  port=$(docker-compose port "$svc" | cut -d: -f2)
  curl -f http://localhost:${port}/health >/dev/null || error "${label} smoke test failed"
}
# Roll back to the previous release. Kubernetes path uses
# `kubectl rollout undo` per service; Compose path stops the stack and
# redeploys with VERSION=previous (assumes such an image tag exists).
rollback() {
  log "Rolling back deployment..."
  if command -v kubectl >/dev/null 2>&1 && kubectl cluster-info >/dev/null 2>&1; then
    # Kubernetes rollback
    log "Rolling back Kubernetes deployment..."
    services=("coordinator-api" "exchange-integration" "trading-engine" "plugin-registry")
    for service in "${services[@]}"; do
      log "Rolling back ${service}..."
      kubectl rollout undo deployment/${service} -n ${NAMESPACE} || error "Failed to rollback ${service}"
      # Wait for the rolled-back revision to finish rolling out
      kubectl rollout status deployment/${service} -n ${NAMESPACE} --timeout=300s || error "Failed to rollback ${service}"
    done
  else
    # Docker Compose rollback
    log "Rolling back Docker Compose deployment..."
    docker-compose down || error "Failed to stop services"
    # Restart with previous version (assuming it's tagged as 'previous')
    export VERSION=previous
    deploy_docker_compose
  fi
  success "Rollback completed"
}
# Reclaim disk space by pruning unused Docker images and volumes.
# Prune failures are deliberately non-fatal (|| true): cleanup is
# best-effort and must not fail the run.
cleanup() {
  local resource
  log "Cleaning up..."
  for resource in image volume; do
    docker "$resource" prune -f || true
  done
  success "Cleanup completed"
}
# Entry point: dispatch on ENVIRONMENT (first CLI argument) and, on
# success, print a summary of where the deployed services can be reached.
main() {
  log "Starting AITBC deployment..."
  log "Environment: ${ENVIRONMENT}"
  log "Version: ${VERSION}"
  log "Region: ${REGION}"
  case "${ENVIRONMENT}" in
    "local"|"docker")
      # Full local pipeline: build, test, run via Docker Compose, verify.
      check_prerequisites
      build_images
      run_tests
      deploy_docker_compose
      run_health_checks
      run_smoke_tests
      ;;
    "staging"|"production")
      # Same pipeline, but deployed onto the Kubernetes cluster.
      check_prerequisites
      build_images
      run_tests
      deploy_kubernetes
      run_health_checks
      run_smoke_tests
      ;;
    "rollback")
      rollback
      ;;
    "cleanup")
      cleanup
      ;;
    *)
      error "Unknown environment: ${ENVIRONMENT}. Use 'local', 'docker', 'staging', 'production', 'rollback', or 'cleanup'"
      ;;
  esac
  success "Deployment completed successfully!"
  # Display deployment information
  log "Deployment Information:"
  log "Environment: ${ENVIRONMENT}"
  log "Version: ${VERSION}"
  log "Namespace: ${NAMESPACE}"
  if [ "${ENVIRONMENT}" = "docker" ]; then
    # Host ports as published by docker-compose.yml
    log "Services are running on:"
    log " Coordinator API: http://localhost:8001"
    log " Exchange Integration: http://localhost:8010"
    log " Trading Engine: http://localhost:8012"
    log " Plugin Registry: http://localhost:8013"
    log " Plugin Marketplace: http://localhost:8014"
    log " Explorer: http://localhost:8020"
    log " Grafana: http://localhost:3000 (admin/admin)"
    log " Prometheus: http://localhost:9090"
  fi
}
# Handle script interruption: treat INT/TERM as a fatal deployment error
trap 'error "Script interrupted"' INT TERM

# Export environment variables for envsubst (template rendering) and child
# processes. Passwords fall back to development defaults — override them in
# CI/production environments.
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-aitbc123}
export REDIS_PASSWORD=${REDIS_PASSWORD:-aitbc123}
export GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-admin}
export VERSION=${VERSION}
export NAMESPACE=${NAMESPACE}

# Run main function
main "$@"

View File

@@ -1,588 +0,0 @@
#!/bin/bash
# AITBC Production Deployment Script
# This script handles production deployment with zero-downtime
#
# Usage: <script> [version] [region]
#   version: image tag to deploy (default: latest)
#   region:  target cloud region (default: us-east-1)
set -e

# Production Configuration — environment/namespace/domain are fixed here;
# only version and region come from the command line.
ENVIRONMENT="production"
VERSION=${1:-latest}
REGION=${2:-us-east-1}
NAMESPACE="aitbc-prod"
DOMAIN="aitbc.dev"

# Colors for output (ANSI escape sequences used by the logging helpers)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers. Each takes a single message argument.
# log/success are informational and go to stdout; error/warning are
# diagnostics and are sent to stderr (error additionally aborts the script),
# so stdout stays clean for pipelines and captured output.
log() {
  echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}

# Print a fatal error to stderr and exit with status 1.
error() {
  echo -e "${RED}[ERROR]${NC} $1" >&2
  exit 1
}

success() {
  echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Non-fatal diagnostic on stderr.
warning() {
  echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}
# Gate production deploys: must be on the 'production' git branch, every
# test suite must pass, and the cluster must have Ready nodes.
pre_deployment_checks() {
  log "Running pre-deployment checks..."
  # Refuse to deploy from any branch other than 'production'
  current_branch=$(git branch --show-current)
  if [ "$current_branch" != "production" ]; then
    error "Must be on production branch to deploy to production"
  fi
  # Check if all tests pass
  log "Running tests..."
  pytest tests/unit/ -v --tb=short || error "Unit tests failed"
  pytest tests/integration/ -v --tb=short || error "Integration tests failed"
  pytest tests/security/ -v --tb=short || error "Security tests failed"
  pytest tests/performance/test_performance_lightweight.py::TestPerformance::test_cli_performance -v --tb=short || error "Performance tests failed"
  # Check if production infrastructure is ready
  log "Checking production infrastructure..."
  kubectl get nodes | grep -q "Ready" || error "Production nodes not ready"
  # Create the namespace on first deploy; later runs find it existing
  kubectl get namespace $NAMESPACE || kubectl create namespace $NAMESPACE
  success "Pre-deployment checks passed"
}
# Snapshot the current deployment (manifests, database, coordinator app
# data) into a timestamped directory so a failed rollout can be restored.
# Requires DATABASE_URL in the environment.
backup_current_deployment() {
  local backup_dir
  log "Backing up current deployment..."

  # Create timestamped backup directory
  backup_dir="/opt/aitbc/backups/pre-deployment-$(date +%Y%m%d_%H%M%S)"
  mkdir -p "$backup_dir"

  # Backup current Kubernetes configuration
  kubectl get all -n "$NAMESPACE" -o yaml > "$backup_dir/current-deployment.yaml"

  # Backup database. Check pg_dump's own exit status explicitly: this
  # script runs without pipefail, so a failed dump piped through gzip
  # would otherwise "succeed" and leave a silently truncated backup.
  pg_dump "$DATABASE_URL" | gzip > "$backup_dir/database_backup.sql.gz"
  [ "${PIPESTATUS[0]}" -eq 0 ] || error "Database backup (pg_dump) failed"

  # Backup application data from the coordinator pod
  kubectl exec -n "$NAMESPACE" deployment/coordinator-api -- tar -czf /tmp/app_data_backup.tar.gz /app/data
  kubectl cp "$NAMESPACE/deployment/coordinator-api:/tmp/app_data_backup.tar.gz" "$backup_dir/app_data_backup.tar.gz"

  success "Backup completed: $backup_dir"
}
# Build production images (CLI with the 'production' build stage, plus one
# image per apps/<service>/ with a Dockerfile) tagged with $VERSION, then
# push them all. Pushes only start after every build succeeded, so a
# half-built set is never published.
build_production_images() {
  local service_dir service_name
  log "Building production images..."

  # Build CLI image
  docker build -t "aitbc/cli:$VERSION" -f Dockerfile --target production . || error "Failed to build CLI image"

  # Build service images
  for service_dir in apps/*/; do
    if [ -f "$service_dir/Dockerfile" ]; then
      service_name=$(basename "$service_dir")
      log "Building $service_name image..."
      docker build -t "aitbc/$service_name:$VERSION" -f "$service_dir/Dockerfile" "$service_dir" || error "Failed to build $service_name image"
    fi
  done

  # Push images to registry — each push now reports a clear error message
  # instead of relying on set -e's silent exit.
  log "Pushing images to registry..."
  docker push "aitbc/cli:$VERSION" || error "Failed to push CLI image"
  for service_dir in apps/*/; do
    if [ -f "$service_dir/Dockerfile" ]; then
      service_name=$(basename "$service_dir")
      docker push "aitbc/$service_name:$VERSION" || error "Failed to push $service_name image"
    fi
  done

  success "Production images built and pushed"
}
# Install/upgrade PostgreSQL (with one read replica) and Redis (with two
# replicas) via the Bitnami Helm charts, sized for production. --wait
# blocks until each release is ready, bounded by --timeout.
deploy_database() {
  log "Deploying database..."
  # Deploy PostgreSQL
  helm upgrade --install postgres bitnami/postgresql \
    --namespace $NAMESPACE \
    --set auth.postgresPassword=$POSTGRES_PASSWORD \
    --set auth.database=aitbc_prod \
    --set primary.persistence.size=100Gi \
    --set primary.resources.requests.memory=8Gi \
    --set primary.resources.requests.cpu=2000m \
    --set primary.resources.limits.memory=16Gi \
    --set primary.resources.limits.cpu=4000m \
    --set readReplicas.replicaCount=1 \
    --set readReplicas.persistence.size=50Gi \
    --wait \
    --timeout 10m || error "Failed to deploy PostgreSQL"
  # Deploy Redis
  helm upgrade --install redis bitnami/redis \
    --namespace $NAMESPACE \
    --set auth.password=$REDIS_PASSWORD \
    --set master.persistence.size=20Gi \
    --set master.resources.requests.memory=2Gi \
    --set master.resources.requests.cpu=1000m \
    --set master.resources.limits.memory=4Gi \
    --set master.resources.limits.cpu=2000m \
    --set replica.replicaCount=2 \
    --wait \
    --timeout 5m || error "Failed to deploy Redis"
  success "Database deployed successfully"
}
# Deploy the blockchain core services (blockchain-node, consensus-node,
# network-node). Each gets a 2-replica Deployment plus a ClusterIP Service
# generated from an inline manifest; all three listen on port 8007 and pull
# DATABASE_URL/REDIS_URL from the aitbc-secrets Secret.
deploy_core_services() {
  log "Deploying core services..."
  # Deploy blockchain services
  for service in blockchain-node consensus-node network-node; do
    log "Deploying $service..."
    # Create deployment manifest ($service/$NAMESPACE/$VERSION expand in
    # the unquoted heredoc below)
    cat > /tmp/$service-deployment.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  replicas: 2
  selector:
    matchLabels:
      app: $service
  template:
    metadata:
      labels:
        app: $service
    spec:
      containers:
        - name: $service
          image: aitbc/$service:$VERSION
          ports:
            - containerPort: 8007
              name: http
          env:
            - name: NODE_ENV
              value: "production"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: database-url
            - name: REDIS_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: redis-url
          resources:
            requests:
              memory: "2Gi"
              cpu: "1000m"
            limits:
              memory: "4Gi"
              cpu: "2000m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8007
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8007
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  selector:
    app: $service
  ports:
    - port: 8007
      targetPort: 8007
  type: ClusterIP
EOF
    # Apply deployment
    kubectl apply -f /tmp/$service-deployment.yaml -n $NAMESPACE || error "Failed to deploy $service"
    # Wait for deployment
    kubectl rollout status deployment/$service -n $NAMESPACE --timeout=300s || error "Failed to rollout $service"
    rm /tmp/$service-deployment.yaml
  done
  success "Core services deployed successfully"
}
# Deploy the eleven application-layer services. Each service gets a
# 3-replica Deployment plus a ClusterIP Service generated from an inline
# manifest; the per-service HTTP port comes from the case table below, and
# credentials/keys are referenced from the aitbc-secrets Secret.
deploy_application_services() {
  log "Deploying application services..."
  services=("coordinator-api" "exchange-integration" "compliance-service" "trading-engine" "plugin-registry" "plugin-marketplace" "plugin-security" "plugin-analytics" "global-infrastructure" "global-ai-agents" "multi-region-load-balancer")
  for service in "${services[@]}"; do
    log "Deploying $service..."
    # Determine port — this table is the source of truth for the
    # container port, Service port, and probe port of each service
    case $service in
      "coordinator-api") port=8001 ;;
      "exchange-integration") port=8010 ;;
      "compliance-service") port=8011 ;;
      "trading-engine") port=8012 ;;
      "plugin-registry") port=8013 ;;
      "plugin-marketplace") port=8014 ;;
      "plugin-security") port=8015 ;;
      "plugin-analytics") port=8016 ;;
      "global-infrastructure") port=8017 ;;
      "global-ai-agents") port=8018 ;;
      "multi-region-load-balancer") port=8019 ;;
    esac
    # Create deployment manifest ($service/$port/$NAMESPACE/$VERSION
    # expand in the unquoted heredoc below)
    cat > /tmp/$service-deployment.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $service
  template:
    metadata:
      labels:
        app: $service
    spec:
      containers:
        - name: $service
          image: aitbc/$service:$VERSION
          ports:
            - containerPort: $port
              name: http
          env:
            - name: NODE_ENV
              value: "production"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: database-url
            - name: REDIS_URL
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: redis-url
            - name: JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: jwt-secret
            - name: ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: aitbc-secrets
                  key: encryption-key
          resources:
            requests:
              memory: "1Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          livenessProbe:
            httpGet:
              path: /health
              port: $port
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: $port
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: $service
  namespace: $NAMESPACE
spec:
  selector:
    app: $service
  ports:
    - port: $port
      targetPort: $port
  type: ClusterIP
EOF
    # Apply deployment
    kubectl apply -f /tmp/$service-deployment.yaml -n $NAMESPACE || error "Failed to deploy $service"
    # Wait for deployment
    kubectl rollout status deployment/$service -n $NAMESPACE --timeout=300s || error "Failed to rollout $service"
    rm /tmp/$service-deployment.yaml
  done
  success "Application services deployed successfully"
}
# Expose the public entry points through an nginx Ingress with cert-manager
# (Let's Encrypt) TLS and basic rate limiting:
#   api.$DOMAIN         -> coordinator-api:8001
#   marketplace.$DOMAIN -> plugin-marketplace:8014
#   explorer.$DOMAIN    -> explorer:8020
deploy_ingress() {
  log "Deploying ingress and load balancer..."
  # Create ingress manifest ($DOMAIN/$NAMESPACE expand in the heredoc)
  cat > /tmp/ingress.yaml << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: aitbc-ingress
  namespace: $NAMESPACE
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
spec:
  tls:
    - hosts:
        - api.$DOMAIN
        - marketplace.$DOMAIN
        - explorer.$DOMAIN
      secretName: aitbc-tls
  rules:
    - host: api.$DOMAIN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: coordinator-api
                port:
                  number: 8001
    - host: marketplace.$DOMAIN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: plugin-marketplace
                port:
                  number: 8014
    - host: explorer.$DOMAIN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: explorer
                port:
                  number: 8020
EOF
  # Apply ingress
  kubectl apply -f /tmp/ingress.yaml -n $NAMESPACE || error "Failed to deploy ingress"
  rm /tmp/ingress.yaml
  success "Ingress deployed successfully"
}
# Install kube-prometheus-stack (Prometheus with 30d retention, Grafana,
# default alerting rules), then publish the project's Grafana dashboards
# from monitoring/grafana/dashboards/ as a ConfigMap.
deploy_monitoring() {
  log "Deploying monitoring stack..."
  # Deploy Prometheus
  helm upgrade --install prometheus prometheus-community/kube-prometheus-stack \
    --namespace $NAMESPACE \
    --create-namespace \
    --set prometheus.prometheus.spec.retention=30d \
    --set prometheus.prometheus.spec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=50Gi \
    --set grafana.adminPassword=$GRAFANA_PASSWORD \
    --set grafana.persistence.size=10Gi \
    --set defaultRules.create=true \
    --wait \
    --timeout 10m || error "Failed to deploy monitoring"
  # Import Grafana dashboards
  log "Importing Grafana dashboards..."
  # Create dashboard configmaps (dry-run + apply keeps this idempotent)
  kubectl create configmap grafana-dashboards \
    --from-file=monitoring/grafana/dashboards/ \
    -n $NAMESPACE \
    --dry-run=client -o yaml | kubectl apply -f -
  success "Monitoring deployed successfully"
}
# Verify the freshly deployed stack: wait for pods, probe each key
# service's /health via port-forward, then hit the public endpoints.
post_deployment_tests() {
  local service_port service port port_forward_pid endpoint
  log "Running post-deployment tests..."

  # Wait for all services to be ready
  kubectl wait --for=condition=ready pod -l app!=pod -n $NAMESPACE --timeout=600s

  # Services to probe, as "name:containerPort" pairs. These ports are the
  # ones the containers actually listen on (see deploy_application_services).
  endpoints=(
    "coordinator-api:8001"
    "exchange-integration:8010"
    "trading-engine:8012"
    "plugin-registry:8013"
    "plugin-marketplace:8014"
  )
  for service_port in "${endpoints[@]}"; do
    service=${service_port%%:*}
    port=${service_port##*:}
    log "Testing $service..."
    # Forward the local port to the SAME container port. The previous
    # "$port:8007" mapping pointed every service at pod port 8007, which
    # only the blockchain core services expose, so these health checks
    # could never succeed for the application services listed above.
    kubectl port-forward -n $NAMESPACE deployment/$service $port:$port &
    port_forward_pid=$!
    sleep 5
    if curl -f -s http://localhost:$port/health > /dev/null; then
      success "$service is healthy"
    else
      # Reap the forwarder before aborting so it doesn't linger
      kill $port_forward_pid 2>/dev/null || true
      error "$service health check failed"
    fi
    # Kill port-forward
    kill $port_forward_pid 2>/dev/null || true
  done

  # Test external endpoints through the ingress
  external_endpoints=(
    "https://api.$DOMAIN/health"
    "https://marketplace.$DOMAIN/api/v1/marketplace/featured"
  )
  for endpoint in "${external_endpoints[@]}"; do
    log "Testing $endpoint..."
    if curl -f -s "$endpoint" > /dev/null; then
      success "$endpoint is responding"
    else
      error "$endpoint is not responding"
    fi
  done

  success "Post-deployment tests passed"
}
# Create/refresh the aitbc-secrets Secret from environment variables
# (DATABASE_URL, REDIS_URL, JWT_SECRET, ENCRYPTION_KEY, POSTGRES_PASSWORD,
# REDIS_PASSWORD — all must be set before running this script).
# dry-run + apply makes the operation idempotent across redeploys.
create_secrets() {
  log "Creating secrets..."
  # Create secret from environment variables
  kubectl create secret generic aitbc-secrets \
    --from-literal=database-url="$DATABASE_URL" \
    --from-literal=redis-url="$REDIS_URL" \
    --from-literal=jwt-secret="$JWT_SECRET" \
    --from-literal=encryption-key="$ENCRYPTION_KEY" \
    --from-literal=postgres-password="$POSTGRES_PASSWORD" \
    --from-literal=redis-password="$REDIS_PASSWORD" \
    --namespace $NAMESPACE \
    --dry-run=client -o yaml | kubectl apply -f -
  success "Secrets created"
}
# Entry point: verify tooling, then run the production pipeline end to end
# (checks, secrets, backup, images, database, core + application services,
# ingress, monitoring, post-deploy tests) and print a deployment summary.
main() {
  log "Starting AITBC production deployment..."
  log "Environment: $ENVIRONMENT"
  log "Version: $VERSION"
  log "Region: $REGION"
  log "Domain: $DOMAIN"
  # Check prerequisites
  command -v kubectl >/dev/null 2>&1 || error "kubectl is not installed"
  command -v helm >/dev/null 2>&1 || error "Helm is not installed"
  kubectl cluster-info >/dev/null 2>&1 || error "Cannot connect to Kubernetes cluster"
  # Run deployment steps (order matters: secrets before services that
  # reference them, backup before anything is replaced)
  pre_deployment_checks
  create_secrets
  backup_current_deployment
  build_production_images
  deploy_database
  deploy_core_services
  deploy_application_services
  deploy_ingress
  deploy_monitoring
  post_deployment_tests
  success "Production deployment completed successfully!"
  # Display deployment information
  log "Deployment Information:"
  log "Environment: $ENVIRONMENT"
  log "Version: $VERSION"
  log "Namespace: $NAMESPACE"
  log "Domain: $DOMAIN"
  log ""
  log "Services are available at:"
  log " API: https://api.$DOMAIN"
  log " Marketplace: https://marketplace.$DOMAIN"
  log " Explorer: https://explorer.$DOMAIN"
  log " Grafana: https://grafana.$DOMAIN"
  log ""
  log "To check deployment status:"
  log " kubectl get pods -n $NAMESPACE"
  log " kubectl get services -n $NAMESPACE"
  log ""
  log "To view logs:"
  log " kubectl logs -f deployment/coordinator-api -n $NAMESPACE"
}
# Handle script interruption: treat INT/TERM as a fatal deployment error
trap 'error "Script interrupted"' INT TERM

# Export deployment configuration for child processes. The secret values
# (DATABASE_URL, JWT_SECRET, ...) must already be set in the calling
# environment — these exports only propagate them; there are no defaults.
export DATABASE_URL=${DATABASE_URL}
export REDIS_URL=${REDIS_URL}
export JWT_SECRET=${JWT_SECRET}
export ENCRYPTION_KEY=${ENCRYPTION_KEY}
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
export REDIS_PASSWORD=${REDIS_PASSWORD}
export GRAFANA_PASSWORD=${GRAFANA_PASSWORD}
export VERSION=${VERSION}
export NAMESPACE=${NAMESPACE}
export DOMAIN=${DOMAIN}

# Run main function
main "$@"