feat: add marketplace metrics, privacy features, and service registry endpoints
- Add Prometheus metrics for marketplace API throughput and error rates with new dashboard panels - Implement confidential transaction models with encryption support and access control - Add key management system with registration, rotation, and audit logging - Create services and registry routers for service discovery and management - Integrate ZK proof generation for privacy-preserving receipts - Add metrics instru
This commit is contained in:
570
infra/k8s/backup-configmap.yaml
Normal file
570
infra/k8s/backup-configmap.yaml
Normal file
@ -0,0 +1,570 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: backup-scripts
|
||||
namespace: default
|
||||
labels:
|
||||
app: aitbc-backup
|
||||
component: backup
|
||||
data:
|
||||
backup_postgresql.sh: |
|
||||
#!/bin/bash
|
||||
# PostgreSQL Backup Script for AITBC
|
||||
# Usage: ./backup_postgresql.sh [namespace] [backup_name]
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
NAMESPACE=${1:-default}
|
||||
BACKUP_NAME=${2:-postgresql-backup-$(date +%Y%m%d_%H%M%S)}
|
||||
BACKUP_DIR="/tmp/postgresql-backups"
|
||||
RETENTION_DAYS=30
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Logging function
|
||||
log() {
|
||||
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
||||
}
|
||||
|
||||
error() {
|
||||
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
|
||||
}
|
||||
|
||||
warn() {
|
||||
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1"
|
||||
}
|
||||
|
||||
# Check dependencies
|
||||
check_dependencies() {
|
||||
if ! command -v kubectl &> /dev/null; then
|
||||
error "kubectl is not installed or not in PATH"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v pg_dump &> /dev/null; then
|
||||
error "pg_dump is not installed or not in PATH"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Create backup directory
|
||||
create_backup_dir() {
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
log "Created backup directory: $BACKUP_DIR"
|
||||
}
|
||||
|
||||
# Get PostgreSQL pod name
|
||||
get_postgresql_pod() {
|
||||
local pod=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=postgresql -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
||||
if [[ -z "$pod" ]]; then
|
||||
pod=$(kubectl get pods -n "$NAMESPACE" -l app=postgresql -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
||||
fi
|
||||
|
||||
if [[ -z "$pod" ]]; then
|
||||
error "Could not find PostgreSQL pod in namespace $NAMESPACE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "$pod"
|
||||
}
|
||||
|
||||
# Wait for PostgreSQL to be ready
|
||||
wait_for_postgresql() {
|
||||
local pod=$1
|
||||
log "Waiting for PostgreSQL pod $pod to be ready..."
|
||||
|
||||
kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=300s
|
||||
|
||||
# Check if PostgreSQL is accepting connections
|
||||
local retries=30
|
||||
while [[ $retries -gt 0 ]]; do
|
||||
if kubectl exec -n "$NAMESPACE" "$pod" -- pg_isready -U postgres >/dev/null 2>&1; then
|
||||
log "PostgreSQL is ready"
|
||||
return 0
|
||||
fi
|
||||
sleep 2
|
||||
((retries--))
|
||||
done
|
||||
|
||||
error "PostgreSQL did not become ready within timeout"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Perform backup
#
# Dumps the database from the given PostgreSQL pod into
# $BACKUP_DIR/${BACKUP_NAME}.sql.gz (pg_dump custom format, gzip-compressed).
# Globals:   NAMESPACE, BACKUP_DIR, BACKUP_NAME (read)
# Arguments: $1 - PostgreSQL pod name
# Exits non-zero if the dump fails or the resulting file is empty/missing.
perform_backup() {
    local pod=$1
    local backup_file="$BACKUP_DIR/${BACKUP_NAME}.sql"

    log "Starting PostgreSQL backup to $backup_file"

    # Get database credentials from the secret. Declarations are split from
    # the command substitutions so `local` does not mask kubectl's exit
    # status; defaults apply when the secret or a key is missing OR empty
    # (the original `|| echo` fallback only fired on command failure).
    local db_user db_password db_name
    db_user=$(kubectl get secret -n "$NAMESPACE" coordinator-postgresql -o jsonpath='{.data.username}' 2>/dev/null | base64 -d || true)
    db_password=$(kubectl get secret -n "$NAMESPACE" coordinator-postgresql -o jsonpath='{.data.password}' 2>/dev/null | base64 -d || true)
    db_name=$(kubectl get secret -n "$NAMESPACE" coordinator-postgresql -o jsonpath='{.data.database}' 2>/dev/null | base64 -d || true)
    db_user=${db_user:-postgres}
    db_name=${db_name:-aitbc}

    # BUG FIX: the original wrote `PGPASSWORD=... kubectl exec ... pg_dump`,
    # which sets PGPASSWORD on the *local* kubectl process only — it never
    # propagates into the pod, so pg_dump would fail against a
    # password-protected database. Export it inside the pod via `env`.
    kubectl exec -n "$NAMESPACE" "$pod" -- \
        env PGPASSWORD="$db_password" \
        pg_dump -U "$db_user" -h localhost -d "$db_name" \
        --verbose --clean --if-exists --create --format=custom \
        --file="/tmp/${BACKUP_NAME}.dump"

    # Copy backup from pod, then clean up the remote copy.
    kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}.dump" "$backup_file"
    kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}.dump"

    # Compress backup
    gzip "$backup_file"
    backup_file="${backup_file}.gz"

    log "Backup completed: $backup_file"

    # Verify the backup exists and is non-empty.
    if [[ -f "$backup_file" && -s "$backup_file" ]]; then
        local size
        size=$(du -h "$backup_file" | cut -f1)
        log "Backup size: $size"
    else
        error "Backup file is empty or missing"
        exit 1
    fi
}
|
||||
|
||||
# Clean old backups
|
||||
cleanup_old_backups() {
|
||||
log "Cleaning up backups older than $RETENTION_DAYS days"
|
||||
find "$BACKUP_DIR" -name "*.sql.gz" -type f -mtime +$RETENTION_DAYS -delete
|
||||
log "Cleanup completed"
|
||||
}
|
||||
|
||||
# Upload to cloud storage (optional)
|
||||
upload_to_cloud() {
|
||||
local backup_file="$1"
|
||||
|
||||
# Check if AWS CLI is configured
|
||||
if command -v aws &> /dev/null && aws sts get-caller-identity &>/dev/null; then
|
||||
log "Uploading backup to S3"
|
||||
local s3_bucket="aitbc-backups-${NAMESPACE}"
|
||||
local s3_key="postgresql/$(basename "$backup_file")"
|
||||
|
||||
aws s3 cp "$backup_file" "s3://$s3_bucket/$s3_key" --storage-class GLACIER_IR
|
||||
log "Backup uploaded to s3://$s3_bucket/$s3_key"
|
||||
else
|
||||
warn "AWS CLI not configured, skipping cloud upload"
|
||||
fi
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
log "Starting PostgreSQL backup process"
|
||||
|
||||
check_dependencies
|
||||
create_backup_dir
|
||||
|
||||
local pod=$(get_postgresql_pod)
|
||||
wait_for_postgresql "$pod"
|
||||
|
||||
perform_backup "$pod"
|
||||
cleanup_old_backups
|
||||
|
||||
local backup_file="$BACKUP_DIR/${BACKUP_NAME}.sql.gz"
|
||||
upload_to_cloud "$backup_file"
|
||||
|
||||
log "PostgreSQL backup process completed successfully"
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
|
||||
backup_redis.sh: |
|
||||
#!/bin/bash
|
||||
# Redis Backup Script for AITBC
|
||||
# Usage: ./backup_redis.sh [namespace] [backup_name]
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
NAMESPACE=${1:-default}
|
||||
BACKUP_NAME=${2:-redis-backup-$(date +%Y%m%d_%H%M%S)}
|
||||
BACKUP_DIR="/tmp/redis-backups"
|
||||
RETENTION_DAYS=30
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Logging function
|
||||
log() {
|
||||
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
||||
}
|
||||
|
||||
error() {
|
||||
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
|
||||
}
|
||||
|
||||
warn() {
|
||||
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1"
|
||||
}
|
||||
|
||||
# Check dependencies
|
||||
check_dependencies() {
|
||||
if ! command -v kubectl &> /dev/null; then
|
||||
error "kubectl is not installed or not in PATH"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Create backup directory
|
||||
create_backup_dir() {
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
log "Created backup directory: $BACKUP_DIR"
|
||||
}
|
||||
|
||||
# Get Redis pod name
|
||||
get_redis_pod() {
|
||||
local pod=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=redis -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
||||
if [[ -z "$pod" ]]; then
|
||||
pod=$(kubectl get pods -n "$NAMESPACE" -l app=redis -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
||||
fi
|
||||
|
||||
if [[ -z "$pod" ]]; then
|
||||
error "Could not find Redis pod in namespace $NAMESPACE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "$pod"
|
||||
}
|
||||
|
||||
# Wait for Redis to be ready
|
||||
wait_for_redis() {
|
||||
local pod=$1
|
||||
log "Waiting for Redis pod $pod to be ready..."
|
||||
|
||||
kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=300s
|
||||
|
||||
# Check if Redis is accepting connections
|
||||
local retries=30
|
||||
while [[ $retries -gt 0 ]]; do
|
||||
if kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli ping 2>/dev/null | grep -q PONG; then
|
||||
log "Redis is ready"
|
||||
return 0
|
||||
fi
|
||||
sleep 2
|
||||
((retries--))
|
||||
done
|
||||
|
||||
error "Redis did not become ready within timeout"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Perform backup
#
# Triggers a Redis BGSAVE on the given pod, waits for the background save
# to finish, then copies the RDB snapshot (and AOF, if enabled) into
# $BACKUP_DIR.
# Globals:   NAMESPACE, BACKUP_DIR, BACKUP_NAME (read)
# Arguments: $1 - Redis pod name
# Exits non-zero if the save times out or the backup file is empty/missing.
perform_backup() {
    local pod=$1
    local backup_file="$BACKUP_DIR/${BACKUP_NAME}.rdb"

    log "Starting Redis backup to $backup_file"

    # BUG FIX: the original compared two back-to-back `redis-cli LASTSAVE`
    # calls, which are (almost) always equal, so the wait loop could never
    # detect completion and always timed out. Record LASTSAVE *before*
    # triggering BGSAVE and poll until the timestamp advances.
    local lastsave_before
    lastsave_before=$(kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli LASTSAVE)

    # Create Redis backup
    kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli BGSAVE

    # Wait for background save to complete
    log "Waiting for background save to complete..."
    local retries=60
    while [[ $retries -gt 0 ]]; do
        local lastsave_now
        lastsave_now=$(kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli LASTSAVE)
        if [[ "$lastsave_now" -gt "$lastsave_before" ]]; then
            log "Background save completed"
            break
        fi
        sleep 2
        ((retries--))
    done

    if [[ $retries -eq 0 ]]; then
        error "Background save did not complete within timeout"
        exit 1
    fi

    # Copy RDB file from pod
    kubectl cp "$NAMESPACE/$pod:/data/dump.rdb" "$backup_file"

    # Also create an append-only file backup if AOF persistence is enabled.
    # NOTE(review): Redis 7 may persist AOF under /data/appendonlydir
    # instead of a single appendonly.aof file — confirm against the
    # deployed Redis version.
    local aof_enabled
    aof_enabled=$(kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli CONFIG GET appendonly | tail -1)
    if [[ "$aof_enabled" == "yes" ]]; then
        local aof_backup="$BACKUP_DIR/${BACKUP_NAME}.aof"
        kubectl cp "$NAMESPACE/$pod:/data/appendonly.aof" "$aof_backup"
        log "AOF backup created: $aof_backup"
    fi

    log "Backup completed: $backup_file"

    # Verify the backup exists and is non-empty.
    if [[ -f "$backup_file" && -s "$backup_file" ]]; then
        local size
        size=$(du -h "$backup_file" | cut -f1)
        log "Backup size: $size"
    else
        error "Backup file is empty or missing"
        exit 1
    fi
}
|
||||
|
||||
# Clean old backups
|
||||
cleanup_old_backups() {
|
||||
log "Cleaning up backups older than $RETENTION_DAYS days"
|
||||
find "$BACKUP_DIR" -name "*.rdb" -type f -mtime +$RETENTION_DAYS -delete
|
||||
find "$BACKUP_DIR" -name "*.aof" -type f -mtime +$RETENTION_DAYS -delete
|
||||
log "Cleanup completed"
|
||||
}
|
||||
|
||||
# Upload to cloud storage (optional)
|
||||
upload_to_cloud() {
|
||||
local backup_file="$1"
|
||||
|
||||
# Check if AWS CLI is configured
|
||||
if command -v aws &> /dev/null && aws sts get-caller-identity &>/dev/null; then
|
||||
log "Uploading backup to S3"
|
||||
local s3_bucket="aitbc-backups-${NAMESPACE}"
|
||||
local s3_key="redis/$(basename "$backup_file")"
|
||||
|
||||
aws s3 cp "$backup_file" "s3://$s3_bucket/$s3_key" --storage-class GLACIER_IR
|
||||
log "Backup uploaded to s3://$s3_bucket/$s3_key"
|
||||
|
||||
# Upload AOF file if exists
|
||||
local aof_file="${backup_file%.rdb}.aof"
|
||||
if [[ -f "$aof_file" ]]; then
|
||||
local aof_key="redis/$(basename "$aof_file")"
|
||||
aws s3 cp "$aof_file" "s3://$s3_bucket/$aof_key" --storage-class GLACIER_IR
|
||||
log "AOF backup uploaded to s3://$s3_bucket/$aof_key"
|
||||
fi
|
||||
else
|
||||
warn "AWS CLI not configured, skipping cloud upload"
|
||||
fi
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
log "Starting Redis backup process"
|
||||
|
||||
check_dependencies
|
||||
create_backup_dir
|
||||
|
||||
local pod=$(get_redis_pod)
|
||||
wait_for_redis "$pod"
|
||||
|
||||
perform_backup "$pod"
|
||||
cleanup_old_backups
|
||||
|
||||
local backup_file="$BACKUP_DIR/${BACKUP_NAME}.rdb"
|
||||
upload_to_cloud "$backup_file"
|
||||
|
||||
log "Redis backup process completed successfully"
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
|
||||
backup_ledger.sh: |
|
||||
#!/bin/bash
|
||||
# Ledger Storage Backup Script for AITBC
|
||||
# Usage: ./backup_ledger.sh [namespace] [backup_name]
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
NAMESPACE=${1:-default}
|
||||
BACKUP_NAME=${2:-ledger-backup-$(date +%Y%m%d_%H%M%S)}
|
||||
BACKUP_DIR="/tmp/ledger-backups"
|
||||
RETENTION_DAYS=30
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Logging function
|
||||
log() {
|
||||
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
||||
}
|
||||
|
||||
error() {
|
||||
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
|
||||
}
|
||||
|
||||
warn() {
|
||||
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1"
|
||||
}
|
||||
|
||||
# Check dependencies
|
||||
check_dependencies() {
|
||||
if ! command -v kubectl &> /dev/null; then
|
||||
error "kubectl is not installed or not in PATH"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Create backup directory
|
||||
create_backup_dir() {
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
log "Created backup directory: $BACKUP_DIR"
|
||||
}
|
||||
|
||||
# Get blockchain node pods
#
# Prints the names of all blockchain-node pods, space-separated on one
# line, so the caller can word-split them into an array. Tries the
# app.kubernetes.io/name label first, then the legacy `app` label.
# Globals:   NAMESPACE (read)
# Exits non-zero if no matching pods are found.
get_blockchain_pods() {
    # Split declaration from assignment so `local` does not mask kubectl's
    # exit status.
    local pods
    pods=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=blockchain-node -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")
    if [[ -z "$pods" ]]; then
        pods=$(kubectl get pods -n "$NAMESPACE" -l app=blockchain-node -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")
    fi

    if [[ -z "$pods" ]]; then
        error "Could not find blockchain node pods in namespace $NAMESPACE"
        exit 1
    fi

    # Quoted on purpose: the names are already space-separated and the
    # caller word-splits them; the original unquoted `echo $pods` also
    # exposed the output to accidental glob expansion.
    echo "$pods"
}
|
||||
|
||||
# Wait for blockchain node to be ready
|
||||
wait_for_blockchain_node() {
|
||||
local pod=$1
|
||||
log "Waiting for blockchain node pod $pod to be ready..."
|
||||
|
||||
kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=300s
|
||||
|
||||
# Check if node is responding
|
||||
local retries=30
|
||||
while [[ $retries -gt 0 ]]; do
|
||||
if kubectl exec -n "$NAMESPACE" "$pod" -- curl -s http://localhost:8080/v1/health >/dev/null 2>&1; then
|
||||
log "Blockchain node is ready"
|
||||
return 0
|
||||
fi
|
||||
sleep 2
|
||||
((retries--))
|
||||
done
|
||||
|
||||
error "Blockchain node did not become ready within timeout"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Backup ledger data
|
||||
backup_ledger_data() {
|
||||
local pod=$1
|
||||
local ledger_backup_dir="$BACKUP_DIR/${BACKUP_NAME}"
|
||||
mkdir -p "$ledger_backup_dir"
|
||||
|
||||
log "Starting ledger backup from pod $pod"
|
||||
|
||||
# Get the latest block height before backup
|
||||
local latest_block=$(kubectl exec -n "$NAMESPACE" "$pod" -- curl -s http://localhost:8080/v1/blocks/head | jq -r '.height // 0')
|
||||
log "Latest block height: $latest_block"
|
||||
|
||||
# Backup blockchain data directory
|
||||
local blockchain_data_dir="/app/data/chain"
|
||||
if kubectl exec -n "$NAMESPACE" "$pod" -- test -d "$blockchain_data_dir"; then
|
||||
log "Backing up blockchain data directory..."
|
||||
kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf "/tmp/${BACKUP_NAME}-chain.tar.gz" -C "$blockchain_data_dir" .
|
||||
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}-chain.tar.gz" "$ledger_backup_dir/chain.tar.gz"
|
||||
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}-chain.tar.gz"
|
||||
fi
|
||||
|
||||
# Backup wallet data
|
||||
local wallet_data_dir="/app/data/wallets"
|
||||
if kubectl exec -n "$NAMESPACE" "$pod" -- test -d "$wallet_data_dir"; then
|
||||
log "Backing up wallet data directory..."
|
||||
kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf "/tmp/${BACKUP_NAME}-wallets.tar.gz" -C "$wallet_data_dir" .
|
||||
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}-wallets.tar.gz" "$ledger_backup_dir/wallets.tar.gz"
|
||||
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}-wallets.tar.gz"
|
||||
fi
|
||||
|
||||
# Backup receipts
|
||||
local receipts_data_dir="/app/data/receipts"
|
||||
if kubectl exec -n "$NAMESPACE" "$pod" -- test -d "$receipts_data_dir"; then
|
||||
log "Backing up receipts directory..."
|
||||
kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf "/tmp/${BACKUP_NAME}-receipts.tar.gz" -C "$receipts_data_dir" .
|
||||
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}-receipts.tar.gz" "$ledger_backup_dir/receipts.tar.gz"
|
||||
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}-receipts.tar.gz"
|
||||
fi
|
||||
|
||||
# Create metadata file
|
||||
cat > "$ledger_backup_dir/metadata.json" << EOF
|
||||
{
|
||||
"backup_name": "$BACKUP_NAME",
|
||||
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
||||
"namespace": "$NAMESPACE",
|
||||
"source_pod": "$pod",
|
||||
"latest_block_height": $latest_block,
|
||||
"backup_type": "full"
|
||||
}
|
||||
EOF
|
||||
|
||||
log "Ledger backup completed: $ledger_backup_dir"
|
||||
|
||||
# Verify backup
|
||||
local total_size=$(du -sh "$ledger_backup_dir" | cut -f1)
|
||||
log "Total backup size: $total_size"
|
||||
}
|
||||
|
||||
# Clean old backups
|
||||
cleanup_old_backups() {
|
||||
log "Cleaning up backups older than $RETENTION_DAYS days"
|
||||
find "$BACKUP_DIR" -maxdepth 1 -type d -name "ledger-backup-*" -mtime +$RETENTION_DAYS -exec rm -rf {} \;
|
||||
find "$BACKUP_DIR" -name "*-incremental.json" -type f -mtime +$RETENTION_DAYS -delete
|
||||
log "Cleanup completed"
|
||||
}
|
||||
|
||||
# Upload to cloud storage (optional)
|
||||
upload_to_cloud() {
|
||||
local backup_dir="$1"
|
||||
|
||||
# Check if AWS CLI is configured
|
||||
if command -v aws &> /dev/null && aws sts get-caller-identity &>/dev/null; then
|
||||
log "Uploading backup to S3"
|
||||
local s3_bucket="aitbc-backups-${NAMESPACE}"
|
||||
|
||||
# Upload entire backup directory
|
||||
aws s3 cp "$backup_dir" "s3://$s3_bucket/ledger/$(basename "$backup_dir")/" --recursive --storage-class GLACIER_IR
|
||||
|
||||
log "Backup uploaded to s3://$s3_bucket/ledger/$(basename "$backup_dir")/"
|
||||
else
|
||||
warn "AWS CLI not configured, skipping cloud upload"
|
||||
fi
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
log "Starting ledger backup process"
|
||||
|
||||
check_dependencies
|
||||
create_backup_dir
|
||||
|
||||
local pods=($(get_blockchain_pods))
|
||||
|
||||
# Use the first ready pod for backup
|
||||
for pod in "${pods[@]}"; do
|
||||
if kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=10s >/dev/null 2>&1; then
|
||||
wait_for_blockchain_node "$pod"
|
||||
backup_ledger_data "$pod"
|
||||
|
||||
local backup_dir="$BACKUP_DIR/${BACKUP_NAME}"
|
||||
upload_to_cloud "$backup_dir"
|
||||
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
cleanup_old_backups
|
||||
|
||||
log "Ledger backup process completed successfully"
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
156
infra/k8s/backup-cronjob.yaml
Normal file
156
infra/k8s/backup-cronjob.yaml
Normal file
@ -0,0 +1,156 @@
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: aitbc-backup
|
||||
namespace: default
|
||||
labels:
|
||||
app: aitbc-backup
|
||||
component: backup
|
||||
spec:
|
||||
schedule: "0 2 * * *" # Run daily at 2 AM
|
||||
concurrencyPolicy: Forbid
|
||||
successfulJobsHistoryLimit: 7
|
||||
failedJobsHistoryLimit: 3
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: postgresql-backup
|
||||
image: postgres:15-alpine
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
- |
|
||||
echo "Starting PostgreSQL backup..."
|
||||
/scripts/backup_postgresql.sh default postgresql-backup-$(date +%Y%m%d_%H%M%S)
|
||||
echo "PostgreSQL backup completed"
|
||||
env:
|
||||
- name: PGPASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: coordinator-postgresql
|
||||
key: password
|
||||
volumeMounts:
|
||||
- name: backup-scripts
|
||||
mountPath: /scripts
|
||||
readOnly: true
|
||||
- name: backup-storage
|
||||
mountPath: /backups
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
|
||||
- name: redis-backup
|
||||
image: redis:7-alpine
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "Waiting for PostgreSQL backup to complete..."
|
||||
sleep 60
|
||||
echo "Starting Redis backup..."
|
||||
/scripts/backup_redis.sh default redis-backup-$(date +%Y%m%d_%H%M%S)
|
||||
echo "Redis backup completed"
|
||||
volumeMounts:
|
||||
- name: backup-scripts
|
||||
mountPath: /scripts
|
||||
readOnly: true
|
||||
- name: backup-storage
|
||||
mountPath: /backups
|
||||
resources:
|
||||
requests:
|
||||
memory: "128Mi"
|
||||
cpu: "50m"
|
||||
limits:
|
||||
memory: "256Mi"
|
||||
cpu: "200m"
|
||||
|
||||
- name: ledger-backup
|
||||
image: alpine:3.18
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
echo "Waiting for previous backups to complete..."
|
||||
sleep 120
|
||||
echo "Starting Ledger backup..."
|
||||
/scripts/backup_ledger.sh default ledger-backup-$(date +%Y%m%d_%H%M%S)
|
||||
echo "Ledger backup completed"
|
||||
volumeMounts:
|
||||
- name: backup-scripts
|
||||
mountPath: /scripts
|
||||
readOnly: true
|
||||
- name: backup-storage
|
||||
mountPath: /backups
|
||||
resources:
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "100m"
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
|
||||
volumes:
|
||||
- name: backup-scripts
|
||||
configMap:
|
||||
name: backup-scripts
|
||||
defaultMode: 0755
|
||||
|
||||
- name: backup-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: backup-storage-pvc
|
||||
|
||||
# Add service account for cloud storage access
|
||||
serviceAccountName: backup-service-account
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: backup-service-account
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: backup-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "pods/exec", "secrets"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: backup-role-binding
|
||||
namespace: default
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: backup-service-account
|
||||
namespace: default
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: backup-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: backup-storage-pvc
|
||||
namespace: default
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: fast-ssd
|
||||
resources:
|
||||
requests:
|
||||
storage: 500Gi
|
||||
99
infra/k8s/cert-manager.yaml
Normal file
99
infra/k8s/cert-manager.yaml
Normal file
@ -0,0 +1,99 @@
|
||||
# Cert-Manager Installation
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: cert-manager
|
||||
namespace: argocd
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://charts.jetstack.io
|
||||
chart: cert-manager
|
||||
targetRevision: v1.14.0
|
||||
helm:
|
||||
releaseName: cert-manager
|
||||
parameters:
|
||||
- name: installCRDs
|
||||
value: "true"
|
||||
- name: namespace
|
||||
value: cert-manager
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: cert-manager
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
selfHeal: true
|
||||
---
|
||||
# Let's Encrypt Production ClusterIssuer
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-prod
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: admin@aitbc.io
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
---
|
||||
# Let's Encrypt Staging ClusterIssuer (for testing)
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
email: admin@aitbc.io
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-staging
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: nginx
|
||||
---
|
||||
# Self-Signed Issuer for Development
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Issuer
|
||||
metadata:
|
||||
name: selfsigned-issuer
|
||||
namespace: default
|
||||
spec:
|
||||
selfSigned: {}
|
||||
---
|
||||
# Development Certificate
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: coordinator-dev-tls
|
||||
namespace: default
|
||||
spec:
|
||||
secretName: coordinator-dev-tls
|
||||
dnsNames:
|
||||
- coordinator.local
|
||||
- coordinator.127.0.0.2.nip.io
|
||||
issuerRef:
|
||||
name: selfsigned-issuer
|
||||
kind: Issuer
|
||||
---
|
||||
# Production Certificate Template
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: coordinator-prod-tls
|
||||
namespace: default
|
||||
spec:
|
||||
secretName: coordinator-prod-tls
|
||||
dnsNames:
|
||||
- api.aitbc.io
|
||||
- www.api.aitbc.io
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
||||
56
infra/k8s/default-deny-netpol.yaml
Normal file
56
infra/k8s/default-deny-netpol.yaml
Normal file
@ -0,0 +1,56 @@
|
||||
# Default Deny All Network Policy
|
||||
# This policy denies all ingress and egress traffic by default
|
||||
# Individual services must have their own network policies to allow traffic
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: default-deny-all-ingress
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: default-deny-all-egress
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Egress
|
||||
---
|
||||
# Allow DNS resolution for all pods
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: allow-dns
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
- to: []
|
||||
ports:
|
||||
- protocol: UDP
|
||||
port: 53
|
||||
- protocol: TCP
|
||||
port: 53
|
||||
---
|
||||
# Allow traffic to Kubernetes API
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: allow-k8s-api
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
- to: []
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 443
|
||||
81
infra/k8s/sealed-secrets.yaml
Normal file
81
infra/k8s/sealed-secrets.yaml
Normal file
@ -0,0 +1,81 @@
|
||||
# SealedSecrets Controller Installation
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: sealed-secrets
|
||||
namespace: argocd
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
spec:
|
||||
project: default
|
||||
source:
|
||||
repoURL: https://bitnami-labs.github.io/sealed-secrets
|
||||
chart: sealed-secrets
|
||||
targetRevision: 2.15.0
|
||||
helm:
|
||||
releaseName: sealed-secrets
|
||||
parameters:
|
||||
- name: namespace
|
||||
value: kube-system
|
||||
destination:
|
||||
server: https://kubernetes.default.svc
|
||||
namespace: kube-system
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
selfHeal: true
|
||||
---
|
||||
# Example SealedSecret for Coordinator API Keys
|
||||
apiVersion: bitnami.com/v1alpha1
|
||||
kind: SealedSecret
|
||||
metadata:
|
||||
name: coordinator-api-keys
|
||||
namespace: default
|
||||
annotations:
|
||||
sealedsecrets.bitnami.com/cluster-wide: "true"
|
||||
spec:
|
||||
encryptedData:
|
||||
# Production API key (encrypted)
|
||||
api-key-prod: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
# Staging API key (encrypted)
|
||||
api-key-staging: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
# Development API key (encrypted)
|
||||
api-key-dev: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
template:
|
||||
metadata:
|
||||
name: coordinator-api-keys
|
||||
namespace: default
|
||||
type: Opaque
|
||||
---
|
||||
# Example SealedSecret for Database Credentials
|
||||
apiVersion: bitnami.com/v1alpha1
|
||||
kind: SealedSecret
|
||||
metadata:
|
||||
name: coordinator-db-credentials
|
||||
namespace: default
|
||||
spec:
|
||||
encryptedData:
|
||||
username: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
password: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
database: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
template:
|
||||
metadata:
|
||||
name: coordinator-db-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
---
|
||||
# Example SealedSecret for JWT Signing Keys (if needed in future)
|
||||
apiVersion: bitnami.com/v1alpha1
|
||||
kind: SealedSecret
|
||||
metadata:
|
||||
name: coordinator-jwt-keys
|
||||
namespace: default
|
||||
spec:
|
||||
encryptedData:
|
||||
private-key: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
public-key: AgBy3i4OJSWK+PiTySYZZA9rO43cGDEQAx...
|
||||
template:
|
||||
metadata:
|
||||
name: coordinator-jwt-keys
|
||||
namespace: default
|
||||
type: Opaque
|
||||
Reference in New Issue
Block a user