# Kubernetes ConfigMap providing AITBC backup scripts (PostgreSQL, Redis, ledger).
# From the introducing commit: adds Prometheus metrics for marketplace API
# throughput/error rates, confidential transaction models with encryption and
# access control, key management (registration, rotation, audit logging),
# services/registry routers for service discovery, and ZK proof generation
# for privacy-preserving receipts.
apiVersion: v1
kind: ConfigMap
metadata:
  name: backup-scripts
  namespace: default
  labels:
    app: aitbc-backup
    component: backup
data:
backup_postgresql.sh: |
|
|
#!/bin/bash
|
|
# PostgreSQL Backup Script for AITBC
|
|
# Usage: ./backup_postgresql.sh [namespace] [backup_name]
|
|
|
|
set -euo pipefail
|
|
|
|
# Configuration
|
|
NAMESPACE=${1:-default}
|
|
BACKUP_NAME=${2:-postgresql-backup-$(date +%Y%m%d_%H%M%S)}
|
|
BACKUP_DIR="/tmp/postgresql-backups"
|
|
RETENTION_DAYS=30
|
|
|
|
# Colors for output
|
|
RED='\033[0;31m'
|
|
GREEN='\033[0;32m'
|
|
YELLOW='\033[1;33m'
|
|
NC='\033[0m' # No Color
|
|
|
|
# Logging function
|
|
log() {
|
|
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
|
}
|
|
|
|
error() {
|
|
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
|
|
}
|
|
|
|
warn() {
|
|
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1"
|
|
}
|
|
|
|
# Check dependencies
|
|
check_dependencies() {
|
|
if ! command -v kubectl &> /dev/null; then
|
|
error "kubectl is not installed or not in PATH"
|
|
exit 1
|
|
fi
|
|
|
|
if ! command -v pg_dump &> /dev/null; then
|
|
error "pg_dump is not installed or not in PATH"
|
|
exit 1
|
|
fi
|
|
}
|
|
|
|
# Create backup directory
|
|
create_backup_dir() {
|
|
mkdir -p "$BACKUP_DIR"
|
|
log "Created backup directory: $BACKUP_DIR"
|
|
}
|
|
|
|
# Get PostgreSQL pod name
|
|
get_postgresql_pod() {
|
|
local pod=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=postgresql -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
|
if [[ -z "$pod" ]]; then
|
|
pod=$(kubectl get pods -n "$NAMESPACE" -l app=postgresql -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
|
fi
|
|
|
|
if [[ -z "$pod" ]]; then
|
|
error "Could not find PostgreSQL pod in namespace $NAMESPACE"
|
|
exit 1
|
|
fi
|
|
|
|
echo "$pod"
|
|
}
|
|
|
|
# Wait for PostgreSQL to be ready
|
|
wait_for_postgresql() {
|
|
local pod=$1
|
|
log "Waiting for PostgreSQL pod $pod to be ready..."
|
|
|
|
kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=300s
|
|
|
|
# Check if PostgreSQL is accepting connections
|
|
local retries=30
|
|
while [[ $retries -gt 0 ]]; do
|
|
if kubectl exec -n "$NAMESPACE" "$pod" -- pg_isready -U postgres >/dev/null 2>&1; then
|
|
log "PostgreSQL is ready"
|
|
return 0
|
|
fi
|
|
sleep 2
|
|
((retries--))
|
|
done
|
|
|
|
error "PostgreSQL did not become ready within timeout"
|
|
exit 1
|
|
}
|
|
|
|
# Perform backup
|
|
perform_backup() {
|
|
local pod=$1
|
|
local backup_file="$BACKUP_DIR/${BACKUP_NAME}.sql"
|
|
|
|
log "Starting PostgreSQL backup to $backup_file"
|
|
|
|
# Get database credentials from secret
|
|
local db_user=$(kubectl get secret -n "$NAMESPACE" coordinator-postgresql -o jsonpath='{.data.username}' 2>/dev/null | base64 -d || echo "postgres")
|
|
local db_password=$(kubectl get secret -n "$NAMESPACE" coordinator-postgresql -o jsonpath='{.data.password}' 2>/dev/null | base64 -d || echo "")
|
|
local db_name=$(kubectl get secret -n "$NAMESPACE" coordinator-postgresql -o jsonpath='{.data.database}' 2>/dev/null | base64 -d || echo "aitbc")
|
|
|
|
# Perform the backup
|
|
PGPASSWORD="$db_password" kubectl exec -n "$NAMESPACE" "$pod" -- \
|
|
pg_dump -U "$db_user" -h localhost -d "$db_name" \
|
|
--verbose --clean --if-exists --create --format=custom \
|
|
--file="/tmp/${BACKUP_NAME}.dump"
|
|
|
|
# Copy backup from pod
|
|
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}.dump" "$backup_file"
|
|
|
|
# Clean up remote backup file
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}.dump"
|
|
|
|
# Compress backup
|
|
gzip "$backup_file"
|
|
backup_file="${backup_file}.gz"
|
|
|
|
log "Backup completed: $backup_file"
|
|
|
|
# Verify backup
|
|
if [[ -f "$backup_file" ]] && [[ -s "$backup_file" ]]; then
|
|
local size=$(du -h "$backup_file" | cut -f1)
|
|
log "Backup size: $size"
|
|
else
|
|
error "Backup file is empty or missing"
|
|
exit 1
|
|
fi
|
|
}
|
|
|
|
# Clean old backups
|
|
cleanup_old_backups() {
|
|
log "Cleaning up backups older than $RETENTION_DAYS days"
|
|
find "$BACKUP_DIR" -name "*.sql.gz" -type f -mtime +$RETENTION_DAYS -delete
|
|
log "Cleanup completed"
|
|
}
|
|
|
|
# Upload to cloud storage (optional)
|
|
upload_to_cloud() {
|
|
local backup_file="$1"
|
|
|
|
# Check if AWS CLI is configured
|
|
if command -v aws &> /dev/null && aws sts get-caller-identity &>/dev/null; then
|
|
log "Uploading backup to S3"
|
|
local s3_bucket="aitbc-backups-${NAMESPACE}"
|
|
local s3_key="postgresql/$(basename "$backup_file")"
|
|
|
|
aws s3 cp "$backup_file" "s3://$s3_bucket/$s3_key" --storage-class GLACIER_IR
|
|
log "Backup uploaded to s3://$s3_bucket/$s3_key"
|
|
else
|
|
warn "AWS CLI not configured, skipping cloud upload"
|
|
fi
|
|
}
|
|
|
|
# Main execution
|
|
main() {
|
|
log "Starting PostgreSQL backup process"
|
|
|
|
check_dependencies
|
|
create_backup_dir
|
|
|
|
local pod=$(get_postgresql_pod)
|
|
wait_for_postgresql "$pod"
|
|
|
|
perform_backup "$pod"
|
|
cleanup_old_backups
|
|
|
|
local backup_file="$BACKUP_DIR/${BACKUP_NAME}.sql.gz"
|
|
upload_to_cloud "$backup_file"
|
|
|
|
log "PostgreSQL backup process completed successfully"
|
|
}
|
|
|
|
# Run main function
|
|
main "$@"
|
|
|
|
backup_redis.sh: |
|
|
#!/bin/bash
|
|
# Redis Backup Script for AITBC
|
|
# Usage: ./backup_redis.sh [namespace] [backup_name]
|
|
|
|
set -euo pipefail
|
|
|
|
# Configuration
|
|
NAMESPACE=${1:-default}
|
|
BACKUP_NAME=${2:-redis-backup-$(date +%Y%m%d_%H%M%S)}
|
|
BACKUP_DIR="/tmp/redis-backups"
|
|
RETENTION_DAYS=30
|
|
|
|
# Colors for output
|
|
RED='\033[0;31m'
|
|
GREEN='\033[0;32m'
|
|
YELLOW='\033[1;33m'
|
|
NC='\033[0m' # No Color
|
|
|
|
# Logging function
|
|
log() {
|
|
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
|
}
|
|
|
|
error() {
|
|
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
|
|
}
|
|
|
|
warn() {
|
|
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1"
|
|
}
|
|
|
|
# Check dependencies
|
|
check_dependencies() {
|
|
if ! command -v kubectl &> /dev/null; then
|
|
error "kubectl is not installed or not in PATH"
|
|
exit 1
|
|
fi
|
|
}
|
|
|
|
# Create backup directory
|
|
create_backup_dir() {
|
|
mkdir -p "$BACKUP_DIR"
|
|
log "Created backup directory: $BACKUP_DIR"
|
|
}
|
|
|
|
# Get Redis pod name
|
|
get_redis_pod() {
|
|
local pod=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=redis -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
|
if [[ -z "$pod" ]]; then
|
|
pod=$(kubectl get pods -n "$NAMESPACE" -l app=redis -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
|
|
fi
|
|
|
|
if [[ -z "$pod" ]]; then
|
|
error "Could not find Redis pod in namespace $NAMESPACE"
|
|
exit 1
|
|
fi
|
|
|
|
echo "$pod"
|
|
}
|
|
|
|
# Wait for Redis to be ready
|
|
wait_for_redis() {
|
|
local pod=$1
|
|
log "Waiting for Redis pod $pod to be ready..."
|
|
|
|
kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=300s
|
|
|
|
# Check if Redis is accepting connections
|
|
local retries=30
|
|
while [[ $retries -gt 0 ]]; do
|
|
if kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli ping 2>/dev/null | grep -q PONG; then
|
|
log "Redis is ready"
|
|
return 0
|
|
fi
|
|
sleep 2
|
|
((retries--))
|
|
done
|
|
|
|
error "Redis did not become ready within timeout"
|
|
exit 1
|
|
}
|
|
|
|
# Perform backup
|
|
perform_backup() {
|
|
local pod=$1
|
|
local backup_file="$BACKUP_DIR/${BACKUP_NAME}.rdb"
|
|
|
|
log "Starting Redis backup to $backup_file"
|
|
|
|
# Create Redis backup
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli BGSAVE
|
|
|
|
# Wait for background save to complete
|
|
log "Waiting for background save to complete..."
|
|
local retries=60
|
|
while [[ $retries -gt 0 ]]; do
|
|
local lastsave=$(kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli LASTSAVE)
|
|
local lastbgsave=$(kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli LASTSAVE)
|
|
|
|
if [[ "$lastsave" -gt "$lastbgsave" ]]; then
|
|
log "Background save completed"
|
|
break
|
|
fi
|
|
sleep 2
|
|
((retries--))
|
|
done
|
|
|
|
if [[ $retries -eq 0 ]]; then
|
|
error "Background save did not complete within timeout"
|
|
exit 1
|
|
fi
|
|
|
|
# Copy RDB file from pod
|
|
kubectl cp "$NAMESPACE/$pod:/data/dump.rdb" "$backup_file"
|
|
|
|
# Also create an append-only file backup if enabled
|
|
local aof_enabled=$(kubectl exec -n "$NAMESPACE" "$pod" -- redis-cli CONFIG GET appendonly | tail -1)
|
|
if [[ "$aof_enabled" == "yes" ]]; then
|
|
local aof_backup="$BACKUP_DIR/${BACKUP_NAME}.aof"
|
|
kubectl cp "$NAMESPACE/$pod:/data/appendonly.aof" "$aof_backup"
|
|
log "AOF backup created: $aof_backup"
|
|
fi
|
|
|
|
log "Backup completed: $backup_file"
|
|
|
|
# Verify backup
|
|
if [[ -f "$backup_file" ]] && [[ -s "$backup_file" ]]; then
|
|
local size=$(du -h "$backup_file" | cut -f1)
|
|
log "Backup size: $size"
|
|
else
|
|
error "Backup file is empty or missing"
|
|
exit 1
|
|
fi
|
|
}
|
|
|
|
# Clean old backups
|
|
cleanup_old_backups() {
|
|
log "Cleaning up backups older than $RETENTION_DAYS days"
|
|
find "$BACKUP_DIR" -name "*.rdb" -type f -mtime +$RETENTION_DAYS -delete
|
|
find "$BACKUP_DIR" -name "*.aof" -type f -mtime +$RETENTION_DAYS -delete
|
|
log "Cleanup completed"
|
|
}
|
|
|
|
# Upload to cloud storage (optional)
|
|
upload_to_cloud() {
|
|
local backup_file="$1"
|
|
|
|
# Check if AWS CLI is configured
|
|
if command -v aws &> /dev/null && aws sts get-caller-identity &>/dev/null; then
|
|
log "Uploading backup to S3"
|
|
local s3_bucket="aitbc-backups-${NAMESPACE}"
|
|
local s3_key="redis/$(basename "$backup_file")"
|
|
|
|
aws s3 cp "$backup_file" "s3://$s3_bucket/$s3_key" --storage-class GLACIER_IR
|
|
log "Backup uploaded to s3://$s3_bucket/$s3_key"
|
|
|
|
# Upload AOF file if exists
|
|
local aof_file="${backup_file%.rdb}.aof"
|
|
if [[ -f "$aof_file" ]]; then
|
|
local aof_key="redis/$(basename "$aof_file")"
|
|
aws s3 cp "$aof_file" "s3://$s3_bucket/$aof_key" --storage-class GLACIER_IR
|
|
log "AOF backup uploaded to s3://$s3_bucket/$aof_key"
|
|
fi
|
|
else
|
|
warn "AWS CLI not configured, skipping cloud upload"
|
|
fi
|
|
}
|
|
|
|
# Main execution
|
|
main() {
|
|
log "Starting Redis backup process"
|
|
|
|
check_dependencies
|
|
create_backup_dir
|
|
|
|
local pod=$(get_redis_pod)
|
|
wait_for_redis "$pod"
|
|
|
|
perform_backup "$pod"
|
|
cleanup_old_backups
|
|
|
|
local backup_file="$BACKUP_DIR/${BACKUP_NAME}.rdb"
|
|
upload_to_cloud "$backup_file"
|
|
|
|
log "Redis backup process completed successfully"
|
|
}
|
|
|
|
# Run main function
|
|
main "$@"
|
|
|
|
backup_ledger.sh: |
|
|
#!/bin/bash
|
|
# Ledger Storage Backup Script for AITBC
|
|
# Usage: ./backup_ledger.sh [namespace] [backup_name]
|
|
|
|
set -euo pipefail
|
|
|
|
# Configuration
|
|
NAMESPACE=${1:-default}
|
|
BACKUP_NAME=${2:-ledger-backup-$(date +%Y%m%d_%H%M%S)}
|
|
BACKUP_DIR="/tmp/ledger-backups"
|
|
RETENTION_DAYS=30
|
|
|
|
# Colors for output
|
|
RED='\033[0;31m'
|
|
GREEN='\033[0;32m'
|
|
YELLOW='\033[1;33m'
|
|
NC='\033[0m' # No Color
|
|
|
|
# Logging function
|
|
log() {
|
|
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
|
|
}
|
|
|
|
error() {
|
|
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR:${NC} $1" >&2
|
|
}
|
|
|
|
warn() {
|
|
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING:${NC} $1"
|
|
}
|
|
|
|
# Check dependencies
|
|
check_dependencies() {
|
|
if ! command -v kubectl &> /dev/null; then
|
|
error "kubectl is not installed or not in PATH"
|
|
exit 1
|
|
fi
|
|
}
|
|
|
|
# Create backup directory
|
|
create_backup_dir() {
|
|
mkdir -p "$BACKUP_DIR"
|
|
log "Created backup directory: $BACKUP_DIR"
|
|
}
|
|
|
|
# Get blockchain node pods
|
|
get_blockchain_pods() {
|
|
local pods=$(kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=blockchain-node -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")
|
|
if [[ -z "$pods" ]]; then
|
|
pods=$(kubectl get pods -n "$NAMESPACE" -l app=blockchain-node -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")
|
|
fi
|
|
|
|
if [[ -z "$pods" ]]; then
|
|
error "Could not find blockchain node pods in namespace $NAMESPACE"
|
|
exit 1
|
|
fi
|
|
|
|
echo $pods
|
|
}
|
|
|
|
# Wait for blockchain node to be ready
|
|
wait_for_blockchain_node() {
|
|
local pod=$1
|
|
log "Waiting for blockchain node pod $pod to be ready..."
|
|
|
|
kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=300s
|
|
|
|
# Check if node is responding
|
|
local retries=30
|
|
while [[ $retries -gt 0 ]]; do
|
|
if kubectl exec -n "$NAMESPACE" "$pod" -- curl -s http://localhost:8080/v1/health >/dev/null 2>&1; then
|
|
log "Blockchain node is ready"
|
|
return 0
|
|
fi
|
|
sleep 2
|
|
((retries--))
|
|
done
|
|
|
|
error "Blockchain node did not become ready within timeout"
|
|
exit 1
|
|
}
|
|
|
|
# Backup ledger data
|
|
backup_ledger_data() {
|
|
local pod=$1
|
|
local ledger_backup_dir="$BACKUP_DIR/${BACKUP_NAME}"
|
|
mkdir -p "$ledger_backup_dir"
|
|
|
|
log "Starting ledger backup from pod $pod"
|
|
|
|
# Get the latest block height before backup
|
|
local latest_block=$(kubectl exec -n "$NAMESPACE" "$pod" -- curl -s http://localhost:8080/v1/blocks/head | jq -r '.height // 0')
|
|
log "Latest block height: $latest_block"
|
|
|
|
# Backup blockchain data directory
|
|
local blockchain_data_dir="/app/data/chain"
|
|
if kubectl exec -n "$NAMESPACE" "$pod" -- test -d "$blockchain_data_dir"; then
|
|
log "Backing up blockchain data directory..."
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf "/tmp/${BACKUP_NAME}-chain.tar.gz" -C "$blockchain_data_dir" .
|
|
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}-chain.tar.gz" "$ledger_backup_dir/chain.tar.gz"
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}-chain.tar.gz"
|
|
fi
|
|
|
|
# Backup wallet data
|
|
local wallet_data_dir="/app/data/wallets"
|
|
if kubectl exec -n "$NAMESPACE" "$pod" -- test -d "$wallet_data_dir"; then
|
|
log "Backing up wallet data directory..."
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf "/tmp/${BACKUP_NAME}-wallets.tar.gz" -C "$wallet_data_dir" .
|
|
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}-wallets.tar.gz" "$ledger_backup_dir/wallets.tar.gz"
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}-wallets.tar.gz"
|
|
fi
|
|
|
|
# Backup receipts
|
|
local receipts_data_dir="/app/data/receipts"
|
|
if kubectl exec -n "$NAMESPACE" "$pod" -- test -d "$receipts_data_dir"; then
|
|
log "Backing up receipts directory..."
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- tar -czf "/tmp/${BACKUP_NAME}-receipts.tar.gz" -C "$receipts_data_dir" .
|
|
kubectl cp "$NAMESPACE/$pod:/tmp/${BACKUP_NAME}-receipts.tar.gz" "$ledger_backup_dir/receipts.tar.gz"
|
|
kubectl exec -n "$NAMESPACE" "$pod" -- rm -f "/tmp/${BACKUP_NAME}-receipts.tar.gz"
|
|
fi
|
|
|
|
# Create metadata file
|
|
cat > "$ledger_backup_dir/metadata.json" << EOF
|
|
{
|
|
"backup_name": "$BACKUP_NAME",
|
|
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
|
"namespace": "$NAMESPACE",
|
|
"source_pod": "$pod",
|
|
"latest_block_height": $latest_block,
|
|
"backup_type": "full"
|
|
}
|
|
EOF
|
|
|
|
log "Ledger backup completed: $ledger_backup_dir"
|
|
|
|
# Verify backup
|
|
local total_size=$(du -sh "$ledger_backup_dir" | cut -f1)
|
|
log "Total backup size: $total_size"
|
|
}
|
|
|
|
# Clean old backups
|
|
cleanup_old_backups() {
|
|
log "Cleaning up backups older than $RETENTION_DAYS days"
|
|
find "$BACKUP_DIR" -maxdepth 1 -type d -name "ledger-backup-*" -mtime +$RETENTION_DAYS -exec rm -rf {} \;
|
|
find "$BACKUP_DIR" -name "*-incremental.json" -type f -mtime +$RETENTION_DAYS -delete
|
|
log "Cleanup completed"
|
|
}
|
|
|
|
# Upload to cloud storage (optional)
|
|
upload_to_cloud() {
|
|
local backup_dir="$1"
|
|
|
|
# Check if AWS CLI is configured
|
|
if command -v aws &> /dev/null && aws sts get-caller-identity &>/dev/null; then
|
|
log "Uploading backup to S3"
|
|
local s3_bucket="aitbc-backups-${NAMESPACE}"
|
|
|
|
# Upload entire backup directory
|
|
aws s3 cp "$backup_dir" "s3://$s3_bucket/ledger/$(basename "$backup_dir")/" --recursive --storage-class GLACIER_IR
|
|
|
|
log "Backup uploaded to s3://$s3_bucket/ledger/$(basename "$backup_dir")/"
|
|
else
|
|
warn "AWS CLI not configured, skipping cloud upload"
|
|
fi
|
|
}
|
|
|
|
# Main execution
|
|
main() {
|
|
log "Starting ledger backup process"
|
|
|
|
check_dependencies
|
|
create_backup_dir
|
|
|
|
local pods=($(get_blockchain_pods))
|
|
|
|
# Use the first ready pod for backup
|
|
for pod in "${pods[@]}"; do
|
|
if kubectl wait --for=condition=ready pod "$pod" -n "$NAMESPACE" --timeout=10s >/dev/null 2>&1; then
|
|
wait_for_blockchain_node "$pod"
|
|
backup_ledger_data "$pod"
|
|
|
|
local backup_dir="$BACKUP_DIR/${BACKUP_NAME}"
|
|
upload_to_cloud "$backup_dir"
|
|
|
|
break
|
|
fi
|
|
done
|
|
|
|
cleanup_old_backups
|
|
|
|
log "Ledger backup process completed successfully"
|
|
}
|
|
|
|
# Run main function
|
|
main "$@"
|