chore(security): enhance environment configuration, CI workflows, and wallet daemon with security improvements
- Restructure .env.example with security-focused documentation, service-specific environment file references, and AWS Secrets Manager integration - Update CLI tests workflow to single Python 3.13 version, add pytest-mock dependency, and consolidate test execution with coverage - Add comprehensive security validation to package publishing workflow with manual approval gates, secret scanning, and release
This commit is contained in:
75
config/.pre-commit-config.yaml
Normal file
75
config/.pre-commit-config.yaml
Normal file
@@ -0,0 +1,75 @@
|
||||
repos:
  # Generic hygiene checks (whitespace, file formats, merge markers).
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
      - id: check-json
      - id: check-toml
      - id: check-merge-conflict
      - id: debug-statements
      - id: check-docstring-first

  # Code formatting.
  - repo: https://github.com/psf/black
    rev: 24.3.0
    hooks:
      - id: black
        language_version: python3.13
        args: [--line-length=88]

  # Linting with autofix.
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: v0.1.15
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
        additional_dependencies:
          - ruff==0.1.15

  # Static type checking.
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.8.0
    hooks:
      - id: mypy
        additional_dependencies:
          - types-requests
          - types-setuptools
          - types-PyYAML
          - sqlalchemy[mypy]
        args: [--ignore-missing-imports, --strict-optional]

  # Import ordering (black-compatible profile).
  - repo: https://github.com/pycqa/isort
    rev: 5.13.2
    hooks:
      - id: isort
        args: [--profile=black, --line-length=88]

  # Security static analysis.
  - repo: https://github.com/PyCQA/bandit
    rev: 1.7.5
    hooks:
      - id: bandit
        # FIX: the bandit config added by this commit lives at
        # config/bandit.toml, not at the repository root where hooks run,
        # so the -c path must include the config/ prefix.
        args: [-c, config/bandit.toml]
        additional_dependencies:
          # FIX: the [toml] extra is required for bandit to read a
          # TOML-format config file.
          - bandit[toml]==1.7.5

  # Secret scanning against a committed baseline.
  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.4.0
    hooks:
      - id: detect-secrets
        args: [--baseline, .secrets.baseline]

  # Repository-local hooks.
  - repo: local
    hooks:
      - id: dotenv-linter
        name: dotenv-linter
        entry: python scripts/focused_dotenv_linter.py
        language: system
        pass_filenames: false
        args: [--check]
        # Quoted so YAML never misreads the leading backslash or pipes.
        files: '\.env\.example$|.*\.py$|.*\.yml$|.*\.yaml$|.*\.toml$|.*\.sh$'

      - id: file-organization
        name: file-organization
        entry: scripts/check-file-organization.sh
        language: script
        pass_filenames: false
|
||||
324
config/bandit.toml
Normal file
324
config/bandit.toml
Normal file
@@ -0,0 +1,324 @@
|
||||
[bandit]
# Directories excluded from security scanning: tests, virtualenvs,
# caches and build output are not shipped application code.
exclude_dirs = [
    "tests",
    "test_*",
    "*_test.py",
    ".venv",
    "venv",
    "env",
    "__pycache__",
    ".pytest_cache",
    "htmlcov",
    ".mypy_cache",
    "build",
    "dist",
]

# FIX: the previous revision skipped essentially every bandit check
# (B101 through B703), which silently disabled the security scan this
# configuration exists to provide, and most of the listed IDs
# (B326-B600) are not real bandit test IDs at all.  Only deliberate,
# justified suppressions remain:
skips = [
    "B101",  # assert_used - asserts are acceptable outside production paths
    "B404",  # import_subprocess - subprocess is used intentionally
    "B603",  # subprocess_without_shell_equals_true - args are not shell-interpolated
    "B607",  # start_process_with_partial_path - tools are resolved via PATH on purpose
]

# NOTE(review): the previous `tests = ["tests/", ...]` entry listed
# directory names, but in bandit configuration `tests` selects which
# test IDs to RUN, not which paths to scan - it was removed rather
# than silently restricting the scan to nothing.
#
# NOTE(review): `severity_level`, `confidence_level`, `output_format`,
# `output_file`, `number_of_processes`, `recursive`, `include_tests`
# and `baseline` are command-line options, not config-file keys, and
# `baseline = null` was invalid TOML (TOML has no null value type).
# Pass those on the CLI instead, e.g.:
#   bandit -c config/bandit.toml -r . -f json -o bandit-report.json
|
||||
41
config/edge-node-example.yaml
Normal file
41
config/edge-node-example.yaml
Normal file
@@ -0,0 +1,41 @@
|
||||
# Edge Node Configuration - Example (minimal template)
edge_node_config:
  # Identity and placement of this node.
  node_id: "edge-node-example"
  region: "us-east"
  location: "example-datacenter"

  # Services exposed by the node and their liveness endpoints.
  services:
    - name: "marketplace-api"
      port: 8000
      enabled: true
      health_check: "/health/live"

  # Network capabilities.
  network:
    bandwidth_mbps: 500
    ipv6_support: true
    latency_optimization: true

  # Hardware budget available to the node.
  resources:
    cpu_cores: 4
    memory_gb: 16
    storage_gb: 200
    gpu_access: false  # set true if GPU available

  # Baseline hardening switches.
  security:
    firewall_enabled: true
    rate_limiting: true
    ssl_termination: true

  # Observability settings.
  monitoring:
    metrics_enabled: true
    health_check_interval: 30
    log_level: "info"

  # How traffic is distributed to this node.
  load_balancing:
    algorithm: "round_robin"
    weight: 1

  # SLO targets used by monitoring/alerting.
  performance_targets:
    response_time_ms: 100
    throughput_rps: 200
    error_rate: 0.01
|
||||
57
config/environments/production/coordinator.env.template
Normal file
57
config/environments/production/coordinator.env.template
Normal file
@@ -0,0 +1,57 @@
|
||||
# Coordinator API - Production Environment Template
# DO NOT commit actual values - use AWS Secrets Manager in production

# =============================================================================
# CORE APPLICATION CONFIGURATION
# =============================================================================
APP_ENV=production
DEBUG=false
LOG_LEVEL=WARN

# Database Configuration (use AWS RDS in production)
# FIX: the previous placeholder (postgresql://user:pass@host:5432/database)
# put literal credentials in a production template, and the project's own
# auditor (config/security/environment-audit.py) flags any DATABASE_URL
# value that does not use a secretRef.  Reference the managed secret:
DATABASE_URL=secretRef:db-credentials:url

# =============================================================================
# API CONFIGURATION
# =============================================================================
# API Keys (use AWS Secrets Manager)
ADMIN_API_KEY=secretRef:api-keys:admin
CLIENT_API_KEY=secretRef:api-keys:client
MINER_API_KEY=secretRef:api-keys:miner
AITBC_API_KEY=secretRef:api-keys:coordinator

# API URLs
API_URL=https://api.aitbc.bubuit.net
COORDINATOR_URL=https://api.aitbc.bubuit.net
COORDINATOR_HEALTH_URL=https://api.aitbc.bubuit.net/health

# =============================================================================
# SECURITY CONFIGURATION
# =============================================================================
# Security Keys (use AWS Secrets Manager)
ENCRYPTION_KEY=secretRef:security-keys:encryption
HMAC_SECRET=secretRef:security-keys:hmac
JWT_SECRET=secretRef:security-keys:jwt

# =============================================================================
# BLOCKCHAIN CONFIGURATION
# =============================================================================
# Mainnet RPC URLs (use secure endpoints)
ETHEREUM_RPC_URL=https://mainnet.infura.io/v3/YOUR_PROJECT_ID
POLYGON_RPC_URL=https://polygon-rpc.com
ARBITRUM_RPC_URL=https://arb1.arbitrum.io/rpc
OPTIMISM_RPC_URL=https://mainnet.optimism.io

# =============================================================================
# EXTERNAL SERVICES
# =============================================================================
# AI/ML Services (use production keys)
OPENAI_API_KEY=secretRef:external-services:openai
GOOGLE_PROJECT_ID=secretRef:external-services:google-project

# =============================================================================
# MONITORING
# =============================================================================
# Sentry (use production DSN)
SENTRY_DSN=secretRef:monitoring:sentry
|
||||
45
config/environments/production/wallet-daemon.env.template
Normal file
45
config/environments/production/wallet-daemon.env.template
Normal file
@@ -0,0 +1,45 @@
|
||||
# Wallet Daemon - Production Environment Template
# DO NOT commit actual values - use AWS Secrets Manager in production

# =============================================================================
# CORE APPLICATION CONFIGURATION
# =============================================================================
APP_ENV=production
DEBUG=false
LOG_LEVEL=WARN

# =============================================================================
# SERVICE CONFIGURATION
# =============================================================================
# Coordinator Integration - base URL plus the API key secret reference.
COORDINATOR_BASE_URL=https://api.aitbc.bubuit.net
COORDINATOR_API_KEY=secretRef:api-keys:coordinator

# REST API Configuration - route prefix for the daemon's HTTP API.
REST_PREFIX=/v1

# =============================================================================
# DATABASE CONFIGURATION
# =============================================================================
# Ledger Database Path (use persistent storage)
LEDGER_DB_PATH=/data/wallet_ledger.db

# =============================================================================
# SECURITY CONFIGURATION
# =============================================================================
# Rate Limiting (production values) - max requests per window, window in seconds.
WALLET_RATE_LIMIT=30
WALLET_RATE_WINDOW=60

# =============================================================================
# MONITORING
# =============================================================================
# Health Check Configuration - probe interval in seconds.
HEALTH_CHECK_INTERVAL=30

# =============================================================================
# CLUSTER CONFIGURATION
# =============================================================================
# Kubernetes Settings - namespace and service identity for this deployment.
POD_NAMESPACE=aitbc
SERVICE_NAME=wallet-daemon
|
||||
279
config/security/environment-audit.py
Normal file
279
config/security/environment-audit.py
Normal file
@@ -0,0 +1,279 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Environment Configuration Security Auditor
|
||||
Validates environment files against security rules
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Any
|
||||
|
||||
|
||||
class EnvironmentAuditor:
    """Audits environment configuration files for security issues.

    Findings are dicts with keys ``file``, ``level`` (CRITICAL / HIGH /
    MEDIUM / LOW, or ERROR for unreadable files), ``message`` and,
    where known, ``line``.
    """

    def __init__(self, config_dir: Path = None):
        # Default to the config/ directory this script lives under
        # (config/security/ -> config/).
        self.config_dir = config_dir or Path(__file__).parent.parent
        self.validation_rules = self._load_validation_rules()
        self.issues: List[Dict[str, Any]] = []

    def _load_validation_rules(self) -> Dict[str, Any]:
        """Load secret validation rules from security/secret-validation.yaml.

        Returns an empty dict when the rules file is absent or empty so
        auditing still runs with the built-in checks only.
        """
        rules_file = self.config_dir / "security" / "secret-validation.yaml"
        if rules_file.exists():
            with open(rules_file) as f:
                # FIX: safe_load returns None for an empty file, which would
                # break the .get() calls in audit_environment_file.
                return yaml.safe_load(f) or {}
        return {}

    def _scan_patterns(self, env_file: Path, content: str, patterns,
                       level: str, message_fmt: str,
                       issues: List[Dict[str, Any]], flags=re.IGNORECASE):
        """Append one finding per pattern in `patterns` that matches content.

        `message_fmt` must contain one `{}` placeholder for the pattern.
        """
        for pattern in patterns:
            if re.search(pattern, content, flags):
                issues.append({
                    "file": str(env_file),
                    "level": level,
                    "message": message_fmt.format(pattern),
                    "line": self._find_pattern_line(content, pattern),
                })

    def audit_environment_file(self, env_file: Path) -> List[Dict[str, Any]]:
        """Audit a single environment file and return its findings."""
        if not env_file.exists():
            return [{"file": str(env_file), "level": "ERROR", "message": "File does not exist"}]

        with open(env_file) as f:
            content = f.read()

        issues: List[Dict[str, Any]] = []
        is_production = "production" in str(env_file)

        # Rule-file driven patterns: general ones always, production-only
        # ones when the path indicates a production config.
        self._scan_patterns(
            env_file, content,
            self.validation_rules.get("forbidden_patterns", []),
            "CRITICAL", "Forbidden pattern detected: {}", issues)
        if is_production:
            self._scan_patterns(
                env_file, content,
                self.validation_rules.get("production_forbidden_patterns", []),
                "CRITICAL", "Production forbidden pattern: {}", issues)

        # Placeholder secrets left over from templates.
        self._scan_patterns(
            env_file, content,
            [r"your-.*-key-here", r"change-this-.*", r"your-.*-password"],
            "HIGH", "Template secret found: {}", issues)

        # Local-only endpoints must never appear in production configs.
        # (Case-sensitive, matching the original behavior.)
        if is_production:
            self._scan_patterns(
                env_file, content,
                [r"localhost", r"127\.0\.0\.1", r"sqlite://"],
                "HIGH", "Localhost reference in production: {}", issues, flags=0)

        # Secret-looking keys must use secretRef: indirection.
        for lineno, raw in enumerate(content.split('\n'), 1):
            if '=' in raw and not raw.strip().startswith('#'):
                key, value = raw.split('=', 1)
                key = key.strip()
                if self._should_be_secret(key) and not value.strip().startswith('secretRef:'):
                    issues.append({
                        "file": str(env_file),
                        "level": "MEDIUM",
                        "message": f"Potential secret not using secretRef: {key}",
                        "line": lineno,
                    })

        return issues

    def _should_be_secret(self, key: str) -> bool:
        """Return True if the key name suggests the value is a secret."""
        secret_keywords = [
            'key', 'secret', 'password', 'token', 'credential',
            'api_key', 'encryption_key', 'hmac_secret', 'jwt_secret',
            'dsn', 'database_url'
        ]
        return any(keyword in key.lower() for keyword in secret_keywords)

    def _find_pattern_line(self, content: str, pattern: str) -> int:
        """Return the 1-based line number of the first match, or 0."""
        for i, line in enumerate(content.split('\n'), 1):
            if re.search(pattern, line, re.IGNORECASE):
                return i
        return 0

    def audit_all_environments(self) -> Dict[str, List[Dict[str, Any]]]:
        """Audit all environment files; returns {path: findings} for
        files that produced at least one finding."""
        results: Dict[str, List[Dict[str, Any]]] = {}

        # Check environments directory recursively.
        env_dir = self.config_dir / "environments"
        if env_dir.exists():
            for env_file in env_dir.rglob("*.env*"):
                if env_file.is_file():
                    issues = self.audit_environment_file(env_file)
                    if issues:
                        results[str(env_file)] = issues

        # Check root directory .env files (never the live .env itself).
        # FIX: ".env*" also matches ".env.example"; track seen files so
        # each is audited only once.
        root_dir = self.config_dir.parent
        seen = set()
        for pattern in (".env.example", ".env*"):
            for env_file in root_dir.glob(pattern):
                if env_file.is_file() and env_file.name != ".env" and env_file not in seen:
                    seen.add(env_file)
                    issues = self.audit_environment_file(env_file)
                    if issues:
                        results[str(env_file)] = issues

        return results

    def _count_severities(self, results: Dict[str, List[Dict[str, Any]]]) -> Tuple[Dict[str, int], int]:
        """Tally findings by level.

        FIX: the previous implementation indexed a fixed dict and raised
        KeyError for any level outside {CRITICAL, HIGH, MEDIUM, LOW} -
        audit_environment_file itself emits ERROR findings for missing
        files.  Unknown levels are now counted too.
        """
        counts = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0}
        total = 0
        for file_issues in results.values():
            for issue in file_issues:
                level = issue["level"]
                counts[level] = counts.get(level, 0) + 1
                total += 1
        return counts, total

    def generate_report(self) -> Dict[str, Any]:
        """Generate the full security report: summary, findings, advice."""
        results = self.audit_all_environments()
        severity_counts, total_issues = self._count_severities(results)

        return {
            "summary": {
                "total_issues": total_issues,
                "files_audited": len(results),
                "severity_breakdown": severity_counts
            },
            "issues": results,
            "recommendations": self._generate_recommendations(severity_counts)
        }

    def _generate_recommendations(self, severity_counts: Dict[str, int]) -> List[str]:
        """Map the severity tallies to human-readable next steps."""
        recommendations = []

        if severity_counts["CRITICAL"] > 0:
            recommendations.append("CRITICAL: Fix forbidden patterns immediately")
        if severity_counts["HIGH"] > 0:
            recommendations.append("HIGH: Remove template secrets and localhost references")
        if severity_counts["MEDIUM"] > 0:
            recommendations.append("MEDIUM: Use secretRef for all sensitive values")
        if severity_counts["LOW"] > 0:
            recommendations.append("LOW: Review and improve configuration structure")

        if not any(severity_counts.values()):
            recommendations.append("✅ No security issues found")

        return recommendations
|
||||
|
||||
|
||||
def main():
    """Command-line entry point: audit environment configs, emit a report,
    and exit non-zero when any issue is found (for CI gating)."""
    import argparse

    parser = argparse.ArgumentParser(description="Audit environment configurations")
    parser.add_argument("--config-dir", help="Configuration directory path")
    parser.add_argument("--output", help="Output report to file")
    parser.add_argument("--format", choices=["json", "yaml", "text"], default="json", help="Report format")
    args = parser.parse_args()

    config_dir = Path(args.config_dir) if args.config_dir else None
    report = EnvironmentAuditor(config_dir).generate_report()

    # Render the report in the requested format.
    if args.format == "json":
        import json
        rendered = json.dumps(report, indent=2)
    elif args.format == "yaml":
        rendered = yaml.dump(report, default_flow_style=False)
    else:
        rendered = format_text_report(report)

    # Write to a file when requested, otherwise to stdout.
    if args.output:
        with open(args.output, 'w') as f:
            f.write(rendered)
        print(f"Report saved to {args.output}")
    else:
        print(rendered)

    # Non-zero exit signals callers (e.g. CI) that issues were found.
    if report["summary"]["total_issues"] > 0:
        sys.exit(1)
|
||||
|
||||
|
||||
def format_text_report(report: Dict[str, Any]) -> str:
    """Render an audit report dict as a human-readable text block.

    Expects the structure produced by EnvironmentAuditor.generate_report:
    ``summary`` (counts), ``issues`` (per-file findings), ``recommendations``.
    """
    banner = "=" * 60
    divider = "-" * 40
    out: List[str] = [banner, "ENVIRONMENT SECURITY AUDIT REPORT", banner, ""]

    # Summary counts.
    summary = report["summary"]
    out.append(f"Files Audited: {summary['files_audited']}")
    out.append(f"Total Issues: {summary['total_issues']}")
    out.append("")

    # Severity breakdown (only non-zero levels are shown).
    out.append("Severity Breakdown:")
    for severity, count in summary["severity_breakdown"].items():
        if count > 0:
            out.append(f"  {severity}: {count}")
    out.append("")

    # Per-file findings.
    if report["issues"]:
        out.append("ISSUES FOUND:")
        out.append(divider)
        for file_path, file_issues in report["issues"].items():
            out.append(f"\n📁 {file_path}")
            for issue in file_issues:
                out.append(f"  {issue['level']}: {issue['message']}")
                if issue.get('line'):
                    out.append(f"    Line: {issue['line']}")

    # Recommended next steps.
    out.append("\nRECOMMENDATIONS:")
    out.append(divider)
    for rec in report["recommendations"]:
        out.append(f"• {rec}")

    return "\n".join(out)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
283
config/security/helm-values-audit.py
Normal file
283
config/security/helm-values-audit.py
Normal file
@@ -0,0 +1,283 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Helm Values Security Auditor
|
||||
Validates Helm values files for proper secret references
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Any
|
||||
|
||||
|
||||
class HelmValuesAuditor:
|
||||
"""Audits Helm values files for security issues"""
|
||||
|
||||
def __init__(self, helm_dir: Path = None):
    """Remember where the Helm charts live.

    Defaults to <repo>/infra/helm relative to this script
    (config/security/ -> repo root -> infra/helm).
    """
    default_dir = Path(__file__).parent.parent.parent / "infra" / "helm"
    self.helm_dir = helm_dir or default_dir
    # Accumulated findings across audit runs.
    self.issues: List[Dict[str, Any]] = []
|
||||
|
||||
def audit_helm_values_file(self, values_file: Path) -> List[Dict[str, Any]]:
    """Audit one Helm values file; returns a list of finding dicts.

    A missing file or unparseable YAML yields a single ERROR finding
    rather than raising.
    """
    if not values_file.exists():
        return [{"file": str(values_file), "level": "ERROR", "message": "File does not exist"}]

    with open(values_file) as fh:
        try:
            parsed = yaml.safe_load(fh)
        except yaml.YAMLError as e:
            return [{"file": str(values_file), "level": "ERROR", "message": f"YAML parsing error: {e}"}]

    findings: List[Dict[str, Any]] = []
    # Walk the whole values tree looking for secret-like string leaves.
    self._check_secrets_recursive(parsed, "", values_file, findings)
    return findings
|
||||
|
||||
def _check_secrets_recursive(self, obj: Any, path: str, file_path: Path, issues: List[Dict[str, Any]]):
    """Depth-first walk of a parsed values tree.

    Appends a HIGH finding for every string leaf whose key/value looks
    like a secret (per self._is_potential_secret) but does not use the
    ``secretRef:`` indirection.  ``path`` is the dotted/indexed location
    used in the finding message.
    """
    if isinstance(obj, dict):
        for key, value in obj.items():
            child_path = f"{path}.{key}" if path else key
            if (isinstance(value, str)
                    and self._is_potential_secret(key, value)
                    and not value.startswith('secretRef:')):
                issues.append({
                    "file": str(file_path),
                    "level": "HIGH",
                    "message": f"Potential secret not using secretRef: {child_path}",
                    "value": value,
                    "suggestion": "Use secretRef:secret-name:key",
                })
            # Descend into every value (strings recurse harmlessly: they
            # are neither dict nor list).
            self._check_secrets_recursive(value, child_path, file_path, issues)
    elif isinstance(obj, list):
        for idx, item in enumerate(obj):
            child_path = f"{path}[{idx}]" if path else f"[{idx}]"
            self._check_secrets_recursive(item, child_path, file_path, issues)
|
||||
|
||||
def _is_potential_secret(self, key: str, value: str) -> bool:
|
||||
"""Check if a key-value pair represents a potential secret"""
|
||||
|
||||
# Skip Kubernetes built-in values
|
||||
kubernetes_builtins = [
|
||||
'topology.kubernetes.io/zone',
|
||||
'topology.kubernetes.io/region',
|
||||
'kubernetes.io/hostname',
|
||||
'app.kubernetes.io/name'
|
||||
]
|
||||
|
||||
if value in kubernetes_builtins:
|
||||
return False
|
||||
|
||||
# Skip common non-secret values
|
||||
non_secret_values = [
|
||||
'warn', 'info', 'debug', 'error',
|
||||
'admin', 'user', 'postgres',
|
||||
'http://prometheus-server:9090',
|
||||
'http://127.0.0.1:5001/',
|
||||
'stable', 'latest', 'IfNotPresent',
|
||||
'db-credentials', 'redis-credentials',
|
||||
'aitbc', 'coordinator', 'postgresql'
|
||||
]
|
||||
|
||||
if value in non_secret_values:
|
||||
return False
|
||||
|
||||
# Skip Helm chart specific configurations
|
||||
helm_config_keys = [
|
||||
'existingSecret', 'existingSecretPassword',
|
||||
'serviceAccountName', 'serviceAccount.create',
|
||||
'ingress.enabled', 'networkPolicy.enabled',
|
||||
'podSecurityPolicy.enabled', 'autoscaling.enabled'
|
||||
]
|
||||
|
||||
if key in helm_config_keys:
|
||||
return False
|
||||
|
||||
# Check key patterns for actual secrets
|
||||
secret_key_patterns = [
|
||||
r'.*password$', r'.*secret$', r'.*token$',
|
||||
r'.*credential$', r'.*dsn$',
|
||||
r'database_url', r'api_key', r'encryption_key', r'hmac_secret',
|
||||
r'jwt_secret', r'private_key', r'adminPassword'
|
||||
]
|
||||
|
||||
key_lower = key.lower()
|
||||
value_lower = value.lower()
|
||||
|
||||
# Check if key suggests it's a secret
|
||||
for pattern in secret_key_patterns:
|
||||
if re.match(pattern, key_lower):
|
||||
return True
|
||||
|
||||
# Check if value looks like a secret (more strict)
|
||||
secret_value_patterns = [
|
||||
r'^postgresql://.*:.*@', # PostgreSQL URLs with credentials
|
||||
r'^mysql://.*:.*@', # MySQL URLs with credentials
|
||||
r'^mongodb://.*:.*@', # MongoDB URLs with credentials
|
||||
r'^sk-[a-zA-Z0-9]{48}', # Stripe keys
|
||||
r'^ghp_[a-zA-Z0-9]{36}', # GitHub personal access tokens
|
||||
r'^xoxb-[0-9]+-[0-9]+-[a-zA-Z0-9]{24}', # Slack bot tokens
|
||||
r'^[a-fA-F0-9]{64}$', # 256-bit hex keys
|
||||
r'^[a-zA-Z0-9+/]{40,}={0,2}$', # Base64 encoded secrets
|
||||
]
|
||||
|
||||
for pattern in secret_value_patterns:
|
||||
if re.match(pattern, value):
|
||||
return True
|
||||
|
||||
# Check for actual secrets in value (more strict)
|
||||
if len(value) > 20 and any(indicator in value_lower for indicator in ['password', 'secret', 'key', 'token']):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def audit_all_helm_values(self) -> Dict[str, List[Dict[str, Any]]]:
    """Audit every ``values*.yaml`` file under the Helm directory.

    Returns:
        Mapping of file path -> issue list, containing only the files
        for which at least one issue was found.
    """
    results: Dict[str, List[Dict[str, Any]]] = {}

    # rglob matches values.yaml plus variants such as values-prod.yaml.
    for candidate in self.helm_dir.rglob("values*.yaml"):
        if not candidate.is_file():
            continue
        findings = self.audit_helm_values_file(candidate)
        if findings:
            results[str(candidate)] = findings

    return results
def generate_report(self) -> Dict[str, Any]:
    """Generate a comprehensive security report over all Helm values files.

    Returns:
        Dict with a "summary" (totals and severity breakdown), the
        per-file "issues", and human-readable "recommendations".
    """
    results = self.audit_all_helm_values()

    # Count issues by severity. audit_helm_values_file also emits "ERROR"
    # entries (missing file / bad YAML), so fall back to .get() for levels
    # outside the four standard buckets instead of raising KeyError.
    severity_counts = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0}
    total_issues = 0

    for file_issues in results.values():
        for issue in file_issues:
            severity = issue["level"]
            severity_counts[severity] = severity_counts.get(severity, 0) + 1
            total_issues += 1

    return {
        "summary": {
            "total_issues": total_issues,
            "files_audited": len(results),
            "severity_breakdown": severity_counts
        },
        "issues": results,
        "recommendations": self._generate_recommendations(severity_counts)
    }
def _generate_recommendations(self, severity_counts: Dict[str, int]) -> List[str]:
|
||||
"""Generate security recommendations based on findings"""
|
||||
recommendations = []
|
||||
|
||||
if severity_counts["CRITICAL"] > 0:
|
||||
recommendations.append("CRITICAL: Fix critical secret exposure immediately")
|
||||
|
||||
if severity_counts["HIGH"] > 0:
|
||||
recommendations.append("HIGH: Use secretRef for all sensitive values")
|
||||
|
||||
if severity_counts["MEDIUM"] > 0:
|
||||
recommendations.append("MEDIUM: Review and validate secret references")
|
||||
|
||||
if severity_counts["LOW"] > 0:
|
||||
recommendations.append("LOW: Improve secret management practices")
|
||||
|
||||
if not any(severity_counts.values()):
|
||||
recommendations.append("✅ No security issues found")
|
||||
|
||||
return recommendations
|
||||
|
||||
|
||||
def main():
    """CLI entry point: audit Helm values and emit a report.

    Exits with status 1 when any issue is found, so CI can fail the build.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Audit Helm values for security issues")
    parser.add_argument("--helm-dir", help="Helm directory path")
    parser.add_argument("--output", help="Output report to file")
    parser.add_argument("--format", choices=["json", "yaml", "text"], default="json", help="Report format")
    args = parser.parse_args()

    helm_dir = Path(args.helm_dir) if args.helm_dir else None
    report = HelmValuesAuditor(helm_dir).generate_report()

    # Render the report in the requested format.
    if args.format == "json":
        import json
        rendered = json.dumps(report, indent=2)
    elif args.format == "yaml":
        rendered = yaml.dump(report, default_flow_style=False)
    else:
        rendered = format_text_report(report)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(rendered)
        print(f"Report saved to {args.output}")
    else:
        print(rendered)

    # Non-zero exit signals findings to callers. NOTE(review): relies on a
    # top-of-file `import sys` that is not visible in this chunk — confirm.
    if report["summary"]["total_issues"] > 0:
        sys.exit(1)
def format_text_report(report: Dict[str, Any]) -> str:
    """Render the audit report dict as a human-readable text summary.

    Sections: banner, summary counts, per-severity breakdown (non-zero
    only), per-file issues (when any), and recommendations.
    """
    summary = report["summary"]
    banner = "=" * 60
    divider = "-" * 40

    out: List[str] = [
        banner,
        "HELM VALUES SECURITY AUDIT REPORT",
        banner,
        "",
        f"Files Audited: {summary['files_audited']}",
        f"Total Issues: {summary['total_issues']}",
        "",
        "Severity Breakdown:",
    ]
    # Only severities with at least one finding are listed.
    out.extend(
        f"  {severity}: {count}"
        for severity, count in summary["severity_breakdown"].items()
        if count > 0
    )
    out.append("")

    if report["issues"]:
        out.append("ISSUES FOUND:")
        out.append(divider)
        for file_path, file_issues in report["issues"].items():
            out.append(f"\n📁 {file_path}")
            for issue in file_issues:
                out.append(f"  {issue['level']}: {issue['message']}")
                if 'value' in issue:
                    out.append(f"    Current value: {issue['value']}")
                if 'suggestion' in issue:
                    out.append(f"    Suggestion: {issue['suggestion']}")

    out.append("\nRECOMMENDATIONS:")
    out.append(divider)
    out.extend(f"• {rec}" for rec in report["recommendations"])

    return "\n".join(out)
# Script entry point: run the audit CLI when executed directly.
if __name__ == "__main__":
    main()
73
config/security/secret-validation.yaml
Normal file
73
config/security/secret-validation.yaml
Normal file
@@ -0,0 +1,73 @@
|
||||
# Secret Validation Rules
# Defines which environment variables must use secret references
#
# Each `required_secrets` entry maps an environment-variable name
# (`pattern`) to the secret it must be sourced from (`secret_ref`,
# formatted "secret-name" or "secret-name:key") plus a `validation`
# expression the resolved value must satisfy.
# NOTE(review): `validation` looks like a regex / anchored-prefix check
# (e.g. "^sk-"), but the consumer is not visible here — confirm its
# exact matching semantics before editing these expressions.

production_secrets:
  coordinator:
    required_secrets:
      - pattern: "DATABASE_URL"
        secret_ref: "db-credentials"
        validation: "postgresql://"

      - pattern: "ADMIN_API_KEY"
        secret_ref: "api-keys:admin"
        validation: "^[a-zA-Z0-9]{32,}$"

      - pattern: "CLIENT_API_KEY"
        secret_ref: "api-keys:client"
        validation: "^[a-zA-Z0-9]{32,}$"

      # 256-bit keys expressed as 64 hex characters
      - pattern: "ENCRYPTION_KEY"
        secret_ref: "security-keys:encryption"
        validation: "^[a-fA-F0-9]{64}$"

      - pattern: "HMAC_SECRET"
        secret_ref: "security-keys:hmac"
        validation: "^[a-fA-F0-9]{64}$"

      - pattern: "JWT_SECRET"
        secret_ref: "security-keys:jwt"
        validation: "^[a-fA-F0-9]{64}$"

      - pattern: "OPENAI_API_KEY"
        secret_ref: "external-services:openai"
        validation: "^sk-"

      - pattern: "SENTRY_DSN"
        secret_ref: "monitoring:sentry"
        validation: "^https://"

  wallet_daemon:
    required_secrets:
      - pattern: "COORDINATOR_API_KEY"
        secret_ref: "api-keys:coordinator"
        validation: "^[a-zA-Z0-9]{32,}$"

forbidden_patterns:
  # These patterns should never appear in ANY configs
  - "your-.*-key-here"
  - "change-this-.*"
  - "password="
  - "secret_key="
  - "api_secret="

production_forbidden_patterns:
  # These patterns should never appear in PRODUCTION configs
  - "localhost"
  - "127.0.0.1"
  - "sqlite://"
  - "debug.*true"

validation_rules:
  # Minimum security requirements
  min_key_length: 32
  require_complexity: true
  no_default_values: true
  no_localhost_in_prod: true

  # Database security
  require_ssl_database: true
  forbid_sqlite_in_prod: true

  # API security
  require_https_urls: true
  validate_api_key_format: true
||||
Reference in New Issue
Block a user