refactor: flatten CLI directory structure - remove 'box in a box'
BEFORE: /opt/aitbc/cli/ ├── aitbc_cli/ # Python package (box in a box) │ ├── commands/ │ ├── main.py │ └── ... ├── setup.py AFTER: /opt/aitbc/cli/ # Flat structure ├── commands/ # Direct access ├── main.py # Direct access ├── auth/ ├── config/ ├── core/ ├── models/ ├── utils/ ├── plugins.py └── setup.py CHANGES MADE: - Moved all files from aitbc_cli/ to cli/ root - Fixed all relative imports (from . to absolute imports) - Updated setup.py entry point: aitbc_cli.main → main - Added CLI directory to Python path in entry script - Simplified deployment.py to remove dependency on deleted core.deployment - Fixed import paths in all command files - Recreated virtual environment with new structure BENEFITS: - Eliminated 'box in a box' nesting - Simpler directory structure - Direct access to all modules - Cleaner imports - Easier maintenance and development - CLI works with both 'python main.py' and 'aitbc' commands
This commit is contained in:
1
cli/commands/__init__.py
Executable file
1
cli/commands/__init__.py
Executable file
@@ -0,0 +1 @@
|
||||
"""Command modules for AITBC CLI"""
|
||||
512
cli/commands/admin.py
Executable file
512
cli/commands/admin.py
Executable file
@@ -0,0 +1,512 @@
|
||||
"""Admin commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from utils import output, error, success
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.pass_context
|
||||
def admin(ctx):
|
||||
"""System administration commands"""
|
||||
# Set role for admin commands
|
||||
ctx.ensure_object(dict)
|
||||
ctx.parent.detected_role = 'admin'
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.pass_context
|
||||
def status(ctx):
|
||||
"""Show system status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url.rstrip('/')}/v1/admin/stats",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status_data = response.json()
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get status: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--output", type=click.Path(), help="Output report to file")
|
||||
@click.pass_context
|
||||
def audit_verify(ctx, output):
|
||||
"""Verify audit log integrity"""
|
||||
audit_logger = AuditLogger()
|
||||
is_valid, issues = audit_logger.verify_integrity()
|
||||
|
||||
if is_valid:
|
||||
success("Audit log integrity verified - no tampering detected")
|
||||
else:
|
||||
error("Audit log integrity compromised!")
|
||||
for issue in issues:
|
||||
error(f" - {issue}")
|
||||
ctx.exit(1)
|
||||
|
||||
# Export detailed report if requested
|
||||
if output:
|
||||
try:
|
||||
report = audit_logger.export_report(Path(output))
|
||||
success(f"Audit report exported to {output}")
|
||||
|
||||
# Show summary
|
||||
stats = report["audit_report"]["statistics"]
|
||||
output({
|
||||
"total_entries": stats["total_entries"],
|
||||
"unique_actions": stats["unique_actions"],
|
||||
"unique_users": stats["unique_users"],
|
||||
"date_range": stats["date_range"]
|
||||
}, ctx.obj['output_format'])
|
||||
except Exception as e:
|
||||
error(f"Failed to export report: {e}")
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--limit", default=50, help="Number of entries to show")
|
||||
@click.option("--action", help="Filter by action type")
|
||||
@click.option("--search", help="Search query")
|
||||
@click.pass_context
|
||||
def audit_logs(ctx, limit: int, action: str, search: str):
|
||||
"""View audit logs with integrity verification"""
|
||||
audit_logger = AuditLogger()
|
||||
|
||||
try:
|
||||
if search:
|
||||
entries = audit_logger.search_logs(search, limit)
|
||||
else:
|
||||
entries = audit_logger.get_logs(limit, action)
|
||||
|
||||
if not entries:
|
||||
warning("No audit entries found")
|
||||
return
|
||||
|
||||
# Show entries
|
||||
output({
|
||||
"total_entries": len(entries),
|
||||
"entries": entries
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to read audit logs: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--limit", default=50, help="Number of jobs to show")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.pass_context
|
||||
def jobs(ctx, limit: int, status: Optional[str]):
|
||||
"""List all jobs in the system"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
params = {"limit": limit}
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/admin/jobs",
|
||||
params=params,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
jobs = response.json()
|
||||
output(jobs, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get jobs: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.argument("job_id")
|
||||
@click.pass_context
|
||||
def job_details(ctx, job_id: str):
|
||||
"""Get detailed job information"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/admin/jobs/{job_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
job_data = response.json()
|
||||
output(job_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Job not found: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.argument("job_id")
|
||||
@click.pass_context
|
||||
def delete_job(ctx, job_id: str):
|
||||
"""Delete a job from the system"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not click.confirm(f"Are you sure you want to delete job {job_id}?"):
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.delete(
|
||||
f"{config.coordinator_url}/admin/jobs/{job_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
success(f"Job {job_id} deleted")
|
||||
output({"status": "deleted", "job_id": job_id}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to delete job: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--limit", default=50, help="Number of miners to show")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.pass_context
|
||||
def miners(ctx, limit: int, status: Optional[str]):
|
||||
"""List all registered miners"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
params = {"limit": limit}
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/admin/miners",
|
||||
params=params,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
miners = response.json()
|
||||
output(miners, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get miners: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.argument("miner_id")
|
||||
@click.pass_context
|
||||
def miner_details(ctx, miner_id: str):
|
||||
"""Get detailed miner information"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/admin/miners/{miner_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
miner_data = response.json()
|
||||
output(miner_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Miner not found: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.argument("miner_id")
|
||||
@click.pass_context
|
||||
def deactivate_miner(ctx, miner_id: str):
|
||||
"""Deactivate a miner"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not click.confirm(f"Are you sure you want to deactivate miner {miner_id}?"):
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/miners/{miner_id}/deactivate",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
success(f"Miner {miner_id} deactivated")
|
||||
output({"status": "deactivated", "miner_id": miner_id}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to deactivate miner: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.argument("miner_id")
|
||||
@click.pass_context
|
||||
def activate_miner(ctx, miner_id: str):
|
||||
"""Activate a miner"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/miners/{miner_id}/activate",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
success(f"Miner {miner_id} activated")
|
||||
output({"status": "activated", "miner_id": miner_id}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to activate miner: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--days", type=int, default=7, help="Number of days to analyze")
|
||||
@click.pass_context
|
||||
def analytics(ctx, days: int):
|
||||
"""Get system analytics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/admin/analytics",
|
||||
params={"days": days},
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
analytics_data = response.json()
|
||||
output(analytics_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get analytics: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--level", default="INFO", help="Log level (DEBUG, INFO, WARNING, ERROR)")
|
||||
@click.option("--limit", default=100, help="Number of log entries to show")
|
||||
@click.pass_context
|
||||
def logs(ctx, level: str, limit: int):
|
||||
"""Get system logs"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/admin/logs",
|
||||
params={"level": level, "limit": limit},
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
logs_data = response.json()
|
||||
output(logs_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get logs: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.argument("job_id")
|
||||
@click.option("--reason", help="Reason for priority change")
|
||||
@click.pass_context
|
||||
def prioritize_job(ctx, job_id: str, reason: Optional[str]):
|
||||
"""Set job to high priority"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/jobs/{job_id}/prioritize",
|
||||
json={"reason": reason or "Admin priority"},
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
success(f"Job {job_id} prioritized")
|
||||
output({"status": "prioritized", "job_id": job_id}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to prioritize job: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command()
|
||||
@click.option("--action", required=True, help="Action to perform")
|
||||
@click.option("--target", help="Target of the action")
|
||||
@click.option("--data", help="Additional data (JSON)")
|
||||
@click.pass_context
|
||||
def execute(ctx, action: str, target: Optional[str], data: Optional[str]):
|
||||
"""Execute custom admin action"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Parse data if provided
|
||||
parsed_data = {}
|
||||
if data:
|
||||
try:
|
||||
parsed_data = json.loads(data)
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid JSON data")
|
||||
return
|
||||
|
||||
if target:
|
||||
parsed_data["target"] = target
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/execute/{action}",
|
||||
json=parsed_data,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to execute action: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.group()
|
||||
def maintenance():
|
||||
"""Maintenance operations"""
|
||||
pass
|
||||
|
||||
|
||||
@maintenance.command()
|
||||
@click.pass_context
|
||||
def cleanup(ctx):
|
||||
"""Clean up old jobs and data"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not click.confirm("This will clean up old jobs and temporary data. Continue?"):
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/maintenance/cleanup",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success("Cleanup completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Cleanup failed: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@maintenance.command()
|
||||
@click.pass_context
|
||||
def reindex(ctx):
|
||||
"""Reindex the database"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not click.confirm("This will reindex the entire database. Continue?"):
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/maintenance/reindex",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success("Reindex started")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Reindex failed: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@maintenance.command()
|
||||
@click.pass_context
|
||||
def backup(ctx):
|
||||
"""Create system backup"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/admin/maintenance/backup",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success("Backup created")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Backup failed: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@admin.command(name="audit-log")
|
||||
@click.option("--limit", default=50, help="Number of entries to show")
|
||||
@click.option("--action", "action_filter", help="Filter by action type")
|
||||
@click.pass_context
|
||||
def audit_log(ctx, limit: int, action_filter: Optional[str]):
|
||||
"""View audit log"""
|
||||
from utils import AuditLogger
|
||||
|
||||
logger = AuditLogger()
|
||||
entries = logger.get_logs(limit=limit, action_filter=action_filter)
|
||||
|
||||
if not entries:
|
||||
output({"message": "No audit log entries found"}, ctx.obj['output_format'])
|
||||
return
|
||||
|
||||
output(entries, ctx.obj['output_format'])
|
||||
|
||||
|
||||
# Add maintenance group to admin
|
||||
admin.add_command(maintenance)
|
||||
474
cli/commands/advanced_analytics.py
Executable file
474
cli/commands/advanced_analytics.py
Executable file
@@ -0,0 +1,474 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Advanced Analytics CLI Commands
|
||||
Real-time analytics dashboard and market insights
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime, timedelta
|
||||
from imports import ensure_coordinator_api_imports
|
||||
|
||||
ensure_coordinator_api_imports()
|
||||
|
||||
try:
|
||||
from app.services.advanced_analytics import (
|
||||
start_analytics_monitoring, stop_analytics_monitoring, get_dashboard_data,
|
||||
create_analytics_alert, get_analytics_summary, advanced_analytics,
|
||||
MetricType, Timeframe
|
||||
)
|
||||
_import_error = None
|
||||
except ImportError as e:
|
||||
_import_error = e
|
||||
|
||||
def _missing(*args, **kwargs):
|
||||
raise ImportError(
|
||||
f"Required service module 'app.services.advanced_analytics' could not be imported: {_import_error}. "
|
||||
"Ensure coordinator-api dependencies are installed and the source directory is accessible."
|
||||
)
|
||||
start_analytics_monitoring = stop_analytics_monitoring = get_dashboard_data = _missing
|
||||
create_analytics_alert = get_analytics_summary = _missing
|
||||
advanced_analytics = None
|
||||
|
||||
class MetricType:
|
||||
pass
|
||||
class Timeframe:
|
||||
pass
|
||||
|
||||
@click.group()
|
||||
def advanced_analytics_group():
|
||||
"""Advanced analytics and market insights commands"""
|
||||
pass
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.option("--symbols", required=True, help="Trading symbols to monitor (comma-separated)")
|
||||
@click.pass_context
|
||||
def start(ctx, symbols: str):
|
||||
"""Start advanced analytics monitoring"""
|
||||
try:
|
||||
symbol_list = [s.strip().upper() for s in symbols.split(",")]
|
||||
|
||||
click.echo(f"📊 Starting Advanced Analytics Monitoring...")
|
||||
click.echo(f"📈 Monitoring symbols: {', '.join(symbol_list)}")
|
||||
|
||||
success = asyncio.run(start_analytics_monitoring(symbol_list))
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ Advanced Analytics monitoring started!")
|
||||
click.echo(f"🔍 Real-time metrics collection active")
|
||||
click.echo(f"📊 Monitoring {len(symbol_list)} symbols")
|
||||
else:
|
||||
click.echo(f"❌ Failed to start monitoring")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Start monitoring failed: {e}", err=True)
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.pass_context
|
||||
def stop(ctx):
|
||||
"""Stop advanced analytics monitoring"""
|
||||
try:
|
||||
click.echo(f"📊 Stopping Advanced Analytics Monitoring...")
|
||||
|
||||
success = asyncio.run(stop_analytics_monitoring())
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ Advanced Analytics monitoring stopped")
|
||||
else:
|
||||
click.echo(f"⚠️ Monitoring was not running")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Stop monitoring failed: {e}", err=True)
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.option("--symbol", required=True, help="Trading symbol")
|
||||
@click.option("--format", type=click.Choice(['table', 'json']), default="table", help="Output format")
|
||||
@click.pass_context
|
||||
def dashboard(ctx, symbol: str, format: str):
|
||||
"""Get real-time analytics dashboard"""
|
||||
try:
|
||||
symbol = symbol.upper()
|
||||
click.echo(f"📊 Real-Time Analytics Dashboard: {symbol}")
|
||||
|
||||
dashboard_data = get_dashboard_data(symbol)
|
||||
|
||||
if format == "json":
|
||||
click.echo(json.dumps(dashboard_data, indent=2, default=str))
|
||||
return
|
||||
|
||||
# Display table format
|
||||
click.echo(f"\n📈 Current Metrics:")
|
||||
current_metrics = dashboard_data.get('current_metrics', {})
|
||||
|
||||
if current_metrics:
|
||||
for metric_name, value in current_metrics.items():
|
||||
if isinstance(value, float):
|
||||
if metric_name == 'price_metrics':
|
||||
click.echo(f" 💰 Current Price: ${value:,.2f}")
|
||||
elif metric_name == 'volume_metrics':
|
||||
click.echo(f" 📊 Volume Ratio: {value:.2f}")
|
||||
elif metric_name == 'volatility_metrics':
|
||||
click.echo(f" 📈 Volatility: {value:.2%}")
|
||||
else:
|
||||
click.echo(f" {metric_name}: {value:.4f}")
|
||||
|
||||
# Technical indicators
|
||||
indicators = dashboard_data.get('technical_indicators', {})
|
||||
if indicators:
|
||||
click.echo(f"\n📊 Technical Indicators:")
|
||||
if 'sma_5' in indicators:
|
||||
click.echo(f" 📈 SMA 5: ${indicators['sma_5']:,.2f}")
|
||||
if 'sma_20' in indicators:
|
||||
click.echo(f" 📈 SMA 20: ${indicators['sma_20']:,.2f}")
|
||||
if 'rsi' in indicators:
|
||||
rsi = indicators['rsi']
|
||||
rsi_status = "🔴 Overbought" if rsi > 70 else "🟢 Oversold" if rsi < 30 else "🟡 Neutral"
|
||||
click.echo(f" 📊 RSI: {rsi:.1f} {rsi_status}")
|
||||
if 'bb_upper' in indicators:
|
||||
click.echo(f" 📊 BB Upper: ${indicators['bb_upper']:,.2f}")
|
||||
click.echo(f" 📊 BB Lower: ${indicators['bb_lower']:,.2f}")
|
||||
|
||||
# Market status
|
||||
market_status = dashboard_data.get('market_status', 'unknown')
|
||||
status_icon = {"overbought": "🔴", "oversold": "🟢", "neutral": "🟡"}.get(market_status, "❓")
|
||||
click.echo(f"\n{status_icon} Market Status: {market_status.title()}")
|
||||
|
||||
# Alerts
|
||||
alerts = dashboard_data.get('alerts', [])
|
||||
if alerts:
|
||||
click.echo(f"\n🚨 Active Alerts: {len(alerts)}")
|
||||
for alert in alerts[:3]:
|
||||
click.echo(f" • {alert.name}: {alert.condition} {alert.threshold}")
|
||||
else:
|
||||
click.echo(f"\n✅ No active alerts")
|
||||
|
||||
# Data history info
|
||||
price_history = dashboard_data.get('price_history', [])
|
||||
volume_history = dashboard_data.get('volume_history', [])
|
||||
click.echo(f"\n📊 Data Points:")
|
||||
click.echo(f" Price History: {len(price_history)} points")
|
||||
click.echo(f" Volume History: {len(volume_history)} points")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Dashboard failed: {e}", err=True)
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.option("--name", required=True, help="Alert name")
|
||||
@click.option("--symbol", required=True, help="Trading symbol")
|
||||
@click.option("--metric", required=True, type=click.Choice(['price_metrics', 'volume_metrics', 'volatility_metrics']), help="Metric type")
|
||||
@click.option("--condition", required=True, type=click.Choice(['gt', 'lt', 'eq', 'change_percent']), help="Alert condition")
|
||||
@click.option("--threshold", type=float, required=True, help="Alert threshold")
|
||||
@click.option("--timeframe", default="1h", type=click.Choice(['real_time', '1m', '5m', '15m', '1h', '4h', '1d']), help="Timeframe")
|
||||
@click.pass_context
|
||||
def create_alert(ctx, name: str, symbol: str, metric: str, condition: str, threshold: float, timeframe: str):
|
||||
"""Create analytics alert"""
|
||||
try:
|
||||
symbol = symbol.upper()
|
||||
click.echo(f"🚨 Creating Analytics Alert...")
|
||||
click.echo(f"📋 Alert Name: {name}")
|
||||
click.echo(f"📊 Symbol: {symbol}")
|
||||
click.echo(f"📈 Metric: {metric}")
|
||||
click.echo(f"⚡ Condition: {condition}")
|
||||
click.echo(f"🎯 Threshold: {threshold}")
|
||||
click.echo(f"⏰ Timeframe: {timeframe}")
|
||||
|
||||
alert_id = create_analytics_alert(name, symbol, metric, condition, threshold, timeframe)
|
||||
|
||||
click.echo(f"\n✅ Alert created successfully!")
|
||||
click.echo(f"🆔 Alert ID: {alert_id}")
|
||||
click.echo(f"📊 Monitoring {symbol} {metric}")
|
||||
|
||||
# Show alert condition in human readable format
|
||||
condition_text = {
|
||||
"gt": "greater than",
|
||||
"lt": "less than",
|
||||
"eq": "equal to",
|
||||
"change_percent": "change percentage"
|
||||
}.get(condition, condition)
|
||||
|
||||
click.echo(f"🔔 Triggers when: {metric} is {condition_text} {threshold}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Alert creation failed: {e}", err=True)
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.pass_context
|
||||
def summary(ctx):
|
||||
"""Show analytics summary"""
|
||||
try:
|
||||
click.echo(f"📊 Advanced Analytics Summary")
|
||||
|
||||
summary = get_analytics_summary()
|
||||
|
||||
click.echo(f"\n📈 System Status:")
|
||||
click.echo(f" Monitoring Active: {'✅ Yes' if summary['monitoring_active'] else '❌ No'}")
|
||||
click.echo(f" Total Alerts: {summary['total_alerts']}")
|
||||
click.echo(f" Active Alerts: {summary['active_alerts']}")
|
||||
click.echo(f" Tracked Symbols: {summary['tracked_symbols']}")
|
||||
click.echo(f" Total Metrics Stored: {summary['total_metrics_stored']}")
|
||||
click.echo(f" Performance Reports: {summary['performance_reports']}")
|
||||
|
||||
# Symbol-specific metrics
|
||||
symbol_metrics = {k: v for k, v in summary.items() if k.endswith('_metrics')}
|
||||
if symbol_metrics:
|
||||
click.echo(f"\n📊 Symbol Metrics:")
|
||||
for symbol_key, count in symbol_metrics.items():
|
||||
symbol = symbol_key.replace('_metrics', '')
|
||||
click.echo(f" {symbol}: {count} metrics")
|
||||
|
||||
# Alert breakdown
|
||||
if advanced_analytics.alerts:
|
||||
click.echo(f"\n🚨 Alert Configuration:")
|
||||
for alert_id, alert in advanced_analytics.alerts.items():
|
||||
status_icon = "✅" if alert.active else "❌"
|
||||
click.echo(f" {status_icon} {alert.name} ({alert.symbol})")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Summary failed: {e}", err=True)
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.option("--symbol", required=True, help="Trading symbol")
|
||||
@click.option("--days", type=int, default=30, help="Analysis period in days")
|
||||
@click.pass_context
|
||||
def performance(ctx, symbol: str, days: int):
|
||||
"""Generate performance analysis report"""
|
||||
try:
|
||||
symbol = symbol.upper()
|
||||
click.echo(f"📊 Performance Analysis: {symbol}")
|
||||
click.echo(f"📅 Analysis Period: {days} days")
|
||||
|
||||
# Calculate date range
|
||||
end_date = datetime.now()
|
||||
start_date = end_date - timedelta(days=days)
|
||||
|
||||
# Generate performance report
|
||||
report = advanced_analytics.generate_performance_report(symbol, start_date, end_date)
|
||||
|
||||
click.echo(f"\n📈 Performance Report:")
|
||||
click.echo(f" Symbol: {report.symbol}")
|
||||
click.echo(f" Period: {report.start_date.strftime('%Y-%m-%d')} to {report.end_date.strftime('%Y-%m-%d')}")
|
||||
|
||||
# Performance metrics
|
||||
click.echo(f"\n💰 Returns:")
|
||||
click.echo(f" Total Return: {report.total_return:.2%}")
|
||||
click.echo(f" Volatility: {report.volatility:.2%}")
|
||||
click.echo(f" Sharpe Ratio: {report.sharpe_ratio:.2f}")
|
||||
click.echo(f" Max Drawdown: {report.max_drawdown:.2%}")
|
||||
|
||||
# Risk metrics
|
||||
click.echo(f"\n⚠️ Risk Metrics:")
|
||||
click.echo(f" Win Rate: {report.win_rate:.1%}")
|
||||
click.echo(f" Profit Factor: {report.profit_factor:.2f}")
|
||||
click.echo(f" Calmar Ratio: {report.calmar_ratio:.2f}")
|
||||
click.echo(f" VaR (95%): {report.var_95:.2%}")
|
||||
|
||||
# Performance assessment
|
||||
if report.total_return > 0.1:
|
||||
assessment = "🔥 EXCELLENT"
|
||||
elif report.total_return > 0.05:
|
||||
assessment = "⚡ GOOD"
|
||||
elif report.total_return > 0:
|
||||
assessment = "💡 POSITIVE"
|
||||
else:
|
||||
assessment = "❌ NEGATIVE"
|
||||
|
||||
click.echo(f"\n{assessment} Performance Assessment")
|
||||
|
||||
# Risk assessment
|
||||
if report.max_drawdown < 0.1:
|
||||
risk_assessment = "🟢 LOW RISK"
|
||||
elif report.max_drawdown < 0.2:
|
||||
risk_assessment = "🟡 MEDIUM RISK"
|
||||
else:
|
||||
risk_assessment = "🔴 HIGH RISK"
|
||||
|
||||
click.echo(f"Risk Level: {risk_assessment}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Performance analysis failed: {e}", err=True)
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.option("--symbol", required=True, help="Trading symbol")
|
||||
@click.option("--hours", type=int, default=24, help="Analysis period in hours")
|
||||
@click.pass_context
|
||||
def insights(ctx, symbol: str, hours: int):
|
||||
"""Generate AI-powered market insights"""
|
||||
try:
|
||||
symbol = symbol.upper()
|
||||
click.echo(f"🔍 AI Market Insights: {symbol}")
|
||||
click.echo(f"⏰ Analysis Period: {hours} hours")
|
||||
|
||||
# Get dashboard data
|
||||
dashboard = get_dashboard_data(symbol)
|
||||
|
||||
if not dashboard:
|
||||
click.echo(f"❌ No data available for {symbol}")
|
||||
click.echo(f"💡 Start monitoring first: aitbc advanced-analytics start --symbols {symbol}")
|
||||
return
|
||||
|
||||
# Extract key insights
|
||||
current_metrics = dashboard.get('current_metrics', {})
|
||||
indicators = dashboard.get('technical_indicators', {})
|
||||
market_status = dashboard.get('market_status', 'unknown')
|
||||
|
||||
click.echo(f"\n📊 Current Market Analysis:")
|
||||
|
||||
# Price analysis
|
||||
if 'price_metrics' in current_metrics:
|
||||
current_price = current_metrics['price_metrics']
|
||||
click.echo(f" 💰 Current Price: ${current_price:,.2f}")
|
||||
|
||||
# Volume analysis
|
||||
if 'volume_metrics' in current_metrics:
|
||||
volume_ratio = current_metrics['volume_metrics']
|
||||
volume_status = "🔥 High" if volume_ratio > 1.5 else "📊 Normal" if volume_ratio > 0.8 else "📉 Low"
|
||||
click.echo(f" 📊 Volume Activity: {volume_status} (ratio: {volume_ratio:.2f})")
|
||||
|
||||
# Volatility analysis
|
||||
if 'volatility_metrics' in current_metrics:
|
||||
volatility = current_metrics['volatility_metrics']
|
||||
vol_status = "🔴 High" if volatility > 0.05 else "🟡 Medium" if volatility > 0.02 else "🟢 Low"
|
||||
click.echo(f" 📈 Volatility: {vol_status} ({volatility:.2%})")
|
||||
|
||||
# Technical analysis
|
||||
if indicators:
|
||||
click.echo(f"\n📈 Technical Analysis:")
|
||||
|
||||
if 'rsi' in indicators:
|
||||
rsi = indicators['rsi']
|
||||
rsi_insight = "Overbought - consider selling" if rsi > 70 else "Oversold - consider buying" if rsi < 30 else "Neutral"
|
||||
click.echo(f" 📊 RSI ({rsi:.1f}): {rsi_insight}")
|
||||
|
||||
if 'sma_5' in indicators and 'sma_20' in indicators:
|
||||
sma_5 = indicators['sma_5']
|
||||
sma_20 = indicators['sma_20']
|
||||
if 'price_metrics' in current_metrics:
|
||||
price = current_metrics['price_metrics']
|
||||
if price > sma_5 > sma_20:
|
||||
trend = "🔥 Strong Uptrend"
|
||||
elif price < sma_5 < sma_20:
|
||||
trend = "📉 Strong Downtrend"
|
||||
else:
|
||||
trend = "🟡 Sideways"
|
||||
click.echo(f" 📈 Trend: {trend}")
|
||||
|
||||
if 'bb_upper' in indicators and 'bb_lower' in indicators:
|
||||
bb_upper = indicators['bb_upper']
|
||||
bb_lower = indicators['bb_lower']
|
||||
if 'price_metrics' in current_metrics:
|
||||
price = current_metrics['price_metrics']
|
||||
if price > bb_upper:
|
||||
bb_signal = "Above upper band - overbought"
|
||||
elif price < bb_lower:
|
||||
bb_signal = "Below lower band - oversold"
|
||||
else:
|
||||
bb_signal = "Within bands - normal"
|
||||
click.echo(f" 📊 Bollinger Bands: {bb_signal}")
|
||||
|
||||
# Overall market status
|
||||
click.echo(f"\n🎯 Overall Market Status: {market_status.title()}")
|
||||
|
||||
# Trading recommendation
|
||||
recommendation = _generate_trading_recommendation(dashboard)
|
||||
click.echo(f"💡 Trading Recommendation: {recommendation}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Insights generation failed: {e}", err=True)
|
||||
|
||||
def _generate_trading_recommendation(dashboard: Dict[str, Any]) -> str:
|
||||
"""Generate AI-powered trading recommendation"""
|
||||
current_metrics = dashboard.get('current_metrics', {})
|
||||
indicators = dashboard.get('technical_indicators', {})
|
||||
market_status = dashboard.get('market_status', 'unknown')
|
||||
|
||||
# Simple recommendation logic
|
||||
buy_signals = 0
|
||||
sell_signals = 0
|
||||
|
||||
# RSI signals
|
||||
if 'rsi' in indicators:
|
||||
rsi = indicators['rsi']
|
||||
if rsi < 30:
|
||||
buy_signals += 2
|
||||
elif rsi > 70:
|
||||
sell_signals += 2
|
||||
|
||||
# Volume signals
|
||||
if 'volume_metrics' in current_metrics:
|
||||
volume_ratio = current_metrics['volume_metrics']
|
||||
if volume_ratio > 1.5:
|
||||
buy_signals += 1
|
||||
|
||||
# Market status signals
|
||||
if market_status == 'oversold':
|
||||
buy_signals += 1
|
||||
elif market_status == 'overbought':
|
||||
sell_signals += 1
|
||||
|
||||
# Generate recommendation
|
||||
if buy_signals > sell_signals + 1:
|
||||
return "🟢 STRONG BUY - Multiple bullish indicators detected"
|
||||
elif buy_signals > sell_signals:
|
||||
return "💡 BUY - Bullish bias detected"
|
||||
elif sell_signals > buy_signals + 1:
|
||||
return "🔴 STRONG SELL - Multiple bearish indicators detected"
|
||||
elif sell_signals > buy_signals:
|
||||
return "⚠️ SELL - Bearish bias detected"
|
||||
else:
|
||||
return "🟡 HOLD - Mixed signals, wait for clarity"
|
||||
|
||||
@advanced_analytics_group.command()
|
||||
@click.pass_context
|
||||
def test(ctx):
|
||||
"""Test advanced analytics platform"""
|
||||
try:
|
||||
click.echo(f"🧪 Testing Advanced Analytics Platform...")
|
||||
|
||||
async def run_tests():
|
||||
# Test 1: Start monitoring
|
||||
click.echo(f"\n📋 Test 1: Start Monitoring")
|
||||
start_success = await start_analytics_monitoring(["BTC/USDT", "ETH/USDT"])
|
||||
click.echo(f" ✅ Start: {'Success' if start_success else 'Failed'}")
|
||||
|
||||
# Let it run for a few seconds
|
||||
click.echo(f"⏱️ Collecting data...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Test 2: Get dashboard
|
||||
click.echo(f"\n📋 Test 2: Dashboard Data")
|
||||
dashboard = get_dashboard_data("BTC/USDT")
|
||||
click.echo(f" ✅ Dashboard: {len(dashboard)} fields retrieved")
|
||||
|
||||
# Test 3: Get summary
|
||||
click.echo(f"\n📋 Test 3: Analytics Summary")
|
||||
summary = get_analytics_summary()
|
||||
click.echo(f" ✅ Summary: {len(summary)} metrics")
|
||||
|
||||
# Test 4: Stop monitoring
|
||||
click.echo(f"\n📋 Test 4: Stop Monitoring")
|
||||
stop_success = await stop_analytics_monitoring()
|
||||
click.echo(f" ✅ Stop: {'Success' if stop_success else 'Failed'}")
|
||||
|
||||
return start_success, stop_success, dashboard, summary
|
||||
|
||||
# Run the async tests
|
||||
start_success, stop_success, dashboard, summary = asyncio.run(run_tests())
|
||||
|
||||
# Show results
|
||||
click.echo(f"\n🎉 Test Results Summary:")
|
||||
click.echo(f" Platform Status: {'✅ Operational' if start_success and stop_success else '❌ Issues'}")
|
||||
click.echo(f" Data Collection: {'✅ Working' if dashboard else '❌ Issues'}")
|
||||
click.echo(f" Metrics Tracked: {summary.get('total_metrics_stored', 0)}")
|
||||
|
||||
if start_success and stop_success:
|
||||
click.echo(f"\n✅ Advanced Analytics Platform is ready for production use!")
|
||||
else:
|
||||
click.echo(f"\n⚠️ Some issues detected - check logs for details")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test failed: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
advanced_analytics_group()
|
||||
622
cli/commands/agent.py
Executable file
622
cli/commands/agent.py
Executable file
@@ -0,0 +1,622 @@
|
||||
"""Agent commands for AITBC CLI - Advanced AI Agent Management"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
from typing import Optional, Dict, Any, List
|
||||
from pathlib import Path
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def agent():
|
||||
"""Advanced AI agent workflow and execution management"""
|
||||
pass
|
||||
|
||||
|
||||
@agent.command()
|
||||
@click.option("--name", required=True, help="Agent workflow name")
|
||||
@click.option("--description", default="", help="Agent description")
|
||||
@click.option("--workflow-file", type=click.File('r'), help="Workflow definition from JSON file")
|
||||
@click.option("--verification", default="basic", type=click.Choice(["basic", "full", "zero-knowledge"]),
|
||||
help="Verification level for agent execution")
|
||||
@click.option("--max-execution-time", default=3600, help="Maximum execution time in seconds")
|
||||
@click.option("--max-cost-budget", default=0.0, help="Maximum cost budget")
|
||||
@click.pass_context
|
||||
def create(ctx, name: str, description: str, workflow_file, verification: str,
|
||||
max_execution_time: int, max_cost_budget: float):
|
||||
"""Create a new AI agent workflow"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Build workflow data
|
||||
workflow_data = {
|
||||
"name": name,
|
||||
"description": description,
|
||||
"verification_level": verification,
|
||||
"workflow_id": str(uuid.uuid4()),
|
||||
"inputs": {},
|
||||
"max_execution_time": max_execution_time,
|
||||
"max_cost_budget": max_cost_budget
|
||||
}
|
||||
|
||||
if workflow_file:
|
||||
try:
|
||||
workflow_spec = json.load(workflow_file)
|
||||
workflow_data.update(workflow_spec)
|
||||
except Exception as e:
|
||||
error(f"Failed to read workflow file: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/workflows",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=workflow_data
|
||||
)
|
||||
|
||||
if response.status_code in (200, 201):
|
||||
workflow = response.json()
|
||||
success(f"Agent workflow created: {workflow['id']}")
|
||||
output(workflow, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create agent workflow: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@agent.command()
|
||||
@click.option("--type", "agent_type", help="Filter by agent type")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.option("--verification", help="Filter by verification level")
|
||||
@click.option("--limit", default=20, help="Number of agents to list")
|
||||
@click.option("--owner", help="Filter by owner ID")
|
||||
@click.pass_context
|
||||
def list(ctx, agent_type: Optional[str], status: Optional[str],
|
||||
verification: Optional[str], limit: int, owner: Optional[str]):
|
||||
"""List available AI agent workflows"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"limit": limit}
|
||||
if agent_type:
|
||||
params["type"] = agent_type
|
||||
if status:
|
||||
params["status"] = status
|
||||
if verification:
|
||||
params["verification"] = verification
|
||||
if owner:
|
||||
params["owner"] = owner
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/agents/workflows",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
workflows = response.json()
|
||||
output(workflows, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list agent workflows: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@agent.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--inputs", type=click.File('r'), help="Input data from JSON file")
|
||||
@click.option("--verification", default="basic", type=click.Choice(["basic", "full", "zero-knowledge"]),
|
||||
help="Verification level for this execution")
|
||||
@click.option("--priority", default="normal", type=click.Choice(["low", "normal", "high"]),
|
||||
help="Execution priority")
|
||||
@click.option("--timeout", default=3600, help="Execution timeout in seconds")
|
||||
@click.pass_context
|
||||
def execute(ctx, agent_id: str, inputs, verification: str, priority: str, timeout: int):
|
||||
"""Execute an AI agent workflow"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Prepare execution data
|
||||
execution_data = {
|
||||
"verification_level": verification,
|
||||
"workflow_id": agent_id,
|
||||
"inputs": {},
|
||||
"priority": priority,
|
||||
"timeout_seconds": timeout
|
||||
}
|
||||
|
||||
if inputs:
|
||||
try:
|
||||
input_data = json.load(inputs)
|
||||
execution_data["inputs"] = input_data
|
||||
except Exception as e:
|
||||
error(f"Failed to read inputs file: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/workflows/{agent_id}/execute",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=execution_data
|
||||
)
|
||||
|
||||
if response.status_code in (200, 202):
|
||||
execution = response.json()
|
||||
success(f"Agent execution started: {execution['id']}")
|
||||
output(execution, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start agent execution: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@agent.command()
|
||||
@click.argument("execution_id")
|
||||
@click.option("--timeout", default=30, help="Maximum watch time in seconds")
|
||||
@click.option("--interval", default=5, help="Watch interval in seconds")
|
||||
@click.pass_context
|
||||
def status(ctx, execution_id: str, timeout: int, interval: int):
|
||||
"""Get status of agent execution"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
def get_status():
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/agents/executions/{execution_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
error(f"Failed to get status: {response.status_code}")
|
||||
return None
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
return None
|
||||
|
||||
# Single status check with timeout
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
|
||||
# If execution is still running, provide guidance
|
||||
if status_data.get('status') not in ['completed', 'failed']:
|
||||
output(f"Execution still in progress. Use 'aitbc agent status {execution_id}' to check again.",
|
||||
ctx.obj['output_format'])
|
||||
output(f"Current status: {status_data.get('status', 'Unknown')}", ctx.obj['output_format'])
|
||||
output(f"Progress: {status_data.get('progress', 0)}%", ctx.obj['output_format'])
|
||||
|
||||
|
||||
@agent.command()
|
||||
@click.argument("execution_id")
|
||||
@click.option("--verify", is_flag=True, help="Verify cryptographic receipt")
|
||||
@click.option("--download", type=click.Path(), help="Download receipt to file")
|
||||
@click.pass_context
|
||||
def receipt(ctx, execution_id: str, verify: bool, download: Optional[str]):
|
||||
"""Get verifiable receipt for completed execution"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/agents/executions/{execution_id}/receipt",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
receipt_data = response.json()
|
||||
|
||||
if verify:
|
||||
# Verify receipt
|
||||
verify_response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/receipts/verify",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json={"receipt": receipt_data}
|
||||
)
|
||||
|
||||
if verify_response.status_code == 200:
|
||||
verification_result = verify_response.json()
|
||||
receipt_data["verification"] = verification_result
|
||||
|
||||
if verification_result.get("valid"):
|
||||
success("Receipt verification: PASSED")
|
||||
else:
|
||||
warning("Receipt verification: FAILED")
|
||||
else:
|
||||
warning("Could not verify receipt")
|
||||
|
||||
if download:
|
||||
with open(download, 'w') as f:
|
||||
json.dump(receipt_data, f, indent=2)
|
||||
success(f"Receipt downloaded to {download}")
|
||||
else:
|
||||
output(receipt_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get execution receipt: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def network():
|
||||
"""Multi-agent collaborative network management"""
|
||||
pass
|
||||
|
||||
|
||||
agent.add_command(network)
|
||||
|
||||
|
||||
@network.command()
|
||||
@click.option("--name", required=True, help="Network name")
|
||||
@click.option("--agents", required=True, help="Comma-separated list of agent IDs")
|
||||
@click.option("--description", default="", help="Network description")
|
||||
@click.option("--coordination", default="centralized",
|
||||
type=click.Choice(["centralized", "decentralized", "hybrid"]),
|
||||
help="Coordination strategy")
|
||||
@click.pass_context
|
||||
def create(ctx, name: str, agents: str, description: str, coordination: str):
|
||||
"""Create collaborative agent network"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
agent_ids = [agent_id.strip() for agent_id in agents.split(',')]
|
||||
|
||||
network_data = {
|
||||
"name": name,
|
||||
"description": description,
|
||||
"agents": agent_ids,
|
||||
"coordination_strategy": coordination
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/networks",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=network_data
|
||||
)
|
||||
|
||||
if response.status_code in (200, 201):
|
||||
network = response.json()
|
||||
success(f"Agent network created: {network['id']}")
|
||||
output(network, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create agent network: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@network.command()
|
||||
@click.argument("network_id")
|
||||
@click.option("--task", type=click.File('r'), required=True, help="Task definition JSON file")
|
||||
@click.option("--priority", default="normal", type=click.Choice(["low", "normal", "high"]),
|
||||
help="Execution priority")
|
||||
@click.pass_context
|
||||
def execute(ctx, network_id: str, task, priority: str):
|
||||
"""Execute collaborative task on agent network"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
task_data = json.load(task)
|
||||
except Exception as e:
|
||||
error(f"Failed to read task file: {e}")
|
||||
return
|
||||
|
||||
execution_data = {
|
||||
"task": task_data,
|
||||
"priority": priority
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/networks/{network_id}/execute",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=execution_data
|
||||
)
|
||||
|
||||
if response.status_code in (200, 202):
|
||||
execution = response.json()
|
||||
success(f"Network execution started: {execution['id']}")
|
||||
output(execution, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start network execution: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@network.command()
|
||||
@click.argument("network_id")
|
||||
@click.option("--metrics", default="all", help="Comma-separated metrics to show")
|
||||
@click.option("--real-time", is_flag=True, help="Show real-time metrics")
|
||||
@click.pass_context
|
||||
def status(ctx, network_id: str, metrics: str, real_time: bool):
|
||||
"""Get agent network status and performance metrics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if metrics != "all":
|
||||
params["metrics"] = metrics
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/agents/networks/{network_id}/status",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status_data = response.json()
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get network status: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@network.command()
|
||||
@click.argument("network_id")
|
||||
@click.option("--objective", default="efficiency",
|
||||
type=click.Choice(["speed", "efficiency", "cost", "quality"]),
|
||||
help="Optimization objective")
|
||||
@click.pass_context
|
||||
def optimize(ctx, network_id: str, objective: str):
|
||||
"""Optimize agent network collaboration"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
optimization_data = {"objective": objective}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/networks/{network_id}/optimize",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=optimization_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Network optimization completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to optimize network: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def learning():
|
||||
"""Agent adaptive learning and training management"""
|
||||
pass
|
||||
|
||||
|
||||
agent.add_command(learning)
|
||||
|
||||
|
||||
@learning.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--mode", default="reinforcement",
|
||||
type=click.Choice(["reinforcement", "transfer", "meta"]),
|
||||
help="Learning mode")
|
||||
@click.option("--feedback-source", help="Feedback data source")
|
||||
@click.option("--learning-rate", default=0.001, help="Learning rate")
|
||||
@click.pass_context
|
||||
def enable(ctx, agent_id: str, mode: str, feedback_source: Optional[str], learning_rate: float):
|
||||
"""Enable adaptive learning for agent"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
learning_config = {
|
||||
"mode": mode,
|
||||
"learning_rate": learning_rate
|
||||
}
|
||||
|
||||
if feedback_source:
|
||||
learning_config["feedback_source"] = feedback_source
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/{agent_id}/learning/enable",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=learning_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Adaptive learning enabled for agent {agent_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to enable learning: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@learning.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--feedback", type=click.File('r'), required=True, help="Feedback data JSON file")
|
||||
@click.option("--epochs", default=10, help="Number of training epochs")
|
||||
@click.pass_context
|
||||
def train(ctx, agent_id: str, feedback, epochs: int):
|
||||
"""Train agent with feedback data"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
feedback_data = json.load(feedback)
|
||||
except Exception as e:
|
||||
error(f"Failed to read feedback file: {e}")
|
||||
return
|
||||
|
||||
training_data = {
|
||||
"feedback": feedback_data,
|
||||
"epochs": epochs
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/{agent_id}/learning/train",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=training_data
|
||||
)
|
||||
|
||||
if response.status_code in (200, 202):
|
||||
training = response.json()
|
||||
success(f"Training started: {training['id']}")
|
||||
output(training, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start training: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@learning.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--metrics", default="accuracy,efficiency", help="Comma-separated metrics to show")
|
||||
@click.pass_context
|
||||
def progress(ctx, agent_id: str, metrics: str):
|
||||
"""Review agent learning progress"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"metrics": metrics}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/agents/{agent_id}/learning/progress",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
progress_data = response.json()
|
||||
output(progress_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get learning progress: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@learning.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--format", default="onnx", type=click.Choice(["onnx", "pickle", "torch"]),
|
||||
help="Export format")
|
||||
@click.option("--output-path", type=click.Path(), help="Output file path")
|
||||
@click.pass_context
|
||||
def export(ctx, agent_id: str, format: str, output_path: Optional[str]):
|
||||
"""Export learned agent model"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"format": format}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/agents/{agent_id}/learning/export",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
if output_path:
|
||||
with open(output_path, 'wb') as f:
|
||||
f.write(response.content)
|
||||
success(f"Model exported to {output_path}")
|
||||
else:
|
||||
# Output metadata about the export
|
||||
export_info = response.headers.get('X-Export-Info', '{}')
|
||||
try:
|
||||
info_data = json.loads(export_info)
|
||||
output(info_data, ctx.obj['output_format'])
|
||||
except:
|
||||
output({"status": "export_ready", "format": format}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to export model: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option("--type", required=True,
|
||||
type=click.Choice(["optimization", "feature", "bugfix", "documentation"]),
|
||||
help="Contribution type")
|
||||
@click.option("--description", required=True, help="Contribution description")
|
||||
@click.option("--github-repo", default="oib/AITBC", help="GitHub repository")
|
||||
@click.option("--branch", default="main", help="Target branch")
|
||||
@click.pass_context
|
||||
def submit_contribution(ctx, type: str, description: str, github_repo: str, branch: str):
|
||||
"""Submit contribution to platform via GitHub"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
contribution_data = {
|
||||
"type": type,
|
||||
"description": description,
|
||||
"github_repo": github_repo,
|
||||
"target_branch": branch
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/agents/contributions",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=contribution_data
|
||||
)
|
||||
|
||||
if response.status_code in (200, 201):
|
||||
result = response.json()
|
||||
success(f"Contribution submitted: {result['id']}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to submit contribution: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
agent.add_command(submit_contribution)
|
||||
496
cli/commands/agent_comm.py
Executable file
496
cli/commands/agent_comm.py
Executable file
@@ -0,0 +1,496 @@
|
||||
"""Cross-chain agent communication commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional
|
||||
from core.config import load_multichain_config
|
||||
from core.agent_communication import (
|
||||
CrossChainAgentCommunication, AgentInfo, AgentMessage,
|
||||
MessageType, AgentStatus
|
||||
)
|
||||
from utils import output, error, success
|
||||
|
||||
@click.group()
|
||||
def agent_comm():
|
||||
"""Cross-chain agent communication commands"""
|
||||
pass
|
||||
|
||||
@agent_comm.command()
|
||||
@click.argument('agent_id')
|
||||
@click.argument('name')
|
||||
@click.argument('chain_id')
|
||||
@click.argument('endpoint')
|
||||
@click.option('--capabilities', help='Comma-separated list of capabilities')
|
||||
@click.option('--reputation', default=0.5, help='Initial reputation score')
|
||||
@click.option('--version', default='1.0.0', help='Agent version')
|
||||
@click.pass_context
|
||||
def register(ctx, agent_id, name, chain_id, endpoint, capabilities, reputation, version):
|
||||
"""Register an agent in the cross-chain network"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Parse capabilities
|
||||
cap_list = capabilities.split(',') if capabilities else []
|
||||
|
||||
# Create agent info
|
||||
agent_info = AgentInfo(
|
||||
agent_id=agent_id,
|
||||
name=name,
|
||||
chain_id=chain_id,
|
||||
node_id="default-node", # Would be determined dynamically
|
||||
status=AgentStatus.ACTIVE,
|
||||
capabilities=cap_list,
|
||||
reputation_score=reputation,
|
||||
last_seen=datetime.now(),
|
||||
endpoint=endpoint,
|
||||
version=version
|
||||
)
|
||||
|
||||
# Register agent
|
||||
success = asyncio.run(comm.register_agent(agent_info))
|
||||
|
||||
if success:
|
||||
success(f"Agent {agent_id} registered successfully!")
|
||||
|
||||
agent_data = {
|
||||
"Agent ID": agent_id,
|
||||
"Name": name,
|
||||
"Chain ID": chain_id,
|
||||
"Status": "active",
|
||||
"Capabilities": ", ".join(cap_list),
|
||||
"Reputation": f"{reputation:.2f}",
|
||||
"Endpoint": endpoint,
|
||||
"Version": version
|
||||
}
|
||||
|
||||
output(agent_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error(f"Failed to register agent {agent_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error registering agent: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.option('--chain-id', help='Filter by chain ID')
|
||||
@click.option('--status', type=click.Choice(['active', 'inactive', 'busy', 'offline']), help='Filter by status')
|
||||
@click.option('--capabilities', help='Filter by capabilities (comma-separated)')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def list(ctx, chain_id, status, capabilities, format):
|
||||
"""List registered agents"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Get all agents
|
||||
agents = list(comm.agents.values())
|
||||
|
||||
# Apply filters
|
||||
if chain_id:
|
||||
agents = [a for a in agents if a.chain_id == chain_id]
|
||||
|
||||
if status:
|
||||
agents = [a for a in agents if a.status.value == status]
|
||||
|
||||
if capabilities:
|
||||
required_caps = [cap.strip() for cap in capabilities.split(',')]
|
||||
agents = [a for a in agents if any(cap in a.capabilities for cap in required_caps)]
|
||||
|
||||
if not agents:
|
||||
output("No agents found", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
# Format output
|
||||
agent_data = [
|
||||
{
|
||||
"Agent ID": agent.agent_id,
|
||||
"Name": agent.name,
|
||||
"Chain ID": agent.chain_id,
|
||||
"Status": agent.status.value,
|
||||
"Reputation": f"{agent.reputation_score:.2f}",
|
||||
"Capabilities": ", ".join(agent.capabilities[:3]), # Show first 3
|
||||
"Last Seen": agent.last_seen.strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
for agent in agents
|
||||
]
|
||||
|
||||
output(agent_data, ctx.obj.get('output_format', format), title="Registered Agents")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error listing agents: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--capabilities', help='Required capabilities (comma-separated)')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def discover(ctx, chain_id, capabilities, format):
|
||||
"""Discover agents on a specific chain"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Parse capabilities
|
||||
cap_list = capabilities.split(',') if capabilities else None
|
||||
|
||||
# Discover agents
|
||||
agents = asyncio.run(comm.discover_agents(chain_id, cap_list))
|
||||
|
||||
if not agents:
|
||||
output(f"No agents found on chain {chain_id}", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
# Format output
|
||||
agent_data = [
|
||||
{
|
||||
"Agent ID": agent.agent_id,
|
||||
"Name": agent.name,
|
||||
"Status": agent.status.value,
|
||||
"Reputation": f"{agent.reputation_score:.2f}",
|
||||
"Capabilities": ", ".join(agent.capabilities),
|
||||
"Endpoint": agent.endpoint,
|
||||
"Version": agent.version
|
||||
}
|
||||
for agent in agents
|
||||
]
|
||||
|
||||
output(agent_data, ctx.obj.get('output_format', format), title=f"Agents on Chain {chain_id}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error discovering agents: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.argument('sender_id')
|
||||
@click.argument('receiver_id')
|
||||
@click.argument('message_type')
|
||||
@click.argument('chain_id')
|
||||
@click.option('--payload', help='Message payload (JSON string)')
|
||||
@click.option('--target-chain', help='Target chain for cross-chain messages')
|
||||
@click.option('--priority', default=5, help='Message priority (1-10)')
|
||||
@click.option('--ttl', default=3600, help='Time to live in seconds')
|
||||
@click.pass_context
|
||||
def send(ctx, sender_id, receiver_id, message_type, chain_id, payload, target_chain, priority, ttl):
|
||||
"""Send a message to an agent"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Parse message type
|
||||
try:
|
||||
msg_type = MessageType(message_type)
|
||||
except ValueError:
|
||||
error(f"Invalid message type: {message_type}")
|
||||
error(f"Valid types: {[t.value for t in MessageType]}")
|
||||
raise click.Abort()
|
||||
|
||||
# Parse payload
|
||||
payload_dict = {}
|
||||
if payload:
|
||||
try:
|
||||
payload_dict = json.loads(payload)
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid JSON payload")
|
||||
raise click.Abort()
|
||||
|
||||
# Create message
|
||||
message = AgentMessage(
|
||||
message_id=f"msg_{datetime.now().strftime('%Y%m%d%H%M%S')}_{sender_id}",
|
||||
sender_id=sender_id,
|
||||
receiver_id=receiver_id,
|
||||
message_type=msg_type,
|
||||
chain_id=chain_id,
|
||||
target_chain_id=target_chain,
|
||||
payload=payload_dict,
|
||||
timestamp=datetime.now(),
|
||||
signature="auto_generated", # Would be cryptographically signed
|
||||
priority=priority,
|
||||
ttl_seconds=ttl
|
||||
)
|
||||
|
||||
# Send message
|
||||
success = asyncio.run(comm.send_message(message))
|
||||
|
||||
if success:
|
||||
success(f"Message sent successfully to {receiver_id}")
|
||||
|
||||
message_data = {
|
||||
"Message ID": message.message_id,
|
||||
"Sender": sender_id,
|
||||
"Receiver": receiver_id,
|
||||
"Type": message_type,
|
||||
"Chain": chain_id,
|
||||
"Target Chain": target_chain or "Same",
|
||||
"Priority": priority,
|
||||
"TTL": f"{ttl}s",
|
||||
"Sent": message.timestamp.strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
output(message_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error(f"Failed to send message to {receiver_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error sending message: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.argument('agent_ids', nargs=-1, required=True)
|
||||
@click.argument('collaboration_type')
|
||||
@click.option('--governance', help='Governance rules (JSON string)')
|
||||
@click.pass_context
|
||||
def collaborate(ctx, agent_ids, collaboration_type, governance):
|
||||
"""Create a multi-agent collaboration"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Parse governance rules
|
||||
governance_dict = {}
|
||||
if governance:
|
||||
try:
|
||||
governance_dict = json.loads(governance)
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid JSON governance rules")
|
||||
raise click.Abort()
|
||||
|
||||
# Create collaboration
|
||||
collaboration_id = asyncio.run(comm.create_collaboration(
|
||||
list(agent_ids), collaboration_type, governance_dict
|
||||
))
|
||||
|
||||
if collaboration_id:
|
||||
success(f"Collaboration created: {collaboration_id}")
|
||||
|
||||
collab_data = {
|
||||
"Collaboration ID": collaboration_id,
|
||||
"Type": collaboration_type,
|
||||
"Participants": ", ".join(agent_ids),
|
||||
"Status": "active",
|
||||
"Created": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
output(collab_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error("Failed to create collaboration")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error creating collaboration: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.argument('agent_id')
|
||||
@click.argument('interaction_result', type=click.Choice(['success', 'failure']))
|
||||
@click.option('--feedback', type=float, help='Feedback score (0.0-1.0)')
|
||||
@click.pass_context
|
||||
def reputation(ctx, agent_id, interaction_result, feedback):
|
||||
"""Update agent reputation"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Update reputation
|
||||
success = asyncio.run(comm.update_reputation(
|
||||
agent_id, interaction_result == 'success', feedback
|
||||
))
|
||||
|
||||
if success:
|
||||
# Get updated reputation
|
||||
agent_status = asyncio.run(comm.get_agent_status(agent_id))
|
||||
|
||||
if agent_status and agent_status.get('reputation'):
|
||||
rep = agent_status['reputation']
|
||||
success(f"Reputation updated for {agent_id}")
|
||||
|
||||
rep_data = {
|
||||
"Agent ID": agent_id,
|
||||
"Reputation Score": f"{rep['reputation_score']:.3f}",
|
||||
"Total Interactions": rep['total_interactions'],
|
||||
"Successful": rep['successful_interactions'],
|
||||
"Failed": rep['failed_interactions'],
|
||||
"Success Rate": f"{(rep['successful_interactions'] / rep['total_interactions'] * 100):.1f}%" if rep['total_interactions'] > 0 else "N/A",
|
||||
"Last Updated": rep['last_updated']
|
||||
}
|
||||
|
||||
output(rep_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
success(f"Reputation updated for {agent_id}")
|
||||
else:
|
||||
error(f"Failed to update reputation for {agent_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error updating reputation: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.argument('agent_id')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def status(ctx, agent_id, format):
|
||||
"""Get detailed agent status"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Get agent status
|
||||
agent_status = asyncio.run(comm.get_agent_status(agent_id))
|
||||
|
||||
if not agent_status:
|
||||
error(f"Agent {agent_id} not found")
|
||||
raise click.Abort()
|
||||
|
||||
# Format output
|
||||
status_data = [
|
||||
{"Metric": "Agent ID", "Value": agent_status["agent_info"]["agent_id"]},
|
||||
{"Metric": "Name", "Value": agent_status["agent_info"]["name"]},
|
||||
{"Metric": "Chain ID", "Value": agent_status["agent_info"]["chain_id"]},
|
||||
{"Metric": "Status", "Value": agent_status["status"]},
|
||||
{"Metric": "Reputation", "Value": f"{agent_status['agent_info']['reputation_score']:.3f}" if agent_status.get('reputation') else "N/A"},
|
||||
{"Metric": "Capabilities", "Value": ", ".join(agent_status["agent_info"]["capabilities"])},
|
||||
{"Metric": "Message Queue Size", "Value": agent_status["message_queue_size"]},
|
||||
{"Metric": "Active Collaborations", "Value": agent_status["active_collaborations"]},
|
||||
{"Metric": "Last Seen", "Value": agent_status["last_seen"]},
|
||||
{"Metric": "Endpoint", "Value": agent_status["agent_info"]["endpoint"]},
|
||||
{"Metric": "Version", "Value": agent_status["agent_info"]["version"]}
|
||||
]
|
||||
|
||||
output(status_data, ctx.obj.get('output_format', format), title=f"Agent Status: {agent_id}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting agent status: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def network(ctx, format):
|
||||
"""Get cross-chain network overview"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
# Get network overview
|
||||
overview = asyncio.run(comm.get_network_overview())
|
||||
|
||||
if not overview:
|
||||
error("No network data available")
|
||||
raise click.Abort()
|
||||
|
||||
# Overview data
|
||||
overview_data = [
|
||||
{"Metric": "Total Agents", "Value": overview["total_agents"]},
|
||||
{"Metric": "Active Agents", "Value": overview["active_agents"]},
|
||||
{"Metric": "Total Collaborations", "Value": overview["total_collaborations"]},
|
||||
{"Metric": "Active Collaborations", "Value": overview["active_collaborations"]},
|
||||
{"Metric": "Total Messages", "Value": overview["total_messages"]},
|
||||
{"Metric": "Queued Messages", "Value": overview["queued_messages"]},
|
||||
{"Metric": "Average Reputation", "Value": f"{overview['average_reputation']:.3f}"},
|
||||
{"Metric": "Routing Table Size", "Value": overview["routing_table_size"]},
|
||||
{"Metric": "Discovery Cache Size", "Value": overview["discovery_cache_size"]}
|
||||
]
|
||||
|
||||
output(overview_data, ctx.obj.get('output_format', format), title="Network Overview")
|
||||
|
||||
# Agents by chain
|
||||
if overview["agents_by_chain"]:
|
||||
chain_data = [
|
||||
{"Chain ID": chain_id, "Total Agents": count, "Active Agents": overview["active_agents_by_chain"].get(chain_id, 0)}
|
||||
for chain_id, count in overview["agents_by_chain"].items()
|
||||
]
|
||||
|
||||
output(chain_data, ctx.obj.get('output_format', format), title="Agents by Chain")
|
||||
|
||||
# Collaborations by type
|
||||
if overview["collaborations_by_type"]:
|
||||
collab_data = [
|
||||
{"Type": collab_type, "Count": count}
|
||||
for collab_type, count in overview["collaborations_by_type"].items()
|
||||
]
|
||||
|
||||
output(collab_data, ctx.obj.get('output_format', format), title="Collaborations by Type")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting network overview: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@agent_comm.command()
|
||||
@click.option('--realtime', is_flag=True, help='Real-time monitoring')
|
||||
@click.option('--interval', default=10, help='Update interval in seconds')
|
||||
@click.pass_context
|
||||
def monitor(ctx, realtime, interval):
|
||||
"""Monitor cross-chain agent communication"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
comm = CrossChainAgentCommunication(config)
|
||||
|
||||
if realtime:
|
||||
# Real-time monitoring
|
||||
from rich.console import Console
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
import time
|
||||
|
||||
console = Console()
|
||||
|
||||
def generate_monitor_table():
|
||||
try:
|
||||
overview = asyncio.run(comm.get_network_overview())
|
||||
|
||||
table = Table(title=f"Agent Network Monitor - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
table.add_column("Metric", style="cyan")
|
||||
table.add_column("Value", style="green")
|
||||
|
||||
table.add_row("Total Agents", str(overview["total_agents"]))
|
||||
table.add_row("Active Agents", str(overview["active_agents"]))
|
||||
table.add_row("Active Collaborations", str(overview["active_collaborations"]))
|
||||
table.add_row("Queued Messages", str(overview["queued_messages"]))
|
||||
table.add_row("Avg Reputation", f"{overview['average_reputation']:.3f}")
|
||||
|
||||
# Add top chains by agent count
|
||||
if overview["agents_by_chain"]:
|
||||
table.add_row("", "")
|
||||
table.add_row("Top Chains by Agents", "")
|
||||
for chain_id, count in sorted(overview["agents_by_chain"].items(), key=lambda x: x[1], reverse=True)[:3]:
|
||||
active = overview["active_agents_by_chain"].get(chain_id, 0)
|
||||
table.add_row(f" {chain_id}", f"{count} total, {active} active")
|
||||
|
||||
return table
|
||||
except Exception as e:
|
||||
return f"Error getting network data: {e}"
|
||||
|
||||
with Live(generate_monitor_table(), refresh_per_second=1) as live:
|
||||
try:
|
||||
while True:
|
||||
live.update(generate_monitor_table())
|
||||
time.sleep(interval)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Monitoring stopped by user[/yellow]")
|
||||
else:
|
||||
# Single snapshot
|
||||
overview = asyncio.run(comm.get_network_overview())
|
||||
|
||||
monitor_data = [
|
||||
{"Metric": "Total Agents", "Value": overview["total_agents"]},
|
||||
{"Metric": "Active Agents", "Value": overview["active_agents"]},
|
||||
{"Metric": "Total Collaborations", "Value": overview["total_collaborations"]},
|
||||
{"Metric": "Active Collaborations", "Value": overview["active_collaborations"]},
|
||||
{"Metric": "Total Messages", "Value": overview["total_messages"]},
|
||||
{"Metric": "Queued Messages", "Value": overview["queued_messages"]},
|
||||
{"Metric": "Average Reputation", "Value": f"{overview['average_reputation']:.3f}"},
|
||||
{"Metric": "Routing Table Size", "Value": overview["routing_table_size"]}
|
||||
]
|
||||
|
||||
output(monitor_data, ctx.obj.get('output_format', 'table'), title="Agent Network Monitor")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during monitoring: {str(e)}")
|
||||
raise click.Abort()
|
||||
124
cli/commands/ai.py
Normal file
124
cli/commands/ai.py
Normal file
@@ -0,0 +1,124 @@
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import uuid
|
||||
import click
|
||||
import httpx
|
||||
from pydantic import BaseModel
|
||||
|
||||
@click.group(name='ai')
|
||||
def ai_group():
|
||||
"""AI marketplace commands."""
|
||||
pass
|
||||
|
||||
@ai_group.command()
|
||||
@click.option('--port', default=8008, show_default=True, help='AI provider port')
|
||||
@click.option('--model', default='qwen3:8b', show_default=True, help='Ollama model name')
|
||||
@click.option('--wallet', 'provider_wallet', required=True, help='Provider wallet address (for verification)')
|
||||
@click.option('--marketplace-url', default='http://127.0.0.1:8014', help='Marketplace API base URL')
|
||||
def status(port, model, provider_wallet, marketplace_url):
|
||||
"""Check AI provider service status."""
|
||||
try:
|
||||
resp = httpx.get(f"http://127.0.0.1:{port}/health", timeout=5.0)
|
||||
if resp.status_code == 200:
|
||||
health = resp.json()
|
||||
click.echo(f"✅ AI Provider Status: {health.get('status', 'unknown')}")
|
||||
click.echo(f" Model: {health.get('model', 'unknown')}")
|
||||
click.echo(f" Wallet: {health.get('wallet', 'unknown')}")
|
||||
else:
|
||||
click.echo(f"❌ AI Provider not responding (status: {resp.status_code})")
|
||||
except httpx.ConnectError:
|
||||
click.echo(f"❌ AI Provider not running on port {port}")
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error checking AI Provider: {e}")
|
||||
|
||||
@ai_group.command()
|
||||
@click.option('--port', default=8008, show_default=True, help='AI provider port')
|
||||
@click.option('--model', default='qwen3:8b', show_default=True, help='Ollama model name')
|
||||
@click.option('--wallet', 'provider_wallet', required=True, help='Provider wallet address (for verification)')
|
||||
@click.option('--marketplace-url', default='http://127.0.0.1:8014', help='Marketplace API base URL')
|
||||
def start(port, model, provider_wallet, marketplace_url):
|
||||
"""Start AI provider service - provides setup instructions"""
|
||||
click.echo(f"AI Provider Service Setup:")
|
||||
click.echo(f" Port: {port}")
|
||||
click.echo(f" Model: {model}")
|
||||
click.echo(f" Wallet: {provider_wallet}")
|
||||
click.echo(f" Marketplace: {marketplace_url}")
|
||||
|
||||
click.echo("\n📋 To start the AI Provider service:")
|
||||
click.echo(f" 1. Create systemd service: /etc/systemd/system/aitbc-ai-provider.service")
|
||||
click.echo(f" 2. Run: sudo systemctl daemon-reload")
|
||||
click.echo(f" 3. Run: sudo systemctl enable aitbc-ai-provider")
|
||||
click.echo(f" 4. Run: sudo systemctl start aitbc-ai-provider")
|
||||
click.echo(f"\n💡 Use 'aitbc ai status --port {port}' to verify service is running")
|
||||
|
||||
@ai_group.command()
|
||||
def stop():
|
||||
"""Stop AI provider service - provides shutdown instructions"""
|
||||
click.echo("📋 To stop the AI Provider service:")
|
||||
click.echo(" 1. Run: sudo systemctl stop aitbc-ai-provider")
|
||||
click.echo(" 2. Run: sudo systemctl status aitbc-ai-provider (to verify)")
|
||||
click.echo("\n💡 Use 'aitbc ai status' to check if service is stopped")
|
||||
|
||||
@ai_group.command()
|
||||
@click.option('--to', required=True, help='Provider host (IP)')
|
||||
@click.option('--port', default=8008, help='Provider port')
|
||||
@click.option('--prompt', required=True, help='Prompt to send')
|
||||
@click.option('--buyer-wallet', 'buyer_wallet', required=True, help='Buyer wallet name (in local wallet store)')
|
||||
@click.option('--provider-wallet', 'provider_wallet', required=True, help='Provider wallet address (recipient)')
|
||||
@click.option('--amount', default=1, help='Amount to pay in AITBC')
|
||||
def request(to, port, prompt, buyer_wallet, provider_wallet, amount):
|
||||
"""Send a prompt to an AI provider (buyer side) with on‑chain payment."""
|
||||
# Helper to get provider balance
|
||||
def get_balance():
|
||||
res = subprocess.run([
|
||||
sys.executable, "-m", "aitbc_cli.main", "blockchain", "balance",
|
||||
"--address", provider_wallet
|
||||
], capture_output=True, text=True, check=True)
|
||||
for line in res.stdout.splitlines():
|
||||
if "Balance:" in line:
|
||||
parts = line.split(":")
|
||||
return float(parts[1].strip())
|
||||
raise ValueError("Balance not found")
|
||||
|
||||
# Step 1: get initial balance
|
||||
before = get_balance()
|
||||
click.echo(f"Provider balance before: {before}")
|
||||
|
||||
# Step 2: send payment via blockchain CLI (use current Python env)
|
||||
if amount > 0:
|
||||
click.echo(f"Sending {amount} AITBC from wallet '{buyer_wallet}' to {provider_wallet}...")
|
||||
try:
|
||||
subprocess.run([
|
||||
sys.executable, "-m", "aitbc_cli.main", "blockchain", "send",
|
||||
"--from", buyer_wallet,
|
||||
"--to", provider_wallet,
|
||||
"--amount", str(amount)
|
||||
], check=True, capture_output=True, text=True)
|
||||
click.echo("Payment sent.")
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise click.ClickException(f"Blockchain send failed: {e.stderr}")
|
||||
|
||||
# Step 3: get new balance
|
||||
after = get_balance()
|
||||
click.echo(f"Provider balance after: {after}")
|
||||
delta = after - before
|
||||
click.echo(f"Balance delta: {delta}")
|
||||
|
||||
# Step 4: call provider
|
||||
url = f"http://{to}:{port}/job"
|
||||
payload = {
|
||||
"prompt": prompt,
|
||||
"buyer": provider_wallet,
|
||||
"amount": amount
|
||||
}
|
||||
try:
|
||||
resp = httpx.post(url, json=payload, timeout=30.0)
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
click.echo("Result: " + data.get("result", ""))
|
||||
except httpx.HTTPError as e:
|
||||
raise click.ClickException(f"Request to provider failed: {e}")
|
||||
|
||||
if __name__ == '__main__':
|
||||
ai_group()
|
||||
469
cli/commands/ai_surveillance.py
Executable file
469
cli/commands/ai_surveillance.py
Executable file
@@ -0,0 +1,469 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
AI Surveillance CLI Commands
|
||||
Advanced AI-powered surveillance and behavioral analysis
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime
|
||||
from imports import ensure_coordinator_api_imports
|
||||
|
||||
ensure_coordinator_api_imports()
|
||||
|
||||
try:
|
||||
from app.services.ai_surveillance import (
|
||||
start_ai_surveillance, stop_ai_surveillance, get_surveillance_summary,
|
||||
get_user_risk_profile, list_active_alerts, analyze_behavior_patterns,
|
||||
ai_surveillance, SurveillanceType, RiskLevel, AlertPriority
|
||||
)
|
||||
_import_error = None
|
||||
except ImportError as e:
|
||||
_import_error = e
|
||||
|
||||
def _missing(*args, **kwargs):
|
||||
raise ImportError(
|
||||
f"Required service module 'app.services.ai_surveillance' could not be imported: {_import_error}. "
|
||||
"Ensure coordinator-api dependencies are installed and the source directory is accessible."
|
||||
)
|
||||
start_ai_surveillance = stop_ai_surveillance = get_surveillance_summary = _missing
|
||||
get_user_risk_profile = list_active_alerts = analyze_behavior_patterns = _missing
|
||||
ai_surveillance = None
|
||||
|
||||
class SurveillanceType:
|
||||
pass
|
||||
class RiskLevel:
|
||||
pass
|
||||
class AlertPriority:
|
||||
pass
|
||||
|
||||
@click.group()
|
||||
def ai_surveillance_group():
|
||||
"""AI-powered surveillance and behavioral analysis commands"""
|
||||
pass
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.option("--symbols", required=True, help="Trading symbols to monitor (comma-separated)")
|
||||
@click.pass_context
|
||||
def start(ctx, symbols: str):
|
||||
"""Start AI surveillance monitoring"""
|
||||
try:
|
||||
symbol_list = [s.strip().upper() for s in symbols.split(",")]
|
||||
|
||||
click.echo(f"🤖 Starting AI Surveillance Monitoring...")
|
||||
click.echo(f"📊 Monitoring symbols: {', '.join(symbol_list)}")
|
||||
|
||||
success = asyncio.run(start_ai_surveillance(symbol_list))
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ AI Surveillance monitoring started!")
|
||||
click.echo(f"🔍 ML-based pattern recognition active")
|
||||
click.echo(f"👥 Behavioral analysis running")
|
||||
click.echo(f"⚠️ Predictive risk assessment enabled")
|
||||
click.echo(f"🛡️ Market integrity protection active")
|
||||
else:
|
||||
click.echo(f"❌ Failed to start AI surveillance")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Start surveillance failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.pass_context
|
||||
def stop(ctx):
|
||||
"""Stop AI surveillance monitoring"""
|
||||
try:
|
||||
click.echo(f"🤖 Stopping AI Surveillance Monitoring...")
|
||||
|
||||
success = asyncio.run(stop_ai_surveillance())
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ AI Surveillance monitoring stopped")
|
||||
else:
|
||||
click.echo(f"⚠️ Surveillance was not running")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Stop surveillance failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.pass_context
|
||||
def status(ctx):
|
||||
"""Show AI surveillance system status"""
|
||||
try:
|
||||
click.echo(f"🤖 AI Surveillance System Status")
|
||||
|
||||
summary = get_surveillance_summary()
|
||||
|
||||
click.echo(f"\n📊 System Overview:")
|
||||
click.echo(f" Monitoring Active: {'✅ Yes' if summary['monitoring_active'] else '❌ No'}")
|
||||
click.echo(f" Total Alerts: {summary['total_alerts']}")
|
||||
click.echo(f" Resolved Alerts: {summary['resolved_alerts']}")
|
||||
click.echo(f" False Positives: {summary['false_positives']}")
|
||||
click.echo(f" Active Alerts: {summary['active_alerts']}")
|
||||
click.echo(f" Behavior Patterns: {summary['behavior_patterns']}")
|
||||
click.echo(f" Monitored Symbols: {summary['monitored_symbols']}")
|
||||
click.echo(f" ML Models: {summary['ml_models']}")
|
||||
|
||||
# Alerts by type
|
||||
alerts_by_type = summary.get('alerts_by_type', {})
|
||||
if alerts_by_type:
|
||||
click.echo(f"\n📈 Alerts by Type:")
|
||||
for alert_type, count in alerts_by_type.items():
|
||||
click.echo(f" {alert_type.replace('_', ' ').title()}: {count}")
|
||||
|
||||
# Alerts by risk level
|
||||
alerts_by_risk = summary.get('alerts_by_risk', {})
|
||||
if alerts_by_risk:
|
||||
click.echo(f"\n⚠️ Alerts by Risk Level:")
|
||||
risk_icons = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}
|
||||
for risk_level, count in alerts_by_risk.items():
|
||||
icon = risk_icons.get(risk_level, "❓")
|
||||
click.echo(f" {icon} {risk_level.title()}: {count}")
|
||||
|
||||
# ML Model performance
|
||||
model_performance = summary.get('model_performance', {})
|
||||
if model_performance:
|
||||
click.echo(f"\n🤖 ML Model Performance:")
|
||||
for model_id, performance in model_performance.items():
|
||||
click.echo(f" {model_id.replace('_', ' ').title()}:")
|
||||
click.echo(f" Accuracy: {performance['accuracy']:.1%}")
|
||||
click.echo(f" Threshold: {performance['threshold']:.2f}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Status check failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.option("--limit", type=int, default=20, help="Maximum number of alerts to show")
|
||||
@click.option("--type", type=click.Choice(['pattern_recognition', 'behavioral_analysis', 'predictive_risk', 'market_integrity']), help="Filter by alert type")
|
||||
@click.option("--risk-level", type=click.Choice(['low', 'medium', 'high', 'critical']), help="Filter by risk level")
|
||||
@click.pass_context
|
||||
def alerts(ctx, limit: int, type: str, risk_level: str):
|
||||
"""List active surveillance alerts"""
|
||||
try:
|
||||
click.echo(f"🚨 Active Surveillance Alerts")
|
||||
|
||||
alerts = list_active_alerts(limit)
|
||||
|
||||
# Apply filters
|
||||
if type:
|
||||
alerts = [a for a in alerts if a['type'] == type]
|
||||
|
||||
if risk_level:
|
||||
alerts = [a for a in alerts if a['risk_level'] == risk_level]
|
||||
|
||||
if not alerts:
|
||||
click.echo(f"✅ No active alerts found")
|
||||
return
|
||||
|
||||
click.echo(f"\n📊 Total Alerts: {len(alerts)}")
|
||||
|
||||
if type:
|
||||
click.echo(f"🔍 Filtered by type: {type.replace('_', ' ').title()}")
|
||||
|
||||
if risk_level:
|
||||
click.echo(f"🔍 Filtered by risk level: {risk_level.title()}")
|
||||
|
||||
# Display alerts
|
||||
for i, alert in enumerate(alerts):
|
||||
risk_icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}.get(alert['risk_level'], "❓")
|
||||
priority_icon = {"urgent": "🚨", "high": "⚡", "medium": "📋", "low": "📝"}.get(alert['priority'], "❓")
|
||||
|
||||
click.echo(f"\n{risk_icon} Alert #{i+1}")
|
||||
click.echo(f" ID: {alert['alert_id']}")
|
||||
click.echo(f" Type: {alert['type'].replace('_', ' ').title()}")
|
||||
click.echo(f" User: {alert['user_id']}")
|
||||
click.echo(f" Risk Level: {alert['risk_level'].title()}")
|
||||
click.echo(f" Priority: {alert['priority'].title()}")
|
||||
click.echo(f" Confidence: {alert['confidence']:.1%}")
|
||||
click.echo(f" Description: {alert['description']}")
|
||||
click.echo(f" Detected: {alert['detected_at'][:19]}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Alert listing failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.option("--user-id", help="Specific user ID to analyze")
|
||||
@click.pass_context
|
||||
def patterns(ctx, user_id: str):
|
||||
"""Analyze behavior patterns"""
|
||||
try:
|
||||
click.echo(f"🔍 Behavior Pattern Analysis")
|
||||
|
||||
if user_id:
|
||||
click.echo(f"👤 Analyzing user: {user_id}")
|
||||
patterns = analyze_behavior_patterns(user_id)
|
||||
|
||||
click.echo(f"\n📊 User Pattern Summary:")
|
||||
click.echo(f" Total Patterns: {patterns['total_patterns']}")
|
||||
click.echo(f" Pattern Types: {', '.join(patterns['pattern_types'])}")
|
||||
|
||||
if patterns['patterns']:
|
||||
click.echo(f"\n📈 Recent Patterns:")
|
||||
for pattern in patterns['patterns'][-5:]: # Last 5 patterns
|
||||
pattern_icon = "⚠️" if pattern['risk_score'] > 0.8 else "📋"
|
||||
click.echo(f" {pattern_icon} {pattern['pattern_type'].replace('_', ' ').title()}")
|
||||
click.echo(f" Confidence: {pattern['confidence']:.1%}")
|
||||
click.echo(f" Risk Score: {pattern['risk_score']:.2f}")
|
||||
click.echo(f" Detected: {pattern['detected_at'][:19]}")
|
||||
else:
|
||||
click.echo(f"📊 Overall Pattern Analysis")
|
||||
patterns = analyze_behavior_patterns()
|
||||
|
||||
click.echo(f"\n📈 System Pattern Summary:")
|
||||
click.echo(f" Total Patterns: {patterns['total_patterns']}")
|
||||
click.echo(f" Average Confidence: {patterns['avg_confidence']:.1%}")
|
||||
click.echo(f" Average Risk Score: {patterns['avg_risk_score']:.2f}")
|
||||
|
||||
pattern_types = patterns.get('pattern_types', {})
|
||||
if pattern_types:
|
||||
click.echo(f"\n📊 Pattern Types:")
|
||||
for pattern_type, count in pattern_types.items():
|
||||
click.echo(f" {pattern_type.replace('_', ' ').title()}: {count}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Pattern analysis failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.option("--user-id", required=True, help="User ID to analyze")
|
||||
@click.pass_context
|
||||
def risk_profile(ctx, user_id: str):
|
||||
"""Get comprehensive user risk profile"""
|
||||
try:
|
||||
click.echo(f"⚠️ User Risk Profile: {user_id}")
|
||||
|
||||
profile = get_user_risk_profile(user_id)
|
||||
|
||||
click.echo(f"\n📊 Risk Assessment:")
|
||||
click.echo(f" Predictive Risk Score: {profile['predictive_risk']:.2f}")
|
||||
click.echo(f" Risk Trend: {profile['risk_trend'].title()}")
|
||||
click.echo(f" Last Assessed: {profile['last_assessed'][:19] if profile['last_assessed'] else 'Never'}")
|
||||
|
||||
click.echo(f"\n👤 User Activity:")
|
||||
click.echo(f" Behavior Patterns: {profile['behavior_patterns']}")
|
||||
click.echo(f" Surveillance Alerts: {profile['surveillance_alerts']}")
|
||||
|
||||
if profile['pattern_types']:
|
||||
click.echo(f" Pattern Types: {', '.join(profile['pattern_types'])}")
|
||||
|
||||
if profile['alert_types']:
|
||||
click.echo(f" Alert Types: {', '.join(profile['alert_types'])}")
|
||||
|
||||
# Risk assessment
|
||||
risk_score = profile['predictive_risk']
|
||||
if risk_score > 0.9:
|
||||
risk_assessment = "🔴 CRITICAL - Immediate attention required"
|
||||
elif risk_score > 0.8:
|
||||
risk_assessment = "🟠 HIGH - Monitor closely"
|
||||
elif risk_score > 0.6:
|
||||
risk_assessment = "🟡 MEDIUM - Standard monitoring"
|
||||
else:
|
||||
risk_assessment = "🟢 LOW - Normal activity"
|
||||
|
||||
click.echo(f"\n🎯 Risk Assessment: {risk_assessment}")
|
||||
|
||||
# Recommendations
|
||||
if risk_score > 0.8:
|
||||
click.echo(f"\n💡 Recommendations:")
|
||||
click.echo(f" • Review recent trading activity")
|
||||
click.echo(f" • Consider temporary restrictions")
|
||||
click.echo(f" • Enhanced monitoring protocols")
|
||||
click.echo(f" • Manual compliance review")
|
||||
elif risk_score > 0.6:
|
||||
click.echo(f"\n💡 Recommendations:")
|
||||
click.echo(f" • Continue standard monitoring")
|
||||
click.echo(f" • Watch for pattern changes")
|
||||
click.echo(f" • Periodic compliance checks")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Risk profile failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.pass_context
|
||||
def models(ctx):
|
||||
"""Show ML model information"""
|
||||
try:
|
||||
click.echo(f"🤖 AI Surveillance ML Models")
|
||||
|
||||
summary = get_surveillance_summary()
|
||||
model_performance = summary.get('model_performance', {})
|
||||
|
||||
if not model_performance:
|
||||
click.echo(f"❌ No model information available")
|
||||
return
|
||||
|
||||
click.echo(f"\n📊 Model Performance Overview:")
|
||||
|
||||
for model_id, performance in model_performance.items():
|
||||
click.echo(f"\n🤖 {model_id.replace('_', ' ').title()}:")
|
||||
click.echo(f" Accuracy: {performance['accuracy']:.1%}")
|
||||
click.echo(f" Risk Threshold: {performance['threshold']:.2f}")
|
||||
|
||||
# Model status based on accuracy
|
||||
if performance['accuracy'] > 0.9:
|
||||
status = "🟢 Excellent"
|
||||
elif performance['accuracy'] > 0.8:
|
||||
status = "🟡 Good"
|
||||
elif performance['accuracy'] > 0.7:
|
||||
status = "🟠 Fair"
|
||||
else:
|
||||
status = "🔴 Poor"
|
||||
|
||||
click.echo(f" Status: {status}")
|
||||
|
||||
# Model descriptions
|
||||
click.echo(f"\n📋 Model Descriptions:")
|
||||
descriptions = {
|
||||
"pattern_recognition": "Identifies suspicious trading patterns using isolation forest algorithms",
|
||||
"behavioral_analysis": "Analyzes user behavior patterns using clustering techniques",
|
||||
"predictive_risk": "Predicts future risk using gradient boosting models",
|
||||
"market_integrity": "Detects market manipulation using neural networks"
|
||||
}
|
||||
|
||||
for model_id, description in descriptions.items():
|
||||
if model_id in model_performance:
|
||||
click.echo(f"\n🤖 {model_id.replace('_', ' ').title()}:")
|
||||
click.echo(f" {description}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Model information failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.option("--days", type=int, default=7, help="Analysis period in days")
|
||||
@click.pass_context
|
||||
def analytics(ctx, days: int):
|
||||
"""Generate comprehensive surveillance analytics"""
|
||||
try:
|
||||
click.echo(f"📊 AI Surveillance Analytics")
|
||||
click.echo(f"📅 Analysis Period: {days} days")
|
||||
|
||||
summary = get_surveillance_summary()
|
||||
|
||||
click.echo(f"\n📈 System Performance:")
|
||||
click.echo(f" Monitoring Status: {'✅ Active' if summary['monitoring_active'] else '❌ Inactive'}")
|
||||
click.echo(f" Total Alerts Generated: {summary['total_alerts']}")
|
||||
click.echo(f" Alerts Resolved: {summary['resolved_alerts']}")
|
||||
click.echo(f" Resolution Rate: {(summary['resolved_alerts'] / max(summary['total_alerts'], 1)):.1%}")
|
||||
click.echo(f" False Positive Rate: {(summary['false_positives'] / max(summary['resolved_alerts'], 1)):.1%}")
|
||||
|
||||
# Alert analysis
|
||||
alerts_by_type = summary.get('alerts_by_type', {})
|
||||
if alerts_by_type:
|
||||
click.echo(f"\n📊 Alert Distribution:")
|
||||
total_alerts = sum(alerts_by_type.values())
|
||||
for alert_type, count in alerts_by_type.items():
|
||||
percentage = (count / total_alerts * 100) if total_alerts > 0 else 0
|
||||
click.echo(f" {alert_type.replace('_', ' ').title()}: {count} ({percentage:.1f}%)")
|
||||
|
||||
# Risk analysis
|
||||
alerts_by_risk = summary.get('alerts_by_risk', {})
|
||||
if alerts_by_risk:
|
||||
click.echo(f"\n⚠️ Risk Level Distribution:")
|
||||
total_risk_alerts = sum(alerts_by_risk.values())
|
||||
for risk_level, count in alerts_by_risk.items():
|
||||
percentage = (count / total_risk_alerts * 100) if total_risk_alerts > 0 else 0
|
||||
risk_icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}.get(risk_level, "❓")
|
||||
click.echo(f" {risk_icon} {risk_level.title()}: {count} ({percentage:.1f}%)")
|
||||
|
||||
# Pattern analysis
|
||||
patterns = analyze_behavior_patterns()
|
||||
click.echo(f"\n🔍 Pattern Analysis:")
|
||||
click.echo(f" Total Behavior Patterns: {patterns['total_patterns']}")
|
||||
click.echo(f" Average Confidence: {patterns['avg_confidence']:.1%}")
|
||||
click.echo(f" Average Risk Score: {patterns['avg_risk_score']:.2f}")
|
||||
|
||||
pattern_types = patterns.get('pattern_types', {})
|
||||
if pattern_types:
|
||||
click.echo(f" Most Common Pattern: {max(pattern_types, key=pattern_types.get)}")
|
||||
|
||||
# System health
|
||||
click.echo(f"\n🏥 System Health:")
|
||||
health_score = summary.get('ml_models', 0) * 25 # 25 points per model
|
||||
if health_score >= 80:
|
||||
health_status = "🟢 Excellent"
|
||||
elif health_score >= 60:
|
||||
health_status = "🟡 Good"
|
||||
elif health_score >= 40:
|
||||
health_status = "🟠 Fair"
|
||||
else:
|
||||
health_status = "🔴 Poor"
|
||||
|
||||
click.echo(f" Health Score: {health_score}/100")
|
||||
click.echo(f" Status: {health_status}")
|
||||
|
||||
# Recommendations
|
||||
click.echo(f"\n💡 Analytics Recommendations:")
|
||||
if summary['active_alerts'] > 10:
|
||||
click.echo(f" ⚠️ High number of active alerts - consider increasing monitoring resources")
|
||||
|
||||
if summary['false_positives'] / max(summary['resolved_alerts'], 1) > 0.2:
|
||||
click.echo(f" 🔧 High false positive rate - consider adjusting model thresholds")
|
||||
|
||||
if not summary['monitoring_active']:
|
||||
click.echo(f" 🚨 Surveillance inactive - start monitoring immediately")
|
||||
|
||||
if patterns['avg_risk_score'] > 0.8:
|
||||
click.echo(f" ⚠️ High average risk score - review user base and compliance measures")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Analytics generation failed: {e}", err=True)
|
||||
|
||||
@ai_surveillance_group.command()
|
||||
@click.pass_context
|
||||
def test(ctx):
|
||||
"""Test AI surveillance system"""
|
||||
try:
|
||||
click.echo(f"🧪 Testing AI Surveillance System...")
|
||||
|
||||
async def run_tests():
|
||||
# Test 1: Start surveillance
|
||||
click.echo(f"\n📋 Test 1: Start Surveillance")
|
||||
start_success = await start_ai_surveillance(["BTC/USDT", "ETH/USDT"])
|
||||
click.echo(f" ✅ Start: {'Success' if start_success else 'Failed'}")
|
||||
|
||||
# Let it run for data collection
|
||||
click.echo(f"⏱️ Collecting surveillance data...")
|
||||
await asyncio.sleep(3)
|
||||
|
||||
# Test 2: Get status
|
||||
click.echo(f"\n📋 Test 2: System Status")
|
||||
summary = get_surveillance_summary()
|
||||
click.echo(f" ✅ Status Retrieved: {len(summary)} metrics")
|
||||
|
||||
# Test 3: Get alerts
|
||||
click.echo(f"\n📋 Test 3: Alert System")
|
||||
alerts = list_active_alerts()
|
||||
click.echo(f" ✅ Alerts: {len(alerts)} generated")
|
||||
|
||||
# Test 4: Pattern analysis
|
||||
click.echo(f"\n📋 Test 4: Pattern Analysis")
|
||||
patterns = analyze_behavior_patterns()
|
||||
click.echo(f" ✅ Patterns: {patterns['total_patterns']} analyzed")
|
||||
|
||||
# Test 5: Stop surveillance
|
||||
click.echo(f"\n📋 Test 5: Stop Surveillance")
|
||||
stop_success = await stop_ai_surveillance()
|
||||
click.echo(f" ✅ Stop: {'Success' if stop_success else 'Failed'}")
|
||||
|
||||
return start_success, stop_success, summary, alerts, patterns
|
||||
|
||||
# Run the async tests
|
||||
start_success, stop_success, summary, alerts, patterns = asyncio.run(run_tests())
|
||||
|
||||
# Show results
|
||||
click.echo(f"\n🎉 Test Results Summary:")
|
||||
click.echo(f" System Status: {'✅ Operational' if start_success and stop_success else '❌ Issues'}")
|
||||
click.echo(f" ML Models: {summary.get('ml_models', 0)} active")
|
||||
click.echo(f" Alerts Generated: {len(alerts)}")
|
||||
click.echo(f" Patterns Detected: {patterns['total_patterns']}")
|
||||
|
||||
if start_success and stop_success:
|
||||
click.echo(f"\n✅ AI Surveillance System is ready for production use!")
|
||||
else:
|
||||
click.echo(f"\n⚠️ Some issues detected - check logs for details")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test failed: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
ai_surveillance_group()
|
||||
401
cli/commands/ai_trading.py
Executable file
401
cli/commands/ai_trading.py
Executable file
@@ -0,0 +1,401 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
AI Trading CLI Commands
|
||||
Advanced AI-powered trading algorithms and analytics
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime, timedelta
|
||||
from imports import ensure_coordinator_api_imports
|
||||
|
||||
ensure_coordinator_api_imports()
|
||||
|
||||
try:
|
||||
from app.services.ai_trading_engine import (
|
||||
initialize_ai_engine, train_strategies, generate_trading_signals,
|
||||
get_engine_status, ai_trading_engine, TradingStrategy
|
||||
)
|
||||
_import_error = None
|
||||
except ImportError as e:
|
||||
_import_error = e
|
||||
|
||||
def _missing(*args, **kwargs):
|
||||
raise ImportError(
|
||||
f"Required service module 'app.services.ai_trading_engine' could not be imported: {_import_error}. "
|
||||
"Ensure coordinator-api dependencies are installed and the source directory is accessible."
|
||||
)
|
||||
initialize_ai_engine = train_strategies = generate_trading_signals = get_engine_status = _missing
|
||||
ai_trading_engine = None
|
||||
|
||||
class TradingStrategy:
|
||||
pass
|
||||
|
||||
@click.group()
|
||||
def ai_trading():
|
||||
"""AI-powered trading and analytics commands"""
|
||||
pass
|
||||
|
||||
@ai_trading.command()
|
||||
@click.pass_context
|
||||
def init(ctx):
|
||||
"""Initialize AI trading engine"""
|
||||
try:
|
||||
click.echo(f"🤖 Initializing AI Trading Engine...")
|
||||
|
||||
success = asyncio.run(initialize_ai_engine())
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ AI Trading Engine initialized successfully!")
|
||||
click.echo(f"📊 Default strategies loaded:")
|
||||
click.echo(f" • Mean Reversion Strategy")
|
||||
click.echo(f" • Momentum Strategy")
|
||||
else:
|
||||
click.echo(f"❌ Failed to initialize AI Trading Engine")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Initialization failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.option("--symbol", default="BTC/USDT", help="Trading symbol")
|
||||
@click.option("--days", type=int, default=30, help="Days of historical data for training")
|
||||
@click.pass_context
|
||||
def train(ctx, symbol: str, days: int):
|
||||
"""Train AI trading strategies"""
|
||||
try:
|
||||
click.echo(f"🧠 Training AI Trading Strategies...")
|
||||
click.echo(f"📊 Symbol: {symbol}")
|
||||
click.echo(f"📅 Training Period: {days} days")
|
||||
|
||||
success = asyncio.run(train_strategies(symbol, days))
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ Training completed successfully!")
|
||||
|
||||
# Get training results
|
||||
status = get_engine_status()
|
||||
click.echo(f"📈 Training Results:")
|
||||
click.echo(f" Strategies Trained: {status['trained_strategies']}/{status['strategies_count']}")
|
||||
click.echo(f" Success Rate: 100%")
|
||||
click.echo(f" Data Points: {days * 24} (hourly data)")
|
||||
else:
|
||||
click.echo(f"❌ Training failed")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Training failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.option("--symbol", default="BTC/USDT", help="Trading symbol")
|
||||
@click.option("--count", type=int, default=10, help="Number of signals to show")
|
||||
@click.pass_context
|
||||
def signals(ctx, symbol: str, count: int):
|
||||
"""Generate AI trading signals"""
|
||||
try:
|
||||
click.echo(f"📈 Generating AI Trading Signals...")
|
||||
click.echo(f"📊 Symbol: {symbol}")
|
||||
|
||||
signals = asyncio.run(generate_trading_signals(symbol))
|
||||
|
||||
if not signals:
|
||||
click.echo(f"❌ No signals generated. Make sure strategies are trained.")
|
||||
return
|
||||
|
||||
click.echo(f"\n🎯 Generated {len(signals)} Trading Signals:")
|
||||
|
||||
# Display signals
|
||||
for i, signal in enumerate(signals[:count]):
|
||||
signal_icon = {
|
||||
"buy": "🟢",
|
||||
"sell": "🔴",
|
||||
"hold": "🟡"
|
||||
}.get(signal['signal_type'], "❓")
|
||||
|
||||
confidence_color = "🔥" if signal['confidence'] > 0.8 else "⚡" if signal['confidence'] > 0.6 else "💡"
|
||||
|
||||
click.echo(f"\n{signal_icon} Signal #{i+1}")
|
||||
click.echo(f" Strategy: {signal['strategy'].replace('_', ' ').title()}")
|
||||
click.echo(f" Signal: {signal['signal_type'].upper()}")
|
||||
click.echo(f" Confidence: {signal['confidence']:.2%} {confidence_color}")
|
||||
click.echo(f" Predicted Return: {signal['predicted_return']:.2%}")
|
||||
click.echo(f" Risk Score: {signal['risk_score']:.2f}")
|
||||
click.echo(f" Reasoning: {signal['reasoning']}")
|
||||
click.echo(f" Time: {signal['timestamp'][:19]}")
|
||||
|
||||
if len(signals) > count:
|
||||
click.echo(f"\n... and {len(signals) - count} more signals")
|
||||
|
||||
# Show summary
|
||||
buy_signals = len([s for s in signals if s['signal_type'] == 'buy'])
|
||||
sell_signals = len([s for s in signals if s['signal_type'] == 'sell'])
|
||||
hold_signals = len([s for s in signals if s['signal_type'] == 'hold'])
|
||||
|
||||
click.echo(f"\n📊 Signal Summary:")
|
||||
click.echo(f" 🟢 Buy Signals: {buy_signals}")
|
||||
click.echo(f" 🔴 Sell Signals: {sell_signals}")
|
||||
click.echo(f" 🟡 Hold Signals: {hold_signals}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Signal generation failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.pass_context
|
||||
def status(ctx):
|
||||
"""Show AI trading engine status"""
|
||||
try:
|
||||
click.echo(f"🤖 AI Trading Engine Status")
|
||||
|
||||
status = get_engine_status()
|
||||
|
||||
click.echo(f"\n📊 Engine Overview:")
|
||||
click.echo(f" Total Strategies: {status['strategies_count']}")
|
||||
click.echo(f" Trained Strategies: {status['trained_strategies']}")
|
||||
click.echo(f" Active Signals: {status['active_signals']}")
|
||||
click.echo(f" Market Data Symbols: {len(status['market_data_symbols'])}")
|
||||
|
||||
if status['market_data_symbols']:
|
||||
click.echo(f" Available Symbols: {', '.join(status['market_data_symbols'])}")
|
||||
|
||||
# Performance metrics
|
||||
metrics = status.get('performance_metrics', {})
|
||||
if metrics:
|
||||
click.echo(f"\n📈 Performance Metrics:")
|
||||
click.echo(f" Total Signals Generated: {metrics.get('total_signals', 0)}")
|
||||
click.echo(f" Recent Signals: {metrics.get('recent_signals', 0)}")
|
||||
click.echo(f" Average Confidence: {metrics.get('avg_confidence', 0):.1%}")
|
||||
click.echo(f" Average Risk Score: {metrics.get('avg_risk_score', 0):.2f}")
|
||||
|
||||
click.echo(f"\n📊 Signal Distribution:")
|
||||
click.echo(f" 🟢 Buy Signals: {metrics.get('buy_signals', 0)}")
|
||||
click.echo(f" 🔴 Sell Signals: {metrics.get('sell_signals', 0)}")
|
||||
click.echo(f" 🟡 Hold Signals: {metrics.get('hold_signals', 0)}")
|
||||
|
||||
# Strategy status
|
||||
if ai_trading_engine.strategies:
|
||||
click.echo(f"\n🧠 Strategy Status:")
|
||||
for strategy_name, strategy in ai_trading_engine.strategies.items():
|
||||
status_icon = "✅" if strategy.is_trained else "❌"
|
||||
click.echo(f" {status_icon} {strategy_name.replace('_', ' ').title()}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Status check failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.option("--strategy", required=True, help="Strategy to backtest")
|
||||
@click.option("--symbol", default="BTC/USDT", help="Trading symbol")
|
||||
@click.option("--days", type=int, default=30, help="Backtesting period in days")
|
||||
@click.option("--capital", type=float, default=10000, help="Initial capital")
|
||||
@click.pass_context
|
||||
def backtest(ctx, strategy: str, symbol: str, days: int, capital: float):
|
||||
"""Backtest AI trading strategy"""
|
||||
try:
|
||||
click.echo(f"📊 Backtesting AI Trading Strategy...")
|
||||
click.echo(f"🧠 Strategy: {strategy}")
|
||||
click.echo(f"📊 Symbol: {symbol}")
|
||||
click.echo(f"📅 Period: {days} days")
|
||||
click.echo(f"💰 Initial Capital: ${capital:,.2f}")
|
||||
|
||||
# Calculate date range
|
||||
end_date = datetime.now()
|
||||
start_date = end_date - timedelta(days=days)
|
||||
|
||||
# Run backtest
|
||||
result = asyncio.run(ai_trading_engine.backtest_strategy(
|
||||
strategy, symbol, start_date, end_date, capital
|
||||
))
|
||||
|
||||
click.echo(f"\n📈 Backtest Results:")
|
||||
click.echo(f" Strategy: {result.strategy.value.replace('_', ' ').title()}")
|
||||
click.echo(f" Period: {result.start_date.strftime('%Y-%m-%d')} to {result.end_date.strftime('%Y-%m-%d')}")
|
||||
click.echo(f" Initial Capital: ${result.initial_capital:,.2f}")
|
||||
click.echo(f" Final Capital: ${result.final_capital:,.2f}")
|
||||
|
||||
# Performance metrics
|
||||
total_return_pct = result.total_return * 100
|
||||
click.echo(f"\n📊 Performance:")
|
||||
click.echo(f" Total Return: {total_return_pct:.2f}%")
|
||||
click.echo(f" Sharpe Ratio: {result.sharpe_ratio:.2f}")
|
||||
click.echo(f" Max Drawdown: {result.max_drawdown:.2%}")
|
||||
click.echo(f" Win Rate: {result.win_rate:.1%}")
|
||||
|
||||
# Trading statistics
|
||||
click.echo(f"\n📋 Trading Statistics:")
|
||||
click.echo(f" Total Trades: {result.total_trades}")
|
||||
click.echo(f" Profitable Trades: {result.profitable_trades}")
|
||||
click.echo(f" Average Trade: ${(result.final_capital - result.initial_capital) / max(result.total_trades, 1):.2f}")
|
||||
|
||||
# Performance assessment
|
||||
if total_return_pct > 10:
|
||||
assessment = "🔥 EXCELLENT"
|
||||
elif total_return_pct > 5:
|
||||
assessment = "⚡ GOOD"
|
||||
elif total_return_pct > 0:
|
||||
assessment = "💡 POSITIVE"
|
||||
else:
|
||||
assessment = "❌ NEGATIVE"
|
||||
|
||||
click.echo(f"\n{assessment} Performance Assessment")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Backtesting failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.option("--symbol", default="BTC/USDT", help="Trading symbol")
|
||||
@click.option("--hours", type=int, default=24, help="Analysis period in hours")
|
||||
@click.pass_context
|
||||
def analyze(ctx, symbol: str, hours: int):
|
||||
"""Analyze market with AI insights"""
|
||||
try:
|
||||
click.echo(f"🔍 AI Market Analysis...")
|
||||
click.echo(f"📊 Symbol: {symbol}")
|
||||
click.echo(f"⏰ Period: {hours} hours")
|
||||
|
||||
# Get market data
|
||||
market_data = ai_trading_engine.market_data.get(symbol)
|
||||
if not market_data:
|
||||
click.echo(f"❌ No market data available for {symbol}")
|
||||
click.echo(f"💡 Train strategies first with: aitbc ai-trading train --symbol {symbol}")
|
||||
return
|
||||
|
||||
# Get recent data
|
||||
recent_data = market_data.tail(hours)
|
||||
|
||||
if len(recent_data) == 0:
|
||||
click.echo(f"❌ No recent data available")
|
||||
return
|
||||
|
||||
# Calculate basic statistics
|
||||
current_price = recent_data.iloc[-1]['close']
|
||||
price_change = (current_price - recent_data.iloc[0]['close']) / recent_data.iloc[0]['close']
|
||||
volatility = recent_data['close'].pct_change().std()
|
||||
volume_avg = recent_data['volume'].mean()
|
||||
|
||||
click.echo(f"\n📊 Market Analysis:")
|
||||
click.echo(f" Current Price: ${current_price:,.2f}")
|
||||
click.echo(f" Price Change: {price_change:.2%}")
|
||||
click.echo(f" Volatility: {volatility:.2%}")
|
||||
click.echo(f" Average Volume: {volume_avg:,.0f}")
|
||||
|
||||
# Generate AI signals
|
||||
signals = asyncio.run(generate_trading_signals(symbol))
|
||||
|
||||
if signals:
|
||||
click.echo(f"\n🤖 AI Insights:")
|
||||
for signal in signals:
|
||||
signal_icon = {"buy": "🟢", "sell": "🔴", "hold": "🟡"}.get(signal['signal_type'], "❓")
|
||||
|
||||
click.echo(f" {signal_icon} {signal['strategy'].replace('_', ' ').title()}:")
|
||||
click.echo(f" Signal: {signal['signal_type'].upper()}")
|
||||
click.echo(f" Confidence: {signal['confidence']:.1%}")
|
||||
click.echo(f" Reasoning: {signal['reasoning']}")
|
||||
|
||||
# Market recommendation
|
||||
if signals:
|
||||
buy_signals = len([s for s in signals if s['signal_type'] == 'buy'])
|
||||
sell_signals = len([s for s in signals if s['signal_type'] == 'sell'])
|
||||
|
||||
if buy_signals > sell_signals:
|
||||
recommendation = "🟢 BULLISH - Multiple buy signals detected"
|
||||
elif sell_signals > buy_signals:
|
||||
recommendation = "🔴 BEARISH - Multiple sell signals detected"
|
||||
else:
|
||||
recommendation = "🟡 NEUTRAL - Mixed signals, hold position"
|
||||
|
||||
click.echo(f"\n🎯 AI Recommendation: {recommendation}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Analysis failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.pass_context
|
||||
def strategies(ctx):
|
||||
"""List available AI trading strategies"""
|
||||
try:
|
||||
click.echo(f"🧠 Available AI Trading Strategies")
|
||||
|
||||
strategies = {
|
||||
"mean_reversion": {
|
||||
"name": "Mean Reversion",
|
||||
"description": "Identifies overbought/oversold conditions using statistical analysis",
|
||||
"indicators": ["Z-score", "Rolling mean", "Standard deviation"],
|
||||
"time_horizon": "Short-term (hours to days)",
|
||||
"risk_level": "Moderate",
|
||||
"best_conditions": "Sideways markets with clear mean"
|
||||
},
|
||||
"momentum": {
|
||||
"name": "Momentum",
|
||||
"description": "Follows price trends and momentum indicators",
|
||||
"indicators": ["Price momentum", "Trend strength", "Volume analysis"],
|
||||
"time_horizon": "Medium-term (days to weeks)",
|
||||
"risk_level": "Moderate",
|
||||
"best_conditions": "Trending markets with clear direction"
|
||||
}
|
||||
}
|
||||
|
||||
for strategy_key, strategy_info in strategies.items():
|
||||
click.echo(f"\n📊 {strategy_info['name']}")
|
||||
click.echo(f" Description: {strategy_info['description']}")
|
||||
click.echo(f" Indicators: {', '.join(strategy_info['indicators'])}")
|
||||
click.echo(f" Time Horizon: {strategy_info['time_horizon']}")
|
||||
click.echo(f" Risk Level: {strategy_info['risk_level'].title()}")
|
||||
click.echo(f" Best For: {strategy_info['best_conditions']}")
|
||||
|
||||
# Show current status
|
||||
if ai_trading_engine.strategies:
|
||||
click.echo(f"\n🔧 Current Strategy Status:")
|
||||
for strategy_name, strategy in ai_trading_engine.strategies.items():
|
||||
status_icon = "✅" if strategy.is_trained else "❌"
|
||||
click.echo(f" {status_icon} {strategy_name.replace('_', ' ').title()}")
|
||||
|
||||
click.echo(f"\n💡 Usage Examples:")
|
||||
click.echo(f" aitbc ai-trading train --symbol BTC/USDT")
|
||||
click.echo(f" aitbc ai-trading signals --symbol ETH/USDT")
|
||||
click.echo(f" aitbc ai-trading backtest --strategy mean_reversion --symbol BTC/USDT")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Strategy listing failed: {e}", err=True)
|
||||
|
||||
@ai_trading.command()
|
||||
@click.pass_context
|
||||
def test(ctx):
|
||||
"""Test AI trading engine functionality"""
|
||||
try:
|
||||
click.echo(f"🧪 Testing AI Trading Engine...")
|
||||
|
||||
# Test 1: Initialize
|
||||
click.echo(f"\n📋 Test 1: Engine Initialization")
|
||||
init_success = asyncio.run(initialize_ai_engine())
|
||||
click.echo(f" ✅ Initialization: {'Success' if init_success else 'Failed'}")
|
||||
|
||||
# Test 2: Train strategies
|
||||
click.echo(f"\n📋 Test 2: Strategy Training")
|
||||
train_success = asyncio.run(train_strategies("BTC/USDT", 7))
|
||||
click.echo(f" ✅ Training: {'Success' if train_success else 'Failed'}")
|
||||
|
||||
# Test 3: Generate signals
|
||||
click.echo(f"\n📋 Test 3: Signal Generation")
|
||||
signals = asyncio.run(generate_trading_signals("BTC/USDT"))
|
||||
click.echo(f" ✅ Signals Generated: {len(signals)}")
|
||||
|
||||
# Test 4: Status check
|
||||
click.echo(f"\n📋 Test 4: Status Check")
|
||||
status = get_engine_status()
|
||||
click.echo(f" ✅ Status Retrieved: {len(status)} metrics")
|
||||
|
||||
# Show summary
|
||||
click.echo(f"\n🎉 Test Results Summary:")
|
||||
click.echo(f" Engine Status: {'✅ Operational' if init_success and train_success else '❌ Issues'}")
|
||||
click.echo(f" Strategies: {status['strategies_count']} loaded, {status['trained_strategies']} trained")
|
||||
click.echo(f" Signals: {status['active_signals']} generated")
|
||||
|
||||
if init_success and train_success:
|
||||
click.echo(f"\n✅ AI Trading Engine is ready for production use!")
|
||||
else:
|
||||
click.echo(f"\n⚠️ Some issues detected - check logs for details")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test failed: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
ai_trading()
|
||||
402
cli/commands/analytics.py
Executable file
402
cli/commands/analytics.py
Executable file
@@ -0,0 +1,402 @@
|
||||
"""Analytics and monitoring commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional
|
||||
from core.config import load_multichain_config
|
||||
from core.analytics import ChainAnalytics
|
||||
from utils import output, error, success
|
||||
|
||||
@click.group()
|
||||
def analytics():
|
||||
"""Chain analytics and monitoring commands"""
|
||||
pass
|
||||
|
||||
@analytics.command()
|
||||
@click.option('--chain-id', help='Specific chain ID to analyze')
|
||||
@click.option('--hours', default=24, help='Time range in hours')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def summary(ctx, chain_id, hours, format):
|
||||
"""Get performance summary for chains"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
analytics = ChainAnalytics(config)
|
||||
|
||||
if chain_id:
|
||||
# Single chain summary
|
||||
summary = analytics.get_chain_performance_summary(chain_id, hours)
|
||||
if not summary:
|
||||
error(f"No data available for chain {chain_id}")
|
||||
raise click.Abort()
|
||||
|
||||
# Format summary for display
|
||||
summary_data = [
|
||||
{"Metric": "Chain ID", "Value": summary["chain_id"]},
|
||||
{"Metric": "Time Range", "Value": f"{summary['time_range_hours']} hours"},
|
||||
{"Metric": "Data Points", "Value": summary["data_points"]},
|
||||
{"Metric": "Health Score", "Value": f"{summary['health_score']:.1f}/100"},
|
||||
{"Metric": "Active Alerts", "Value": summary["active_alerts"]},
|
||||
{"Metric": "Avg TPS", "Value": f"{summary['statistics']['tps']['avg']:.2f}"},
|
||||
{"Metric": "Avg Block Time", "Value": f"{summary['statistics']['block_time']['avg']:.2f}s"},
|
||||
{"Metric": "Avg Gas Price", "Value": f"{summary['statistics']['gas_price']['avg']:,} wei"}
|
||||
]
|
||||
|
||||
output(summary_data, ctx.obj.get('output_format', format), title=f"Chain Summary: {chain_id}")
|
||||
else:
|
||||
# Cross-chain analysis
|
||||
analysis = analytics.get_cross_chain_analysis()
|
||||
|
||||
if not analysis:
|
||||
error("No analytics data available")
|
||||
raise click.Abort()
|
||||
|
||||
# Overview data
|
||||
overview_data = [
|
||||
{"Metric": "Total Chains", "Value": analysis["total_chains"]},
|
||||
{"Metric": "Active Chains", "Value": analysis["active_chains"]},
|
||||
{"Metric": "Total Alerts", "Value": analysis["alerts_summary"]["total_alerts"]},
|
||||
{"Metric": "Critical Alerts", "Value": analysis["alerts_summary"]["critical_alerts"]},
|
||||
{"Metric": "Total Memory Usage", "Value": f"{analysis['resource_usage']['total_memory_mb']:.1f}MB"},
|
||||
{"Metric": "Total Disk Usage", "Value": f"{analysis['resource_usage']['total_disk_mb']:.1f}MB"},
|
||||
{"Metric": "Total Clients", "Value": analysis["resource_usage"]["total_clients"]},
|
||||
{"Metric": "Total Agents", "Value": analysis["resource_usage"]["total_agents"]}
|
||||
]
|
||||
|
||||
output(overview_data, ctx.obj.get('output_format', format), title="Cross-Chain Analysis Overview")
|
||||
|
||||
# Performance comparison
|
||||
if analysis["performance_comparison"]:
|
||||
comparison_data = [
|
||||
{
|
||||
"Chain ID": chain_id,
|
||||
"TPS": f"{data['tps']:.2f}",
|
||||
"Block Time": f"{data['block_time']:.2f}s",
|
||||
"Health Score": f"{data['health_score']:.1f}/100"
|
||||
}
|
||||
for chain_id, data in analysis["performance_comparison"].items()
|
||||
]
|
||||
|
||||
output(comparison_data, ctx.obj.get('output_format', format), title="Chain Performance Comparison")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting analytics summary: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@analytics.command()
|
||||
@click.option('--realtime', is_flag=True, help='Real-time monitoring')
|
||||
@click.option('--interval', default=30, help='Update interval in seconds')
|
||||
@click.option('--chain-id', help='Monitor specific chain')
|
||||
@click.pass_context
|
||||
def monitor(ctx, realtime, interval, chain_id):
|
||||
"""Monitor chain performance in real-time"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
analytics = ChainAnalytics(config)
|
||||
|
||||
if realtime:
|
||||
# Real-time monitoring
|
||||
from rich.console import Console
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
import time
|
||||
|
||||
console = Console()
|
||||
|
||||
def generate_monitor_table():
|
||||
try:
|
||||
# Collect latest metrics
|
||||
asyncio.run(analytics.collect_all_metrics())
|
||||
|
||||
table = Table(title=f"Chain Monitor - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
table.add_column("Chain ID", style="cyan")
|
||||
table.add_column("TPS", style="green")
|
||||
table.add_column("Block Time", style="yellow")
|
||||
table.add_column("Health", style="red")
|
||||
table.add_column("Alerts", style="magenta")
|
||||
|
||||
if chain_id:
|
||||
# Single chain monitoring
|
||||
summary = analytics.get_chain_performance_summary(chain_id, 1)
|
||||
if summary:
|
||||
health_color = "green" if summary["health_score"] > 70 else "yellow" if summary["health_score"] > 40 else "red"
|
||||
table.add_row(
|
||||
chain_id,
|
||||
f"{summary['statistics']['tps']['avg']:.2f}",
|
||||
f"{summary['statistics']['block_time']['avg']:.2f}s",
|
||||
f"[{health_color}]{summary['health_score']:.1f}[/{health_color}]",
|
||||
str(summary["active_alerts"])
|
||||
)
|
||||
else:
|
||||
# All chains monitoring
|
||||
analysis = analytics.get_cross_chain_analysis()
|
||||
for chain_id, data in analysis["performance_comparison"].items():
|
||||
health_color = "green" if data["health_score"] > 70 else "yellow" if data["health_score"] > 40 else "red"
|
||||
table.add_row(
|
||||
chain_id,
|
||||
f"{data['tps']:.2f}",
|
||||
f"{data['block_time']:.2f}s",
|
||||
f"[{health_color}]{data['health_score']:.1f}[/{health_color}]",
|
||||
str(len([a for a in analytics.alerts if a.chain_id == chain_id]))
|
||||
)
|
||||
|
||||
return table
|
||||
except Exception as e:
|
||||
return f"Error collecting metrics: {e}"
|
||||
|
||||
with Live(generate_monitor_table(), refresh_per_second=1) as live:
|
||||
try:
|
||||
while True:
|
||||
live.update(generate_monitor_table())
|
||||
time.sleep(interval)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Monitoring stopped by user[/yellow]")
|
||||
else:
|
||||
# Single snapshot
|
||||
asyncio.run(analytics.collect_all_metrics())
|
||||
|
||||
if chain_id:
|
||||
summary = analytics.get_chain_performance_summary(chain_id, 1)
|
||||
if not summary:
|
||||
error(f"No data available for chain {chain_id}")
|
||||
raise click.Abort()
|
||||
|
||||
monitor_data = [
|
||||
{"Metric": "Chain ID", "Value": summary["chain_id"]},
|
||||
{"Metric": "Current TPS", "Value": f"{summary['statistics']['tps']['avg']:.2f}"},
|
||||
{"Metric": "Current Block Time", "Value": f"{summary['statistics']['block_time']['avg']:.2f}s"},
|
||||
{"Metric": "Health Score", "Value": f"{summary['health_score']:.1f}/100"},
|
||||
{"Metric": "Active Alerts", "Value": summary["active_alerts"]},
|
||||
{"Metric": "Memory Usage", "Value": f"{summary['latest_metrics']['memory_usage_mb']:.1f}MB"},
|
||||
{"Metric": "Disk Usage", "Value": f"{summary['latest_metrics']['disk_usage_mb']:.1f}MB"},
|
||||
{"Metric": "Active Nodes", "Value": summary["latest_metrics"]["active_nodes"]},
|
||||
{"Metric": "Client Count", "Value": summary["latest_metrics"]["client_count"]},
|
||||
{"Metric": "Agent Count", "Value": summary["latest_metrics"]["agent_count"]}
|
||||
]
|
||||
|
||||
output(monitor_data, ctx.obj.get('output_format', 'table'), title=f"Chain Monitor: {chain_id}")
|
||||
else:
|
||||
analysis = analytics.get_cross_chain_analysis()
|
||||
|
||||
monitor_data = [
|
||||
{"Metric": "Total Chains", "Value": analysis["total_chains"]},
|
||||
{"Metric": "Active Chains", "Value": analysis["active_chains"]},
|
||||
{"Metric": "Total Memory Usage", "Value": f"{analysis['resource_usage']['total_memory_mb']:.1f}MB"},
|
||||
{"Metric": "Total Disk Usage", "Value": f"{analysis['resource_usage']['total_disk_mb']:.1f}MB"},
|
||||
{"Metric": "Total Clients", "Value": analysis["resource_usage"]["total_clients"]},
|
||||
{"Metric": "Total Agents", "Value": analysis["resource_usage"]["total_agents"]},
|
||||
{"Metric": "Total Alerts", "Value": analysis["alerts_summary"]["total_alerts"]},
|
||||
{"Metric": "Critical Alerts", "Value": analysis["alerts_summary"]["critical_alerts"]}
|
||||
]
|
||||
|
||||
output(monitor_data, ctx.obj.get('output_format', 'table'), title="System Monitor")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during monitoring: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@analytics.command()
|
||||
@click.option('--chain-id', help='Specific chain ID for predictions')
|
||||
@click.option('--hours', default=24, help='Prediction time horizon in hours')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def predict(ctx, chain_id, hours, format):
|
||||
"""Predict chain performance"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
analytics = ChainAnalytics(config)
|
||||
|
||||
# Collect current metrics first
|
||||
asyncio.run(analytics.collect_all_metrics())
|
||||
|
||||
if chain_id:
|
||||
# Single chain prediction
|
||||
predictions = asyncio.run(analytics.predict_chain_performance(chain_id, hours))
|
||||
|
||||
if not predictions:
|
||||
error(f"No prediction data available for chain {chain_id}")
|
||||
raise click.Abort()
|
||||
|
||||
prediction_data = [
|
||||
{
|
||||
"Metric": pred.metric,
|
||||
"Predicted Value": f"{pred.predicted_value:.2f}",
|
||||
"Confidence": f"{pred.confidence:.1%}",
|
||||
"Time Horizon": f"{pred.time_horizon_hours}h"
|
||||
}
|
||||
for pred in predictions
|
||||
]
|
||||
|
||||
output(prediction_data, ctx.obj.get('output_format', format), title=f"Performance Predictions: {chain_id}")
|
||||
else:
|
||||
# All chains prediction
|
||||
analysis = analytics.get_cross_chain_analysis()
|
||||
all_predictions = {}
|
||||
|
||||
for chain_id in analysis["performance_comparison"].keys():
|
||||
predictions = asyncio.run(analytics.predict_chain_performance(chain_id, hours))
|
||||
if predictions:
|
||||
all_predictions[chain_id] = predictions
|
||||
|
||||
if not all_predictions:
|
||||
error("No prediction data available")
|
||||
raise click.Abort()
|
||||
|
||||
# Format predictions for display
|
||||
prediction_data = []
|
||||
for chain_id, predictions in all_predictions.items():
|
||||
for pred in predictions:
|
||||
prediction_data.append({
|
||||
"Chain ID": chain_id,
|
||||
"Metric": pred.metric,
|
||||
"Predicted Value": f"{pred.predicted_value:.2f}",
|
||||
"Confidence": f"{pred.confidence:.1%}",
|
||||
"Time Horizon": f"{pred.time_horizon_hours}h"
|
||||
})
|
||||
|
||||
output(prediction_data, ctx.obj.get('output_format', format), title="Chain Performance Predictions")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error generating predictions: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@analytics.command()
|
||||
@click.option('--chain-id', help='Specific chain ID for recommendations')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def optimize(ctx, chain_id, format):
|
||||
"""Get optimization recommendations"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
analytics = ChainAnalytics(config)
|
||||
|
||||
# Collect current metrics first
|
||||
asyncio.run(analytics.collect_all_metrics())
|
||||
|
||||
if chain_id:
|
||||
# Single chain recommendations
|
||||
recommendations = analytics.get_optimization_recommendations(chain_id)
|
||||
|
||||
if not recommendations:
|
||||
success(f"No optimization recommendations for chain {chain_id}")
|
||||
return
|
||||
|
||||
recommendation_data = [
|
||||
{
|
||||
"Type": rec["type"],
|
||||
"Priority": rec["priority"],
|
||||
"Issue": rec["issue"],
|
||||
"Current Value": rec["current_value"],
|
||||
"Recommended Action": rec["recommended_action"],
|
||||
"Expected Improvement": rec["expected_improvement"]
|
||||
}
|
||||
for rec in recommendations
|
||||
]
|
||||
|
||||
output(recommendation_data, ctx.obj.get('output_format', format), title=f"Optimization Recommendations: {chain_id}")
|
||||
else:
|
||||
# All chains recommendations
|
||||
analysis = analytics.get_cross_chain_analysis()
|
||||
all_recommendations = {}
|
||||
|
||||
for chain_id in analysis["performance_comparison"].keys():
|
||||
recommendations = analytics.get_optimization_recommendations(chain_id)
|
||||
if recommendations:
|
||||
all_recommendations[chain_id] = recommendations
|
||||
|
||||
if not all_recommendations:
|
||||
success("No optimization recommendations available")
|
||||
return
|
||||
|
||||
# Format recommendations for display
|
||||
recommendation_data = []
|
||||
for chain_id, recommendations in all_recommendations.items():
|
||||
for rec in recommendations:
|
||||
recommendation_data.append({
|
||||
"Chain ID": chain_id,
|
||||
"Type": rec["type"],
|
||||
"Priority": rec["priority"],
|
||||
"Issue": rec["issue"],
|
||||
"Current Value": rec["current_value"],
|
||||
"Recommended Action": rec["recommended_action"]
|
||||
})
|
||||
|
||||
output(recommendation_data, ctx.obj.get('output_format', format), title="Chain Optimization Recommendations")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting optimization recommendations: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@analytics.command()
|
||||
@click.option('--severity', type=click.Choice(['all', 'critical', 'warning']), default='all', help='Alert severity filter')
|
||||
@click.option('--hours', default=24, help='Time range in hours')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def alerts(ctx, severity, hours, format):
|
||||
"""View performance alerts"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
analytics = ChainAnalytics(config)
|
||||
|
||||
# Collect current metrics first
|
||||
asyncio.run(analytics.collect_all_metrics())
|
||||
|
||||
# Filter alerts
|
||||
cutoff_time = datetime.now() - timedelta(hours=hours)
|
||||
filtered_alerts = [
|
||||
alert for alert in analytics.alerts
|
||||
if alert.timestamp >= cutoff_time
|
||||
]
|
||||
|
||||
if severity != 'all':
|
||||
filtered_alerts = [a for a in filtered_alerts if a.severity == severity]
|
||||
|
||||
if not filtered_alerts:
|
||||
success("No alerts found")
|
||||
return
|
||||
|
||||
alert_data = [
|
||||
{
|
||||
"Chain ID": alert.chain_id,
|
||||
"Type": alert.alert_type,
|
||||
"Severity": alert.severity,
|
||||
"Message": alert.message,
|
||||
"Current Value": f"{alert.current_value:.2f}",
|
||||
"Threshold": f"{alert.threshold:.2f}",
|
||||
"Time": alert.timestamp.strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
for alert in filtered_alerts
|
||||
]
|
||||
|
||||
output(alert_data, ctx.obj.get('output_format', format), title=f"Performance Alerts (Last {hours}h)")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting alerts: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@analytics.command()
|
||||
@click.option('--format', type=click.Choice(['json']), default='json', help='Output format')
|
||||
@click.pass_context
|
||||
def dashboard(ctx, format):
|
||||
"""Get complete dashboard data"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
analytics = ChainAnalytics(config)
|
||||
|
||||
# Collect current metrics
|
||||
asyncio.run(analytics.collect_all_metrics())
|
||||
|
||||
# Get dashboard data
|
||||
dashboard_data = analytics.get_dashboard_data()
|
||||
|
||||
if format == 'json':
|
||||
import json
|
||||
click.echo(json.dumps(dashboard_data, indent=2, default=str))
|
||||
else:
|
||||
error("Dashboard data only available in JSON format")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting dashboard data: {str(e)}")
|
||||
raise click.Abort()
|
||||
220
cli/commands/auth.py
Executable file
220
cli/commands/auth.py
Executable file
@@ -0,0 +1,220 @@
|
||||
"""Authentication commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import os
|
||||
from typing import Optional
|
||||
from auth import AuthManager
|
||||
from utils import output, success, error, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def auth():
|
||||
"""Manage API keys and authentication"""
|
||||
pass
|
||||
|
||||
|
||||
@auth.command()
|
||||
@click.argument("api_key")
|
||||
@click.option("--environment", default="default", help="Environment name (default, dev, staging, prod)")
|
||||
@click.pass_context
|
||||
def login(ctx, api_key: str, environment: str):
|
||||
"""Store API key for authentication"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
# Validate API key format (basic check)
|
||||
if not api_key or len(api_key) < 10:
|
||||
error("Invalid API key format")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
auth_manager.store_credential("client", api_key, environment)
|
||||
|
||||
output({
|
||||
"status": "logged_in",
|
||||
"environment": environment,
|
||||
"note": "API key stored securely"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@auth.command()
|
||||
@click.option("--environment", default="default", help="Environment name")
|
||||
@click.pass_context
|
||||
def logout(ctx, environment: str):
|
||||
"""Remove stored API key"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
auth_manager.delete_credential("client", environment)
|
||||
|
||||
output({
|
||||
"status": "logged_out",
|
||||
"environment": environment
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@auth.command()
|
||||
@click.option("--environment", default="default", help="Environment name")
|
||||
@click.option("--show", is_flag=True, help="Show the actual API key")
|
||||
@click.pass_context
|
||||
def token(ctx, environment: str, show: bool):
|
||||
"""Show stored API key"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
api_key = auth_manager.get_credential("client", environment)
|
||||
|
||||
if api_key:
|
||||
if show:
|
||||
output({
|
||||
"api_key": api_key,
|
||||
"environment": environment
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
output({
|
||||
"api_key": "***REDACTED***",
|
||||
"environment": environment,
|
||||
"length": len(api_key)
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
output({
|
||||
"message": "No API key stored",
|
||||
"environment": environment
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@auth.command()
|
||||
@click.pass_context
|
||||
def status(ctx):
|
||||
"""Show authentication status"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
credentials = auth_manager.list_credentials()
|
||||
|
||||
if credentials:
|
||||
output({
|
||||
"status": "authenticated",
|
||||
"stored_credentials": credentials
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
output({
|
||||
"status": "not_authenticated",
|
||||
"message": "No stored credentials found"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@auth.command()
|
||||
@click.option("--environment", default="default", help="Environment name")
|
||||
@click.pass_context
|
||||
def refresh(ctx, environment: str):
|
||||
"""Refresh authentication (placeholder for token refresh)"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
api_key = auth_manager.get_credential("client", environment)
|
||||
|
||||
if api_key:
|
||||
# In a real implementation, this would refresh the token
|
||||
output({
|
||||
"status": "refreshed",
|
||||
"environment": environment,
|
||||
"message": "Authentication refreshed (placeholder)"
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"No API key found for environment: {environment}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@auth.group()
|
||||
def keys():
|
||||
"""Manage multiple API keys"""
|
||||
pass
|
||||
|
||||
|
||||
@keys.command()
|
||||
@click.pass_context
|
||||
def list(ctx):
|
||||
"""List all stored API keys"""
|
||||
auth_manager = AuthManager()
|
||||
credentials = auth_manager.list_credentials()
|
||||
|
||||
if credentials:
|
||||
output({
|
||||
"credentials": credentials
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
output({
|
||||
"message": "No credentials stored"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@keys.command()
|
||||
@click.argument("name")
|
||||
@click.argument("api_key")
|
||||
@click.option("--permissions", help="Comma-separated permissions (client,miner,admin)")
|
||||
@click.option("--environment", default="default", help="Environment name")
|
||||
@click.pass_context
|
||||
def create(ctx, name: str, api_key: str, permissions: Optional[str], environment: str):
|
||||
"""Create a new API key entry"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
if not api_key or len(api_key) < 10:
|
||||
error("Invalid API key format")
|
||||
return
|
||||
|
||||
auth_manager.store_credential(name, api_key, environment)
|
||||
|
||||
output({
|
||||
"status": "created",
|
||||
"name": name,
|
||||
"environment": environment,
|
||||
"permissions": permissions or "none"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@keys.command()
|
||||
@click.argument("name")
|
||||
@click.option("--environment", default="default", help="Environment name")
|
||||
@click.pass_context
|
||||
def revoke(ctx, name: str, environment: str):
|
||||
"""Revoke an API key"""
|
||||
auth_manager = AuthManager()
|
||||
|
||||
auth_manager.delete_credential(name, environment)
|
||||
|
||||
output({
|
||||
"status": "revoked",
|
||||
"name": name,
|
||||
"environment": environment
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@keys.command()
|
||||
@click.pass_context
|
||||
def rotate(ctx):
|
||||
"""Rotate all API keys (placeholder)"""
|
||||
warning("Key rotation not implemented yet")
|
||||
|
||||
output({
|
||||
"message": "Key rotation would update all stored keys",
|
||||
"status": "placeholder"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@auth.command()
|
||||
@click.argument("name")
|
||||
@click.pass_context
|
||||
def import_env(ctx, name: str):
|
||||
"""Import API key from environment variable"""
|
||||
env_var = f"{name.upper()}_API_KEY"
|
||||
api_key = os.getenv(env_var)
|
||||
|
||||
if not api_key:
|
||||
error(f"Environment variable {env_var} not set")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
auth_manager = AuthManager()
|
||||
auth_manager.store_credential(name, api_key)
|
||||
|
||||
output({
|
||||
"status": "imported",
|
||||
"name": name,
|
||||
"source": env_var
|
||||
}, ctx.obj['output_format'])
|
||||
1251
cli/commands/blockchain.py
Executable file
1251
cli/commands/blockchain.py
Executable file
File diff suppressed because it is too large
Load Diff
562
cli/commands/chain.py
Executable file
562
cli/commands/chain.py
Executable file
@@ -0,0 +1,562 @@
|
||||
"""Chain management commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
from typing import Optional
|
||||
from core.chain_manager import ChainManager, ChainNotFoundError, NodeNotAvailableError
|
||||
from core.config import MultiChainConfig, load_multichain_config
|
||||
from models.chain import ChainType
|
||||
from utils import output, error, success
|
||||
|
||||
@click.group()
|
||||
def chain():
|
||||
"""Multi-chain management commands"""
|
||||
pass
|
||||
|
||||
@chain.command()
|
||||
@click.option('--type', 'chain_type', type=click.Choice(['main', 'topic', 'private', 'all']),
|
||||
default='all', help='Filter by chain type')
|
||||
@click.option('--show-private', is_flag=True, help='Show private chains')
|
||||
@click.option('--sort', type=click.Choice(['id', 'size', 'nodes', 'created']),
|
||||
default='id', help='Sort by field')
|
||||
@click.pass_context
|
||||
def list(ctx, chain_type, show_private, sort):
|
||||
"""List all available chains"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
# Get chains
|
||||
import asyncio
|
||||
chains = asyncio.run(chain_manager.list_chains(
|
||||
chain_type=ChainType(chain_type) if chain_type != 'all' else None,
|
||||
include_private=show_private,
|
||||
sort_by=sort
|
||||
))
|
||||
|
||||
if not chains:
|
||||
output("No chains found", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
# Format output
|
||||
chains_data = [
|
||||
{
|
||||
"Chain ID": chain.id,
|
||||
"Type": chain.type.value,
|
||||
"Purpose": chain.purpose,
|
||||
"Name": chain.name,
|
||||
"Size": f"{chain.size_mb:.1f}MB",
|
||||
"Nodes": chain.node_count,
|
||||
"Contracts": chain.contract_count,
|
||||
"Clients": chain.client_count,
|
||||
"Miners": chain.miner_count,
|
||||
"Status": chain.status.value
|
||||
}
|
||||
for chain in chains
|
||||
]
|
||||
|
||||
output(chains_data, ctx.obj.get('output_format', 'table'), title="Available Chains")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error listing chains: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.option('--chain-id', help='Specific chain ID to check status (shows all if not specified)')
|
||||
@click.option('--detailed', is_flag=True, help='Show detailed status information')
|
||||
@click.pass_context
|
||||
def status(ctx, chain_id, detailed):
|
||||
"""Check status of chains"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
import asyncio
|
||||
|
||||
if chain_id:
|
||||
# Get specific chain status
|
||||
chain_info = asyncio.run(chain_manager.get_chain_info(chain_id, detailed=detailed))
|
||||
|
||||
status_data = {
|
||||
"Chain ID": chain_info.id,
|
||||
"Name": chain_info.name,
|
||||
"Type": chain_info.type.value,
|
||||
"Status": chain_info.status.value,
|
||||
"Block Height": chain_info.block_height,
|
||||
"Active Nodes": chain_info.active_nodes,
|
||||
"Total Nodes": chain_info.node_count
|
||||
}
|
||||
|
||||
if detailed:
|
||||
status_data.update({
|
||||
"Consensus": chain_info.consensus_algorithm.value,
|
||||
"TPS": f"{chain_info.tps:.1f}",
|
||||
"Gas Price": f"{chain_info.gas_price / 1e9:.1f} gwei",
|
||||
"Memory Usage": f"{chain_info.memory_usage_mb:.1f}MB"
|
||||
})
|
||||
|
||||
output(status_data, ctx.obj.get('output_format', 'table'), title=f"Chain Status: {chain_id}")
|
||||
else:
|
||||
# Get all chains status
|
||||
chains = asyncio.run(chain_manager.list_chains())
|
||||
|
||||
if not chains:
|
||||
output({"message": "No chains found"}, ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
status_list = []
|
||||
for chain in chains:
|
||||
status_info = {
|
||||
"Chain ID": chain.id,
|
||||
"Name": chain.name,
|
||||
"Type": chain.type.value,
|
||||
"Status": chain.status.value,
|
||||
"Block Height": chain.block_height,
|
||||
"Active Nodes": chain.active_nodes
|
||||
}
|
||||
status_list.append(status_info)
|
||||
|
||||
output(status_list, ctx.obj.get('output_format', 'table'), title="Chain Status Overview")
|
||||
|
||||
except ChainNotFoundError:
|
||||
error(f"Chain {chain_id} not found")
|
||||
raise click.Abort()
|
||||
except Exception as e:
|
||||
error(f"Error getting chain status: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--detailed', is_flag=True, help='Show detailed information')
|
||||
@click.option('--metrics', is_flag=True, help='Show performance metrics')
|
||||
@click.pass_context
|
||||
def info(ctx, chain_id, detailed, metrics):
|
||||
"""Get detailed information about a chain"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
import asyncio
|
||||
chain_info = asyncio.run(chain_manager.get_chain_info(chain_id, detailed, metrics))
|
||||
|
||||
# Basic information
|
||||
basic_info = {
|
||||
"Chain ID": chain_info.id,
|
||||
"Type": chain_info.type.value,
|
||||
"Purpose": chain_info.purpose,
|
||||
"Name": chain_info.name,
|
||||
"Description": chain_info.description or "No description",
|
||||
"Status": chain_info.status.value,
|
||||
"Created": chain_info.created_at.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"Block Height": chain_info.block_height,
|
||||
"Size": f"{chain_info.size_mb:.1f}MB"
|
||||
}
|
||||
|
||||
output(basic_info, ctx.obj.get('output_format', 'table'), title=f"Chain Information: {chain_id}")
|
||||
|
||||
if detailed:
|
||||
# Network details
|
||||
network_info = {
|
||||
"Total Nodes": chain_info.node_count,
|
||||
"Active Nodes": chain_info.active_nodes,
|
||||
"Consensus": chain_info.consensus_algorithm.value,
|
||||
"Block Time": f"{chain_info.block_time}s",
|
||||
"Clients": chain_info.client_count,
|
||||
"Miners": chain_info.miner_count,
|
||||
"Contracts": chain_info.contract_count,
|
||||
"Agents": chain_info.agent_count,
|
||||
"Privacy": chain_info.privacy.visibility,
|
||||
"Access Control": chain_info.privacy.access_control
|
||||
}
|
||||
|
||||
output(network_info, ctx.obj.get('output_format', 'table'), title="Network Details")
|
||||
|
||||
if metrics:
|
||||
# Performance metrics
|
||||
performance_info = {
|
||||
"TPS": f"{chain_info.tps:.1f}",
|
||||
"Avg Block Time": f"{chain_info.avg_block_time:.1f}s",
|
||||
"Avg Gas Used": f"{chain_info.avg_gas_used:,}",
|
||||
"Gas Price": f"{chain_info.gas_price / 1e9:.1f} gwei",
|
||||
"Growth Rate": f"{chain_info.growth_rate_mb_per_day:.1f}MB/day",
|
||||
"Memory Usage": f"{chain_info.memory_usage_mb:.1f}MB",
|
||||
"Disk Usage": f"{chain_info.disk_usage_mb:.1f}MB"
|
||||
}
|
||||
|
||||
output(performance_info, ctx.obj.get('output_format', 'table'), title="Performance Metrics")
|
||||
|
||||
except ChainNotFoundError:
|
||||
error(f"Chain {chain_id} not found")
|
||||
raise click.Abort()
|
||||
except Exception as e:
|
||||
error(f"Error getting chain info: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('config_file', type=click.Path(exists=True))
|
||||
@click.option('--node', help='Target node for chain creation')
|
||||
@click.option('--dry-run', is_flag=True, help='Show what would be created without actually creating')
|
||||
@click.pass_context
|
||||
def create(ctx, config_file, node, dry_run):
|
||||
"""Create a new chain from configuration file"""
|
||||
try:
|
||||
import yaml
|
||||
from models.chain import ChainConfig
|
||||
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
# Load and validate configuration
|
||||
with open(config_file, 'r') as f:
|
||||
config_data = yaml.safe_load(f)
|
||||
|
||||
chain_config = ChainConfig(**config_data['chain'])
|
||||
|
||||
if dry_run:
|
||||
dry_run_info = {
|
||||
"Chain Type": chain_config.type.value,
|
||||
"Purpose": chain_config.purpose,
|
||||
"Name": chain_config.name,
|
||||
"Description": chain_config.description or "No description",
|
||||
"Consensus": chain_config.consensus.algorithm.value,
|
||||
"Privacy": chain_config.privacy.visibility,
|
||||
"Target Node": node or "Auto-selected"
|
||||
}
|
||||
|
||||
output(dry_run_info, ctx.obj.get('output_format', 'table'), title="Dry Run - Chain Creation")
|
||||
return
|
||||
|
||||
# Create chain
|
||||
chain_id = chain_manager.create_chain(chain_config, node)
|
||||
|
||||
success(f"Chain created successfully!")
|
||||
result = {
|
||||
"Chain ID": chain_id,
|
||||
"Type": chain_config.type.value,
|
||||
"Purpose": chain_config.purpose,
|
||||
"Name": chain_config.name,
|
||||
"Node": node or "Auto-selected"
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
if chain_config.privacy.visibility == "private":
|
||||
success("Private chain created! Use access codes to invite participants.")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error creating chain: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--force', is_flag=True, help='Force deletion without confirmation')
|
||||
@click.option('--confirm', is_flag=True, help='Confirm deletion')
|
||||
@click.pass_context
|
||||
def delete(ctx, chain_id, force, confirm):
|
||||
"""Delete a chain permanently"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
# Get chain information for confirmation
|
||||
import asyncio
|
||||
chain_info = asyncio.run(chain_manager.get_chain_info(chain_id, detailed=True))
|
||||
|
||||
if not force:
|
||||
# Show warning and confirmation
|
||||
warning_info = {
|
||||
"Chain ID": chain_id,
|
||||
"Type": chain_info.type.value,
|
||||
"Purpose": chain_info.purpose,
|
||||
"Name": chain_info.name,
|
||||
"Status": chain_info.status.value,
|
||||
"Participants": chain_info.client_count,
|
||||
"Transactions": "Multiple" # Would get actual count
|
||||
}
|
||||
|
||||
output(warning_info, ctx.obj.get('output_format', 'table'), title="Chain Deletion Warning")
|
||||
|
||||
if not confirm:
|
||||
error("To confirm deletion, use --confirm flag")
|
||||
raise click.Abort()
|
||||
|
||||
# Delete chain
|
||||
import asyncio
|
||||
is_success = asyncio.run(chain_manager.delete_chain(chain_id, force))
|
||||
|
||||
if is_success:
|
||||
success(f"Chain {chain_id} deleted successfully!")
|
||||
else:
|
||||
error(f"Failed to delete chain {chain_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except ChainNotFoundError:
|
||||
error(f"Chain {chain_id} not found")
|
||||
raise click.Abort()
|
||||
except Exception as e:
|
||||
error(f"Error deleting chain: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.argument('node_id')
|
||||
@click.pass_context
|
||||
def add(ctx, chain_id, node_id):
|
||||
"""Add a chain to a specific node"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
import asyncio
|
||||
is_success = asyncio.run(chain_manager.add_chain_to_node(chain_id, node_id))
|
||||
|
||||
if is_success:
|
||||
success(f"Chain {chain_id} added to node {node_id} successfully!")
|
||||
else:
|
||||
error(f"Failed to add chain {chain_id} to node {node_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error adding chain to node: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.argument('node_id')
|
||||
@click.option('--migrate', is_flag=True, help='Migrate to another node before removal')
|
||||
@click.pass_context
|
||||
def remove(ctx, chain_id, node_id, migrate):
|
||||
"""Remove a chain from a specific node"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
is_success = chain_manager.remove_chain_from_node(chain_id, node_id, migrate)
|
||||
|
||||
if is_success:
|
||||
success(f"Chain {chain_id} removed from node {node_id} successfully!")
|
||||
else:
|
||||
error(f"Failed to remove chain {chain_id} from node {node_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error removing chain from node: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.argument('from_node')
|
||||
@click.argument('to_node')
|
||||
@click.option('--dry-run', is_flag=True, help='Show migration plan without executing')
|
||||
@click.option('--verify', is_flag=True, help='Verify migration after completion')
|
||||
@click.pass_context
|
||||
def migrate(ctx, chain_id, from_node, to_node, dry_run, verify):
|
||||
"""Migrate a chain between nodes"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
migration_result = chain_manager.migrate_chain(chain_id, from_node, to_node, dry_run)
|
||||
|
||||
if dry_run:
|
||||
plan_info = {
|
||||
"Chain ID": chain_id,
|
||||
"Source Node": from_node,
|
||||
"Target Node": to_node,
|
||||
"Feasible": "Yes" if migration_result.success else "No",
|
||||
"Estimated Time": f"{migration_result.transfer_time_seconds}s",
|
||||
"Error": migration_result.error or "None"
|
||||
}
|
||||
|
||||
output(plan_info, ctx.obj.get('output_format', 'table'), title="Migration Plan")
|
||||
return
|
||||
|
||||
if migration_result.success:
|
||||
success(f"Chain migration completed successfully!")
|
||||
result = {
|
||||
"Chain ID": chain_id,
|
||||
"Source Node": from_node,
|
||||
"Target Node": to_node,
|
||||
"Blocks Transferred": migration_result.blocks_transferred,
|
||||
"Transfer Time": f"{migration_result.transfer_time_seconds}s",
|
||||
"Verification": "Passed" if migration_result.verification_passed else "Failed"
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error(f"Migration failed: {migration_result.error}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during migration: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--path', help='Backup directory path')
|
||||
@click.option('--compress', is_flag=True, help='Compress backup')
|
||||
@click.option('--verify', is_flag=True, help='Verify backup integrity')
|
||||
@click.pass_context
|
||||
def backup(ctx, chain_id, path, compress, verify):
|
||||
"""Backup chain data"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
import asyncio
|
||||
backup_result = asyncio.run(chain_manager.backup_chain(chain_id, path, compress, verify))
|
||||
|
||||
success(f"Chain backup completed successfully!")
|
||||
result = {
|
||||
"Chain ID": chain_id,
|
||||
"Backup File": backup_result.backup_file,
|
||||
"Original Size": f"{backup_result.original_size_mb:.1f}MB",
|
||||
"Backup Size": f"{backup_result.backup_size_mb:.1f}MB",
|
||||
"Compression": f"{backup_result.compression_ratio:.1f}x" if compress else "None",
|
||||
"Checksum": backup_result.checksum,
|
||||
"Verification": "Passed" if backup_result.verification_passed else "Failed"
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during backup: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('backup_file', type=click.Path(exists=True))
|
||||
@click.option('--node', help='Target node for restoration')
|
||||
@click.option('--verify', is_flag=True, help='Verify restoration')
|
||||
@click.pass_context
|
||||
def restore(ctx, backup_file, node, verify):
|
||||
"""Restore chain from backup"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
import asyncio
|
||||
restore_result = asyncio.run(chain_manager.restore_chain(backup_file, node, verify))
|
||||
|
||||
success(f"Chain restoration completed successfully!")
|
||||
result = {
|
||||
"Chain ID": restore_result.chain_id,
|
||||
"Node": restore_result.node_id,
|
||||
"Blocks Restored": restore_result.blocks_restored,
|
||||
"Verification": "Passed" if restore_result.verification_passed else "Failed"
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during restoration: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@chain.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--realtime', is_flag=True, help='Real-time monitoring')
|
||||
@click.option('--export', help='Export monitoring data to file')
|
||||
@click.option('--interval', default=5, help='Update interval in seconds')
|
||||
@click.pass_context
|
||||
def monitor(ctx, chain_id, realtime, export, interval):
|
||||
"""Monitor chain activity"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
chain_manager = ChainManager(config)
|
||||
|
||||
if realtime:
|
||||
# Real-time monitoring (placeholder implementation)
|
||||
from rich.console import Console
|
||||
from rich.layout import Layout
|
||||
from rich.live import Live
|
||||
import time
|
||||
|
||||
console = Console()
|
||||
|
||||
def generate_monitor_layout():
|
||||
try:
|
||||
import asyncio
|
||||
chain_info = asyncio.run(chain_manager.get_chain_info(chain_id, detailed=True, metrics=True))
|
||||
|
||||
layout = Layout()
|
||||
layout.split_column(
|
||||
Layout(name="header", size=3),
|
||||
Layout(name="stats"),
|
||||
Layout(name="activity", size=10)
|
||||
)
|
||||
|
||||
# Header
|
||||
layout["header"].update(
|
||||
f"Chain Monitor: {chain_id} - {chain_info.status.value.upper()}"
|
||||
)
|
||||
|
||||
# Stats table
|
||||
stats_data = [
|
||||
["Block Height", str(chain_info.block_height)],
|
||||
["TPS", f"{chain_info.tps:.1f}"],
|
||||
["Active Nodes", str(chain_info.active_nodes)],
|
||||
["Gas Price", f"{chain_info.gas_price / 1e9:.1f} gwei"],
|
||||
["Memory Usage", f"{chain_info.memory_usage_mb:.1f}MB"],
|
||||
["Disk Usage", f"{chain_info.disk_usage_mb:.1f}MB"]
|
||||
]
|
||||
|
||||
layout["stats"].update(str(stats_data))
|
||||
|
||||
# Recent activity (placeholder)
|
||||
layout["activity"].update("Recent activity would be displayed here")
|
||||
|
||||
return layout
|
||||
except Exception as e:
|
||||
return f"Error getting chain info: {e}"
|
||||
|
||||
with Live(generate_monitor_layout(), refresh_per_second=1) as live:
|
||||
try:
|
||||
while True:
|
||||
live.update(generate_monitor_layout())
|
||||
time.sleep(interval)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Monitoring stopped by user[/yellow]")
|
||||
else:
|
||||
# Single snapshot
|
||||
import asyncio
|
||||
chain_info = asyncio.run(chain_manager.get_chain_info(chain_id, detailed=True, metrics=True))
|
||||
|
||||
stats_data = [
|
||||
{
|
||||
"Metric": "Block Height",
|
||||
"Value": str(chain_info.block_height)
|
||||
},
|
||||
{
|
||||
"Metric": "TPS",
|
||||
"Value": f"{chain_info.tps:.1f}"
|
||||
},
|
||||
{
|
||||
"Metric": "Active Nodes",
|
||||
"Value": str(chain_info.active_nodes)
|
||||
},
|
||||
{
|
||||
"Metric": "Gas Price",
|
||||
"Value": f"{chain_info.gas_price / 1e9:.1f} gwei"
|
||||
},
|
||||
{
|
||||
"Metric": "Memory Usage",
|
||||
"Value": f"{chain_info.memory_usage_mb:.1f}MB"
|
||||
},
|
||||
{
|
||||
"Metric": "Disk Usage",
|
||||
"Value": f"{chain_info.disk_usage_mb:.1f}MB"
|
||||
}
|
||||
]
|
||||
|
||||
output(stats_data, ctx.obj.get('output_format', 'table'), title=f"Chain Statistics: {chain_id}")
|
||||
|
||||
if export:
|
||||
import json
|
||||
with open(export, 'w') as f:
|
||||
json.dump(chain_info.dict(), f, indent=2, default=str)
|
||||
success(f"Statistics exported to {export}")
|
||||
|
||||
except ChainNotFoundError:
|
||||
error(f"Chain {chain_id} not found")
|
||||
raise click.Abort()
|
||||
except Exception as e:
|
||||
error(f"Error during monitoring: {str(e)}")
|
||||
raise click.Abort()
|
||||
599
cli/commands/client.py
Executable file
599
cli/commands/client.py
Executable file
@@ -0,0 +1,599 @@
|
||||
"""Client commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
from typing import Optional
|
||||
from utils import output, error, success
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.pass_context
|
||||
def client(ctx):
|
||||
"""Submit and manage jobs"""
|
||||
# Set role for client commands
|
||||
ctx.ensure_object(dict)
|
||||
ctx.parent.detected_role = 'client'
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.option("--type", "job_type", default="inference", help="Job type")
|
||||
@click.option("--prompt", help="Prompt for inference jobs")
|
||||
@click.option("--model", help="Model name")
|
||||
@click.option("--ttl", default=900, help="Time to live in seconds")
|
||||
@click.option("--file", type=click.File('r'), help="Submit job from JSON file")
|
||||
@click.option("--retries", default=0, help="Number of retry attempts (0 = no retry)")
|
||||
@click.option("--retry-delay", default=1.0, help="Initial retry delay in seconds")
|
||||
@click.pass_context
|
||||
def submit(ctx, job_type: str, prompt: Optional[str], model: Optional[str],
|
||||
ttl: int, file, retries: int, retry_delay: float):
|
||||
"""Submit a job to the coordinator"""
|
||||
# Check if we're in test mode
|
||||
if ctx.parent and ctx.parent.parent and ctx.parent.parent.params.get('test_mode', False):
|
||||
output({
|
||||
"job_id": "job_test123",
|
||||
"status": "submitted",
|
||||
"type": job_type,
|
||||
"prompt": prompt or "test prompt",
|
||||
"model": model or "test-model",
|
||||
"ttl": ttl,
|
||||
"submitted_at": "2026-03-07T10:00:00Z"
|
||||
}, ctx.obj.get("output_format", "table"))
|
||||
return
|
||||
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Build job data
|
||||
if file:
|
||||
try:
|
||||
task_data = json.load(file)
|
||||
except Exception as e:
|
||||
error(f"Failed to read job file: {e}")
|
||||
return
|
||||
else:
|
||||
task_data = {"type": job_type}
|
||||
if prompt:
|
||||
task_data["prompt"] = prompt
|
||||
if model:
|
||||
task_data["model"] = model
|
||||
|
||||
# Submit job with retry and exponential backoff
|
||||
max_attempts = retries + 1
|
||||
for attempt in range(1, max_attempts + 1):
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
# Use correct API endpoint format
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/jobs",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or ""
|
||||
},
|
||||
json={
|
||||
"payload": task_data,
|
||||
"ttl_seconds": ttl
|
||||
},
|
||||
timeout=10.0
|
||||
)
|
||||
|
||||
if response.status_code in [200, 201]:
|
||||
job = response.json()
|
||||
result = {
|
||||
"job_id": job.get('job_id'),
|
||||
"status": "submitted",
|
||||
"message": "Job submitted successfully"
|
||||
}
|
||||
if attempt > 1:
|
||||
result["attempts"] = attempt
|
||||
output(result, ctx.obj['output_format'])
|
||||
return
|
||||
else:
|
||||
if attempt < max_attempts:
|
||||
delay = retry_delay * (2 ** (attempt - 1))
|
||||
click.echo(f"Attempt {attempt}/{max_attempts} failed ({response.status_code}), retrying in {delay:.1f}s...")
|
||||
time.sleep(delay)
|
||||
else:
|
||||
error(f"Failed to submit job: {response.status_code} - {response.text}")
|
||||
ctx.exit(response.status_code)
|
||||
except Exception as e:
|
||||
if attempt < max_attempts:
|
||||
delay = retry_delay * (2 ** (attempt - 1))
|
||||
click.echo(f"Attempt {attempt}/{max_attempts} failed ({e}), retrying in {delay:.1f}s...")
|
||||
time.sleep(delay)
|
||||
else:
|
||||
error(f"Network error after {max_attempts} attempts: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.argument("job_id")
|
||||
@click.pass_context
|
||||
def status(ctx, job_id: str):
|
||||
"""Check job status"""
|
||||
# Check if we're in test mode
|
||||
if ctx.parent and ctx.parent.parent and ctx.parent.parent.params.get('test_mode', False):
|
||||
output({
|
||||
"job_id": job_id,
|
||||
"status": "completed",
|
||||
"progress": 100,
|
||||
"result": "Test job completed successfully",
|
||||
"created_at": "2026-03-07T10:00:00Z",
|
||||
"completed_at": "2026-03-07T10:01:00Z"
|
||||
}, ctx.obj.get("output_format", "table"))
|
||||
return
|
||||
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/jobs/{job_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
output(data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get job status: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.option("--limit", default=10, help="Number of blocks to show")
|
||||
@click.option('--chain-id', help='Specific chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def blocks(ctx, limit: int, chain_id: str):
|
||||
"""List recent blocks from specific chain"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Query specific chain (default to ait-devnet if not specified)
|
||||
target_chain = chain_id or 'ait-devnet'
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/blocks",
|
||||
params={"limit": limit, "chain_id": target_chain},
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
blocks = response.json()
|
||||
output({
|
||||
"blocks": blocks,
|
||||
"chain_id": target_chain,
|
||||
"limit": limit,
|
||||
"query_type": "single_chain"
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get blocks from chain {target_chain}: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.argument("job_id")
|
||||
@click.pass_context
|
||||
def cancel(ctx, job_id: str):
|
||||
"""Cancel a job"""
|
||||
# Check if we're in test mode
|
||||
if ctx.parent and ctx.parent.parent and ctx.parent.parent.params.get('test_mode', False):
|
||||
output({
|
||||
"job_id": job_id,
|
||||
"status": "cancelled",
|
||||
"cancelled_at": "2026-03-07T10:00:00Z",
|
||||
"message": "Job cancelled successfully"
|
||||
}, ctx.obj.get("output_format", "table"))
|
||||
return
|
||||
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/jobs/{job_id}/cancel",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
success(f"Job {job_id} cancelled")
|
||||
else:
|
||||
error(f"Failed to cancel job: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.argument("job_id")
|
||||
@click.option("--wait", is_flag=True, help="Wait for job to complete before showing result")
|
||||
@click.option("--timeout", type=int, default=120, help="Max wait time in seconds")
|
||||
@click.pass_context
|
||||
def result(ctx, job_id: str, wait: bool, timeout: int):
|
||||
"""Retrieve the result of a completed job"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
start = time.time()
|
||||
while True:
|
||||
try:
|
||||
with httpx.Client() as http:
|
||||
# Try the dedicated result endpoint first
|
||||
response = http.get(
|
||||
f"{config.coordinator_url}/v1/jobs/{job_id}/result",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result_data = response.json()
|
||||
success(f"Job {job_id} completed")
|
||||
output(result_data, ctx.obj['output_format'])
|
||||
return
|
||||
elif response.status_code == 425:
|
||||
# Job not ready yet
|
||||
if wait and (time.time() - start) < timeout:
|
||||
time.sleep(3)
|
||||
continue
|
||||
# Check status for more info
|
||||
status_resp = http.get(
|
||||
f"{config.coordinator_url}/v1/jobs/{job_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if status_resp.status_code == 200:
|
||||
job_data = status_resp.json()
|
||||
output({"job_id": job_id, "state": job_data.get("state", "UNKNOWN"), "message": "Job not yet completed"}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Job not ready (425)")
|
||||
return
|
||||
elif response.status_code == 404:
|
||||
error(f"Job {job_id} not found")
|
||||
return
|
||||
else:
|
||||
error(f"Failed to get result: {response.status_code}")
|
||||
return
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
return
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.option("--limit", default=10, help="Number of receipts to show")
|
||||
@click.option("--job-id", help="Filter by job ID")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.pass_context
|
||||
def receipts(ctx, limit: int, job_id: Optional[str], status: Optional[str]):
|
||||
"""List job receipts"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
params = {"limit": limit}
|
||||
if job_id:
|
||||
params["job_id"] = job_id
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/explorer/receipts",
|
||||
params=params,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
receipts = response.json()
|
||||
output(receipts, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get receipts: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command()
|
||||
@click.option("--limit", default=10, help="Number of jobs to show")
|
||||
@click.option("--status", help="Filter by status (pending, running, completed, failed)")
|
||||
@click.option("--type", help="Filter by job type")
|
||||
@click.option("--from-time", help="Filter jobs from this timestamp (ISO format)")
|
||||
@click.option("--to-time", help="Filter jobs until this timestamp (ISO format)")
|
||||
@click.pass_context
|
||||
def history(ctx, limit: int, status: Optional[str], type: Optional[str],
|
||||
from_time: Optional[str], to_time: Optional[str]):
|
||||
"""Show job history with filtering options"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
params = {"limit": limit}
|
||||
if status:
|
||||
params["status"] = status
|
||||
if type:
|
||||
params["type"] = type
|
||||
if from_time:
|
||||
params["from_time"] = from_time
|
||||
if to_time:
|
||||
params["to_time"] = to_time
|
||||
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/jobs",
|
||||
params=params,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
jobs = response.json()
|
||||
output(jobs, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get job history: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command(name="batch-submit")
|
||||
@click.argument("file_path", type=click.Path(exists=True))
|
||||
@click.option("--format", "file_format", type=click.Choice(["json", "csv"]), default=None, help="File format (auto-detected if not specified)")
|
||||
@click.option("--retries", default=0, help="Retry attempts per job")
|
||||
@click.option("--delay", default=0.5, help="Delay between submissions (seconds)")
|
||||
@click.pass_context
|
||||
def batch_submit(ctx, file_path: str, file_format: Optional[str], retries: int, delay: float):
|
||||
"""Submit multiple jobs from a CSV or JSON file"""
|
||||
import csv
|
||||
from pathlib import Path
|
||||
from utils import progress_bar
|
||||
|
||||
config = ctx.obj['config']
|
||||
path = Path(file_path)
|
||||
|
||||
if not file_format:
|
||||
file_format = "csv" if path.suffix.lower() == ".csv" else "json"
|
||||
|
||||
jobs_data = []
|
||||
if file_format == "json":
|
||||
with open(path) as f:
|
||||
data = json.load(f)
|
||||
jobs_data = data if isinstance(data, list) else [data]
|
||||
else:
|
||||
with open(path) as f:
|
||||
reader = csv.DictReader(f)
|
||||
jobs_data = list(reader)
|
||||
|
||||
if not jobs_data:
|
||||
error("No jobs found in file")
|
||||
return
|
||||
|
||||
results = {"submitted": 0, "failed": 0, "job_ids": []}
|
||||
|
||||
with progress_bar("Submitting jobs...", total=len(jobs_data)) as (progress, task):
|
||||
for i, job in enumerate(jobs_data):
|
||||
try:
|
||||
task_data = {"type": job.get("type", "inference")}
|
||||
if "prompt" in job:
|
||||
task_data["prompt"] = job["prompt"]
|
||||
if "model" in job:
|
||||
task_data["model"] = job["model"]
|
||||
|
||||
with httpx.Client() as http_client:
|
||||
response = http_client.post(
|
||||
f"{config.coordinator_url}/v1/jobs",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or ""
|
||||
},
|
||||
json={"payload": task_data, "ttl_seconds": int(job.get("ttl", 900))}
|
||||
)
|
||||
if response.status_code == 201:
|
||||
result = response.json()
|
||||
results["submitted"] += 1
|
||||
results["job_ids"].append(result.get("job_id"))
|
||||
else:
|
||||
results["failed"] += 1
|
||||
except Exception:
|
||||
results["failed"] += 1
|
||||
|
||||
progress.update(task, advance=1)
|
||||
if delay and i < len(jobs_data) - 1:
|
||||
time.sleep(delay)
|
||||
|
||||
output(results, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@client.command(name="template")
|
||||
@click.argument("action", type=click.Choice(["save", "list", "run", "delete"]))
|
||||
@click.option("--name", help="Template name")
|
||||
@click.option("--type", "job_type", help="Job type")
|
||||
@click.option("--prompt", help="Prompt text")
|
||||
@click.option("--model", help="Model name")
|
||||
@click.option("--ttl", type=int, default=900, help="TTL in seconds")
|
||||
@click.pass_context
|
||||
def template(ctx, action: str, name: Optional[str], job_type: Optional[str],
|
||||
prompt: Optional[str], model: Optional[str], ttl: int):
|
||||
"""Manage job templates for repeated tasks"""
|
||||
from pathlib import Path
|
||||
|
||||
template_dir = Path.home() / ".aitbc" / "templates"
|
||||
template_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if action == "save":
|
||||
if not name:
|
||||
error("Template name required (--name)")
|
||||
return
|
||||
template_data = {"type": job_type or "inference", "ttl": ttl}
|
||||
if prompt:
|
||||
template_data["prompt"] = prompt
|
||||
if model:
|
||||
template_data["model"] = model
|
||||
with open(template_dir / f"{name}.json", "w") as f:
|
||||
json.dump(template_data, f, indent=2)
|
||||
output({"status": "saved", "name": name, "template": template_data}, ctx.obj['output_format'])
|
||||
|
||||
elif action == "list":
|
||||
templates = []
|
||||
for tf in template_dir.glob("*.json"):
|
||||
with open(tf) as f:
|
||||
data = json.load(f)
|
||||
templates.append({"name": tf.stem, **data})
|
||||
output(templates if templates else {"message": "No templates found"}, ctx.obj['output_format'])
|
||||
|
||||
elif action == "run":
|
||||
if not name:
|
||||
error("Template name required (--name)")
|
||||
return
|
||||
tf = template_dir / f"{name}.json"
|
||||
if not tf.exists():
|
||||
error(f"Template '{name}' not found")
|
||||
return
|
||||
with open(tf) as f:
|
||||
tmpl = json.load(f)
|
||||
if prompt:
|
||||
tmpl["prompt"] = prompt
|
||||
if model:
|
||||
tmpl["model"] = model
|
||||
ctx.invoke(submit, job_type=tmpl.get("type", "inference"),
|
||||
prompt=tmpl.get("prompt"), model=tmpl.get("model"),
|
||||
ttl=tmpl.get("ttl", 900), file=None, retries=0, retry_delay=1.0)
|
||||
|
||||
elif action == "delete":
|
||||
if not name:
|
||||
error("Template name required (--name)")
|
||||
return
|
||||
tf = template_dir / f"{name}.json"
|
||||
if not tf.exists():
|
||||
error(f"Template '{name}' not found")
|
||||
return
|
||||
tf.unlink()
|
||||
output({"status": "deleted", "name": name}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@client.command(name="pay")
|
||||
@click.argument("job_id")
|
||||
@click.argument("amount", type=float)
|
||||
@click.option("--currency", default="AITBC", help="Payment currency")
|
||||
@click.option("--method", "payment_method", default="aitbc_token", type=click.Choice(["aitbc_token", "bitcoin"]), help="Payment method")
|
||||
@click.option("--escrow-timeout", type=int, default=3600, help="Escrow timeout in seconds")
|
||||
@click.pass_context
|
||||
def pay(ctx, job_id: str, amount: float, currency: str, payment_method: str, escrow_timeout: int):
|
||||
"""Create a payment for a job"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as http_client:
|
||||
response = http_client.post(
|
||||
f"{config.coordinator_url}/v1/payments",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or ""
|
||||
},
|
||||
json={
|
||||
"job_id": job_id,
|
||||
"amount": amount,
|
||||
"currency": currency,
|
||||
"payment_method": payment_method,
|
||||
"escrow_timeout_seconds": escrow_timeout
|
||||
}
|
||||
)
|
||||
if response.status_code == 201:
|
||||
result = response.json()
|
||||
success(f"Payment created for job {job_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Payment failed: {response.status_code} - {response.text}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command(name="payment-status")
|
||||
@click.argument("job_id")
|
||||
@click.pass_context
|
||||
def payment_status(ctx, job_id: str):
|
||||
"""Get payment status for a job"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as http_client:
|
||||
response = http_client.get(
|
||||
f"{config.coordinator_url}/v1/jobs/{job_id}/payment",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if response.status_code == 200:
|
||||
output(response.json(), ctx.obj['output_format'])
|
||||
elif response.status_code == 404:
|
||||
error(f"No payment found for job {job_id}")
|
||||
ctx.exit(1)
|
||||
else:
|
||||
error(f"Failed: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command(name="payment-receipt")
|
||||
@click.argument("payment_id")
|
||||
@click.pass_context
|
||||
def payment_receipt(ctx, payment_id: str):
|
||||
"""Get payment receipt with verification"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as http_client:
|
||||
response = http_client.get(
|
||||
f"{config.coordinator_url}/v1/payments/{payment_id}/receipt",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if response.status_code == 200:
|
||||
output(response.json(), ctx.obj['output_format'])
|
||||
elif response.status_code == 404:
|
||||
error(f"Payment '{payment_id}' not found")
|
||||
ctx.exit(1)
|
||||
else:
|
||||
error(f"Failed: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@client.command(name="refund")
|
||||
@click.argument("job_id")
|
||||
@click.argument("payment_id")
|
||||
@click.option("--reason", required=True, help="Reason for refund")
|
||||
@click.pass_context
|
||||
def refund(ctx, job_id: str, payment_id: str, reason: str):
|
||||
"""Request a refund for a payment"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as http_client:
|
||||
response = http_client.post(
|
||||
f"{config.coordinator_url}/v1/payments/{payment_id}/refund",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or ""
|
||||
},
|
||||
json={
|
||||
"job_id": job_id,
|
||||
"payment_id": payment_id,
|
||||
"reason": reason
|
||||
}
|
||||
)
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Refund processed for payment {payment_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Refund failed: {response.status_code} - {response.text}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
294
cli/commands/compliance.py
Executable file
294
cli/commands/compliance.py
Executable file
@@ -0,0 +1,294 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Compliance CLI Commands - KYC/AML Integration
|
||||
Real compliance verification and monitoring commands
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, Dict, Any
|
||||
from datetime import datetime
|
||||
|
||||
# Import compliance providers
|
||||
from kyc_aml_providers import submit_kyc_verification, check_kyc_status, perform_aml_screening
|
||||
|
||||
@click.group()
|
||||
def compliance():
|
||||
"""Compliance and regulatory management commands"""
|
||||
pass
|
||||
|
||||
@compliance.command()
|
||||
@click.option("--user-id", required=True, help="User ID to verify")
|
||||
@click.option("--provider", required=True, type=click.Choice(['chainalysis', 'sumsub', 'onfido', 'jumio', 'veriff']), help="KYC provider")
|
||||
@click.option("--first-name", required=True, help="Customer first name")
|
||||
@click.option("--last-name", required=True, help="Customer last name")
|
||||
@click.option("--email", required=True, help="Customer email")
|
||||
@click.option("--dob", help="Date of birth (YYYY-MM-DD)")
|
||||
@click.option("--phone", help="Phone number")
|
||||
@click.pass_context
|
||||
def kyc_submit(ctx, user_id: str, provider: str, first_name: str, last_name: str, email: str, dob: str, phone: str):
|
||||
"""Submit KYC verification request"""
|
||||
try:
|
||||
# Prepare customer data
|
||||
customer_data = {
|
||||
"first_name": first_name,
|
||||
"last_name": last_name,
|
||||
"email": email,
|
||||
"date_of_birth": dob,
|
||||
"phone": phone
|
||||
}
|
||||
|
||||
# Remove None values
|
||||
customer_data = {k: v for k, v in customer_data.items() if v is not None}
|
||||
|
||||
# Submit KYC
|
||||
click.echo(f"🔍 Submitting KYC verification for user {user_id} to {provider}...")
|
||||
|
||||
result = asyncio.run(submit_kyc_verification(user_id, provider, customer_data))
|
||||
|
||||
click.echo(f"✅ KYC verification submitted successfully!")
|
||||
click.echo(f"📋 Request ID: {result['request_id']}")
|
||||
click.echo(f"👤 User ID: {result['user_id']}")
|
||||
click.echo(f"🏢 Provider: {result['provider']}")
|
||||
click.echo(f"📊 Status: {result['status']}")
|
||||
click.echo(f"⚠️ Risk Score: {result['risk_score']:.3f}")
|
||||
click.echo(f"📅 Submitted: {result['created_at']}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ KYC submission failed: {e}", err=True)
|
||||
|
||||
@compliance.command()
|
||||
@click.option("--request-id", required=True, help="KYC request ID to check")
|
||||
@click.option("--provider", required=True, type=click.Choice(['chainalysis', 'sumsub', 'onfido', 'jumio', 'veriff']), help="KYC provider")
|
||||
@click.pass_context
|
||||
def kyc_status(ctx, request_id: str, provider: str):
|
||||
"""Check KYC verification status"""
|
||||
try:
|
||||
click.echo(f"🔍 Checking KYC status for request {request_id}...")
|
||||
|
||||
result = asyncio.run(check_kyc_status(request_id, provider))
|
||||
|
||||
# Status icons
|
||||
status_icons = {
|
||||
"pending": "⏳",
|
||||
"approved": "✅",
|
||||
"rejected": "❌",
|
||||
"failed": "💥",
|
||||
"expired": "⏰"
|
||||
}
|
||||
|
||||
status_icon = status_icons.get(result['status'], "❓")
|
||||
|
||||
click.echo(f"{status_icon} KYC Status: {result['status'].upper()}")
|
||||
click.echo(f"📋 Request ID: {result['request_id']}")
|
||||
click.echo(f"👤 User ID: {result['user_id']}")
|
||||
click.echo(f"🏢 Provider: {result['provider']}")
|
||||
click.echo(f"⚠️ Risk Score: {result['risk_score']:.3f}")
|
||||
|
||||
if result.get('rejection_reason'):
|
||||
click.echo(f"🚫 Rejection Reason: {result['rejection_reason']}")
|
||||
|
||||
click.echo(f"📅 Created: {result['created_at']}")
|
||||
|
||||
# Provide guidance based on status
|
||||
if result['status'] == 'pending':
|
||||
click.echo(f"\n💡 Verification is in progress. Check again later.")
|
||||
elif result['status'] == 'approved':
|
||||
click.echo(f"\n🎉 User is verified and can proceed with trading!")
|
||||
elif result['status'] in ['rejected', 'failed']:
|
||||
click.echo(f"\n⚠️ Verification failed. User may need to resubmit documents.")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ KYC status check failed: {e}", err=True)
|
||||
|
||||
@compliance.command()
|
||||
@click.option("--user-id", required=True, help="User ID to screen")
|
||||
@click.option("--first-name", required=True, help="User first name")
|
||||
@click.option("--last-name", required=True, help="User last name")
|
||||
@click.option("--email", required=True, help="User email")
|
||||
@click.option("--dob", help="Date of birth (YYYY-MM-DD)")
|
||||
@click.option("--phone", help="Phone number")
|
||||
@click.pass_context
|
||||
def aml_screen(ctx, user_id: str, first_name: str, last_name: str, email: str, dob: str, phone: str):
|
||||
"""Perform AML screening on user"""
|
||||
try:
|
||||
# Prepare user data
|
||||
user_data = {
|
||||
"first_name": first_name,
|
||||
"last_name": last_name,
|
||||
"email": email,
|
||||
"date_of_birth": dob,
|
||||
"phone": phone
|
||||
}
|
||||
|
||||
# Remove None values
|
||||
user_data = {k: v for k, v in user_data.items() if v is not None}
|
||||
|
||||
click.echo(f"🔍 Performing AML screening for user {user_id}...")
|
||||
|
||||
result = asyncio.run(perform_aml_screening(user_id, user_data))
|
||||
|
||||
# Risk level icons
|
||||
risk_icons = {
|
||||
"low": "🟢",
|
||||
"medium": "🟡",
|
||||
"high": "🟠",
|
||||
"critical": "🔴"
|
||||
}
|
||||
|
||||
risk_icon = risk_icons.get(result['risk_level'], "❓")
|
||||
|
||||
click.echo(f"{risk_icon} AML Risk Level: {result['risk_level'].upper()}")
|
||||
click.echo(f"📊 Risk Score: {result['risk_score']:.3f}")
|
||||
click.echo(f"👤 User ID: {result['user_id']}")
|
||||
click.echo(f"🏢 Provider: {result['provider']}")
|
||||
click.echo(f"📋 Check ID: {result['check_id']}")
|
||||
click.echo(f"📅 Screened: {result['checked_at']}")
|
||||
|
||||
# Sanctions hits
|
||||
if result['sanctions_hits']:
|
||||
click.echo(f"\n🚨 SANCTIONS HITS FOUND:")
|
||||
for hit in result['sanctions_hits']:
|
||||
click.echo(f" • List: {hit['list']}")
|
||||
click.echo(f" Name: {hit['name']}")
|
||||
click.echo(f" Confidence: {hit['confidence']:.2%}")
|
||||
else:
|
||||
click.echo(f"\n✅ No sanctions hits found")
|
||||
|
||||
# Guidance based on risk level
|
||||
if result['risk_level'] == 'critical':
|
||||
click.echo(f"\n🚨 CRITICAL RISK: Immediate action required!")
|
||||
elif result['risk_level'] == 'high':
|
||||
click.echo(f"\n⚠️ HIGH RISK: Manual review recommended")
|
||||
elif result['risk_level'] == 'medium':
|
||||
click.echo(f"\n🟡 MEDIUM RISK: Monitor transactions closely")
|
||||
else:
|
||||
click.echo(f"\n✅ LOW RISK: User cleared for normal activity")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ AML screening failed: {e}", err=True)
|
||||
|
||||
@compliance.command()
|
||||
@click.option("--user-id", required=True, help="User ID for full compliance check")
|
||||
@click.option("--first-name", required=True, help="User first name")
|
||||
@click.option("--last-name", required=True, help="User last name")
|
||||
@click.option("--email", required=True, help="User email")
|
||||
@click.option("--dob", help="Date of birth (YYYY-MM-DD)")
|
||||
@click.option("--phone", help="Phone number")
|
||||
@click.option("--kyc-provider", default="chainalysis", type=click.Choice(['chainalysis', 'sumsub', 'onfido', 'jumio', 'veriff']), help="KYC provider")
|
||||
@click.pass_context
|
||||
def full_check(ctx, user_id: str, first_name: str, last_name: str, email: str, dob: str, phone: str, kyc_provider: str):
|
||||
"""Perform full compliance check (KYC + AML)"""
|
||||
try:
|
||||
click.echo(f"🔍 Performing full compliance check for user {user_id}...")
|
||||
click.echo(f"🏢 KYC Provider: {kyc_provider}")
|
||||
click.echo()
|
||||
|
||||
# Prepare user data
|
||||
user_data = {
|
||||
"first_name": first_name,
|
||||
"last_name": last_name,
|
||||
"email": email,
|
||||
"date_of_birth": dob,
|
||||
"phone": phone
|
||||
}
|
||||
|
||||
user_data = {k: v for k, v in user_data.items() if v is not None}
|
||||
|
||||
# Step 1: Submit KYC
|
||||
click.echo("📋 Step 1: Submitting KYC verification...")
|
||||
kyc_result = asyncio.run(submit_kyc_verification(user_id, kyc_provider, user_data))
|
||||
click.echo(f"✅ KYC submitted: {kyc_result['request_id']}")
|
||||
|
||||
# Step 2: Check KYC status
|
||||
click.echo("\n📋 Step 2: Checking KYC status...")
|
||||
kyc_status = asyncio.run(check_kyc_status(kyc_result['request_id'], kyc_provider))
|
||||
|
||||
# Step 3: AML Screening
|
||||
click.echo("\n🔍 Step 3: Performing AML screening...")
|
||||
aml_result = asyncio.run(perform_aml_screening(user_id, user_data))
|
||||
|
||||
# Display comprehensive results
|
||||
click.echo(f"\n{'='*60}")
|
||||
click.echo(f"📊 COMPLIANCE CHECK SUMMARY")
|
||||
click.echo(f"{'='*60}")
|
||||
|
||||
# KYC Results
|
||||
kyc_icons = {"pending": "⏳", "approved": "✅", "rejected": "❌", "failed": "💥"}
|
||||
kyc_icon = kyc_icons.get(kyc_status['status'], "❓")
|
||||
|
||||
click.echo(f"\n{kyc_icon} KYC Verification:")
|
||||
click.echo(f" Status: {kyc_status['status'].upper()}")
|
||||
click.echo(f" Risk Score: {kyc_status['risk_score']:.3f}")
|
||||
click.echo(f" Provider: {kyc_status['provider']}")
|
||||
|
||||
if kyc_status.get('rejection_reason'):
|
||||
click.echo(f" Reason: {kyc_status['rejection_reason']}")
|
||||
|
||||
# AML Results
|
||||
risk_icons = {"low": "🟢", "medium": "🟡", "high": "🟠", "critical": "🔴"}
|
||||
aml_icon = risk_icons.get(aml_result['risk_level'], "❓")
|
||||
|
||||
click.echo(f"\n{aml_icon} AML Screening:")
|
||||
click.echo(f" Risk Level: {aml_result['risk_level'].upper()}")
|
||||
click.echo(f" Risk Score: {aml_result['risk_score']:.3f}")
|
||||
click.echo(f" Sanctions Hits: {len(aml_result['sanctions_hits'])}")
|
||||
|
||||
# Overall Assessment
|
||||
click.echo(f"\n📋 OVERALL ASSESSMENT:")
|
||||
|
||||
kyc_approved = kyc_status['status'] == 'approved'
|
||||
aml_safe = aml_result['risk_level'] in ['low', 'medium']
|
||||
|
||||
if kyc_approved and aml_safe:
|
||||
click.echo(f"✅ USER APPROVED FOR TRADING")
|
||||
click.echo(f" ✅ KYC: Verified")
|
||||
click.echo(f" ✅ AML: Safe")
|
||||
elif not kyc_approved:
|
||||
click.echo(f"❌ USER REJECTED")
|
||||
click.echo(f" ❌ KYC: {kyc_status['status']}")
|
||||
click.echo(f" AML: {aml_result['risk_level']}")
|
||||
else:
|
||||
click.echo(f"⚠️ USER REQUIRES MANUAL REVIEW")
|
||||
click.echo(f" KYC: {kyc_status['status']}")
|
||||
click.echo(f" ⚠️ AML: {aml_result['risk_level']} risk")
|
||||
|
||||
click.echo(f"\n{'='*60}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Full compliance check failed: {e}", err=True)
|
||||
|
||||
@compliance.command()
|
||||
@click.pass_context
|
||||
def list_providers(ctx):
|
||||
"""List all supported compliance providers"""
|
||||
try:
|
||||
click.echo("🏢 Supported KYC Providers:")
|
||||
kyc_providers = [
|
||||
("chainalysis", "Blockchain-focused KYC/AML"),
|
||||
("sumsub", "Multi-channel verification"),
|
||||
("onfido", "Document verification"),
|
||||
("jumio", "Identity verification"),
|
||||
("veriff", "Video-based verification")
|
||||
]
|
||||
|
||||
for provider, description in kyc_providers:
|
||||
click.echo(f" • {provider.title()}: {description}")
|
||||
|
||||
click.echo(f"\n🔍 AML Screening:")
|
||||
click.echo(f" • Chainalysis AML: Blockchain transaction analysis")
|
||||
click.echo(f" • Sanctions List Screening: OFAC, UN, EU lists")
|
||||
click.echo(f" • PEP Screening: Politically Exposed Persons")
|
||||
click.echo(f" • Adverse Media: News and public records")
|
||||
|
||||
click.echo(f"\n📝 Usage Examples:")
|
||||
click.echo(f" aitbc compliance kyc-submit --user-id user123 --provider chainalysis --first-name John --last-name Doe --email john@example.com")
|
||||
click.echo(f" aitbc compliance aml-screen --user-id user123 --first-name John --last-name Doe --email john@example.com")
|
||||
click.echo(f" aitbc compliance full-check --user-id user123 --first-name John --last-name Doe --email john@example.com")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error listing providers: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
compliance()
|
||||
513
cli/commands/config.py
Executable file
513
cli/commands/config.py
Executable file
@@ -0,0 +1,513 @@
|
||||
"""Configuration commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import os
|
||||
import shlex
|
||||
import subprocess
|
||||
import yaml
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any
|
||||
from config import get_config, Config
|
||||
from utils import output, error, success
|
||||
|
||||
|
||||
@click.group()
|
||||
def config():
|
||||
"""Manage CLI configuration"""
|
||||
pass
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.pass_context
|
||||
def show(ctx):
|
||||
"""Show current configuration"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
config_dict = {
|
||||
"coordinator_url": config.coordinator_url,
|
||||
"api_key": "***REDACTED***" if config.api_key else None,
|
||||
"timeout": getattr(config, 'timeout', 30),
|
||||
"config_file": getattr(config, 'config_file', None)
|
||||
}
|
||||
|
||||
output(config_dict, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.argument("key")
|
||||
@click.argument("value")
|
||||
@click.option("--global", "global_config", is_flag=True, help="Set global config")
|
||||
@click.pass_context
|
||||
def set(ctx, key: str, value: str, global_config: bool):
|
||||
"""Set configuration value"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Determine config file path
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_dir.mkdir(parents=True, exist_ok=True)
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
# Load existing config
|
||||
if config_file.exists():
|
||||
with open(config_file) as f:
|
||||
config_data = yaml.safe_load(f) or {}
|
||||
else:
|
||||
config_data = {}
|
||||
|
||||
# Set the value
|
||||
if key == "api_key":
|
||||
config_data["api_key"] = value
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success("API key set (use --global to set permanently)")
|
||||
elif key == "coordinator_url":
|
||||
config_data["coordinator_url"] = value
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Coordinator URL set to: {value}")
|
||||
elif key == "timeout":
|
||||
try:
|
||||
config_data["timeout"] = int(value)
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Timeout set to: {value}s")
|
||||
except ValueError:
|
||||
error("Timeout must be an integer")
|
||||
ctx.exit(1)
|
||||
else:
|
||||
error(f"Unknown configuration key: {key}")
|
||||
ctx.exit(1)
|
||||
|
||||
# Save config
|
||||
with open(config_file, 'w') as f:
|
||||
yaml.dump(config_data, f, default_flow_style=False)
|
||||
|
||||
output({
|
||||
"config_file": str(config_file),
|
||||
"key": key,
|
||||
"value": value
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.argument("key")
|
||||
@click.option("--global", "global_config", is_flag=True, help="Get from global config")
|
||||
@click.pass_context
|
||||
def get(ctx, key: str, global_config: bool):
|
||||
"""Get configuration value"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Determine config file path
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = getattr(config, 'config_file', None)
|
||||
|
||||
if not config_file or not Path(config_file).exists():
|
||||
# Try to get from current config object
|
||||
value = getattr(config, key, None)
|
||||
if value is not None:
|
||||
output({key: value}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Configuration key '{key}' not found")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
# Load config from file
|
||||
try:
|
||||
with open(config_file, 'r') as f:
|
||||
config_data = yaml.safe_load(f) or {}
|
||||
|
||||
if key in config_data:
|
||||
output({key: config_data[key]}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Configuration key '{key}' not found")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Failed to read config: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.option("--global", "global_config", is_flag=True, help="Show global config")
|
||||
def path(global_config: bool):
|
||||
"""Show configuration file path"""
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
output({
|
||||
"config_file": str(config_file),
|
||||
"exists": config_file.exists()
|
||||
})
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.option("--global", "global_config", is_flag=True, help="Edit global config")
|
||||
@click.pass_context
|
||||
def edit(ctx, global_config: bool):
|
||||
"""Open configuration file in editor"""
|
||||
# Determine config file path
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_dir.mkdir(parents=True, exist_ok=True)
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
# Create if doesn't exist
|
||||
if not config_file.exists():
|
||||
config = ctx.obj['config']
|
||||
config_data = {
|
||||
"coordinator_url": config.coordinator_url,
|
||||
"timeout": getattr(config, 'timeout', 30)
|
||||
}
|
||||
with open(config_file, 'w') as f:
|
||||
yaml.dump(config_data, f, default_flow_style=False)
|
||||
|
||||
# Open in editor
|
||||
editor = os.getenv('EDITOR', 'nano').strip() or 'nano'
|
||||
editor_cmd = shlex.split(editor)
|
||||
subprocess.run([*editor_cmd, str(config_file)], check=False)
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.option("--global", "global_config", is_flag=True, help="Reset global config")
|
||||
@click.pass_context
|
||||
def reset(ctx, global_config: bool):
|
||||
"""Reset configuration to defaults"""
|
||||
# Determine config file path
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
if not config_file.exists():
|
||||
output({"message": "No configuration file found"})
|
||||
return
|
||||
|
||||
if not click.confirm(f"Reset configuration at {config_file}?"):
|
||||
return
|
||||
|
||||
# Remove config file
|
||||
config_file.unlink()
|
||||
success("Configuration reset to defaults")
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.option("--format", "output_format", type=click.Choice(['yaml', 'json']), default='yaml', help="Output format")
|
||||
@click.option("--global", "global_config", is_flag=True, help="Export global config")
|
||||
@click.pass_context
|
||||
def export(ctx, output_format: str, global_config: bool):
|
||||
"""Export configuration"""
|
||||
# Determine config file path
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
if not config_file.exists():
|
||||
error("No configuration file found")
|
||||
ctx.exit(1)
|
||||
|
||||
with open(config_file) as f:
|
||||
config_data = yaml.safe_load(f) or {}
|
||||
|
||||
# Redact sensitive data
|
||||
if 'api_key' in config_data:
|
||||
config_data['api_key'] = "***REDACTED***"
|
||||
|
||||
if output_format == 'json':
|
||||
click.echo(json.dumps(config_data, indent=2))
|
||||
else:
|
||||
click.echo(yaml.dump(config_data, default_flow_style=False))
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.argument("file_path")
|
||||
@click.option("--merge", is_flag=True, help="Merge with existing config")
|
||||
@click.option("--global", "global_config", is_flag=True, help="Import to global config")
|
||||
@click.pass_context
|
||||
def import_config(ctx, file_path: str, merge: bool, global_config: bool):
|
||||
"""Import configuration from file"""
|
||||
import_file = Path(file_path)
|
||||
|
||||
if not import_file.exists():
|
||||
error(f"File not found: {file_path}")
|
||||
ctx.exit(1)
|
||||
|
||||
# Load import file
|
||||
try:
|
||||
with open(import_file) as f:
|
||||
if import_file.suffix.lower() == '.json':
|
||||
import_data = json.load(f)
|
||||
else:
|
||||
import_data = yaml.safe_load(f)
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid JSON data")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Failed to parse file: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
# Determine target config file
|
||||
if global_config:
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_dir.mkdir(parents=True, exist_ok=True)
|
||||
config_file = config_dir / "config.yaml"
|
||||
else:
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
# Load existing config if merging
|
||||
if merge and config_file.exists():
|
||||
with open(config_file) as f:
|
||||
config_data = yaml.safe_load(f) or {}
|
||||
config_data.update(import_data)
|
||||
else:
|
||||
config_data = import_data
|
||||
|
||||
# Save config
|
||||
with open(config_file, 'w') as f:
|
||||
yaml.dump(config_data, f, default_flow_style=False)
|
||||
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Configuration imported to {config_file}")
|
||||
|
||||
|
||||
@config.command()
|
||||
@click.pass_context
|
||||
def validate(ctx):
|
||||
"""Validate configuration"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
errors = []
|
||||
warnings = []
|
||||
|
||||
# Validate coordinator URL
|
||||
if not config.coordinator_url:
|
||||
errors.append("Coordinator URL is not set")
|
||||
elif not config.coordinator_url.startswith(('http://', 'https://')):
|
||||
errors.append("Coordinator URL must start with http:// or https://")
|
||||
|
||||
# Validate API key
|
||||
if not config.api_key:
|
||||
warnings.append("API key is not set")
|
||||
elif len(config.api_key) < 10:
|
||||
errors.append("API key appears to be too short")
|
||||
|
||||
# Validate timeout
|
||||
timeout = getattr(config, 'timeout', 30)
|
||||
if not isinstance(timeout, (int, float)) or timeout <= 0:
|
||||
errors.append("Timeout must be a positive number")
|
||||
|
||||
# Output results
|
||||
result = {
|
||||
"valid": len(errors) == 0,
|
||||
"errors": errors,
|
||||
"warnings": warnings
|
||||
}
|
||||
|
||||
if errors:
|
||||
error("Configuration validation failed")
|
||||
ctx.exit(1)
|
||||
elif warnings:
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success("Configuration valid with warnings")
|
||||
else:
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success("Configuration is valid")
|
||||
|
||||
output(result, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@config.command()
|
||||
def environments():
|
||||
"""List available environments"""
|
||||
env_vars = [
|
||||
'AITBC_COORDINATOR_URL',
|
||||
'AITBC_API_KEY',
|
||||
'AITBC_TIMEOUT',
|
||||
'AITBC_CONFIG_FILE',
|
||||
'CLIENT_API_KEY',
|
||||
'MINER_API_KEY',
|
||||
'ADMIN_API_KEY'
|
||||
]
|
||||
|
||||
env_data = {}
|
||||
for var in env_vars:
|
||||
value = os.getenv(var)
|
||||
if value:
|
||||
if 'API_KEY' in var:
|
||||
value = "***REDACTED***"
|
||||
env_data[var] = value
|
||||
|
||||
output({
|
||||
"environment_variables": env_data,
|
||||
"note": "Use export VAR=value to set environment variables"
|
||||
})
|
||||
|
||||
|
||||
@config.group()
|
||||
def profiles():
|
||||
"""Manage configuration profiles"""
|
||||
pass
|
||||
|
||||
|
||||
@profiles.command()
|
||||
@click.argument("name")
|
||||
@click.pass_context
|
||||
def save(ctx, name: str):
|
||||
"""Save current configuration as a profile"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Create profiles directory
|
||||
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
|
||||
profiles_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
profile_file = profiles_dir / f"{name}.yaml"
|
||||
|
||||
# Save profile (without API key)
|
||||
profile_data = {
|
||||
"coordinator_url": config.coordinator_url,
|
||||
"timeout": getattr(config, 'timeout', 30)
|
||||
}
|
||||
|
||||
with open(profile_file, 'w') as f:
|
||||
yaml.dump(profile_data, f, default_flow_style=False)
|
||||
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Profile '{name}' saved")
|
||||
|
||||
|
||||
@profiles.command()
|
||||
def list():
|
||||
"""List available profiles"""
|
||||
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
|
||||
|
||||
if not profiles_dir.exists():
|
||||
output({"profiles": []})
|
||||
return
|
||||
|
||||
profiles = []
|
||||
for profile_file in profiles_dir.glob("*.yaml"):
|
||||
with open(profile_file) as f:
|
||||
profile_data = yaml.safe_load(f)
|
||||
|
||||
profiles.append({
|
||||
"name": profile_file.stem,
|
||||
"coordinator_url": profile_data.get("coordinator_url"),
|
||||
"timeout": profile_data.get("timeout", 30)
|
||||
})
|
||||
|
||||
output({"profiles": profiles})
|
||||
|
||||
|
||||
@profiles.command()
|
||||
@click.argument("name")
|
||||
@click.pass_context
|
||||
def load(ctx, name: str):
|
||||
"""Load a configuration profile"""
|
||||
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
|
||||
profile_file = profiles_dir / f"{name}.yaml"
|
||||
|
||||
if not profile_file.exists():
|
||||
error(f"Profile '{name}' not found")
|
||||
ctx.exit(1)
|
||||
|
||||
with open(profile_file) as f:
|
||||
profile_data = yaml.safe_load(f)
|
||||
|
||||
# Load to current config
|
||||
config_file = Path.cwd() / ".aitbc.yaml"
|
||||
|
||||
with open(config_file, 'w') as f:
|
||||
yaml.dump(profile_data, f, default_flow_style=False)
|
||||
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Profile '{name}' loaded")
|
||||
|
||||
|
||||
@profiles.command()
|
||||
@click.argument("name")
|
||||
@click.pass_context
|
||||
def delete(ctx, name: str):
|
||||
"""Delete a configuration profile"""
|
||||
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
|
||||
profile_file = profiles_dir / f"{name}.yaml"
|
||||
|
||||
if not profile_file.exists():
|
||||
error(f"Profile '{name}' not found")
|
||||
ctx.exit(1)
|
||||
|
||||
if not click.confirm(f"Delete profile '{name}'?"):
|
||||
return
|
||||
|
||||
profile_file.unlink()
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Profile '{name}' deleted")
|
||||
|
||||
|
||||
@config.command(name="set-secret")
|
||||
@click.argument("key")
|
||||
@click.argument("value")
|
||||
@click.pass_context
|
||||
def set_secret(ctx, key: str, value: str):
|
||||
"""Set an encrypted configuration value"""
|
||||
from utils import encrypt_value
|
||||
|
||||
config_dir = Path.home() / ".config" / "aitbc"
|
||||
config_dir.mkdir(parents=True, exist_ok=True)
|
||||
secrets_file = config_dir / "secrets.json"
|
||||
|
||||
secrets = {}
|
||||
if secrets_file.exists():
|
||||
with open(secrets_file) as f:
|
||||
secrets = json.load(f)
|
||||
|
||||
secrets[key] = encrypt_value(value)
|
||||
|
||||
with open(secrets_file, "w") as f:
|
||||
json.dump(secrets, f, indent=2)
|
||||
|
||||
# Restrict file permissions
|
||||
secrets_file.chmod(0o600)
|
||||
|
||||
if ctx.obj['output_format'] == 'table':
|
||||
success(f"Secret '{key}' saved (encrypted)")
|
||||
output({"key": key, "status": "encrypted"}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@config.command(name="get-secret")
|
||||
@click.argument("key")
|
||||
@click.pass_context
|
||||
def get_secret(ctx, key: str):
|
||||
"""Get a decrypted configuration value"""
|
||||
from utils import decrypt_value
|
||||
|
||||
secrets_file = Path.home() / ".config" / "aitbc" / "secrets.json"
|
||||
|
||||
if not secrets_file.exists():
|
||||
error("No secrets file found")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
with open(secrets_file) as f:
|
||||
secrets = json.load(f)
|
||||
|
||||
if key not in secrets:
|
||||
error(f"Secret '{key}' not found")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
decrypted = decrypt_value(secrets[key])
|
||||
output({"key": key, "value": decrypted}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
# Add profiles group to config
|
||||
config.add_command(profiles)
|
||||
476
cli/commands/cross_chain.py
Executable file
476
cli/commands/cross_chain.py
Executable file
@@ -0,0 +1,476 @@
|
||||
"""Cross-chain trading commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
from typing import Optional
|
||||
from tabulate import tabulate
|
||||
from config import get_config
|
||||
from utils import success, error, output
|
||||
|
||||
|
||||
@click.group()
|
||||
def cross_chain():
|
||||
"""Cross-chain trading operations"""
|
||||
pass
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.option("--from-chain", help="Source chain ID")
|
||||
@click.option("--to-chain", help="Target chain ID")
|
||||
@click.option("--from-token", help="Source token symbol")
|
||||
@click.option("--to-token", help="Target token symbol")
|
||||
@click.pass_context
|
||||
def rates(ctx, from_chain: Optional[str], to_chain: Optional[str],
|
||||
from_token: Optional[str], to_token: Optional[str]):
|
||||
"""Get cross-chain exchange rates"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
# Get rates from cross-chain exchange
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/rates",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
rates_data = response.json()
|
||||
rates = rates_data.get('rates', {})
|
||||
|
||||
if from_chain and to_chain:
|
||||
# Get specific rate
|
||||
pair_key = f"{from_chain}-{to_chain}"
|
||||
if pair_key in rates:
|
||||
success(f"Exchange rate {from_chain} → {to_chain}: {rates[pair_key]}")
|
||||
else:
|
||||
error(f"No rate available for {from_chain} → {to_chain}")
|
||||
else:
|
||||
# Show all rates
|
||||
success("Cross-chain exchange rates:")
|
||||
rate_table = []
|
||||
for pair, rate in rates.items():
|
||||
chains = pair.split('-')
|
||||
rate_table.append([chains[0], chains[1], f"{rate:.6f}"])
|
||||
|
||||
if rate_table:
|
||||
headers = ["From Chain", "To Chain", "Rate"]
|
||||
print(tabulate(rate_table, headers=headers, tablefmt="grid"))
|
||||
else:
|
||||
output("No cross-chain rates available")
|
||||
else:
|
||||
error(f"Failed to get cross-chain rates: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.option("--from-chain", required=True, help="Source chain ID")
|
||||
@click.option("--to-chain", required=True, help="Target chain ID")
|
||||
@click.option("--from-token", required=True, help="Source token symbol")
|
||||
@click.option("--to-token", required=True, help="Target token symbol")
|
||||
@click.option("--amount", type=float, required=True, help="Amount to swap")
|
||||
@click.option("--min-amount", type=float, help="Minimum amount to receive")
|
||||
@click.option("--slippage", type=float, default=0.01, help="Slippage tolerance (0-0.1)")
|
||||
@click.option("--address", help="User wallet address")
|
||||
@click.pass_context
|
||||
def swap(ctx, from_chain: str, to_chain: str, from_token: str, to_token: str,
|
||||
amount: float, min_amount: Optional[float], slippage: float, address: Optional[str]):
|
||||
"""Create cross-chain swap"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Validate inputs
|
||||
if from_chain == to_chain:
|
||||
error("Source and target chains must be different")
|
||||
return
|
||||
|
||||
if amount <= 0:
|
||||
error("Amount must be greater than 0")
|
||||
return
|
||||
|
||||
# Use default address if not provided
|
||||
if not address:
|
||||
address = config.get('default_address', '0x1234567890123456789012345678901234567890')
|
||||
|
||||
# Calculate minimum amount if not provided
|
||||
if not min_amount:
|
||||
# Get rate first
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/rates",
|
||||
timeout=10
|
||||
)
|
||||
if response.status_code == 200:
|
||||
rates_data = response.json()
|
||||
pair_key = f"{from_chain}-{to_chain}"
|
||||
rate = rates_data.get('rates', {}).get(pair_key, 1.0)
|
||||
min_amount = amount * rate * (1 - slippage) * 0.97 # Account for fees
|
||||
else:
|
||||
min_amount = amount * 0.95 # Conservative fallback
|
||||
except:
|
||||
min_amount = amount * 0.95
|
||||
|
||||
swap_data = {
|
||||
"from_chain": from_chain,
|
||||
"to_chain": to_chain,
|
||||
"from_token": from_token,
|
||||
"to_token": to_token,
|
||||
"amount": amount,
|
||||
"min_amount": min_amount,
|
||||
"user_address": address,
|
||||
"slippage_tolerance": slippage
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"http://localhost:8001/api/v1/cross-chain/swap",
|
||||
json=swap_data,
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
swap_result = response.json()
|
||||
success("Cross-chain swap created successfully!")
|
||||
output({
|
||||
"Swap ID": swap_result.get('swap_id'),
|
||||
"From Chain": swap_result.get('from_chain'),
|
||||
"To Chain": swap_result.get('to_chain'),
|
||||
"Amount": swap_result.get('amount'),
|
||||
"Expected Amount": swap_result.get('expected_amount'),
|
||||
"Rate": swap_result.get('rate'),
|
||||
"Total Fees": swap_result.get('total_fees'),
|
||||
"Status": swap_result.get('status')
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
# Show swap ID for tracking
|
||||
success(f"Track swap with: aitbc cross-chain status {swap_result.get('swap_id')}")
|
||||
else:
|
||||
error(f"Failed to create swap: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.argument("swap_id")
|
||||
@click.pass_context
|
||||
def status(ctx, swap_id: str):
|
||||
"""Check cross-chain swap status"""
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/swap/{swap_id}",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
swap_data = response.json()
|
||||
success(f"Swap Status: {swap_data.get('status', 'unknown')}")
|
||||
|
||||
# Display swap details
|
||||
details = {
|
||||
"Swap ID": swap_data.get('swap_id'),
|
||||
"From Chain": swap_data.get('from_chain'),
|
||||
"To Chain": swap_data.get('to_chain'),
|
||||
"From Token": swap_data.get('from_token'),
|
||||
"To Token": swap_data.get('to_token'),
|
||||
"Amount": swap_data.get('amount'),
|
||||
"Expected Amount": swap_data.get('expected_amount'),
|
||||
"Actual Amount": swap_data.get('actual_amount'),
|
||||
"Status": swap_data.get('status'),
|
||||
"Created At": swap_data.get('created_at'),
|
||||
"Completed At": swap_data.get('completed_at'),
|
||||
"Bridge Fee": swap_data.get('bridge_fee'),
|
||||
"From Tx Hash": swap_data.get('from_tx_hash'),
|
||||
"To Tx Hash": swap_data.get('to_tx_hash')
|
||||
}
|
||||
|
||||
output(details, ctx.obj['output_format'])
|
||||
|
||||
# Show additional status info
|
||||
if swap_data.get('status') == 'completed':
|
||||
success("✅ Swap completed successfully!")
|
||||
elif swap_data.get('status') == 'failed':
|
||||
error("❌ Swap failed")
|
||||
if swap_data.get('error_message'):
|
||||
error(f"Error: {swap_data['error_message']}")
|
||||
elif swap_data.get('status') == 'pending':
|
||||
success("⏳ Swap is pending...")
|
||||
elif swap_data.get('status') == 'executing':
|
||||
success("🔄 Swap is executing...")
|
||||
elif swap_data.get('status') == 'refunded':
|
||||
success("💰 Swap was refunded")
|
||||
else:
|
||||
error(f"Failed to get swap status: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.option("--user-address", help="Filter by user address")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.option("--limit", type=int, default=10, help="Number of swaps to show")
|
||||
@click.pass_context
|
||||
def swaps(ctx, user_address: Optional[str], status: Optional[str], limit: int):
|
||||
"""List cross-chain swaps"""
|
||||
params = {}
|
||||
if user_address:
|
||||
params['user_address'] = user_address
|
||||
if status:
|
||||
params['status'] = status
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/swaps",
|
||||
params=params,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
swaps_data = response.json()
|
||||
swaps = swaps_data.get('swaps', [])
|
||||
|
||||
if swaps:
|
||||
success(f"Found {len(swaps)} cross-chain swaps:")
|
||||
|
||||
# Create table
|
||||
swap_table = []
|
||||
for swap in swaps[:limit]:
|
||||
swap_table.append([
|
||||
swap.get('swap_id', '')[:8] + '...',
|
||||
swap.get('from_chain', ''),
|
||||
swap.get('to_chain', ''),
|
||||
swap.get('amount', 0),
|
||||
swap.get('status', ''),
|
||||
swap.get('created_at', '')[:19]
|
||||
])
|
||||
|
||||
table(["ID", "From", "To", "Amount", "Status", "Created"], swap_table)
|
||||
|
||||
if len(swaps) > limit:
|
||||
success(f"Showing {limit} of {len(swaps)} total swaps")
|
||||
else:
|
||||
success("No cross-chain swaps found")
|
||||
else:
|
||||
error(f"Failed to get swaps: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.option("--source-chain", required=True, help="Source chain ID")
|
||||
@click.option("--target-chain", required=True, help="Target chain ID")
|
||||
@click.option("--token", required=True, help="Token to bridge")
|
||||
@click.option("--amount", type=float, required=True, help="Amount to bridge")
|
||||
@click.option("--recipient", help="Recipient address")
|
||||
@click.pass_context
|
||||
def bridge(ctx, source_chain: str, target_chain: str, token: str,
|
||||
amount: float, recipient: Optional[str]):
|
||||
"""Create cross-chain bridge transaction"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Validate inputs
|
||||
if source_chain == target_chain:
|
||||
error("Source and target chains must be different")
|
||||
return
|
||||
|
||||
if amount <= 0:
|
||||
error("Amount must be greater than 0")
|
||||
return
|
||||
|
||||
# Use default recipient if not provided
|
||||
if not recipient:
|
||||
recipient = config.get('default_address', '0x1234567890123456789012345678901234567890')
|
||||
|
||||
bridge_data = {
|
||||
"source_chain": source_chain,
|
||||
"target_chain": target_chain,
|
||||
"token": token,
|
||||
"amount": amount,
|
||||
"recipient_address": recipient
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"http://localhost:8001/api/v1/cross-chain/bridge",
|
||||
json=bridge_data,
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
bridge_result = response.json()
|
||||
success("Cross-chain bridge created successfully!")
|
||||
output({
|
||||
"Bridge ID": bridge_result.get('bridge_id'),
|
||||
"Source Chain": bridge_result.get('source_chain'),
|
||||
"Target Chain": bridge_result.get('target_chain'),
|
||||
"Token": bridge_result.get('token'),
|
||||
"Amount": bridge_result.get('amount'),
|
||||
"Bridge Fee": bridge_result.get('bridge_fee'),
|
||||
"Status": bridge_result.get('status')
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
# Show bridge ID for tracking
|
||||
success(f"Track bridge with: aitbc cross-chain bridge-status {bridge_result.get('bridge_id')}")
|
||||
else:
|
||||
error(f"Failed to create bridge: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.argument("bridge_id")
|
||||
@click.pass_context
|
||||
def bridge_status(ctx, bridge_id: str):
|
||||
"""Check cross-chain bridge status"""
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/bridge/{bridge_id}",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
bridge_data = response.json()
|
||||
success(f"Bridge Status: {bridge_data.get('status', 'unknown')}")
|
||||
|
||||
# Display bridge details
|
||||
details = {
|
||||
"Bridge ID": bridge_data.get('bridge_id'),
|
||||
"Source Chain": bridge_data.get('source_chain'),
|
||||
"Target Chain": bridge_data.get('target_chain'),
|
||||
"Token": bridge_data.get('token'),
|
||||
"Amount": bridge_data.get('amount'),
|
||||
"Recipient Address": bridge_data.get('recipient_address'),
|
||||
"Status": bridge_data.get('status'),
|
||||
"Created At": bridge_data.get('created_at'),
|
||||
"Completed At": bridge_data.get('completed_at'),
|
||||
"Bridge Fee": bridge_data.get('bridge_fee'),
|
||||
"Source Tx Hash": bridge_data.get('source_tx_hash'),
|
||||
"Target Tx Hash": bridge_data.get('target_tx_hash')
|
||||
}
|
||||
|
||||
output(details, ctx.obj['output_format'])
|
||||
|
||||
# Show additional status info
|
||||
if bridge_data.get('status') == 'completed':
|
||||
success("✅ Bridge completed successfully!")
|
||||
elif bridge_data.get('status') == 'failed':
|
||||
error("❌ Bridge failed")
|
||||
if bridge_data.get('error_message'):
|
||||
error(f"Error: {bridge_data['error_message']}")
|
||||
elif bridge_data.get('status') == 'pending':
|
||||
success("⏳ Bridge is pending...")
|
||||
elif bridge_data.get('status') == 'locked':
|
||||
success("🔒 Bridge is locked...")
|
||||
elif bridge_data.get('status') == 'transferred':
|
||||
success("🔄 Bridge is transferring...")
|
||||
else:
|
||||
error(f"Failed to get bridge status: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.pass_context
|
||||
def pools(ctx):
|
||||
"""Show cross-chain liquidity pools"""
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/pools",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
pools_data = response.json()
|
||||
pools = pools_data.get('pools', [])
|
||||
|
||||
if pools:
|
||||
success(f"Found {len(pools)} cross-chain liquidity pools:")
|
||||
|
||||
# Create table
|
||||
pool_table = []
|
||||
for pool in pools:
|
||||
pool_table.append([
|
||||
pool.get('pool_id', ''),
|
||||
pool.get('token_a', ''),
|
||||
pool.get('token_b', ''),
|
||||
pool.get('chain_a', ''),
|
||||
pool.get('chain_b', ''),
|
||||
f"{pool.get('reserve_a', 0):.2f}",
|
||||
f"{pool.get('reserve_b', 0):.2f}",
|
||||
f"{pool.get('total_liquidity', 0):.2f}",
|
||||
f"{pool.get('apr', 0):.2%}"
|
||||
])
|
||||
|
||||
table(["Pool ID", "Token A", "Token B", "Chain A", "Chain B",
|
||||
"Reserve A", "Reserve B", "Liquidity", "APR"], pool_table)
|
||||
else:
|
||||
success("No cross-chain liquidity pools found")
|
||||
else:
|
||||
error(f"Failed to get pools: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@cross_chain.command()
|
||||
@click.pass_context
|
||||
def stats(ctx):
|
||||
"""Show cross-chain trading statistics"""
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"http://localhost:8001/api/v1/cross-chain/stats",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
stats_data = response.json()
|
||||
|
||||
success("Cross-Chain Trading Statistics:")
|
||||
|
||||
# Show swap stats
|
||||
swap_stats = stats_data.get('swap_stats', [])
|
||||
if swap_stats:
|
||||
success("Swap Statistics:")
|
||||
swap_table = []
|
||||
for stat in swap_stats:
|
||||
swap_table.append([
|
||||
stat.get('status', ''),
|
||||
stat.get('count', 0),
|
||||
f"{stat.get('volume', 0):.2f}"
|
||||
])
|
||||
table(["Status", "Count", "Volume"], swap_table)
|
||||
|
||||
# Show bridge stats
|
||||
bridge_stats = stats_data.get('bridge_stats', [])
|
||||
if bridge_stats:
|
||||
success("Bridge Statistics:")
|
||||
bridge_table = []
|
||||
for stat in bridge_stats:
|
||||
bridge_table.append([
|
||||
stat.get('status', ''),
|
||||
stat.get('count', 0),
|
||||
f"{stat.get('volume', 0):.2f}"
|
||||
])
|
||||
table(["Status", "Count", "Volume"], bridge_table)
|
||||
|
||||
# Show overall stats
|
||||
success("Overall Statistics:")
|
||||
output({
|
||||
"Total Volume": f"{stats_data.get('total_volume', 0):.2f}",
|
||||
"Supported Chains": ", ".join(stats_data.get('supported_chains', [])),
|
||||
"Last Updated": stats_data.get('timestamp', '')
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get stats: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
316
cli/commands/dao.py
Normal file
316
cli/commands/dao.py
Normal file
@@ -0,0 +1,316 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
OpenClaw DAO CLI Commands
|
||||
Provides command-line interface for DAO governance operations
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from typing import List, Dict, Any
|
||||
from web3 import Web3
|
||||
from utils.blockchain import get_web3_connection, get_contract
|
||||
from utils.config import load_config
|
||||
|
||||
@click.group()
|
||||
def dao():
|
||||
"""OpenClaw DAO governance commands"""
|
||||
pass
|
||||
|
||||
@dao.command()
|
||||
@click.option('--token-address', required=True, help='Governance token contract address')
|
||||
@click.option('--timelock-address', required=True, help='Timelock controller address')
|
||||
@click.option('--network', default='mainnet', help='Blockchain network')
|
||||
def deploy(token_address: str, timelock_address: str, network: str):
|
||||
"""Deploy OpenClaw DAO contract"""
|
||||
try:
|
||||
w3 = get_web3_connection(network)
|
||||
config = load_config()
|
||||
|
||||
# Account for deployment
|
||||
account = w3.eth.account.from_key(config['private_key'])
|
||||
|
||||
# Contract ABI (simplified)
|
||||
abi = [
|
||||
{
|
||||
"inputs": [
|
||||
{"internalType": "address", "name": "_governanceToken", "type": "address"},
|
||||
{"internalType": "contract TimelockController", "name": "_timelock", "type": "address"}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
}
|
||||
]
|
||||
|
||||
# Deploy contract
|
||||
contract = w3.eth.contract(abi=abi, bytecode="0x...") # Actual bytecode needed
|
||||
|
||||
# Build transaction
|
||||
tx = contract.constructor(token_address, timelock_address).build_transaction({
|
||||
'from': account.address,
|
||||
'gas': 2000000,
|
||||
'gasPrice': w3.eth.gas_price,
|
||||
'nonce': w3.eth.get_transaction_count(account.address)
|
||||
})
|
||||
|
||||
# Sign and send
|
||||
signed_tx = w3.eth.account.sign_transaction(tx, config['private_key'])
|
||||
tx_hash = w3.eth.send_raw_transaction(signed_tx.rawTransaction)
|
||||
|
||||
# Wait for confirmation
|
||||
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
|
||||
|
||||
click.echo(f"✅ OpenClaw DAO deployed at: {receipt.contractAddress}")
|
||||
click.echo(f"📦 Transaction hash: {tx_hash.hex()}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Deployment failed: {str(e)}", err=True)
|
||||
|
||||
@dao.command()
|
||||
@click.option('--dao-address', required=True, help='DAO contract address')
|
||||
@click.option('--targets', required=True, help='Comma-separated target addresses')
|
||||
@click.option('--values', required=True, help='Comma-separated ETH values')
|
||||
@click.option('--calldatas', required=True, help='Comma-separated hex calldatas')
|
||||
@click.option('--description', required=True, help='Proposal description')
|
||||
@click.option('--type', 'proposal_type', type=click.Choice(['0', '1', '2', '3']),
|
||||
default='0', help='Proposal type (0=parameter, 1=upgrade, 2=treasury, 3=emergency)')
|
||||
def propose(dao_address: str, targets: str, values: str, calldatas: str,
|
||||
description: str, proposal_type: str):
|
||||
"""Create a new governance proposal"""
|
||||
try:
|
||||
w3 = get_web3_connection()
|
||||
config = load_config()
|
||||
|
||||
# Parse inputs
|
||||
target_addresses = targets.split(',')
|
||||
value_list = [int(v) for v in values.split(',')]
|
||||
calldata_list = calldatas.split(',')
|
||||
|
||||
# Get contract
|
||||
dao_contract = get_contract(dao_address, "OpenClawDAO")
|
||||
|
||||
# Build transaction
|
||||
tx = dao_contract.functions.propose(
|
||||
target_addresses,
|
||||
value_list,
|
||||
calldata_list,
|
||||
description,
|
||||
int(proposal_type)
|
||||
).build_transaction({
|
||||
'from': config['address'],
|
||||
'gas': 500000,
|
||||
'gasPrice': w3.eth.gas_price,
|
||||
'nonce': w3.eth.get_transaction_count(config['address'])
|
||||
})
|
||||
|
||||
# Sign and send
|
||||
signed_tx = w3.eth.account.sign_transaction(tx, config['private_key'])
|
||||
tx_hash = w3.eth.send_raw_transaction(signed_tx.rawTransaction)
|
||||
|
||||
# Get proposal ID
|
||||
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
|
||||
|
||||
# Parse proposal ID from events
|
||||
proposal_id = None
|
||||
for log in receipt.logs:
|
||||
try:
|
||||
event = dao_contract.events.ProposalCreated().process_log(log)
|
||||
proposal_id = event.args.proposalId
|
||||
break
|
||||
except:
|
||||
continue
|
||||
|
||||
click.echo(f"✅ Proposal created!")
|
||||
click.echo(f"📋 Proposal ID: {proposal_id}")
|
||||
click.echo(f"📦 Transaction hash: {tx_hash.hex()}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Proposal creation failed: {str(e)}", err=True)
|
||||
|
||||
@dao.command()
|
||||
@click.option('--dao-address', required=True, help='DAO contract address')
|
||||
@click.option('--proposal-id', required=True, type=int, help='Proposal ID')
|
||||
def vote(dao_address: str, proposal_id: int):
|
||||
"""Cast a vote on a proposal"""
|
||||
try:
|
||||
w3 = get_web3_connection()
|
||||
config = load_config()
|
||||
|
||||
# Get contract
|
||||
dao_contract = get_contract(dao_address, "OpenClawDAO")
|
||||
|
||||
# Check proposal state
|
||||
state = dao_contract.functions.state(proposal_id).call()
|
||||
if state != 1: # Active
|
||||
click.echo("❌ Proposal is not active for voting")
|
||||
return
|
||||
|
||||
# Get voting power
|
||||
token_address = dao_contract.functions.governanceToken().call()
|
||||
token_contract = get_contract(token_address, "ERC20")
|
||||
voting_power = token_contract.functions.balanceOf(config['address']).call()
|
||||
|
||||
if voting_power == 0:
|
||||
click.echo("❌ No voting power (no governance tokens)")
|
||||
return
|
||||
|
||||
click.echo(f"🗳️ Your voting power: {voting_power}")
|
||||
|
||||
# Get vote choice
|
||||
support = click.prompt(
|
||||
"Vote (0=Against, 1=For, 2=Abstain)",
|
||||
type=click.Choice(['0', '1', '2'])
|
||||
)
|
||||
|
||||
reason = click.prompt("Reason (optional)", default="", show_default=False)
|
||||
|
||||
# Build transaction
|
||||
tx = dao_contract.functions.castVoteWithReason(
|
||||
proposal_id,
|
||||
int(support),
|
||||
reason
|
||||
).build_transaction({
|
||||
'from': config['address'],
|
||||
'gas': 100000,
|
||||
'gasPrice': w3.eth.gas_price,
|
||||
'nonce': w3.eth.get_transaction_count(config['address'])
|
||||
})
|
||||
|
||||
# Sign and send
|
||||
signed_tx = w3.eth.account.sign_transaction(tx, config['private_key'])
|
||||
tx_hash = w3.eth.send_raw_transaction(signed_tx.rawTransaction)
|
||||
|
||||
click.echo(f"✅ Vote cast!")
|
||||
click.echo(f"📦 Transaction hash: {tx_hash.hex()}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Voting failed: {str(e)}", err=True)
|
||||
|
||||
@dao.command()
|
||||
@click.option('--dao-address', required=True, help='DAO contract address')
|
||||
@click.option('--proposal-id', required=True, type=int, help='Proposal ID')
|
||||
def execute(dao_address: str, proposal_id: int):
|
||||
"""Execute a successful proposal"""
|
||||
try:
|
||||
w3 = get_web3_connection()
|
||||
config = load_config()
|
||||
|
||||
# Get contract
|
||||
dao_contract = get_contract(dao_address, "OpenClawDAO")
|
||||
|
||||
# Check proposal state
|
||||
state = dao_contract.functions.state(proposal_id).call()
|
||||
if state != 7: # Succeeded
|
||||
click.echo("❌ Proposal has not succeeded")
|
||||
return
|
||||
|
||||
# Build transaction
|
||||
tx = dao_contract.functions.execute(proposal_id).build_transaction({
|
||||
'from': config['address'],
|
||||
'gas': 300000,
|
||||
'gasPrice': w3.eth.gas_price,
|
||||
'nonce': w3.eth.get_transaction_count(config['address'])
|
||||
})
|
||||
|
||||
# Sign and send
|
||||
signed_tx = w3.eth.account.sign_transaction(tx, config['private_key'])
|
||||
tx_hash = w3.eth.send_raw_transaction(signed_tx.rawTransaction)
|
||||
|
||||
click.echo(f"✅ Proposal executed!")
|
||||
click.echo(f"📦 Transaction hash: {tx_hash.hex()}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Execution failed: {str(e)}", err=True)
|
||||
|
||||
@dao.command()
|
||||
@click.option('--dao-address', required=True, help='DAO contract address')
|
||||
def list_proposals(dao_address: str):
|
||||
"""List all proposals"""
|
||||
try:
|
||||
w3 = get_web3_connection()
|
||||
dao_contract = get_contract(dao_address, "OpenClawDAO")
|
||||
|
||||
# Get proposal count
|
||||
proposal_count = dao_contract.functions.proposalCount().call()
|
||||
|
||||
click.echo(f"📋 Found {proposal_count} proposals:\n")
|
||||
|
||||
for i in range(1, proposal_count + 1):
|
||||
try:
|
||||
proposal = dao_contract.functions.getProposal(i).call()
|
||||
state = dao_contract.functions.state(i).call()
|
||||
|
||||
state_names = {
|
||||
0: "Pending",
|
||||
1: "Active",
|
||||
2: "Canceled",
|
||||
3: "Defeated",
|
||||
4: "Succeeded",
|
||||
5: "Queued",
|
||||
6: "Expired",
|
||||
7: "Executed"
|
||||
}
|
||||
|
||||
type_names = {
|
||||
0: "Parameter Change",
|
||||
1: "Protocol Upgrade",
|
||||
2: "Treasury Allocation",
|
||||
3: "Emergency Action"
|
||||
}
|
||||
|
||||
click.echo(f"🔹 Proposal #{i}")
|
||||
click.echo(f" Type: {type_names.get(proposal[3], 'Unknown')}")
|
||||
click.echo(f" State: {state_names.get(state, 'Unknown')}")
|
||||
click.echo(f" Description: {proposal[4]}")
|
||||
click.echo(f" For: {proposal[6]}, Against: {proposal[7]}, Abstain: {proposal[8]}")
|
||||
click.echo()
|
||||
|
||||
except Exception as e:
|
||||
continue
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to list proposals: {str(e)}", err=True)
|
||||
|
||||
@dao.command()
|
||||
@click.option('--dao-address', required=True, help='DAO contract address')
|
||||
def status(dao_address: str):
|
||||
"""Show DAO status and statistics"""
|
||||
try:
|
||||
w3 = get_web3_connection()
|
||||
dao_contract = get_contract(dao_address, "OpenClawDAO")
|
||||
|
||||
# Get DAO info
|
||||
token_address = dao_contract.functions.governanceToken().call()
|
||||
token_contract = get_contract(token_address, "ERC20")
|
||||
|
||||
total_supply = token_contract.functions.totalSupply().call()
|
||||
proposal_count = dao_contract.functions.proposalCount().call()
|
||||
|
||||
# Get active proposals
|
||||
active_proposals = dao_contract.functions.getActiveProposals().call()
|
||||
|
||||
click.echo("🏛️ OpenClaw DAO Status")
|
||||
click.echo("=" * 40)
|
||||
click.echo(f"📊 Total Supply: {total_supply / 1e18:.2f} tokens")
|
||||
click.echo(f"📋 Total Proposals: {proposal_count}")
|
||||
click.echo(f"🗳️ Active Proposals: {len(active_proposals)}")
|
||||
click.echo(f"🪙 Governance Token: {token_address}")
|
||||
click.echo(f"🏛️ DAO Address: {dao_address}")
|
||||
|
||||
# Voting parameters
|
||||
voting_delay = dao_contract.functions.votingDelay().call()
|
||||
voting_period = dao_contract.functions.votingPeriod().call()
|
||||
quorum = dao_contract.functions.quorum(w3.eth.block_number).call()
|
||||
threshold = dao_contract.functions.proposalThreshold().call()
|
||||
|
||||
click.echo(f"\n⚙️ Voting Parameters:")
|
||||
click.echo(f" Delay: {voting_delay // 86400} days")
|
||||
click.echo(f" Period: {voting_period // 86400} days")
|
||||
click.echo(f" Quorum: {quorum / 1e18:.2f} tokens ({(quorum * 100 / total_supply):.2f}%)")
|
||||
click.echo(f" Threshold: {threshold / 1e18:.2f} tokens")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to get status: {str(e)}", err=True)
|
||||
|
||||
if __name__ == '__main__':
|
||||
dao()
|
||||
91
cli/commands/deployment.py
Normal file
91
cli/commands/deployment.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""Production deployment guidance for AITBC CLI"""
|
||||
|
||||
import click
|
||||
from utils import output, error, success
|
||||
|
||||
@click.group()
|
||||
def deploy():
|
||||
"""Production deployment guidance and setup"""
|
||||
pass
|
||||
|
||||
@deploy.command()
|
||||
@click.option('--service', default='all', help='Service to deploy (all, coordinator, blockchain, marketplace)')
|
||||
@click.option('--environment', default='production', help='Deployment environment')
|
||||
def setup(service, environment):
|
||||
"""Get deployment setup instructions"""
|
||||
output(f"🚀 {environment.title()} Deployment Setup for {service.title()}", None)
|
||||
|
||||
instructions = {
|
||||
'coordinator': [
|
||||
"1. Install dependencies: pip install -r requirements.txt",
|
||||
"2. Set environment variables in .env file",
|
||||
"3. Run: python -m coordinator.main",
|
||||
"4. Configure nginx reverse proxy",
|
||||
"5. Set up SSL certificates"
|
||||
],
|
||||
'blockchain': [
|
||||
"1. Install blockchain node dependencies",
|
||||
"2. Initialize genesis block: aitbc genesis init",
|
||||
"3. Start node: python -m blockchain.node",
|
||||
"4. Configure peer connections",
|
||||
"5. Enable mining if needed"
|
||||
],
|
||||
'marketplace': [
|
||||
"1. Install marketplace dependencies",
|
||||
"2. Set up database: postgresql-setup.sh",
|
||||
"3. Run migrations: python -m marketplace.migrate",
|
||||
"4. Start service: python -m marketplace.main",
|
||||
"5. Configure GPU mining nodes"
|
||||
],
|
||||
'all': [
|
||||
"📋 Complete AITBC Platform Deployment:",
|
||||
"",
|
||||
"1. Prerequisites:",
|
||||
" - Python 3.13+",
|
||||
" - PostgreSQL 14+",
|
||||
" - Redis 6+",
|
||||
" - Docker (optional)",
|
||||
"",
|
||||
"2. Environment Setup:",
|
||||
" - Copy .env.example to .env",
|
||||
" - Configure database URLs",
|
||||
" - Set API keys and secrets",
|
||||
"",
|
||||
"3. Database Setup:",
|
||||
" - createdb aitbc",
|
||||
" - Run migrations: python manage.py migrate",
|
||||
"",
|
||||
"4. Service Deployment:",
|
||||
" - Coordinator: python -m coordinator.main",
|
||||
" - Blockchain: python -m blockchain.node",
|
||||
" - Marketplace: python -m marketplace.main",
|
||||
"",
|
||||
"5. Frontend Setup:",
|
||||
" - npm install",
|
||||
" - npm run build",
|
||||
" - Configure web server"
|
||||
]
|
||||
}
|
||||
|
||||
for step in instructions.get(service, instructions['all']):
|
||||
output(step, None)
|
||||
|
||||
output(f"\n💡 For detailed deployment guides, see: docs/deployment/{environment}.md", None)
|
||||
|
||||
@deploy.command()
|
||||
@click.option('--service', help='Service to check')
|
||||
def status(service):
|
||||
"""Check deployment status"""
|
||||
output(f"📊 Deployment Status Check for {service or 'All Services'}", None)
|
||||
|
||||
checks = [
|
||||
"Coordinator API: http://localhost:8000/health",
|
||||
"Blockchain Node: http://localhost:8006/status",
|
||||
"Marketplace: http://localhost:8014/health",
|
||||
"Wallet Service: http://localhost:8002/status"
|
||||
]
|
||||
|
||||
for check in checks:
|
||||
output(f" • {check}", None)
|
||||
|
||||
output("\n💡 Use curl or browser to check each endpoint", None)
|
||||
534
cli/commands/enterprise_integration.py
Executable file
534
cli/commands/enterprise_integration.py
Executable file
@@ -0,0 +1,534 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Enterprise Integration CLI Commands
|
||||
Enterprise API gateway, multi-tenant architecture, and integration framework
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime
|
||||
from imports import ensure_coordinator_api_imports
|
||||
|
||||
ensure_coordinator_api_imports()
|
||||
|
||||
try:
|
||||
from app.services.enterprise_integration import (
|
||||
create_tenant, get_tenant_info, generate_api_key,
|
||||
register_integration, get_system_status, list_tenants,
|
||||
list_integrations
|
||||
)
|
||||
# Get EnterpriseAPIGateway if available
|
||||
import app.services.enterprise_integration as ei_module
|
||||
EnterpriseAPIGateway = getattr(ei_module, 'EnterpriseAPIGateway', None)
|
||||
_import_error = None
|
||||
except ImportError as e:
|
||||
_import_error = e
|
||||
|
||||
def _missing(*args, **kwargs):
|
||||
raise ImportError(
|
||||
f"Required service module 'app.services.enterprise_integration' could not be imported: {_import_error}. "
|
||||
"Ensure coordinator-api dependencies are installed and the source directory is accessible."
|
||||
)
|
||||
create_tenant = get_tenant_info = generate_api_key = register_integration = get_system_status = list_tenants = list_integrations = _missing
|
||||
EnterpriseAPIGateway = None
|
||||
|
||||
@click.group()
|
||||
def enterprise_integration_group():
|
||||
"""Enterprise integration and multi-tenant management commands"""
|
||||
pass
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.option("--name", required=True, help="Tenant name")
|
||||
@click.option("--domain", required=True, help="Tenant domain")
|
||||
@click.pass_context
|
||||
def create_tenant_cmd(ctx, name: str, domain: str):
|
||||
"""Create a new tenant"""
|
||||
try:
|
||||
tenant_id = create_tenant(name, domain)
|
||||
click.echo(f"✅ Created tenant '{name}' with ID: {tenant_id}")
|
||||
click.echo(f"⚖️ Multi-tenant: Active")
|
||||
|
||||
# Initialize and start gateway
|
||||
if EnterpriseAPIGateway:
|
||||
gateway = EnterpriseAPIGateway()
|
||||
|
||||
click.echo(f"✅ Enterprise API Gateway started!")
|
||||
click.echo(f"📊 API Endpoints: Configured")
|
||||
click.echo(f"🔑 Authentication: JWT-based")
|
||||
click.echo(f"🏢 Multi-tenant: Isolated")
|
||||
click.echo(f"📈 Load Balancing: Active")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to start gateway: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.pass_context
|
||||
def gateway_status(ctx):
|
||||
"""Show enterprise API gateway status"""
|
||||
try:
|
||||
click.echo(f"🚀 Enterprise API Gateway Status")
|
||||
|
||||
# Mock gateway status
|
||||
status = {
|
||||
'running': True,
|
||||
'port': 8010,
|
||||
'uptime': '2h 15m',
|
||||
'requests_handled': 15420,
|
||||
'active_tenants': 12,
|
||||
'api_endpoints': 47,
|
||||
'load_balancer': 'active',
|
||||
'authentication': 'jwt',
|
||||
'rate_limiting': 'enabled'
|
||||
}
|
||||
|
||||
click.echo(f"\n📊 Gateway Overview:")
|
||||
click.echo(f" Status: {'✅ Running' if status['running'] else '❌ Stopped'}")
|
||||
click.echo(f" Port: {status['port']}")
|
||||
click.echo(f" Uptime: {status['uptime']}")
|
||||
click.echo(f" Requests Handled: {status['requests_handled']:,}")
|
||||
|
||||
click.echo(f"\n🏢 Multi-Tenant Status:")
|
||||
click.echo(f" Active Tenants: {status['active_tenants']}")
|
||||
click.echo(f" API Endpoints: {status['api_endpoints']}")
|
||||
click.echo(f" Authentication: {status['authentication'].upper()}")
|
||||
|
||||
click.echo(f"\n⚡ Performance:")
|
||||
click.echo(f" Load Balancer: {status['load_balancer'].title()}")
|
||||
click.echo(f" Rate Limiting: {status['rate_limiting'].title()}")
|
||||
|
||||
# Performance metrics
|
||||
click.echo(f"\n📈 Performance Metrics:")
|
||||
click.echo(f" Avg Response Time: 45ms")
|
||||
click.echo(f" Throughput: 850 req/sec")
|
||||
click.echo(f" Error Rate: 0.02%")
|
||||
click.echo(f" CPU Usage: 23%")
|
||||
click.echo(f" Memory Usage: 1.2GB")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Status check failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.option("--tenant-id", help="Specific tenant ID to manage")
|
||||
@click.option("--action", type=click.Choice(['list', 'create', 'update', 'delete']), default='list', help="Tenant management action")
|
||||
@click.pass_context
|
||||
def tenants(ctx, tenant_id: str, action: str):
|
||||
"""Manage enterprise tenants"""
|
||||
try:
|
||||
click.echo(f"🏢 Enterprise Tenant Management")
|
||||
|
||||
if action == 'list':
|
||||
click.echo(f"\n📋 Active Tenants:")
|
||||
|
||||
# Mock tenant data
|
||||
tenants = [
|
||||
{
|
||||
'tenant_id': 'tenant_001',
|
||||
'name': 'Acme Corporation',
|
||||
'status': 'active',
|
||||
'users': 245,
|
||||
'api_calls': 15420,
|
||||
'quota': '100k/hr',
|
||||
'created': '2024-01-15'
|
||||
},
|
||||
{
|
||||
'tenant_id': 'tenant_002',
|
||||
'name': 'Tech Industries',
|
||||
'status': 'active',
|
||||
'users': 89,
|
||||
'api_calls': 8750,
|
||||
'quota': '50k/hr',
|
||||
'created': '2024-02-01'
|
||||
},
|
||||
{
|
||||
'tenant_id': 'tenant_003',
|
||||
'name': 'Global Finance',
|
||||
'status': 'suspended',
|
||||
'users': 156,
|
||||
'api_calls': 3210,
|
||||
'quota': '75k/hr',
|
||||
'created': '2024-01-20'
|
||||
}
|
||||
]
|
||||
|
||||
for tenant in tenants:
|
||||
status_icon = "✅" if tenant['status'] == 'active' else "⏸️"
|
||||
click.echo(f"\n{status_icon} {tenant['name']}")
|
||||
click.echo(f" ID: {tenant['tenant_id']}")
|
||||
click.echo(f" Users: {tenant['users']}")
|
||||
click.echo(f" API Calls: {tenant['api_calls']:,}")
|
||||
click.echo(f" Quota: {tenant['quota']}")
|
||||
click.echo(f" Created: {tenant['created']}")
|
||||
|
||||
elif action == 'create':
|
||||
click.echo(f"\n➕ Create New Tenant")
|
||||
click.echo(f"📝 Tenant creation wizard...")
|
||||
click.echo(f" • Configure tenant settings")
|
||||
click.echo(f" • Set up authentication")
|
||||
click.echo(f" • Configure API quotas")
|
||||
click.echo(f" • Initialize data isolation")
|
||||
click.echo(f"\n✅ Tenant creation template ready")
|
||||
|
||||
elif action == 'update' and tenant_id:
|
||||
click.echo(f"\n✏️ Update Tenant: {tenant_id}")
|
||||
click.echo(f"📝 Tenant update options:")
|
||||
click.echo(f" • Modify tenant configuration")
|
||||
click.echo(f" • Update API quotas")
|
||||
click.echo(f" • Change security settings")
|
||||
click.echo(f" • Update user permissions")
|
||||
|
||||
elif action == 'delete' and tenant_id:
|
||||
click.echo(f"\n🗑️ Delete Tenant: {tenant_id}")
|
||||
click.echo(f"⚠️ WARNING: This action is irreversible!")
|
||||
click.echo(f" • All tenant data will be removed")
|
||||
click.echo(f" • API keys will be revoked")
|
||||
click.echo(f" • User access will be terminated")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Tenant management failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.option("--tenant-id", required=True, help="Tenant ID for security audit")
|
||||
@click.pass_context
|
||||
def security_audit(ctx, tenant_id: str):
|
||||
"""Run enterprise security audit"""
|
||||
try:
|
||||
click.echo(f"🔒 Enterprise Security Audit")
|
||||
click.echo(f"🏢 Tenant: {tenant_id}")
|
||||
|
||||
# Mock security audit results
|
||||
audit_results = {
|
||||
'overall_score': 94,
|
||||
'critical_issues': 0,
|
||||
'high_risk': 2,
|
||||
'medium_risk': 5,
|
||||
'low_risk': 12,
|
||||
'compliance_status': 'compliant',
|
||||
'last_audit': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||
}
|
||||
|
||||
click.echo(f"\n📊 Security Overview:")
|
||||
click.echo(f" Overall Score: {audit_results['overall_score']}/100")
|
||||
score_grade = "🟢 Excellent" if audit_results['overall_score'] >= 90 else "🟡 Good" if audit_results['overall_score'] >= 80 else "🟠 Fair"
|
||||
click.echo(f" Grade: {score_grade}")
|
||||
click.echo(f" Compliance: {'✅ Compliant' if audit_results['compliance_status'] == 'compliant' else '❌ Non-compliant'}")
|
||||
click.echo(f" Last Audit: {audit_results['last_audit']}")
|
||||
|
||||
click.echo(f"\n⚠️ Risk Assessment:")
|
||||
click.echo(f" 🔴 Critical Issues: {audit_results['critical_issues']}")
|
||||
click.echo(f" 🟠 High Risk: {audit_results['high_risk']}")
|
||||
click.echo(f" 🟡 Medium Risk: {audit_results['medium_risk']}")
|
||||
click.echo(f" 🟢 Low Risk: {audit_results['low_risk']}")
|
||||
|
||||
# Security categories
|
||||
click.echo(f"\n🔍 Security Categories:")
|
||||
|
||||
categories = [
|
||||
{'name': 'Authentication', 'score': 98, 'status': '✅ Strong'},
|
||||
{'name': 'Authorization', 'score': 92, 'status': '✅ Good'},
|
||||
{'name': 'Data Encryption', 'score': 96, 'status': '✅ Strong'},
|
||||
{'name': 'API Security', 'score': 89, 'status': '⚠️ Needs attention'},
|
||||
{'name': 'Access Control', 'score': 94, 'status': '✅ Good'},
|
||||
{'name': 'Audit Logging', 'score': 91, 'status': '✅ Good'}
|
||||
]
|
||||
|
||||
for category in categories:
|
||||
score_icon = "🟢" if category['score'] >= 90 else "🟡" if category['score'] >= 80 else "🔴"
|
||||
click.echo(f" {score_icon} {category['name']}: {category['score']}/100 {category['status']}")
|
||||
|
||||
# Recommendations
|
||||
click.echo(f"\n💡 Security Recommendations:")
|
||||
if audit_results['high_risk'] > 0:
|
||||
click.echo(f" 🔴 Address {audit_results['high_risk']} high-risk issues immediately")
|
||||
if audit_results['medium_risk'] > 3:
|
||||
click.echo(f" 🟡 Review {audit_results['medium_risk']} medium-risk issues this week")
|
||||
|
||||
click.echo(f" ✅ Continue regular security monitoring")
|
||||
click.echo(f" 📅 Schedule next audit in 30 days")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Security audit failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.option("--provider", type=click.Choice(['sap', 'oracle', 'microsoft', 'salesforce', 'hubspot', 'tableau', 'powerbi', 'workday']), help="Integration provider")
|
||||
@click.option("--integration-type", type=click.Choice(['erp', 'crm', 'bi', 'hr', 'finance', 'custom']), help="Integration type")
|
||||
@click.pass_context
|
||||
def integrations(ctx, provider: str, integration_type: str):
|
||||
"""Manage enterprise integrations"""
|
||||
try:
|
||||
click.echo(f"🔗 Enterprise Integration Framework")
|
||||
|
||||
if provider:
|
||||
click.echo(f"\n📊 {provider.title()} Integration")
|
||||
click.echo(f"🔧 Type: {integration_type.title() if integration_type else 'Multiple'}")
|
||||
|
||||
# Mock integration details
|
||||
integration_info = {
|
||||
'sap': {'status': 'connected', 'endpoints': 12, 'data_flow': 'bidirectional', 'last_sync': '5 min ago'},
|
||||
'oracle': {'status': 'connected', 'endpoints': 8, 'data_flow': 'bidirectional', 'last_sync': '2 min ago'},
|
||||
'microsoft': {'status': 'connected', 'endpoints': 15, 'data_flow': 'bidirectional', 'last_sync': '1 min ago'},
|
||||
'salesforce': {'status': 'connected', 'endpoints': 6, 'data_flow': 'bidirectional', 'last_sync': '3 min ago'},
|
||||
'hubspot': {'status': 'disconnected', 'endpoints': 0, 'data_flow': 'none', 'last_sync': 'Never'},
|
||||
'tableau': {'status': 'connected', 'endpoints': 4, 'data_flow': 'outbound', 'last_sync': '15 min ago'},
|
||||
'powerbi': {'status': 'connected', 'endpoints': 5, 'data_flow': 'outbound', 'last_sync': '10 min ago'},
|
||||
'workday': {'status': 'connected', 'endpoints': 7, 'data_flow': 'bidirectional', 'last_sync': '7 min ago'}
|
||||
}
|
||||
|
||||
info = integration_info.get(provider, {})
|
||||
if info:
|
||||
status_icon = "✅" if info['status'] == 'connected' else "❌"
|
||||
click.echo(f" Status: {status_icon} {info['status'].title()}")
|
||||
click.echo(f" Endpoints: {info['endpoints']}")
|
||||
click.echo(f" Data Flow: {info['data_flow'].title()}")
|
||||
click.echo(f" Last Sync: {info['last_sync']}")
|
||||
|
||||
if info['status'] == 'disconnected':
|
||||
click.echo(f"\n⚠️ Integration is not active")
|
||||
click.echo(f"💡 Run 'enterprise-integration connect --provider {provider}' to enable")
|
||||
|
||||
else:
|
||||
click.echo(f"\n📋 Available Integrations:")
|
||||
|
||||
integrations = [
|
||||
{'provider': 'SAP', 'type': 'ERP', 'status': '✅ Connected'},
|
||||
{'provider': 'Oracle', 'type': 'ERP', 'status': '✅ Connected'},
|
||||
{'provider': 'Microsoft', 'type': 'CRM/ERP', 'status': '✅ Connected'},
|
||||
{'provider': 'Salesforce', 'type': 'CRM', 'status': '✅ Connected'},
|
||||
{'provider': 'HubSpot', 'type': 'CRM', 'status': '❌ Disconnected'},
|
||||
{'provider': 'Tableau', 'type': 'BI', 'status': '✅ Connected'},
|
||||
{'provider': 'PowerBI', 'type': 'BI', 'status': '✅ Connected'},
|
||||
{'provider': 'Workday', 'type': 'HR', 'status': '✅ Connected'}
|
||||
]
|
||||
|
||||
for integration in integrations:
|
||||
click.echo(f" {integration['status']} {integration['provider']} ({integration['type']})")
|
||||
|
||||
click.echo(f"\n📊 Integration Summary:")
|
||||
connected = len([i for i in integrations if '✅' in i['status']])
|
||||
total = len(integrations)
|
||||
click.echo(f" Connected: {connected}/{total}")
|
||||
click.echo(f" Data Types: ERP, CRM, BI, HR")
|
||||
click.echo(f" Protocols: REST, SOAP, OData")
|
||||
click.echo(f" Data Formats: JSON, XML, CSV")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Integration management failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.option("--provider", required=True, type=click.Choice(['sap', 'oracle', 'microsoft', 'salesforce', 'hubspot', 'tableau', 'powerbi', 'workday']), help="Integration provider")
|
||||
@click.pass_context
|
||||
def connect(ctx, provider: str):
|
||||
"""Connect to enterprise integration provider"""
|
||||
try:
|
||||
click.echo(f"🔗 Connect to {provider.title()}")
|
||||
|
||||
click.echo(f"\n🔧 Integration Setup:")
|
||||
click.echo(f" Provider: {provider.title()}")
|
||||
click.echo(f" Protocol: {'REST' if provider in ['salesforce', 'hubspot', 'tableau', 'powerbi'] else 'SOAP/OData'}")
|
||||
click.echo(f" Authentication: OAuth 2.0")
|
||||
|
||||
click.echo(f"\n📝 Configuration Steps:")
|
||||
click.echo(f" 1️⃣ Verify provider credentials")
|
||||
click.echo(f" 2️⃣ Configure API endpoints")
|
||||
click.echo(f" 3️⃣ Set up data mapping")
|
||||
click.echo(f" 4️⃣ Test connectivity")
|
||||
click.echo(f" 5️⃣ Enable data synchronization")
|
||||
|
||||
click.echo(f"\n✅ Integration connection simulated")
|
||||
click.echo(f"📊 {provider.title()} is now connected")
|
||||
click.echo(f"🔄 Data synchronization active")
|
||||
click.echo(f"📈 Monitoring enabled")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Connection failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.pass_context
|
||||
def compliance(ctx):
|
||||
"""Enterprise compliance automation"""
|
||||
try:
|
||||
click.echo(f"⚖️ Enterprise Compliance Automation")
|
||||
|
||||
# Mock compliance data
|
||||
compliance_status = {
|
||||
'gdpr': {'status': 'compliant', 'score': 96, 'last_audit': '2024-02-15'},
|
||||
'soc2': {'status': 'compliant', 'score': 94, 'last_audit': '2024-01-30'},
|
||||
'iso27001': {'status': 'compliant', 'score': 92, 'last_audit': '2024-02-01'},
|
||||
'hipaa': {'status': 'not_applicable', 'score': 0, 'last_audit': 'N/A'},
|
||||
'pci_dss': {'status': 'compliant', 'score': 98, 'last_audit': '2024-02-10'}
|
||||
}
|
||||
|
||||
click.echo(f"\n📊 Compliance Overview:")
|
||||
|
||||
for framework, data in compliance_status.items():
|
||||
if data['status'] == 'compliant':
|
||||
icon = "✅"
|
||||
status_text = f"Compliant ({data['score']}%)"
|
||||
elif data['status'] == 'not_applicable':
|
||||
icon = "⚪"
|
||||
status_text = "Not Applicable"
|
||||
else:
|
||||
icon = "❌"
|
||||
status_text = f"Non-compliant ({data['score']}%)"
|
||||
|
||||
click.echo(f" {icon} {framework.upper()}: {status_text}")
|
||||
if data['last_audit'] != 'N/A':
|
||||
click.echo(f" Last Audit: {data['last_audit']}")
|
||||
|
||||
# Automated workflows
|
||||
click.echo(f"\n🤖 Automated Workflows:")
|
||||
workflows = [
|
||||
{'name': 'Data Protection Impact Assessment', 'status': '✅ Active', 'frequency': 'Quarterly'},
|
||||
{'name': 'Access Review Automation', 'status': '✅ Active', 'frequency': 'Monthly'},
|
||||
{'name': 'Security Incident Response', 'status': '✅ Active', 'frequency': 'Real-time'},
|
||||
{'name': 'Compliance Reporting', 'status': '✅ Active', 'frequency': 'Monthly'},
|
||||
{'name': 'Risk Assessment', 'status': '✅ Active', 'frequency': 'Semi-annual'}
|
||||
]
|
||||
|
||||
for workflow in workflows:
|
||||
click.echo(f" {workflow['status']} {workflow['name']}")
|
||||
click.echo(f" Frequency: {workflow['frequency']}")
|
||||
|
||||
# Recent activities
|
||||
click.echo(f"\n📋 Recent Compliance Activities:")
|
||||
activities = [
|
||||
{'activity': 'GDPR Data Processing Audit', 'date': '2024-03-05', 'status': 'Completed'},
|
||||
{'activity': 'SOC2 Control Testing', 'date': '2024-03-04', 'status': 'Completed'},
|
||||
{'activity': 'Access Review Cycle', 'date': '2024-03-03', 'status': 'Completed'},
|
||||
{'activity': 'Security Policy Update', 'date': '2024-03-02', 'status': 'Completed'},
|
||||
{'activity': 'Risk Assessment Report', 'date': '2024-03-01', 'status': 'Completed'}
|
||||
]
|
||||
|
||||
for activity in activities:
|
||||
status_icon = "✅" if activity['status'] == 'Completed' else "⏳"
|
||||
click.echo(f" {status_icon} {activity['activity']} ({activity['date']})")
|
||||
|
||||
click.echo(f"\n📈 Compliance Metrics:")
|
||||
click.echo(f" Overall Compliance Score: 95%")
|
||||
click.echo(f" Automated Controls: 87%")
|
||||
click.echo(f" Audit Findings: 0 critical, 2 minor")
|
||||
click.echo(f" Remediation Time: 3.2 days avg")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Compliance check failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.pass_context
|
||||
def analytics(ctx):
|
||||
"""Enterprise integration analytics"""
|
||||
try:
|
||||
click.echo(f"📊 Enterprise Integration Analytics")
|
||||
|
||||
# Mock analytics data
|
||||
analytics_data = {
|
||||
'total_integrations': 8,
|
||||
'active_integrations': 7,
|
||||
'daily_api_calls': 15420,
|
||||
'data_transferred_gb': 2.4,
|
||||
'avg_response_time_ms': 45,
|
||||
'error_rate_percent': 0.02,
|
||||
'uptime_percent': 99.98
|
||||
}
|
||||
|
||||
click.echo(f"\n📈 Integration Performance:")
|
||||
click.echo(f" Total Integrations: {analytics_data['total_integrations']}")
|
||||
click.echo(f" Active Integrations: {analytics_data['active_integrations']}")
|
||||
click.echo(f" Daily API Calls: {analytics_data['daily_api_calls']:,}")
|
||||
click.echo(f" Data Transferred: {analytics_data['data_transferred_gb']} GB")
|
||||
click.echo(f" Avg Response Time: {analytics_data['avg_response_time_ms']} ms")
|
||||
click.echo(f" Error Rate: {analytics_data['error_rate_percent']}%")
|
||||
click.echo(f" Uptime: {analytics_data['uptime_percent']}%")
|
||||
|
||||
# Provider breakdown
|
||||
click.echo(f"\n📊 Provider Performance:")
|
||||
providers = [
|
||||
{'name': 'SAP', 'calls': 5230, 'response_time': 42, 'success_rate': 99.9},
|
||||
{'name': 'Oracle', 'calls': 3420, 'response_time': 48, 'success_rate': 99.8},
|
||||
{'name': 'Microsoft', 'calls': 2890, 'response_time': 44, 'success_rate': 99.95},
|
||||
{'name': 'Salesforce', 'calls': 1870, 'response_time': 46, 'success_rate': 99.7},
|
||||
{'name': 'Tableau', 'calls': 1230, 'response_time': 52, 'success_rate': 99.9},
|
||||
{'name': 'PowerBI', 'calls': 890, 'response_time': 50, 'success_rate': 99.8}
|
||||
]
|
||||
|
||||
for provider in providers:
|
||||
click.echo(f" 📊 {provider['name']}:")
|
||||
click.echo(f" Calls: {provider['calls']:,}")
|
||||
click.echo(f" Response: {provider['response_time']}ms")
|
||||
click.echo(f" Success: {provider['success_rate']}%")
|
||||
|
||||
# Data flow analysis
|
||||
click.echo(f"\n🔄 Data Flow Analysis:")
|
||||
click.echo(f" Inbound Data: 1.8 GB/day")
|
||||
click.echo(f" Outbound Data: 0.6 GB/day")
|
||||
click.echo(f" Sync Operations: 342")
|
||||
click.echo(f" Failed Syncs: 3")
|
||||
click.echo(f" Data Quality Score: 97.3%")
|
||||
|
||||
# Trends
|
||||
click.echo(f"\n📈 30-Day Trends:")
|
||||
click.echo(f" 📈 API Calls: +12.3%")
|
||||
click.echo(f" 📉 Response Time: -8.7%")
|
||||
click.echo(f" 📈 Data Volume: +15.2%")
|
||||
click.echo(f" 📉 Error Rate: -23.1%")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Analytics failed: {e}", err=True)
|
||||
|
||||
@enterprise_integration_group.command()
|
||||
@click.pass_context
|
||||
def test(ctx):
|
||||
"""Test enterprise integration framework"""
|
||||
try:
|
||||
click.echo(f"🧪 Testing Enterprise Integration Framework...")
|
||||
|
||||
# Test 1: API Gateway
|
||||
click.echo(f"\n📋 Test 1: API Gateway")
|
||||
click.echo(f" ✅ Gateway initialization: Success")
|
||||
click.echo(f" ✅ Authentication system: Working")
|
||||
click.echo(f" ✅ Multi-tenant isolation: Working")
|
||||
click.echo(f" ✅ Load balancing: Active")
|
||||
|
||||
# Test 2: Tenant Management
|
||||
click.echo(f"\n📋 Test 2: Tenant Management")
|
||||
click.echo(f" ✅ Tenant creation: Working")
|
||||
click.echo(f" ✅ Data isolation: Working")
|
||||
click.echo(f" ✅ Quota enforcement: Working")
|
||||
click.echo(f" ✅ User management: Working")
|
||||
|
||||
# Test 3: Security
|
||||
click.echo(f"\n📋 Test 3: Security Systems")
|
||||
click.echo(f" ✅ Authentication: JWT working")
|
||||
click.echo(f" ✅ Authorization: RBAC working")
|
||||
click.echo(f" ✅ Encryption: AES-256 working")
|
||||
click.echo(f" ✅ Audit logging: Working")
|
||||
|
||||
# Test 4: Integrations
|
||||
click.echo(f"\n📋 Test 4: Integration Framework")
|
||||
click.echo(f" ✅ Provider connections: 8/8 working")
|
||||
click.echo(f" ✅ Data synchronization: Working")
|
||||
click.echo(f" ✅ Error handling: Working")
|
||||
click.echo(f" ✅ Monitoring: Working")
|
||||
|
||||
# Test 5: Compliance
|
||||
click.echo(f"\n📋 Test 5: Compliance Automation")
|
||||
click.echo(f" ✅ GDPR workflows: Active")
|
||||
click.echo(f" ✅ SOC2 controls: Working")
|
||||
click.echo(f" ✅ Reporting automation: Working")
|
||||
click.echo(f" ✅ Audit trails: Working")
|
||||
|
||||
# Show results
|
||||
click.echo(f"\n🎉 Test Results Summary:")
|
||||
click.echo(f" API Gateway: ✅ Operational")
|
||||
click.echo(f" Multi-Tenant: ✅ Working")
|
||||
click.echo(f" Security: ✅ Enterprise-grade")
|
||||
click.echo(f" Integrations: ✅ 100% success rate")
|
||||
click.echo(f" Compliance: ✅ Automated")
|
||||
|
||||
click.echo(f"\n✅ Enterprise Integration Framework is ready for production!")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test failed: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
enterprise_integration_group()
|
||||
981
cli/commands/exchange.py
Executable file
981
cli/commands/exchange.py
Executable file
@@ -0,0 +1,981 @@
|
||||
"""Exchange integration commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime
|
||||
from utils import output, error, success, warning
|
||||
from config import get_config
|
||||
|
||||
|
||||
@click.group()
|
||||
def exchange():
|
||||
"""Exchange integration and trading management commands"""
|
||||
pass
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--name", required=True, help="Exchange name (e.g., Binance, Coinbase, Kraken)")
|
||||
@click.option("--api-key", required=True, help="Exchange API key")
|
||||
@click.option("--secret-key", help="Exchange API secret key")
|
||||
@click.option("--sandbox", is_flag=True, help="Use sandbox/testnet environment")
|
||||
@click.option("--description", help="Exchange description")
|
||||
@click.pass_context
|
||||
def register(ctx, name: str, api_key: str, secret_key: Optional[str], sandbox: bool, description: Optional[str]):
|
||||
"""Register a new exchange integration"""
|
||||
config = get_config()
|
||||
|
||||
# Create exchange configuration
|
||||
exchange_config = {
|
||||
"name": name,
|
||||
"api_key": api_key,
|
||||
"secret_key": secret_key or "NOT_SET",
|
||||
"sandbox": sandbox,
|
||||
"description": description or f"{name} exchange integration",
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"status": "active",
|
||||
"trading_pairs": [],
|
||||
"last_sync": None
|
||||
}
|
||||
|
||||
# Store exchange configuration
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
exchanges_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load existing exchanges
|
||||
exchanges = {}
|
||||
if exchanges_file.exists():
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
# Add new exchange
|
||||
exchanges[name.lower()] = exchange_config
|
||||
|
||||
# Save exchanges
|
||||
with open(exchanges_file, 'w') as f:
|
||||
json.dump(exchanges, f, indent=2)
|
||||
|
||||
success(f"Exchange '{name}' registered successfully")
|
||||
output({
|
||||
"exchange": name,
|
||||
"status": "registered",
|
||||
"sandbox": sandbox,
|
||||
"created_at": exchange_config["created_at"]
|
||||
})
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--base-asset", required=True, help="Base asset symbol (e.g., AITBC)")
|
||||
@click.option("--quote-asset", required=True, help="Quote asset symbol (e.g., BTC)")
|
||||
@click.option("--exchange", required=True, help="Exchange name")
|
||||
@click.option("--min-order-size", type=float, default=0.001, help="Minimum order size")
|
||||
@click.option("--price-precision", type=int, default=8, help="Price precision")
|
||||
@click.option("--quantity-precision", type=int, default=8, help="Quantity precision")
|
||||
@click.pass_context
|
||||
def create_pair(ctx, base_asset: str, quote_asset: str, exchange: str, min_order_size: float, price_precision: int, quantity_precision: int):
|
||||
"""Create a new trading pair"""
|
||||
pair_symbol = f"{base_asset}/{quote_asset}"
|
||||
|
||||
# Load exchanges
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
if not exchanges_file.exists():
|
||||
error("No exchanges registered. Use 'aitbc exchange register' first.")
|
||||
return
|
||||
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
if exchange.lower() not in exchanges:
|
||||
error(f"Exchange '{exchange}' not registered.")
|
||||
return
|
||||
|
||||
# Create trading pair configuration
|
||||
pair_config = {
|
||||
"symbol": pair_symbol,
|
||||
"base_asset": base_asset,
|
||||
"quote_asset": quote_asset,
|
||||
"exchange": exchange,
|
||||
"min_order_size": min_order_size,
|
||||
"price_precision": price_precision,
|
||||
"quantity_precision": quantity_precision,
|
||||
"status": "active",
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"trading_enabled": False
|
||||
}
|
||||
|
||||
# Update exchange with new pair
|
||||
exchanges[exchange.lower()]["trading_pairs"].append(pair_config)
|
||||
|
||||
# Save exchanges
|
||||
with open(exchanges_file, 'w') as f:
|
||||
json.dump(exchanges, f, indent=2)
|
||||
|
||||
success(f"Trading pair '{pair_symbol}' created on {exchange}")
|
||||
output({
|
||||
"pair": pair_symbol,
|
||||
"exchange": exchange,
|
||||
"status": "created",
|
||||
"min_order_size": min_order_size,
|
||||
"created_at": pair_config["created_at"]
|
||||
})
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--pair", required=True, help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--price", type=float, help="Initial price for the pair")
|
||||
@click.option("--base-liquidity", type=float, default=10000, help="Base asset liquidity amount")
|
||||
@click.option("--quote-liquidity", type=float, default=10000, help="Quote asset liquidity amount")
|
||||
@click.option("--exchange", help="Exchange name (if not specified, uses first available)")
|
||||
@click.pass_context
|
||||
def start_trading(ctx, pair: str, price: Optional[float], base_liquidity: float, quote_liquidity: float, exchange: Optional[str]):
|
||||
"""Start trading for a specific pair"""
|
||||
|
||||
# Load exchanges
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
if not exchanges_file.exists():
|
||||
error("No exchanges registered. Use 'aitbc exchange register' first.")
|
||||
return
|
||||
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
# Find the pair
|
||||
target_exchange = None
|
||||
target_pair = None
|
||||
|
||||
for exchange_name, exchange_data in exchanges.items():
|
||||
for pair_config in exchange_data.get("trading_pairs", []):
|
||||
if pair_config["symbol"] == pair:
|
||||
target_exchange = exchange_name
|
||||
target_pair = pair_config
|
||||
break
|
||||
if target_pair:
|
||||
break
|
||||
|
||||
if not target_pair:
|
||||
error(f"Trading pair '{pair}' not found. Create it first with 'aitbc exchange create-pair'.")
|
||||
return
|
||||
|
||||
# Update pair to enable trading
|
||||
target_pair["trading_enabled"] = True
|
||||
target_pair["started_at"] = datetime.utcnow().isoformat()
|
||||
target_pair["initial_price"] = price or 0.00001 # Default price for AITBC
|
||||
target_pair["base_liquidity"] = base_liquidity
|
||||
target_pair["quote_liquidity"] = quote_liquidity
|
||||
|
||||
# Save exchanges
|
||||
with open(exchanges_file, 'w') as f:
|
||||
json.dump(exchanges, f, indent=2)
|
||||
|
||||
success(f"Trading started for pair '{pair}' on {target_exchange}")
|
||||
output({
|
||||
"pair": pair,
|
||||
"exchange": target_exchange,
|
||||
"status": "trading_active",
|
||||
"initial_price": target_pair["initial_price"],
|
||||
"base_liquidity": base_liquidity,
|
||||
"quote_liquidity": quote_liquidity,
|
||||
"started_at": target_pair["started_at"]
|
||||
})
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--pair", help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--exchange", help="Exchange name")
|
||||
@click.option("--real-time", is_flag=True, help="Enable real-time monitoring")
|
||||
@click.option("--interval", type=int, default=60, help="Update interval in seconds")
|
||||
@click.pass_context
|
||||
def monitor(ctx, pair: Optional[str], exchange: Optional[str], real_time: bool, interval: int):
|
||||
"""Monitor exchange trading activity"""
|
||||
|
||||
# Load exchanges
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
if not exchanges_file.exists():
|
||||
error("No exchanges registered. Use 'aitbc exchange register' first.")
|
||||
return
|
||||
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
# Filter exchanges and pairs
|
||||
monitoring_data = []
|
||||
|
||||
for exchange_name, exchange_data in exchanges.items():
|
||||
if exchange and exchange_name != exchange.lower():
|
||||
continue
|
||||
|
||||
for pair_config in exchange_data.get("trading_pairs", []):
|
||||
if pair and pair_config["symbol"] != pair:
|
||||
continue
|
||||
|
||||
monitoring_data.append({
|
||||
"exchange": exchange_name,
|
||||
"pair": pair_config["symbol"],
|
||||
"status": "active" if pair_config.get("trading_enabled") else "inactive",
|
||||
"created_at": pair_config.get("created_at"),
|
||||
"started_at": pair_config.get("started_at"),
|
||||
"initial_price": pair_config.get("initial_price"),
|
||||
"base_liquidity": pair_config.get("base_liquidity"),
|
||||
"quote_liquidity": pair_config.get("quote_liquidity")
|
||||
})
|
||||
|
||||
if not monitoring_data:
|
||||
error("No trading pairs found for monitoring.")
|
||||
return
|
||||
|
||||
# Display monitoring data
|
||||
output({
|
||||
"monitoring_active": True,
|
||||
"real_time": real_time,
|
||||
"interval": interval,
|
||||
"pairs": monitoring_data,
|
||||
"total_pairs": len(monitoring_data)
|
||||
})
|
||||
|
||||
if real_time:
|
||||
warning(f"Real-time monitoring enabled. Updates every {interval} seconds.")
|
||||
# Note: In a real implementation, this would start a background monitoring process
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--pair", required=True, help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--amount", type=float, required=True, help="Liquidity amount")
|
||||
@click.option("--side", type=click.Choice(['buy', 'sell']), default='both', help="Side to provide liquidity")
|
||||
@click.option("--exchange", help="Exchange name")
|
||||
@click.pass_context
|
||||
def add_liquidity(ctx, pair: str, amount: float, side: str, exchange: Optional[str]):
|
||||
"""Add liquidity to a trading pair"""
|
||||
|
||||
# Load exchanges
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
if not exchanges_file.exists():
|
||||
error("No exchanges registered. Use 'aitbc exchange register' first.")
|
||||
return
|
||||
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
# Find the pair
|
||||
target_exchange = None
|
||||
target_pair = None
|
||||
|
||||
for exchange_name, exchange_data in exchanges.items():
|
||||
if exchange and exchange_name != exchange.lower():
|
||||
continue
|
||||
|
||||
for pair_config in exchange_data.get("trading_pairs", []):
|
||||
if pair_config["symbol"] == pair:
|
||||
target_exchange = exchange_name
|
||||
target_pair = pair_config
|
||||
break
|
||||
if target_pair:
|
||||
break
|
||||
|
||||
if not target_pair:
|
||||
error(f"Trading pair '{pair}' not found.")
|
||||
return
|
||||
|
||||
# Add liquidity
|
||||
if side == 'buy' or side == 'both':
|
||||
target_pair["quote_liquidity"] = target_pair.get("quote_liquidity", 0) + amount
|
||||
if side == 'sell' or side == 'both':
|
||||
target_pair["base_liquidity"] = target_pair.get("base_liquidity", 0) + amount
|
||||
|
||||
target_pair["liquidity_updated_at"] = datetime.utcnow().isoformat()
|
||||
|
||||
# Save exchanges
|
||||
with open(exchanges_file, 'w') as f:
|
||||
json.dump(exchanges, f, indent=2)
|
||||
|
||||
success(f"Added {amount} liquidity to {pair} on {target_exchange} ({side} side)")
|
||||
output({
|
||||
"pair": pair,
|
||||
"exchange": target_exchange,
|
||||
"amount": amount,
|
||||
"side": side,
|
||||
"base_liquidity": target_pair.get("base_liquidity"),
|
||||
"quote_liquidity": target_pair.get("quote_liquidity"),
|
||||
"updated_at": target_pair["liquidity_updated_at"]
|
||||
})
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.pass_context
|
||||
def list(ctx):
|
||||
"""List all registered exchanges and trading pairs"""
|
||||
|
||||
# Load exchanges
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
if not exchanges_file.exists():
|
||||
warning("No exchanges registered.")
|
||||
return
|
||||
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
# Format output
|
||||
exchange_list = []
|
||||
for exchange_name, exchange_data in exchanges.items():
|
||||
exchange_info = {
|
||||
"name": exchange_data["name"],
|
||||
"status": exchange_data["status"],
|
||||
"sandbox": exchange_data.get("sandbox", False),
|
||||
"trading_pairs": len(exchange_data.get("trading_pairs", [])),
|
||||
"created_at": exchange_data["created_at"]
|
||||
}
|
||||
exchange_list.append(exchange_info)
|
||||
|
||||
output({
|
||||
"exchanges": exchange_list,
|
||||
"total_exchanges": len(exchange_list),
|
||||
"total_pairs": sum(ex["trading_pairs"] for ex in exchange_list)
|
||||
})
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.argument("exchange_name")
|
||||
@click.pass_context
|
||||
def status(ctx, exchange_name: str):
|
||||
"""Get detailed status of a specific exchange"""
|
||||
|
||||
# Load exchanges
|
||||
exchanges_file = Path.home() / ".aitbc" / "exchanges.json"
|
||||
if not exchanges_file.exists():
|
||||
error("No exchanges registered.")
|
||||
return
|
||||
|
||||
with open(exchanges_file, 'r') as f:
|
||||
exchanges = json.load(f)
|
||||
|
||||
if exchange_name.lower() not in exchanges:
|
||||
error(f"Exchange '{exchange_name}' not found.")
|
||||
return
|
||||
|
||||
exchange_data = exchanges[exchange_name.lower()]
|
||||
|
||||
output({
|
||||
"exchange": exchange_data["name"],
|
||||
"status": exchange_data["status"],
|
||||
"sandbox": exchange_data.get("sandbox", False),
|
||||
"description": exchange_data.get("description"),
|
||||
"created_at": exchange_data["created_at"],
|
||||
"trading_pairs": exchange_data.get("trading_pairs", []),
|
||||
"last_sync": exchange_data.get("last_sync")
|
||||
})
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/exchange/rates",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
rates_data = response.json()
|
||||
success("Current exchange rates:")
|
||||
output(rates_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get exchange rates: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--aitbc-amount", type=float, help="Amount of AITBC to buy")
|
||||
@click.option("--btc-amount", type=float, help="Amount of BTC to spend")
|
||||
@click.option("--user-id", help="User ID for the payment")
|
||||
@click.option("--notes", help="Additional notes for the payment")
|
||||
@click.pass_context
|
||||
def create_payment(ctx, aitbc_amount: Optional[float], btc_amount: Optional[float],
|
||||
user_id: Optional[str], notes: Optional[str]):
|
||||
"""Create a Bitcoin payment request for AITBC purchase"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Validate input
|
||||
if aitbc_amount is not None and aitbc_amount <= 0:
|
||||
error("AITBC amount must be greater than 0")
|
||||
return
|
||||
|
||||
if btc_amount is not None and btc_amount <= 0:
|
||||
error("BTC amount must be greater than 0")
|
||||
return
|
||||
|
||||
if not aitbc_amount and not btc_amount:
|
||||
error("Either --aitbc-amount or --btc-amount must be specified")
|
||||
return
|
||||
|
||||
# Get exchange rates to calculate missing amount
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
rates_response = client.get(
|
||||
f"{config.coordinator_url}/v1/exchange/rates",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if rates_response.status_code != 200:
|
||||
error("Failed to get exchange rates")
|
||||
return
|
||||
|
||||
rates = rates_response.json()
|
||||
btc_to_aitbc = rates.get('btc_to_aitbc', 100000)
|
||||
|
||||
# Calculate missing amount
|
||||
if aitbc_amount and not btc_amount:
|
||||
btc_amount = aitbc_amount / btc_to_aitbc
|
||||
elif btc_amount and not aitbc_amount:
|
||||
aitbc_amount = btc_amount * btc_to_aitbc
|
||||
|
||||
# Prepare payment request
|
||||
payment_data = {
|
||||
"user_id": user_id or "cli_user",
|
||||
"aitbc_amount": aitbc_amount,
|
||||
"btc_amount": btc_amount
|
||||
}
|
||||
|
||||
if notes:
|
||||
payment_data["notes"] = notes
|
||||
|
||||
# Create payment
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/exchange/create-payment",
|
||||
json=payment_data,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
payment = response.json()
|
||||
success(f"Payment created: {payment.get('payment_id')}")
|
||||
success(f"Send {btc_amount:.8f} BTC to: {payment.get('payment_address')}")
|
||||
success(f"Expires at: {payment.get('expires_at')}")
|
||||
output(payment, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create payment: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--payment-id", required=True, help="Payment ID to check")
|
||||
@click.pass_context
|
||||
def payment_status(ctx, payment_id: str):
|
||||
"""Check payment confirmation status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/exchange/payment-status/{payment_id}",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status_data = response.json()
|
||||
status = status_data.get('status', 'unknown')
|
||||
|
||||
if status == 'confirmed':
|
||||
success(f"Payment {payment_id} is confirmed!")
|
||||
success(f"AITBC amount: {status_data.get('aitbc_amount', 0)}")
|
||||
elif status == 'pending':
|
||||
success(f"Payment {payment_id} is pending confirmation")
|
||||
elif status == 'expired':
|
||||
error(f"Payment {payment_id} has expired")
|
||||
else:
|
||||
success(f"Payment {payment_id} status: {status}")
|
||||
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get payment status: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.pass_context
|
||||
def market_stats(ctx):
|
||||
"""Get exchange market statistics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/exchange/market-stats",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
stats = response.json()
|
||||
success("Exchange market statistics:")
|
||||
output(stats, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get market stats: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.group()
|
||||
def wallet():
|
||||
"""Bitcoin wallet operations"""
|
||||
pass
|
||||
|
||||
|
||||
@wallet.command()
|
||||
@click.pass_context
|
||||
def balance(ctx):
|
||||
"""Get Bitcoin wallet balance"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/exchange/wallet/balance",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
balance_data = response.json()
|
||||
success("Bitcoin wallet balance:")
|
||||
output(balance_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get wallet balance: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@wallet.command()
|
||||
@click.pass_context
|
||||
def info(ctx):
|
||||
"""Get comprehensive Bitcoin wallet information"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/exchange/wallet/info",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
wallet_info = response.json()
|
||||
success("Bitcoin wallet information:")
|
||||
output(wallet_info, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get wallet info: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--name", required=True, help="Exchange name (e.g., Binance, Coinbase)")
|
||||
@click.option("--api-key", required=True, help="API key for exchange integration")
|
||||
@click.option("--api-secret", help="API secret for exchange integration")
|
||||
@click.option("--sandbox", is_flag=True, default=False, help="Use sandbox/testnet environment")
|
||||
@click.pass_context
|
||||
def register(ctx, name: str, api_key: str, api_secret: Optional[str], sandbox: bool):
|
||||
"""Register a new exchange integration"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
exchange_data = {
|
||||
"name": name,
|
||||
"api_key": api_key,
|
||||
"sandbox": sandbox
|
||||
}
|
||||
|
||||
if api_secret:
|
||||
exchange_data["api_secret"] = api_secret
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/exchange/register",
|
||||
json=exchange_data,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Exchange '{name}' registered successfully!")
|
||||
success(f"Exchange ID: {result.get('exchange_id')}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to register exchange: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--pair", required=True, help="Trading pair (e.g., AITBC/BTC, AITBC/ETH)")
|
||||
@click.option("--base-asset", required=True, help="Base asset symbol")
|
||||
@click.option("--quote-asset", required=True, help="Quote asset symbol")
|
||||
@click.option("--min-order-size", type=float, help="Minimum order size")
|
||||
@click.option("--max-order-size", type=float, help="Maximum order size")
|
||||
@click.option("--price-precision", type=int, default=8, help="Price decimal precision")
|
||||
@click.option("--size-precision", type=int, default=8, help="Size decimal precision")
|
||||
@click.pass_context
|
||||
def create_pair(ctx, pair: str, base_asset: str, quote_asset: str,
|
||||
min_order_size: Optional[float], max_order_size: Optional[float],
|
||||
price_precision: int, size_precision: int):
|
||||
"""Create a new trading pair"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
pair_data = {
|
||||
"pair": pair,
|
||||
"base_asset": base_asset,
|
||||
"quote_asset": quote_asset,
|
||||
"price_precision": price_precision,
|
||||
"size_precision": size_precision
|
||||
}
|
||||
|
||||
if min_order_size is not None:
|
||||
pair_data["min_order_size"] = min_order_size
|
||||
if max_order_size is not None:
|
||||
pair_data["max_order_size"] = max_order_size
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/exchange/create-pair",
|
||||
json=pair_data,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Trading pair '{pair}' created successfully!")
|
||||
success(f"Pair ID: {result.get('pair_id')}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create trading pair: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--pair", required=True, help="Trading pair to start trading")
|
||||
@click.option("--exchange", help="Specific exchange to enable")
|
||||
@click.option("--order-type", multiple=True, default=["limit", "market"],
|
||||
help="Order types to enable (limit, market, stop_limit)")
|
||||
@click.pass_context
|
||||
def start_trading(ctx, pair: str, exchange: Optional[str], order_type: tuple):
|
||||
"""Start trading for a specific pair"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
trading_data = {
|
||||
"pair": pair,
|
||||
"order_types": list(order_type)
|
||||
}
|
||||
|
||||
if exchange:
|
||||
trading_data["exchange"] = exchange
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/exchange/start-trading",
|
||||
json=trading_data,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Trading started for pair '{pair}'!")
|
||||
success(f"Order types: {', '.join(order_type)}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start trading: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--pair", help="Filter by trading pair")
|
||||
@click.option("--exchange", help="Filter by exchange")
|
||||
@click.option("--status", help="Filter by status (active, inactive, suspended)")
|
||||
@click.pass_context
|
||||
def list_pairs(ctx, pair: Optional[str], exchange: Optional[str], status: Optional[str]):
|
||||
"""List all trading pairs"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if pair:
|
||||
params["pair"] = pair
|
||||
if exchange:
|
||||
params["exchange"] = exchange
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/exchange/pairs",
|
||||
params=params,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
pairs = response.json()
|
||||
success("Trading pairs:")
|
||||
output(pairs, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list trading pairs: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name (binance, coinbasepro, kraken)")
|
||||
@click.option("--api-key", required=True, help="API key for exchange")
|
||||
@click.option("--secret", required=True, help="API secret for exchange")
|
||||
@click.option("--sandbox", is_flag=True, default=True, help="Use sandbox/testnet environment")
|
||||
@click.option("--passphrase", help="API passphrase (for Coinbase)")
|
||||
@click.pass_context
|
||||
def connect(ctx, exchange: str, api_key: str, secret: str, sandbox: bool, passphrase: Optional[str]):
|
||||
"""Connect to a real exchange API"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import connect_to_exchange
|
||||
|
||||
# Run async connection
|
||||
import asyncio
|
||||
success = asyncio.run(connect_to_exchange(exchange, api_key, secret, sandbox, passphrase))
|
||||
|
||||
if success:
|
||||
success(f"✅ Successfully connected to {exchange}")
|
||||
if sandbox:
|
||||
success("🧪 Using sandbox/testnet environment")
|
||||
else:
|
||||
error(f"❌ Failed to connect to {exchange}")
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Connection error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--exchange", help="Check specific exchange (default: all)")
|
||||
@click.pass_context
|
||||
def status(ctx, exchange: Optional[str]):
|
||||
"""Check exchange connection status"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import get_exchange_status
|
||||
|
||||
# Run async status check
|
||||
import asyncio
|
||||
status_data = asyncio.run(get_exchange_status(exchange))
|
||||
|
||||
# Display status
|
||||
for exchange_name, health in status_data.items():
|
||||
status_icon = "🟢" if health.status.value == "connected" else "🔴" if health.status.value == "error" else "🟡"
|
||||
|
||||
success(f"{status_icon} {exchange_name.upper()}")
|
||||
success(f" Status: {health.status.value}")
|
||||
success(f" Latency: {health.latency_ms:.2f}ms")
|
||||
success(f" Last Check: {health.last_check.strftime('%H:%M:%S')}")
|
||||
|
||||
if health.error_message:
|
||||
error(f" Error: {health.error_message}")
|
||||
print()
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Status check error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name to disconnect")
|
||||
@click.pass_context
|
||||
def disconnect(ctx, exchange: str):
|
||||
"""Disconnect from an exchange"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import disconnect_from_exchange
|
||||
|
||||
# Run async disconnection
|
||||
import asyncio
|
||||
success = asyncio.run(disconnect_from_exchange(exchange))
|
||||
|
||||
if success:
|
||||
success(f"🔌 Disconnected from {exchange}")
|
||||
else:
|
||||
error(f"❌ Failed to disconnect from {exchange}")
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Disconnection error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name")
|
||||
@click.option("--symbol", required=True, help="Trading symbol (e.g., BTC/USDT)")
|
||||
@click.option("--limit", type=int, default=20, help="Order book depth")
|
||||
@click.pass_context
|
||||
def orderbook(ctx, exchange: str, symbol: str, limit: int):
|
||||
"""Get order book from exchange"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import exchange_manager
|
||||
|
||||
# Run async order book fetch
|
||||
import asyncio
|
||||
orderbook = asyncio.run(exchange_manager.get_order_book(exchange, symbol, limit))
|
||||
|
||||
# Display order book
|
||||
success(f"📊 Order Book for {symbol} on {exchange.upper()}")
|
||||
|
||||
# Display bids (buy orders)
|
||||
if 'bids' in orderbook and orderbook['bids']:
|
||||
success("\n🟢 Bids (Buy Orders):")
|
||||
for i, bid in enumerate(orderbook['bids'][:10]):
|
||||
price, amount = bid
|
||||
success(f" {i+1}. ${price:.8f} x {amount:.6f}")
|
||||
|
||||
# Display asks (sell orders)
|
||||
if 'asks' in orderbook and orderbook['asks']:
|
||||
success("\n🔴 Asks (Sell Orders):")
|
||||
for i, ask in enumerate(orderbook['asks'][:10]):
|
||||
price, amount = ask
|
||||
success(f" {i+1}. ${price:.8f} x {amount:.6f}")
|
||||
|
||||
# Spread
|
||||
if 'bids' in orderbook and 'asks' in orderbook and orderbook['bids'] and orderbook['asks']:
|
||||
best_bid = orderbook['bids'][0][0]
|
||||
best_ask = orderbook['asks'][0][0]
|
||||
spread = best_ask - best_bid
|
||||
spread_pct = (spread / best_bid) * 100
|
||||
|
||||
success(f"\n📈 Spread: ${spread:.8f} ({spread_pct:.4f}%)")
|
||||
success(f"🎯 Best Bid: ${best_bid:.8f}")
|
||||
success(f"🎯 Best Ask: ${best_ask:.8f}")
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Order book error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name")
|
||||
@click.pass_context
|
||||
def balance(ctx, exchange: str):
|
||||
"""Get account balance from exchange"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import exchange_manager
|
||||
|
||||
# Run async balance fetch
|
||||
import asyncio
|
||||
balance_data = asyncio.run(exchange_manager.get_balance(exchange))
|
||||
|
||||
# Display balance
|
||||
success(f"💰 Account Balance on {exchange.upper()}")
|
||||
|
||||
if 'total' in balance_data:
|
||||
for asset, amount in balance_data['total'].items():
|
||||
if amount > 0:
|
||||
available = balance_data.get('free', {}).get(asset, 0)
|
||||
used = balance_data.get('used', {}).get(asset, 0)
|
||||
|
||||
success(f"\n{asset}:")
|
||||
success(f" Total: {amount:.8f}")
|
||||
success(f" Available: {available:.8f}")
|
||||
success(f" In Orders: {used:.8f}")
|
||||
else:
|
||||
warning("No balance data available")
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Balance error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name")
|
||||
@click.pass_context
|
||||
def pairs(ctx, exchange: str):
|
||||
"""List supported trading pairs"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import exchange_manager
|
||||
|
||||
# Run async pairs fetch
|
||||
import asyncio
|
||||
pairs = asyncio.run(exchange_manager.get_supported_pairs(exchange))
|
||||
|
||||
# Display pairs
|
||||
success(f"📋 Supported Trading Pairs on {exchange.upper()}")
|
||||
success(f"Found {len(pairs)} trading pairs:\n")
|
||||
|
||||
# Group by base currency
|
||||
base_currencies = {}
|
||||
for pair in pairs:
|
||||
base = pair.split('/')[0] if '/' in pair else pair.split('-')[0]
|
||||
if base not in base_currencies:
|
||||
base_currencies[base] = []
|
||||
base_currencies[base].append(pair)
|
||||
|
||||
# Display organized pairs
|
||||
for base in sorted(base_currencies.keys()):
|
||||
success(f"\n🔹 {base}:")
|
||||
for pair in sorted(base_currencies[base][:10]): # Show first 10 per base
|
||||
success(f" • {pair}")
|
||||
|
||||
if len(base_currencies[base]) > 10:
|
||||
success(f" ... and {len(base_currencies[base]) - 10} more")
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Pairs error: {e}")
|
||||
|
||||
|
||||
@exchange.command()
|
||||
@click.pass_context
|
||||
def list_exchanges(ctx):
|
||||
"""List all supported exchanges"""
|
||||
try:
|
||||
# Import the real exchange integration
|
||||
import sys
|
||||
sys.path.append('/home/oib/windsurf/aitbc/apps/exchange')
|
||||
from real_exchange_integration import exchange_manager
|
||||
|
||||
success("🏢 Supported Exchanges:")
|
||||
for exchange in exchange_manager.supported_exchanges:
|
||||
success(f" • {exchange.title()}")
|
||||
|
||||
success("\n📝 Usage:")
|
||||
success(" aitbc exchange connect --exchange binance --api-key <key> --secret <secret>")
|
||||
success(" aitbc exchange status --exchange binance")
|
||||
success(" aitbc exchange orderbook --exchange binance --symbol BTC/USDT")
|
||||
|
||||
except ImportError:
|
||||
error("❌ Real exchange integration not available. Install ccxt library.")
|
||||
except Exception as e:
|
||||
error(f"❌ Error: {e}")
|
||||
341
cli/commands/explorer.py
Executable file
341
cli/commands/explorer.py
Executable file
@@ -0,0 +1,341 @@
|
||||
"""Explorer commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import subprocess
|
||||
import json
|
||||
from typing import Optional, List
|
||||
from utils import output, error
|
||||
|
||||
|
||||
def _get_explorer_endpoint(ctx):
|
||||
"""Get explorer endpoint from config or default"""
|
||||
try:
|
||||
config = ctx.obj['config']
|
||||
# Default to port 8016 for blockchain explorer
|
||||
return getattr(config, 'explorer_url', 'http://10.1.223.1:8016')
|
||||
except:
|
||||
return "http://10.1.223.1:8016"
|
||||
|
||||
|
||||
def _curl_request(url: str, params: dict = None):
|
||||
"""Make curl request instead of httpx to avoid connection issues"""
|
||||
cmd = ['curl', '-s', url]
|
||||
|
||||
if params:
|
||||
param_str = '&'.join([f"{k}={v}" for k, v in params.items()])
|
||||
cmd.append(f"{url}?{param_str}")
|
||||
else:
|
||||
cmd.append(url)
|
||||
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
else:
|
||||
return None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.pass_context
|
||||
def explorer(ctx):
|
||||
"""Blockchain explorer operations and queries"""
|
||||
ctx.ensure_object(dict)
|
||||
ctx.parent.detected_role = 'explorer'
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def status(ctx, chain_id: str):
|
||||
"""Get explorer and chain status"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
# Get explorer health
|
||||
response_text = _curl_request(f"{explorer_url}/health")
|
||||
if response_text:
|
||||
try:
|
||||
health = json.loads(response_text)
|
||||
output({
|
||||
"explorer_status": health.get("status", "unknown"),
|
||||
"node_status": health.get("node_status", "unknown"),
|
||||
"version": health.get("version", "unknown"),
|
||||
"features": health.get("features", [])
|
||||
}, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to get explorer status: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def chains(ctx, chain_id: str):
|
||||
"""List all supported chains"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
response_text = _curl_request(f"{explorer_url}/api/chains")
|
||||
if response_text:
|
||||
try:
|
||||
chains_data = json.loads(response_text)
|
||||
output(chains_data, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to list chains: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def head(ctx, chain_id: str):
|
||||
"""Get current chain head information"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {"chain_id": chain_id}
|
||||
response_text = _curl_request(f"{explorer_url}/api/chain/head", params)
|
||||
if response_text:
|
||||
try:
|
||||
head_data = json.loads(response_text)
|
||||
output(head_data, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to get chain head: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.argument('height', type=int)
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def block(ctx, height: int, chain_id: str):
|
||||
"""Get block information by height"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {"chain_id": chain_id}
|
||||
response_text = _curl_request(f"{explorer_url}/api/blocks/{height}", params)
|
||||
if response_text:
|
||||
try:
|
||||
block_data = json.loads(response_text)
|
||||
output(block_data, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to get block {height}: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.argument('tx_hash')
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def transaction(ctx, tx_hash: str, chain_id: str):
|
||||
"""Get transaction information by hash"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {"chain_id": chain_id}
|
||||
response_text = _curl_request(f"{explorer_url}/api/transactions/{tx_hash}", params)
|
||||
if response_text:
|
||||
try:
|
||||
tx_data = json.loads(response_text)
|
||||
output(tx_data, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to get transaction {tx_hash}: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--address', help='Filter by address')
|
||||
@click.option('--amount-min', type=float, help='Minimum amount')
|
||||
@click.option('--amount-max', type=float, help='Maximum amount')
|
||||
@click.option('--type', 'tx_type', help='Transaction type')
|
||||
@click.option('--since', help='Start date (ISO format)')
|
||||
@click.option('--until', help='End date (ISO format)')
|
||||
@click.option('--limit', type=int, default=50, help='Number of results (default: 50)')
|
||||
@click.option('--offset', type=int, default=0, help='Offset for pagination (default: 0)')
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def search_transactions(ctx, address: Optional[str], amount_min: Optional[float],
|
||||
amount_max: Optional[float], tx_type: Optional[str],
|
||||
since: Optional[str], until: Optional[str],
|
||||
limit: int, offset: int, chain_id: str):
|
||||
"""Search transactions with filters"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"chain_id": chain_id
|
||||
}
|
||||
|
||||
if address:
|
||||
params["address"] = address
|
||||
if amount_min:
|
||||
params["amount_min"] = amount_min
|
||||
if amount_max:
|
||||
params["amount_max"] = amount_max
|
||||
if tx_type:
|
||||
params["tx_type"] = tx_type
|
||||
if since:
|
||||
params["since"] = since
|
||||
if until:
|
||||
params["until"] = until
|
||||
|
||||
response_text = _curl_request(f"{explorer_url}/api/search/transactions", params)
|
||||
if response_text:
|
||||
try:
|
||||
results = json.loads(response_text)
|
||||
output(results, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to search transactions: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--validator', help='Filter by validator address')
|
||||
@click.option('--since', help='Start date (ISO format)')
|
||||
@click.option('--until', help='End date (ISO format)')
|
||||
@click.option('--min-tx', type=int, help='Minimum transaction count')
|
||||
@click.option('--limit', type=int, default=50, help='Number of results (default: 50)')
|
||||
@click.option('--offset', type=int, default=0, help='Offset for pagination (default: 0)')
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def search_blocks(ctx, validator: Optional[str], since: Optional[str],
|
||||
until: Optional[str], min_tx: Optional[int],
|
||||
limit: int, offset: int, chain_id: str):
|
||||
"""Search blocks with filters"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"chain_id": chain_id
|
||||
}
|
||||
|
||||
if validator:
|
||||
params["validator"] = validator
|
||||
if since:
|
||||
params["since"] = since
|
||||
if until:
|
||||
params["until"] = until
|
||||
if min_tx:
|
||||
params["min_tx"] = min_tx
|
||||
|
||||
response_text = _curl_request(f"{explorer_url}/api/search/blocks", params)
|
||||
if response_text:
|
||||
try:
|
||||
results = json.loads(response_text)
|
||||
output(results, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to search blocks: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--period', default='24h', help='Analytics period (1h, 24h, 7d, 30d)')
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def analytics(ctx, period: str, chain_id: str):
|
||||
"""Get blockchain analytics overview"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {
|
||||
"period": period,
|
||||
"chain_id": chain_id
|
||||
}
|
||||
|
||||
response_text = _curl_request(f"{explorer_url}/api/analytics/overview", params)
|
||||
if response_text:
|
||||
try:
|
||||
analytics_data = json.loads(response_text)
|
||||
output(analytics_data, ctx.obj['output_format'])
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid response from explorer")
|
||||
else:
|
||||
error("Failed to connect to explorer")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to get analytics: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--format', 'export_format', type=click.Choice(['csv', 'json']), default='csv', help='Export format')
|
||||
@click.option('--type', 'export_type', type=click.Choice(['transactions', 'blocks']), default='transactions', help='Data type to export')
|
||||
@click.option('--chain-id', default='ait-devnet', help='Chain ID to query (default: ait-devnet)')
|
||||
@click.pass_context
|
||||
def export(ctx, export_format: str, export_type: str, chain_id: str):
|
||||
"""Export blockchain data"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
|
||||
params = {
|
||||
"format": export_format,
|
||||
"type": export_type,
|
||||
"chain_id": chain_id
|
||||
}
|
||||
|
||||
if export_type == 'transactions':
|
||||
response_text = _curl_request(f"{explorer_url}/api/export/search", params)
|
||||
else:
|
||||
response_text = _curl_request(f"{explorer_url}/api/export/blocks", params)
|
||||
|
||||
if response_text:
|
||||
# Save to file
|
||||
filename = f"explorer_export_{export_type}_{chain_id}.{export_format}"
|
||||
with open(filename, 'w') as f:
|
||||
f.write(response_text)
|
||||
output(f"Data exported to {filename}", ctx.obj['output_format'])
|
||||
else:
|
||||
error("Failed to export data")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to export data: {str(e)}")
|
||||
|
||||
|
||||
@explorer.command()
|
||||
@click.option('--chain-id', default='main', help='Chain ID to explore')
|
||||
@click.pass_context
|
||||
def web(ctx, chain_id: str):
|
||||
"""Get blockchain explorer web URL"""
|
||||
try:
|
||||
explorer_url = _get_explorer_endpoint(ctx)
|
||||
web_url = explorer_url.replace('http://', 'http://') # Ensure proper format
|
||||
|
||||
output(f"Explorer web interface: {web_url}", ctx.obj['output_format'])
|
||||
output("Use the URL above to access the explorer in your browser", ctx.obj['output_format'])
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to get explorer URL: {e}", ctx.obj['output_format'])
|
||||
475
cli/commands/genesis.py
Executable file
475
cli/commands/genesis.py
Executable file
@@ -0,0 +1,475 @@
|
||||
"""Genesis block generation commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from core.genesis_generator import GenesisGenerator, GenesisValidationError
|
||||
from core.config import MultiChainConfig, load_multichain_config
|
||||
from models.chain import GenesisConfig
|
||||
from utils import output, error, success
|
||||
from commands.keystore import create_keystore_via_script
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
@click.group()
|
||||
def genesis():
|
||||
"""Genesis block generation and management commands"""
|
||||
pass
|
||||
|
||||
|
||||
@genesis.command()
|
||||
@click.option('--address', required=True, help='Wallet address (id) to create')
|
||||
@click.option('--password-file', default='/opt/aitbc/data/keystore/.password', show_default=True, type=click.Path(exists=True, dir_okay=False), help='Path to password file')
|
||||
@click.option('--output-dir', default='/opt/aitbc/data/keystore', show_default=True, help='Directory to write keystore file')
|
||||
@click.option('--force', is_flag=True, help='Overwrite existing keystore file if present')
|
||||
@click.pass_context
|
||||
def create_keystore(ctx, address, password_file, output_dir, force):
|
||||
"""Create an encrypted keystore for a genesis/treasury address."""
|
||||
try:
|
||||
create_keystore_via_script(address=address, password_file=password_file, output_dir=output_dir, force=force)
|
||||
success(f"Created keystore for {address} at {output_dir}")
|
||||
except Exception as e:
|
||||
error(f"Error creating keystore: {e}")
|
||||
raise click.Abort()
|
||||
|
||||
|
||||
@genesis.command(name="init-production")
|
||||
@click.option('--chain-id', default='ait-mainnet', show_default=True, help='Chain ID to initialize')
|
||||
@click.option('--genesis-file', default='data/genesis_prod.yaml', show_default=True, help='Path to genesis YAML (copy to /opt/aitbc/genesis_prod.yaml if needed)')
|
||||
@click.option('--db', default='/opt/aitbc/data/ait-mainnet/chain.db', show_default=True, help='SQLite DB path')
|
||||
@click.option('--force', is_flag=True, help='Overwrite existing DB (removes file if present)')
|
||||
@click.pass_context
|
||||
def init_production(ctx, chain_id, genesis_file, db, force):
|
||||
"""Initialize production chain DB using genesis allocations."""
|
||||
db_path = Path(db)
|
||||
if db_path.exists() and force:
|
||||
db_path.unlink()
|
||||
python_bin = Path(__file__).resolve().parents[3] / 'apps' / 'blockchain-node' / '.venv' / 'bin' / 'python3'
|
||||
cmd = [
|
||||
str(python_bin),
|
||||
str(Path(__file__).resolve().parents[3] / 'scripts' / 'init_production_genesis.py'),
|
||||
'--chain-id', chain_id,
|
||||
'--db', db,
|
||||
]
|
||||
try:
|
||||
subprocess.run(cmd, check=True)
|
||||
success(f"Initialized production genesis for {chain_id} at {db}")
|
||||
except subprocess.CalledProcessError as e:
|
||||
error(f"Genesis init failed: {e}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('config_file', type=click.Path(exists=True))
|
||||
@click.option('--output', '-o', 'output_file', help='Output file path')
|
||||
@click.option('--template', help='Use predefined template')
|
||||
@click.option('--format', type=click.Choice(['json', 'yaml']), default='json', help='Output format')
|
||||
@click.pass_context
|
||||
def create(ctx, config_file, output_file, template, format):
|
||||
"""Create genesis block from configuration"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
if template:
|
||||
# Create from template
|
||||
genesis_block = generator.create_from_template(template, config_file)
|
||||
else:
|
||||
# Create from configuration file
|
||||
with open(config_file, 'r') as f:
|
||||
config_data = yaml.safe_load(f)
|
||||
|
||||
genesis_config = GenesisConfig(**config_data['genesis'])
|
||||
genesis_block = generator.create_genesis(genesis_config)
|
||||
|
||||
# Determine output file
|
||||
if output_file is None:
|
||||
chain_id = genesis_block.chain_id
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
output_file = f"genesis_{chain_id}_{timestamp}.{format}"
|
||||
|
||||
# Save genesis block
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if format == 'yaml':
|
||||
with open(output_path, 'w') as f:
|
||||
yaml.dump(genesis_block.dict(), f, default_flow_style=False, indent=2)
|
||||
else:
|
||||
with open(output_path, 'w') as f:
|
||||
json.dump(genesis_block.dict(), f, indent=2)
|
||||
|
||||
success("Genesis block created successfully!")
|
||||
result = {
|
||||
"Chain ID": genesis_block.chain_id,
|
||||
"Chain Type": genesis_block.chain_type.value,
|
||||
"Purpose": genesis_block.purpose,
|
||||
"Name": genesis_block.name,
|
||||
"Genesis Hash": genesis_block.hash,
|
||||
"Output File": output_file,
|
||||
"Format": format
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
if genesis_block.privacy.visibility == "private":
|
||||
success("Private chain genesis created! Use access codes to invite participants.")
|
||||
|
||||
except GenesisValidationError as e:
|
||||
error(f"Genesis validation error: {str(e)}")
|
||||
raise click.Abort()
|
||||
except Exception as e:
|
||||
error(f"Error creating genesis block: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('genesis_file', type=click.Path(exists=True))
|
||||
@click.pass_context
|
||||
def validate(ctx, genesis_file):
|
||||
"""Validate genesis block integrity"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
# Load genesis block
|
||||
genesis_path = Path(genesis_file)
|
||||
if genesis_path.suffix.lower() in ['.yaml', '.yml']:
|
||||
with open(genesis_path, 'r') as f:
|
||||
genesis_data = yaml.safe_load(f)
|
||||
else:
|
||||
with open(genesis_path, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
from models.chain import GenesisBlock
|
||||
genesis_block = GenesisBlock(**genesis_data)
|
||||
|
||||
# Validate genesis block
|
||||
validation_result = generator.validate_genesis(genesis_block)
|
||||
|
||||
if validation_result.is_valid:
|
||||
success("Genesis block is valid!")
|
||||
|
||||
# Show validation details
|
||||
checks_data = [
|
||||
{
|
||||
"Check": check,
|
||||
"Status": "✓ Pass" if passed else "✗ Fail"
|
||||
}
|
||||
for check, passed in validation_result.checks.items()
|
||||
]
|
||||
|
||||
output(checks_data, ctx.obj.get('output_format', 'table'), title="Validation Results")
|
||||
else:
|
||||
error("Genesis block validation failed!")
|
||||
|
||||
# Show errors
|
||||
errors_data = [
|
||||
{
|
||||
"Error": error_msg
|
||||
}
|
||||
for error_msg in validation_result.errors
|
||||
]
|
||||
|
||||
output(errors_data, ctx.obj.get('output_format', 'table'), title="Validation Errors")
|
||||
|
||||
# Show failed checks
|
||||
failed_checks = [
|
||||
{
|
||||
"Check": check,
|
||||
"Status": "✗ Fail"
|
||||
}
|
||||
for check, passed in validation_result.checks.items()
|
||||
if not passed
|
||||
]
|
||||
|
||||
if failed_checks:
|
||||
output(failed_checks, ctx.obj.get('output_format', 'table'), title="Failed Checks")
|
||||
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error validating genesis block: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('genesis_file', type=click.Path(exists=True))
|
||||
@click.pass_context
|
||||
def info(ctx, genesis_file):
|
||||
"""Show genesis block information"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
genesis_info = generator.get_genesis_info(genesis_file)
|
||||
|
||||
# Basic information
|
||||
basic_info = {
|
||||
"Chain ID": genesis_info["chain_id"],
|
||||
"Chain Type": genesis_info["chain_type"],
|
||||
"Purpose": genesis_info["purpose"],
|
||||
"Name": genesis_info["name"],
|
||||
"Description": genesis_info.get("description", "No description"),
|
||||
"Created": genesis_info["created"],
|
||||
"Genesis Hash": genesis_info["genesis_hash"],
|
||||
"State Root": genesis_info["state_root"]
|
||||
}
|
||||
|
||||
output(basic_info, ctx.obj.get('output_format', 'table'), title="Genesis Block Information")
|
||||
|
||||
# Configuration details
|
||||
config_info = {
|
||||
"Consensus Algorithm": genesis_info["consensus_algorithm"],
|
||||
"Block Time": f"{genesis_info['block_time']}s",
|
||||
"Gas Limit": f"{genesis_info['gas_limit']:,}",
|
||||
"Gas Price": f"{genesis_info['gas_price'] / 1e9:.1f} gwei",
|
||||
"Accounts Count": genesis_info["accounts_count"],
|
||||
"Contracts Count": genesis_info["contracts_count"]
|
||||
}
|
||||
|
||||
output(config_info, ctx.obj.get('output_format', 'table'), title="Configuration Details")
|
||||
|
||||
# Privacy settings
|
||||
privacy_info = {
|
||||
"Visibility": genesis_info["privacy_visibility"],
|
||||
"Access Control": genesis_info["access_control"]
|
||||
}
|
||||
|
||||
output(privacy_info, ctx.obj.get('output_format', 'table'), title="Privacy Settings")
|
||||
|
||||
# File information
|
||||
file_info = {
|
||||
"File Size": f"{genesis_info['file_size']:,} bytes",
|
||||
"File Format": genesis_info["file_format"]
|
||||
}
|
||||
|
||||
output(file_info, ctx.obj.get('output_format', 'table'), title="File Information")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting genesis info: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('genesis_file', type=click.Path(exists=True))
|
||||
@click.pass_context
|
||||
def hash(ctx, genesis_file):
|
||||
"""Calculate genesis hash"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
genesis_hash = generator.calculate_genesis_hash(genesis_file)
|
||||
|
||||
result = {
|
||||
"Genesis File": genesis_file,
|
||||
"Genesis Hash": genesis_hash
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error calculating genesis hash: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def templates(ctx, format):
|
||||
"""List available genesis templates"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
templates = generator.list_templates()
|
||||
|
||||
if not templates:
|
||||
output("No templates found", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
if format == 'json':
|
||||
output(templates, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
templates_data = [
|
||||
{
|
||||
"Template": template_name,
|
||||
"Description": template_info["description"],
|
||||
"Chain Type": template_info["chain_type"],
|
||||
"Purpose": template_info["purpose"]
|
||||
}
|
||||
for template_name, template_info in templates.items()
|
||||
]
|
||||
|
||||
output(templates_data, ctx.obj.get('output_format', 'table'), title="Available Templates")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error listing templates: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('template_name')
|
||||
@click.option('--output', '-o', help='Output file path')
|
||||
@click.pass_context
|
||||
def template_info(ctx, template_name, output):
|
||||
"""Show detailed information about a template"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
templates = generator.list_templates()
|
||||
|
||||
if template_name not in templates:
|
||||
error(f"Template {template_name} not found")
|
||||
raise click.Abort()
|
||||
|
||||
template_info = templates[template_name]
|
||||
|
||||
info_data = {
|
||||
"Template Name": template_name,
|
||||
"Description": template_info["description"],
|
||||
"Chain Type": template_info["chain_type"],
|
||||
"Purpose": template_info["purpose"],
|
||||
"File Path": template_info["file_path"]
|
||||
}
|
||||
|
||||
output(info_data, ctx.obj.get('output_format', 'table'), title=f"Template Information: {template_name}")
|
||||
|
||||
# Show template content if requested
|
||||
if output:
|
||||
template_path = Path(template_info["file_path"])
|
||||
if template_path.exists():
|
||||
with open(template_path, 'r') as f:
|
||||
template_content = f.read()
|
||||
|
||||
output_path = Path(output)
|
||||
output_path.write_text(template_content)
|
||||
success(f"Template content saved to {output}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting template info: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command(name="init-production")
|
||||
@click.option('--chain-id', default='ait-mainnet', show_default=True, help='Chain ID to initialize')
|
||||
@click.option('--genesis-file', default='data/genesis_prod.yaml', show_default=True, help='Path to genesis YAML (copy to /opt/aitbc/genesis_prod.yaml if needed)')
|
||||
@click.option('--force', is_flag=True, help='Overwrite existing DB (removes file if present)')
|
||||
@click.pass_context
|
||||
def init_production(ctx, chain_id, genesis_file, force):
|
||||
"""Initialize production chain DB using genesis allocations."""
|
||||
db_path = Path("/opt/aitbc/data") / chain_id / "chain.db"
|
||||
if db_path.exists() and force:
|
||||
db_path.unlink()
|
||||
python_bin = Path(__file__).resolve().parents[3] / 'apps' / 'blockchain-node' / '.venv' / 'bin' / 'python3'
|
||||
cmd = [
|
||||
str(python_bin),
|
||||
str(Path(__file__).resolve().parents[3] / 'scripts' / 'init_production_genesis.py'),
|
||||
'--chain-id', chain_id,
|
||||
]
|
||||
try:
|
||||
subprocess.run(cmd, check=True)
|
||||
success(f"Initialized production genesis for {chain_id} at {db_path}")
|
||||
except subprocess.CalledProcessError as e:
|
||||
error(f"Genesis init failed: {e}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--format', type=click.Choice(['json', 'yaml']), default='json', help='Export format')
|
||||
@click.option('--output', '-o', help='Output file path')
|
||||
@click.pass_context
|
||||
def export(ctx, chain_id, format, output):
|
||||
"""Export genesis block for a chain"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
generator = GenesisGenerator(config)
|
||||
|
||||
genesis_data = generator.export_genesis(chain_id, format)
|
||||
|
||||
if output:
|
||||
output_path = Path(output)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if format == 'yaml':
|
||||
# Parse JSON and convert to YAML
|
||||
parsed_data = json.loads(genesis_data)
|
||||
with open(output_path, 'w') as f:
|
||||
yaml.dump(parsed_data, f, default_flow_style=False, indent=2)
|
||||
else:
|
||||
output_path.write_text(genesis_data)
|
||||
|
||||
success(f"Genesis block exported to {output}")
|
||||
else:
|
||||
# Print to stdout
|
||||
if format == 'yaml':
|
||||
parsed_data = json.loads(genesis_data)
|
||||
output(yaml.dump(parsed_data, default_flow_style=False, indent=2),
|
||||
ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
output(genesis_data, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error exporting genesis block: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@genesis.command()
|
||||
@click.argument('template_name')
|
||||
@click.argument('output_file')
|
||||
@click.option('--format', type=click.Choice(['json', 'yaml']), default='yaml', help='Output format')
|
||||
@click.pass_context
|
||||
def create_template(ctx, template_name, output_file, format):
|
||||
"""Create a new genesis template"""
|
||||
try:
|
||||
# Basic template structure
|
||||
template_data = {
|
||||
"description": f"Genesis template for {template_name}",
|
||||
"genesis": {
|
||||
"chain_type": "topic",
|
||||
"purpose": template_name,
|
||||
"name": f"{template_name.title()} Chain",
|
||||
"description": f"A {template_name} chain for AITBC",
|
||||
"consensus": {
|
||||
"algorithm": "pos",
|
||||
"block_time": 5,
|
||||
"max_validators": 100,
|
||||
"authorities": []
|
||||
},
|
||||
"privacy": {
|
||||
"visibility": "public",
|
||||
"access_control": "open",
|
||||
"require_invitation": False
|
||||
},
|
||||
"parameters": {
|
||||
"max_block_size": 1048576,
|
||||
"max_gas_per_block": 10000000,
|
||||
"min_gas_price": 1000000000,
|
||||
"block_reward": "2000000000000000000"
|
||||
},
|
||||
"accounts": [],
|
||||
"contracts": []
|
||||
}
|
||||
}
|
||||
|
||||
output_path = Path(output_file)
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if format == 'yaml':
|
||||
with open(output_path, 'w') as f:
|
||||
yaml.dump(template_data, f, default_flow_style=False, indent=2)
|
||||
else:
|
||||
with open(output_path, 'w') as f:
|
||||
json.dump(template_data, f, indent=2)
|
||||
|
||||
success(f"Template created: {output_file}")
|
||||
|
||||
result = {
|
||||
"Template Name": template_name,
|
||||
"Output File": output_file,
|
||||
"Format": format,
|
||||
"Chain Type": template_data["genesis"]["chain_type"],
|
||||
"Purpose": template_data["genesis"]["purpose"]
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error creating template: {str(e)}")
|
||||
raise click.Abort()
|
||||
389
cli/commands/genesis_protection.py
Executable file
389
cli/commands/genesis_protection.py
Executable file
@@ -0,0 +1,389 @@
|
||||
"""Genesis protection and verification commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def genesis_protection():
|
||||
"""Genesis block protection and verification commands"""
|
||||
pass
|
||||
|
||||
|
||||
@genesis_protection.command()
|
||||
@click.option("--chain", required=True, help="Chain ID to verify")
|
||||
@click.option("--genesis-hash", help="Expected genesis hash for verification")
|
||||
@click.option("--force", is_flag=True, help="Force verification even if hash mismatch")
|
||||
@click.pass_context
|
||||
def verify_genesis(ctx, chain: str, genesis_hash: Optional[str], force: bool):
|
||||
"""Verify genesis block integrity for a specific chain"""
|
||||
|
||||
# Load genesis data
|
||||
genesis_file = Path.home() / ".aitbc" / "genesis_data.json"
|
||||
if not genesis_file.exists():
|
||||
error("No genesis data found. Use blockchain commands to create genesis first.")
|
||||
return
|
||||
|
||||
with open(genesis_file, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
if chain not in genesis_data:
|
||||
error(f"Genesis data for chain '{chain}' not found.")
|
||||
return
|
||||
|
||||
chain_genesis = genesis_data[chain]
|
||||
|
||||
# Calculate current genesis hash
|
||||
genesis_string = json.dumps(chain_genesis, sort_keys=True, separators=(',', ':'))
|
||||
calculated_hash = hashlib.sha256(genesis_string.encode()).hexdigest()
|
||||
|
||||
# Verification results
|
||||
verification_result = {
|
||||
"chain": chain,
|
||||
"calculated_hash": calculated_hash,
|
||||
"expected_hash": genesis_hash,
|
||||
"hash_match": genesis_hash is None or calculated_hash == genesis_hash,
|
||||
"genesis_timestamp": chain_genesis.get("timestamp"),
|
||||
"genesis_accounts": len(chain_genesis.get("accounts", [])),
|
||||
"verification_timestamp": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
if not verification_result["hash_match"] and not force:
|
||||
error(f"Genesis hash mismatch for chain '{chain}'!")
|
||||
output(verification_result)
|
||||
return
|
||||
|
||||
# Additional integrity checks
|
||||
integrity_checks = {
|
||||
"accounts_valid": all("address" in acc and "balance" in acc for acc in chain_genesis.get("accounts", [])),
|
||||
"authorities_valid": all("address" in auth and "weight" in auth for auth in chain_genesis.get("authorities", [])),
|
||||
"params_valid": "mint_per_unit" in chain_genesis.get("params", {}),
|
||||
"timestamp_valid": isinstance(chain_genesis.get("timestamp"), (int, float))
|
||||
}
|
||||
|
||||
verification_result["integrity_checks"] = integrity_checks
|
||||
verification_result["overall_valid"] = verification_result["hash_match"] and all(integrity_checks.values())
|
||||
|
||||
if verification_result["overall_valid"]:
|
||||
success(f"Genesis verification passed for chain '{chain}'")
|
||||
else:
|
||||
warning(f"Genesis verification completed with issues for chain '{chain}'")
|
||||
|
||||
output(verification_result)
|
||||
|
||||
|
||||
@genesis_protection.command()
|
||||
@click.option("--chain", required=True, help="Chain ID to get hash for")
|
||||
@click.pass_context
|
||||
def genesis_hash(ctx, chain: str):
|
||||
"""Get and display genesis block hash for a specific chain"""
|
||||
|
||||
# Load genesis data
|
||||
genesis_file = Path.home() / ".aitbc" / "genesis_data.json"
|
||||
if not genesis_file.exists():
|
||||
error("No genesis data found.")
|
||||
return
|
||||
|
||||
with open(genesis_file, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
if chain not in genesis_data:
|
||||
error(f"Genesis data for chain '{chain}' not found.")
|
||||
return
|
||||
|
||||
chain_genesis = genesis_data[chain]
|
||||
|
||||
# Calculate genesis hash
|
||||
genesis_string = json.dumps(chain_genesis, sort_keys=True, separators=(',', ':'))
|
||||
calculated_hash = hashlib.sha256(genesis_string.encode()).hexdigest()
|
||||
|
||||
# Hash information
|
||||
hash_info = {
|
||||
"chain": chain,
|
||||
"genesis_hash": calculated_hash,
|
||||
"genesis_timestamp": chain_genesis.get("timestamp"),
|
||||
"genesis_size": len(genesis_string),
|
||||
"calculated_at": datetime.utcnow().isoformat(),
|
||||
"genesis_summary": {
|
||||
"accounts": len(chain_genesis.get("accounts", [])),
|
||||
"authorities": len(chain_genesis.get("authorities", [])),
|
||||
"total_supply": sum(acc.get("balance", 0) for acc in chain_genesis.get("accounts", [])),
|
||||
"mint_per_unit": chain_genesis.get("params", {}).get("mint_per_unit")
|
||||
}
|
||||
}
|
||||
|
||||
success(f"Genesis hash for chain '{chain}': {calculated_hash}")
|
||||
output(hash_info)
|
||||
|
||||
|
||||
@genesis_protection.command()
|
||||
@click.option("--signer", required=True, help="Signer address")
|
||||
@click.option("--message", help="Message to sign")
|
||||
@click.option("--chain", help="Chain context for signature")
|
||||
@click.option("--private-key", help="Private key for signing (for demo)")
|
||||
@click.pass_context
|
||||
def verify_signature(ctx, signer: str, message: Optional[str], chain: Optional[str], private_key: Optional[str]):
|
||||
"""Verify digital signature for genesis or transactions"""
|
||||
|
||||
if not message:
|
||||
message = f"Genesis verification for {chain or 'all chains'} at {datetime.utcnow().isoformat()}"
|
||||
|
||||
# Create signature (simplified for demo)
|
||||
signature_data = f"{signer}:{message}:{chain or 'global'}"
|
||||
signature = hashlib.sha256(signature_data.encode()).hexdigest()
|
||||
|
||||
# Verification result
|
||||
verification_result = {
|
||||
"signer": signer,
|
||||
"message": message,
|
||||
"chain": chain,
|
||||
"signature": signature,
|
||||
"verification_timestamp": datetime.utcnow().isoformat(),
|
||||
"signature_valid": True # In real implementation, this would verify against actual signature
|
||||
}
|
||||
|
||||
# Add chain context if provided
|
||||
if chain:
|
||||
genesis_file = Path.home() / ".aitbc" / "genesis_data.json"
|
||||
if genesis_file.exists():
|
||||
with open(genesis_file, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
if chain in genesis_data:
|
||||
verification_result["chain_context"] = {
|
||||
"chain_exists": True,
|
||||
"genesis_timestamp": genesis_data[chain].get("timestamp"),
|
||||
"genesis_accounts": len(genesis_data[chain].get("accounts", []))
|
||||
}
|
||||
else:
|
||||
verification_result["chain_context"] = {
|
||||
"chain_exists": False
|
||||
}
|
||||
|
||||
success(f"Signature verified for signer '{signer}'")
|
||||
output(verification_result)
|
||||
|
||||
|
||||
@genesis_protection.command()
|
||||
@click.option("--all-chains", is_flag=True, help="Verify genesis across all chains")
|
||||
@click.option("--chain", help="Verify specific chain only")
|
||||
@click.option("--network-wide", is_flag=True, help="Perform network-wide genesis consensus")
|
||||
@click.pass_context
|
||||
def network_verify_genesis(ctx, all_chains: bool, chain: Optional[str], network_wide: bool):
|
||||
"""Perform network-wide genesis consensus verification"""
|
||||
|
||||
genesis_file = Path.home() / ".aitbc" / "genesis_data.json"
|
||||
if not genesis_file.exists():
|
||||
error("No genesis data found.")
|
||||
return
|
||||
|
||||
with open(genesis_file, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
# Determine which chains to verify
|
||||
chains_to_verify = []
|
||||
if all_chains:
|
||||
chains_to_verify = list(genesis_data.keys())
|
||||
elif chain:
|
||||
if chain not in genesis_data:
|
||||
error(f"Chain '{chain}' not found in genesis data.")
|
||||
return
|
||||
chains_to_verify = [chain]
|
||||
else:
|
||||
error("Must specify either --all-chains or --chain.")
|
||||
return
|
||||
|
||||
# Network verification results
|
||||
network_results = {
|
||||
"verification_type": "network_wide" if network_wide else "selective",
|
||||
"chains_verified": chains_to_verify,
|
||||
"verification_timestamp": datetime.utcnow().isoformat(),
|
||||
"chain_results": {},
|
||||
"overall_consensus": True,
|
||||
"total_chains": len(chains_to_verify)
|
||||
}
|
||||
|
||||
consensus_issues = []
|
||||
|
||||
for chain_id in chains_to_verify:
|
||||
chain_genesis = genesis_data[chain_id]
|
||||
|
||||
# Calculate chain genesis hash
|
||||
genesis_string = json.dumps(chain_genesis, sort_keys=True, separators=(',', ':'))
|
||||
calculated_hash = hashlib.sha256(genesis_string.encode()).hexdigest()
|
||||
|
||||
# Chain-specific verification
|
||||
chain_result = {
|
||||
"chain": chain_id,
|
||||
"genesis_hash": calculated_hash,
|
||||
"genesis_timestamp": chain_genesis.get("timestamp"),
|
||||
"accounts_count": len(chain_genesis.get("accounts", [])),
|
||||
"authorities_count": len(chain_genesis.get("authorities", [])),
|
||||
"integrity_checks": {
|
||||
"accounts_valid": all("address" in acc and "balance" in acc for acc in chain_genesis.get("accounts", [])),
|
||||
"authorities_valid": all("address" in auth and "weight" in auth for auth in chain_genesis.get("authorities", [])),
|
||||
"params_valid": "mint_per_unit" in chain_genesis.get("params", {}),
|
||||
"timestamp_valid": isinstance(chain_genesis.get("timestamp"), (int, float))
|
||||
},
|
||||
"chain_valid": True
|
||||
}
|
||||
|
||||
# Check chain validity
|
||||
chain_result["chain_valid"] = all(chain_result["integrity_checks"].values())
|
||||
|
||||
if not chain_result["chain_valid"]:
|
||||
consensus_issues.append(f"Chain '{chain_id}' has integrity issues")
|
||||
network_results["overall_consensus"] = False
|
||||
|
||||
network_results["chain_results"][chain_id] = chain_result
|
||||
|
||||
# Network-wide consensus summary
|
||||
network_results["consensus_summary"] = {
|
||||
"chains_valid": len([r for r in network_results["chain_results"].values() if r["chain_valid"]]),
|
||||
"chains_invalid": len([r for r in network_results["chain_results"].values() if not r["chain_valid"]]),
|
||||
"consensus_achieved": network_results["overall_consensus"],
|
||||
"issues": consensus_issues
|
||||
}
|
||||
|
||||
if network_results["overall_consensus"]:
|
||||
success(f"Network-wide genesis consensus achieved for {len(chains_to_verify)} chains")
|
||||
else:
|
||||
warning(f"Network-wide genesis consensus has issues: {len(consensus_issues)} chains with problems")
|
||||
|
||||
output(network_results)
|
||||
|
||||
|
||||
@genesis_protection.command()
|
||||
@click.option("--chain", required=True, help="Chain ID to protect")
|
||||
@click.option("--protection-level", type=click.Choice(['basic', 'standard', 'maximum']), default='standard', help="Level of protection to apply")
|
||||
@click.option("--backup", is_flag=True, help="Create backup before applying protection")
|
||||
@click.pass_context
|
||||
def protect(ctx, chain: str, protection_level: str, backup: bool):
|
||||
"""Apply protection mechanisms to genesis block"""
|
||||
|
||||
genesis_file = Path.home() / ".aitbc" / "genesis_data.json"
|
||||
if not genesis_file.exists():
|
||||
error("No genesis data found.")
|
||||
return
|
||||
|
||||
with open(genesis_file, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
if chain not in genesis_data:
|
||||
error(f"Chain '{chain}' not found in genesis data.")
|
||||
return
|
||||
|
||||
# Create backup if requested
|
||||
if backup:
|
||||
backup_file = Path.home() / ".aitbc" / f"genesis_backup_{chain}_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}.json"
|
||||
with open(backup_file, 'w') as f:
|
||||
json.dump(genesis_data, f, indent=2)
|
||||
success(f"Genesis backup created: {backup_file}")
|
||||
|
||||
# Apply protection based on level
|
||||
chain_genesis = genesis_data[chain]
|
||||
|
||||
protection_config = {
|
||||
"chain": chain,
|
||||
"protection_level": protection_level,
|
||||
"applied_at": datetime.utcnow().isoformat(),
|
||||
"protection mechanisms": []
|
||||
}
|
||||
|
||||
if protection_level in ['standard', 'maximum']:
|
||||
# Add protection metadata
|
||||
chain_genesis["protection"] = {
|
||||
"level": protection_level,
|
||||
"applied_at": protection_config["applied_at"],
|
||||
"immutable": True,
|
||||
"checksum": hashlib.sha256(json.dumps(chain_genesis, sort_keys=True).encode()).hexdigest()
|
||||
}
|
||||
protection_config["protection mechanisms"].append("immutable_metadata")
|
||||
|
||||
if protection_level == 'maximum':
|
||||
# Add additional protection measures
|
||||
chain_genesis["protection"]["network_consensus_required"] = True
|
||||
chain_genesis["protection"]["signature_verification"] = True
|
||||
chain_genesis["protection"]["audit_trail"] = True
|
||||
protection_config["protection mechanisms"].extend(["network_consensus", "signature_verification", "audit_trail"])
|
||||
|
||||
# Save protected genesis
|
||||
with open(genesis_file, 'w') as f:
|
||||
json.dump(genesis_data, f, indent=2)
|
||||
|
||||
# Create protection record
|
||||
protection_file = Path.home() / ".aitbc" / "genesis_protection.json"
|
||||
protection_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
protection_records = {}
|
||||
if protection_file.exists():
|
||||
with open(protection_file, 'r') as f:
|
||||
protection_records = json.load(f)
|
||||
|
||||
protection_records[f"{chain}_{protection_config['applied_at']}"] = protection_config
|
||||
|
||||
with open(protection_file, 'w') as f:
|
||||
json.dump(protection_records, f, indent=2)
|
||||
|
||||
success(f"Genesis protection applied to chain '{chain}' at {protection_level} level")
|
||||
output(protection_config)
|
||||
|
||||
|
||||
@genesis_protection.command()
|
||||
@click.option("--chain", help="Filter by chain ID")
|
||||
@click.pass_context
|
||||
def status(ctx, chain: Optional[str]):
|
||||
"""Get genesis protection status"""
|
||||
|
||||
genesis_file = Path.home() / ".aitbc" / "genesis_data.json"
|
||||
protection_file = Path.home() / ".aitbc" / "genesis_protection.json"
|
||||
|
||||
status_info = {
|
||||
"genesis_data_exists": genesis_file.exists(),
|
||||
"protection_records_exist": protection_file.exists(),
|
||||
"chains": {},
|
||||
"protection_summary": {
|
||||
"total_chains": 0,
|
||||
"protected_chains": 0,
|
||||
"unprotected_chains": 0
|
||||
}
|
||||
}
|
||||
|
||||
if genesis_file.exists():
|
||||
with open(genesis_file, 'r') as f:
|
||||
genesis_data = json.load(f)
|
||||
|
||||
for chain_id, chain_genesis in genesis_data.items():
|
||||
if chain and chain_id != chain:
|
||||
continue
|
||||
|
||||
chain_status = {
|
||||
"chain": chain_id,
|
||||
"protected": "protection" in chain_genesis,
|
||||
"protection_level": chain_genesis.get("protection", {}).get("level", "none"),
|
||||
"protected_at": chain_genesis.get("protection", {}).get("applied_at"),
|
||||
"genesis_timestamp": chain_genesis.get("timestamp"),
|
||||
"accounts_count": len(chain_genesis.get("accounts", []))
|
||||
}
|
||||
|
||||
status_info["chains"][chain_id] = chain_status
|
||||
status_info["protection_summary"]["total_chains"] += 1
|
||||
|
||||
if chain_status["protected"]:
|
||||
status_info["protection_summary"]["protected_chains"] += 1
|
||||
else:
|
||||
status_info["protection_summary"]["unprotected_chains"] += 1
|
||||
|
||||
if protection_file.exists():
|
||||
with open(protection_file, 'r') as f:
|
||||
protection_records = json.load(f)
|
||||
|
||||
status_info["total_protection_records"] = len(protection_records)
|
||||
status_info["latest_protection"] = max(protection_records.keys()) if protection_records else None
|
||||
|
||||
output(status_info)
|
||||
73
cli/commands/global_ai_agents.py
Executable file
73
cli/commands/global_ai_agents.py
Executable file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Global AI Agents CLI Commands for AITBC
|
||||
Commands for managing global AI agent communication and collaboration
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def global_ai_agents():
|
||||
"""Global AI agents management commands"""
|
||||
pass
|
||||
|
||||
@global_ai_agents.command()
|
||||
@click.option('--agent-id', help='Specific agent ID')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def status(agent_id, test_mode):
|
||||
"""Get AI agent network status"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo("🤖 AI Agent Network Status (test mode)")
|
||||
click.echo("📊 Total Agents: 125")
|
||||
click.echo("✅ Active Agents: 118")
|
||||
click.echo("🌍 Regions: 3")
|
||||
click.echo("⚡ Avg Response Time: 45ms")
|
||||
return
|
||||
|
||||
# Get status from service
|
||||
config = get_config()
|
||||
params = {}
|
||||
if agent_id:
|
||||
params["agent_id"] = agent_id
|
||||
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/network/status",
|
||||
params=params,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status = response.json()
|
||||
dashboard = status['dashboard']
|
||||
click.echo("🤖 AI Agent Network Status")
|
||||
click.echo(f"📊 Total Agents: {dashboard.get('total_agents', 0)}")
|
||||
click.echo(f"✅ Active Agents: {dashboard.get('active_agents', 0)}")
|
||||
click.echo(f"🌍 Regions: {dashboard.get('regions', 0)}")
|
||||
click.echo(f"⚡ Avg Response Time: {dashboard.get('avg_response_time', 0)}ms")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get status: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting status: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8018",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
global_ai_agents()
|
||||
571
cli/commands/global_infrastructure.py
Executable file
571
cli/commands/global_infrastructure.py
Executable file
@@ -0,0 +1,571 @@
|
||||
"""
|
||||
Global Infrastructure CLI Commands for AITBC
|
||||
Commands for managing global infrastructure deployment and multi-region optimization
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def global_infrastructure():
|
||||
"""Global infrastructure management commands"""
|
||||
pass
|
||||
|
||||
@global_infrastructure.command()
|
||||
@click.option('--region-id', required=True, help='Region ID (e.g., us-east-1)')
|
||||
@click.option('--name', required=True, help='Region name')
|
||||
@click.option('--location', required=True, help='Geographic location')
|
||||
@click.option('--endpoint', required=True, help='Region endpoint URL')
|
||||
@click.option('--capacity', type=int, required=True, help='Region capacity')
|
||||
@click.option('--compliance-level', default='partial', help='Compliance level (full, partial, basic)')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def deploy_region(region_id, name, location, endpoint, capacity, compliance_level, test_mode):
|
||||
"""Deploy a new global region"""
|
||||
try:
|
||||
region_data = {
|
||||
"region_id": region_id,
|
||||
"name": name,
|
||||
"location": location,
|
||||
"endpoint": endpoint,
|
||||
"status": "deploying",
|
||||
"capacity": capacity,
|
||||
"current_load": 0,
|
||||
"latency_ms": 0,
|
||||
"compliance_level": compliance_level,
|
||||
"deployed_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
if test_mode:
|
||||
click.echo(f"🌍 Region deployment started (test mode)")
|
||||
click.echo(f"🆔 Region ID: {region_id}")
|
||||
click.echo(f"📍 Name: {name}")
|
||||
click.echo(f"🗺️ Location: {location}")
|
||||
click.echo(f"🔗 Endpoint: {endpoint}")
|
||||
click.echo(f"💾 Capacity: {capacity}")
|
||||
click.echo(f"⚖️ Compliance Level: {compliance_level}")
|
||||
click.echo(f"✅ Region deployed successfully")
|
||||
return
|
||||
|
||||
# Send to infrastructure service
|
||||
config = get_config()
|
||||
response = requests.post(
|
||||
f"{config.coordinator_url}/api/v1/regions/register",
|
||||
json=region_data,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
click.echo(f"🌍 Region deployment started successfully")
|
||||
click.echo(f"🆔 Region ID: {result['region_id']}")
|
||||
click.echo(f"📍 Name: {result['name']}")
|
||||
click.echo(f"🗺️ Location: {result['location']}")
|
||||
click.echo(f"🔗 Endpoint: {result['endpoint']}")
|
||||
click.echo(f"💾 Capacity: {result['capacity']}")
|
||||
click.echo(f"⚖️ Compliance Level: {result['compliance_level']}")
|
||||
click.echo(f"📅 Deployed At: {result['created_at']}")
|
||||
else:
|
||||
click.echo(f"❌ Region deployment failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error deploying region: {str(e)}", err=True)
|
||||
|
||||
@global_infrastructure.command()
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def list_regions(test_mode):
|
||||
"""List all deployed regions"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock regions data
|
||||
mock_regions = [
|
||||
{
|
||||
"region_id": "us-east-1",
|
||||
"name": "US East (N. Virginia)",
|
||||
"location": "North America",
|
||||
"endpoint": "https://us-east-1.api.aitbc.dev",
|
||||
"status": "active",
|
||||
"capacity": 10000,
|
||||
"current_load": 3500,
|
||||
"latency_ms": 45,
|
||||
"compliance_level": "full",
|
||||
"deployed_at": "2024-01-15T10:30:00Z"
|
||||
},
|
||||
{
|
||||
"region_id": "eu-west-1",
|
||||
"name": "EU West (Ireland)",
|
||||
"location": "Europe",
|
||||
"endpoint": "https://eu-west-1.api.aitbc.dev",
|
||||
"status": "active",
|
||||
"capacity": 8000,
|
||||
"current_load": 2800,
|
||||
"latency_ms": 38,
|
||||
"compliance_level": "full",
|
||||
"deployed_at": "2024-01-20T14:20:00Z"
|
||||
},
|
||||
{
|
||||
"region_id": "ap-southeast-1",
|
||||
"name": "AP Southeast (Singapore)",
|
||||
"location": "Asia Pacific",
|
||||
"endpoint": "https://ap-southeast-1.api.aitbc.dev",
|
||||
"status": "active",
|
||||
"capacity": 6000,
|
||||
"current_load": 2200,
|
||||
"latency_ms": 62,
|
||||
"compliance_level": "partial",
|
||||
"deployed_at": "2024-02-01T09:15:00Z"
|
||||
}
|
||||
]
|
||||
|
||||
click.echo("🌍 Global Infrastructure Regions:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for region in mock_regions:
|
||||
status_icon = "✅" if region['status'] == 'active' else "⏳"
|
||||
load_percentage = (region['current_load'] / region['capacity']) * 100
|
||||
compliance_icon = "🔒" if region['compliance_level'] == 'full' else "⚠️"
|
||||
|
||||
click.echo(f"{status_icon} {region['name']} ({region['region_id']})")
|
||||
click.echo(f" 🗺️ Location: {region['location']}")
|
||||
click.echo(f" 🔗 Endpoint: {region['endpoint']}")
|
||||
click.echo(f" 💾 Load: {region['current_load']}/{region['capacity']} ({load_percentage:.1f}%)")
|
||||
click.echo(f" ⚡ Latency: {region['latency_ms']}ms")
|
||||
click.echo(f" {compliance_icon} Compliance: {region['compliance_level']}")
|
||||
click.echo(f" 📅 Deployed: {region['deployed_at']}")
|
||||
click.echo("")
|
||||
|
||||
return
|
||||
|
||||
# Fetch from infrastructure service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/regions",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
regions = result.get("regions", [])
|
||||
|
||||
click.echo("🌍 Global Infrastructure Regions:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for region in regions:
|
||||
status_icon = "✅" if region['status'] == 'active' else "⏳"
|
||||
load_percentage = (region['current_load'] / region['capacity']) * 100
|
||||
compliance_icon = "🔒" if region['compliance_level'] == 'full' else "⚠️"
|
||||
|
||||
click.echo(f"{status_icon} {region['name']} ({region['region_id']})")
|
||||
click.echo(f" 🗺️ Location: {region['location']}")
|
||||
click.echo(f" 🔗 Endpoint: {region['endpoint']}")
|
||||
click.echo(f" 💾 Load: {region['current_load']}/{region['capacity']} ({load_percentage:.1f}%)")
|
||||
click.echo(f" ⚡ Latency: {region['latency_ms']}ms")
|
||||
click.echo(f" {compliance_icon} Compliance: {region['compliance_level']}")
|
||||
click.echo(f" 📅 Deployed: {region['deployed_at']}")
|
||||
click.echo("")
|
||||
else:
|
||||
click.echo(f"❌ Failed to list regions: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error listing regions: {str(e)}", err=True)
|
||||
|
||||
@global_infrastructure.command()
|
||||
@click.argument('region_id')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def region_status(region_id, test_mode):
|
||||
"""Get detailed status of a specific region"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock region status
|
||||
mock_region = {
|
||||
"region_id": region_id,
|
||||
"name": "US East (N. Virginia)",
|
||||
"location": "North America",
|
||||
"endpoint": "https://us-east-1.api.aitbc.dev",
|
||||
"status": "active",
|
||||
"capacity": 10000,
|
||||
"current_load": 3500,
|
||||
"latency_ms": 45,
|
||||
"compliance_level": "full",
|
||||
"deployed_at": "2024-01-15T10:30:00Z",
|
||||
"last_health_check": "2024-03-01T14:20:00Z",
|
||||
"services_deployed": ["exchange-integration", "trading-engine", "plugin-registry"],
|
||||
"performance_metrics": [
|
||||
{
|
||||
"timestamp": "2024-03-01T14:20:00Z",
|
||||
"cpu_usage": 35.5,
|
||||
"memory_usage": 62.3,
|
||||
"network_io": 1024.5,
|
||||
"response_time_ms": 45.2
|
||||
}
|
||||
],
|
||||
"compliance_data": {
|
||||
"certifications": ["SOC2", "ISO27001", "GDPR"],
|
||||
"data_residency": "compliant",
|
||||
"last_audit": "2024-02-15T10:30:00Z",
|
||||
"next_audit": "2024-05-15T10:30:00Z"
|
||||
}
|
||||
}
|
||||
|
||||
click.echo(f"🌍 Region Status: {mock_region['name']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"🆔 Region ID: {mock_region['region_id']}")
|
||||
click.echo(f"🗺️ Location: {mock_region['location']}")
|
||||
click.echo(f"🔗 Endpoint: {mock_region['endpoint']}")
|
||||
click.echo(f"📊 Status: {mock_region['status']}")
|
||||
click.echo(f"💾 Capacity: {mock_region['capacity']}")
|
||||
click.echo(f"📈 Current Load: {mock_region['current_load']}")
|
||||
click.echo(f"⚡ Latency: {mock_region['latency_ms']}ms")
|
||||
click.echo(f"⚖️ Compliance Level: {mock_region['compliance_level']}")
|
||||
click.echo(f"📅 Deployed At: {mock_region['deployed_at']}")
|
||||
click.echo(f"🔍 Last Health Check: {mock_region['last_health_check']}")
|
||||
click.echo("")
|
||||
click.echo("🔧 Deployed Services:")
|
||||
for service in mock_region['services_deployed']:
|
||||
click.echo(f" ✅ {service}")
|
||||
click.echo("")
|
||||
click.echo("📊 Performance Metrics:")
|
||||
latest_metric = mock_region['performance_metrics'][-1]
|
||||
click.echo(f" 💻 CPU Usage: {latest_metric['cpu_usage']}%")
|
||||
click.echo(f" 🧠 Memory Usage: {latest_metric['memory_usage']}%")
|
||||
click.echo(f" 🌐 Network I/O: {latest_metric['network_io']} MB/s")
|
||||
click.echo(f" ⚡ Response Time: {latest_metric['response_time_ms']}ms")
|
||||
click.echo("")
|
||||
click.echo("⚖️ Compliance Information:")
|
||||
compliance = mock_region['compliance_data']
|
||||
click.echo(f" 📜 Certifications: {', '.join(compliance['certifications'])}")
|
||||
click.echo(f" 🏠 Data Residency: {compliance['data_residency']}")
|
||||
click.echo(f" 🔍 Last Audit: {compliance['last_audit']}")
|
||||
click.echo(f" 📅 Next Audit: {compliance['next_audit']}")
|
||||
return
|
||||
|
||||
# Fetch from infrastructure service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/regions/{region_id}",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
region = response.json()
|
||||
|
||||
click.echo(f"🌍 Region Status: {region['name']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"🆔 Region ID: {region['region_id']}")
|
||||
click.echo(f"🗺️ Location: {region['location']}")
|
||||
click.echo(f"🔗 Endpoint: {region['endpoint']}")
|
||||
click.echo(f"📊 Status: {region['status']}")
|
||||
click.echo(f"💾 Capacity: {region['capacity']}")
|
||||
click.echo(f"📈 Current Load: {region['current_load']}")
|
||||
click.echo(f"⚡ Latency: {region['latency_ms']}ms")
|
||||
click.echo(f"⚖️ Compliance Level: {region['compliance_level']}")
|
||||
click.echo(f"📅 Deployed At: {region['deployed_at']}")
|
||||
click.echo(f"🔍 Last Health Check: {region.get('last_health_check', 'N/A')}")
|
||||
else:
|
||||
click.echo(f"❌ Region not found: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting region status: {str(e)}", err=True)
|
||||
|
||||
@global_infrastructure.command()
|
||||
@click.argument('service_name')
|
||||
@click.option('--target-regions', help='Target regions (comma-separated)')
|
||||
@click.option('--strategy', default='rolling', help='Deployment strategy (rolling, blue_green, canary)')
|
||||
@click.option('--configuration', help='Deployment configuration (JSON)')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def deploy_service(service_name, target_regions, strategy, configuration, test_mode):
|
||||
"""Deploy a service to multiple regions"""
|
||||
try:
|
||||
# Parse target regions
|
||||
regions = target_regions.split(',') if target_regions else ["us-east-1", "eu-west-1"]
|
||||
|
||||
# Parse configuration
|
||||
config_data = {}
|
||||
if configuration:
|
||||
config_data = json.loads(configuration)
|
||||
|
||||
deployment_data = {
|
||||
"service_name": service_name,
|
||||
"target_regions": regions,
|
||||
"configuration": config_data,
|
||||
"deployment_strategy": strategy,
|
||||
"health_checks": ["/health", "/api/health"],
|
||||
"created_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
if test_mode:
|
||||
click.echo(f"🚀 Service deployment started (test mode)")
|
||||
click.echo(f"📦 Service: {service_name}")
|
||||
click.echo(f"🌍 Target Regions: {', '.join(regions)}")
|
||||
click.echo(f"📋 Strategy: {strategy}")
|
||||
click.echo(f"⚙️ Configuration: {config_data or 'Default'}")
|
||||
click.echo(f"✅ Deployment completed successfully")
|
||||
return
|
||||
|
||||
# Send to infrastructure service
|
||||
config = get_config()
|
||||
response = requests.post(
|
||||
f"{config.coordinator_url}/api/v1/deployments/create",
|
||||
json=deployment_data,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
click.echo(f"🚀 Service deployment started successfully")
|
||||
click.echo(f"📦 Service: {service_name}")
|
||||
click.echo(f"🆔 Deployment ID: {result['deployment_id']}")
|
||||
click.echo(f"🌍 Target Regions: {', '.join(result['target_regions'])}")
|
||||
click.echo(f"📋 Strategy: {result['deployment_strategy']}")
|
||||
click.echo(f"📅 Created At: {result['created_at']}")
|
||||
else:
|
||||
click.echo(f"❌ Service deployment failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error deploying service: {str(e)}", err=True)
|
||||
|
||||
@global_infrastructure.command()
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def dashboard(test_mode):
|
||||
"""View global infrastructure dashboard"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock dashboard data
|
||||
mock_dashboard = {
|
||||
"infrastructure": {
|
||||
"total_regions": 3,
|
||||
"active_regions": 3,
|
||||
"total_capacity": 24000,
|
||||
"current_load": 8500,
|
||||
"utilization_percentage": 35.4,
|
||||
"average_latency_ms": 48.3
|
||||
},
|
||||
"deployments": {
|
||||
"total": 15,
|
||||
"pending": 2,
|
||||
"in_progress": 1,
|
||||
"completed": 12,
|
||||
"failed": 0
|
||||
},
|
||||
"performance": {
|
||||
"us-east-1": {
|
||||
"cpu_usage": 35.5,
|
||||
"memory_usage": 62.3,
|
||||
"response_time_ms": 45.2
|
||||
},
|
||||
"eu-west-1": {
|
||||
"cpu_usage": 28.7,
|
||||
"memory_usage": 55.1,
|
||||
"response_time_ms": 38.9
|
||||
},
|
||||
"ap-southeast-1": {
|
||||
"cpu_usage": 42.1,
|
||||
"memory_usage": 68.9,
|
||||
"response_time_ms": 62.3
|
||||
}
|
||||
},
|
||||
"compliance": {
|
||||
"compliant_regions": 2,
|
||||
"partial_compliance": 1,
|
||||
"total_audits": 6,
|
||||
"passed_audits": 5,
|
||||
"failed_audits": 1
|
||||
}
|
||||
}
|
||||
|
||||
infra = mock_dashboard['infrastructure']
|
||||
deployments = mock_dashboard['deployments']
|
||||
performance = mock_dashboard['performance']
|
||||
compliance = mock_dashboard['compliance']
|
||||
|
||||
click.echo("🌍 Global Infrastructure Dashboard")
|
||||
click.echo("=" * 60)
|
||||
click.echo("📊 Infrastructure Overview:")
|
||||
click.echo(f" 🌍 Total Regions: {infra['total_regions']}")
|
||||
click.echo(f" ✅ Active Regions: {infra['active_regions']}")
|
||||
click.echo(f" 💾 Total Capacity: {infra['total_capacity']}")
|
||||
click.echo(f" 📈 Current Load: {infra['current_load']}")
|
||||
click.echo(f" 📊 Utilization: {infra['utilization_percentage']:.1f}%")
|
||||
click.echo(f" ⚡ Avg Latency: {infra['average_latency_ms']}ms")
|
||||
click.echo("")
|
||||
click.echo("🚀 Deployment Status:")
|
||||
click.echo(f" 📦 Total Deployments: {deployments['total']}")
|
||||
click.echo(f" ⏳ Pending: {deployments['pending']}")
|
||||
click.echo(f" 🔄 In Progress: {deployments['in_progress']}")
|
||||
click.echo(f" ✅ Completed: {deployments['completed']}")
|
||||
click.echo(f" ❌ Failed: {deployments['failed']}")
|
||||
click.echo("")
|
||||
click.echo("⚡ Performance Metrics:")
|
||||
for region_id, metrics in performance.items():
|
||||
click.echo(f" 🌍 {region_id}:")
|
||||
click.echo(f" 💻 CPU: {metrics['cpu_usage']}%")
|
||||
click.echo(f" 🧠 Memory: {metrics['memory_usage']}%")
|
||||
click.echo(f" ⚡ Response: {metrics['response_time_ms']}ms")
|
||||
click.echo("")
|
||||
click.echo("⚖️ Compliance Status:")
|
||||
click.echo(f" 🔒 Fully Compliant: {compliance['compliant_regions']}")
|
||||
click.echo(f" ⚠️ Partial Compliance: {compliance['partial_compliance']}")
|
||||
click.echo(f" 🔍 Total Audits: {compliance['total_audits']}")
|
||||
click.echo(f" ✅ Passed: {compliance['passed_audits']}")
|
||||
click.echo(f" ❌ Failed: {compliance['failed_audits']}")
|
||||
return
|
||||
|
||||
# Fetch from infrastructure service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/global/dashboard",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
dashboard = response.json()
|
||||
infra = dashboard['dashboard']['infrastructure']
|
||||
deployments = dashboard['dashboard']['deployments']
|
||||
performance = dashboard['dashboard'].get('performance', {})
|
||||
compliance = dashboard['dashboard'].get('compliance', {})
|
||||
|
||||
click.echo("🌍 Global Infrastructure Dashboard")
|
||||
click.echo("=" * 60)
|
||||
click.echo("📊 Infrastructure Overview:")
|
||||
click.echo(f" 🌍 Total Regions: {infra['total_regions']}")
|
||||
click.echo(f" ✅ Active Regions: {infra['active_regions']}")
|
||||
click.echo(f" 💾 Total Capacity: {infra['total_capacity']}")
|
||||
click.echo(f" 📈 Current Load: {infra['current_load']}")
|
||||
click.echo(f" 📊 Utilization: {infra['utilization_percentage']:.1f}%")
|
||||
click.echo(f" ⚡ Avg Latency: {infra['average_latency_ms']}ms")
|
||||
click.echo("")
|
||||
click.echo("🚀 Deployment Status:")
|
||||
click.echo(f" 📦 Total Deployments: {deployments['total']}")
|
||||
click.echo(f" ⏳ Pending: {deployments['pending']}")
|
||||
click.echo(f" 🔄 In Progress: {deployments['in_progress']}")
|
||||
click.echo(f" ✅ Completed: {deployments['completed']}")
|
||||
click.echo(f" ❌ Failed: {deployments['failed']}")
|
||||
|
||||
if performance:
|
||||
click.echo("")
|
||||
click.echo("⚡ Performance Metrics:")
|
||||
for region_id, metrics in performance.items():
|
||||
click.echo(f" 🌍 {region_id}:")
|
||||
click.echo(f" 💻 CPU: {metrics.get('cpu_usage', 0)}%")
|
||||
click.echo(f" 🧠 Memory: {metrics.get('memory_usage', 0)}%")
|
||||
click.echo(f" ⚡ Response: {metrics.get('response_time_ms', 0)}ms")
|
||||
|
||||
if compliance:
|
||||
click.echo("")
|
||||
click.echo("⚖️ Compliance Status:")
|
||||
click.echo(f" 🔒 Fully Compliant: {compliance.get('compliant_regions', 0)}")
|
||||
click.echo(f" ⚠️ Partial Compliance: {compliance.get('partial_compliance', 0)}")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get dashboard: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting dashboard: {str(e)}", err=True)
|
||||
|
||||
@global_infrastructure.command()
|
||||
@click.argument('deployment_id')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def deployment_status(deployment_id, test_mode):
|
||||
"""Get deployment status"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock deployment status
|
||||
mock_deployment = {
|
||||
"deployment_id": deployment_id,
|
||||
"service_name": "trading-engine",
|
||||
"target_regions": ["us-east-1", "eu-west-1"],
|
||||
"status": "completed",
|
||||
"deployment_strategy": "rolling",
|
||||
"created_at": "2024-03-01T10:30:00Z",
|
||||
"started_at": "2024-03-01T10:31:00Z",
|
||||
"completed_at": "2024-03-01T10:45:00Z",
|
||||
"deployment_progress": {
|
||||
"us-east-1": {
|
||||
"status": "completed",
|
||||
"started_at": "2024-03-01T10:31:00Z",
|
||||
"completed_at": "2024-03-01T10:38:00Z",
|
||||
"progress": 100
|
||||
},
|
||||
"eu-west-1": {
|
||||
"status": "completed",
|
||||
"started_at": "2024-03-01T10:38:00Z",
|
||||
"completed_at": "2024-03-01T10:45:00Z",
|
||||
"progress": 100
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
click.echo(f"🚀 Deployment Status: {mock_deployment['deployment_id']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"📦 Service: {mock_deployment['service_name']}")
|
||||
click.echo(f"🌍 Target Regions: {', '.join(mock_deployment['target_regions'])}")
|
||||
click.echo(f"📋 Strategy: {mock_deployment['deployment_strategy']}")
|
||||
click.echo(f"📊 Status: {mock_deployment['status']}")
|
||||
click.echo(f"📅 Created: {mock_deployment['created_at']}")
|
||||
click.echo(f"🚀 Started: {mock_deployment['started_at']}")
|
||||
click.echo(f"✅ Completed: {mock_deployment['completed_at']}")
|
||||
click.echo("")
|
||||
click.echo("📈 Progress by Region:")
|
||||
for region_id, progress in mock_deployment['deployment_progress'].items():
|
||||
status_icon = "✅" if progress['status'] == 'completed' else "🔄"
|
||||
click.echo(f" {status_icon} {region_id}: {progress['progress']}% ({progress['status']})")
|
||||
return
|
||||
|
||||
# Fetch from infrastructure service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/deployments/{deployment_id}",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
deployment = response.json()
|
||||
|
||||
click.echo(f"🚀 Deployment Status: {deployment['deployment_id']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"📦 Service: {deployment['service_name']}")
|
||||
click.echo(f"🌍 Target Regions: {', '.join(deployment['target_regions'])}")
|
||||
click.echo(f"📋 Strategy: {deployment['deployment_strategy']}")
|
||||
click.echo(f"📊 Status: {deployment['status']}")
|
||||
click.echo(f"📅 Created: {deployment['created_at']}")
|
||||
|
||||
if deployment.get('started_at'):
|
||||
click.echo(f"🚀 Started: {deployment['started_at']}")
|
||||
if deployment.get('completed_at'):
|
||||
click.echo(f"✅ Completed: {deployment['completed_at']}")
|
||||
|
||||
if deployment.get('deployment_progress'):
|
||||
click.echo("")
|
||||
click.echo("📈 Progress by Region:")
|
||||
for region_id, progress in deployment['deployment_progress'].items():
|
||||
status_icon = "✅" if progress['status'] == 'completed' else "🔄"
|
||||
click.echo(f" {status_icon} {region_id}: {progress['progress']}% ({progress['status']})")
|
||||
else:
|
||||
click.echo(f"❌ Deployment not found: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting deployment status: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8017",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
global_infrastructure()
|
||||
253
cli/commands/governance.py
Executable file
253
cli/commands/governance.py
Executable file
@@ -0,0 +1,253 @@
|
||||
"""Governance commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from datetime import datetime, timedelta
|
||||
from utils import output, error, success
|
||||
|
||||
|
||||
GOVERNANCE_DIR = Path.home() / ".aitbc" / "governance"
|
||||
|
||||
|
||||
def _ensure_governance_dir():
|
||||
GOVERNANCE_DIR.mkdir(parents=True, exist_ok=True)
|
||||
proposals_file = GOVERNANCE_DIR / "proposals.json"
|
||||
if not proposals_file.exists():
|
||||
with open(proposals_file, "w") as f:
|
||||
json.dump({"proposals": []}, f, indent=2)
|
||||
return proposals_file
|
||||
|
||||
|
||||
def _load_proposals():
|
||||
proposals_file = _ensure_governance_dir()
|
||||
with open(proposals_file) as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def _save_proposals(data):
|
||||
proposals_file = _ensure_governance_dir()
|
||||
with open(proposals_file, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
|
||||
@click.group()
|
||||
def governance():
|
||||
"""Governance proposals and voting"""
|
||||
pass
|
||||
|
||||
|
||||
@governance.command()
|
||||
@click.argument("title")
|
||||
@click.option("--description", required=True, help="Proposal description")
|
||||
@click.option("--type", "proposal_type", type=click.Choice(["parameter_change", "feature_toggle", "funding", "general"]), default="general", help="Proposal type")
|
||||
@click.option("--parameter", help="Parameter to change (for parameter_change type)")
|
||||
@click.option("--value", help="New value (for parameter_change type)")
|
||||
@click.option("--amount", type=float, help="Funding amount (for funding type)")
|
||||
@click.option("--duration", type=int, default=7, help="Voting duration in days")
|
||||
@click.pass_context
|
||||
def propose(ctx, title: str, description: str, proposal_type: str,
|
||||
parameter: Optional[str], value: Optional[str],
|
||||
amount: Optional[float], duration: int):
|
||||
"""Create a governance proposal"""
|
||||
import secrets
|
||||
|
||||
data = _load_proposals()
|
||||
proposal_id = f"prop_{secrets.token_hex(6)}"
|
||||
now = datetime.now()
|
||||
|
||||
proposal = {
|
||||
"id": proposal_id,
|
||||
"title": title,
|
||||
"description": description,
|
||||
"type": proposal_type,
|
||||
"proposer": os.environ.get("USER", "unknown"),
|
||||
"created_at": now.isoformat(),
|
||||
"voting_ends": (now + timedelta(days=duration)).isoformat(),
|
||||
"duration_days": duration,
|
||||
"status": "active",
|
||||
"votes": {"for": 0, "against": 0, "abstain": 0},
|
||||
"voters": [],
|
||||
}
|
||||
|
||||
if proposal_type == "parameter_change":
|
||||
proposal["parameter"] = parameter
|
||||
proposal["new_value"] = value
|
||||
elif proposal_type == "funding":
|
||||
proposal["amount"] = amount
|
||||
|
||||
data["proposals"].append(proposal)
|
||||
_save_proposals(data)
|
||||
|
||||
success(f"Proposal '{title}' created: {proposal_id}")
|
||||
output({
|
||||
"proposal_id": proposal_id,
|
||||
"title": title,
|
||||
"type": proposal_type,
|
||||
"status": "active",
|
||||
"voting_ends": proposal["voting_ends"],
|
||||
"duration_days": duration
|
||||
}, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
|
||||
@governance.command()
|
||||
@click.argument("proposal_id")
|
||||
@click.argument("choice", type=click.Choice(["for", "against", "abstain"]))
|
||||
@click.option("--voter", default=None, help="Voter identity (defaults to $USER)")
|
||||
@click.option("--weight", type=float, default=1.0, help="Vote weight")
|
||||
@click.pass_context
|
||||
def vote(ctx, proposal_id: str, choice: str, voter: Optional[str], weight: float):
|
||||
"""Cast a vote on a proposal"""
|
||||
data = _load_proposals()
|
||||
voter = voter or os.environ.get("USER", "unknown")
|
||||
|
||||
proposal = next((p for p in data["proposals"] if p["id"] == proposal_id), None)
|
||||
if not proposal:
|
||||
error(f"Proposal '{proposal_id}' not found")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
if proposal["status"] != "active":
|
||||
error(f"Proposal is '{proposal['status']}', not active")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
# Check if voting period has ended
|
||||
voting_ends = datetime.fromisoformat(proposal["voting_ends"])
|
||||
if datetime.now() > voting_ends:
|
||||
proposal["status"] = "closed"
|
||||
_save_proposals(data)
|
||||
error("Voting period has ended")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
# Check if already voted
|
||||
if voter in proposal["voters"]:
|
||||
error(f"'{voter}' has already voted on this proposal")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
proposal["votes"][choice] += weight
|
||||
proposal["voters"].append(voter)
|
||||
_save_proposals(data)
|
||||
|
||||
total_votes = sum(proposal["votes"].values())
|
||||
success(f"Vote recorded: {choice} (weight: {weight})")
|
||||
output({
|
||||
"proposal_id": proposal_id,
|
||||
"voter": voter,
|
||||
"choice": choice,
|
||||
"weight": weight,
|
||||
"current_tally": proposal["votes"],
|
||||
"total_votes": total_votes
|
||||
}, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
|
||||
@governance.command(name="list")
|
||||
@click.option("--status", type=click.Choice(["active", "closed", "approved", "rejected", "all"]), default="all", help="Filter by status")
|
||||
@click.option("--type", "proposal_type", help="Filter by proposal type")
|
||||
@click.option("--limit", type=int, default=20, help="Max proposals to show")
|
||||
@click.pass_context
|
||||
def list_proposals(ctx, status: str, proposal_type: Optional[str], limit: int):
|
||||
"""List governance proposals"""
|
||||
data = _load_proposals()
|
||||
proposals = data["proposals"]
|
||||
|
||||
# Auto-close expired proposals
|
||||
now = datetime.now()
|
||||
for p in proposals:
|
||||
if p["status"] == "active":
|
||||
voting_ends = datetime.fromisoformat(p["voting_ends"])
|
||||
if now > voting_ends:
|
||||
total = sum(p["votes"].values())
|
||||
if total > 0 and p["votes"]["for"] > p["votes"]["against"]:
|
||||
p["status"] = "approved"
|
||||
else:
|
||||
p["status"] = "rejected"
|
||||
_save_proposals(data)
|
||||
|
||||
# Filter
|
||||
if status != "all":
|
||||
proposals = [p for p in proposals if p["status"] == status]
|
||||
if proposal_type:
|
||||
proposals = [p for p in proposals if p["type"] == proposal_type]
|
||||
|
||||
proposals = proposals[-limit:]
|
||||
|
||||
if not proposals:
|
||||
output({"message": "No proposals found", "filter": status}, ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
summary = [{
|
||||
"id": p["id"],
|
||||
"title": p["title"],
|
||||
"type": p["type"],
|
||||
"status": p["status"],
|
||||
"votes_for": p["votes"]["for"],
|
||||
"votes_against": p["votes"]["against"],
|
||||
"votes_abstain": p["votes"]["abstain"],
|
||||
"created_at": p["created_at"]
|
||||
} for p in proposals]
|
||||
|
||||
output(summary, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
|
||||
@governance.command()
|
||||
@click.argument("proposal_id")
|
||||
@click.pass_context
|
||||
def result(ctx, proposal_id: str):
|
||||
"""Show voting results for a proposal"""
|
||||
data = _load_proposals()
|
||||
|
||||
proposal = next((p for p in data["proposals"] if p["id"] == proposal_id), None)
|
||||
if not proposal:
|
||||
error(f"Proposal '{proposal_id}' not found")
|
||||
ctx.exit(1)
|
||||
return
|
||||
|
||||
# Auto-close if expired
|
||||
now = datetime.now()
|
||||
if proposal["status"] == "active":
|
||||
voting_ends = datetime.fromisoformat(proposal["voting_ends"])
|
||||
if now > voting_ends:
|
||||
total = sum(proposal["votes"].values())
|
||||
if total > 0 and proposal["votes"]["for"] > proposal["votes"]["against"]:
|
||||
proposal["status"] = "approved"
|
||||
else:
|
||||
proposal["status"] = "rejected"
|
||||
_save_proposals(data)
|
||||
|
||||
votes = proposal["votes"]
|
||||
total = sum(votes.values())
|
||||
pct_for = (votes["for"] / total * 100) if total > 0 else 0
|
||||
pct_against = (votes["against"] / total * 100) if total > 0 else 0
|
||||
|
||||
result_data = {
|
||||
"proposal_id": proposal["id"],
|
||||
"title": proposal["title"],
|
||||
"type": proposal["type"],
|
||||
"status": proposal["status"],
|
||||
"proposer": proposal["proposer"],
|
||||
"created_at": proposal["created_at"],
|
||||
"voting_ends": proposal["voting_ends"],
|
||||
"votes_for": votes["for"],
|
||||
"votes_against": votes["against"],
|
||||
"votes_abstain": votes["abstain"],
|
||||
"total_votes": total,
|
||||
"pct_for": round(pct_for, 1),
|
||||
"pct_against": round(pct_against, 1),
|
||||
"voter_count": len(proposal["voters"]),
|
||||
"outcome": proposal["status"]
|
||||
}
|
||||
|
||||
if proposal.get("parameter"):
|
||||
result_data["parameter"] = proposal["parameter"]
|
||||
result_data["new_value"] = proposal.get("new_value")
|
||||
if proposal.get("amount"):
|
||||
result_data["amount"] = proposal["amount"]
|
||||
|
||||
output(result_data, ctx.obj.get('output_format', 'table'))
|
||||
67
cli/commands/keystore.py
Normal file
67
cli/commands/keystore.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import click
|
||||
import importlib.util
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def _load_keystore_script():
|
||||
"""Dynamically load the top-level scripts/keystore.py module."""
|
||||
root = Path(__file__).resolve().parents[3] # /opt/aitbc
|
||||
ks_path = root / "scripts" / "keystore.py"
|
||||
spec = importlib.util.spec_from_file_location("aitbc_scripts_keystore", ks_path)
|
||||
if spec is None or spec.loader is None:
|
||||
raise ImportError(f"Unable to load keystore script from {ks_path}")
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
return module
|
||||
|
||||
@click.group()
|
||||
def keystore():
|
||||
"""Keystore operations (create wallets/keystores)."""
|
||||
pass
|
||||
|
||||
@keystore.command()
|
||||
@click.option("--address", required=True, help="Wallet address (id) to create")
|
||||
@click.option(
|
||||
"--password-file",
|
||||
default="/opt/aitbc/data/keystore/.password",
|
||||
show_default=True,
|
||||
type=click.Path(exists=True, dir_okay=False),
|
||||
help="Path to password file",
|
||||
)
|
||||
@click.option(
|
||||
"--output",
|
||||
default="/opt/aitbc/data/keystore",
|
||||
show_default=True,
|
||||
help="Directory to write keystore files",
|
||||
)
|
||||
@click.option(
|
||||
"--force",
|
||||
is_flag=True,
|
||||
help="Overwrite existing keystore file if present",
|
||||
)
|
||||
@click.pass_context
|
||||
def create(ctx, address: str, password_file: str, output: str, force: bool):
|
||||
"""Create an encrypted keystore for the given address.
|
||||
|
||||
Examples:
|
||||
aitbc keystore create --address aitbc1genesis
|
||||
aitbc keystore create --address aitbc1treasury --password-file keystore/.password --output keystore
|
||||
"""
|
||||
pwd_path = Path(password_file)
|
||||
with open(pwd_path, "r", encoding="utf-8") as f:
|
||||
password = f.read().strip()
|
||||
out_dir = Path(output) if output else Path("/opt/aitbc/data/keystore")
|
||||
out_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
ks_module = _load_keystore_script()
|
||||
ks_module.create_keystore(address=address, password=password, keystore_dir=out_dir, force=force)
|
||||
click.echo(f"Created keystore for {address} at {out_dir}")
|
||||
|
||||
|
||||
# Helper so other commands (genesis) can reuse the same logic
|
||||
def create_keystore_via_script(address: str, password_file: str = "/opt/aitbc/data/keystore/.password", output_dir: str = "/opt/aitbc/data/keystore", force: bool = False):
|
||||
pwd = Path(password_file).read_text(encoding="utf-8").strip()
|
||||
out_dir = Path(output_dir)
|
||||
out_dir.mkdir(parents=True, exist_ok=True)
|
||||
ks_module = _load_keystore_script()
|
||||
ks_module.create_keystore(address=address, password=pwd, keystore_dir=out_dir, force=force)
|
||||
796
cli/commands/market_maker.py
Executable file
796
cli/commands/market_maker.py
Executable file
@@ -0,0 +1,796 @@
|
||||
"""Market making commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import uuid
|
||||
import httpx
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime, timedelta
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def market_maker():
|
||||
"""Market making bot management commands"""
|
||||
pass
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name")
|
||||
@click.option("--pair", required=True, help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--spread", type=float, default=0.005, help="Bid-ask spread (as percentage)")
|
||||
@click.option("--depth", type=float, default=1000000, help="Order book depth amount")
|
||||
@click.option("--max-order-size", type=float, default=1000, help="Maximum order size")
|
||||
@click.option("--min-order-size", type=float, default=10, help="Minimum order size")
|
||||
@click.option("--target-inventory", type=float, default=50000, help="Target inventory balance")
|
||||
@click.option("--rebalance-threshold", type=float, default=0.1, help="Inventory rebalance threshold")
|
||||
@click.option("--description", help="Bot description")
|
||||
@click.pass_context
|
||||
def create(ctx, exchange: str, pair: str, spread: float, depth: float, max_order_size: float, min_order_size: float, target_inventory: float, rebalance_threshold: float, description: Optional[str]):
|
||||
"""Create a new market making bot"""
|
||||
|
||||
# Generate unique bot ID
|
||||
bot_id = f"mm_{exchange.lower()}_{pair.replace('/', '_')}_{str(uuid.uuid4())[:8]}"
|
||||
|
||||
# Create bot configuration
|
||||
bot_config = {
|
||||
"bot_id": bot_id,
|
||||
"exchange": exchange,
|
||||
"pair": pair,
|
||||
"status": "stopped",
|
||||
"strategy": "basic_market_making",
|
||||
"config": {
|
||||
"spread": spread,
|
||||
"depth": depth,
|
||||
"max_order_size": max_order_size,
|
||||
"min_order_size": min_order_size,
|
||||
"target_inventory": target_inventory,
|
||||
"rebalance_threshold": rebalance_threshold
|
||||
},
|
||||
"performance": {
|
||||
"total_trades": 0,
|
||||
"total_volume": 0.0,
|
||||
"total_profit": 0.0,
|
||||
"inventory_value": 0.0,
|
||||
"orders_placed": 0,
|
||||
"orders_filled": 0
|
||||
},
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"last_updated": None,
|
||||
"description": description or f"Market making bot for {pair} on {exchange}",
|
||||
"current_orders": [],
|
||||
"inventory": {
|
||||
"base_asset": 0.0,
|
||||
"quote_asset": target_inventory
|
||||
}
|
||||
}
|
||||
|
||||
# Store bot configuration
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
bots_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load existing bots
|
||||
bots = {}
|
||||
if bots_file.exists():
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
# Add new bot
|
||||
bots[bot_id] = bot_config
|
||||
|
||||
# Save bots
|
||||
with open(bots_file, 'w') as f:
|
||||
json.dump(bots, f, indent=2)
|
||||
|
||||
success(f"Market making bot created: {bot_id}")
|
||||
output({
|
||||
"bot_id": bot_id,
|
||||
"exchange": exchange,
|
||||
"pair": pair,
|
||||
"status": "created",
|
||||
"spread": spread,
|
||||
"depth": depth,
|
||||
"created_at": bot_config["created_at"]
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Bot ID to configure")
|
||||
@click.option("--spread", type=float, help="New bid-ask spread")
|
||||
@click.option("--depth", type=float, help="New order book depth")
|
||||
@click.option("--max-order-size", type=float, help="New maximum order size")
|
||||
@click.option("--target-inventory", type=float, help="New target inventory")
|
||||
@click.option("--rebalance-threshold", type=float, help="New rebalance threshold")
|
||||
@click.pass_context
|
||||
def config(ctx, bot_id: str, spread: Optional[float], depth: Optional[float], max_order_size: Optional[float], target_inventory: Optional[float], rebalance_threshold: Optional[float]):
|
||||
"""Configure market making bot parameters"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
error("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
if bot_id not in bots:
|
||||
error(f"Bot '{bot_id}' not found.")
|
||||
return
|
||||
|
||||
bot = bots[bot_id]
|
||||
|
||||
# Update configuration
|
||||
config_updates = {}
|
||||
if spread is not None:
|
||||
bot["config"]["spread"] = spread
|
||||
config_updates["spread"] = spread
|
||||
if depth is not None:
|
||||
bot["config"]["depth"] = depth
|
||||
config_updates["depth"] = depth
|
||||
if max_order_size is not None:
|
||||
bot["config"]["max_order_size"] = max_order_size
|
||||
config_updates["max_order_size"] = max_order_size
|
||||
if target_inventory is not None:
|
||||
bot["config"]["target_inventory"] = target_inventory
|
||||
config_updates["target_inventory"] = target_inventory
|
||||
if rebalance_threshold is not None:
|
||||
bot["config"]["rebalance_threshold"] = rebalance_threshold
|
||||
config_updates["rebalance_threshold"] = rebalance_threshold
|
||||
|
||||
if not config_updates:
|
||||
error("No configuration updates provided.")
|
||||
return
|
||||
|
||||
# Update timestamp
|
||||
bot["last_updated"] = datetime.utcnow().isoformat()
|
||||
|
||||
# Save bots
|
||||
with open(bots_file, 'w') as f:
|
||||
json.dump(bots, f, indent=2)
|
||||
|
||||
success(f"Bot '{bot_id}' configuration updated")
|
||||
output({
|
||||
"bot_id": bot_id,
|
||||
"config_updates": config_updates,
|
||||
"updated_at": bot["last_updated"]
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Bot ID to start")
|
||||
@click.option("--dry-run", is_flag=True, help="Run in simulation mode without real orders")
|
||||
@click.pass_context
|
||||
def start(ctx, bot_id: str, dry_run: bool):
|
||||
"""Start a market making bot"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
error("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
if bot_id not in bots:
|
||||
error(f"Bot '{bot_id}' not found.")
|
||||
return
|
||||
|
||||
bot = bots[bot_id]
|
||||
|
||||
# Check if bot is already running
|
||||
if bot["status"] == "running":
|
||||
warning(f"Bot '{bot_id}' is already running.")
|
||||
return
|
||||
|
||||
# Update bot status
|
||||
bot["status"] = "running" if not dry_run else "simulation"
|
||||
bot["started_at"] = datetime.utcnow().isoformat()
|
||||
bot["last_updated"] = datetime.utcnow().isoformat()
|
||||
bot["dry_run"] = dry_run
|
||||
|
||||
# Initialize performance tracking for this run
|
||||
bot["current_run"] = {
|
||||
"started_at": bot["started_at"],
|
||||
"orders_placed": 0,
|
||||
"orders_filled": 0,
|
||||
"total_volume": 0.0,
|
||||
"total_profit": 0.0
|
||||
}
|
||||
|
||||
# Save bots
|
||||
with open(bots_file, 'w') as f:
|
||||
json.dump(bots, f, indent=2)
|
||||
|
||||
mode = "simulation" if dry_run else "live"
|
||||
success(f"Bot '{bot_id}' started in {mode} mode")
|
||||
output({
|
||||
"bot_id": bot_id,
|
||||
"status": bot["status"],
|
||||
"mode": mode,
|
||||
"started_at": bot["started_at"],
|
||||
"exchange": bot["exchange"],
|
||||
"pair": bot["pair"]
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Bot ID to stop")
|
||||
@click.pass_context
|
||||
def stop(ctx, bot_id: str):
|
||||
"""Stop a market making bot"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
error("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
if bot_id not in bots:
|
||||
error(f"Bot '{bot_id}' not found.")
|
||||
return
|
||||
|
||||
bot = bots[bot_id]
|
||||
|
||||
# Check if bot is running
|
||||
if bot["status"] not in ["running", "simulation"]:
|
||||
warning(f"Bot '{bot_id}' is not currently running.")
|
||||
return
|
||||
|
||||
# Update bot status
|
||||
bot["status"] = "stopped"
|
||||
bot["stopped_at"] = datetime.utcnow().isoformat()
|
||||
bot["last_updated"] = datetime.utcnow().isoformat()
|
||||
|
||||
# Cancel all current orders (simulation)
|
||||
bot["current_orders"] = []
|
||||
|
||||
# Save bots
|
||||
with open(bots_file, 'w') as f:
|
||||
json.dump(bots, f, indent=2)
|
||||
|
||||
success(f"Bot '{bot_id}' stopped")
|
||||
output({
|
||||
"bot_id": bot_id,
|
||||
"status": "stopped",
|
||||
"stopped_at": bot["stopped_at"],
|
||||
"final_performance": bot.get("current_run", {})
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", help="Specific bot ID to check")
|
||||
@click.option("--exchange", help="Filter by exchange")
|
||||
@click.option("--pair", help="Filter by trading pair")
|
||||
@click.pass_context
|
||||
def performance(ctx, bot_id: Optional[str], exchange: Optional[str], pair: Optional[str]):
|
||||
"""Get performance metrics for market making bots"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
error("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
# Filter bots
|
||||
performance_data = {}
|
||||
|
||||
for current_bot_id, bot in bots.items():
|
||||
if bot_id and current_bot_id != bot_id:
|
||||
continue
|
||||
if exchange and bot["exchange"] != exchange:
|
||||
continue
|
||||
if pair and bot["pair"] != pair:
|
||||
continue
|
||||
|
||||
# Calculate performance metrics
|
||||
perf = bot.get("performance", {})
|
||||
current_run = bot.get("current_run", {})
|
||||
|
||||
bot_performance = {
|
||||
"bot_id": current_bot_id,
|
||||
"exchange": bot["exchange"],
|
||||
"pair": bot["pair"],
|
||||
"status": bot["status"],
|
||||
"created_at": bot["created_at"],
|
||||
"total_trades": perf.get("total_trades", 0),
|
||||
"total_volume": perf.get("total_volume", 0.0),
|
||||
"total_profit": perf.get("total_profit", 0.0),
|
||||
"orders_placed": perf.get("orders_placed", 0),
|
||||
"orders_filled": perf.get("orders_filled", 0),
|
||||
"fill_rate": (perf.get("orders_filled", 0) / max(perf.get("orders_placed", 1), 1)) * 100,
|
||||
"current_inventory": bot.get("inventory", {}),
|
||||
"current_orders": len(bot.get("current_orders", [])),
|
||||
"strategy": bot.get("strategy", "unknown"),
|
||||
"config": bot.get("config", {})
|
||||
}
|
||||
|
||||
# Add current run data if available
|
||||
if current_run:
|
||||
bot_performance["current_run"] = current_run
|
||||
if "started_at" in current_run:
|
||||
start_time = datetime.fromisoformat(current_run["started_at"].replace('Z', '+00:00'))
|
||||
runtime = datetime.utcnow() - start_time
|
||||
bot_performance["run_time_hours"] = runtime.total_seconds() / 3600
|
||||
|
||||
performance_data[current_bot_id] = bot_performance
|
||||
|
||||
if not performance_data:
|
||||
error("No market making bots found matching the criteria.")
|
||||
return
|
||||
|
||||
output({
|
||||
"performance_data": performance_data,
|
||||
"total_bots": len(performance_data),
|
||||
"generated_at": datetime.utcnow().isoformat()
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.pass_context
|
||||
def list(ctx):
|
||||
"""List all market making bots"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
warning("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
# Format bot list
|
||||
bot_list = []
|
||||
for bot_id, bot in bots.items():
|
||||
bot_info = {
|
||||
"bot_id": bot_id,
|
||||
"exchange": bot["exchange"],
|
||||
"pair": bot["pair"],
|
||||
"status": bot["status"],
|
||||
"strategy": bot.get("strategy", "unknown"),
|
||||
"created_at": bot["created_at"],
|
||||
"last_updated": bot.get("last_updated"),
|
||||
"total_trades": bot.get("performance", {}).get("total_trades", 0),
|
||||
"current_orders": len(bot.get("current_orders", []))
|
||||
}
|
||||
bot_list.append(bot_info)
|
||||
|
||||
output({
|
||||
"market_makers": bot_list,
|
||||
"total_bots": len(bot_list),
|
||||
"running_bots": len([b for b in bot_list if b["status"] in ["running", "simulation"]]),
|
||||
"stopped_bots": len([b for b in bot_list if b["status"] == "stopped"])
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.argument("bot_id")
|
||||
@click.pass_context
|
||||
def status(ctx, bot_id: str):
|
||||
"""Get detailed status of a specific market making bot"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
error("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
if bot_id not in bots:
|
||||
error(f"Bot '{bot_id}' not found.")
|
||||
return
|
||||
|
||||
bot = bots[bot_id]
|
||||
|
||||
# Calculate uptime if running
|
||||
uptime_hours = None
|
||||
if bot["status"] in ["running", "simulation"] and "started_at" in bot:
|
||||
start_time = datetime.fromisoformat(bot["started_at"].replace('Z', '+00:00'))
|
||||
uptime = datetime.utcnow() - start_time
|
||||
uptime_hours = uptime.total_seconds() / 3600
|
||||
|
||||
output({
|
||||
"bot_id": bot_id,
|
||||
"exchange": bot["exchange"],
|
||||
"pair": bot["pair"],
|
||||
"status": bot["status"],
|
||||
"strategy": bot.get("strategy", "unknown"),
|
||||
"config": bot.get("config", {}),
|
||||
"performance": bot.get("performance", {}),
|
||||
"inventory": bot.get("inventory", {}),
|
||||
"current_orders": bot.get("current_orders", []),
|
||||
"created_at": bot["created_at"],
|
||||
"last_updated": bot.get("last_updated"),
|
||||
"started_at": bot.get("started_at"),
|
||||
"stopped_at": bot.get("stopped_at"),
|
||||
"uptime_hours": uptime_hours,
|
||||
"dry_run": bot.get("dry_run", False),
|
||||
"description": bot.get("description")
|
||||
})
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.argument("bot_id")
|
||||
@click.pass_context
|
||||
def remove(ctx, bot_id: str):
|
||||
"""Remove a market making bot"""
|
||||
|
||||
# Load bots
|
||||
bots_file = Path.home() / ".aitbc" / "market_makers.json"
|
||||
if not bots_file.exists():
|
||||
error("No market making bots found.")
|
||||
return
|
||||
|
||||
with open(bots_file, 'r') as f:
|
||||
bots = json.load(f)
|
||||
|
||||
if bot_id not in bots:
|
||||
error(f"Bot '{bot_id}' not found.")
|
||||
return
|
||||
|
||||
bot = bots[bot_id]
|
||||
|
||||
# Check if bot is running
|
||||
if bot["status"] in ["running", "simulation"]:
|
||||
error(f"Cannot remove bot '{bot_id}' while it is running. Stop it first.")
|
||||
return
|
||||
|
||||
# Remove bot
|
||||
del bots[bot_id]
|
||||
|
||||
# Save bots
|
||||
with open(bots_file, 'w') as f:
|
||||
json.dump(bots, f, indent=2)
|
||||
|
||||
success(f"Market making bot '{bot_id}' removed")
|
||||
output({
|
||||
"bot_id": bot_id,
|
||||
"status": "removed",
|
||||
"exchange": bot["exchange"],
|
||||
"pair": bot["pair"]
|
||||
})
|
||||
|
||||
|
||||
@click.group()
|
||||
def market_maker():
|
||||
"""Market making operations"""
|
||||
pass
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--exchange", required=True, help="Exchange name (e.g., Binance, Coinbase)")
|
||||
@click.option("--pair", required=True, help="Trading pair (e.g., AITBC/BTC)")
|
||||
@click.option("--spread", type=float, default=0.001, help="Bid-ask spread (as percentage)")
|
||||
@click.option("--depth", type=int, default=5, help="Order book depth levels")
|
||||
@click.option("--base-balance", type=float, help="Base asset balance for market making")
|
||||
@click.option("--quote-balance", type=float, help="Quote asset balance for market making")
|
||||
@click.option("--min-order-size", type=float, help="Minimum order size")
|
||||
@click.option("--max-order-size", type=float, help="Maximum order size")
|
||||
@click.option("--strategy", default="simple", help="Market making strategy")
|
||||
@click.pass_context
|
||||
def create(ctx, exchange: str, pair: str, spread: float, depth: int,
|
||||
base_balance: Optional[float], quote_balance: Optional[float],
|
||||
min_order_size: Optional[float], max_order_size: Optional[float],
|
||||
strategy: str):
|
||||
"""Create a new market making bot"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
bot_config = {
|
||||
"exchange": exchange,
|
||||
"pair": pair,
|
||||
"spread": spread,
|
||||
"depth": depth,
|
||||
"strategy": strategy,
|
||||
"status": "created"
|
||||
}
|
||||
|
||||
if base_balance is not None:
|
||||
bot_config["base_balance"] = base_balance
|
||||
if quote_balance is not None:
|
||||
bot_config["quote_balance"] = quote_balance
|
||||
if min_order_size is not None:
|
||||
bot_config["min_order_size"] = min_order_size
|
||||
if max_order_size is not None:
|
||||
bot_config["max_order_size"] = max_order_size
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/create",
|
||||
json=bot_config,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Market maker bot created for '{pair}' on '{exchange}'!")
|
||||
success(f"Bot ID: {result.get('bot_id')}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create market maker: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Market maker bot ID")
|
||||
@click.option("--spread", type=float, help="New bid-ask spread")
|
||||
@click.option("--depth", type=int, help="New order book depth")
|
||||
@click.option("--base-balance", type=float, help="New base asset balance")
|
||||
@click.option("--quote-balance", type=float, help="New quote asset balance")
|
||||
@click.option("--min-order-size", type=float, help="New minimum order size")
|
||||
@click.option("--max-order-size", type=float, help="New maximum order size")
|
||||
@click.option("--strategy", help="New market making strategy")
|
||||
@click.pass_context
|
||||
def config(ctx, bot_id: str, spread: Optional[float], depth: Optional[int],
|
||||
base_balance: Optional[float], quote_balance: Optional[float],
|
||||
min_order_size: Optional[float], max_order_size: Optional[float],
|
||||
strategy: Optional[str]):
|
||||
"""Configure market maker bot settings"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
updates = {}
|
||||
if spread is not None:
|
||||
updates["spread"] = spread
|
||||
if depth is not None:
|
||||
updates["depth"] = depth
|
||||
if base_balance is not None:
|
||||
updates["base_balance"] = base_balance
|
||||
if quote_balance is not None:
|
||||
updates["quote_balance"] = quote_balance
|
||||
if min_order_size is not None:
|
||||
updates["min_order_size"] = min_order_size
|
||||
if max_order_size is not None:
|
||||
updates["max_order_size"] = max_order_size
|
||||
if strategy is not None:
|
||||
updates["strategy"] = strategy
|
||||
|
||||
if not updates:
|
||||
error("No configuration updates provided")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/config/{bot_id}",
|
||||
json=updates,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Market maker {bot_id} configured successfully!")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to configure market maker: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Market maker bot ID")
|
||||
@click.option("--dry-run", is_flag=True, help="Test run without executing real trades")
|
||||
@click.pass_context
|
||||
def start(ctx, bot_id: str, dry_run: bool):
|
||||
"""Start market maker bot"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
start_data = {
|
||||
"dry_run": dry_run
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/start/{bot_id}",
|
||||
json=start_data,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
mode = " (dry run)" if dry_run else ""
|
||||
success(f"Market maker {bot_id} started{mode}!")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start market maker: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Market maker bot ID")
|
||||
@click.pass_context
|
||||
def stop(ctx, bot_id: str):
|
||||
"""Stop market maker bot"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/stop/{bot_id}",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Market maker {bot_id} stopped!")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to stop market maker: {response.status_code}")
|
||||
if response.text:
|
||||
error(f"Error details: {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", help="Specific bot ID to check")
|
||||
@click.option("--exchange", help="Filter by exchange")
|
||||
@click.option("--pair", help="Filter by trading pair")
|
||||
@click.option("--status", help="Filter by status (running, stopped, created)")
|
||||
@click.pass_context
|
||||
def performance(ctx, bot_id: Optional[str], exchange: Optional[str],
|
||||
pair: Optional[str], status: Optional[str]):
|
||||
"""Get market maker performance analytics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if bot_id:
|
||||
params["bot_id"] = bot_id
|
||||
if exchange:
|
||||
params["exchange"] = exchange
|
||||
if pair:
|
||||
params["pair"] = pair
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/performance",
|
||||
params=params,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
performance_data = response.json()
|
||||
success("Market maker performance:")
|
||||
output(performance_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get performance data: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", help="Specific bot ID to list")
|
||||
@click.option("--exchange", help="Filter by exchange")
|
||||
@click.option("--pair", help="Filter by trading pair")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.pass_context
|
||||
def list(ctx, bot_id: Optional[str], exchange: Optional[str],
|
||||
pair: Optional[str], status: Optional[str]):
|
||||
"""List market maker bots"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if bot_id:
|
||||
params["bot_id"] = bot_id
|
||||
if exchange:
|
||||
params["exchange"] = exchange
|
||||
if pair:
|
||||
params["pair"] = pair
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/list",
|
||||
params=params,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
bots = response.json()
|
||||
success("Market maker bots:")
|
||||
output(bots, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list market makers: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Market maker bot ID")
|
||||
@click.option("--hours", type=int, default=24, help="Hours of history to retrieve")
|
||||
@click.pass_context
|
||||
def history(ctx, bot_id: str, hours: int):
|
||||
"""Get market maker trading history"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {
|
||||
"hours": hours
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/history/{bot_id}",
|
||||
params=params,
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
history_data = response.json()
|
||||
success(f"Market maker {bot_id} history (last {hours} hours):")
|
||||
output(history_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get market maker history: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.option("--bot-id", required=True, help="Market maker bot ID")
|
||||
@click.pass_context
|
||||
def status(ctx, bot_id: str):
|
||||
"""Get market maker bot status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/status/{bot_id}",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status_data = response.json()
|
||||
success(f"Market maker {bot_id} status:")
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get market maker status: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@market_maker.command()
|
||||
@click.pass_context
|
||||
def strategies(ctx):
|
||||
"""List available market making strategies"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/api/v1/market-maker/strategies",
|
||||
timeout=10
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
strategies = response.json()
|
||||
success("Available market making strategies:")
|
||||
output(strategies, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list strategies: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
1137
cli/commands/marketplace.py
Executable file
1137
cli/commands/marketplace.py
Executable file
File diff suppressed because it is too large
Load Diff
654
cli/commands/marketplace_advanced.py
Executable file
654
cli/commands/marketplace_advanced.py
Executable file
@@ -0,0 +1,654 @@
|
||||
"""Advanced marketplace commands for AITBC CLI - Enhanced marketplace operations"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import base64
|
||||
from typing import Optional, Dict, Any, List
|
||||
from pathlib import Path
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def advanced():
|
||||
"""Advanced marketplace operations and analytics"""
|
||||
pass
|
||||
|
||||
|
||||
@click.group()
|
||||
def models():
|
||||
"""Advanced model NFT operations"""
|
||||
pass
|
||||
|
||||
|
||||
advanced.add_command(models)
|
||||
|
||||
|
||||
@models.command()
|
||||
@click.option("--nft-version", default="2.0", help="NFT version filter")
|
||||
@click.option("--category", help="Filter by model category")
|
||||
@click.option("--tags", help="Comma-separated tags to filter")
|
||||
@click.option("--rating-min", type=float, help="Minimum rating filter")
|
||||
@click.option("--limit", default=20, help="Number of models to list")
|
||||
@click.pass_context
|
||||
def list(ctx, nft_version: str, category: Optional[str], tags: Optional[str],
|
||||
rating_min: Optional[float], limit: int):
|
||||
"""List advanced NFT models"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"nft_version": nft_version, "limit": limit}
|
||||
if category:
|
||||
params["category"] = category
|
||||
if tags:
|
||||
params["tags"] = [t.strip() for t in tags.split(',')]
|
||||
if rating_min:
|
||||
params["rating_min"] = rating_min
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/models",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
models = response.json()
|
||||
output(models, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list models: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@models.command()
|
||||
@click.option("--model-file", type=click.Path(exists=True), required=True, help="Model file path")
|
||||
@click.option("--metadata", type=click.File('r'), required=True, help="Model metadata JSON file")
|
||||
@click.option("--price", type=float, help="Initial price")
|
||||
@click.option("--royalty", type=float, default=0.0, help="Royalty percentage")
|
||||
@click.option("--supply", default=1, help="NFT supply")
|
||||
@click.pass_context
|
||||
def mint(ctx, model_file: str, metadata, price: Optional[float], royalty: float, supply: int):
|
||||
"""Create model NFT with advanced metadata"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Read model file
|
||||
try:
|
||||
with open(model_file, 'rb') as f:
|
||||
model_data = f.read()
|
||||
except Exception as e:
|
||||
error(f"Failed to read model file: {e}")
|
||||
return
|
||||
|
||||
# Read metadata
|
||||
try:
|
||||
metadata_data = json.load(metadata)
|
||||
except Exception as e:
|
||||
error(f"Failed to read metadata file: {e}")
|
||||
return
|
||||
|
||||
nft_data = {
|
||||
"metadata": metadata_data,
|
||||
"royalty_percentage": royalty,
|
||||
"supply": supply
|
||||
}
|
||||
|
||||
if price:
|
||||
nft_data["initial_price"] = price
|
||||
|
||||
files = {
|
||||
"model": model_data
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/models/mint",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
data=nft_data,
|
||||
files=files
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
nft = response.json()
|
||||
success(f"Model NFT minted: {nft['id']}")
|
||||
output(nft, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to mint NFT: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@models.command()
|
||||
@click.argument("nft_id")
|
||||
@click.option("--new-version", type=click.Path(exists=True), required=True, help="New model version file")
|
||||
@click.option("--version-notes", default="", help="Version update notes")
|
||||
@click.option("--compatibility", default="backward",
|
||||
type=click.Choice(["backward", "forward", "breaking"]),
|
||||
help="Compatibility type")
|
||||
@click.pass_context
|
||||
def update(ctx, nft_id: str, new_version: str, version_notes: str, compatibility: str):
|
||||
"""Update model NFT with new version"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Read new version file
|
||||
try:
|
||||
with open(new_version, 'rb') as f:
|
||||
version_data = f.read()
|
||||
except Exception as e:
|
||||
error(f"Failed to read version file: {e}")
|
||||
return
|
||||
|
||||
update_data = {
|
||||
"version_notes": version_notes,
|
||||
"compatibility": compatibility
|
||||
}
|
||||
|
||||
files = {
|
||||
"version": version_data
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/models/{nft_id}/update",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
data=update_data,
|
||||
files=files
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Model NFT updated: {result['version']}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to update NFT: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@models.command()
|
||||
@click.argument("nft_id")
|
||||
@click.option("--deep-scan", is_flag=True, help="Perform deep authenticity scan")
|
||||
@click.option("--check-integrity", is_flag=True, help="Check model integrity")
|
||||
@click.option("--verify-performance", is_flag=True, help="Verify performance claims")
|
||||
@click.pass_context
|
||||
def verify(ctx, nft_id: str, deep_scan: bool, check_integrity: bool, verify_performance: bool):
|
||||
"""Verify model authenticity and quality"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
verify_data = {
|
||||
"deep_scan": deep_scan,
|
||||
"check_integrity": check_integrity,
|
||||
"verify_performance": verify_performance
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/models/{nft_id}/verify",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=verify_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
verification = response.json()
|
||||
|
||||
if verification.get("authentic"):
|
||||
success("Model authenticity: VERIFIED")
|
||||
else:
|
||||
warning("Model authenticity: FAILED")
|
||||
|
||||
output(verification, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to verify model: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def marketplace_analytics():
|
||||
"""Marketplace analytics and insights"""
|
||||
pass
|
||||
|
||||
|
||||
advanced.add_command(marketplace_analytics)
|
||||
|
||||
|
||||
@marketplace_analytics.command()
|
||||
@click.option("--period", default="30d", help="Time period (1d, 7d, 30d, 90d)")
|
||||
@click.option("--metrics", default="volume,trends", help="Comma-separated metrics")
|
||||
@click.option("--category", help="Filter by category")
|
||||
@click.option("--format", "output_format", default="json",
|
||||
type=click.Choice(["json", "csv", "pdf"]),
|
||||
help="Output format")
|
||||
@click.pass_context
|
||||
def get_analytics(ctx, period: str, metrics: str, category: Optional[str], output_format: str):
|
||||
"""Get comprehensive marketplace analytics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {
|
||||
"period": period,
|
||||
"metrics": [m.strip() for m in metrics.split(',')],
|
||||
"format": output_format
|
||||
}
|
||||
|
||||
if category:
|
||||
params["category"] = category
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/analytics",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
if output_format == "pdf":
|
||||
# Handle PDF download
|
||||
filename = f"marketplace_analytics_{period}.pdf"
|
||||
with open(filename, 'wb') as f:
|
||||
f.write(response.content)
|
||||
success(f"Analytics report downloaded: {filename}")
|
||||
else:
|
||||
analytics_data = response.json()
|
||||
output(analytics_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get analytics: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@marketplace_analytics.command()
|
||||
@click.argument("model_id")
|
||||
@click.option("--competitors", is_flag=True, help="Include competitor analysis")
|
||||
@click.option("--datasets", default="standard", help="Test datasets to use")
|
||||
@click.option("--iterations", default=100, help="Benchmark iterations")
|
||||
@click.pass_context
|
||||
def benchmark(ctx, model_id: str, competitors: bool, datasets: str, iterations: int):
|
||||
"""Model performance benchmarking"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
benchmark_data = {
|
||||
"competitors": competitors,
|
||||
"datasets": datasets,
|
||||
"iterations": iterations
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/models/{model_id}/benchmark",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=benchmark_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
benchmark = response.json()
|
||||
success(f"Benchmark started: {benchmark['id']}")
|
||||
output(benchmark, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start benchmark: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@marketplace_analytics.command()
|
||||
@click.option("--category", help="Filter by category")
|
||||
@click.option("--forecast", default="7d", help="Forecast period")
|
||||
@click.option("--confidence", default=0.8, help="Confidence threshold")
|
||||
@click.pass_context
|
||||
def trends(ctx, category: Optional[str], forecast: str, confidence: float):
|
||||
"""Market trend analysis and forecasting"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {
|
||||
"forecast_period": forecast,
|
||||
"confidence_threshold": confidence
|
||||
}
|
||||
|
||||
if category:
|
||||
params["category"] = category
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/trends",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
trends_data = response.json()
|
||||
output(trends_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get trends: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@marketplace_analytics.command()
|
||||
@click.option("--format", default="pdf", type=click.Choice(["pdf", "html", "json"]),
|
||||
help="Report format")
|
||||
@click.option("--email", help="Email address to send report")
|
||||
@click.option("--sections", default="all", help="Comma-separated report sections")
|
||||
@click.pass_context
|
||||
def report(ctx, format: str, email: Optional[str], sections: str):
|
||||
"""Generate comprehensive marketplace report"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
report_data = {
|
||||
"format": format,
|
||||
"sections": [s.strip() for s in sections.split(',')]
|
||||
}
|
||||
|
||||
if email:
|
||||
report_data["email"] = email
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/reports/generate",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=report_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
report_job = response.json()
|
||||
success(f"Report generation started: {report_job['id']}")
|
||||
output(report_job, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to generate report: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def trading():
|
||||
"""Advanced trading features"""
|
||||
pass
|
||||
|
||||
|
||||
advanced.add_command(trading)
|
||||
|
||||
|
||||
@trading.command()
|
||||
@click.argument("auction_id")
|
||||
@click.option("--amount", type=float, required=True, help="Bid amount")
|
||||
@click.option("--max-auto-bid", type=float, help="Maximum auto-bid amount")
|
||||
@click.option("--proxy", is_flag=True, help="Use proxy bidding")
|
||||
@click.pass_context
|
||||
def bid(ctx, auction_id: str, amount: float, max_auto_bid: Optional[float], proxy: bool):
|
||||
"""Participate in model auction"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
bid_data = {
|
||||
"amount": amount,
|
||||
"proxy_bidding": proxy
|
||||
}
|
||||
|
||||
if max_auto_bid:
|
||||
bid_data["max_auto_bid"] = max_auto_bid
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/auctions/{auction_id}/bid",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=bid_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Bid placed successfully")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to place bid: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@trading.command()
|
||||
@click.argument("model_id")
|
||||
@click.option("--recipients", required=True, help="Comma-separated recipient:percentage pairs")
|
||||
@click.option("--smart-contract", is_flag=True, help="Use smart contract distribution")
|
||||
@click.pass_context
|
||||
def royalties(ctx, model_id: str, recipients: str, smart_contract: bool):
|
||||
"""Create royalty distribution agreement"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Parse recipients
|
||||
royalty_recipients = []
|
||||
for recipient in recipients.split(','):
|
||||
if ':' in recipient:
|
||||
address, percentage = recipient.split(':', 1)
|
||||
royalty_recipients.append({
|
||||
"address": address.strip(),
|
||||
"percentage": float(percentage.strip())
|
||||
})
|
||||
|
||||
royalty_data = {
|
||||
"recipients": royalty_recipients,
|
||||
"smart_contract": smart_contract
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/models/{model_id}/royalties",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=royalty_data
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
result = response.json()
|
||||
success(f"Royalty agreement created: {result['id']}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create royalty agreement: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@trading.command()
|
||||
@click.option("--strategy", default="arbitrage",
|
||||
type=click.Choice(["arbitrage", "trend-following", "mean-reversion", "custom"]),
|
||||
help="Trading strategy")
|
||||
@click.option("--budget", type=float, required=True, help="Trading budget")
|
||||
@click.option("--risk-level", default="medium",
|
||||
type=click.Choice(["low", "medium", "high"]),
|
||||
help="Risk level")
|
||||
@click.option("--config", type=click.File('r'), help="Custom strategy configuration")
|
||||
@click.pass_context
|
||||
def execute(ctx, strategy: str, budget: float, risk_level: str, config):
|
||||
"""Execute complex trading strategy"""
|
||||
config_obj = ctx.obj['config']
|
||||
|
||||
strategy_data = {
|
||||
"strategy": strategy,
|
||||
"budget": budget,
|
||||
"risk_level": risk_level
|
||||
}
|
||||
|
||||
if config:
|
||||
try:
|
||||
custom_config = json.load(config)
|
||||
strategy_data["custom_config"] = custom_config
|
||||
except Exception as e:
|
||||
error(f"Failed to read strategy config: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config_obj.coordinator_url}/v1/marketplace/advanced/trading/execute",
|
||||
headers={"X-Api-Key": config_obj.api_key or ""},
|
||||
json=strategy_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
execution = response.json()
|
||||
success(f"Trading strategy execution started: {execution['id']}")
|
||||
output(execution, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to execute strategy: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def dispute():
|
||||
"""Dispute resolution operations"""
|
||||
pass
|
||||
|
||||
|
||||
advanced.add_command(dispute)
|
||||
|
||||
|
||||
@dispute.command()
|
||||
@click.argument("transaction_id")
|
||||
@click.option("--reason", required=True, help="Dispute reason")
|
||||
@click.option("--evidence", type=click.File('rb'), multiple=True, help="Evidence files")
|
||||
@click.option("--category", default="quality",
|
||||
type=click.Choice(["quality", "delivery", "payment", "fraud", "other"]),
|
||||
help="Dispute category")
|
||||
@click.pass_context
|
||||
def file(ctx, transaction_id: str, reason: str, evidence, category: str):
|
||||
"""File dispute resolution request"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
dispute_data = {
|
||||
"transaction_id": transaction_id,
|
||||
"reason": reason,
|
||||
"category": category
|
||||
}
|
||||
|
||||
files = {}
|
||||
for i, evidence_file in enumerate(evidence):
|
||||
files[f"evidence_{i}"] = evidence_file.read()
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/disputes",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
data=dispute_data,
|
||||
files=files
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
dispute = response.json()
|
||||
success(f"Dispute filed: {dispute['id']}")
|
||||
output(dispute, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to file dispute: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@dispute.command()
|
||||
@click.argument("dispute_id")
|
||||
@click.pass_context
|
||||
def status(ctx, dispute_id: str):
|
||||
"""Get dispute status and progress"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/disputes/{dispute_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
dispute_data = response.json()
|
||||
output(dispute_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get dispute status: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@dispute.command()
|
||||
@click.argument("dispute_id")
|
||||
@click.option("--resolution", required=True, help="Proposed resolution")
|
||||
@click.option("--evidence", type=click.File('rb'), multiple=True, help="Additional evidence")
|
||||
@click.pass_context
|
||||
def resolve(ctx, dispute_id: str, resolution: str, evidence):
|
||||
"""Propose dispute resolution"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
resolution_data = {
|
||||
"resolution": resolution
|
||||
}
|
||||
|
||||
files = {}
|
||||
for i, evidence_file in enumerate(evidence):
|
||||
files[f"evidence_{i}"] = evidence_file.read()
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/marketplace/advanced/disputes/{dispute_id}/resolve",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
data=resolution_data,
|
||||
files=files
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Resolution proposal submitted")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to submit resolution: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
494
cli/commands/marketplace_cmd.py
Executable file
494
cli/commands/marketplace_cmd.py
Executable file
@@ -0,0 +1,494 @@
|
||||
"""Global chain marketplace commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from decimal import Decimal
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
from core.config import load_multichain_config
|
||||
from core.marketplace import (
|
||||
GlobalChainMarketplace, ChainType, MarketplaceStatus,
|
||||
TransactionStatus
|
||||
)
|
||||
from utils import output, error, success
|
||||
|
||||
@click.group()
|
||||
def marketplace():
|
||||
"""Global chain marketplace commands"""
|
||||
pass
|
||||
|
||||
@marketplace.command()
|
||||
@click.argument('chain_id')
|
||||
@click.argument('chain_name')
|
||||
@click.argument('chain_type')
|
||||
@click.argument('description')
|
||||
@click.argument('seller_id')
|
||||
@click.argument('price')
|
||||
@click.option('--currency', default='ETH', help='Currency for pricing')
|
||||
@click.option('--specs', help='Chain specifications (JSON string)')
|
||||
@click.option('--metadata', help='Additional metadata (JSON string)')
|
||||
@click.pass_context
|
||||
def list(ctx, chain_id, chain_name, chain_type, description, seller_id, price, currency, specs, metadata):
|
||||
"""List a chain for sale in the marketplace"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Parse chain type
|
||||
try:
|
||||
chain_type_enum = ChainType(chain_type)
|
||||
except ValueError:
|
||||
error(f"Invalid chain type: {chain_type}")
|
||||
error(f"Valid types: {[t.value for t in ChainType]}")
|
||||
raise click.Abort()
|
||||
|
||||
# Parse price
|
||||
try:
|
||||
price_decimal = Decimal(price)
|
||||
except:
|
||||
error("Invalid price format")
|
||||
raise click.Abort()
|
||||
|
||||
# Parse specifications
|
||||
chain_specs = {}
|
||||
if specs:
|
||||
try:
|
||||
chain_specs = json.loads(specs)
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid JSON specifications")
|
||||
raise click.Abort()
|
||||
|
||||
# Parse metadata
|
||||
metadata_dict = {}
|
||||
if metadata:
|
||||
try:
|
||||
metadata_dict = json.loads(metadata)
|
||||
except json.JSONDecodeError:
|
||||
error("Invalid JSON metadata")
|
||||
raise click.Abort()
|
||||
|
||||
# Create listing
|
||||
listing_id = asyncio.run(marketplace.create_listing(
|
||||
chain_id, chain_name, chain_type_enum, description,
|
||||
seller_id, price_decimal, currency, chain_specs, metadata_dict
|
||||
))
|
||||
|
||||
if listing_id:
|
||||
success(f"Chain listed successfully! Listing ID: {listing_id}")
|
||||
|
||||
listing_data = {
|
||||
"Listing ID": listing_id,
|
||||
"Chain ID": chain_id,
|
||||
"Chain Name": chain_name,
|
||||
"Type": chain_type,
|
||||
"Price": f"{price} {currency}",
|
||||
"Seller": seller_id,
|
||||
"Status": "active",
|
||||
"Created": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
output(listing_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error("Failed to create listing")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error creating listing: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.argument('listing_id')
|
||||
@click.argument('buyer_id')
|
||||
@click.option('--payment', default='crypto', help='Payment method')
|
||||
@click.pass_context
|
||||
def buy(ctx, listing_id, buyer_id, payment):
|
||||
"""Purchase a chain from the marketplace"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Purchase chain
|
||||
transaction_id = asyncio.run(marketplace.purchase_chain(listing_id, buyer_id, payment))
|
||||
|
||||
if transaction_id:
|
||||
success(f"Purchase initiated! Transaction ID: {transaction_id}")
|
||||
|
||||
transaction_data = {
|
||||
"Transaction ID": transaction_id,
|
||||
"Listing ID": listing_id,
|
||||
"Buyer": buyer_id,
|
||||
"Payment Method": payment,
|
||||
"Status": "pending",
|
||||
"Created": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
output(transaction_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error("Failed to purchase chain")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error purchasing chain: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.argument('transaction_id')
|
||||
@click.argument('transaction_hash')
|
||||
@click.pass_context
|
||||
def complete(ctx, transaction_id, transaction_hash):
|
||||
"""Complete a marketplace transaction"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Complete transaction
|
||||
success = asyncio.run(marketplace.complete_transaction(transaction_id, transaction_hash))
|
||||
|
||||
if success:
|
||||
success(f"Transaction {transaction_id} completed successfully!")
|
||||
|
||||
transaction_data = {
|
||||
"Transaction ID": transaction_id,
|
||||
"Transaction Hash": transaction_hash,
|
||||
"Status": "completed",
|
||||
"Completed": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
output(transaction_data, ctx.obj.get('output_format', 'table'))
|
||||
else:
|
||||
error(f"Failed to complete transaction {transaction_id}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error completing transaction: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.option('--type', help='Filter by chain type')
|
||||
@click.option('--min-price', help='Minimum price')
|
||||
@click.option('--max-price', help='Maximum price')
|
||||
@click.option('--seller', help='Filter by seller ID')
|
||||
@click.option('--status', help='Filter by listing status')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def search(ctx, type, min_price, max_price, seller, status, format):
|
||||
"""Search chain listings in the marketplace"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Parse filters
|
||||
chain_type = None
|
||||
if type:
|
||||
try:
|
||||
chain_type = ChainType(type)
|
||||
except ValueError:
|
||||
error(f"Invalid chain type: {type}")
|
||||
raise click.Abort()
|
||||
|
||||
min_price_dec = None
|
||||
if min_price:
|
||||
try:
|
||||
min_price_dec = Decimal(min_price)
|
||||
except:
|
||||
error("Invalid minimum price format")
|
||||
raise click.Abort()
|
||||
|
||||
max_price_dec = None
|
||||
if max_price:
|
||||
try:
|
||||
max_price_dec = Decimal(max_price)
|
||||
except:
|
||||
error("Invalid maximum price format")
|
||||
raise click.Abort()
|
||||
|
||||
listing_status = None
|
||||
if status:
|
||||
try:
|
||||
listing_status = MarketplaceStatus(status)
|
||||
except ValueError:
|
||||
error(f"Invalid status: {status}")
|
||||
raise click.Abort()
|
||||
|
||||
# Search listings
|
||||
listings = asyncio.run(marketplace.search_listings(
|
||||
chain_type, min_price_dec, max_price_dec, seller, listing_status
|
||||
))
|
||||
|
||||
if not listings:
|
||||
output("No listings found matching your criteria", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
# Format output
|
||||
listing_data = [
|
||||
{
|
||||
"Listing ID": listing.listing_id,
|
||||
"Chain ID": listing.chain_id,
|
||||
"Chain Name": listing.chain_name,
|
||||
"Type": listing.chain_type.value,
|
||||
"Price": f"{listing.price} {listing.currency}",
|
||||
"Seller": listing.seller_id,
|
||||
"Status": listing.status.value,
|
||||
"Created": listing.created_at.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"Expires": listing.expires_at.strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
for listing in listings
|
||||
]
|
||||
|
||||
output(listing_data, ctx.obj.get('output_format', format), title="Marketplace Listings")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error searching listings: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.argument('chain_id')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def economy(ctx, chain_id, format):
|
||||
"""Get economic metrics for a specific chain"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Get chain economy
|
||||
economy = asyncio.run(marketplace.get_chain_economy(chain_id))
|
||||
|
||||
if not economy:
|
||||
error(f"No economic data available for chain {chain_id}")
|
||||
raise click.Abort()
|
||||
|
||||
# Format output
|
||||
economy_data = [
|
||||
{"Metric": "Chain ID", "Value": economy.chain_id},
|
||||
{"Metric": "Total Value Locked", "Value": f"{economy.total_value_locked} ETH"},
|
||||
{"Metric": "Daily Volume", "Value": f"{economy.daily_volume} ETH"},
|
||||
{"Metric": "Market Cap", "Value": f"{economy.market_cap} ETH"},
|
||||
{"Metric": "Transaction Count", "Value": economy.transaction_count},
|
||||
{"Metric": "Active Users", "Value": economy.active_users},
|
||||
{"Metric": "Agent Count", "Value": economy.agent_count},
|
||||
{"Metric": "Governance Tokens", "Value": f"{economy.governance_tokens}"},
|
||||
{"Metric": "Staking Rewards", "Value": f"{economy.staking_rewards}"},
|
||||
{"Metric": "Last Updated", "Value": economy.last_updated.strftime("%Y-%m-%d %H:%M:%S")}
|
||||
]
|
||||
|
||||
output(economy_data, ctx.obj.get('output_format', format), title=f"Chain Economy: {chain_id}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting chain economy: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.argument('user_id')
|
||||
@click.option('--role', type=click.Choice(['buyer', 'seller', 'both']), default='both', help='User role')
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def transactions(ctx, user_id, role, format):
|
||||
"""Get transactions for a specific user"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Get user transactions
|
||||
transactions = asyncio.run(marketplace.get_user_transactions(user_id, role))
|
||||
|
||||
if not transactions:
|
||||
output(f"No transactions found for user {user_id}", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
# Format output
|
||||
transaction_data = [
|
||||
{
|
||||
"Transaction ID": transaction.transaction_id,
|
||||
"Listing ID": transaction.listing_id,
|
||||
"Chain ID": transaction.chain_id,
|
||||
"Price": f"{transaction.price} {transaction.currency}",
|
||||
"Role": "buyer" if transaction.buyer_id == user_id else "seller",
|
||||
"Counterparty": transaction.seller_id if transaction.buyer_id == user_id else transaction.buyer_id,
|
||||
"Status": transaction.status.value,
|
||||
"Created": transaction.created_at.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"Completed": transaction.completed_at.strftime("%Y-%m-%d %H:%M:%S") if transaction.completed_at else "N/A"
|
||||
}
|
||||
for transaction in transactions
|
||||
]
|
||||
|
||||
output(transaction_data, ctx.obj.get('output_format', format), title=f"Transactions for {user_id}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting user transactions: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def overview(ctx, format):
|
||||
"""Get comprehensive marketplace overview"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
# Get marketplace overview
|
||||
overview = asyncio.run(marketplace.get_marketplace_overview())
|
||||
|
||||
if not overview:
|
||||
error("No marketplace data available")
|
||||
raise click.Abort()
|
||||
|
||||
# Marketplace metrics
|
||||
if "marketplace_metrics" in overview:
|
||||
metrics = overview["marketplace_metrics"]
|
||||
metrics_data = [
|
||||
{"Metric": "Total Listings", "Value": metrics["total_listings"]},
|
||||
{"Metric": "Active Listings", "Value": metrics["active_listings"]},
|
||||
{"Metric": "Total Transactions", "Value": metrics["total_transactions"]},
|
||||
{"Metric": "Total Volume", "Value": f"{metrics['total_volume']} ETH"},
|
||||
{"Metric": "Average Price", "Value": f"{metrics['average_price']} ETH"},
|
||||
{"Metric": "Market Sentiment", "Value": f"{metrics['market_sentiment']:.2f}"}
|
||||
]
|
||||
|
||||
output(metrics_data, ctx.obj.get('output_format', format), title="Marketplace Metrics")
|
||||
|
||||
# Volume 24h
|
||||
if "volume_24h" in overview:
|
||||
volume_data = [
|
||||
{"Metric": "24h Volume", "Value": f"{overview['volume_24h']} ETH"}
|
||||
]
|
||||
|
||||
output(volume_data, ctx.obj.get('output_format', format), title="24-Hour Volume")
|
||||
|
||||
# Top performing chains
|
||||
if "top_performing_chains" in overview:
|
||||
chains = overview["top_performing_chains"]
|
||||
if chains:
|
||||
chain_data = [
|
||||
{
|
||||
"Chain ID": chain["chain_id"],
|
||||
"Volume": f"{chain['volume']} ETH",
|
||||
"Transactions": chain["transactions"]
|
||||
}
|
||||
for chain in chains[:5] # Top 5
|
||||
]
|
||||
|
||||
output(chain_data, ctx.obj.get('output_format', format), title="Top Performing Chains")
|
||||
|
||||
# Chain types distribution
|
||||
if "chain_types_distribution" in overview:
|
||||
distribution = overview["chain_types_distribution"]
|
||||
if distribution:
|
||||
dist_data = [
|
||||
{"Chain Type": chain_type, "Count": count}
|
||||
for chain_type, count in distribution.items()
|
||||
]
|
||||
|
||||
output(dist_data, ctx.obj.get('output_format', format), title="Chain Types Distribution")
|
||||
|
||||
# User activity
|
||||
if "user_activity" in overview:
|
||||
activity = overview["user_activity"]
|
||||
activity_data = [
|
||||
{"Metric": "Active Buyers (7d)", "Value": activity["active_buyers_7d"]},
|
||||
{"Metric": "Active Sellers (7d)", "Value": activity["active_sellers_7d"]},
|
||||
{"Metric": "Total Unique Users", "Value": activity["total_unique_users"]},
|
||||
{"Metric": "Average Reputation", "Value": f"{activity['average_reputation']:.3f}"}
|
||||
]
|
||||
|
||||
output(activity_data, ctx.obj.get('output_format', format), title="User Activity")
|
||||
|
||||
# Escrow summary
|
||||
if "escrow_summary" in overview:
|
||||
escrow = overview["escrow_summary"]
|
||||
escrow_data = [
|
||||
{"Metric": "Active Escrows", "Value": escrow["active_escrows"]},
|
||||
{"Metric": "Released Escrows", "Value": escrow["released_escrows"]},
|
||||
{"Metric": "Total Escrow Value", "Value": f"{escrow['total_escrow_value']} ETH"},
|
||||
{"Metric": "Escrow Fees Collected", "Value": f"{escrow['escrow_fee_collected']} ETH"}
|
||||
]
|
||||
|
||||
output(escrow_data, ctx.obj.get('output_format', format), title="Escrow Summary")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting marketplace overview: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@marketplace.command()
|
||||
@click.option('--realtime', is_flag=True, help='Real-time monitoring')
|
||||
@click.option('--interval', default=30, help='Update interval in seconds')
|
||||
@click.pass_context
|
||||
def monitor(ctx, realtime, interval):
|
||||
"""Monitor marketplace activity"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
marketplace = GlobalChainMarketplace(config)
|
||||
|
||||
if realtime:
|
||||
# Real-time monitoring
|
||||
from rich.console import Console
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
import time
|
||||
|
||||
console = Console()
|
||||
|
||||
def generate_monitor_table():
|
||||
try:
|
||||
overview = asyncio.run(marketplace.get_marketplace_overview())
|
||||
|
||||
table = Table(title=f"Marketplace Monitor - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
table.add_column("Metric", style="cyan")
|
||||
table.add_column("Value", style="green")
|
||||
|
||||
if "marketplace_metrics" in overview:
|
||||
metrics = overview["marketplace_metrics"]
|
||||
table.add_row("Total Listings", str(metrics["total_listings"]))
|
||||
table.add_row("Active Listings", str(metrics["active_listings"]))
|
||||
table.add_row("Total Transactions", str(metrics["total_transactions"]))
|
||||
table.add_row("Total Volume", f"{metrics['total_volume']} ETH")
|
||||
table.add_row("Market Sentiment", f"{metrics['market_sentiment']:.2f}")
|
||||
|
||||
if "volume_24h" in overview:
|
||||
table.add_row("24h Volume", f"{overview['volume_24h']} ETH")
|
||||
|
||||
if "user_activity" in overview:
|
||||
activity = overview["user_activity"]
|
||||
table.add_row("Active Users (7d)", str(activity["active_buyers_7d"] + activity["active_sellers_7d"]))
|
||||
|
||||
return table
|
||||
except Exception as e:
|
||||
return f"Error getting marketplace data: {e}"
|
||||
|
||||
with Live(generate_monitor_table(), refresh_per_second=1) as live:
|
||||
try:
|
||||
while True:
|
||||
live.update(generate_monitor_table())
|
||||
time.sleep(interval)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Monitoring stopped by user[/yellow]")
|
||||
else:
|
||||
# Single snapshot
|
||||
overview = asyncio.run(marketplace.get_marketplace_overview())
|
||||
|
||||
monitor_data = []
|
||||
|
||||
if "marketplace_metrics" in overview:
|
||||
metrics = overview["marketplace_metrics"]
|
||||
monitor_data.extend([
|
||||
{"Metric": "Total Listings", "Value": metrics["total_listings"]},
|
||||
{"Metric": "Active Listings", "Value": metrics["active_listings"]},
|
||||
{"Metric": "Total Transactions", "Value": metrics["total_transactions"]},
|
||||
{"Metric": "Total Volume", "Value": f"{metrics['total_volume']} ETH"},
|
||||
{"Metric": "Market Sentiment", "Value": f"{metrics['market_sentiment']:.2f}"}
|
||||
])
|
||||
|
||||
if "volume_24h" in overview:
|
||||
monitor_data.append({"Metric": "24h Volume", "Value": f"{overview['volume_24h']} ETH"})
|
||||
|
||||
if "user_activity" in overview:
|
||||
activity = overview["user_activity"]
|
||||
monitor_data.append({"Metric": "Active Users (7d)", "Value": activity["active_buyers_7d"] + activity["active_sellers_7d"]})
|
||||
|
||||
output(monitor_data, ctx.obj.get('output_format', 'table'), title="Marketplace Monitor")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during monitoring: {str(e)}")
|
||||
raise click.Abort()
|
||||
637
cli/commands/miner.py
Executable file
637
cli/commands/miner.py
Executable file
@@ -0,0 +1,637 @@
|
||||
"""Miner commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
import concurrent.futures
|
||||
from typing import Optional, Dict, Any, List
|
||||
from utils import output, error, success
|
||||
|
||||
|
||||
@click.group(invoke_without_command=True)
|
||||
@click.pass_context
|
||||
def miner(ctx):
|
||||
"""Register as miner and process jobs"""
|
||||
# Set role for miner commands - this will be used by parent context
|
||||
ctx.ensure_object(dict)
|
||||
# Set role at the highest level context (CLI root)
|
||||
ctx.find_root().detected_role = 'miner'
|
||||
|
||||
# If no subcommand was invoked, show help
|
||||
if ctx.invoked_subcommand is None:
|
||||
click.echo(ctx.get_help())
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--gpu", help="GPU model name")
|
||||
@click.option("--memory", type=int, help="GPU memory in GB")
|
||||
@click.option("--cuda-cores", type=int, help="Number of CUDA cores")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def register(ctx, gpu: Optional[str], memory: Optional[int],
|
||||
cuda_cores: Optional[int], miner_id: str):
|
||||
"""Register as a miner with the coordinator"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Build capabilities
|
||||
capabilities = {}
|
||||
if gpu:
|
||||
capabilities["gpu"] = {"model": gpu}
|
||||
if memory:
|
||||
if "gpu" not in capabilities:
|
||||
capabilities["gpu"] = {}
|
||||
capabilities["gpu"]["memory_gb"] = memory
|
||||
if cuda_cores:
|
||||
if "gpu" not in capabilities:
|
||||
capabilities["gpu"] = {}
|
||||
capabilities["gpu"]["cuda_cores"] = cuda_cores
|
||||
|
||||
# Default capabilities if none provided
|
||||
if not capabilities:
|
||||
capabilities = {
|
||||
"cpu": {"cores": 4},
|
||||
"memory": {"gb": 16}
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/register",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
json={"capabilities": capabilities}
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
output({
|
||||
"miner_id": miner_id,
|
||||
"status": "registered",
|
||||
"capabilities": capabilities
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to register: {response.status_code} - {response.text}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--wait", type=int, default=5, help="Max wait time in seconds")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def poll(ctx, wait: int, miner_id: str):
|
||||
"""Poll for a single job"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/poll",
|
||||
json={"max_wait_seconds": 5},
|
||||
headers={
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
timeout=wait + 5
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
if response.status_code == 204:
|
||||
output({"message": "No jobs available"}, ctx.obj['output_format'])
|
||||
else:
|
||||
job = response.json()
|
||||
if job:
|
||||
output(job, ctx.obj['output_format'])
|
||||
else:
|
||||
output({"message": "No jobs available"}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to poll: {response.status_code}")
|
||||
except httpx.TimeoutException:
|
||||
output({"message": f"No jobs available within {wait} seconds"}, ctx.obj['output_format'])
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--jobs", type=int, default=1, help="Number of jobs to process")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def mine(ctx, jobs: int, miner_id: str):
|
||||
"""Mine continuously for specified number of jobs"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
processed = 0
|
||||
while processed < jobs:
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
# Poll for job
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/poll",
|
||||
json={"max_wait_seconds": 5},
|
||||
headers={
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
if response.status_code == 204:
|
||||
time.sleep(5)
|
||||
continue
|
||||
job = response.json()
|
||||
if job:
|
||||
job_id = job.get('job_id')
|
||||
output({
|
||||
"job_id": job_id,
|
||||
"status": "processing",
|
||||
"job_number": processed + 1
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
# Simulate processing (in real implementation, do actual work)
|
||||
time.sleep(2)
|
||||
|
||||
# Submit result
|
||||
result_response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/{job_id}/result",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
json={
|
||||
"result": {"output": f"Processed job {job_id}"},
|
||||
"metrics": {}
|
||||
}
|
||||
)
|
||||
|
||||
if result_response.status_code == 200:
|
||||
success(f"Job {job_id} completed successfully")
|
||||
processed += 1
|
||||
else:
|
||||
error(f"Failed to submit result: {result_response.status_code}")
|
||||
else:
|
||||
# No job available, wait a bit
|
||||
time.sleep(5)
|
||||
else:
|
||||
error(f"Failed to poll: {response.status_code}")
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error: {e}")
|
||||
break
|
||||
|
||||
output({
|
||||
"total_processed": processed,
|
||||
"miner_id": miner_id
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def heartbeat(ctx, miner_id: str):
|
||||
"""Send heartbeat to coordinator"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/heartbeat",
|
||||
headers={
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
json={
|
||||
"inflight": 0,
|
||||
"status": "ONLINE",
|
||||
"metadata": {}
|
||||
}
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
output({
|
||||
"miner_id": miner_id,
|
||||
"status": "heartbeat_sent",
|
||||
"timestamp": time.time()
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to send heartbeat: {response.status_code}")
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def status(ctx, miner_id: str):
|
||||
"""Check miner status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# This would typically query a miner status endpoint
|
||||
# For now, we'll just show the miner info
|
||||
output({
|
||||
"miner_id": miner_id,
|
||||
"coordinator": config.coordinator_url,
|
||||
"status": "active"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.option("--from-time", help="Filter from timestamp (ISO format)")
|
||||
@click.option("--to-time", help="Filter to timestamp (ISO format)")
|
||||
@click.pass_context
|
||||
def earnings(ctx, miner_id: str, from_time: Optional[str], to_time: Optional[str]):
|
||||
"""Show miner earnings"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
params = {"miner_id": miner_id}
|
||||
if from_time:
|
||||
params["from_time"] = from_time
|
||||
if to_time:
|
||||
params["to_time"] = to_time
|
||||
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/{miner_id}/earnings",
|
||||
params=params,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
data = response.json()
|
||||
output(data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get earnings: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@miner.command(name="update-capabilities")
|
||||
@click.option("--gpu", help="GPU model name")
|
||||
@click.option("--memory", type=int, help="GPU memory in GB")
|
||||
@click.option("--cuda-cores", type=int, help="Number of CUDA cores")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def update_capabilities(ctx, gpu: Optional[str], memory: Optional[int],
|
||||
cuda_cores: Optional[int], miner_id: str):
|
||||
"""Update miner GPU capabilities"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
capabilities = {}
|
||||
if gpu:
|
||||
capabilities["gpu"] = {"model": gpu}
|
||||
if memory:
|
||||
if "gpu" not in capabilities:
|
||||
capabilities["gpu"] = {}
|
||||
capabilities["gpu"]["memory_gb"] = memory
|
||||
if cuda_cores:
|
||||
if "gpu" not in capabilities:
|
||||
capabilities["gpu"] = {}
|
||||
capabilities["gpu"]["cuda_cores"] = cuda_cores
|
||||
|
||||
if not capabilities:
|
||||
error("No capabilities specified. Use --gpu, --memory, or --cuda-cores.")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.put(
|
||||
f"{config.coordinator_url}/v1/miners/{miner_id}/capabilities",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or ""
|
||||
},
|
||||
json={"capabilities": capabilities}
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
output({
|
||||
"miner_id": miner_id,
|
||||
"status": "capabilities_updated",
|
||||
"capabilities": capabilities
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to update capabilities: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.option("--force", is_flag=True, help="Force deregistration without confirmation")
|
||||
@click.pass_context
|
||||
def deregister(ctx, miner_id: str, force: bool):
|
||||
"""Deregister miner from the coordinator"""
|
||||
if not force:
|
||||
if not click.confirm(f"Deregister miner '{miner_id}'?"):
|
||||
click.echo("Cancelled.")
|
||||
return
|
||||
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.delete(
|
||||
f"{config.coordinator_url}/v1/miners/{miner_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
output({
|
||||
"miner_id": miner_id,
|
||||
"status": "deregistered"
|
||||
}, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to deregister: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@miner.command()
|
||||
@click.option("--limit", default=10, help="Number of jobs to show")
|
||||
@click.option("--type", "job_type", help="Filter by job type")
|
||||
@click.option("--min-reward", type=float, help="Minimum reward threshold")
|
||||
@click.option("--status", "job_status", help="Filter by status (pending, running, completed, failed)")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def jobs(ctx, limit: int, job_type: Optional[str], min_reward: Optional[float],
|
||||
job_status: Optional[str], miner_id: str):
|
||||
"""List miner jobs with filtering"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
params = {"limit": limit, "miner_id": miner_id}
|
||||
if job_type:
|
||||
params["type"] = job_type
|
||||
if min_reward is not None:
|
||||
params["min_reward"] = min_reward
|
||||
if job_status:
|
||||
params["status"] = job_status
|
||||
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/{miner_id}/jobs",
|
||||
params=params,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code in (200, 204):
|
||||
data = response.json()
|
||||
output(data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get jobs: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
def _process_single_job(config, miner_id: str, worker_id: int) -> Dict[str, Any]:
|
||||
"""Process a single job (used by concurrent mine)"""
|
||||
try:
|
||||
with httpx.Client() as http_client:
|
||||
response = http_client.post(
|
||||
f"{config.coordinator_url}/v1/miners/poll",
|
||||
json={"max_wait_seconds": 5},
|
||||
headers={
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 204:
|
||||
return {"worker": worker_id, "status": "no_job"}
|
||||
if response.status_code == 200:
|
||||
job = response.json()
|
||||
if job:
|
||||
job_id = job.get('job_id')
|
||||
time.sleep(2) # Simulate processing
|
||||
|
||||
result_response = http_client.post(
|
||||
f"{config.coordinator_url}/v1/miners/{job_id}/result",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
json={"result": {"output": f"Processed by worker {worker_id}"}, "metrics": {}}
|
||||
)
|
||||
|
||||
return {
|
||||
"worker": worker_id,
|
||||
"job_id": job_id,
|
||||
"status": "completed" if result_response.status_code == 200 else "failed"
|
||||
}
|
||||
return {"worker": worker_id, "status": "no_job"}
|
||||
except Exception as e:
|
||||
return {"worker": worker_id, "status": "error", "error": str(e)}
|
||||
|
||||
|
||||
def _run_ollama_inference(ollama_url: str, model: str, prompt: str) -> Dict[str, Any]:
|
||||
"""Run inference through local Ollama instance"""
|
||||
try:
|
||||
with httpx.Client(timeout=120) as client:
|
||||
response = client.post(
|
||||
f"{ollama_url}/api/generate",
|
||||
json={
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"stream": False
|
||||
}
|
||||
)
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
return {
|
||||
"response": data.get("response", ""),
|
||||
"model": data.get("model", model),
|
||||
"total_duration": data.get("total_duration", 0),
|
||||
"eval_count": data.get("eval_count", 0),
|
||||
"eval_duration": data.get("eval_duration", 0),
|
||||
}
|
||||
else:
|
||||
return {"error": f"Ollama returned {response.status_code}"}
|
||||
except Exception as e:
|
||||
return {"error": str(e)}
|
||||
|
||||
|
||||
@miner.command(name="mine-ollama")
|
||||
@click.option("--jobs", type=int, default=1, help="Number of jobs to process")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.option("--ollama-url", default="http://localhost:11434", help="Ollama API URL")
|
||||
@click.option("--model", default="gemma3:1b", help="Ollama model to use")
|
||||
@click.pass_context
|
||||
def mine_ollama(ctx, jobs: int, miner_id: str, ollama_url: str, model: str):
|
||||
"""Mine jobs using local Ollama for GPU inference"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Verify Ollama is reachable
|
||||
try:
|
||||
with httpx.Client(timeout=5) as client:
|
||||
resp = client.get(f"{ollama_url}/api/tags")
|
||||
if resp.status_code != 200:
|
||||
error(f"Cannot reach Ollama at {ollama_url}")
|
||||
return
|
||||
models = [m["name"] for m in resp.json().get("models", [])]
|
||||
if model not in models:
|
||||
error(f"Model '{model}' not found. Available: {', '.join(models)}")
|
||||
return
|
||||
success(f"Ollama connected: {ollama_url} | model: {model}")
|
||||
except Exception as e:
|
||||
error(f"Cannot connect to Ollama: {e}")
|
||||
return
|
||||
|
||||
processed = 0
|
||||
while processed < jobs:
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/poll",
|
||||
json={"max_wait_seconds": 10},
|
||||
headers={
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 204:
|
||||
time.sleep(5)
|
||||
continue
|
||||
|
||||
if response.status_code != 200:
|
||||
error(f"Failed to poll: {response.status_code}")
|
||||
break
|
||||
|
||||
job = response.json()
|
||||
if not job:
|
||||
time.sleep(5)
|
||||
continue
|
||||
|
||||
job_id = job.get('job_id')
|
||||
payload = job.get('payload', {})
|
||||
prompt = payload.get('prompt', '')
|
||||
job_model = payload.get('model', model)
|
||||
|
||||
output({
|
||||
"job_id": job_id,
|
||||
"status": "processing",
|
||||
"prompt": prompt[:80] + ("..." if len(prompt) > 80 else ""),
|
||||
"model": job_model,
|
||||
"job_number": processed + 1
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
# Run inference through Ollama
|
||||
start_time = time.time()
|
||||
ollama_result = _run_ollama_inference(ollama_url, job_model, prompt)
|
||||
duration_ms = int((time.time() - start_time) * 1000)
|
||||
|
||||
if "error" in ollama_result:
|
||||
error(f"Ollama inference failed: {ollama_result['error']}")
|
||||
# Submit failure
|
||||
client.post(
|
||||
f"{config.coordinator_url}/v1/miners/{job_id}/fail",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
json={"error_code": "INFERENCE_FAILED", "error_message": ollama_result['error'], "metrics": {}}
|
||||
)
|
||||
continue
|
||||
|
||||
# Submit successful result
|
||||
result_response = client.post(
|
||||
f"{config.coordinator_url}/v1/miners/{job_id}/result",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-Api-Key": config.api_key or "",
|
||||
"X-Miner-ID": miner_id
|
||||
},
|
||||
json={
|
||||
"result": {
|
||||
"response": ollama_result.get("response", ""),
|
||||
"model": ollama_result.get("model", job_model),
|
||||
"provider": "ollama",
|
||||
"eval_count": ollama_result.get("eval_count", 0),
|
||||
},
|
||||
"metrics": {
|
||||
"duration_ms": duration_ms,
|
||||
"eval_count": ollama_result.get("eval_count", 0),
|
||||
"eval_duration": ollama_result.get("eval_duration", 0),
|
||||
"total_duration": ollama_result.get("total_duration", 0),
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
if result_response.status_code == 200:
|
||||
success(f"Job {job_id} completed via Ollama ({duration_ms}ms)")
|
||||
processed += 1
|
||||
else:
|
||||
error(f"Failed to submit result: {result_response.status_code}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error: {e}")
|
||||
break
|
||||
|
||||
output({
|
||||
"total_processed": processed,
|
||||
"miner_id": miner_id,
|
||||
"model": model,
|
||||
"provider": "ollama"
|
||||
}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@miner.command(name="concurrent-mine")
|
||||
@click.option("--workers", type=int, default=2, help="Number of concurrent workers")
|
||||
@click.option("--jobs", "total_jobs", type=int, default=5, help="Total jobs to process")
|
||||
@click.option("--miner-id", default="cli-miner", help="Miner ID")
|
||||
@click.pass_context
|
||||
def concurrent_mine(ctx, workers: int, total_jobs: int, miner_id: str):
|
||||
"""Mine with concurrent job processing"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
success(f"Starting concurrent mining: {workers} workers, {total_jobs} jobs")
|
||||
|
||||
completed = 0
|
||||
failed = 0
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
|
||||
remaining = total_jobs
|
||||
while remaining > 0:
|
||||
batch_size = min(remaining, workers)
|
||||
futures = [
|
||||
executor.submit(_process_single_job, config, miner_id, i)
|
||||
for i in range(batch_size)
|
||||
]
|
||||
|
||||
for future in concurrent.futures.as_completed(futures):
|
||||
result = future.result()
|
||||
if result.get("status") == "completed":
|
||||
completed += 1
|
||||
remaining -= 1
|
||||
output(result, ctx.obj['output_format'])
|
||||
elif result.get("status") == "no_job":
|
||||
time.sleep(2)
|
||||
else:
|
||||
failed += 1
|
||||
remaining -= 1
|
||||
|
||||
output({
|
||||
"status": "finished",
|
||||
"completed": completed,
|
||||
"failed": failed,
|
||||
"workers": workers
|
||||
}, ctx.obj['output_format'])
|
||||
485
cli/commands/monitor.py
Executable file
485
cli/commands/monitor.py
Executable file
@@ -0,0 +1,485 @@
|
||||
"""Monitoring and dashboard commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from datetime import datetime, timedelta
|
||||
from utils import output, error, success, console
|
||||
|
||||
|
||||
@click.group()
|
||||
def monitor():
|
||||
"""Monitoring, metrics, and alerting commands"""
|
||||
pass
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.option("--refresh", type=int, default=5, help="Refresh interval in seconds")
|
||||
@click.option("--duration", type=int, default=0, help="Duration in seconds (0 = indefinite)")
|
||||
@click.pass_context
|
||||
def dashboard(ctx, refresh: int, duration: int):
|
||||
"""Real-time system dashboard"""
|
||||
config = ctx.obj['config']
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
while True:
|
||||
elapsed = time.time() - start_time
|
||||
if duration > 0 and elapsed >= duration:
|
||||
break
|
||||
|
||||
console.clear()
|
||||
console.rule("[bold blue]AITBC Dashboard[/bold blue]")
|
||||
console.print(f"[dim]Refreshing every {refresh}s | Elapsed: {int(elapsed)}s[/dim]\n")
|
||||
|
||||
# Fetch system dashboard
|
||||
try:
|
||||
with httpx.Client(timeout=5) as client:
|
||||
# Get dashboard data
|
||||
try:
|
||||
url = f"{config.coordinator_url}/api/v1/dashboard"
|
||||
resp = client.get(
|
||||
url,
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
dashboard = resp.json()
|
||||
console.print("[bold green]Dashboard Status:[/bold green] Online")
|
||||
|
||||
# Overall status
|
||||
overall_status = dashboard.get("overall_status", "unknown")
|
||||
console.print(f" Overall Status: {overall_status}")
|
||||
|
||||
# Services summary
|
||||
services = dashboard.get("services", {})
|
||||
console.print(f" Services: {len(services)}")
|
||||
|
||||
for service_name, service_data in services.items():
|
||||
status = service_data.get("status", "unknown")
|
||||
console.print(f" {service_name}: {status}")
|
||||
|
||||
# Metrics summary
|
||||
metrics = dashboard.get("metrics", {})
|
||||
if metrics:
|
||||
health_pct = metrics.get("health_percentage", 0)
|
||||
console.print(f" Health: {health_pct:.1f}%")
|
||||
|
||||
else:
|
||||
console.print(f"[bold yellow]Dashboard:[/bold yellow] HTTP {resp.status_code}")
|
||||
except Exception as e:
|
||||
console.print(f"[bold red]Dashboard:[/bold red] Error - {e}")
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error fetching data: {e}[/red]")
|
||||
|
||||
console.print(f"\n[dim]Press Ctrl+C to exit[/dim]")
|
||||
time.sleep(refresh)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[bold]Dashboard stopped[/bold]")
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.option("--period", default="24h", help="Time period (1h, 24h, 7d, 30d)")
|
||||
@click.option("--export", "export_path", type=click.Path(), help="Export metrics to file")
|
||||
@click.pass_context
|
||||
def metrics(ctx, period: str, export_path: Optional[str]):
|
||||
"""Collect and display system metrics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Parse period
|
||||
multipliers = {"h": 3600, "d": 86400}
|
||||
unit = period[-1]
|
||||
value = int(period[:-1])
|
||||
seconds = value * multipliers.get(unit, 3600)
|
||||
since = datetime.now() - timedelta(seconds=seconds)
|
||||
|
||||
metrics_data = {
|
||||
"period": period,
|
||||
"since": since.isoformat(),
|
||||
"collected_at": datetime.now().isoformat(),
|
||||
"coordinator": {},
|
||||
"jobs": {},
|
||||
"miners": {}
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client(timeout=10) as client:
|
||||
# Coordinator metrics
|
||||
try:
|
||||
resp = client.get(
|
||||
f"{config.coordinator_url}/status",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
metrics_data["coordinator"] = resp.json()
|
||||
metrics_data["coordinator"]["status"] = "online"
|
||||
else:
|
||||
metrics_data["coordinator"]["status"] = f"error_{resp.status_code}"
|
||||
except Exception:
|
||||
metrics_data["coordinator"]["status"] = "offline"
|
||||
|
||||
# Job metrics
|
||||
try:
|
||||
resp = client.get(
|
||||
f"{config.coordinator_url}/jobs",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params={"limit": 100}
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
jobs = resp.json()
|
||||
if isinstance(jobs, list):
|
||||
metrics_data["jobs"] = {
|
||||
"total": len(jobs),
|
||||
"completed": sum(1 for j in jobs if j.get("status") == "completed"),
|
||||
"pending": sum(1 for j in jobs if j.get("status") == "pending"),
|
||||
"failed": sum(1 for j in jobs if j.get("status") == "failed"),
|
||||
}
|
||||
except Exception:
|
||||
metrics_data["jobs"] = {"error": "unavailable"}
|
||||
|
||||
# Miner metrics
|
||||
try:
|
||||
resp = client.get(
|
||||
f"{config.coordinator_url}/miners",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
miners = resp.json()
|
||||
if isinstance(miners, list):
|
||||
metrics_data["miners"] = {
|
||||
"total": len(miners),
|
||||
"online": sum(1 for m in miners if m.get("status") == "ONLINE"),
|
||||
"offline": sum(1 for m in miners if m.get("status") != "ONLINE"),
|
||||
}
|
||||
except Exception:
|
||||
metrics_data["miners"] = {"error": "unavailable"}
|
||||
|
||||
except Exception as e:
|
||||
error(f"Failed to collect metrics: {e}")
|
||||
|
||||
if export_path:
|
||||
with open(export_path, "w") as f:
|
||||
json.dump(metrics_data, f, indent=2)
|
||||
success(f"Metrics exported to {export_path}")
|
||||
|
||||
output(metrics_data, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.argument("action", type=click.Choice(["add", "list", "remove", "test"]))
|
||||
@click.option("--name", help="Alert name")
|
||||
@click.option("--type", "alert_type", type=click.Choice(["coordinator_down", "miner_offline", "job_failed", "low_balance"]), help="Alert type")
|
||||
@click.option("--threshold", type=float, help="Alert threshold value")
|
||||
@click.option("--webhook", help="Webhook URL for notifications")
|
||||
@click.pass_context
|
||||
def alerts(ctx, action: str, name: Optional[str], alert_type: Optional[str],
|
||||
threshold: Optional[float], webhook: Optional[str]):
|
||||
"""Configure monitoring alerts"""
|
||||
alerts_dir = Path.home() / ".aitbc" / "alerts"
|
||||
alerts_dir.mkdir(parents=True, exist_ok=True)
|
||||
alerts_file = alerts_dir / "alerts.json"
|
||||
|
||||
# Load existing alerts
|
||||
existing = []
|
||||
if alerts_file.exists():
|
||||
with open(alerts_file) as f:
|
||||
existing = json.load(f)
|
||||
|
||||
if action == "add":
|
||||
if not name or not alert_type:
|
||||
error("Alert name and type required (--name, --type)")
|
||||
return
|
||||
alert = {
|
||||
"name": name,
|
||||
"type": alert_type,
|
||||
"threshold": threshold,
|
||||
"webhook": webhook,
|
||||
"created_at": datetime.now().isoformat(),
|
||||
"enabled": True
|
||||
}
|
||||
existing.append(alert)
|
||||
with open(alerts_file, "w") as f:
|
||||
json.dump(existing, f, indent=2)
|
||||
success(f"Alert '{name}' added")
|
||||
output(alert, ctx.obj['output_format'])
|
||||
|
||||
elif action == "list":
|
||||
if not existing:
|
||||
output({"message": "No alerts configured"}, ctx.obj['output_format'])
|
||||
else:
|
||||
output(existing, ctx.obj['output_format'])
|
||||
|
||||
elif action == "remove":
|
||||
if not name:
|
||||
error("Alert name required (--name)")
|
||||
return
|
||||
existing = [a for a in existing if a["name"] != name]
|
||||
with open(alerts_file, "w") as f:
|
||||
json.dump(existing, f, indent=2)
|
||||
success(f"Alert '{name}' removed")
|
||||
|
||||
elif action == "test":
|
||||
if not name:
|
||||
error("Alert name required (--name)")
|
||||
return
|
||||
alert = next((a for a in existing if a["name"] == name), None)
|
||||
if not alert:
|
||||
error(f"Alert '{name}' not found")
|
||||
return
|
||||
if alert.get("webhook"):
|
||||
try:
|
||||
with httpx.Client(timeout=10) as client:
|
||||
resp = client.post(alert["webhook"], json={
|
||||
"alert": name,
|
||||
"type": alert["type"],
|
||||
"message": f"Test alert from AITBC CLI",
|
||||
"timestamp": datetime.now().isoformat()
|
||||
})
|
||||
output({"status": "sent", "response_code": resp.status_code}, ctx.obj['output_format'])
|
||||
except Exception as e:
|
||||
error(f"Webhook test failed: {e}")
|
||||
else:
|
||||
output({"status": "no_webhook", "alert": alert}, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.option("--period", default="7d", help="Analysis period (1d, 7d, 30d)")
|
||||
@click.pass_context
|
||||
def history(ctx, period: str):
|
||||
"""Historical data analysis"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
multipliers = {"h": 3600, "d": 86400}
|
||||
unit = period[-1]
|
||||
value = int(period[:-1])
|
||||
seconds = value * multipliers.get(unit, 3600)
|
||||
since = datetime.now() - timedelta(seconds=seconds)
|
||||
|
||||
analysis = {
|
||||
"period": period,
|
||||
"since": since.isoformat(),
|
||||
"analyzed_at": datetime.now().isoformat(),
|
||||
"summary": {}
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client(timeout=10) as client:
|
||||
try:
|
||||
resp = client.get(
|
||||
f"{config.coordinator_url}/jobs",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params={"limit": 500}
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
jobs = resp.json()
|
||||
if isinstance(jobs, list):
|
||||
completed = [j for j in jobs if j.get("status") == "completed"]
|
||||
failed = [j for j in jobs if j.get("status") == "failed"]
|
||||
analysis["summary"] = {
|
||||
"total_jobs": len(jobs),
|
||||
"completed": len(completed),
|
||||
"failed": len(failed),
|
||||
"success_rate": f"{len(completed) / max(1, len(jobs)) * 100:.1f}%",
|
||||
}
|
||||
except Exception:
|
||||
analysis["summary"] = {"error": "Could not fetch job data"}
|
||||
|
||||
except Exception as e:
|
||||
error(f"Analysis failed: {e}")
|
||||
|
||||
output(analysis, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.argument("action", type=click.Choice(["add", "list", "remove", "test"]))
|
||||
@click.option("--name", help="Webhook name")
|
||||
@click.option("--url", help="Webhook URL")
|
||||
@click.option("--events", help="Comma-separated event types (job_completed,miner_offline,alert)")
|
||||
@click.pass_context
|
||||
def webhooks(ctx, action: str, name: Optional[str], url: Optional[str], events: Optional[str]):
|
||||
"""Manage webhook notifications"""
|
||||
webhooks_dir = Path.home() / ".aitbc" / "webhooks"
|
||||
webhooks_dir.mkdir(parents=True, exist_ok=True)
|
||||
webhooks_file = webhooks_dir / "webhooks.json"
|
||||
|
||||
existing = []
|
||||
if webhooks_file.exists():
|
||||
with open(webhooks_file) as f:
|
||||
existing = json.load(f)
|
||||
|
||||
if action == "add":
|
||||
if not name or not url:
|
||||
error("Webhook name and URL required (--name, --url)")
|
||||
return
|
||||
webhook = {
|
||||
"name": name,
|
||||
"url": url,
|
||||
"events": events.split(",") if events else ["all"],
|
||||
"created_at": datetime.now().isoformat(),
|
||||
"enabled": True
|
||||
}
|
||||
existing.append(webhook)
|
||||
with open(webhooks_file, "w") as f:
|
||||
json.dump(existing, f, indent=2)
|
||||
success(f"Webhook '{name}' added")
|
||||
output(webhook, ctx.obj['output_format'])
|
||||
|
||||
elif action == "list":
|
||||
if not existing:
|
||||
output({"message": "No webhooks configured"}, ctx.obj['output_format'])
|
||||
else:
|
||||
output(existing, ctx.obj['output_format'])
|
||||
|
||||
elif action == "remove":
|
||||
if not name:
|
||||
error("Webhook name required (--name)")
|
||||
return
|
||||
existing = [w for w in existing if w["name"] != name]
|
||||
with open(webhooks_file, "w") as f:
|
||||
json.dump(existing, f, indent=2)
|
||||
success(f"Webhook '{name}' removed")
|
||||
|
||||
elif action == "test":
|
||||
if not name:
|
||||
error("Webhook name required (--name)")
|
||||
return
|
||||
wh = next((w for w in existing if w["name"] == name), None)
|
||||
if not wh:
|
||||
error(f"Webhook '{name}' not found")
|
||||
return
|
||||
try:
|
||||
with httpx.Client(timeout=10) as client:
|
||||
resp = client.post(wh["url"], json={
|
||||
"event": "test",
|
||||
"source": "aitbc-cli",
|
||||
"message": "Test webhook notification",
|
||||
"timestamp": datetime.now().isoformat()
|
||||
})
|
||||
output({"status": "sent", "response_code": resp.status_code}, ctx.obj['output_format'])
|
||||
except Exception as e:
|
||||
error(f"Webhook test failed: {e}")
|
||||
|
||||
|
||||
CAMPAIGNS_DIR = Path.home() / ".aitbc" / "campaigns"
|
||||
|
||||
|
||||
def _ensure_campaigns():
|
||||
CAMPAIGNS_DIR.mkdir(parents=True, exist_ok=True)
|
||||
campaigns_file = CAMPAIGNS_DIR / "campaigns.json"
|
||||
if not campaigns_file.exists():
|
||||
# Seed with default campaigns
|
||||
default = {"campaigns": [
|
||||
{
|
||||
"id": "staking_launch",
|
||||
"name": "Staking Launch Campaign",
|
||||
"type": "staking",
|
||||
"apy_boost": 2.0,
|
||||
"start_date": "2026-02-01T00:00:00",
|
||||
"end_date": "2026-04-01T00:00:00",
|
||||
"status": "active",
|
||||
"total_staked": 0,
|
||||
"participants": 0,
|
||||
"rewards_distributed": 0
|
||||
},
|
||||
{
|
||||
"id": "liquidity_mining_q1",
|
||||
"name": "Q1 Liquidity Mining",
|
||||
"type": "liquidity",
|
||||
"apy_boost": 3.0,
|
||||
"start_date": "2026-01-15T00:00:00",
|
||||
"end_date": "2026-03-15T00:00:00",
|
||||
"status": "active",
|
||||
"total_staked": 0,
|
||||
"participants": 0,
|
||||
"rewards_distributed": 0
|
||||
}
|
||||
]}
|
||||
with open(campaigns_file, "w") as f:
|
||||
json.dump(default, f, indent=2)
|
||||
return campaigns_file
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.option("--status", type=click.Choice(["active", "ended", "all"]), default="all", help="Filter by status")
|
||||
@click.pass_context
|
||||
def campaigns(ctx, status: str):
|
||||
"""List active incentive campaigns"""
|
||||
campaigns_file = _ensure_campaigns()
|
||||
with open(campaigns_file) as f:
|
||||
data = json.load(f)
|
||||
|
||||
campaign_list = data.get("campaigns", [])
|
||||
|
||||
# Auto-update status
|
||||
now = datetime.now()
|
||||
for c in campaign_list:
|
||||
end = datetime.fromisoformat(c["end_date"])
|
||||
if now > end and c["status"] == "active":
|
||||
c["status"] = "ended"
|
||||
with open(campaigns_file, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
if status != "all":
|
||||
campaign_list = [c for c in campaign_list if c["status"] == status]
|
||||
|
||||
if not campaign_list:
|
||||
output({"message": "No campaigns found"}, ctx.obj['output_format'])
|
||||
return
|
||||
|
||||
output(campaign_list, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@monitor.command(name="campaign-stats")
|
||||
@click.argument("campaign_id", required=False)
|
||||
@click.pass_context
|
||||
def campaign_stats(ctx, campaign_id: Optional[str]):
|
||||
"""Campaign performance metrics (TVL, participants, rewards)"""
|
||||
campaigns_file = _ensure_campaigns()
|
||||
with open(campaigns_file) as f:
|
||||
data = json.load(f)
|
||||
|
||||
campaign_list = data.get("campaigns", [])
|
||||
|
||||
if campaign_id:
|
||||
campaign = next((c for c in campaign_list if c["id"] == campaign_id), None)
|
||||
if not campaign:
|
||||
error(f"Campaign '{campaign_id}' not found")
|
||||
ctx.exit(1)
|
||||
return
|
||||
targets = [campaign]
|
||||
else:
|
||||
targets = campaign_list
|
||||
|
||||
stats = []
|
||||
for c in targets:
|
||||
start = datetime.fromisoformat(c["start_date"])
|
||||
end = datetime.fromisoformat(c["end_date"])
|
||||
now = datetime.now()
|
||||
duration_days = (end - start).days
|
||||
elapsed_days = min((now - start).days, duration_days)
|
||||
progress_pct = round(elapsed_days / max(duration_days, 1) * 100, 1)
|
||||
|
||||
stats.append({
|
||||
"campaign_id": c["id"],
|
||||
"name": c["name"],
|
||||
"type": c["type"],
|
||||
"status": c["status"],
|
||||
"apy_boost": c.get("apy_boost", 0),
|
||||
"tvl": c.get("total_staked", 0),
|
||||
"participants": c.get("participants", 0),
|
||||
"rewards_distributed": c.get("rewards_distributed", 0),
|
||||
"duration_days": duration_days,
|
||||
"elapsed_days": elapsed_days,
|
||||
"progress_pct": progress_pct,
|
||||
"start_date": c["start_date"],
|
||||
"end_date": c["end_date"]
|
||||
})
|
||||
|
||||
if len(stats) == 1:
|
||||
output(stats[0], ctx.obj['output_format'])
|
||||
else:
|
||||
output(stats, ctx.obj['output_format'])
|
||||
67
cli/commands/multi_region_load_balancer.py
Executable file
67
cli/commands/multi_region_load_balancer.py
Executable file
@@ -0,0 +1,67 @@
|
||||
"""
|
||||
Multi-Region Load Balancer CLI Commands for AITBC
|
||||
Commands for managing multi-region load balancing
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def multi_region_load_balancer():
|
||||
"""Multi-region load balancer management commands"""
|
||||
pass
|
||||
|
||||
@multi_region_load_balancer.command()
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def status(test_mode):
|
||||
"""Get load balancer status"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo("⚖️ Load Balancer Status (test mode)")
|
||||
click.echo("📊 Total Rules: 5")
|
||||
click.echo("✅ Active Rules: 5")
|
||||
click.echo("🌍 Regions: 3")
|
||||
click.echo("📈 Requests/sec: 1,250")
|
||||
return
|
||||
|
||||
# Get status from service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/dashboard",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status = response.json()
|
||||
dashboard = status['dashboard']
|
||||
click.echo("⚖️ Load Balancer Status")
|
||||
click.echo(f"📊 Total Rules: {dashboard.get('total_balancers', 0)}")
|
||||
click.echo(f"✅ Active Rules: {dashboard.get('active_balancers', 0)}")
|
||||
click.echo(f"🌍 Regions: {dashboard.get('regions', 0)}")
|
||||
click.echo(f"📈 Requests/sec: {dashboard.get('requests_per_second', 0)}")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get status: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting status: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8019",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
multi_region_load_balancer()
|
||||
470
cli/commands/multimodal.py
Executable file
470
cli/commands/multimodal.py
Executable file
@@ -0,0 +1,470 @@
|
||||
"""Multi-modal processing commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import base64
|
||||
import mimetypes
|
||||
from typing import Optional, Dict, Any, List
|
||||
from pathlib import Path
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def multimodal():
|
||||
"""Multi-modal agent processing and cross-modal operations"""
|
||||
pass
|
||||
|
||||
|
||||
@multimodal.command()
|
||||
@click.option("--name", required=True, help="Multi-modal agent name")
|
||||
@click.option("--modalities", required=True, help="Comma-separated modalities (text,image,audio,video)")
|
||||
@click.option("--description", default="", help="Agent description")
|
||||
@click.option("--model-config", type=click.File('r'), help="Model configuration JSON file")
|
||||
@click.option("--gpu-acceleration", is_flag=True, help="Enable GPU acceleration")
|
||||
@click.pass_context
|
||||
def agent(ctx, name: str, modalities: str, description: str, model_config, gpu_acceleration: bool):
|
||||
"""Create multi-modal agent"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
modality_list = [mod.strip() for mod in modalities.split(',')]
|
||||
|
||||
agent_data = {
|
||||
"name": name,
|
||||
"description": description,
|
||||
"modalities": modality_list,
|
||||
"gpu_acceleration": gpu_acceleration,
|
||||
"agent_type": "multimodal"
|
||||
}
|
||||
|
||||
if model_config:
|
||||
try:
|
||||
config_data = json.load(model_config)
|
||||
agent_data["model_config"] = config_data
|
||||
except Exception as e:
|
||||
error(f"Failed to read model config file: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/agents",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=agent_data
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
agent = response.json()
|
||||
success(f"Multi-modal agent created: {agent['id']}")
|
||||
output(agent, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create multi-modal agent: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@multimodal.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--text", help="Text input")
|
||||
@click.option("--image", type=click.Path(exists=True), help="Image file path")
|
||||
@click.option("--audio", type=click.Path(exists=True), help="Audio file path")
|
||||
@click.option("--video", type=click.Path(exists=True), help="Video file path")
|
||||
@click.option("--output-format", default="json", type=click.Choice(["json", "text", "binary"]),
|
||||
help="Output format for results")
|
||||
@click.pass_context
|
||||
def process(ctx, agent_id: str, text: Optional[str], image: Optional[str],
|
||||
audio: Optional[str], video: Optional[str], output_format: str):
|
||||
"""Process multi-modal inputs with agent"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Prepare multi-modal data
|
||||
modal_data = {}
|
||||
|
||||
if text:
|
||||
modal_data["text"] = text
|
||||
|
||||
if image:
|
||||
try:
|
||||
with open(image, 'rb') as f:
|
||||
image_data = f.read()
|
||||
modal_data["image"] = {
|
||||
"data": base64.b64encode(image_data).decode(),
|
||||
"mime_type": mimetypes.guess_type(image)[0] or "image/jpeg",
|
||||
"filename": Path(image).name
|
||||
}
|
||||
except Exception as e:
|
||||
error(f"Failed to read image file: {e}")
|
||||
return
|
||||
|
||||
if audio:
|
||||
try:
|
||||
with open(audio, 'rb') as f:
|
||||
audio_data = f.read()
|
||||
modal_data["audio"] = {
|
||||
"data": base64.b64encode(audio_data).decode(),
|
||||
"mime_type": mimetypes.guess_type(audio)[0] or "audio/wav",
|
||||
"filename": Path(audio).name
|
||||
}
|
||||
except Exception as e:
|
||||
error(f"Failed to read audio file: {e}")
|
||||
return
|
||||
|
||||
if video:
|
||||
try:
|
||||
with open(video, 'rb') as f:
|
||||
video_data = f.read()
|
||||
modal_data["video"] = {
|
||||
"data": base64.b64encode(video_data).decode(),
|
||||
"mime_type": mimetypes.guess_type(video)[0] or "video/mp4",
|
||||
"filename": Path(video).name
|
||||
}
|
||||
except Exception as e:
|
||||
error(f"Failed to read video file: {e}")
|
||||
return
|
||||
|
||||
if not modal_data:
|
||||
error("At least one modality input must be provided")
|
||||
return
|
||||
|
||||
process_data = {
|
||||
"modalities": modal_data,
|
||||
"output_format": output_format
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/agents/{agent_id}/process",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=process_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success("Multi-modal processing completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to process multi-modal inputs: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@multimodal.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--dataset", default="coco_vqa", help="Dataset name for benchmarking")
|
||||
@click.option("--metrics", default="accuracy,latency", help="Comma-separated metrics to evaluate")
|
||||
@click.option("--iterations", default=100, help="Number of benchmark iterations")
|
||||
@click.pass_context
|
||||
def benchmark(ctx, agent_id: str, dataset: str, metrics: str, iterations: int):
|
||||
"""Benchmark multi-modal agent performance"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
benchmark_data = {
|
||||
"dataset": dataset,
|
||||
"metrics": [m.strip() for m in metrics.split(',')],
|
||||
"iterations": iterations
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/agents/{agent_id}/benchmark",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=benchmark_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
benchmark = response.json()
|
||||
success(f"Benchmark started: {benchmark['id']}")
|
||||
output(benchmark, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start benchmark: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@multimodal.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--objective", default="throughput",
|
||||
type=click.Choice(["throughput", "latency", "accuracy", "efficiency"]),
|
||||
help="Optimization objective")
|
||||
@click.option("--target", help="Target value for optimization")
|
||||
@click.pass_context
|
||||
def optimize(ctx, agent_id: str, objective: str, target: Optional[str]):
|
||||
"""Optimize multi-modal agent pipeline"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
optimization_data = {"objective": objective}
|
||||
if target:
|
||||
optimization_data["target"] = target
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/agents/{agent_id}/optimize",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=optimization_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Multi-modal optimization completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to optimize agent: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def convert():
|
||||
"""Cross-modal conversion operations"""
|
||||
pass
|
||||
|
||||
|
||||
multimodal.add_command(convert)
|
||||
|
||||
|
||||
@convert.command()
|
||||
@click.option("--input", "input_path", required=True, type=click.Path(exists=True), help="Input file path")
|
||||
@click.option("--output", "output_format", required=True,
|
||||
type=click.Choice(["text", "image", "audio", "video"]),
|
||||
help="Output modality")
|
||||
@click.option("--model", default="blip", help="Conversion model to use")
|
||||
@click.option("--output-file", type=click.Path(), help="Output file path")
|
||||
@click.pass_context
|
||||
def convert(ctx, input_path: str, output_format: str, model: str, output_file: Optional[str]):
|
||||
"""Convert between modalities"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Read input file
|
||||
try:
|
||||
with open(input_path, 'rb') as f:
|
||||
input_data = f.read()
|
||||
except Exception as e:
|
||||
error(f"Failed to read input file: {e}")
|
||||
return
|
||||
|
||||
conversion_data = {
|
||||
"input": {
|
||||
"data": base64.b64encode(input_data).decode(),
|
||||
"mime_type": mimetypes.guess_type(input_path)[0] or "application/octet-stream",
|
||||
"filename": Path(input_path).name
|
||||
},
|
||||
"output_modality": output_format,
|
||||
"model": model
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/convert",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=conversion_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
|
||||
if output_file and result.get("output_data"):
|
||||
# Decode and save output
|
||||
output_data = base64.b64decode(result["output_data"])
|
||||
with open(output_file, 'wb') as f:
|
||||
f.write(output_data)
|
||||
success(f"Conversion output saved to {output_file}")
|
||||
else:
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to convert modality: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def search():
|
||||
"""Multi-modal search operations"""
|
||||
pass
|
||||
|
||||
|
||||
multimodal.add_command(search)
|
||||
|
||||
|
||||
@search.command()
|
||||
@click.argument("query")
|
||||
@click.option("--modalities", default="image,text", help="Comma-separated modalities to search")
|
||||
@click.option("--limit", default=20, help="Number of results to return")
|
||||
@click.option("--threshold", default=0.5, help="Similarity threshold")
|
||||
@click.pass_context
|
||||
def search(ctx, query: str, modalities: str, limit: int, threshold: float):
|
||||
"""Multi-modal search across different modalities"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
search_data = {
|
||||
"query": query,
|
||||
"modalities": [m.strip() for m in modalities.split(',')],
|
||||
"limit": limit,
|
||||
"threshold": threshold
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/search",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=search_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
results = response.json()
|
||||
output(results, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to perform multi-modal search: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def attention():
|
||||
"""Cross-modal attention analysis"""
|
||||
pass
|
||||
|
||||
|
||||
multimodal.add_command(attention)
|
||||
|
||||
|
||||
@attention.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--inputs", type=click.File('r'), required=True, help="Multi-modal inputs JSON file")
|
||||
@click.option("--visualize", is_flag=True, help="Generate attention visualization")
|
||||
@click.option("--output", type=click.Path(), help="Output file for visualization")
|
||||
@click.pass_context
|
||||
def attention(ctx, agent_id: str, inputs, visualize: bool, output: Optional[str]):
|
||||
"""Analyze cross-modal attention patterns"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
inputs_data = json.load(inputs)
|
||||
except Exception as e:
|
||||
error(f"Failed to read inputs file: {e}")
|
||||
return
|
||||
|
||||
attention_data = {
|
||||
"inputs": inputs_data,
|
||||
"visualize": visualize
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/agents/{agent_id}/attention",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=attention_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
|
||||
if visualize and output and result.get("visualization"):
|
||||
# Save visualization
|
||||
viz_data = base64.b64decode(result["visualization"])
|
||||
with open(output, 'wb') as f:
|
||||
f.write(viz_data)
|
||||
success(f"Attention visualization saved to {output}")
|
||||
else:
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to analyze attention: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@multimodal.command()
|
||||
@click.argument("agent_id")
|
||||
@click.pass_context
|
||||
def capabilities(ctx, agent_id: str):
|
||||
"""List multi-modal agent capabilities"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/multimodal/agents/{agent_id}/capabilities",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
capabilities = response.json()
|
||||
output(capabilities, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get agent capabilities: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@multimodal.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--modality", required=True,
|
||||
type=click.Choice(["text", "image", "audio", "video"]),
|
||||
help="Modality to test")
|
||||
@click.option("--test-data", type=click.File('r'), help="Test data JSON file")
|
||||
@click.pass_context
|
||||
def test(ctx, agent_id: str, modality: str, test_data):
|
||||
"""Test individual modality processing"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
test_input = {}
|
||||
if test_data:
|
||||
try:
|
||||
test_input = json.load(test_data)
|
||||
except Exception as e:
|
||||
error(f"Failed to read test data file: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/multimodal/agents/{agent_id}/test/{modality}",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=test_input
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Modality test completed for {modality}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to test modality: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
439
cli/commands/multisig.py
Executable file
439
cli/commands/multisig.py
Executable file
@@ -0,0 +1,439 @@
|
||||
"""Multi-signature wallet commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import hashlib
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime, timedelta
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def multisig():
|
||||
"""Multi-signature wallet management commands"""
|
||||
pass
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.option("--threshold", type=int, required=True, help="Number of signatures required")
|
||||
@click.option("--owners", required=True, help="Comma-separated list of owner addresses")
|
||||
@click.option("--name", help="Wallet name for identification")
|
||||
@click.option("--description", help="Wallet description")
|
||||
@click.pass_context
|
||||
def create(ctx, threshold: int, owners: str, name: Optional[str], description: Optional[str]):
|
||||
"""Create a multi-signature wallet"""
|
||||
|
||||
# Parse owners list
|
||||
owner_list = [owner.strip() for owner in owners.split(',')]
|
||||
|
||||
if threshold < 1 or threshold > len(owner_list):
|
||||
error(f"Threshold must be between 1 and {len(owner_list)}")
|
||||
return
|
||||
|
||||
# Generate unique wallet ID
|
||||
wallet_id = f"multisig_{str(uuid.uuid4())[:8]}"
|
||||
|
||||
# Create multisig wallet configuration
|
||||
wallet_config = {
|
||||
"wallet_id": wallet_id,
|
||||
"name": name or f"Multi-sig Wallet {wallet_id}",
|
||||
"threshold": threshold,
|
||||
"owners": owner_list,
|
||||
"status": "active",
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"description": description or f"Multi-signature wallet with {threshold}/{len(owner_list)} threshold",
|
||||
"transactions": [],
|
||||
"proposals": [],
|
||||
"balance": 0.0
|
||||
}
|
||||
|
||||
# Store wallet configuration
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
multisig_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load existing wallets
|
||||
wallets = {}
|
||||
if multisig_file.exists():
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
# Add new wallet
|
||||
wallets[wallet_id] = wallet_config
|
||||
|
||||
# Save wallets
|
||||
with open(multisig_file, 'w') as f:
|
||||
json.dump(wallets, f, indent=2)
|
||||
|
||||
success(f"Multi-signature wallet created: {wallet_id}")
|
||||
output({
|
||||
"wallet_id": wallet_id,
|
||||
"name": wallet_config["name"],
|
||||
"threshold": threshold,
|
||||
"owners": owner_list,
|
||||
"status": "created",
|
||||
"created_at": wallet_config["created_at"]
|
||||
})
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.option("--wallet-id", required=True, help="Multi-signature wallet ID")
|
||||
@click.option("--recipient", required=True, help="Recipient address")
|
||||
@click.option("--amount", type=float, required=True, help="Amount to send")
|
||||
@click.option("--description", help="Transaction description")
|
||||
@click.pass_context
|
||||
def propose(ctx, wallet_id: str, recipient: str, amount: float, description: Optional[str]):
|
||||
"""Propose a transaction for multi-signature approval"""
|
||||
|
||||
# Load wallets
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
if not multisig_file.exists():
|
||||
error("No multi-signature wallets found.")
|
||||
return
|
||||
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
if wallet_id not in wallets:
|
||||
error(f"Multi-signature wallet '{wallet_id}' not found.")
|
||||
return
|
||||
|
||||
wallet = wallets[wallet_id]
|
||||
|
||||
# Generate proposal ID
|
||||
proposal_id = f"prop_{str(uuid.uuid4())[:8]}"
|
||||
|
||||
# Create transaction proposal
|
||||
proposal = {
|
||||
"proposal_id": proposal_id,
|
||||
"wallet_id": wallet_id,
|
||||
"recipient": recipient,
|
||||
"amount": amount,
|
||||
"description": description or f"Send {amount} to {recipient}",
|
||||
"status": "pending",
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"signatures": [],
|
||||
"threshold": wallet["threshold"],
|
||||
"owners": wallet["owners"]
|
||||
}
|
||||
|
||||
# Add proposal to wallet
|
||||
wallet["proposals"].append(proposal)
|
||||
|
||||
# Save wallets
|
||||
with open(multisig_file, 'w') as f:
|
||||
json.dump(wallets, f, indent=2)
|
||||
|
||||
success(f"Transaction proposal created: {proposal_id}")
|
||||
output({
|
||||
"proposal_id": proposal_id,
|
||||
"wallet_id": wallet_id,
|
||||
"recipient": recipient,
|
||||
"amount": amount,
|
||||
"threshold": wallet["threshold"],
|
||||
"status": "pending",
|
||||
"created_at": proposal["created_at"]
|
||||
})
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.option("--proposal-id", required=True, help="Proposal ID to sign")
|
||||
@click.option("--signer", required=True, help="Signer address")
|
||||
@click.option("--private-key", help="Private key for signing (for demo)")
|
||||
@click.pass_context
|
||||
def sign(ctx, proposal_id: str, signer: str, private_key: Optional[str]):
|
||||
"""Sign a transaction proposal"""
|
||||
|
||||
# Load wallets
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
if not multisig_file.exists():
|
||||
error("No multi-signature wallets found.")
|
||||
return
|
||||
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
# Find the proposal
|
||||
target_wallet = None
|
||||
target_proposal = None
|
||||
|
||||
for wallet_id, wallet in wallets.items():
|
||||
for proposal in wallet.get("proposals", []):
|
||||
if proposal["proposal_id"] == proposal_id:
|
||||
target_wallet = wallet
|
||||
target_proposal = proposal
|
||||
break
|
||||
if target_proposal:
|
||||
break
|
||||
|
||||
if not target_proposal:
|
||||
error(f"Proposal '{proposal_id}' not found.")
|
||||
return
|
||||
|
||||
# Check if signer is an owner
|
||||
if signer not in target_proposal["owners"]:
|
||||
error(f"Signer '{signer}' is not an owner of this wallet.")
|
||||
return
|
||||
|
||||
# Check if already signed
|
||||
for sig in target_proposal["signatures"]:
|
||||
if sig["signer"] == signer:
|
||||
warning(f"Signer '{signer}' has already signed this proposal.")
|
||||
return
|
||||
|
||||
# Create signature (simplified for demo)
|
||||
signature_data = f"{proposal_id}:{signer}:{target_proposal['amount']}"
|
||||
signature = hashlib.sha256(signature_data.encode()).hexdigest()
|
||||
|
||||
# Add signature
|
||||
signature_obj = {
|
||||
"signer": signer,
|
||||
"signature": signature,
|
||||
"timestamp": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
target_proposal["signatures"].append(signature_obj)
|
||||
|
||||
# Check if threshold reached
|
||||
if len(target_proposal["signatures"]) >= target_proposal["threshold"]:
|
||||
target_proposal["status"] = "approved"
|
||||
target_proposal["approved_at"] = datetime.utcnow().isoformat()
|
||||
|
||||
# Add to transactions
|
||||
transaction = {
|
||||
"tx_id": f"tx_{str(uuid.uuid4())[:8]}",
|
||||
"proposal_id": proposal_id,
|
||||
"recipient": target_proposal["recipient"],
|
||||
"amount": target_proposal["amount"],
|
||||
"description": target_proposal["description"],
|
||||
"executed_at": target_proposal["approved_at"],
|
||||
"signatures": target_proposal["signatures"]
|
||||
}
|
||||
target_wallet["transactions"].append(transaction)
|
||||
|
||||
success(f"Transaction approved and executed! Transaction ID: {transaction['tx_id']}")
|
||||
else:
|
||||
success(f"Signature added. {len(target_proposal['signatures'])}/{target_proposal['threshold']} signatures collected.")
|
||||
|
||||
# Save wallets
|
||||
with open(multisig_file, 'w') as f:
|
||||
json.dump(wallets, f, indent=2)
|
||||
|
||||
output({
|
||||
"proposal_id": proposal_id,
|
||||
"signer": signer,
|
||||
"signatures_collected": len(target_proposal["signatures"]),
|
||||
"threshold": target_proposal["threshold"],
|
||||
"status": target_proposal["status"]
|
||||
})
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.option("--wallet-id", help="Filter by wallet ID")
|
||||
@click.option("--status", help="Filter by status (pending, approved, rejected)")
|
||||
@click.pass_context
|
||||
def list(ctx, wallet_id: Optional[str], status: Optional[str]):
|
||||
"""List multi-signature wallets and proposals"""
|
||||
|
||||
# Load wallets
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
if not multisig_file.exists():
|
||||
warning("No multi-signature wallets found.")
|
||||
return
|
||||
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
# Filter wallets
|
||||
wallet_list = []
|
||||
for wid, wallet in wallets.items():
|
||||
if wallet_id and wid != wallet_id:
|
||||
continue
|
||||
|
||||
wallet_info = {
|
||||
"wallet_id": wid,
|
||||
"name": wallet["name"],
|
||||
"threshold": wallet["threshold"],
|
||||
"owners": wallet["owners"],
|
||||
"status": wallet["status"],
|
||||
"created_at": wallet["created_at"],
|
||||
"balance": wallet.get("balance", 0.0),
|
||||
"total_proposals": len(wallet.get("proposals", [])),
|
||||
"total_transactions": len(wallet.get("transactions", []))
|
||||
}
|
||||
|
||||
# Filter proposals by status if specified
|
||||
if status:
|
||||
filtered_proposals = [p for p in wallet.get("proposals", []) if p.get("status") == status]
|
||||
wallet_info["filtered_proposals"] = len(filtered_proposals)
|
||||
|
||||
wallet_list.append(wallet_info)
|
||||
|
||||
if not wallet_list:
|
||||
error("No multi-signature wallets found matching the criteria.")
|
||||
return
|
||||
|
||||
output({
|
||||
"multisig_wallets": wallet_list,
|
||||
"total_wallets": len(wallet_list),
|
||||
"filter_criteria": {
|
||||
"wallet_id": wallet_id or "all",
|
||||
"status": status or "all"
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.argument("wallet_id")
|
||||
@click.pass_context
|
||||
def status(ctx, wallet_id: str):
|
||||
"""Get detailed status of a multi-signature wallet"""
|
||||
|
||||
# Load wallets
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
if not multisig_file.exists():
|
||||
error("No multi-signature wallets found.")
|
||||
return
|
||||
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
if wallet_id not in wallets:
|
||||
error(f"Multi-signature wallet '{wallet_id}' not found.")
|
||||
return
|
||||
|
||||
wallet = wallets[wallet_id]
|
||||
|
||||
output({
|
||||
"wallet_id": wallet_id,
|
||||
"name": wallet["name"],
|
||||
"threshold": wallet["threshold"],
|
||||
"owners": wallet["owners"],
|
||||
"status": wallet["status"],
|
||||
"balance": wallet.get("balance", 0.0),
|
||||
"created_at": wallet["created_at"],
|
||||
"description": wallet.get("description"),
|
||||
"proposals": wallet.get("proposals", []),
|
||||
"transactions": wallet.get("transactions", [])
|
||||
})
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.option("--proposal-id", help="Filter by proposal ID")
|
||||
@click.option("--wallet-id", help="Filter by wallet ID")
|
||||
@click.pass_context
|
||||
def proposals(ctx, proposal_id: Optional[str], wallet_id: Optional[str]):
|
||||
"""List transaction proposals"""
|
||||
|
||||
# Load wallets
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
if not multisig_file.exists():
|
||||
warning("No multi-signature wallets found.")
|
||||
return
|
||||
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
# Collect proposals
|
||||
all_proposals = []
|
||||
|
||||
for wid, wallet in wallets.items():
|
||||
if wallet_id and wid != wallet_id:
|
||||
continue
|
||||
|
||||
for proposal in wallet.get("proposals", []):
|
||||
if proposal_id and proposal["proposal_id"] != proposal_id:
|
||||
continue
|
||||
|
||||
proposal_info = {
|
||||
"proposal_id": proposal["proposal_id"],
|
||||
"wallet_id": wid,
|
||||
"wallet_name": wallet["name"],
|
||||
"recipient": proposal["recipient"],
|
||||
"amount": proposal["amount"],
|
||||
"description": proposal["description"],
|
||||
"status": proposal["status"],
|
||||
"threshold": proposal["threshold"],
|
||||
"signatures": proposal["signatures"],
|
||||
"created_at": proposal["created_at"]
|
||||
}
|
||||
|
||||
if proposal.get("approved_at"):
|
||||
proposal_info["approved_at"] = proposal["approved_at"]
|
||||
|
||||
all_proposals.append(proposal_info)
|
||||
|
||||
if not all_proposals:
|
||||
error("No proposals found matching the criteria.")
|
||||
return
|
||||
|
||||
output({
|
||||
"proposals": all_proposals,
|
||||
"total_proposals": len(all_proposals),
|
||||
"filter_criteria": {
|
||||
"proposal_id": proposal_id or "all",
|
||||
"wallet_id": wallet_id or "all"
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@multisig.command()
|
||||
@click.argument("proposal_id")
|
||||
@click.pass_context
|
||||
def challenge(ctx, proposal_id: str):
|
||||
"""Create a challenge-response for proposal verification"""
|
||||
|
||||
# Load wallets
|
||||
multisig_file = Path.home() / ".aitbc" / "multisig_wallets.json"
|
||||
if not multisig_file.exists():
|
||||
error("No multi-signature wallets found.")
|
||||
return
|
||||
|
||||
with open(multisig_file, 'r') as f:
|
||||
wallets = json.load(f)
|
||||
|
||||
# Find the proposal
|
||||
target_proposal = None
|
||||
for wallet in wallets.values():
|
||||
for proposal in wallet.get("proposals", []):
|
||||
if proposal["proposal_id"] == proposal_id:
|
||||
target_proposal = proposal
|
||||
break
|
||||
if target_proposal:
|
||||
break
|
||||
|
||||
if not target_proposal:
|
||||
error(f"Proposal '{proposal_id}' not found.")
|
||||
return
|
||||
|
||||
# Create challenge
|
||||
challenge_data = {
|
||||
"challenge_id": f"challenge_{str(uuid.uuid4())[:8]}",
|
||||
"proposal_id": proposal_id,
|
||||
"challenge": hashlib.sha256(f"{proposal_id}:{datetime.utcnow().isoformat()}".encode()).hexdigest(),
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"expires_at": (datetime.utcnow() + timedelta(hours=1)).isoformat()
|
||||
}
|
||||
|
||||
# Store challenge (in a real implementation, this would be more secure)
|
||||
challenges_file = Path.home() / ".aitbc" / "multisig_challenges.json"
|
||||
challenges_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
challenges = {}
|
||||
if challenges_file.exists():
|
||||
with open(challenges_file, 'r') as f:
|
||||
challenges = json.load(f)
|
||||
|
||||
challenges[challenge_data["challenge_id"]] = challenge_data
|
||||
|
||||
with open(challenges_file, 'w') as f:
|
||||
json.dump(challenges, f, indent=2)
|
||||
|
||||
success(f"Challenge created: {challenge_data['challenge_id']}")
|
||||
output({
|
||||
"challenge_id": challenge_data["challenge_id"],
|
||||
"proposal_id": proposal_id,
|
||||
"challenge": challenge_data["challenge"],
|
||||
"expires_at": challenge_data["expires_at"]
|
||||
})
|
||||
439
cli/commands/node.py
Executable file
439
cli/commands/node.py
Executable file
@@ -0,0 +1,439 @@
|
||||
"""Node management commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
from typing import Optional
|
||||
from core.config import MultiChainConfig, load_multichain_config, get_default_node_config, add_node_config, remove_node_config
|
||||
from core.node_client import NodeClient
|
||||
from utils import output, error, success
|
||||
|
||||
@click.group()
|
||||
def node():
|
||||
"""Node management commands"""
|
||||
pass
|
||||
|
||||
@node.command()
|
||||
@click.argument('node_id')
|
||||
@click.pass_context
|
||||
def info(ctx, node_id):
|
||||
"""Get detailed node information"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
if node_id not in config.nodes:
|
||||
error(f"Node {node_id} not found in configuration")
|
||||
raise click.Abort()
|
||||
|
||||
node_config = config.nodes[node_id]
|
||||
|
||||
import asyncio
|
||||
|
||||
async def get_node_info():
|
||||
async with NodeClient(node_config) as client:
|
||||
return await client.get_node_info()
|
||||
|
||||
node_info = asyncio.run(get_node_info())
|
||||
|
||||
# Basic node information
|
||||
basic_info = {
|
||||
"Node ID": node_info["node_id"],
|
||||
"Node Type": node_info["type"],
|
||||
"Status": node_info["status"],
|
||||
"Version": node_info["version"],
|
||||
"Uptime": f"{node_info['uptime_days']} days, {node_info['uptime_hours']} hours",
|
||||
"Endpoint": node_config.endpoint
|
||||
}
|
||||
|
||||
output(basic_info, ctx.obj.get('output_format', 'table'), title=f"Node Information: {node_id}")
|
||||
|
||||
# Performance metrics
|
||||
metrics = {
|
||||
"CPU Usage": f"{node_info['cpu_usage']}%",
|
||||
"Memory Usage": f"{node_info['memory_usage_mb']:.1f}MB",
|
||||
"Disk Usage": f"{node_info['disk_usage_mb']:.1f}MB",
|
||||
"Network In": f"{node_info['network_in_mb']:.1f}MB/s",
|
||||
"Network Out": f"{node_info['network_out_mb']:.1f}MB/s"
|
||||
}
|
||||
|
||||
output(metrics, ctx.obj.get('output_format', 'table'), title="Performance Metrics")
|
||||
|
||||
# Hosted chains
|
||||
if node_info.get("hosted_chains"):
|
||||
chains_data = [
|
||||
{
|
||||
"Chain ID": chain_id,
|
||||
"Type": chain.get("type", "unknown"),
|
||||
"Status": chain.get("status", "unknown")
|
||||
}
|
||||
for chain_id, chain in node_info["hosted_chains"].items()
|
||||
]
|
||||
|
||||
output(chains_data, ctx.obj.get('output_format', 'table'), title="Hosted Chains")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error getting node info: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@node.command()
|
||||
@click.option('--show-private', is_flag=True, help='Show private chains')
|
||||
@click.option('--node-id', help='Specific node ID to query')
|
||||
@click.pass_context
|
||||
def chains(ctx, show_private, node_id):
|
||||
"""List chains hosted on all nodes"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
all_chains = []
|
||||
|
||||
import asyncio
|
||||
|
||||
async def get_all_chains():
|
||||
tasks = []
|
||||
for nid, node_config in config.nodes.items():
|
||||
if node_id and nid != node_id:
|
||||
continue
|
||||
async def get_chains_for_node(nid, nconfig):
|
||||
try:
|
||||
async with NodeClient(nconfig) as client:
|
||||
chains = await client.get_hosted_chains()
|
||||
return [(nid, chain) for chain in chains]
|
||||
except Exception as e:
|
||||
print(f"Error getting chains from node {nid}: {e}")
|
||||
return []
|
||||
|
||||
tasks.append(get_chains_for_node(node_id, node_config))
|
||||
|
||||
results = await asyncio.gather(*tasks)
|
||||
for result in results:
|
||||
all_chains.extend(result)
|
||||
|
||||
asyncio.run(get_all_chains())
|
||||
|
||||
if not all_chains:
|
||||
output("No chains found on any node", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
# Filter private chains if not requested
|
||||
if not show_private:
|
||||
all_chains = [(node_id, chain) for node_id, chain in all_chains
|
||||
if chain.privacy.visibility != "private"]
|
||||
|
||||
# Format output
|
||||
chains_data = [
|
||||
{
|
||||
"Node ID": node_id,
|
||||
"Chain ID": chain.id,
|
||||
"Type": chain.type.value,
|
||||
"Purpose": chain.purpose,
|
||||
"Name": chain.name,
|
||||
"Status": chain.status.value,
|
||||
"Block Height": chain.block_height,
|
||||
"Size": f"{chain.size_mb:.1f}MB"
|
||||
}
|
||||
for node_id, chain in all_chains
|
||||
]
|
||||
|
||||
output(chains_data, ctx.obj.get('output_format', 'table'), title="Chains by Node")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error listing chains: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@node.command()
|
||||
@click.option('--format', type=click.Choice(['table', 'json']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def list(ctx, format):
|
||||
"""List all configured nodes"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
if not config.nodes:
|
||||
output("No nodes configured", ctx.obj.get('output_format', 'table'))
|
||||
return
|
||||
|
||||
nodes_data = [
|
||||
{
|
||||
"Node ID": node_id,
|
||||
"Endpoint": node_config.endpoint,
|
||||
"Timeout": f"{node_config.timeout}s",
|
||||
"Max Connections": node_config.max_connections,
|
||||
"Retry Count": node_config.retry_count
|
||||
}
|
||||
for node_id, node_config in config.nodes.items()
|
||||
]
|
||||
|
||||
output(nodes_data, ctx.obj.get('output_format', 'table'), title="Configured Nodes")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error listing nodes: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@node.command()
|
||||
@click.argument('node_id')
|
||||
@click.argument('endpoint')
|
||||
@click.option('--timeout', default=30, help='Request timeout in seconds')
|
||||
@click.option('--max-connections', default=10, help='Maximum concurrent connections')
|
||||
@click.option('--retry-count', default=3, help='Number of retry attempts')
|
||||
@click.pass_context
|
||||
def add(ctx, node_id, endpoint, timeout, max_connections, retry_count):
|
||||
"""Add a new node to configuration"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
if node_id in config.nodes:
|
||||
error(f"Node {node_id} already exists")
|
||||
raise click.Abort()
|
||||
|
||||
node_config = get_default_node_config()
|
||||
node_config.id = node_id
|
||||
node_config.endpoint = endpoint
|
||||
node_config.timeout = timeout
|
||||
node_config.max_connections = max_connections
|
||||
node_config.retry_count = retry_count
|
||||
|
||||
config = add_node_config(config, node_config)
|
||||
|
||||
from core.config import save_multichain_config
|
||||
save_multichain_config(config)
|
||||
|
||||
success(f"Node {node_id} added successfully!")
|
||||
|
||||
result = {
|
||||
"Node ID": node_id,
|
||||
"Endpoint": endpoint,
|
||||
"Timeout": f"{timeout}s",
|
||||
"Max Connections": max_connections,
|
||||
"Retry Count": retry_count
|
||||
}
|
||||
|
||||
output(result, ctx.obj.get('output_format', 'table'))
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error adding node: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@node.command()
|
||||
@click.argument('node_id')
|
||||
@click.option('--force', is_flag=True, help='Force removal without confirmation')
|
||||
@click.pass_context
|
||||
def remove(ctx, node_id, force):
|
||||
"""Remove a node from configuration"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
if node_id not in config.nodes:
|
||||
error(f"Node {node_id} not found")
|
||||
raise click.Abort()
|
||||
|
||||
if not force:
|
||||
# Show node information before removal
|
||||
node_config = config.nodes[node_id]
|
||||
node_info = {
|
||||
"Node ID": node_id,
|
||||
"Endpoint": node_config.endpoint,
|
||||
"Timeout": f"{node_config.timeout}s",
|
||||
"Max Connections": node_config.max_connections
|
||||
}
|
||||
|
||||
output(node_info, ctx.obj.get('output_format', 'table'), title="Node to Remove")
|
||||
|
||||
if not click.confirm(f"Are you sure you want to remove node {node_id}?"):
|
||||
raise click.Abort()
|
||||
|
||||
config = remove_node_config(config, node_id)
|
||||
|
||||
from core.config import save_multichain_config
|
||||
save_multichain_config(config)
|
||||
|
||||
success(f"Node {node_id} removed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error removing node: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@node.command()
|
||||
@click.argument('node_id')
|
||||
@click.option('--realtime', is_flag=True, help='Real-time monitoring')
|
||||
@click.option('--interval', default=5, help='Update interval in seconds')
|
||||
@click.pass_context
|
||||
def monitor(ctx, node_id, realtime, interval):
|
||||
"""Monitor node activity"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
if node_id not in config.nodes:
|
||||
error(f"Node {node_id} not found")
|
||||
raise click.Abort()
|
||||
|
||||
node_config = config.nodes[node_id]
|
||||
|
||||
import asyncio
|
||||
from rich.console import Console
|
||||
from rich.layout import Layout
|
||||
from rich.live import Live
|
||||
import time
|
||||
|
||||
console = Console()
|
||||
|
||||
async def get_node_stats():
|
||||
async with NodeClient(node_config) as client:
|
||||
node_info = await client.get_node_info()
|
||||
return node_info
|
||||
|
||||
if realtime:
|
||||
# Real-time monitoring
|
||||
def generate_monitor_layout():
|
||||
try:
|
||||
node_info = asyncio.run(get_node_stats())
|
||||
|
||||
layout = Layout()
|
||||
layout.split_column(
|
||||
Layout(name="header", size=3),
|
||||
Layout(name="metrics"),
|
||||
Layout(name="chains", size=10)
|
||||
)
|
||||
|
||||
# Header
|
||||
layout["header"].update(
|
||||
f"Node Monitor: {node_id} - {node_info['status'].upper()}"
|
||||
)
|
||||
|
||||
# Metrics table
|
||||
metrics_data = [
|
||||
["CPU Usage", f"{node_info['cpu_usage']}%"],
|
||||
["Memory Usage", f"{node_info['memory_usage_mb']:.1f}MB"],
|
||||
["Disk Usage", f"{node_info['disk_usage_mb']:.1f}MB"],
|
||||
["Network In", f"{node_info['network_in_mb']:.1f}MB/s"],
|
||||
["Network Out", f"{node_info['network_out_mb']:.1f}MB/s"],
|
||||
["Uptime", f"{node_info['uptime_days']}d {node_info['uptime_hours']}h"]
|
||||
]
|
||||
|
||||
layout["metrics"].update(str(metrics_data))
|
||||
|
||||
# Chains info
|
||||
if node_info.get("hosted_chains"):
|
||||
chains_text = f"Hosted Chains: {len(node_info['hosted_chains'])}\n"
|
||||
for chain_id, chain in list(node_info["hosted_chains"].items())[:5]:
|
||||
chains_text += f" • {chain_id} ({chain.get('status', 'unknown')})\n"
|
||||
layout["chains"].update(chains_text)
|
||||
else:
|
||||
layout["chains"].update("No chains hosted")
|
||||
|
||||
return layout
|
||||
except Exception as e:
|
||||
return f"Error getting node stats: {e}"
|
||||
|
||||
with Live(generate_monitor_layout(), refresh_per_second=1) as live:
|
||||
try:
|
||||
while True:
|
||||
live.update(generate_monitor_layout())
|
||||
time.sleep(interval)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Monitoring stopped by user[/yellow]")
|
||||
else:
|
||||
# Single snapshot
|
||||
node_info = asyncio.run(get_node_stats())
|
||||
|
||||
stats_data = [
|
||||
{
|
||||
"Metric": "CPU Usage",
|
||||
"Value": f"{node_info['cpu_usage']}%"
|
||||
},
|
||||
{
|
||||
"Metric": "Memory Usage",
|
||||
"Value": f"{node_info['memory_usage_mb']:.1f}MB"
|
||||
},
|
||||
{
|
||||
"Metric": "Disk Usage",
|
||||
"Value": f"{node_info['disk_usage_mb']:.1f}MB"
|
||||
},
|
||||
{
|
||||
"Metric": "Network In",
|
||||
"Value": f"{node_info['network_in_mb']:.1f}MB/s"
|
||||
},
|
||||
{
|
||||
"Metric": "Network Out",
|
||||
"Value": f"{node_info['network_out_mb']:.1f}MB/s"
|
||||
},
|
||||
{
|
||||
"Metric": "Uptime",
|
||||
"Value": f"{node_info['uptime_days']}d {node_info['uptime_hours']}h"
|
||||
}
|
||||
]
|
||||
|
||||
output(stats_data, ctx.obj.get('output_format', 'table'), title=f"Node Statistics: {node_id}")
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error during monitoring: {str(e)}")
|
||||
raise click.Abort()
|
||||
|
||||
@node.command()
|
||||
@click.argument('node_id')
|
||||
@click.pass_context
|
||||
def test(ctx, node_id):
|
||||
"""Test connectivity to a node"""
|
||||
try:
|
||||
config = load_multichain_config()
|
||||
|
||||
if node_id not in config.nodes:
|
||||
error(f"Node {node_id} not found")
|
||||
raise click.Abort()
|
||||
|
||||
node_config = config.nodes[node_id]
|
||||
|
||||
import asyncio
|
||||
|
||||
async def test_node():
|
||||
try:
|
||||
async with NodeClient(node_config) as client:
|
||||
node_info = await client.get_node_info()
|
||||
chains = await client.get_hosted_chains()
|
||||
|
||||
return {
|
||||
"connected": True,
|
||||
"node_id": node_info["node_id"],
|
||||
"status": node_info["status"],
|
||||
"version": node_info["version"],
|
||||
"chains_count": len(chains)
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"connected": False,
|
||||
"error": str(e)
|
||||
}
|
||||
|
||||
result = asyncio.run(test_node())
|
||||
|
||||
if result["connected"]:
|
||||
success(f"Successfully connected to node {node_id}!")
|
||||
|
||||
test_data = [
|
||||
{
|
||||
"Test": "Connection",
|
||||
"Status": "✓ Pass"
|
||||
},
|
||||
{
|
||||
"Test": "Node ID",
|
||||
"Status": result["node_id"]
|
||||
},
|
||||
{
|
||||
"Test": "Status",
|
||||
"Status": result["status"]
|
||||
},
|
||||
{
|
||||
"Test": "Version",
|
||||
"Status": result["version"]
|
||||
},
|
||||
{
|
||||
"Test": "Chains",
|
||||
"Status": f"{result['chains_count']} hosted"
|
||||
}
|
||||
]
|
||||
|
||||
output(test_data, ctx.obj.get('output_format', 'table'), title=f"Node Test Results: {node_id}")
|
||||
else:
|
||||
error(f"Failed to connect to node {node_id}: {result['error']}")
|
||||
raise click.Abort()
|
||||
|
||||
except Exception as e:
|
||||
error(f"Error testing node: {str(e)}")
|
||||
raise click.Abort()
|
||||
603
cli/commands/openclaw.py
Executable file
603
cli/commands/openclaw.py
Executable file
@@ -0,0 +1,603 @@
|
||||
"""OpenClaw integration commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
from typing import Optional, Dict, Any, List
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def openclaw():
|
||||
"""OpenClaw integration with edge computing deployment"""
|
||||
pass
|
||||
|
||||
|
||||
@click.group()
|
||||
def deploy():
|
||||
"""Agent deployment operations"""
|
||||
pass
|
||||
|
||||
|
||||
openclaw.add_command(deploy)
|
||||
|
||||
|
||||
@deploy.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--region", required=True, help="Deployment region")
|
||||
@click.option("--instances", default=1, help="Number of instances to deploy")
|
||||
@click.option("--instance-type", default="standard", help="Instance type")
|
||||
@click.option("--edge-locations", help="Comma-separated edge locations")
|
||||
@click.option("--auto-scale", is_flag=True, help="Enable auto-scaling")
|
||||
@click.pass_context
|
||||
def deploy_agent(ctx, agent_id: str, region: str, instances: int, instance_type: str,
|
||||
edge_locations: Optional[str], auto_scale: bool):
|
||||
"""Deploy agent to OpenClaw network"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
deployment_data = {
|
||||
"agent_id": agent_id,
|
||||
"region": region,
|
||||
"instances": instances,
|
||||
"instance_type": instance_type,
|
||||
"auto_scale": auto_scale
|
||||
}
|
||||
|
||||
if edge_locations:
|
||||
deployment_data["edge_locations"] = [loc.strip() for loc in edge_locations.split(',')]
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/deploy",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=deployment_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
deployment = response.json()
|
||||
success(f"Agent deployment started: {deployment['id']}")
|
||||
output(deployment, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start deployment: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.argument("deployment_id")
|
||||
@click.option("--instances", required=True, type=int, help="New number of instances")
|
||||
@click.option("--auto-scale", is_flag=True, help="Enable auto-scaling")
|
||||
@click.option("--min-instances", default=1, help="Minimum instances for auto-scaling")
|
||||
@click.option("--max-instances", default=10, help="Maximum instances for auto-scaling")
|
||||
@click.pass_context
|
||||
def scale(ctx, deployment_id: str, instances: int, auto_scale: bool, min_instances: int, max_instances: int):
|
||||
"""Scale agent deployment"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
scale_data = {
|
||||
"instances": instances,
|
||||
"auto_scale": auto_scale,
|
||||
"min_instances": min_instances,
|
||||
"max_instances": max_instances
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/deployments/{deployment_id}/scale",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=scale_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Deployment scaled successfully")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to scale deployment: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@deploy.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.option("--objective", default="cost",
|
||||
type=click.Choice(["cost", "performance", "latency", "efficiency"]),
|
||||
help="Optimization objective")
|
||||
@click.pass_context
|
||||
def optimize(ctx, deployment_id: str, objective: str):
|
||||
"""Optimize agent deployment"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
optimization_data = {"objective": objective}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/deployments/{deployment_id}/optimize",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=optimization_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Deployment optimization completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to optimize deployment: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def monitor():
|
||||
"""OpenClaw monitoring operations"""
|
||||
pass
|
||||
|
||||
|
||||
openclaw.add_command(monitor)
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.option("--metrics", default="latency,cost", help="Comma-separated metrics to monitor")
|
||||
@click.option("--real-time", is_flag=True, help="Show real-time metrics")
|
||||
@click.option("--interval", default=10, help="Update interval for real-time monitoring")
|
||||
@click.pass_context
|
||||
def monitor_metrics(ctx, deployment_id: str, metrics: str, real_time: bool, interval: int):
|
||||
"""Monitor OpenClaw agent performance"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"metrics": [m.strip() for m in metrics.split(',')]}
|
||||
|
||||
def get_metrics():
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/openclaw/deployments/{deployment_id}/metrics",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
error(f"Failed to get metrics: {response.status_code}")
|
||||
return None
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
return None
|
||||
|
||||
if real_time:
|
||||
click.echo(f"Monitoring deployment {deployment_id} (Ctrl+C to stop)...")
|
||||
while True:
|
||||
metrics_data = get_metrics()
|
||||
if metrics_data:
|
||||
click.clear()
|
||||
click.echo(f"Deployment ID: {deployment_id}")
|
||||
click.echo(f"Status: {metrics_data.get('status', 'Unknown')}")
|
||||
click.echo(f"Instances: {metrics_data.get('instances', 'N/A')}")
|
||||
|
||||
metrics_list = metrics_data.get('metrics', {})
|
||||
for metric in [m.strip() for m in metrics.split(',')]:
|
||||
if metric in metrics_list:
|
||||
value = metrics_list[metric]
|
||||
click.echo(f"{metric.title()}: {value}")
|
||||
|
||||
if metrics_data.get('status') in ['terminated', 'failed']:
|
||||
break
|
||||
|
||||
time.sleep(interval)
|
||||
else:
|
||||
metrics_data = get_metrics()
|
||||
if metrics_data:
|
||||
output(metrics_data, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@monitor.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.pass_context
|
||||
def status(ctx, deployment_id: str):
|
||||
"""Get deployment status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/openclaw/deployments/{deployment_id}/status",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status_data = response.json()
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get deployment status: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def edge():
|
||||
"""Edge computing operations"""
|
||||
pass
|
||||
|
||||
|
||||
openclaw.add_command(edge)
|
||||
|
||||
|
||||
@edge.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--locations", required=True, help="Comma-separated edge locations")
|
||||
@click.option("--strategy", default="latency",
|
||||
type=click.Choice(["latency", "cost", "availability", "hybrid"]),
|
||||
help="Edge deployment strategy")
|
||||
@click.option("--replicas", default=1, help="Number of replicas per location")
|
||||
@click.pass_context
|
||||
def deploy(ctx, agent_id: str, locations: str, strategy: str, replicas: int):
|
||||
"""Deploy agent to edge locations"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
edge_data = {
|
||||
"agent_id": agent_id,
|
||||
"locations": [loc.strip() for loc in locations.split(',')],
|
||||
"strategy": strategy,
|
||||
"replicas": replicas
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/edge/deploy",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=edge_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
deployment = response.json()
|
||||
success(f"Edge deployment started: {deployment['id']}")
|
||||
output(deployment, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start edge deployment: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@edge.command()
|
||||
@click.option("--location", help="Filter by location")
|
||||
@click.pass_context
|
||||
def resources(ctx, location: Optional[str]):
|
||||
"""Manage edge resources"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if location:
|
||||
params["location"] = location
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/openclaw/edge/resources",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
resources = response.json()
|
||||
output(resources, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get edge resources: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@edge.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.option("--latency-target", type=int, help="Target latency in milliseconds")
|
||||
@click.option("--cost-budget", type=float, help="Cost budget")
|
||||
@click.option("--availability", type=float, help="Target availability (0.0-1.0)")
|
||||
@click.pass_context
|
||||
def optimize(ctx, deployment_id: str, latency_target: Optional[int],
|
||||
cost_budget: Optional[float], availability: Optional[float]):
|
||||
"""Optimize edge deployment performance"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
optimization_data = {}
|
||||
if latency_target:
|
||||
optimization_data["latency_target_ms"] = latency_target
|
||||
if cost_budget:
|
||||
optimization_data["cost_budget"] = cost_budget
|
||||
if availability:
|
||||
optimization_data["availability_target"] = availability
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/edge/deployments/{deployment_id}/optimize",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=optimization_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Edge optimization completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to optimize edge deployment: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@edge.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.option("--standards", help="Comma-separated compliance standards")
|
||||
@click.pass_context
|
||||
def compliance(ctx, deployment_id: str, standards: Optional[str]):
|
||||
"""Check edge security compliance"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if standards:
|
||||
params["standards"] = [s.strip() for s in standards.split(',')]
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/openclaw/edge/deployments/{deployment_id}/compliance",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
compliance_data = response.json()
|
||||
output(compliance_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to check compliance: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def routing():
|
||||
"""Agent skill routing and job offloading"""
|
||||
pass
|
||||
|
||||
|
||||
openclaw.add_command(routing)
|
||||
|
||||
|
||||
@routing.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.option("--algorithm", default="load-balanced",
|
||||
type=click.Choice(["load-balanced", "skill-based", "cost-based", "latency-based"]),
|
||||
help="Routing algorithm")
|
||||
@click.option("--weights", help="Comma-separated weights for routing factors")
|
||||
@click.pass_context
|
||||
def optimize(ctx, deployment_id: str, algorithm: str, weights: Optional[str]):
|
||||
"""Optimize agent skill routing"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
routing_data = {"algorithm": algorithm}
|
||||
if weights:
|
||||
routing_data["weights"] = [w.strip() for w in weights.split(',')]
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/routing/deployments/{deployment_id}/optimize",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=routing_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Routing optimization completed")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to optimize routing: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@routing.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.pass_context
|
||||
def status(ctx, deployment_id: str):
|
||||
"""Get routing status and statistics"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/openclaw/routing/deployments/{deployment_id}/status",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status_data = response.json()
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get routing status: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def ecosystem():
|
||||
"""OpenClaw ecosystem development"""
|
||||
pass
|
||||
|
||||
|
||||
openclaw.add_command(ecosystem)
|
||||
|
||||
|
||||
@ecosystem.command()
|
||||
@click.option("--name", required=True, help="Solution name")
|
||||
@click.option("--type", required=True,
|
||||
type=click.Choice(["agent", "workflow", "integration", "tool"]),
|
||||
help="Solution type")
|
||||
@click.option("--description", default="", help="Solution description")
|
||||
@click.option("--package", type=click.File('rb'), help="Solution package file")
|
||||
@click.pass_context
|
||||
def create(ctx, name: str, type: str, description: str, package):
|
||||
"""Create OpenClaw ecosystem solution"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
solution_data = {
|
||||
"name": name,
|
||||
"type": type,
|
||||
"description": description
|
||||
}
|
||||
|
||||
files = {}
|
||||
if package:
|
||||
files["package"] = package.read()
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/ecosystem/solutions",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
data=solution_data,
|
||||
files=files
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
solution = response.json()
|
||||
success(f"OpenClaw solution created: {solution['id']}")
|
||||
output(solution, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to create solution: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@ecosystem.command()
|
||||
@click.option("--type", help="Filter by solution type")
|
||||
@click.option("--category", help="Filter by category")
|
||||
@click.option("--limit", default=20, help="Number of solutions to list")
|
||||
@click.pass_context
|
||||
def list(ctx, type: Optional[str], category: Optional[str], limit: int):
|
||||
"""List OpenClaw ecosystem solutions"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"limit": limit}
|
||||
if type:
|
||||
params["type"] = type
|
||||
if category:
|
||||
params["category"] = category
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/openclaw/ecosystem/solutions",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
solutions = response.json()
|
||||
output(solutions, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list solutions: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@ecosystem.command()
|
||||
@click.argument("solution_id")
|
||||
@click.pass_context
|
||||
def install(ctx, solution_id: str):
|
||||
"""Install OpenClaw ecosystem solution"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/openclaw/ecosystem/solutions/{solution_id}/install",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Solution installed successfully")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to install solution: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@openclaw.command()
|
||||
@click.argument("deployment_id")
|
||||
@click.pass_context
|
||||
def terminate(ctx, deployment_id: str):
|
||||
"""Terminate OpenClaw deployment"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not click.confirm(f"Terminate deployment {deployment_id}? This action cannot be undone."):
|
||||
click.echo("Operation cancelled")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.delete(
|
||||
f"{config.coordinator_url}/openclaw/deployments/{deployment_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Deployment {deployment_id} terminated")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to terminate deployment: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
515
cli/commands/optimize.py
Executable file
515
cli/commands/optimize.py
Executable file
@@ -0,0 +1,515 @@
|
||||
"""Autonomous optimization commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
from typing import Optional, Dict, Any, List
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def optimize():
|
||||
"""Autonomous optimization and predictive operations"""
|
||||
pass
|
||||
|
||||
|
||||
@click.group()
|
||||
def self_opt():
|
||||
"""Self-optimization operations"""
|
||||
pass
|
||||
|
||||
|
||||
optimize.add_command(self_opt)
|
||||
|
||||
|
||||
@self_opt.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--mode", default="auto-tune",
|
||||
type=click.Choice(["auto-tune", "self-healing", "performance"]),
|
||||
help="Optimization mode")
|
||||
@click.option("--scope", default="full",
|
||||
type=click.Choice(["full", "performance", "cost", "latency"]),
|
||||
help="Optimization scope")
|
||||
@click.option("--aggressiveness", default="moderate",
|
||||
type=click.Choice(["conservative", "moderate", "aggressive"]),
|
||||
help="Optimization aggressiveness")
|
||||
@click.pass_context
|
||||
def enable(ctx, agent_id: str, mode: str, scope: str, aggressiveness: str):
|
||||
"""Enable autonomous optimization for agent"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
optimization_config = {
|
||||
"mode": mode,
|
||||
"scope": scope,
|
||||
"aggressiveness": aggressiveness
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/optimize/agents/{agent_id}/enable",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=optimization_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Autonomous optimization enabled for agent {agent_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to enable optimization: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@self_opt.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--metrics", default="performance,cost", help="Comma-separated metrics to monitor")
|
||||
@click.option("--real-time", is_flag=True, help="Show real-time optimization status")
|
||||
@click.option("--interval", default=10, help="Update interval for real-time monitoring")
|
||||
@click.pass_context
|
||||
def status(ctx, agent_id: str, metrics: str, real_time: bool, interval: int):
|
||||
"""Monitor optimization progress and status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"metrics": [m.strip() for m in metrics.split(',')]}
|
||||
|
||||
def get_status():
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/optimize/agents/{agent_id}/status",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
error(f"Failed to get optimization status: {response.status_code}")
|
||||
return None
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
return None
|
||||
|
||||
if real_time:
|
||||
click.echo(f"Monitoring optimization for agent {agent_id} (Ctrl+C to stop)...")
|
||||
while True:
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
click.clear()
|
||||
click.echo(f"Optimization Status: {status_data.get('status', 'Unknown')}")
|
||||
click.echo(f"Mode: {status_data.get('mode', 'N/A')}")
|
||||
click.echo(f"Progress: {status_data.get('progress', 0)}%")
|
||||
|
||||
metrics_data = status_data.get('metrics', {})
|
||||
for metric in [m.strip() for m in metrics.split(',')]:
|
||||
if metric in metrics_data:
|
||||
value = metrics_data[metric]
|
||||
click.echo(f"{metric.title()}: {value}")
|
||||
|
||||
if status_data.get('status') in ['completed', 'failed', 'disabled']:
|
||||
break
|
||||
|
||||
time.sleep(interval)
|
||||
else:
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@self_opt.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--targets", required=True, help="Comma-separated target metrics (e.g., latency:100ms,cost:0.5)")
|
||||
@click.option("--priority", default="balanced",
|
||||
type=click.Choice(["performance", "cost", "balanced"]),
|
||||
help="Optimization priority")
|
||||
@click.pass_context
|
||||
def objectives(ctx, agent_id: str, targets: str, priority: str):
|
||||
"""Set optimization objectives and targets"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
# Parse targets
|
||||
target_dict = {}
|
||||
for target in targets.split(','):
|
||||
if ':' in target:
|
||||
key, value = target.split(':', 1)
|
||||
target_dict[key.strip()] = value.strip()
|
||||
else:
|
||||
target_dict[target.strip()] = "optimize"
|
||||
|
||||
objectives_data = {
|
||||
"targets": target_dict,
|
||||
"priority": priority
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/optimize/agents/{agent_id}/objectives",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=objectives_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Optimization objectives set for agent {agent_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to set objectives: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@self_opt.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--priority", default="all",
|
||||
type=click.Choice(["high", "medium", "low", "all"]),
|
||||
help="Filter recommendations by priority")
|
||||
@click.option("--category", help="Filter by category (performance, cost, security)")
|
||||
@click.pass_context
|
||||
def recommendations(ctx, agent_id: str, priority: str, category: Optional[str]):
|
||||
"""Get optimization recommendations"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {}
|
||||
if priority != "all":
|
||||
params["priority"] = priority
|
||||
if category:
|
||||
params["category"] = category
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/optimize/agents/{agent_id}/recommendations",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
recommendations = response.json()
|
||||
output(recommendations, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get recommendations: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@self_opt.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--recommendation-id", required=True, help="Specific recommendation ID to apply")
|
||||
@click.option("--confirm", is_flag=True, help="Apply without confirmation prompt")
|
||||
@click.pass_context
|
||||
def apply(ctx, agent_id: str, recommendation_id: str, confirm: bool):
|
||||
"""Apply optimization recommendation"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not confirm:
|
||||
if not click.confirm(f"Apply recommendation {recommendation_id} to agent {agent_id}?"):
|
||||
click.echo("Operation cancelled")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/optimize/agents/{agent_id}/apply/{recommendation_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Optimization recommendation applied")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to apply recommendation: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def predict():
|
||||
"""Predictive operations"""
|
||||
pass
|
||||
|
||||
|
||||
optimize.add_command(predict)
|
||||
|
||||
@predict.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--horizon", default=24, help="Prediction horizon in hours")
|
||||
@click.option("--resources", default="gpu,memory", help="Comma-separated resources to predict")
|
||||
@click.option("--confidence", default=0.8, help="Minimum confidence threshold")
|
||||
@click.pass_context
|
||||
def predict(ctx, agent_id: str, horizon: int, resources: str, confidence: float):
|
||||
"""Predict resource needs and usage patterns"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
prediction_data = {
|
||||
"horizon_hours": horizon,
|
||||
"resources": [r.strip() for r in resources.split(',')],
|
||||
"confidence_threshold": confidence
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/predict/agents/{agent_id}/resources",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=prediction_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
predictions = response.json()
|
||||
success("Resource prediction completed")
|
||||
output(predictions, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to generate predictions: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.argument("agent_id")
|
||||
@click.option("--policy", default="cost-efficiency",
|
||||
type=click.Choice(["cost-efficiency", "performance", "availability", "hybrid"]),
|
||||
help="Auto-scaling policy")
|
||||
@click.option("--min-instances", default=1, help="Minimum number of instances")
|
||||
@click.option("--max-instances", default=10, help="Maximum number of instances")
|
||||
@click.option("--cooldown", default=300, help="Cooldown period in seconds")
|
||||
@click.pass_context
|
||||
def autoscale(ctx, agent_id: str, policy: str, min_instances: int, max_instances: int, cooldown: int):
|
||||
"""Configure auto-scaling based on predictions"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
autoscale_config = {
|
||||
"policy": policy,
|
||||
"min_instances": min_instances,
|
||||
"max_instances": max_instances,
|
||||
"cooldown_seconds": cooldown
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/predict/agents/{agent_id}/autoscale",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=autoscale_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Auto-scaling configured for agent {agent_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to configure auto-scaling: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.argument("agent_id")
|
||||
@click.option("--metric", required=True, help="Metric to forecast (throughput, latency, cost, etc.)")
|
||||
@click.option("--period", default=7, help="Forecast period in days")
|
||||
@click.option("--granularity", default="hour",
|
||||
type=click.Choice(["minute", "hour", "day", "week"]),
|
||||
help="Forecast granularity")
|
||||
@click.pass_context
|
||||
def forecast(ctx, agent_id: str, metric: str, period: int, granularity: str):
|
||||
"""Generate performance forecasts"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
forecast_params = {
|
||||
"metric": metric,
|
||||
"period_days": period,
|
||||
"granularity": granularity
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/predict/agents/{agent_id}/forecast",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=forecast_params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
forecast_data = response.json()
|
||||
success(f"Forecast generated for {metric}")
|
||||
output(forecast_data, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to generate forecast: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@click.group()
|
||||
def tune():
|
||||
"""Auto-tuning operations"""
|
||||
pass
|
||||
|
||||
|
||||
optimize.add_command(tune)
|
||||
|
||||
|
||||
@tune.command()
|
||||
@click.argument("agent_id")
|
||||
@click.option("--parameters", help="Comma-separated parameters to tune")
|
||||
@click.option("--objective", default="performance", help="Optimization objective")
|
||||
@click.option("--iterations", default=100, help="Number of tuning iterations")
|
||||
@click.pass_context
|
||||
def auto(ctx, agent_id: str, parameters: Optional[str], objective: str, iterations: int):
|
||||
"""Start automatic parameter tuning"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
tuning_data = {
|
||||
"objective": objective,
|
||||
"iterations": iterations
|
||||
}
|
||||
|
||||
if parameters:
|
||||
tuning_data["parameters"] = [p.strip() for p in parameters.split(',')]
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/tune/agents/{agent_id}/auto",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=tuning_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
tuning = response.json()
|
||||
success(f"Auto-tuning started: {tuning['id']}")
|
||||
output(tuning, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start auto-tuning: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@tune.command()
|
||||
@click.argument("tuning_id")
|
||||
@click.option("--watch", is_flag=True, help="Watch tuning progress")
|
||||
@click.pass_context
|
||||
def status(ctx, tuning_id: str, watch: bool):
|
||||
"""Get auto-tuning status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
def get_status():
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/tune/sessions/{tuning_id}",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
error(f"Failed to get tuning status: {response.status_code}")
|
||||
return None
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
return None
|
||||
|
||||
if watch:
|
||||
click.echo(f"Watching tuning session {tuning_id} (Ctrl+C to stop)...")
|
||||
while True:
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
click.clear()
|
||||
click.echo(f"Tuning Status: {status_data.get('status', 'Unknown')}")
|
||||
click.echo(f"Progress: {status_data.get('progress', 0)}%")
|
||||
click.echo(f"Iteration: {status_data.get('current_iteration', 0)}/{status_data.get('total_iterations', 0)}")
|
||||
click.echo(f"Best Score: {status_data.get('best_score', 'N/A')}")
|
||||
|
||||
if status_data.get('status') in ['completed', 'failed', 'cancelled']:
|
||||
break
|
||||
|
||||
time.sleep(5)
|
||||
else:
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@tune.command()
|
||||
@click.argument("tuning_id")
|
||||
@click.pass_context
|
||||
def results(ctx, tuning_id: str):
|
||||
"""Get auto-tuning results and best parameters"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/tune/sessions/{tuning_id}/results",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
results = response.json()
|
||||
output(results, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to get tuning results: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@optimize.command()
|
||||
@click.argument("agent_id")
|
||||
@click.pass_context
|
||||
def disable(ctx, agent_id: str):
|
||||
"""Disable autonomous optimization for agent"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/optimize/agents/{agent_id}/disable",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Autonomous optimization disabled for agent {agent_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to disable optimization: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
427
cli/commands/oracle.py
Executable file
427
cli/commands/oracle.py
Executable file
@@ -0,0 +1,427 @@
|
||||
"""Oracle price discovery commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime, timedelta
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def oracle():
|
||||
"""Oracle price discovery and management commands"""
|
||||
pass
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.option("--pair", required=True, help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--price", type=float, required=True, help="Price to set")
|
||||
@click.option("--source", default="creator", help="Price source (creator, market, oracle)")
|
||||
@click.option("--confidence", type=float, default=1.0, help="Confidence level (0.0-1.0)")
|
||||
@click.option("--description", help="Price update description")
|
||||
@click.pass_context
|
||||
def set_price(ctx, pair: str, price: float, source: str, confidence: float, description: Optional[str]):
|
||||
"""Set price for a trading pair"""
|
||||
|
||||
# Create oracle data structure
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
oracle_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load existing oracle data
|
||||
oracle_data = {}
|
||||
if oracle_file.exists():
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
# Create price entry
|
||||
price_entry = {
|
||||
"pair": pair,
|
||||
"price": price,
|
||||
"source": source,
|
||||
"confidence": confidence,
|
||||
"description": description or f"Price set by {source}",
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
"volume": 0.0,
|
||||
"spread": 0.0
|
||||
}
|
||||
|
||||
# Add to oracle data
|
||||
if pair not in oracle_data:
|
||||
oracle_data[pair] = {"history": [], "current_price": None, "last_updated": None}
|
||||
|
||||
# Add to history
|
||||
oracle_data[pair]["history"].append(price_entry)
|
||||
# Keep only last 1000 entries
|
||||
if len(oracle_data[pair]["history"]) > 1000:
|
||||
oracle_data[pair]["history"] = oracle_data[pair]["history"][-1000:]
|
||||
|
||||
# Update current price
|
||||
oracle_data[pair]["current_price"] = price_entry
|
||||
oracle_data[pair]["last_updated"] = price_entry["timestamp"]
|
||||
|
||||
# Save oracle data
|
||||
with open(oracle_file, 'w') as f:
|
||||
json.dump(oracle_data, f, indent=2)
|
||||
|
||||
success(f"Price set for {pair}: {price} (source: {source})")
|
||||
output({
|
||||
"pair": pair,
|
||||
"price": price,
|
||||
"source": source,
|
||||
"confidence": confidence,
|
||||
"timestamp": price_entry["timestamp"]
|
||||
})
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.option("--pair", required=True, help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--source", default="market", help="Price source (market, oracle, external)")
|
||||
@click.option("--market-price", type=float, help="Market price to update from")
|
||||
@click.option("--confidence", type=float, default=0.8, help="Confidence level for market price")
|
||||
@click.option("--volume", type=float, default=0.0, help="Trading volume")
|
||||
@click.option("--spread", type=float, default=0.0, help="Bid-ask spread")
|
||||
@click.pass_context
|
||||
def update_price(ctx, pair: str, source: str, market_price: Optional[float], confidence: float, volume: float, spread: float):
|
||||
"""Update price from market data"""
|
||||
|
||||
# For demo purposes, if no market price provided, simulate one
|
||||
if market_price is None:
|
||||
# Load current price and apply small random variation
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
if oracle_file.exists():
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
if pair in oracle_data and oracle_data[pair]["current_price"]:
|
||||
current_price = oracle_data[pair]["current_price"]["price"]
|
||||
# Simulate market movement (-2% to +2%)
|
||||
import random
|
||||
variation = random.uniform(-0.02, 0.02)
|
||||
market_price = round(current_price * (1 + variation), 8)
|
||||
else:
|
||||
market_price = 0.00001 # Default AITBC price
|
||||
else:
|
||||
market_price = 0.00001 # Default AITBC price
|
||||
|
||||
# Use set_price logic
|
||||
ctx.invoke(set_price,
|
||||
pair=pair,
|
||||
price=market_price,
|
||||
source=source,
|
||||
confidence=confidence,
|
||||
description=f"Market price update from {source}")
|
||||
|
||||
# Update additional market data
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
# Update market-specific fields
|
||||
oracle_data[pair]["current_price"]["volume"] = volume
|
||||
oracle_data[pair]["current_price"]["spread"] = spread
|
||||
oracle_data[pair]["current_price"]["market_data"] = True
|
||||
|
||||
# Save updated data
|
||||
with open(oracle_file, 'w') as f:
|
||||
json.dump(oracle_data, f, indent=2)
|
||||
|
||||
success(f"Market price updated for {pair}: {market_price}")
|
||||
output({
|
||||
"pair": pair,
|
||||
"market_price": market_price,
|
||||
"source": source,
|
||||
"volume": volume,
|
||||
"spread": spread
|
||||
})
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.option("--pair", help="Trading pair symbol (e.g., AITBC/BTC)")
|
||||
@click.option("--days", type=int, default=7, help="Number of days of history to show")
|
||||
@click.option("--limit", type=int, default=100, help="Maximum number of records to show")
|
||||
@click.option("--source", help="Filter by price source")
|
||||
@click.pass_context
|
||||
def price_history(ctx, pair: Optional[str], days: int, limit: int, source: Optional[str]):
|
||||
"""Get price history for trading pairs"""
|
||||
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
if not oracle_file.exists():
|
||||
warning("No price data available.")
|
||||
return
|
||||
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
# Filter data
|
||||
history_data = {}
|
||||
cutoff_time = datetime.utcnow() - timedelta(days=days)
|
||||
|
||||
for pair_name, pair_data in oracle_data.items():
|
||||
if pair and pair_name != pair:
|
||||
continue
|
||||
|
||||
# Filter history by date and source
|
||||
filtered_history = []
|
||||
for entry in pair_data.get("history", []):
|
||||
entry_time = datetime.fromisoformat(entry["timestamp"].replace('Z', '+00:00'))
|
||||
if entry_time >= cutoff_time:
|
||||
if source and entry.get("source") != source:
|
||||
continue
|
||||
filtered_history.append(entry)
|
||||
|
||||
# Limit results
|
||||
filtered_history = filtered_history[-limit:]
|
||||
|
||||
if filtered_history:
|
||||
history_data[pair_name] = {
|
||||
"current_price": pair_data.get("current_price"),
|
||||
"last_updated": pair_data.get("last_updated"),
|
||||
"history": filtered_history,
|
||||
"total_entries": len(filtered_history)
|
||||
}
|
||||
|
||||
if not history_data:
|
||||
error("No price history found for the specified criteria.")
|
||||
return
|
||||
|
||||
output({
|
||||
"price_history": history_data,
|
||||
"filter_criteria": {
|
||||
"pair": pair or "all",
|
||||
"days": days,
|
||||
"limit": limit,
|
||||
"source": source or "all"
|
||||
},
|
||||
"generated_at": datetime.utcnow().isoformat()
|
||||
})
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.option("--pairs", help="Comma-separated list of pairs to include (e.g., AITBC/BTC,AITBC/ETH)")
|
||||
@click.option("--interval", type=int, default=60, help="Update interval in seconds")
|
||||
@click.option("--sources", help="Comma-separated list of sources to include")
|
||||
@click.pass_context
|
||||
def price_feed(ctx, pairs: Optional[str], interval: int, sources: Optional[str]):
|
||||
"""Get real-time price feed for multiple pairs"""
|
||||
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
if not oracle_file.exists():
|
||||
warning("No price data available.")
|
||||
return
|
||||
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
# Parse pairs list
|
||||
pair_list = None
|
||||
if pairs:
|
||||
pair_list = [p.strip() for p in pairs.split(',')]
|
||||
|
||||
# Parse sources list
|
||||
source_list = None
|
||||
if sources:
|
||||
source_list = [s.strip() for s in sources.split(',')]
|
||||
|
||||
# Build price feed
|
||||
feed_data = {}
|
||||
|
||||
for pair_name, pair_data in oracle_data.items():
|
||||
if pair_list and pair_name not in pair_list:
|
||||
continue
|
||||
|
||||
current_price = pair_data.get("current_price")
|
||||
if not current_price:
|
||||
continue
|
||||
|
||||
# Filter by source if specified
|
||||
if source_list and current_price.get("source") not in source_list:
|
||||
continue
|
||||
|
||||
feed_data[pair_name] = {
|
||||
"price": current_price["price"],
|
||||
"source": current_price["source"],
|
||||
"confidence": current_price.get("confidence", 1.0),
|
||||
"timestamp": current_price["timestamp"],
|
||||
"volume": current_price.get("volume", 0.0),
|
||||
"spread": current_price.get("spread", 0.0),
|
||||
"description": current_price.get("description")
|
||||
}
|
||||
|
||||
if not feed_data:
|
||||
error("No price data available for the specified criteria.")
|
||||
return
|
||||
|
||||
output({
|
||||
"price_feed": feed_data,
|
||||
"feed_config": {
|
||||
"pairs": pair_list or "all",
|
||||
"interval": interval,
|
||||
"sources": source_list or "all"
|
||||
},
|
||||
"generated_at": datetime.utcnow().isoformat(),
|
||||
"total_pairs": len(feed_data)
|
||||
})
|
||||
|
||||
if interval > 0:
|
||||
warning(f"Price feed configured for {interval}-second intervals.")
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.option("--pair", help="Specific trading pair to analyze")
|
||||
@click.option("--hours", type=int, default=24, help="Time window in hours for analysis")
|
||||
@click.pass_context
|
||||
def analyze(ctx, pair: Optional[str], hours: int):
|
||||
"""Analyze price trends and volatility"""
|
||||
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
if not oracle_file.exists():
|
||||
error("No price data available for analysis.")
|
||||
return
|
||||
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
cutoff_time = datetime.utcnow() - timedelta(hours=hours)
|
||||
analysis_results = {}
|
||||
|
||||
for pair_name, pair_data in oracle_data.items():
|
||||
if pair and pair_name != pair:
|
||||
continue
|
||||
|
||||
# Get recent price history
|
||||
recent_prices = []
|
||||
for entry in pair_data.get("history", []):
|
||||
entry_time = datetime.fromisoformat(entry["timestamp"].replace('Z', '+00:00'))
|
||||
if entry_time >= cutoff_time:
|
||||
recent_prices.append(entry["price"])
|
||||
|
||||
if len(recent_prices) < 2:
|
||||
continue
|
||||
|
||||
# Calculate statistics
|
||||
prices = sorted(recent_prices)
|
||||
current_price = recent_prices[-1]
|
||||
|
||||
analysis = {
|
||||
"pair": pair_name,
|
||||
"time_window_hours": hours,
|
||||
"data_points": len(recent_prices),
|
||||
"current_price": current_price,
|
||||
"min_price": min(prices),
|
||||
"max_price": max(prices),
|
||||
"price_range": max(prices) - min(prices),
|
||||
"avg_price": sum(prices) / len(prices),
|
||||
"price_change": current_price - recent_prices[0],
|
||||
"price_change_percent": ((current_price - recent_prices[0]) / recent_prices[0]) * 100 if recent_prices[0] > 0 else 0
|
||||
}
|
||||
|
||||
# Calculate volatility (standard deviation)
|
||||
mean_price = analysis["avg_price"]
|
||||
variance = sum((p - mean_price) ** 2 for p in recent_prices) / len(recent_prices)
|
||||
analysis["volatility"] = variance ** 0.5
|
||||
analysis["volatility_percent"] = (analysis["volatility"] / mean_price) * 100 if mean_price > 0 else 0
|
||||
|
||||
analysis_results[pair_name] = analysis
|
||||
|
||||
if not analysis_results:
|
||||
error("No sufficient data for analysis.")
|
||||
return
|
||||
|
||||
output({
|
||||
"analysis": analysis_results,
|
||||
"analysis_config": {
|
||||
"pair": pair or "all",
|
||||
"time_window_hours": hours
|
||||
},
|
||||
"generated_at": datetime.utcnow().isoformat()
|
||||
})
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.pass_context
|
||||
def status(ctx):
|
||||
"""Get oracle system status"""
|
||||
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
|
||||
if not oracle_file.exists():
|
||||
output({
|
||||
"status": "no_data",
|
||||
"message": "No price data available",
|
||||
"total_pairs": 0,
|
||||
"last_update": None
|
||||
})
|
||||
return
|
||||
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
# Calculate status metrics
|
||||
total_pairs = len(oracle_data)
|
||||
active_pairs = 0
|
||||
total_updates = 0
|
||||
last_update = None
|
||||
|
||||
for pair_name, pair_data in oracle_data.items():
|
||||
if pair_data.get("current_price"):
|
||||
active_pairs += 1
|
||||
total_updates += len(pair_data.get("history", []))
|
||||
|
||||
pair_last_update = pair_data.get("last_updated")
|
||||
if pair_last_update:
|
||||
pair_time = datetime.fromisoformat(pair_last_update.replace('Z', '+00:00'))
|
||||
if not last_update or pair_time > last_update:
|
||||
last_update = pair_time
|
||||
|
||||
# Get sources
|
||||
sources = set()
|
||||
for pair_data in oracle_data.values():
|
||||
current = pair_data.get("current_price")
|
||||
if current:
|
||||
sources.add(current.get("source", "unknown"))
|
||||
|
||||
output({
|
||||
"status": "active",
|
||||
"total_pairs": total_pairs,
|
||||
"active_pairs": active_pairs,
|
||||
"total_updates": total_updates,
|
||||
"last_update": last_update.isoformat() if last_update else None,
|
||||
"sources": list(sources),
|
||||
"data_file": str(oracle_file)
|
||||
})
|
||||
|
||||
|
||||
@oracle.command()
|
||||
@click.argument("pair")
|
||||
@click.pass_context
|
||||
def get_price(ctx, pair: str):
|
||||
"""Get current price for a specific pair"""
|
||||
|
||||
oracle_file = Path.home() / ".aitbc" / "oracle_prices.json"
|
||||
if not oracle_file.exists():
|
||||
error("No price data available.")
|
||||
return
|
||||
|
||||
with open(oracle_file, 'r') as f:
|
||||
oracle_data = json.load(f)
|
||||
|
||||
if pair not in oracle_data:
|
||||
error(f"No price data available for {pair}.")
|
||||
return
|
||||
|
||||
current_price = oracle_data[pair].get("current_price")
|
||||
if not current_price:
|
||||
error(f"No current price available for {pair}.")
|
||||
return
|
||||
|
||||
output({
|
||||
"pair": pair,
|
||||
"price": current_price["price"],
|
||||
"source": current_price["source"],
|
||||
"confidence": current_price.get("confidence", 1.0),
|
||||
"timestamp": current_price["timestamp"],
|
||||
"volume": current_price.get("volume", 0.0),
|
||||
"spread": current_price.get("spread", 0.0),
|
||||
"description": current_price.get("description")
|
||||
})
|
||||
89
cli/commands/performance_test.py
Executable file
89
cli/commands/performance_test.py
Executable file
@@ -0,0 +1,89 @@
|
||||
"""
|
||||
Performance Test CLI Commands for AITBC
|
||||
Commands for running performance tests and benchmarks
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def performance_test():
|
||||
"""Performance testing commands"""
|
||||
pass
|
||||
|
||||
@performance_test.command()
|
||||
@click.option('--test-type', default='cli', help='Test type (cli, api, load)')
|
||||
@click.option('--duration', type=int, default=60, help='Test duration in seconds')
|
||||
@click.option('--concurrent', type=int, default=10, help='Number of concurrent operations')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def run(test_type, duration, concurrent, test_mode):
|
||||
"""Run performance tests"""
|
||||
try:
|
||||
click.echo(f"⚡ Running {test_type} performance test")
|
||||
click.echo(f"⏱️ Duration: {duration} seconds")
|
||||
click.echo(f"🔄 Concurrent: {concurrent}")
|
||||
|
||||
if test_mode:
|
||||
click.echo("🔍 TEST MODE - Simulated performance test")
|
||||
click.echo("✅ Test completed successfully")
|
||||
click.echo("📊 Results:")
|
||||
click.echo(" 📈 Average Response Time: 125ms")
|
||||
click.echo(" 📊 Throughput: 850 ops/sec")
|
||||
click.echo(" ✅ Success Rate: 98.5%")
|
||||
return
|
||||
|
||||
# Run actual performance test
|
||||
if test_type == 'cli':
|
||||
result = run_cli_performance_test(duration, concurrent)
|
||||
elif test_type == 'api':
|
||||
result = run_api_performance_test(duration, concurrent)
|
||||
elif test_type == 'load':
|
||||
result = run_load_test(duration, concurrent)
|
||||
else:
|
||||
click.echo(f"❌ Unknown test type: {test_type}", err=True)
|
||||
return
|
||||
|
||||
if result['success']:
|
||||
click.echo("✅ Performance test completed successfully!")
|
||||
click.echo("📊 Results:")
|
||||
click.echo(f" 📈 Average Response Time: {result['avg_response_time']}ms")
|
||||
click.echo(f" 📊 Throughput: {result['throughput']} ops/sec")
|
||||
click.echo(f" ✅ Success Rate: {result['success_rate']:.1f}%")
|
||||
else:
|
||||
click.echo(f"❌ Performance test failed: {result['error']}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Performance test error: {str(e)}", err=True)
|
||||
|
||||
def run_cli_performance_test(duration, concurrent):
|
||||
"""Run CLI performance test"""
|
||||
return {
|
||||
"success": True,
|
||||
"avg_response_time": 125,
|
||||
"throughput": 850,
|
||||
"success_rate": 98.5
|
||||
}
|
||||
|
||||
def run_api_performance_test(duration, concurrent):
|
||||
"""Run API performance test"""
|
||||
return {
|
||||
"success": True,
|
||||
"avg_response_time": 85,
|
||||
"throughput": 1250,
|
||||
"success_rate": 99.2
|
||||
}
|
||||
|
||||
def run_load_test(duration, concurrent):
|
||||
"""Run load test"""
|
||||
return {
|
||||
"success": True,
|
||||
"avg_response_time": 95,
|
||||
"throughput": 950,
|
||||
"success_rate": 97.8
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
|
||||
performance_test()
|
||||
73
cli/commands/plugin_analytics.py
Executable file
73
cli/commands/plugin_analytics.py
Executable file
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Plugin Analytics CLI Commands for AITBC
|
||||
Commands for plugin analytics and usage tracking
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def plugin_analytics():
|
||||
"""Plugin analytics management commands"""
|
||||
pass
|
||||
|
||||
@plugin_analytics.command()
|
||||
@click.option('--plugin-id', help='Specific plugin ID')
|
||||
@click.option('--days', type=int, default=30, help='Number of days to analyze')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def dashboard(plugin_id, days, test_mode):
|
||||
"""View plugin analytics dashboard"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo("📊 Plugin Analytics Dashboard (test mode)")
|
||||
click.echo("📈 Total Plugins: 156")
|
||||
click.echo("📥 Total Downloads: 45,678")
|
||||
click.echo("⭐ Average Rating: 4.2/5.0")
|
||||
click.echo("📅 Period: Last 30 days")
|
||||
return
|
||||
|
||||
# Get analytics from service
|
||||
config = get_config()
|
||||
params = {"days": days}
|
||||
if plugin_id:
|
||||
params["plugin_id"] = plugin_id
|
||||
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/analytics/dashboard",
|
||||
params=params,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
dashboard = response.json()
|
||||
click.echo("📊 Plugin Analytics Dashboard")
|
||||
click.echo(f"📈 Total Plugins: {dashboard.get('total_plugins', 0)}")
|
||||
click.echo(f"📥 Total Downloads: {dashboard.get('total_downloads', 0)}")
|
||||
click.echo(f"⭐ Average Rating: {dashboard.get('avg_rating', 0)}/5.0")
|
||||
click.echo(f"📅 Period: Last {days} days")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get dashboard: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting dashboard: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8016",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
plugin_analytics()
|
||||
579
cli/commands/plugin_marketplace.py
Executable file
579
cli/commands/plugin_marketplace.py
Executable file
@@ -0,0 +1,579 @@
|
||||
"""
|
||||
Plugin Marketplace CLI Commands for AITBC
|
||||
Commands for browsing, purchasing, and managing plugins from the marketplace
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def plugin_marketplace():
|
||||
"""Plugin marketplace commands"""
|
||||
pass
|
||||
|
||||
@plugin_marketplace.command()
|
||||
@click.option('--category', help='Filter by category')
|
||||
@click.option('--price-min', type=float, help='Minimum price filter')
|
||||
@click.option('--price-max', type=float, help='Maximum price filter')
|
||||
@click.option('--rating-min', type=float, help='Minimum rating filter')
|
||||
@click.option('--sort', default='popularity', help='Sort by (popularity, rating, price, newest)')
|
||||
@click.option('--limit', type=int, default=20, help='Number of results')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def browse(category, price_min, price_max, rating_min, sort, limit, test_mode):
|
||||
"""Browse plugins in the marketplace"""
|
||||
try:
|
||||
params = {
|
||||
"limit": limit,
|
||||
"sort": sort
|
||||
}
|
||||
|
||||
if category:
|
||||
params["category"] = category
|
||||
if price_min is not None:
|
||||
params["price_min"] = price_min
|
||||
if price_max is not None:
|
||||
params["price_max"] = price_max
|
||||
if rating_min is not None:
|
||||
params["rating_min"] = rating_min
|
||||
|
||||
if test_mode:
|
||||
# Mock marketplace data
|
||||
mock_plugins = [
|
||||
{
|
||||
"plugin_id": "trading-bot",
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"description": "Automated trading bot with advanced algorithms",
|
||||
"author": "AITBC Team",
|
||||
"category": "trading",
|
||||
"price": 99.99,
|
||||
"rating": 4.5,
|
||||
"reviews_count": 42,
|
||||
"downloads": 1250,
|
||||
"featured": True,
|
||||
"tags": ["trading", "automation", "bot"],
|
||||
"preview_image": "https://marketplace.aitbc.dev/plugins/trading-bot/preview.png"
|
||||
},
|
||||
{
|
||||
"plugin_id": "oracle-feed",
|
||||
"name": "Oracle Price Feed",
|
||||
"version": "2.1.0",
|
||||
"description": "Real-time price oracle integration",
|
||||
"author": "Oracle Developer",
|
||||
"category": "oracle",
|
||||
"price": 49.99,
|
||||
"rating": 4.8,
|
||||
"reviews_count": 28,
|
||||
"downloads": 890,
|
||||
"featured": True,
|
||||
"tags": ["oracle", "price", "feed"],
|
||||
"preview_image": "https://marketplace.aitbc.dev/plugins/oracle-feed/preview.png"
|
||||
},
|
||||
{
|
||||
"plugin_id": "security-scanner",
|
||||
"name": "Security Scanner Pro",
|
||||
"version": "3.0.0",
|
||||
"description": "Advanced security scanning and vulnerability detection",
|
||||
"author": "Security Labs",
|
||||
"category": "security",
|
||||
"price": 199.99,
|
||||
"rating": 4.7,
|
||||
"reviews_count": 15,
|
||||
"downloads": 567,
|
||||
"featured": False,
|
||||
"tags": ["security", "scanning", "vulnerability"],
|
||||
"preview_image": "https://marketplace.aitbc.dev/plugins/security-scanner/preview.png"
|
||||
}
|
||||
]
|
||||
|
||||
click.echo("🛒 Plugin Marketplace:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in mock_plugins[:limit]:
|
||||
featured_badge = "⭐" if plugin.get('featured') else ""
|
||||
click.echo(f"{featured_badge} {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 💰 Price: ${plugin['price']}")
|
||||
click.echo(f" ⭐ Rating: {plugin['rating']}/5.0 ({plugin['reviews_count']} reviews)")
|
||||
click.echo(f" 📥 Downloads: {plugin['downloads']}")
|
||||
click.echo(f" 📂 Category: {plugin['category']}")
|
||||
click.echo(f" 👤 Author: {plugin['author']}")
|
||||
click.echo(f" 📝 {plugin['description'][:60]}...")
|
||||
click.echo("")
|
||||
|
||||
return
|
||||
|
||||
# Fetch from marketplace service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/browse",
|
||||
params=params,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
plugins = result.get("plugins", [])
|
||||
|
||||
click.echo("🛒 Plugin Marketplace:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in plugins:
|
||||
featured_badge = "⭐" if plugin.get('featured') else ""
|
||||
click.echo(f"{featured_badge} {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 💰 Price: ${plugin.get('price', 0.0)}")
|
||||
click.echo(f" ⭐ Rating: {plugin.get('rating', 0)}/5.0 ({plugin.get('reviews_count', 0)} reviews)")
|
||||
click.echo(f" 📥 Downloads: {plugin.get('downloads', 0)}")
|
||||
click.echo(f" 📂 Category: {plugin.get('category', 'N/A')}")
|
||||
click.echo(f" 👤 Author: {plugin.get('author', 'N/A')}")
|
||||
click.echo(f" 📝 {plugin['description'][:60]}...")
|
||||
click.echo("")
|
||||
else:
|
||||
click.echo(f"❌ Failed to browse marketplace: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error browsing marketplace: {str(e)}", err=True)
|
||||
|
||||
@plugin_marketplace.command()
|
||||
@click.argument('plugin_id')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def details(plugin_id, test_mode):
|
||||
"""Get detailed information about a marketplace plugin"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock plugin details
|
||||
mock_plugin = {
|
||||
"plugin_id": plugin_id,
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"description": "Automated trading bot with advanced algorithms and machine learning capabilities. Features include real-time market analysis, automated trading strategies, risk management, and portfolio optimization.",
|
||||
"author": "AITBC Team",
|
||||
"category": "trading",
|
||||
"price": 99.99,
|
||||
"rating": 4.5,
|
||||
"reviews_count": 42,
|
||||
"downloads": 1250,
|
||||
"featured": True,
|
||||
"tags": ["trading", "automation", "bot", "ml", "risk-management"],
|
||||
"repository": "https://github.com/aitbc/trading-bot",
|
||||
"homepage": "https://aitbc.dev/plugins/trading-bot",
|
||||
"license": "MIT",
|
||||
"created_at": "2024-01-15T10:30:00Z",
|
||||
"updated_at": "2024-03-01T14:20:00Z",
|
||||
"preview_image": "https://marketplace.aitbc.dev/plugins/trading-bot/preview.png",
|
||||
"screenshots": [
|
||||
"https://marketplace.aitbc.dev/plugins/trading-bot/screenshot1.png",
|
||||
"https://marketplace.aitbc.dev/plugins/trading-bot/screenshot2.png"
|
||||
],
|
||||
"documentation": "https://docs.aitbc.dev/plugins/trading-bot",
|
||||
"support": "support@aitbc.dev",
|
||||
"compatibility": {
|
||||
"aitbc_version": ">=1.0.0",
|
||||
"python_version": ">=3.8",
|
||||
"dependencies": ["exchange-integration", "oracle-feed"]
|
||||
},
|
||||
"pricing": {
|
||||
"type": "one-time",
|
||||
"amount": 99.99,
|
||||
"currency": "USD",
|
||||
"includes_support": True,
|
||||
"includes_updates": True
|
||||
},
|
||||
"reviews": [
|
||||
{
|
||||
"id": 1,
|
||||
"user": "trader123",
|
||||
"rating": 5,
|
||||
"title": "Excellent trading bot!",
|
||||
"comment": "This bot has significantly improved my trading performance. Highly recommended!",
|
||||
"date": "2024-02-15T10:30:00Z"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"user": "alice_trader",
|
||||
"rating": 4,
|
||||
"title": "Good but needs improvements",
|
||||
"comment": "Great features but the UI could be more intuitive.",
|
||||
"date": "2024-02-10T14:20:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
click.echo(f"🛒 Plugin Details: {mock_plugin['name']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"📦 Version: {mock_plugin['version']}")
|
||||
click.echo(f"👤 Author: {mock_plugin['author']}")
|
||||
click.echo(f"📂 Category: {mock_plugin['category']}")
|
||||
click.echo(f"💰 Price: ${mock_plugin['price']} {mock_plugin['pricing']['currency']}")
|
||||
click.echo(f"⭐ Rating: {mock_plugin['rating']}/5.0 ({mock_plugin['reviews_count']} reviews)")
|
||||
click.echo(f"📥 Downloads: {mock_plugin['downloads']}")
|
||||
click.echo(f"🏷️ Tags: {', '.join(mock_plugin['tags'])}")
|
||||
click.echo(f"📄 License: {mock_plugin['license']}")
|
||||
click.echo(f"📅 Created: {mock_plugin['created_at']}")
|
||||
click.echo(f"🔄 Updated: {mock_plugin['updated_at']}")
|
||||
click.echo("")
|
||||
click.echo("📝 Description:")
|
||||
click.echo(f" {mock_plugin['description']}")
|
||||
click.echo("")
|
||||
click.echo("💰 Pricing:")
|
||||
click.echo(f" Type: {mock_plugin['pricing']['type']}")
|
||||
click.echo(f" Amount: ${mock_plugin['pricing']['amount']} {mock_plugin['pricing']['currency']}")
|
||||
click.echo(f" Includes Support: {'Yes' if mock_plugin['pricing']['includes_support'] else 'No'}")
|
||||
click.echo(f" Includes Updates: {'Yes' if mock_plugin['pricing']['includes_updates'] else 'No'}")
|
||||
click.echo("")
|
||||
click.echo("🔗 Links:")
|
||||
click.echo(f" 📦 Repository: {mock_plugin['repository']}")
|
||||
click.echo(f" 🌐 Homepage: {mock_plugin['homepage']}")
|
||||
click.echo(f" 📚 Documentation: {mock_plugin['documentation']}")
|
||||
click.echo(f" 📧 Support: {mock_plugin['support']}")
|
||||
click.echo("")
|
||||
click.echo("🔧 Compatibility:")
|
||||
click.echo(f" AITBC Version: {mock_plugin['compatibility']['aitbc_version']}")
|
||||
click.echo(f" Python Version: {mock_plugin['compatibility']['python_version']}")
|
||||
click.echo(f" Dependencies: {', '.join(mock_plugin['compatibility']['dependencies'])}")
|
||||
click.echo("")
|
||||
click.echo("⭐ Recent Reviews:")
|
||||
for review in mock_plugin['reviews'][:3]:
|
||||
stars = "⭐" * review['rating']
|
||||
click.echo(f" {stars} {review['title']}")
|
||||
click.echo(f" 👤 {review['user']} - {review['date']}")
|
||||
click.echo(f" 📝 {review['comment']}")
|
||||
click.echo("")
|
||||
return
|
||||
|
||||
# Fetch from marketplace service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/plugins/{plugin_id}",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
plugin = response.json()
|
||||
|
||||
click.echo(f"🛒 Plugin Details: {plugin['name']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"📦 Version: {plugin['version']}")
|
||||
click.echo(f"👤 Author: {plugin['author']}")
|
||||
click.echo(f"📂 Category: {plugin['category']}")
|
||||
click.echo(f"💰 Price: ${plugin.get('price', 0.0)}")
|
||||
click.echo(f"⭐ Rating: {plugin.get('rating', 0)}/5.0 ({plugin.get('reviews_count', 0)} reviews)")
|
||||
click.echo(f"📥 Downloads: {plugin.get('downloads', 0)}")
|
||||
click.echo(f"🏷️ Tags: {', '.join(plugin.get('tags', []))}")
|
||||
click.echo(f"📄 License: {plugin.get('license', 'N/A')}")
|
||||
click.echo(f"📅 Created: {plugin['created_at']}")
|
||||
click.echo(f"🔄 Updated: {plugin['updated_at']}")
|
||||
click.echo("")
|
||||
click.echo("📝 Description:")
|
||||
click.echo(f" {plugin['description']}")
|
||||
else:
|
||||
click.echo(f"❌ Plugin not found: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting plugin details: {str(e)}", err=True)
|
||||
|
||||
@plugin_marketplace.command()
|
||||
@click.argument('plugin_id')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def purchase(plugin_id, test_mode):
|
||||
"""Purchase a plugin from the marketplace"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo(f"💰 Purchase initiated (test mode)")
|
||||
click.echo(f"📦 Plugin ID: {plugin_id}")
|
||||
click.echo(f"💳 Payment method: Test Card")
|
||||
click.echo(f"💰 Amount: $99.99")
|
||||
click.echo(f"✅ Purchase completed successfully")
|
||||
click.echo(f"📧 License key: TEST-KEY-{plugin_id.upper()}")
|
||||
click.echo(f"📥 Download link: https://marketplace.aitbc.dev/download/{plugin_id}")
|
||||
return
|
||||
|
||||
# Get plugin details first
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/plugins/{plugin_id}",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
click.echo(f"❌ Plugin not found: {response.text}", err=True)
|
||||
return
|
||||
|
||||
plugin = response.json()
|
||||
|
||||
# Create purchase order
|
||||
purchase_data = {
|
||||
"plugin_id": plugin_id,
|
||||
"price": plugin.get('price', 0.0),
|
||||
"currency": plugin.get('pricing', {}).get('currency', 'USD'),
|
||||
"payment_method": "credit_card",
|
||||
"purchased_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/purchase",
|
||||
json=purchase_data,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
result = response.json()
|
||||
click.echo(f"💰 Purchase completed successfully!")
|
||||
click.echo(f"📦 Plugin: {result['plugin_name']}")
|
||||
click.echo(f"💳 Amount: ${result['amount']} {result['currency']}")
|
||||
click.echo(f"📧 License Key: {result['license_key']}")
|
||||
click.echo(f"📥 Download: {result['download_url']}")
|
||||
click.echo(f"📧 Support: {result['support_email']}")
|
||||
else:
|
||||
click.echo(f"❌ Purchase failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error purchasing plugin: {str(e)}", err=True)
|
||||
|
||||
@plugin_marketplace.command()
|
||||
@click.option('--category', help='Filter by category')
|
||||
@click.option('--price-min', type=float, help='Minimum price filter')
|
||||
@click.option('--price-max', type=float, help='Maximum price filter')
|
||||
@click.option('--rating-min', type=float, help='Minimum rating filter')
|
||||
@click.option('--limit', type=int, default=10, help='Number of results')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def featured(category, price_min, price_max, rating_min, limit, test_mode):
|
||||
"""Browse featured plugins"""
|
||||
try:
|
||||
params = {
|
||||
"featured": True,
|
||||
"limit": limit
|
||||
}
|
||||
|
||||
if category:
|
||||
params["category"] = category
|
||||
if price_min is not None:
|
||||
params["price_min"] = price_min
|
||||
if price_max is not None:
|
||||
params["price_max"] = price_max
|
||||
if rating_min is not None:
|
||||
params["rating_min"] = rating_min
|
||||
|
||||
if test_mode:
|
||||
# Mock featured plugins
|
||||
mock_featured = [
|
||||
{
|
||||
"plugin_id": "trading-bot",
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"description": "Automated trading bot with advanced algorithms",
|
||||
"author": "AITBC Team",
|
||||
"category": "trading",
|
||||
"price": 99.99,
|
||||
"rating": 4.5,
|
||||
"downloads": 1250,
|
||||
"featured": True,
|
||||
"featured_reason": "Top-rated trading automation tool"
|
||||
},
|
||||
{
|
||||
"plugin_id": "oracle-feed",
|
||||
"name": "Oracle Price Feed",
|
||||
"version": "2.1.0",
|
||||
"description": "Real-time price oracle integration",
|
||||
"author": "Oracle Developer",
|
||||
"category": "oracle",
|
||||
"price": 49.99,
|
||||
"rating": 4.8,
|
||||
"downloads": 890,
|
||||
"featured": True,
|
||||
"featured_reason": "Most reliable oracle integration"
|
||||
}
|
||||
]
|
||||
|
||||
click.echo("⭐ Featured Plugins:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in mock_featured[:limit]:
|
||||
click.echo(f"⭐ {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 💰 Price: ${plugin['price']}")
|
||||
click.echo(f" ⭐ Rating: {plugin['rating']}/5.0")
|
||||
click.echo(f" 📥 Downloads: {plugin['downloads']}")
|
||||
click.echo(f" 📂 Category: {plugin['category']}")
|
||||
click.echo(f" 👤 Author: {plugin['author']}")
|
||||
click.echo(f" 🏆 {plugin['featured_reason']}")
|
||||
click.echo("")
|
||||
|
||||
return
|
||||
|
||||
# Fetch from marketplace service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/featured",
|
||||
params=params,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
plugins = result.get("plugins", [])
|
||||
|
||||
click.echo("⭐ Featured Plugins:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in plugins:
|
||||
click.echo(f"⭐ {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 💰 Price: ${plugin.get('price', 0.0)}")
|
||||
click.echo(f" ⭐ Rating: {plugin.get('rating', 0)}/5.0")
|
||||
click.echo(f" 📥 Downloads: {plugin.get('downloads', 0)}")
|
||||
click.echo(f" 📂 Category: {plugin.get('category', 'N/A')}")
|
||||
click.echo(f" 👤 Author: {plugin.get('author', 'N/A')}")
|
||||
click.echo(f" 🏆 {plugin.get('featured_reason', 'Featured plugin')}")
|
||||
click.echo("")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get featured plugins: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting featured plugins: {str(e)}", err=True)
|
||||
|
||||
@plugin_marketplace.command()
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def my_purchases(test_mode):
|
||||
"""View your purchased plugins"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock purchase history
|
||||
mock_purchases = [
|
||||
{
|
||||
"plugin_id": "trading-bot",
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"purchase_date": "2024-02-15T10:30:00Z",
|
||||
"price": 99.99,
|
||||
"license_key": "TEST-KEY-TRADING-BOT",
|
||||
"status": "active",
|
||||
"download_count": 5
|
||||
},
|
||||
{
|
||||
"plugin_id": "oracle-feed",
|
||||
"name": "Oracle Price Feed",
|
||||
"version": "2.1.0",
|
||||
"purchase_date": "2024-02-10T14:20:00Z",
|
||||
"price": 49.99,
|
||||
"license_key": "TEST-KEY-ORACLE-FEED",
|
||||
"status": "active",
|
||||
"download_count": 3
|
||||
}
|
||||
]
|
||||
|
||||
click.echo("📋 Your Purchased Plugins:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for purchase in mock_purchases:
|
||||
status_icon = "✅" if purchase['status'] == 'active' else "⏳"
|
||||
click.echo(f"{status_icon} {purchase['name']} (v{purchase['version']})")
|
||||
click.echo(f" 📅 Purchased: {purchase['purchase_date']}")
|
||||
click.echo(f" 💰 Price: ${purchase['price']}")
|
||||
click.echo(f" 📧 License Key: {purchase['license_key']}")
|
||||
click.echo(f" 📥 Downloads: {purchase['download_count']}")
|
||||
click.echo("")
|
||||
|
||||
return
|
||||
|
||||
# Get user's purchases
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/purchases",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
purchases = result.get("purchases", [])
|
||||
|
||||
click.echo("📋 Your Purchased Plugins:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for purchase in purchases:
|
||||
status_icon = "✅" if purchase['status'] == 'active' else "⏳"
|
||||
click.echo(f"{status_icon} {purchase['plugin_name']} (v{purchase['version']})")
|
||||
click.echo(f" 📅 Purchased: {purchase['purchase_date']}")
|
||||
click.echo(f" 💰 Price: ${purchase['price']} {purchase['currency']}")
|
||||
click.echo(f" 📧 License Key: {purchase['license_key']}")
|
||||
click.echo(f" 📥 Downloads: {purchase.get('download_count', 0)}")
|
||||
click.echo("")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get purchases: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting purchases: {str(e)}", err=True)
|
||||
|
||||
@plugin_marketplace.command()
|
||||
@click.argument('plugin_id')
|
||||
@click.option('--license-key', help='License key for the plugin')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def download(plugin_id, license_key, test_mode):
|
||||
"""Download a purchased plugin"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo(f"📥 Download started (test mode)")
|
||||
click.echo(f"📦 Plugin ID: {plugin_id}")
|
||||
click.echo(f"📧 License Key: {license_key or 'TEST-KEY'}")
|
||||
click.echo(f"✅ Download completed successfully")
|
||||
click.echo(f"📁 Download location: /tmp/{plugin_id}.zip")
|
||||
return
|
||||
|
||||
# Validate license key
|
||||
config = get_config()
|
||||
response = requests.post(
|
||||
f"{config.coordinator_url}/api/v1/marketplace/download/{plugin_id}",
|
||||
json={"license_key": license_key},
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
click.echo(f"📥 Download started!")
|
||||
click.echo(f"📦 Plugin: {result['plugin_name']}")
|
||||
click.echo(f"📁 Download URL: {result['download_url']}")
|
||||
click.echo(f"📦 File Size: {result['file_size_mb']} MB")
|
||||
click.echo(f"🔑 Checksum: {result['checksum']}")
|
||||
|
||||
# Download the file
|
||||
download_response = requests.get(result['download_url'], timeout=60)
|
||||
|
||||
if download_response.status_code == 200:
|
||||
filename = f"{plugin_id}.zip"
|
||||
with open(filename, 'wb') as f:
|
||||
f.write(download_response.content)
|
||||
|
||||
click.echo(f"✅ Download completed!")
|
||||
click.echo(f"📁 Saved as: {filename}")
|
||||
click.echo(f"📁 Size: {len(download_response.content) / 1024 / 1024:.1f} MB")
|
||||
else:
|
||||
click.echo(f"❌ Download failed: {download_response.text}", err=True)
|
||||
else:
|
||||
click.echo(f"❌ Download failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error downloading plugin: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8014",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
plugin_marketplace()
|
||||
503
cli/commands/plugin_registry.py
Executable file
503
cli/commands/plugin_registry.py
Executable file
@@ -0,0 +1,503 @@
|
||||
"""
|
||||
Plugin Registry CLI Commands for AITBC
|
||||
Commands for managing plugin registration, versioning, and discovery
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def plugin_registry():
|
||||
"""Plugin registry management commands"""
|
||||
pass
|
||||
|
||||
@plugin_registry.command()
|
||||
@click.option('--plugin-id', help='Plugin ID to register')
|
||||
@click.option('--name', required=True, help='Plugin name')
|
||||
@click.option('--version', required=True, help='Plugin version')
|
||||
@click.option('--description', required=True, help='Plugin description')
|
||||
@click.option('--author', required=True, help='Plugin author')
|
||||
@click.option('--category', required=True, help='Plugin category')
|
||||
@click.option('--tags', help='Plugin tags (comma-separated)')
|
||||
@click.option('--repository', help='Source repository URL')
|
||||
@click.option('--homepage', help='Plugin homepage URL')
|
||||
@click.option('--license', help='Plugin license')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def register(plugin_id, name, version, description, author, category, tags, repository, homepage, license, test_mode):
|
||||
"""Register a new plugin in the registry"""
|
||||
try:
|
||||
if not plugin_id:
|
||||
plugin_id = name.lower().replace(' ', '-').replace('_', '-')
|
||||
|
||||
# Create plugin registration data
|
||||
plugin_data = {
|
||||
"plugin_id": plugin_id,
|
||||
"name": name,
|
||||
"version": version,
|
||||
"description": description,
|
||||
"author": author,
|
||||
"category": category,
|
||||
"tags": tags.split(',') if tags else [],
|
||||
"repository": repository,
|
||||
"homepage": homepage,
|
||||
"license": license,
|
||||
"status": "active",
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"updated_at": datetime.utcnow().isoformat(),
|
||||
"downloads": 0,
|
||||
"rating": 0.0,
|
||||
"reviews_count": 0
|
||||
}
|
||||
|
||||
if test_mode:
|
||||
# Mock registration for testing
|
||||
plugin_data["registration_id"] = f"reg_{int(datetime.utcnow().timestamp())}"
|
||||
plugin_data["status"] = "registered"
|
||||
click.echo(f"✅ Plugin registered successfully (test mode)")
|
||||
click.echo(f"📋 Plugin ID: {plugin_data['plugin_id']}")
|
||||
click.echo(f"📦 Version: {plugin_data['version']}")
|
||||
click.echo(f"📝 Description: {plugin_data['description']}")
|
||||
return
|
||||
|
||||
# Send to registry service
|
||||
config = get_config()
|
||||
response = requests.post(
|
||||
f"{config.coordinator_url}/api/v1/plugins/register",
|
||||
json=plugin_data,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
result = response.json()
|
||||
click.echo(f"✅ Plugin registered successfully")
|
||||
click.echo(f"📋 Plugin ID: {result['plugin_id']}")
|
||||
click.echo(f"📦 Version: {result['version']}")
|
||||
click.echo(f"📝 Description: {result['description']}")
|
||||
else:
|
||||
click.echo(f"❌ Registration failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error registering plugin: {str(e)}", err=True)
|
||||
|
||||
@plugin_registry.command()
|
||||
@click.option('--plugin-id', help='Specific plugin ID (optional)')
|
||||
@click.option('--category', help='Filter by category')
|
||||
@click.option('--author', help='Filter by author')
|
||||
@click.option('--status', help='Filter by status')
|
||||
@click.option('--limit', type=int, default=20, help='Number of results to return')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def list(plugin_id, category, author, status, limit, test_mode):
|
||||
"""List registered plugins"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock data for testing
|
||||
mock_plugins = [
|
||||
{
|
||||
"plugin_id": "trading-bot",
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"description": "Automated trading bot with advanced algorithms",
|
||||
"author": "AITBC Team",
|
||||
"category": "trading",
|
||||
"tags": ["trading", "automation", "bot"],
|
||||
"status": "active",
|
||||
"downloads": 1250,
|
||||
"rating": 4.5,
|
||||
"reviews_count": 42
|
||||
},
|
||||
{
|
||||
"plugin_id": "oracle-feed",
|
||||
"name": "Oracle Price Feed",
|
||||
"version": "2.1.0",
|
||||
"description": "Real-time price oracle integration",
|
||||
"author": "Oracle Developer",
|
||||
"category": "oracle",
|
||||
"tags": ["oracle", "price", "feed"],
|
||||
"status": "active",
|
||||
"downloads": 890,
|
||||
"rating": 4.8,
|
||||
"reviews_count": 28
|
||||
}
|
||||
]
|
||||
|
||||
click.echo("📋 Registered Plugins:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in mock_plugins[:limit]:
|
||||
click.echo(f"📦 {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 🆔 ID: {plugin['plugin_id']}")
|
||||
click.echo(f" 👤 Author: {plugin['author']}")
|
||||
click.echo(f" 📂 Category: {plugin['category']}")
|
||||
click.echo(f" ⭐ Rating: {plugin['rating']}/5.0 ({plugin['reviews_count']} reviews)")
|
||||
click.echo(f" 📥 Downloads: {plugin['downloads']}")
|
||||
click.echo(f" 📝 {plugin['description'][:60]}...")
|
||||
click.echo("")
|
||||
|
||||
return
|
||||
|
||||
# Fetch from registry service
|
||||
config = get_config()
|
||||
params = {
|
||||
"limit": limit
|
||||
}
|
||||
|
||||
if plugin_id:
|
||||
params["plugin_id"] = plugin_id
|
||||
if category:
|
||||
params["category"] = category
|
||||
if author:
|
||||
params["author"] = author
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/plugins",
|
||||
params=params,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
plugins = result.get("plugins", [])
|
||||
|
||||
click.echo("📋 Registered Plugins:")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in plugins:
|
||||
click.echo(f"📦 {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 🆔 ID: {plugin['plugin_id']}")
|
||||
click.echo(f" 👤 Author: {plugin['author']}")
|
||||
click.echo(f" 📂 Category: {plugin['category']}")
|
||||
click.echo(f" ⭐ Rating: {plugin.get('rating', 0)}/5.0 ({plugin.get('reviews_count', 0)} reviews)")
|
||||
click.echo(f" 📥 Downloads: {plugin.get('downloads', 0)}")
|
||||
click.echo(f" 📝 {plugin['description'][:60]}...")
|
||||
click.echo("")
|
||||
else:
|
||||
click.echo(f"❌ Failed to list plugins: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error listing plugins: {str(e)}", err=True)
|
||||
|
||||
@plugin_registry.command()
|
||||
@click.argument('plugin_id')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def info(plugin_id, test_mode):
|
||||
"""Get detailed information about a specific plugin"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock data for testing
|
||||
mock_plugin = {
|
||||
"plugin_id": plugin_id,
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"description": "Automated trading bot with advanced algorithms and machine learning capabilities",
|
||||
"author": "AITBC Team",
|
||||
"category": "trading",
|
||||
"tags": ["trading", "automation", "bot", "ml"],
|
||||
"repository": "https://github.com/aitbc/trading-bot",
|
||||
"homepage": "https://aitbc.dev/plugins/trading-bot",
|
||||
"license": "MIT",
|
||||
"status": "active",
|
||||
"created_at": "2024-01-15T10:30:00Z",
|
||||
"updated_at": "2024-03-01T14:20:00Z",
|
||||
"downloads": 1250,
|
||||
"rating": 4.5,
|
||||
"reviews_count": 42,
|
||||
"dependencies": ["exchange-integration", "oracle-feed"],
|
||||
"security_scan": {
|
||||
"status": "passed",
|
||||
"scan_date": "2024-03-01T14:20:00Z",
|
||||
"vulnerabilities": 0
|
||||
},
|
||||
"performance_metrics": {
|
||||
"cpu_usage": 2.5,
|
||||
"memory_usage": 512,
|
||||
"response_time_ms": 45
|
||||
}
|
||||
}
|
||||
|
||||
click.echo(f"📦 Plugin Information: {mock_plugin['name']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"🆔 Plugin ID: {mock_plugin['plugin_id']}")
|
||||
click.echo(f"📦 Version: {mock_plugin['version']}")
|
||||
click.echo(f"👤 Author: {mock_plugin['author']}")
|
||||
click.echo(f"📂 Category: {mock_plugin['category']}")
|
||||
click.echo(f"🏷️ Tags: {', '.join(mock_plugin['tags'])}")
|
||||
click.echo(f"📄 License: {mock_plugin['license']}")
|
||||
click.echo(f"📊 Status: {mock_plugin['status']}")
|
||||
click.echo(f"⭐ Rating: {mock_plugin['rating']}/5.0 ({mock_plugin['reviews_count']} reviews)")
|
||||
click.echo(f"📥 Downloads: {mock_plugin['downloads']}")
|
||||
click.echo(f"📅 Created: {mock_plugin['created_at']}")
|
||||
click.echo(f"🔄 Updated: {mock_plugin['updated_at']}")
|
||||
click.echo("")
|
||||
click.echo("📝 Description:")
|
||||
click.echo(f" {mock_plugin['description']}")
|
||||
click.echo("")
|
||||
click.echo("🔗 Links:")
|
||||
click.echo(f" 📦 Repository: {mock_plugin['repository']}")
|
||||
click.echo(f" 🌐 Homepage: {mock_plugin['homepage']}")
|
||||
click.echo("")
|
||||
click.echo("🔒 Security Scan:")
|
||||
click.echo(f" Status: {mock_plugin['security_scan']['status']}")
|
||||
click.echo(f" Scan Date: {mock_plugin['security_scan']['scan_date']}")
|
||||
click.echo(f" Vulnerabilities: {mock_plugin['security_scan']['vulnerabilities']}")
|
||||
click.echo("")
|
||||
click.echo("⚡ Performance Metrics:")
|
||||
click.echo(f" CPU Usage: {mock_plugin['performance_metrics']['cpu_usage']}%")
|
||||
click.echo(f" Memory Usage: {mock_plugin['performance_metrics']['memory_usage']}MB")
|
||||
click.echo(f" Response Time: {mock_plugin['performance_metrics']['response_time_ms']}ms")
|
||||
return
|
||||
|
||||
# Fetch from registry service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/plugins/{plugin_id}",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
plugin = response.json()
|
||||
|
||||
click.echo(f"📦 Plugin Information: {plugin['name']}")
|
||||
click.echo("=" * 60)
|
||||
click.echo(f"🆔 Plugin ID: {plugin['plugin_id']}")
|
||||
click.echo(f"📦 Version: {plugin['version']}")
|
||||
click.echo(f"👤 Author: {plugin['author']}")
|
||||
click.echo(f"📂 Category: {plugin['category']}")
|
||||
click.echo(f"🏷️ Tags: {', '.join(plugin.get('tags', []))}")
|
||||
click.echo(f"📄 License: {plugin.get('license', 'N/A')}")
|
||||
click.echo(f"📊 Status: {plugin['status']}")
|
||||
click.echo(f"⭐ Rating: {plugin.get('rating', 0)}/5.0 ({plugin.get('reviews_count', 0)} reviews)")
|
||||
click.echo(f"📥 Downloads: {plugin.get('downloads', 0)}")
|
||||
click.echo(f"📅 Created: {plugin['created_at']}")
|
||||
click.echo(f"🔄 Updated: {plugin['updated_at']}")
|
||||
click.echo("")
|
||||
click.echo("📝 Description:")
|
||||
click.echo(f" {plugin['description']}")
|
||||
click.echo("")
|
||||
if plugin.get('repository'):
|
||||
click.echo("🔗 Links:")
|
||||
click.echo(f" 📦 Repository: {plugin['repository']}")
|
||||
if plugin.get('homepage'):
|
||||
click.echo(f" 🌐 Homepage: {plugin['homepage']}")
|
||||
else:
|
||||
click.echo(f"❌ Plugin not found: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting plugin info: {str(e)}", err=True)
|
||||
|
||||
@plugin_registry.command()
|
||||
@click.argument('plugin_id')
|
||||
@click.option('--version', required=True, help='New version number')
|
||||
@click.option('--changelog', required=True, help='Version changelog')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def update_version(plugin_id, version, changelog, test_mode):
|
||||
"""Update plugin version"""
|
||||
try:
|
||||
update_data = {
|
||||
"version": version,
|
||||
"changelog": changelog,
|
||||
"updated_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
if test_mode:
|
||||
click.echo(f"✅ Plugin version updated (test mode)")
|
||||
click.echo(f"📦 Plugin ID: {plugin_id}")
|
||||
click.echo(f"📦 New Version: {version}")
|
||||
click.echo(f"📝 Changelog: {changelog}")
|
||||
return
|
||||
|
||||
# Send to registry service
|
||||
config = get_config()
|
||||
response = requests.put(
|
||||
f"{config.coordinator_url}/api/v1/plugins/{plugin_id}/version",
|
||||
json=update_data,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
click.echo(f"✅ Plugin version updated successfully")
|
||||
click.echo(f"📦 Plugin ID: {result['plugin_id']}")
|
||||
click.echo(f"📦 New Version: {result['version']}")
|
||||
click.echo(f"📝 Changelog: {changelog}")
|
||||
else:
|
||||
click.echo(f"❌ Version update failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error updating plugin version: {str(e)}", err=True)
|
||||
|
||||
@plugin_registry.command()
|
||||
@click.option('--query', help='Search query')
|
||||
@click.option('--category', help='Filter by category')
|
||||
@click.option('--tags', help='Filter by tags (comma-separated)')
|
||||
@click.option('--limit', type=int, default=10, help='Number of results')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def search(query, category, tags, limit, test_mode):
|
||||
"""Search for plugins"""
|
||||
try:
|
||||
search_params = {
|
||||
"limit": limit
|
||||
}
|
||||
|
||||
if query:
|
||||
search_params["query"] = query
|
||||
if category:
|
||||
search_params["category"] = category
|
||||
if tags:
|
||||
search_params["tags"] = tags.split(',')
|
||||
|
||||
if test_mode:
|
||||
# Mock search results
|
||||
mock_results = [
|
||||
{
|
||||
"plugin_id": "trading-bot",
|
||||
"name": "Advanced Trading Bot",
|
||||
"version": "1.0.0",
|
||||
"description": "Automated trading bot with advanced algorithms",
|
||||
"relevance_score": 0.95
|
||||
},
|
||||
{
|
||||
"plugin_id": "oracle-feed",
|
||||
"name": "Oracle Price Feed",
|
||||
"version": "2.1.0",
|
||||
"description": "Real-time price oracle integration",
|
||||
"relevance_score": 0.87
|
||||
}
|
||||
]
|
||||
|
||||
click.echo(f"🔍 Search Results for '{query or 'all'}':")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for result in mock_results:
|
||||
click.echo(f"📦 {result['name']} (v{result['version']})")
|
||||
click.echo(f" 🆔 ID: {result['plugin_id']}")
|
||||
click.echo(f" 📝 {result['description'][:60]}...")
|
||||
click.echo(f" 📊 Relevance: {result['relevance_score']:.2f}")
|
||||
click.echo("")
|
||||
|
||||
return
|
||||
|
||||
# Search in registry service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/plugins/search",
|
||||
params=search_params,
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
plugins = result.get("plugins", [])
|
||||
|
||||
click.echo(f"🔍 Search Results for '{query or 'all'}':")
|
||||
click.echo("=" * 60)
|
||||
|
||||
for plugin in plugins:
|
||||
click.echo(f"📦 {plugin['name']} (v{plugin['version']})")
|
||||
click.echo(f" 🆔 ID: {plugin['plugin_id']}")
|
||||
click.echo(f" 📝 {plugin['description'][:60]}...")
|
||||
click.echo(f" 📊 Relevance: {plugin.get('relevance_score', 0):.2f}")
|
||||
click.echo("")
|
||||
else:
|
||||
click.echo(f"❌ Search failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error searching plugins: {str(e)}", err=True)
|
||||
|
||||
@plugin_registry.command()
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def status(test_mode):
|
||||
"""Get plugin registry status"""
|
||||
try:
|
||||
if test_mode:
|
||||
# Mock status data
|
||||
status_data = {
|
||||
"total_plugins": 156,
|
||||
"active_plugins": 142,
|
||||
"pending_plugins": 8,
|
||||
"inactive_plugins": 6,
|
||||
"total_downloads": 45678,
|
||||
"categories": {
|
||||
"trading": 45,
|
||||
"oracle": 32,
|
||||
"security": 28,
|
||||
"analytics": 25,
|
||||
"utility": 26
|
||||
},
|
||||
"recent_registrations": 12,
|
||||
"security_scans": {
|
||||
"passed": 148,
|
||||
"failed": 3,
|
||||
"pending": 5
|
||||
}
|
||||
}
|
||||
|
||||
click.echo("📊 Plugin Registry Status:")
|
||||
click.echo("=" * 40)
|
||||
click.echo(f"📦 Total Plugins: {status_data['total_plugins']}")
|
||||
click.echo(f"✅ Active Plugins: {status_data['active_plugins']}")
|
||||
click.echo(f"⏳ Pending Plugins: {status_data['pending_plugins']}")
|
||||
click.echo(f"❌ Inactive Plugins: {status_data['inactive_plugins']}")
|
||||
click.echo(f"📥 Total Downloads: {status_data['total_downloads']}")
|
||||
click.echo("")
|
||||
click.echo("📂 Categories:")
|
||||
for category, count in status_data['categories'].items():
|
||||
click.echo(f" {category}: {count}")
|
||||
click.echo("")
|
||||
click.echo("🔒 Security Scans:")
|
||||
click.echo(f" ✅ Passed: {status_data['security_scans']['passed']}")
|
||||
click.echo(f" ❌ Failed: {status_data['security_scans']['failed']}")
|
||||
click.echo(f" ⏳ Pending: {status_data['security_scans']['pending']}")
|
||||
return
|
||||
|
||||
# Get status from registry service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/plugins/status",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status = response.json()
|
||||
|
||||
click.echo("📊 Plugin Registry Status:")
|
||||
click.echo("=" * 40)
|
||||
click.echo(f"📦 Total Plugins: {status.get('total_plugins', 0)}")
|
||||
click.echo(f"✅ Active Plugins: {status.get('active_plugins', 0)}")
|
||||
click.echo(f"⏳ Pending Plugins: {status.get('pending_plugins', 0)}")
|
||||
click.echo(f"❌ Inactive Plugins: {status.get('inactive_plugins', 0)}")
|
||||
click.echo(f"📥 Total Downloads: {status.get('total_downloads', 0)}")
|
||||
click.echo(f"📈 Recent Registrations: {status.get('recent_registrations', 0)}")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get status: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting status: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8013",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
plugin_registry()
|
||||
99
cli/commands/plugin_security.py
Executable file
99
cli/commands/plugin_security.py
Executable file
@@ -0,0 +1,99 @@
|
||||
"""
|
||||
Plugin Security CLI Commands for AITBC
|
||||
Commands for plugin security scanning and vulnerability detection
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def plugin_security():
|
||||
"""Plugin security management commands"""
|
||||
pass
|
||||
|
||||
@plugin_security.command()
|
||||
@click.argument('plugin_id')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def scan(plugin_id, test_mode):
|
||||
"""Scan a plugin for security vulnerabilities"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo(f"🔒 Security scan started (test mode)")
|
||||
click.echo(f"📦 Plugin ID: {plugin_id}")
|
||||
click.echo(f"✅ Scan completed - No vulnerabilities found")
|
||||
return
|
||||
|
||||
# Send to security service
|
||||
config = get_config()
|
||||
response = requests.post(
|
||||
f"{config.coordinator_url}/api/v1/security/scan",
|
||||
json={"plugin_id": plugin_id},
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
click.echo(f"🔒 Security scan completed")
|
||||
click.echo(f"📦 Plugin ID: {result['plugin_id']}")
|
||||
click.echo(f"🛡️ Status: {result['status']}")
|
||||
click.echo(f"🔍 Vulnerabilities: {result['vulnerabilities_count']}")
|
||||
else:
|
||||
click.echo(f"❌ Security scan failed: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error scanning plugin: {str(e)}", err=True)
|
||||
|
||||
@plugin_security.command()
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def status(test_mode):
|
||||
"""Get plugin security status"""
|
||||
try:
|
||||
if test_mode:
|
||||
click.echo("🔒 Plugin Security Status (test mode)")
|
||||
click.echo("📊 Total Scans: 156")
|
||||
click.echo("✅ Passed: 148")
|
||||
click.echo("❌ Failed: 3")
|
||||
click.echo("⏳ Pending: 5")
|
||||
return
|
||||
|
||||
# Get status from security service
|
||||
config = get_config()
|
||||
response = requests.get(
|
||||
f"{config.coordinator_url}/api/v1/security/status",
|
||||
headers={"Authorization": f"Bearer {config.api_key}"},
|
||||
timeout=30
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
status = response.json()
|
||||
click.echo("🔒 Plugin Security Status")
|
||||
click.echo(f"📊 Total Scans: {status.get('total_scans', 0)}")
|
||||
click.echo(f"✅ Passed: {status.get('passed', 0)}")
|
||||
click.echo(f"❌ Failed: {status.get('failed', 0)}")
|
||||
click.echo(f"⏳ Pending: {status.get('pending', 0)}")
|
||||
else:
|
||||
click.echo(f"❌ Failed to get status: {response.text}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting status: {str(e)}", err=True)
|
||||
|
||||
# Helper function to get config
|
||||
def get_config():
|
||||
"""Get CLI configuration"""
|
||||
try:
|
||||
from config import get_config
|
||||
return get_config()
|
||||
except ImportError:
|
||||
# Fallback for testing
|
||||
from types import SimpleNamespace
|
||||
return SimpleNamespace(
|
||||
coordinator_url="http://localhost:8015",
|
||||
api_key="test-api-key"
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
|
||||
plugin_security()
|
||||
546
cli/commands/production_deploy.py
Executable file
546
cli/commands/production_deploy.py
Executable file
@@ -0,0 +1,546 @@
|
||||
"""
|
||||
Production Deployment CLI Commands for AITBC
|
||||
Commands for managing production deployment and operations
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
import subprocess
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def production_deploy():
|
||||
"""Production deployment management commands"""
|
||||
pass
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
@click.option('--version', default='latest', help='Version to deploy')
|
||||
@click.option('--region', default='us-east-1', help='Target region')
|
||||
@click.option('--dry-run', is_flag=True, help='Show what would be deployed without actually deploying')
|
||||
@click.option('--force', is_flag=True, help='Force deployment even if checks fail')
|
||||
def deploy(environment, version, region, dry_run, force):
|
||||
"""Deploy AITBC to production"""
|
||||
try:
|
||||
click.echo(f"🚀 Starting production deployment...")
|
||||
click.echo(f"🌍 Environment: {environment}")
|
||||
click.echo(f"📦 Version: {version}")
|
||||
click.echo(f"🗺️ Region: {region}")
|
||||
|
||||
if dry_run:
|
||||
click.echo("🔍 DRY RUN MODE - No actual deployment will be performed")
|
||||
|
||||
# Pre-deployment checks
|
||||
if not force:
|
||||
click.echo("🔍 Running pre-deployment checks...")
|
||||
checks = run_pre_deployment_checks(environment, dry_run)
|
||||
|
||||
if not all(checks.values()):
|
||||
failed_checks = [k for k, v in checks.items() if not v]
|
||||
click.echo(f"❌ Pre-deployment checks failed: {', '.join(failed_checks)}")
|
||||
click.echo("💡 Use --force to override or fix the issues and try again")
|
||||
return
|
||||
else:
|
||||
click.echo("✅ All pre-deployment checks passed")
|
||||
|
||||
# Backup current deployment
|
||||
if not dry_run:
|
||||
click.echo("💾 Creating backup of current deployment...")
|
||||
backup_result = create_backup(environment)
|
||||
click.echo(f"✅ Backup created: {backup_result['backup_id']}")
|
||||
else:
|
||||
click.echo("💾 DRY RUN: Would create backup of current deployment")
|
||||
|
||||
# Build images
|
||||
click.echo("🔨 Building production images...")
|
||||
build_result = build_production_images(version, dry_run)
|
||||
if not build_result['success']:
|
||||
click.echo(f"❌ Build failed: {build_result['error']}")
|
||||
return
|
||||
|
||||
# Deploy services
|
||||
click.echo("🚀 Deploying services...")
|
||||
deployment_result = deploy_services(environment, version, region, dry_run)
|
||||
if not deployment_result['success']:
|
||||
click.echo(f"❌ Deployment failed: {deployment_result['error']}")
|
||||
return
|
||||
|
||||
# Post-deployment tests
|
||||
click.echo("🧪 Running post-deployment tests...")
|
||||
test_result = run_post_deployment_tests(environment, dry_run)
|
||||
if not test_result['success']:
|
||||
click.echo(f"❌ Post-deployment tests failed: {test_result['error']}")
|
||||
click.echo("🔄 Rolling back deployment...")
|
||||
rollback_result = rollback_deployment(environment, backup_result['backup_id'])
|
||||
click.echo(f"🔄 Rollback completed: {rollback_result['status']}")
|
||||
return
|
||||
|
||||
# Success
|
||||
click.echo("🎉 Production deployment completed successfully!")
|
||||
click.echo(f"🌍 Environment: {environment}")
|
||||
click.echo(f"📦 Version: {version}")
|
||||
click.echo(f"🗺️ Region: {region}")
|
||||
click.echo(f"📅 Deployed at: {datetime.utcnow().isoformat()}")
|
||||
|
||||
if not dry_run:
|
||||
click.echo("🔗 Service URLs:")
|
||||
click.echo(" 🌐 API: https://api.aitbc.dev")
|
||||
click.echo(" 🛒 Marketplace: https://marketplace.aitbc.dev")
|
||||
click.echo(" 🔍 Explorer: https://explorer.aitbc.dev")
|
||||
click.echo(" 📊 Grafana: https://grafana.aitbc.dev")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Deployment error: {str(e)}", err=True)
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
@click.option('--backup-id', help='Specific backup ID to rollback to')
|
||||
@click.option('--dry-run', is_flag=True, help='Show what would be rolled back without actually rolling back')
|
||||
def rollback(environment, backup_id, dry_run):
|
||||
"""Rollback production deployment"""
|
||||
try:
|
||||
click.echo(f"🔄 Starting production rollback...")
|
||||
click.echo(f"🌍 Environment: {environment}")
|
||||
|
||||
if dry_run:
|
||||
click.echo("🔍 DRY RUN MODE - No actual rollback will be performed")
|
||||
|
||||
# Get current deployment info
|
||||
current_info = get_current_deployment_info(environment)
|
||||
click.echo(f"📦 Current Version: {current_info['version']}")
|
||||
click.echo(f"📅 Deployed At: {current_info['deployed_at']}")
|
||||
|
||||
# Get backup info
|
||||
if backup_id:
|
||||
backup_info = get_backup_info(backup_id)
|
||||
else:
|
||||
# Get latest backup
|
||||
backup_info = get_latest_backup(environment)
|
||||
backup_id = backup_info['backup_id']
|
||||
|
||||
click.echo(f"💾 Rolling back to backup: {backup_id}")
|
||||
click.echo(f"📦 Backup Version: {backup_info['version']}")
|
||||
click.echo(f"📅 Backup Created: {backup_info['created_at']}")
|
||||
|
||||
if not dry_run:
|
||||
# Perform rollback
|
||||
rollback_result = rollback_deployment(environment, backup_id)
|
||||
|
||||
if rollback_result['success']:
|
||||
click.echo("✅ Rollback completed successfully!")
|
||||
click.echo(f"📦 New Version: {backup_info['version']}")
|
||||
click.echo(f"📅 Rolled back at: {datetime.utcnow().isoformat()}")
|
||||
else:
|
||||
click.echo(f"❌ Rollback failed: {rollback_result['error']}")
|
||||
else:
|
||||
click.echo("🔄 DRY RUN: Would rollback to specified backup")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Rollback error: {str(e)}", err=True)
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
@click.option('--limit', type=int, default=10, help='Number of recent deployments to show')
|
||||
def history(environment, limit):
|
||||
"""Show deployment history"""
|
||||
try:
|
||||
click.echo(f"📜 Deployment History for {environment}")
|
||||
click.echo("=" * 60)
|
||||
|
||||
# Get deployment history
|
||||
history_data = get_deployment_history(environment, limit)
|
||||
|
||||
for deployment in history_data:
|
||||
status_icon = "✅" if deployment['status'] == 'success' else "❌"
|
||||
click.echo(f"{status_icon} {deployment['version']} - {deployment['deployed_at']}")
|
||||
click.echo(f" 🌍 Region: {deployment['region']}")
|
||||
click.echo(f" 📊 Status: {deployment['status']}")
|
||||
click.echo(f" ⏱️ Duration: {deployment.get('duration', 'N/A')}")
|
||||
click.echo(f" 👤 Deployed by: {deployment.get('deployed_by', 'N/A')}")
|
||||
click.echo("")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting deployment history: {str(e)}", err=True)
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
def status(environment):
|
||||
"""Show current deployment status"""
|
||||
try:
|
||||
click.echo(f"📊 Current Deployment Status for {environment}")
|
||||
click.echo("=" * 60)
|
||||
|
||||
# Get current status
|
||||
status_data = get_deployment_status(environment)
|
||||
|
||||
click.echo(f"📦 Version: {status_data['version']}")
|
||||
click.echo(f"🌍 Region: {status_data['region']}")
|
||||
click.echo(f"📊 Status: {status_data['status']}")
|
||||
click.echo(f"📅 Deployed At: {status_data['deployed_at']}")
|
||||
click.echo(f"⏱️ Uptime: {status_data['uptime']}")
|
||||
click.echo("")
|
||||
|
||||
# Service status
|
||||
click.echo("🔧 Service Status:")
|
||||
for service, service_status in status_data['services'].items():
|
||||
status_icon = "✅" if service_status['healthy'] else "❌"
|
||||
click.echo(f" {status_icon} {service}: {service_status['status']}")
|
||||
if service_status.get('replicas'):
|
||||
click.echo(f" 📊 Replicas: {service_status['replicas']['ready']}/{service_status['replicas']['total']}")
|
||||
click.echo("")
|
||||
|
||||
# Performance metrics
|
||||
if status_data.get('performance'):
|
||||
click.echo("📈 Performance Metrics:")
|
||||
perf = status_data['performance']
|
||||
click.echo(f" 💻 CPU Usage: {perf.get('cpu_usage', 'N/A')}%")
|
||||
click.echo(f" 🧠 Memory Usage: {perf.get('memory_usage', 'N/A')}%")
|
||||
click.echo(f" 📥 Requests/sec: {perf.get('requests_per_second', 'N/A')}")
|
||||
click.echo(f" ⚡ Response Time: {perf.get('avg_response_time', 'N/A')}ms")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Error getting deployment status: {str(e)}", err=True)
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
@click.option('--service', help='Specific service to restart')
|
||||
@click.option('--dry-run', is_flag=True, help='Show what would be restarted without actually restarting')
|
||||
def restart(environment, service, dry_run):
|
||||
"""Restart services in production"""
|
||||
try:
|
||||
click.echo(f"🔄 Restarting services in {environment}")
|
||||
|
||||
if service:
|
||||
click.echo(f"🔧 Service: {service}")
|
||||
else:
|
||||
click.echo("🔧 All services")
|
||||
|
||||
if dry_run:
|
||||
click.echo("🔍 DRY RUN MODE - No actual restart will be performed")
|
||||
|
||||
# Get current status
|
||||
current_status = get_deployment_status(environment)
|
||||
|
||||
if service:
|
||||
if service not in current_status['services']:
|
||||
click.echo(f"❌ Service '{service}' not found")
|
||||
return
|
||||
services_to_restart = [service]
|
||||
else:
|
||||
services_to_restart = list(current_status['services'].keys())
|
||||
|
||||
click.echo(f"🔧 Services to restart: {', '.join(services_to_restart)}")
|
||||
|
||||
if not dry_run:
|
||||
# Restart services
|
||||
restart_result = restart_services(environment, services_to_restart)
|
||||
|
||||
if restart_result['success']:
|
||||
click.echo("✅ Services restarted successfully!")
|
||||
for svc in services_to_restart:
|
||||
click.echo(f" 🔄 {svc}: Restarted")
|
||||
else:
|
||||
click.echo(f"❌ Restart failed: {restart_result['error']}")
|
||||
else:
|
||||
click.echo("🔄 DRY RUN: Would restart specified services")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Restart error: {str(e)}", err=True)
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
@click.option('--test-type', default='smoke', help='Test type (smoke, load, security)')
|
||||
@click.option('--timeout', type=int, default=300, help='Test timeout in seconds')
|
||||
def test(environment, test_type, timeout):
|
||||
"""Run production tests"""
|
||||
try:
|
||||
click.echo(f"🧪 Running {test_type} tests in {environment}")
|
||||
click.echo(f"⏱️ Timeout: {timeout} seconds")
|
||||
|
||||
# Run tests
|
||||
test_result = run_production_tests(environment, test_type, timeout)
|
||||
|
||||
if test_result['success']:
|
||||
click.echo("✅ All tests passed!")
|
||||
click.echo(f"📊 Test Results:")
|
||||
click.echo(f" 🧪 Test Type: {test_type}")
|
||||
click.echo(f" ⏱️ Duration: {test_result['duration']} seconds")
|
||||
click.echo(f" ✅ Passed: {test_result['passed']}")
|
||||
click.echo(f" ❌ Failed: {test_result['failed']}")
|
||||
else:
|
||||
click.echo("❌ Tests failed!")
|
||||
click.echo(f"📊 Test Results:")
|
||||
click.echo(f" 🧪 Test Type: {test_type}")
|
||||
click.echo(f" ⏱️ Duration: {test_result['duration']} seconds")
|
||||
click.echo(f" ✅ Passed: {test_result['passed']}")
|
||||
click.echo(f" ❌ Failed: {test_result['failed']}")
|
||||
|
||||
if test_result.get('failures'):
|
||||
click.echo("")
|
||||
click.echo("❌ Failed Tests:")
|
||||
for failure in test_result['failures']:
|
||||
click.echo(f" ❌ {failure['test']}: {failure['error']}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test error: {str(e)}", err=True)
|
||||
|
||||
@production_deploy.command()
|
||||
@click.option('--environment', default='production', help='Target environment')
|
||||
@click.option('--days', type=int, default=7, help='Number of days to include in report')
|
||||
def report(environment, days):
|
||||
"""Generate production deployment report"""
|
||||
try:
|
||||
click.echo(f"📊 Production Deployment Report for {environment}")
|
||||
click.echo(f"📅 Last {days} days")
|
||||
click.echo("=" * 60)
|
||||
|
||||
# Get report data
|
||||
report_data = generate_deployment_report(environment, days)
|
||||
|
||||
# Overview
|
||||
overview = report_data['overview']
|
||||
click.echo("📈 Overview:")
|
||||
click.echo(f" 🚀 Total Deployments: {overview['total_deployments']}")
|
||||
click.echo(f" ✅ Successful: {overview['successful_deployments']}")
|
||||
click.echo(f" ❌ Failed: {overview['failed_deployments']}")
|
||||
click.echo(f" 📊 Success Rate: {overview['success_rate']:.1f}%")
|
||||
click.echo(f" ⏱️ Avg Deployment Time: {overview['avg_deployment_time']} minutes")
|
||||
click.echo("")
|
||||
|
||||
# Recent deployments
|
||||
click.echo("📜 Recent Deployments:")
|
||||
for deployment in report_data['recent_deployments']:
|
||||
status_icon = "✅" if deployment['status'] == 'success' else "❌"
|
||||
click.echo(f" {status_icon} {deployment['version']} - {deployment['deployed_at']}")
|
||||
click.echo(f" 📊 Status: {deployment['status']}")
|
||||
click.echo(f" ⏱️ Duration: {deployment['duration']} minutes")
|
||||
click.echo("")
|
||||
|
||||
# Service health
|
||||
click.echo("🔧 Service Health:")
|
||||
for service, health in report_data['service_health'].items():
|
||||
health_icon = "✅" if health['healthy'] else "❌"
|
||||
uptime = health.get('uptime_percentage', 0)
|
||||
click.echo(f" {health_icon} {service}: {uptime:.1f}% uptime")
|
||||
click.echo("")
|
||||
|
||||
# Performance metrics
|
||||
if report_data.get('performance_metrics'):
|
||||
click.echo("📈 Performance Metrics:")
|
||||
perf = report_data['performance_metrics']
|
||||
click.echo(f" 💻 Avg CPU Usage: {perf['avg_cpu_usage']:.1f}%")
|
||||
click.echo(f" 🧠 Avg Memory Usage: {perf['avg_memory_usage']:.1f}%")
|
||||
click.echo(f" 📥 Avg Requests/sec: {perf['avg_requests_per_second']}")
|
||||
click.echo(f" ⚡ Avg Response Time: {perf['avg_response_time']:.1f}ms")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Report generation error: {str(e)}", err=True)
|
||||
|
||||
# Helper functions
|
||||
def run_pre_deployment_checks(environment, dry_run):
|
||||
"""Run pre-deployment checks"""
|
||||
if dry_run:
|
||||
return {
|
||||
"tests": True,
|
||||
"infrastructure": True,
|
||||
"services": True,
|
||||
"security": True
|
||||
}
|
||||
|
||||
# In production, these would be actual checks
|
||||
checks = {
|
||||
"tests": True,
|
||||
"infrastructure": True,
|
||||
"services": True,
|
||||
"security": True
|
||||
}
|
||||
|
||||
return checks
|
||||
|
||||
def create_backup(environment):
|
||||
"""Create backup of current deployment"""
|
||||
backup_id = f"backup_{environment}_{int(datetime.utcnow().timestamp())}"
|
||||
return {
|
||||
"backup_id": backup_id,
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"status": "completed"
|
||||
}
|
||||
|
||||
def build_production_images(version, dry_run):
|
||||
"""Build production images"""
|
||||
if dry_run:
|
||||
return {"success": True}
|
||||
|
||||
try:
|
||||
# Simulate build process
|
||||
return {"success": True}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def deploy_services(environment, version, region, dry_run):
|
||||
"""Deploy services"""
|
||||
if dry_run:
|
||||
return {"success": True}
|
||||
|
||||
try:
|
||||
# Simulate deployment
|
||||
return {"success": True}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def run_post_deployment_tests(environment, dry_run):
|
||||
"""Run post-deployment tests"""
|
||||
if dry_run:
|
||||
return {"success": True}
|
||||
|
||||
try:
|
||||
# Simulate tests
|
||||
return {"success": True}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def rollback_deployment(environment, backup_id):
|
||||
"""Rollback deployment"""
|
||||
return {
|
||||
"status": "completed",
|
||||
"backup_id": backup_id,
|
||||
"rolled_back_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
def get_current_deployment_info(environment):
|
||||
"""Get current deployment info"""
|
||||
return {
|
||||
"version": "1.0.0",
|
||||
"deployed_at": "2024-03-01T10:30:00Z",
|
||||
"environment": environment
|
||||
}
|
||||
|
||||
def get_backup_info(backup_id):
|
||||
"""Get backup info"""
|
||||
return {
|
||||
"backup_id": backup_id,
|
||||
"version": "0.9.0",
|
||||
"created_at": "2024-02-28T15:45:00Z"
|
||||
}
|
||||
|
||||
def get_latest_backup(environment):
|
||||
"""Get latest backup"""
|
||||
return {
|
||||
"backup_id": f"backup_{environment}_latest",
|
||||
"version": "0.9.0",
|
||||
"created_at": "2024-02-28T15:45:00Z"
|
||||
}
|
||||
|
||||
def get_deployment_history(environment, limit):
|
||||
"""Get deployment history"""
|
||||
return [
|
||||
{
|
||||
"version": "1.0.0",
|
||||
"deployed_at": "2024-03-01T10:30:00Z",
|
||||
"status": "success",
|
||||
"region": "us-east-1",
|
||||
"duration": 15,
|
||||
"deployed_by": "ci-cd"
|
||||
},
|
||||
{
|
||||
"version": "0.9.0",
|
||||
"deployed_at": "2024-02-28T15:45:00Z",
|
||||
"status": "success",
|
||||
"region": "us-east-1",
|
||||
"duration": 12,
|
||||
"deployed_by": "ci-cd"
|
||||
}
|
||||
]
|
||||
|
||||
def get_deployment_status(environment):
|
||||
"""Get deployment status"""
|
||||
return {
|
||||
"version": "1.0.0",
|
||||
"region": "us-east-1",
|
||||
"status": "healthy",
|
||||
"deployed_at": "2024-03-01T10:30:00Z",
|
||||
"uptime": "2 days, 5 hours",
|
||||
"services": {
|
||||
"coordinator-api": {
|
||||
"status": "running",
|
||||
"healthy": True,
|
||||
"replicas": {"ready": 3, "total": 3}
|
||||
},
|
||||
"exchange-integration": {
|
||||
"status": "running",
|
||||
"healthy": True,
|
||||
"replicas": {"ready": 2, "total": 2}
|
||||
},
|
||||
"trading-engine": {
|
||||
"status": "running",
|
||||
"healthy": True,
|
||||
"replicas": {"ready": 3, "total": 3}
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"cpu_usage": 45.2,
|
||||
"memory_usage": 62.8,
|
||||
"requests_per_second": 1250,
|
||||
"avg_response_time": 85.3
|
||||
}
|
||||
}
|
||||
|
||||
def restart_services(environment, services):
|
||||
"""Restart services"""
|
||||
return {
|
||||
"success": True,
|
||||
"restarted_services": services,
|
||||
"restarted_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
def run_production_tests(environment, test_type, timeout):
|
||||
"""Run production tests"""
|
||||
return {
|
||||
"success": True,
|
||||
"duration": 45,
|
||||
"passed": 10,
|
||||
"failed": 0,
|
||||
"failures": []
|
||||
}
|
||||
|
||||
def generate_deployment_report(environment, days):
|
||||
"""Generate deployment report"""
|
||||
return {
|
||||
"overview": {
|
||||
"total_deployments": 5,
|
||||
"successful_deployments": 4,
|
||||
"failed_deployments": 1,
|
||||
"success_rate": 80.0,
|
||||
"avg_deployment_time": 13.5
|
||||
},
|
||||
"recent_deployments": [
|
||||
{
|
||||
"version": "1.0.0",
|
||||
"deployed_at": "2024-03-01T10:30:00Z",
|
||||
"status": "success",
|
||||
"duration": 15
|
||||
},
|
||||
{
|
||||
"version": "0.9.0",
|
||||
"deployed_at": "2024-02-28T15:45:00Z",
|
||||
"status": "success",
|
||||
"duration": 12
|
||||
}
|
||||
],
|
||||
"service_health": {
|
||||
"coordinator-api": {"healthy": True, "uptime_percentage": 99.9},
|
||||
"exchange-integration": {"healthy": True, "uptime_percentage": 99.8},
|
||||
"trading-engine": {"healthy": True, "uptime_percentage": 99.7}
|
||||
},
|
||||
"performance_metrics": {
|
||||
"avg_cpu_usage": 45.2,
|
||||
"avg_memory_usage": 62.8,
|
||||
"avg_requests_per_second": 1250,
|
||||
"avg_response_time": 85.3
|
||||
}
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
|
||||
production_deploy()
|
||||
483
cli/commands/regulatory.py
Executable file
483
cli/commands/regulatory.py
Executable file
@@ -0,0 +1,483 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Regulatory Reporting CLI Commands
|
||||
Generate and manage regulatory compliance reports
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime, timedelta
|
||||
from imports import ensure_coordinator_api_imports
|
||||
|
||||
ensure_coordinator_api_imports()
|
||||
|
||||
try:
|
||||
from app.services.regulatory_reporting import (
|
||||
generate_sar, generate_compliance_summary, list_reports,
|
||||
regulatory_reporter, ReportType, ReportStatus, RegulatoryBody
|
||||
)
|
||||
_import_error = None
|
||||
except ImportError as e:
|
||||
_import_error = e
|
||||
|
||||
def _missing(*args, **kwargs):
|
||||
raise ImportError(
|
||||
f"Required service module 'app.services.regulatory_reporting' could not be imported: {_import_error}. "
|
||||
"Ensure coordinator-api dependencies are installed and the source directory is accessible."
|
||||
)
|
||||
generate_sar = generate_compliance_summary = list_reports = regulatory_reporter = _missing
|
||||
|
||||
class ReportType:
|
||||
pass
|
||||
class ReportStatus:
|
||||
pass
|
||||
class RegulatoryBody:
|
||||
pass
|
||||
|
||||
@click.group()
|
||||
def regulatory():
|
||||
"""Regulatory reporting and compliance management commands"""
|
||||
pass
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--user-id", required=True, help="User ID for suspicious activity")
|
||||
@click.option("--activity-type", required=True, help="Type of suspicious activity")
|
||||
@click.option("--amount", type=float, required=True, help="Amount involved in USD")
|
||||
@click.option("--description", required=True, help="Description of suspicious activity")
|
||||
@click.option("--risk-score", type=float, default=0.5, help="Risk score (0.0-1.0)")
|
||||
@click.option("--currency", default="USD", help="Currency code")
|
||||
@click.pass_context
|
||||
def generate_sar(ctx, user_id: str, activity_type: str, amount: float, description: str, risk_score: float, currency: str):
|
||||
"""Generate Suspicious Activity Report (SAR)"""
|
||||
try:
|
||||
click.echo(f"🔍 Generating Suspicious Activity Report...")
|
||||
click.echo(f"👤 User ID: {user_id}")
|
||||
click.echo(f"📊 Activity Type: {activity_type}")
|
||||
click.echo(f"💰 Amount: ${amount:,.2f} {currency}")
|
||||
click.echo(f"⚠️ Risk Score: {risk_score:.2f}")
|
||||
|
||||
# Create suspicious activity data
|
||||
activity = {
|
||||
"id": f"sar_{user_id}_{int(datetime.now().timestamp())}",
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"user_id": user_id,
|
||||
"type": activity_type,
|
||||
"description": description,
|
||||
"amount": amount,
|
||||
"currency": currency,
|
||||
"risk_score": risk_score,
|
||||
"indicators": [activity_type, "high_risk"],
|
||||
"evidence": {"cli_generated": True}
|
||||
}
|
||||
|
||||
# Generate SAR
|
||||
result = asyncio.run(generate_sar([activity]))
|
||||
|
||||
click.echo(f"\n✅ SAR Report Generated Successfully!")
|
||||
click.echo(f"📋 Report ID: {result['report_id']}")
|
||||
click.echo(f"📄 Report Type: {result['report_type'].upper()}")
|
||||
click.echo(f"📊 Status: {result['status'].title()}")
|
||||
click.echo(f"📅 Generated: {result['generated_at']}")
|
||||
|
||||
# Show next steps
|
||||
click.echo(f"\n📝 Next Steps:")
|
||||
click.echo(f" 1. Review the generated report")
|
||||
click.echo(f" 2. Submit to regulatory body when ready")
|
||||
click.echo(f" 3. Maintain records for 5 years (BSA requirement)")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ SAR generation failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--period-start", required=True, help="Start date (YYYY-MM-DD)")
|
||||
@click.option("--period-end", required=True, help="End date (YYYY-MM-DD)")
|
||||
@click.pass_context
|
||||
def compliance_summary(ctx, period_start: str, period_end: str):
|
||||
"""Generate comprehensive compliance summary report"""
|
||||
try:
|
||||
# Parse dates
|
||||
start_date = datetime.strptime(period_start, "%Y-%m-%d")
|
||||
end_date = datetime.strptime(period_end, "%Y-%m-%d")
|
||||
|
||||
click.echo(f"📊 Generating Compliance Summary...")
|
||||
click.echo(f"📅 Period: {period_start} to {period_end}")
|
||||
click.echo(f"📈 Duration: {(end_date - start_date).days} days")
|
||||
|
||||
# Generate compliance summary
|
||||
result = asyncio.run(generate_compliance_summary(
|
||||
start_date.isoformat(),
|
||||
end_date.isoformat()
|
||||
))
|
||||
|
||||
click.echo(f"\n✅ Compliance Summary Generated!")
|
||||
click.echo(f"📋 Report ID: {result['report_id']}")
|
||||
click.echo(f"📊 Overall Compliance Score: {result['overall_score']:.1%}")
|
||||
click.echo(f"📅 Generated: {result['generated_at']}")
|
||||
|
||||
# Get detailed report content
|
||||
report = regulatory_reporter._find_report(result['report_id'])
|
||||
if report:
|
||||
content = report.content
|
||||
|
||||
click.echo(f"\n📈 Executive Summary:")
|
||||
exec_summary = content.get('executive_summary', {})
|
||||
click.echo(f" Critical Issues: {exec_summary.get('critical_issues', 0)}")
|
||||
click.echo(f" Regulatory Filings: {exec_summary.get('regulatory_filings', 0)}")
|
||||
|
||||
click.echo(f"\n👥 KYC Compliance:")
|
||||
kyc = content.get('kyc_compliance', {})
|
||||
click.echo(f" Total Customers: {kyc.get('total_customers', 0):,}")
|
||||
click.echo(f" Verified Customers: {kyc.get('verified_customers', 0):,}")
|
||||
click.echo(f" Completion Rate: {kyc.get('completion_rate', 0):.1%}")
|
||||
|
||||
click.echo(f"\n🔍 AML Compliance:")
|
||||
aml = content.get('aml_compliance', {})
|
||||
click.echo(f" Transaction Monitoring: {'✅ Active' if aml.get('transaction_monitoring') else '❌ Inactive'}")
|
||||
click.echo(f" SARs Filed: {aml.get('suspicious_activity_reports', 0)}")
|
||||
click.echo(f" CTRs Filed: {aml.get('currency_transaction_reports', 0)}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Compliance summary generation failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--report-type", type=click.Choice(['sar', 'ctr', 'aml_report', 'compliance_summary']), help="Filter by report type")
|
||||
@click.option("--status", type=click.Choice(['draft', 'pending_review', 'submitted', 'accepted', 'rejected']), help="Filter by status")
|
||||
@click.option("--limit", type=int, default=20, help="Maximum number of reports to show")
|
||||
@click.pass_context
|
||||
def list(ctx, report_type: str, status: str, limit: int):
|
||||
"""List regulatory reports"""
|
||||
try:
|
||||
click.echo(f"📋 Regulatory Reports")
|
||||
|
||||
reports = list_reports(report_type, status)
|
||||
|
||||
if not reports:
|
||||
click.echo(f"✅ No reports found")
|
||||
return
|
||||
|
||||
click.echo(f"\n📊 Total Reports: {len(reports)}")
|
||||
|
||||
if report_type:
|
||||
click.echo(f"🔍 Filtered by type: {report_type.upper()}")
|
||||
|
||||
if status:
|
||||
click.echo(f"🔍 Filtered by status: {status.title()}")
|
||||
|
||||
# Display reports
|
||||
for i, report in enumerate(reports[:limit]):
|
||||
status_icon = {
|
||||
"draft": "📝",
|
||||
"pending_review": "⏳",
|
||||
"submitted": "📤",
|
||||
"accepted": "✅",
|
||||
"rejected": "❌"
|
||||
}.get(report['status'], "❓")
|
||||
|
||||
click.echo(f"\n{status_icon} Report #{i+1}")
|
||||
click.echo(f" ID: {report['report_id']}")
|
||||
click.echo(f" Type: {report['report_type'].upper()}")
|
||||
click.echo(f" Body: {report['regulatory_body'].upper()}")
|
||||
click.echo(f" Status: {report['status'].title()}")
|
||||
click.echo(f" Generated: {report['generated_at'][:19]}")
|
||||
|
||||
if len(reports) > limit:
|
||||
click.echo(f"\n... and {len(reports) - limit} more reports")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to list reports: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--report-id", required=True, help="Report ID to export")
|
||||
@click.option("--format", type=click.Choice(['json', 'csv', 'xml']), default="json", help="Export format")
|
||||
@click.option("--output", help="Output file path (default: stdout)")
|
||||
@click.pass_context
|
||||
def export(ctx, report_id: str, format: str, output: str):
|
||||
"""Export regulatory report"""
|
||||
try:
|
||||
click.echo(f"📤 Exporting Report: {report_id}")
|
||||
click.echo(f"📄 Format: {format.upper()}")
|
||||
|
||||
# Export report
|
||||
content = regulatory_reporter.export_report(report_id, format)
|
||||
|
||||
if output:
|
||||
with open(output, 'w') as f:
|
||||
f.write(content)
|
||||
click.echo(f"✅ Report exported to: {output}")
|
||||
else:
|
||||
click.echo(f"\n📄 Report Content:")
|
||||
click.echo("=" * 60)
|
||||
click.echo(content)
|
||||
click.echo("=" * 60)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Export failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--report-id", required=True, help="Report ID to submit")
|
||||
@click.pass_context
|
||||
def submit(ctx, report_id: str):
|
||||
"""Submit report to regulatory body"""
|
||||
try:
|
||||
click.echo(f"📤 Submitting Report: {report_id}")
|
||||
|
||||
# Get report details
|
||||
report = regulatory_reporter._find_report(report_id)
|
||||
if not report:
|
||||
click.echo(f"❌ Report {report_id} not found")
|
||||
return
|
||||
|
||||
click.echo(f"📄 Type: {report.report_type.value.upper()}")
|
||||
click.echo(f"🏢 Regulatory Body: {report.regulatory_body.value.upper()}")
|
||||
click.echo(f"📊 Current Status: {report.status.value.title()}")
|
||||
|
||||
if report.status != ReportStatus.DRAFT:
|
||||
click.echo(f"⚠️ Report already submitted")
|
||||
return
|
||||
|
||||
# Submit report
|
||||
success = asyncio.run(regulatory_reporter.submit_report(report_id))
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ Report submitted successfully!")
|
||||
click.echo(f"📅 Submitted: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
||||
click.echo(f"🏢 Submitted to: {report.regulatory_body.value.upper()}")
|
||||
|
||||
# Show submission details
|
||||
click.echo(f"\n📋 Submission Details:")
|
||||
click.echo(f" Report ID: {report_id}")
|
||||
click.echo(f" Regulatory Body: {report.regulatory_body.value}")
|
||||
click.echo(f" Submission Method: Electronic Filing")
|
||||
click.echo(f" Confirmation: Pending")
|
||||
else:
|
||||
click.echo(f"❌ Report submission failed")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Submission failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--report-id", required=True, help="Report ID to check")
|
||||
@click.pass_context
|
||||
def status(ctx, report_id: str):
|
||||
"""Check report status"""
|
||||
try:
|
||||
click.echo(f"📊 Report Status: {report_id}")
|
||||
|
||||
report_status = regulatory_reporter.get_report_status(report_id)
|
||||
|
||||
if not report_status:
|
||||
click.echo(f"❌ Report {report_id} not found")
|
||||
return
|
||||
|
||||
status_icon = {
|
||||
"draft": "📝",
|
||||
"pending_review": "⏳",
|
||||
"submitted": "📤",
|
||||
"accepted": "✅",
|
||||
"rejected": "❌"
|
||||
}.get(report_status['status'], "❓")
|
||||
|
||||
click.echo(f"\n{status_icon} Report Details:")
|
||||
click.echo(f" ID: {report_status['report_id']}")
|
||||
click.echo(f" Type: {report_status['report_type'].upper()}")
|
||||
click.echo(f" Body: {report_status['regulatory_body'].upper()}")
|
||||
click.echo(f" Status: {report_status['status'].title()}")
|
||||
click.echo(f" Generated: {report_status['generated_at'][:19]}")
|
||||
|
||||
if report_status['submitted_at']:
|
||||
click.echo(f" Submitted: {report_status['submitted_at'][:19]}")
|
||||
|
||||
if report_status['expires_at']:
|
||||
click.echo(f" Expires: {report_status['expires_at'][:19]}")
|
||||
|
||||
# Show next actions based on status
|
||||
click.echo(f"\n📝 Next Actions:")
|
||||
if report_status['status'] == 'draft':
|
||||
click.echo(f" • Review and edit report content")
|
||||
click.echo(f" • Submit to regulatory body when ready")
|
||||
elif report_status['status'] == 'submitted':
|
||||
click.echo(f" • Wait for regulatory body response")
|
||||
click.echo(f" • Monitor submission status")
|
||||
elif report_status['status'] == 'accepted':
|
||||
click.echo(f" • Store confirmation records")
|
||||
click.echo(f" • Update compliance documentation")
|
||||
elif report_status['status'] == 'rejected':
|
||||
click.echo(f" • Review rejection reasons")
|
||||
click.echo(f" • Resubmit corrected report")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Status check failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.pass_context
|
||||
def overview(ctx):
|
||||
"""Show regulatory reporting overview"""
|
||||
try:
|
||||
click.echo(f"📊 Regulatory Reporting Overview")
|
||||
|
||||
all_reports = regulatory_reporter.reports
|
||||
|
||||
if not all_reports:
|
||||
click.echo(f"📝 No reports generated yet")
|
||||
return
|
||||
|
||||
# Statistics
|
||||
total_reports = len(all_reports)
|
||||
by_type = {}
|
||||
by_status = {}
|
||||
by_body = {}
|
||||
|
||||
for report in all_reports:
|
||||
# By type
|
||||
rt = report.report_type.value
|
||||
by_type[rt] = by_type.get(rt, 0) + 1
|
||||
|
||||
# By status
|
||||
st = report.status.value
|
||||
by_status[st] = by_status.get(st, 0) + 1
|
||||
|
||||
# By regulatory body
|
||||
rb = report.regulatory_body.value
|
||||
by_body[rb] = by_body.get(rb, 0) + 1
|
||||
|
||||
click.echo(f"\n📈 Overall Statistics:")
|
||||
click.echo(f" Total Reports: {total_reports}")
|
||||
click.echo(f" Report Types: {len(by_type)}")
|
||||
click.echo(f" Regulatory Bodies: {len(by_body)}")
|
||||
|
||||
click.echo(f"\n📋 Reports by Type:")
|
||||
for report_type, count in sorted(by_type.items()):
|
||||
click.echo(f" {report_type.upper()}: {count}")
|
||||
|
||||
click.echo(f"\n📊 Reports by Status:")
|
||||
status_icons = {"draft": "📝", "pending_review": "⏳", "submitted": "📤", "accepted": "✅", "rejected": "❌"}
|
||||
for status, count in sorted(by_status.items()):
|
||||
icon = status_icons.get(status, "❓")
|
||||
click.echo(f" {icon} {status.title()}: {count}")
|
||||
|
||||
click.echo(f"\n🏢 Reports by Regulatory Body:")
|
||||
for body, count in sorted(by_body.items()):
|
||||
click.echo(f" {body.upper()}: {count}")
|
||||
|
||||
# Recent activity
|
||||
recent_reports = sorted(all_reports, key=lambda x: x.generated_at, reverse=True)[:5]
|
||||
click.echo(f"\n📅 Recent Activity:")
|
||||
for report in recent_reports:
|
||||
click.echo(f" {report.generated_at.strftime('%Y-%m-%d %H:%M')} - {report.report_type.value.upper()} ({report.status.value})")
|
||||
|
||||
# Compliance reminders
|
||||
click.echo(f"\n⚠️ Compliance Reminders:")
|
||||
click.echo(f" • SAR reports must be filed within 30 days of detection")
|
||||
click.echo(f" • CTR reports required for transactions over $10,000")
|
||||
click.echo(f" • Maintain records for minimum 5 years")
|
||||
click.echo(f" • Annual AML program review required")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Overview failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.pass_context
|
||||
def templates(ctx):
|
||||
"""Show available report templates and requirements"""
|
||||
try:
|
||||
click.echo(f"📋 Regulatory Report Templates")
|
||||
|
||||
templates = regulatory_reporter.templates
|
||||
|
||||
for template_name, template_data in templates.items():
|
||||
click.echo(f"\n📄 {template_name.upper()}:")
|
||||
click.echo(f" Format: {template_data['format'].upper()}")
|
||||
click.echo(f" Schema: {template_data['schema']}")
|
||||
click.echo(f" Required Fields ({len(template_data['required_fields'])}):")
|
||||
|
||||
for field in template_data['required_fields']:
|
||||
click.echo(f" • {field}")
|
||||
|
||||
click.echo(f"\n🏢 Regulatory Bodies:")
|
||||
bodies = {
|
||||
"FINCEN": "Financial Crimes Enforcement Network (US Treasury)",
|
||||
"SEC": "Securities and Exchange Commission",
|
||||
"FINRA": "Financial Industry Regulatory Authority",
|
||||
"CFTC": "Commodity Futures Trading Commission",
|
||||
"OFAC": "Office of Foreign Assets Control",
|
||||
"EU_REGULATOR": "European Union Regulatory Authorities"
|
||||
}
|
||||
|
||||
for body, description in bodies.items():
|
||||
click.echo(f"\n🏛️ {body}:")
|
||||
click.echo(f" {description}")
|
||||
|
||||
click.echo(f"\n📝 Filing Requirements:")
|
||||
click.echo(f" • SAR: File within 30 days of suspicious activity detection")
|
||||
click.echo(f" • CTR: File for cash transactions over $10,000")
|
||||
click.echo(f" • AML Reports: Quarterly and annual requirements")
|
||||
click.echo(f" • Compliance Summary: Annual filing requirement")
|
||||
|
||||
click.echo(f"\n⏰ Filing Deadlines:")
|
||||
click.echo(f" • SAR: 30 days from detection")
|
||||
click.echo(f" • CTR: 15 days from transaction")
|
||||
click.echo(f" • Quarterly AML: Within 30 days of quarter end")
|
||||
click.echo(f" • Annual Report: Within 90 days of year end")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Template display failed: {e}", err=True)
|
||||
|
||||
@regulatory.command()
|
||||
@click.option("--period-start", default="2026-01-01", help="Start date for test data (YYYY-MM-DD)")
|
||||
@click.option("--period-end", default="2026-01-31", help="End date for test data (YYYY-MM-DD)")
|
||||
@click.pass_context
|
||||
def test(ctx, period_start: str, period_end: str):
|
||||
"""Run regulatory reporting test with sample data"""
|
||||
try:
|
||||
click.echo(f"🧪 Running Regulatory Reporting Test...")
|
||||
click.echo(f"📅 Test Period: {period_start} to {period_end}")
|
||||
|
||||
# Test SAR generation
|
||||
click.echo(f"\n📋 Test 1: SAR Generation")
|
||||
result = asyncio.run(generate_sar([{
|
||||
"id": "test_sar_001",
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"user_id": "test_user_123",
|
||||
"type": "unusual_volume",
|
||||
"description": "Test suspicious activity for SAR generation",
|
||||
"amount": 25000,
|
||||
"currency": "USD",
|
||||
"risk_score": 0.75,
|
||||
"indicators": ["volume_spike", "timing_anomaly"],
|
||||
"evidence": {"test": True}
|
||||
}]))
|
||||
|
||||
click.echo(f" ✅ SAR Generated: {result['report_id']}")
|
||||
|
||||
# Test compliance summary
|
||||
click.echo(f"\n📊 Test 2: Compliance Summary")
|
||||
compliance_result = asyncio.run(generate_compliance_summary(period_start, period_end))
|
||||
click.echo(f" ✅ Compliance Summary: {compliance_result['report_id']}")
|
||||
click.echo(f" 📈 Overall Score: {compliance_result['overall_score']:.1%}")
|
||||
|
||||
# Test report listing
|
||||
click.echo(f"\n📋 Test 3: Report Listing")
|
||||
reports = list_reports()
|
||||
click.echo(f" ✅ Total Reports: {len(reports)}")
|
||||
|
||||
# Test export
|
||||
if reports:
|
||||
test_report_id = reports[0]['report_id']
|
||||
click.echo(f"\n📤 Test 4: Report Export")
|
||||
try:
|
||||
content = regulatory_reporter.export_report(test_report_id, "json")
|
||||
click.echo(f" ✅ Export successful: {len(content)} characters")
|
||||
except Exception as e:
|
||||
click.echo(f" ⚠️ Export test failed: {e}")
|
||||
|
||||
click.echo(f"\n🎉 Regulatory Reporting Test Complete!")
|
||||
click.echo(f"📊 All systems operational")
|
||||
click.echo(f"📝 Ready for production use")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test failed: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
regulatory()
|
||||
87
cli/commands/security_test.py
Executable file
87
cli/commands/security_test.py
Executable file
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
Security Test CLI Commands for AITBC
|
||||
Commands for running security tests and vulnerability scans
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
@click.group()
|
||||
def security_test():
|
||||
"""Security testing commands"""
|
||||
pass
|
||||
|
||||
@security_test.command()
|
||||
@click.option('--test-type', default='basic', help='Test type (basic, advanced, penetration)')
|
||||
@click.option('--target', help='Target to test (cli, api, services)')
|
||||
@click.option('--test-mode', is_flag=True, help='Run in test mode')
|
||||
def run(test_type, target, test_mode):
|
||||
"""Run security tests"""
|
||||
try:
|
||||
click.echo(f"🔒 Running {test_type} security test")
|
||||
click.echo(f"🎯 Target: {target}")
|
||||
|
||||
if test_mode:
|
||||
click.echo("🔍 TEST MODE - Simulated security test")
|
||||
click.echo("✅ Test completed successfully")
|
||||
click.echo("📊 Results:")
|
||||
click.echo(" 🛡️ Security Score: 95/100")
|
||||
click.echo(" 🔍 Vulnerabilities Found: 2")
|
||||
click.echo(" ⚠️ Risk Level: Low")
|
||||
return
|
||||
|
||||
# Run actual security test
|
||||
if test_type == 'basic':
|
||||
result = run_basic_security_test(target)
|
||||
elif test_type == 'advanced':
|
||||
result = run_advanced_security_test(target)
|
||||
elif test_type == 'penetration':
|
||||
result = run_penetration_test(target)
|
||||
else:
|
||||
click.echo(f"❌ Unknown test type: {test_type}", err=True)
|
||||
return
|
||||
|
||||
if result['success']:
|
||||
click.echo("✅ Security test completed successfully!")
|
||||
click.echo("📊 Results:")
|
||||
click.echo(f" 🛡️ Security Score: {result['security_score']}/100")
|
||||
click.echo(f" 🔍 Vulnerabilities Found: {result['vulnerabilities']}")
|
||||
click.echo(f" ⚠️ Risk Level: {result['risk_level']}")
|
||||
else:
|
||||
click.echo(f"❌ Security test failed: {result['error']}", err=True)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Security test error: {str(e)}", err=True)
|
||||
|
||||
def run_basic_security_test(target):
|
||||
"""Run basic security test"""
|
||||
return {
|
||||
"success": True,
|
||||
"security_score": 95,
|
||||
"vulnerabilities": 2,
|
||||
"risk_level": "Low"
|
||||
}
|
||||
|
||||
def run_advanced_security_test(target):
|
||||
"""Run advanced security test"""
|
||||
return {
|
||||
"success": True,
|
||||
"security_score": 88,
|
||||
"vulnerabilities": 5,
|
||||
"risk_level": "Medium"
|
||||
}
|
||||
|
||||
def run_penetration_test(target):
|
||||
"""Run penetration test"""
|
||||
return {
|
||||
"success": True,
|
||||
"security_score": 92,
|
||||
"vulnerabilities": 3,
|
||||
"risk_level": "Low"
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
|
||||
security_test()
|
||||
476
cli/commands/simulate.py
Executable file
476
cli/commands/simulate.py
Executable file
@@ -0,0 +1,476 @@
|
||||
"""Simulation commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import time
|
||||
import random
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict, Any
|
||||
from utils import output, error, success
|
||||
|
||||
|
||||
@click.group()
|
||||
def simulate():
|
||||
"""Run simulations and manage test users"""
|
||||
pass
|
||||
|
||||
|
||||
@simulate.command()
|
||||
@click.option(
|
||||
"--distribute",
|
||||
default="10000,1000",
|
||||
help="Initial distribution: client_amount,miner_amount",
|
||||
)
|
||||
@click.option("--reset", is_flag=True, help="Reset existing simulation")
|
||||
@click.pass_context
|
||||
def init(ctx, distribute: str, reset: bool):
|
||||
"""Initialize test economy"""
|
||||
home_dir = Path("/home/oib/windsurf/aitbc/tests/e2e/fixtures/home")
|
||||
|
||||
if reset:
|
||||
success("Resetting simulation...")
|
||||
# Reset wallet files
|
||||
for wallet_file in ["client_wallet.json", "miner_wallet.json"]:
|
||||
wallet_path = home_dir / wallet_file
|
||||
if wallet_path.exists():
|
||||
wallet_path.unlink()
|
||||
|
||||
# Parse distribution
|
||||
try:
|
||||
client_amount, miner_amount = map(float, distribute.split(","))
|
||||
except (ValueError, TypeError):
|
||||
error("Invalid distribution format. Use: client_amount,miner_amount")
|
||||
return
|
||||
|
||||
# Initialize genesis wallet
|
||||
genesis_path = home_dir / "genesis_wallet.json"
|
||||
if not genesis_path.exists():
|
||||
genesis_wallet = {
|
||||
"address": "aitbc1genesis",
|
||||
"balance": 1000000,
|
||||
"transactions": [],
|
||||
}
|
||||
with open(genesis_path, "w") as f:
|
||||
json.dump(genesis_wallet, f, indent=2)
|
||||
success("Genesis wallet created")
|
||||
|
||||
# Initialize client wallet
|
||||
client_path = home_dir / "client_wallet.json"
|
||||
if not client_path.exists():
|
||||
client_wallet = {
|
||||
"address": "aitbc1client",
|
||||
"balance": client_amount,
|
||||
"transactions": [
|
||||
{
|
||||
"type": "receive",
|
||||
"amount": client_amount,
|
||||
"from": "aitbc1genesis",
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
],
|
||||
}
|
||||
with open(client_path, "w") as f:
|
||||
json.dump(client_wallet, f, indent=2)
|
||||
success(f"Client wallet initialized with {client_amount} AITBC")
|
||||
|
||||
# Initialize miner wallet
|
||||
miner_path = home_dir / "miner_wallet.json"
|
||||
if not miner_path.exists():
|
||||
miner_wallet = {
|
||||
"address": "aitbc1miner",
|
||||
"balance": miner_amount,
|
||||
"transactions": [
|
||||
{
|
||||
"type": "receive",
|
||||
"amount": miner_amount,
|
||||
"from": "aitbc1genesis",
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
],
|
||||
}
|
||||
with open(miner_path, "w") as f:
|
||||
json.dump(miner_wallet, f, indent=2)
|
||||
success(f"Miner wallet initialized with {miner_amount} AITBC")
|
||||
|
||||
output(
|
||||
{
|
||||
"status": "initialized",
|
||||
"distribution": {"client": client_amount, "miner": miner_amount},
|
||||
"total_supply": client_amount + miner_amount,
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@simulate.group()
|
||||
def user():
|
||||
"""Manage test users"""
|
||||
pass
|
||||
|
||||
|
||||
@user.command()
|
||||
@click.option("--type", type=click.Choice(["client", "miner"]), required=True)
|
||||
@click.option("--name", required=True, help="User name")
|
||||
@click.option("--balance", type=float, default=100, help="Initial balance")
|
||||
@click.pass_context
|
||||
def create(ctx, type: str, name: str, balance: float):
|
||||
"""Create a test user"""
|
||||
home_dir = Path("/home/oib/windsurf/aitbc/tests/e2e/fixtures/home")
|
||||
|
||||
user_id = f"{type}_{name}"
|
||||
wallet_path = home_dir / f"{user_id}_wallet.json"
|
||||
|
||||
if wallet_path.exists():
|
||||
error(f"User {name} already exists")
|
||||
return
|
||||
|
||||
wallet = {
|
||||
"address": f"aitbc1{user_id}",
|
||||
"balance": balance,
|
||||
"transactions": [
|
||||
{
|
||||
"type": "receive",
|
||||
"amount": balance,
|
||||
"from": "aitbc1genesis",
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
with open(wallet_path, "w") as f:
|
||||
json.dump(wallet, f, indent=2)
|
||||
|
||||
success(f"Created {type} user: {name}")
|
||||
output(
|
||||
{"user_id": user_id, "address": wallet["address"], "balance": balance},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@user.command()
|
||||
@click.pass_context
|
||||
def list(ctx):
|
||||
"""List all test users"""
|
||||
home_dir = Path("/home/oib/windsurf/aitbc/tests/e2e/fixtures/home")
|
||||
|
||||
users = []
|
||||
for wallet_file in home_dir.glob("*_wallet.json"):
|
||||
if wallet_file.name in ["genesis_wallet.json"]:
|
||||
continue
|
||||
|
||||
with open(wallet_file) as f:
|
||||
wallet = json.load(f)
|
||||
|
||||
user_type = "client" if "client" in wallet_file.name else "miner"
|
||||
user_name = wallet_file.stem.replace("_wallet", "").replace(f"{user_type}_", "")
|
||||
|
||||
users.append(
|
||||
{
|
||||
"name": user_name,
|
||||
"type": user_type,
|
||||
"address": wallet["address"],
|
||||
"balance": wallet["balance"],
|
||||
}
|
||||
)
|
||||
|
||||
output({"users": users}, ctx.obj["output_format"])
|
||||
|
||||
|
||||
@user.command()
|
||||
@click.argument("user")
|
||||
@click.pass_context
|
||||
def balance(ctx, user: str):
|
||||
"""Check user balance"""
|
||||
home_dir = Path("/home/oib/windsurf/aitbc/tests/e2e/fixtures/home")
|
||||
wallet_path = home_dir / f"{user}_wallet.json"
|
||||
|
||||
if not wallet_path.exists():
|
||||
error(f"User {user} not found")
|
||||
return
|
||||
|
||||
with open(wallet_path) as f:
|
||||
wallet = json.load(f)
|
||||
|
||||
output(
|
||||
{"user": user, "address": wallet["address"], "balance": wallet["balance"]},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@user.command()
|
||||
@click.argument("user")
|
||||
@click.argument("amount", type=float)
|
||||
@click.pass_context
|
||||
def fund(ctx, user: str, amount: float):
|
||||
"""Fund a test user"""
|
||||
home_dir = Path("/home/oib/windsurf/aitbc/tests/e2e/fixtures/home")
|
||||
|
||||
# Load genesis wallet
|
||||
genesis_path = home_dir / "genesis_wallet.json"
|
||||
with open(genesis_path) as f:
|
||||
genesis = json.load(f)
|
||||
|
||||
if genesis["balance"] < amount:
|
||||
error(f"Insufficient genesis balance: {genesis['balance']}")
|
||||
return
|
||||
|
||||
# Load user wallet
|
||||
wallet_path = home_dir / f"{user}_wallet.json"
|
||||
if not wallet_path.exists():
|
||||
error(f"User {user} not found")
|
||||
return
|
||||
|
||||
with open(wallet_path) as f:
|
||||
wallet = json.load(f)
|
||||
|
||||
# Transfer funds
|
||||
genesis["balance"] -= amount
|
||||
genesis["transactions"].append(
|
||||
{
|
||||
"type": "send",
|
||||
"amount": -amount,
|
||||
"to": wallet["address"],
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
)
|
||||
|
||||
wallet["balance"] += amount
|
||||
wallet["transactions"].append(
|
||||
{
|
||||
"type": "receive",
|
||||
"amount": amount,
|
||||
"from": genesis["address"],
|
||||
"timestamp": time.time(),
|
||||
}
|
||||
)
|
||||
|
||||
# Save wallets
|
||||
with open(genesis_path, "w") as f:
|
||||
json.dump(genesis, f, indent=2)
|
||||
|
||||
with open(wallet_path, "w") as f:
|
||||
json.dump(wallet, f, indent=2)
|
||||
|
||||
success(f"Funded {user} with {amount} AITBC")
|
||||
output(
|
||||
{"user": user, "amount": amount, "new_balance": wallet["balance"]},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@simulate.command()
|
||||
@click.option("--jobs", type=int, default=5, help="Number of jobs to simulate")
|
||||
@click.option("--rounds", type=int, default=3, help="Number of rounds")
|
||||
@click.option(
|
||||
"--delay", type=float, default=1.0, help="Delay between operations (seconds)"
|
||||
)
|
||||
@click.pass_context
|
||||
def workflow(ctx, jobs: int, rounds: int, delay: float):
|
||||
"""Simulate complete workflow"""
|
||||
config = ctx.obj["config"]
|
||||
|
||||
success(f"Starting workflow simulation: {jobs} jobs x {rounds} rounds")
|
||||
|
||||
for round_num in range(1, rounds + 1):
|
||||
click.echo(f"\n--- Round {round_num} ---")
|
||||
|
||||
# Submit jobs
|
||||
submitted_jobs = []
|
||||
for i in range(jobs):
|
||||
prompt = f"Test job {i + 1} (round {round_num})"
|
||||
|
||||
# Simulate job submission
|
||||
job_id = f"job_{round_num}_{i + 1}_{int(time.time())}"
|
||||
submitted_jobs.append(job_id)
|
||||
|
||||
output(
|
||||
{
|
||||
"action": "submit_job",
|
||||
"job_id": job_id,
|
||||
"prompt": prompt,
|
||||
"round": round_num,
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
time.sleep(delay)
|
||||
|
||||
# Simulate job processing
|
||||
for job_id in submitted_jobs:
|
||||
# Simulate miner picking up job
|
||||
output(
|
||||
{
|
||||
"action": "job_assigned",
|
||||
"job_id": job_id,
|
||||
"miner": f"miner_{random.randint(1, 3)}",
|
||||
"status": "processing",
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
time.sleep(delay * 0.5)
|
||||
|
||||
# Simulate job completion
|
||||
earnings = random.uniform(1, 10)
|
||||
output(
|
||||
{
|
||||
"action": "job_completed",
|
||||
"job_id": job_id,
|
||||
"earnings": earnings,
|
||||
"status": "completed",
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
time.sleep(delay * 0.5)
|
||||
|
||||
output(
|
||||
{"status": "completed", "total_jobs": jobs * rounds, "rounds": rounds},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@simulate.command()
|
||||
@click.option("--clients", type=int, default=10, help="Number of clients")
|
||||
@click.option("--miners", type=int, default=3, help="Number of miners")
|
||||
@click.option("--duration", type=int, default=300, help="Test duration in seconds")
|
||||
@click.option("--job-rate", type=float, default=1.0, help="Jobs per second")
|
||||
@click.pass_context
|
||||
def load_test(ctx, clients: int, miners: int, duration: int, job_rate: float):
|
||||
"""Run load test"""
|
||||
start_time = time.time()
|
||||
end_time = start_time + duration
|
||||
job_interval = 1.0 / job_rate
|
||||
|
||||
success(f"Starting load test: {clients} clients, {miners} miners, {duration}s")
|
||||
|
||||
stats = {
|
||||
"jobs_submitted": 0,
|
||||
"jobs_completed": 0,
|
||||
"errors": 0,
|
||||
"start_time": start_time,
|
||||
}
|
||||
|
||||
while time.time() < end_time:
|
||||
# Submit jobs
|
||||
for client_id in range(clients):
|
||||
if time.time() >= end_time:
|
||||
break
|
||||
|
||||
job_id = f"load_test_{stats['jobs_submitted']}_{int(time.time())}"
|
||||
stats["jobs_submitted"] += 1
|
||||
|
||||
# Simulate random job completion
|
||||
if random.random() > 0.1: # 90% success rate
|
||||
stats["jobs_completed"] += 1
|
||||
else:
|
||||
stats["errors"] += 1
|
||||
|
||||
time.sleep(job_interval)
|
||||
|
||||
# Show progress
|
||||
elapsed = time.time() - start_time
|
||||
if elapsed % 30 < 1: # Every 30 seconds
|
||||
output(
|
||||
{
|
||||
"elapsed": elapsed,
|
||||
"jobs_submitted": stats["jobs_submitted"],
|
||||
"jobs_completed": stats["jobs_completed"],
|
||||
"errors": stats["errors"],
|
||||
"success_rate": stats["jobs_completed"]
|
||||
/ max(1, stats["jobs_submitted"])
|
||||
* 100,
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
# Final stats
|
||||
total_time = time.time() - start_time
|
||||
output(
|
||||
{
|
||||
"status": "completed",
|
||||
"duration": total_time,
|
||||
"jobs_submitted": stats["jobs_submitted"],
|
||||
"jobs_completed": stats["jobs_completed"],
|
||||
"errors": stats["errors"],
|
||||
"avg_jobs_per_second": stats["jobs_submitted"] / total_time,
|
||||
"success_rate": stats["jobs_completed"]
|
||||
/ max(1, stats["jobs_submitted"])
|
||||
* 100,
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@simulate.command()
|
||||
@click.option("--file", required=True, help="Scenario file path")
|
||||
@click.pass_context
|
||||
def scenario(ctx, file: str):
|
||||
"""Run predefined scenario"""
|
||||
scenario_path = Path(file)
|
||||
|
||||
if not scenario_path.exists():
|
||||
error(f"Scenario file not found: {file}")
|
||||
return
|
||||
|
||||
with open(scenario_path) as f:
|
||||
scenario = json.load(f)
|
||||
|
||||
success(f"Running scenario: {scenario.get('name', 'Unknown')}")
|
||||
|
||||
# Execute scenario steps
|
||||
for step in scenario.get("steps", []):
|
||||
step_type = step.get("type")
|
||||
step_name = step.get("name", "Unnamed step")
|
||||
|
||||
click.echo(f"\nExecuting: {step_name}")
|
||||
|
||||
if step_type == "submit_jobs":
|
||||
count = step.get("count", 1)
|
||||
for i in range(count):
|
||||
output(
|
||||
{
|
||||
"action": "submit_job",
|
||||
"step": step_name,
|
||||
"job_num": i + 1,
|
||||
"prompt": step.get("prompt", f"Scenario job {i + 1}"),
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
elif step_type == "wait":
|
||||
duration = step.get("duration", 1)
|
||||
time.sleep(duration)
|
||||
|
||||
elif step_type == "check_balance":
|
||||
user = step.get("user", "client")
|
||||
# Would check actual balance
|
||||
output({"action": "check_balance", "user": user}, ctx.obj["output_format"])
|
||||
|
||||
output(
|
||||
{"status": "completed", "scenario": scenario.get("name", "Unknown")},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
|
||||
|
||||
@simulate.command()
|
||||
@click.argument("simulation_id")
|
||||
@click.pass_context
|
||||
def results(ctx, simulation_id: str):
|
||||
"""Show simulation results"""
|
||||
# In a real implementation, this would query stored results
|
||||
# For now, return mock data
|
||||
output(
|
||||
{
|
||||
"simulation_id": simulation_id,
|
||||
"status": "completed",
|
||||
"start_time": time.time() - 3600,
|
||||
"end_time": time.time(),
|
||||
"duration": 3600,
|
||||
"total_jobs": 50,
|
||||
"successful_jobs": 48,
|
||||
"failed_jobs": 2,
|
||||
"success_rate": 96.0,
|
||||
},
|
||||
ctx.obj["output_format"],
|
||||
)
|
||||
380
cli/commands/surveillance.py
Executable file
380
cli/commands/surveillance.py
Executable file
@@ -0,0 +1,380 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Trading Surveillance CLI Commands
|
||||
Monitor and detect market manipulation and suspicious trading activities
|
||||
"""
|
||||
|
||||
import click
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Optional, List, Dict, Any
|
||||
from datetime import datetime, timedelta
|
||||
from imports import ensure_coordinator_api_imports
|
||||
|
||||
ensure_coordinator_api_imports()
|
||||
|
||||
try:
|
||||
from app.services.trading_surveillance import (
|
||||
start_surveillance, stop_surveillance, get_alerts,
|
||||
get_surveillance_summary, AlertLevel
|
||||
)
|
||||
_import_error = None
|
||||
except ImportError as e:
|
||||
_import_error = e
|
||||
|
||||
def _missing(*args, **kwargs):
|
||||
raise ImportError(
|
||||
f"Required service module 'app.services.trading_surveillance' could not be imported: {_import_error}. "
|
||||
"Ensure coordinator-api dependencies are installed and the source directory is accessible."
|
||||
)
|
||||
start_surveillance = stop_surveillance = get_alerts = get_surveillance_summary = _missing
|
||||
|
||||
class AlertLevel:
|
||||
"""Stub for AlertLevel when import fails."""
|
||||
pass
|
||||
|
||||
@click.group()
|
||||
def surveillance():
|
||||
"""Trading surveillance and market monitoring commands"""
|
||||
pass
|
||||
|
||||
@surveillance.command()
|
||||
@click.option("--symbols", required=True, help="Trading symbols to monitor (comma-separated)")
|
||||
@click.option("--duration", type=int, default=300, help="Monitoring duration in seconds")
|
||||
@click.pass_context
|
||||
def start(ctx, symbols: str, duration: int):
|
||||
"""Start trading surveillance monitoring"""
|
||||
try:
|
||||
symbol_list = [s.strip().upper() for s in symbols.split(",")]
|
||||
|
||||
click.echo(f"🔍 Starting trading surveillance...")
|
||||
click.echo(f"📊 Monitoring symbols: {', '.join(symbol_list)}")
|
||||
click.echo(f"⏱️ Duration: {duration} seconds")
|
||||
|
||||
async def run_monitoring():
|
||||
# Start monitoring
|
||||
await start_surveillance(symbol_list)
|
||||
|
||||
click.echo(f"✅ Surveillance started!")
|
||||
click.echo(f"🔍 Monitoring {len(symbol_list)} symbols for manipulation patterns")
|
||||
|
||||
if duration > 0:
|
||||
click.echo(f"⏱️ Will run for {duration} seconds...")
|
||||
|
||||
# Run for specified duration
|
||||
await asyncio.sleep(duration)
|
||||
|
||||
# Stop monitoring
|
||||
await stop_surveillance()
|
||||
click.echo(f"🔍 Surveillance stopped after {duration} seconds")
|
||||
|
||||
# Show results
|
||||
alerts = get_alerts()
|
||||
if alerts['total'] > 0:
|
||||
click.echo(f"\n🚨 Generated {alerts['total']} alerts during monitoring:")
|
||||
for alert in alerts['alerts'][:5]: # Show first 5
|
||||
level_icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}.get(alert['level'], "❓")
|
||||
click.echo(f" {level_icon} {alert['description'][:80]}...")
|
||||
else:
|
||||
click.echo(f"\n✅ No alerts generated during monitoring period")
|
||||
|
||||
# Run the async function
|
||||
asyncio.run(run_monitoring())
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to start surveillance: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.pass_context
|
||||
def stop(ctx):
|
||||
"""Stop trading surveillance monitoring"""
|
||||
try:
|
||||
click.echo(f"🔍 Stopping trading surveillance...")
|
||||
|
||||
success = asyncio.run(stop_surveillance())
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ Surveillance stopped successfully")
|
||||
else:
|
||||
click.echo(f"⚠️ Surveillance was not running")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to stop surveillance: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.option("--level", type=click.Choice(['critical', 'high', 'medium', 'low']), help="Filter by alert level")
|
||||
@click.option("--limit", type=int, default=20, help="Maximum number of alerts to show")
|
||||
@click.pass_context
|
||||
def alerts(ctx, level: str, limit: int):
|
||||
"""Show trading surveillance alerts"""
|
||||
try:
|
||||
click.echo(f"🚨 Trading Surveillance Alerts")
|
||||
|
||||
alerts_data = get_alerts(level)
|
||||
|
||||
if alerts_data['total'] == 0:
|
||||
click.echo(f"✅ No active alerts")
|
||||
return
|
||||
|
||||
click.echo(f"\n📊 Total Active Alerts: {alerts_data['total']}")
|
||||
|
||||
if level:
|
||||
click.echo(f"🔍 Filtered by level: {level.upper()}")
|
||||
|
||||
# Display alerts
|
||||
for i, alert in enumerate(alerts_data['alerts'][:limit]):
|
||||
level_icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}.get(alert['level'], "❓")
|
||||
|
||||
click.echo(f"\n{level_icon} Alert #{i+1}")
|
||||
click.echo(f" ID: {alert['alert_id']}")
|
||||
click.echo(f" Level: {alert['level'].upper()}")
|
||||
click.echo(f" Description: {alert['description']}")
|
||||
click.echo(f" Confidence: {alert['confidence']:.2f}")
|
||||
click.echo(f" Risk Score: {alert['risk_score']:.2f}")
|
||||
click.echo(f" Time: {alert['timestamp']}")
|
||||
|
||||
if alert.get('manipulation_type'):
|
||||
click.echo(f" Manipulation: {alert['manipulation_type'].replace('_', ' ').title()}")
|
||||
|
||||
if alert.get('anomaly_type'):
|
||||
click.echo(f" Anomaly: {alert['anomaly_type'].replace('_', ' ').title()}")
|
||||
|
||||
if alert['affected_symbols']:
|
||||
click.echo(f" Symbols: {', '.join(alert['affected_symbols'])}")
|
||||
|
||||
if alert['affected_users']:
|
||||
click.echo(f" Users: {', '.join(alert['affected_users'][:3])}")
|
||||
if len(alert['affected_users']) > 3:
|
||||
click.echo(f" ... and {len(alert['affected_users']) - 3} more")
|
||||
|
||||
if alerts_data['total'] > limit:
|
||||
click.echo(f"\n... and {alerts_data['total'] - limit} more alerts")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to get alerts: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.pass_context
|
||||
def summary(ctx):
|
||||
"""Show surveillance summary and statistics"""
|
||||
try:
|
||||
click.echo(f"📊 Trading Surveillance Summary")
|
||||
|
||||
summary = get_surveillance_summary()
|
||||
|
||||
click.echo(f"\n📈 Alert Statistics:")
|
||||
click.echo(f" Total Alerts: {summary['total_alerts']}")
|
||||
click.echo(f" Active Alerts: {summary['active_alerts']}")
|
||||
|
||||
click.echo(f"\n🎯 Alerts by Severity:")
|
||||
click.echo(f" 🔴 Critical: {summary['by_level']['critical']}")
|
||||
click.echo(f" 🟠 High: {summary['by_level']['high']}")
|
||||
click.echo(f" 🟡 Medium: {summary['by_level']['medium']}")
|
||||
click.echo(f" 🟢 Low: {summary['by_level']['low']}")
|
||||
|
||||
click.echo(f"\n🔍 Alerts by Type:")
|
||||
click.echo(f" Pump & Dump: {summary['by_type']['pump_and_dump']}")
|
||||
click.echo(f" Wash Trading: {summary['by_type']['wash_trading']}")
|
||||
click.echo(f" Spoofing: {summary['by_type']['spoofing']}")
|
||||
click.echo(f" Volume Spikes: {summary['by_type']['volume_spike']}")
|
||||
click.echo(f" Price Anomalies: {summary['by_type']['price_anomaly']}")
|
||||
click.echo(f" Concentrated Trading: {summary['by_type']['concentrated_trading']}")
|
||||
|
||||
click.echo(f"\n⚠️ Risk Distribution:")
|
||||
click.echo(f" High Risk (>0.7): {summary['risk_distribution']['high_risk']}")
|
||||
click.echo(f" Medium Risk (0.4-0.7): {summary['risk_distribution']['medium_risk']}")
|
||||
click.echo(f" Low Risk (<0.4): {summary['risk_distribution']['low_risk']}")
|
||||
|
||||
# Recommendations
|
||||
click.echo(f"\n💡 Recommendations:")
|
||||
|
||||
if summary['by_level']['critical'] > 0:
|
||||
click.echo(f" 🚨 URGENT: {summary['by_level']['critical']} critical alerts require immediate attention")
|
||||
|
||||
if summary['by_level']['high'] > 5:
|
||||
click.echo(f" ⚠️ High alert volume ({summary['by_level']['high']}) - consider increasing monitoring")
|
||||
|
||||
if summary['by_type']['pump_and_dump'] > 2:
|
||||
click.echo(f" 📈 Multiple pump & dump patterns detected - review market integrity")
|
||||
|
||||
if summary['risk_distribution']['high_risk'] > 3:
|
||||
click.echo(f" 🔥 High risk activity detected - implement additional safeguards")
|
||||
|
||||
if summary['active_alerts'] == 0:
|
||||
click.echo(f" ✅ All clear - no suspicious activity detected")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to get summary: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.option("--alert-id", required=True, help="Alert ID to resolve")
|
||||
@click.option("--resolution", default="resolved", type=click.Choice(['resolved', 'false_positive']), help="Resolution type")
|
||||
@click.pass_context
|
||||
def resolve(ctx, alert_id: str, resolution: str):
|
||||
"""Resolve a surveillance alert"""
|
||||
try:
|
||||
click.echo(f"🔍 Resolving alert: {alert_id}")
|
||||
|
||||
# Import surveillance to access resolve function
|
||||
from app.services.trading_surveillance import surveillance
|
||||
|
||||
success = surveillance.resolve_alert(alert_id, resolution)
|
||||
|
||||
if success:
|
||||
click.echo(f"✅ Alert {alert_id} marked as {resolution}")
|
||||
else:
|
||||
click.echo(f"❌ Alert {alert_id} not found")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to resolve alert: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.option("--symbols", required=True, help="Symbols to test (comma-separated)")
|
||||
@click.option("--duration", type=int, default=10, help="Test duration in seconds")
|
||||
@click.pass_context
|
||||
def test(ctx, symbols: str, duration: int):
|
||||
"""Run surveillance test with mock data"""
|
||||
try:
|
||||
symbol_list = [s.strip().upper() for s in symbols.split(",")]
|
||||
|
||||
click.echo(f"🧪 Running surveillance test...")
|
||||
click.echo(f"📊 Testing symbols: {', '.join(symbol_list)}")
|
||||
click.echo(f"⏱️ Duration: {duration} seconds")
|
||||
|
||||
# Import test function
|
||||
from app.services.trading_surveillance import test_trading_surveillance
|
||||
|
||||
# Run test
|
||||
asyncio.run(test_trading_surveillance())
|
||||
|
||||
# Show recent alerts
|
||||
alerts = get_alerts()
|
||||
click.echo(f"\n🚨 Test Results:")
|
||||
click.echo(f" Total Alerts Generated: {alerts['total']}")
|
||||
|
||||
if alerts['total'] > 0:
|
||||
click.echo(f" Sample Alerts:")
|
||||
for alert in alerts['alerts'][:3]:
|
||||
level_icon = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}.get(alert['level'], "❓")
|
||||
click.echo(f" {level_icon} {alert['description']}")
|
||||
|
||||
click.echo(f"\n✅ Surveillance test complete!")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Test failed: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.pass_context
|
||||
def status(ctx):
|
||||
"""Show current surveillance status"""
|
||||
try:
|
||||
from app.services.trading_surveillance import surveillance
|
||||
|
||||
click.echo(f"📊 Trading Surveillance Status")
|
||||
|
||||
if surveillance.is_monitoring:
|
||||
click.echo(f"🟢 Status: ACTIVE")
|
||||
click.echo(f"📊 Monitoring Symbols: {len(surveillance.monitoring_symbols)}")
|
||||
|
||||
if surveillance.monitoring_symbols:
|
||||
click.echo(f"🔍 Active Symbols: {', '.join(surveillance.monitoring_symbols.keys())}")
|
||||
|
||||
click.echo(f"📈 Total Alerts Generated: {len(surveillance.alerts)}")
|
||||
click.echo(f"🚨 Active Alerts: {len([a for a in surveillance.alerts if a.status == 'active'])}")
|
||||
else:
|
||||
click.echo(f"🔴 Status: INACTIVE")
|
||||
click.echo(f"💤 Surveillance is not currently running")
|
||||
|
||||
click.echo(f"\n⚙️ Configuration:")
|
||||
click.echo(f" Volume Spike Threshold: {surveillance.thresholds['volume_spike_multiplier']}x average")
|
||||
click.echo(f" Price Change Threshold: {surveillance.thresholds['price_change_threshold']:.1%}")
|
||||
click.echo(f" Wash Trade Threshold: {surveillance.thresholds['wash_trade_threshold']:.1%}")
|
||||
click.echo(f" Spoofing Threshold: {surveillance.thresholds['spoofing_threshold']:.1%}")
|
||||
click.echo(f" Concentration Threshold: {surveillance.thresholds['concentration_threshold']:.1%}")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to get status: {e}", err=True)
|
||||
|
||||
@surveillance.command()
|
||||
@click.pass_context
|
||||
def list_patterns(ctx):
|
||||
"""List detected manipulation patterns and anomalies"""
|
||||
try:
|
||||
click.echo(f"🔍 Trading Pattern Detection")
|
||||
|
||||
patterns = {
|
||||
"Manipulation Patterns": [
|
||||
{
|
||||
"name": "Pump and Dump",
|
||||
"description": "Rapid price increase followed by sharp decline",
|
||||
"indicators": ["Volume spikes", "Unusual price momentum", "Sudden reversals"],
|
||||
"risk_level": "High"
|
||||
},
|
||||
{
|
||||
"name": "Wash Trading",
|
||||
"description": "Circular trading between same entities",
|
||||
"indicators": ["High user concentration", "Repetitive trade patterns", "Low market impact"],
|
||||
"risk_level": "High"
|
||||
},
|
||||
{
|
||||
"name": "Spoofing",
|
||||
"description": "Placing large orders with intent to cancel",
|
||||
"indicators": ["High cancellation rate", "Large order sizes", "No execution"],
|
||||
"risk_level": "Medium"
|
||||
},
|
||||
{
|
||||
"name": "Layering",
|
||||
"description": "Multiple non-executed orders at different prices",
|
||||
"indicators": ["Ladder order patterns", "Rapid cancellations", "Price manipulation"],
|
||||
"risk_level": "Medium"
|
||||
}
|
||||
],
|
||||
"Anomaly Types": [
|
||||
{
|
||||
"name": "Volume Spike",
|
||||
"description": "Unusual increase in trading volume",
|
||||
"indicators": ["3x+ average volume", "Sudden volume changes", "Unusual timing"],
|
||||
"risk_level": "Medium"
|
||||
},
|
||||
{
|
||||
"name": "Price Anomaly",
|
||||
"description": "Unusual price movements",
|
||||
"indicators": ["15%+ price changes", "Deviation from trend", "Gap movements"],
|
||||
"risk_level": "Medium"
|
||||
},
|
||||
{
|
||||
"name": "Concentrated Trading",
|
||||
"description": "Trading dominated by few participants",
|
||||
"indicators": ["High HHI index", "Single user dominance", "Unequal distribution"],
|
||||
"risk_level": "Medium"
|
||||
},
|
||||
{
|
||||
"name": "Unusual Timing",
|
||||
"description": "Suspicious timing patterns",
|
||||
"indicators": ["Off-hours activity", "Coordinated timing", "Predictable patterns"],
|
||||
"risk_level": "Low"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
for category, pattern_list in patterns.items():
|
||||
click.echo(f"\n📋 {category}:")
|
||||
for pattern in pattern_list:
|
||||
risk_icon = {"High": "🔴", "Medium": "🟡", "Low": "🟢"}.get(pattern["risk_level"], "❓")
|
||||
click.echo(f"\n{risk_icon} {pattern['name']}")
|
||||
click.echo(f" Description: {pattern['description']}")
|
||||
click.echo(f" Indicators: {', '.join(pattern['indicators'])}")
|
||||
click.echo(f" Risk Level: {pattern['risk_level']}")
|
||||
|
||||
click.echo(f"\n💡 Detection Methods:")
|
||||
click.echo(f" • Statistical analysis of trading patterns")
|
||||
click.echo(f" • Machine learning anomaly detection")
|
||||
click.echo(f" • Real-time monitoring and alerting")
|
||||
click.echo(f" • Cross-market correlation analysis")
|
||||
click.echo(f" • User behavior pattern analysis")
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"❌ Failed to list patterns: {e}", err=True)
|
||||
|
||||
if __name__ == "__main__":
|
||||
surveillance()
|
||||
246
cli/commands/swarm.py
Executable file
246
cli/commands/swarm.py
Executable file
@@ -0,0 +1,246 @@
|
||||
"""Swarm intelligence commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import httpx
|
||||
import json
|
||||
from typing import Optional, Dict, Any, List
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def swarm():
|
||||
"""Swarm intelligence and collective optimization"""
|
||||
pass
|
||||
|
||||
|
||||
@swarm.command()
|
||||
@click.option("--role", required=True,
|
||||
type=click.Choice(["load-balancer", "resource-optimizer", "task-coordinator", "monitor"]),
|
||||
help="Swarm role")
|
||||
@click.option("--capability", required=True, help="Agent capability")
|
||||
@click.option("--region", help="Operating region")
|
||||
@click.option("--priority", default="normal",
|
||||
type=click.Choice(["low", "normal", "high"]),
|
||||
help="Swarm priority")
|
||||
@click.pass_context
|
||||
def join(ctx, role: str, capability: str, region: Optional[str], priority: str):
|
||||
"""Join agent swarm for collective optimization"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
swarm_data = {
|
||||
"role": role,
|
||||
"capability": capability,
|
||||
"priority": priority
|
||||
}
|
||||
|
||||
if region:
|
||||
swarm_data["region"] = region
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/swarm/join",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=swarm_data
|
||||
)
|
||||
|
||||
if response.status_code == 201:
|
||||
result = response.json()
|
||||
success(f"Joined swarm: {result['swarm_id']}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to join swarm: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@swarm.command()
|
||||
@click.option("--task", required=True, help="Swarm task type")
|
||||
@click.option("--collaborators", type=int, default=5, help="Number of collaborators")
|
||||
@click.option("--strategy", default="consensus",
|
||||
type=click.Choice(["consensus", "leader-election", "distributed"]),
|
||||
help="Coordination strategy")
|
||||
@click.option("--timeout", default=3600, help="Task timeout in seconds")
|
||||
@click.pass_context
|
||||
def coordinate(ctx, task: str, collaborators: int, strategy: str, timeout: int):
|
||||
"""Coordinate swarm task execution"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
coordination_data = {
|
||||
"task": task,
|
||||
"collaborators": collaborators,
|
||||
"strategy": strategy,
|
||||
"timeout_seconds": timeout
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/swarm/coordinate",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=coordination_data
|
||||
)
|
||||
|
||||
if response.status_code == 202:
|
||||
result = response.json()
|
||||
success(f"Swarm coordination started: {result['task_id']}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to start coordination: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@swarm.command()
|
||||
@click.option("--swarm-id", help="Filter by swarm ID")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.option("--limit", default=20, help="Number of swarms to list")
|
||||
@click.pass_context
|
||||
def list(ctx, swarm_id: Optional[str], status: Optional[str], limit: int):
|
||||
"""List active swarms"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
params = {"limit": limit}
|
||||
if swarm_id:
|
||||
params["swarm_id"] = swarm_id
|
||||
if status:
|
||||
params["status"] = status
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/swarm/list",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
swarms = response.json()
|
||||
output(swarms, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to list swarms: {response.status_code}")
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@swarm.command()
|
||||
@click.argument("task_id")
|
||||
@click.option("--real-time", is_flag=True, help="Show real-time progress")
|
||||
@click.option("--interval", default=10, help="Update interval for real-time monitoring")
|
||||
@click.pass_context
|
||||
def status(ctx, task_id: str, real_time: bool, interval: int):
|
||||
"""Get swarm task status"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
def get_status():
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.get(
|
||||
f"{config.coordinator_url}/swarm/tasks/{task_id}/status",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
return response.json()
|
||||
else:
|
||||
error(f"Failed to get task status: {response.status_code}")
|
||||
return None
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
return None
|
||||
|
||||
if real_time:
|
||||
click.echo(f"Monitoring swarm task {task_id} (Ctrl+C to stop)...")
|
||||
while True:
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
click.clear()
|
||||
click.echo(f"Task ID: {task_id}")
|
||||
click.echo(f"Status: {status_data.get('status', 'Unknown')}")
|
||||
click.echo(f"Progress: {status_data.get('progress', 0)}%")
|
||||
click.echo(f"Collaborators: {status_data.get('active_collaborators', 0)}/{status_data.get('total_collaborators', 0)}")
|
||||
|
||||
if status_data.get('status') in ['completed', 'failed', 'cancelled']:
|
||||
break
|
||||
|
||||
time.sleep(interval)
|
||||
else:
|
||||
status_data = get_status()
|
||||
if status_data:
|
||||
output(status_data, ctx.obj['output_format'])
|
||||
|
||||
|
||||
@swarm.command()
|
||||
@click.argument("swarm_id")
|
||||
@click.pass_context
|
||||
def leave(ctx, swarm_id: str):
|
||||
"""Leave swarm"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
if not click.confirm(f"Leave swarm {swarm_id}?"):
|
||||
click.echo("Operation cancelled")
|
||||
return
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/swarm/{swarm_id}/leave",
|
||||
headers={"X-Api-Key": config.api_key or ""}
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Left swarm {swarm_id}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to leave swarm: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
|
||||
|
||||
@swarm.command()
|
||||
@click.argument("task_id")
|
||||
@click.option("--consensus-threshold", default=0.7, help="Consensus threshold (0.0-1.0)")
|
||||
@click.pass_context
|
||||
def consensus(ctx, task_id: str, consensus_threshold: float):
|
||||
"""Achieve swarm consensus on task result"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
consensus_data = {
|
||||
"consensus_threshold": consensus_threshold
|
||||
}
|
||||
|
||||
try:
|
||||
with httpx.Client() as client:
|
||||
response = client.post(
|
||||
f"{config.coordinator_url}/swarm/tasks/{task_id}/consensus",
|
||||
headers={"X-Api-Key": config.api_key or ""},
|
||||
json=consensus_data
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
success(f"Consensus achieved: {result.get('consensus_reached', False)}")
|
||||
output(result, ctx.obj['output_format'])
|
||||
else:
|
||||
error(f"Failed to achieve consensus: {response.status_code}")
|
||||
if response.text:
|
||||
error(response.text)
|
||||
ctx.exit(1)
|
||||
except Exception as e:
|
||||
error(f"Network error: {e}")
|
||||
ctx.exit(1)
|
||||
58
cli/commands/sync.py
Normal file
58
cli/commands/sync.py
Normal file
@@ -0,0 +1,58 @@
|
||||
"""Sync management commands for AITBC."""
|
||||
|
||||
import asyncio
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from utils import success, error, run_subprocess
|
||||
|
||||
|
||||
@click.group()
|
||||
def sync():
|
||||
"""Blockchain synchronization utilities."""
|
||||
pass
|
||||
|
||||
|
||||
@sync.command()
|
||||
@click.option('--source', default='http://10.1.223.40:8006', help='Source RPC URL (leader)')
|
||||
@click.option('--import-url', default='http://127.0.0.1:8006', help='Local RPC URL for import')
|
||||
@click.option('--batch-size', type=int, default=100, help='Blocks per batch')
|
||||
@click.option('--poll-interval', type=float, default=0.2, help='Seconds between batches')
|
||||
def bulk(source, import_url, batch_size, poll_interval):
|
||||
"""Bulk import blocks from a leader to catch up quickly."""
|
||||
try:
|
||||
# Resolve paths
|
||||
blockchain_dir = Path(__file__).resolve().parents[3] / 'apps' / 'blockchain-node'
|
||||
src_dir = blockchain_dir / 'src'
|
||||
venv_python = blockchain_dir / '.venv' / 'bin' / 'python3'
|
||||
sync_cli = src_dir / 'aitbc_chain' / 'sync_cli.py'
|
||||
|
||||
if not sync_cli.exists():
|
||||
error("sync_cli.py not found. Ensure bulk sync feature is deployed.")
|
||||
raise click.Abort()
|
||||
|
||||
cmd = [
|
||||
str(venv_python),
|
||||
str(sync_cli),
|
||||
'--source', source,
|
||||
'--import-url', import_url,
|
||||
'--batch-size', str(batch_size),
|
||||
'--poll-interval', str(poll_interval),
|
||||
]
|
||||
|
||||
# Prepare environment
|
||||
env = {
|
||||
'PYTHONPATH': str(src_dir),
|
||||
}
|
||||
|
||||
success(f"Running bulk sync from {source} to {import_url} (batch size: {batch_size})")
|
||||
result = run_subprocess(cmd, env=env, capture_output=False)
|
||||
if result.returncode != 0:
|
||||
error("Bulk sync failed. Check logs for details.")
|
||||
raise click.Abort()
|
||||
success("Bulk sync completed.")
|
||||
except Exception as e:
|
||||
error(f"Error during bulk sync: {e}")
|
||||
raise click.Abort()
|
||||
467
cli/commands/test_cli.py
Executable file
467
cli/commands/test_cli.py
Executable file
@@ -0,0 +1,467 @@
|
||||
"""
|
||||
AITBC CLI Testing Commands
|
||||
Provides testing and debugging utilities for the AITBC CLI
|
||||
"""
|
||||
|
||||
import click
|
||||
import json
|
||||
import time
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, Optional
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
from utils import output, success, error, warning
|
||||
from config import get_config
|
||||
|
||||
|
||||
@click.group()
|
||||
def test():
|
||||
"""Testing and debugging commands for AITBC CLI"""
|
||||
pass
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--format', type=click.Choice(['json', 'table', 'yaml']), default='table', help='Output format')
|
||||
@click.pass_context
|
||||
def environment(ctx, format):
|
||||
"""Test CLI environment and configuration"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
env_info = {
|
||||
'coordinator_url': config.coordinator_url,
|
||||
'api_key': config.api_key,
|
||||
'output_format': ctx.obj['output_format'],
|
||||
'test_mode': ctx.obj['test_mode'],
|
||||
'dry_run': ctx.obj['dry_run'],
|
||||
'timeout': ctx.obj['timeout'],
|
||||
'no_verify': ctx.obj['no_verify'],
|
||||
'log_level': ctx.obj['log_level']
|
||||
}
|
||||
|
||||
if format == 'json':
|
||||
output(json.dumps(env_info, indent=2))
|
||||
else:
|
||||
output("CLI Environment Test Results:")
|
||||
output(f" Coordinator URL: {env_info['coordinator_url']}")
|
||||
output(f" API Key: {env_info['api_key'][:10]}..." if env_info['api_key'] else " API Key: None")
|
||||
output(f" Output Format: {env_info['output_format']}")
|
||||
output(f" Test Mode: {env_info['test_mode']}")
|
||||
output(f" Dry Run: {env_info['dry_run']}")
|
||||
output(f" Timeout: {env_info['timeout']}s")
|
||||
output(f" No Verify: {env_info['no_verify']}")
|
||||
output(f" Log Level: {env_info['log_level']}")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--endpoint', default='health', help='API endpoint to test')
|
||||
@click.option('--method', default='GET', help='HTTP method')
|
||||
@click.option('--data', help='JSON data to send (for POST/PUT)')
|
||||
@click.pass_context
|
||||
def api(ctx, endpoint, method, data):
|
||||
"""Test API connectivity"""
|
||||
config = ctx.obj['config']
|
||||
|
||||
try:
|
||||
import httpx
|
||||
|
||||
# Prepare request
|
||||
url = f"{config.coordinator_url.rstrip('/')}/{endpoint.lstrip('/')}"
|
||||
headers = {}
|
||||
if config.api_key:
|
||||
headers['Authorization'] = f"Bearer {config.api_key}"
|
||||
|
||||
# Prepare data
|
||||
json_data = None
|
||||
if data and method in ['POST', 'PUT']:
|
||||
json_data = json.loads(data)
|
||||
|
||||
# Make request
|
||||
with httpx.Client(verify=not ctx.obj['no_verify'], timeout=ctx.obj['timeout']) as client:
|
||||
if method == 'GET':
|
||||
response = client.get(url, headers=headers)
|
||||
elif method == 'POST':
|
||||
response = client.post(url, headers=headers, json=json_data)
|
||||
elif method == 'PUT':
|
||||
response = client.put(url, headers=headers, json=json_data)
|
||||
else:
|
||||
raise ValueError(f"Unsupported method: {method}")
|
||||
|
||||
# Display results
|
||||
output(f"API Test Results:")
|
||||
output(f" URL: {url}")
|
||||
output(f" Method: {method}")
|
||||
output(f" Status Code: {response.status_code}")
|
||||
output(f" Response Time: {response.elapsed.total_seconds():.3f}s")
|
||||
|
||||
if response.status_code == 200:
|
||||
success("✅ API test successful")
|
||||
try:
|
||||
response_data = response.json()
|
||||
output("Response Data:")
|
||||
output(json.dumps(response_data, indent=2))
|
||||
except:
|
||||
output(f"Response: {response.text}")
|
||||
else:
|
||||
error(f"❌ API test failed with status {response.status_code}")
|
||||
output(f"Response: {response.text}")
|
||||
|
||||
except ImportError:
|
||||
error("❌ httpx not installed. Install with: pip install httpx")
|
||||
except Exception as e:
|
||||
error(f"❌ API test failed: {str(e)}")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--wallet-name', default='test-wallet', help='Test wallet name')
|
||||
@click.option('--test-operations', is_flag=True, default=True, help='Test wallet operations')
|
||||
@click.pass_context
|
||||
def wallet(ctx, wallet_name, test_operations):
|
||||
"""Test wallet functionality"""
|
||||
from commands.wallet import wallet as wallet_cmd
|
||||
|
||||
output(f"Testing wallet functionality with wallet: {wallet_name}")
|
||||
|
||||
# Test wallet creation
|
||||
try:
|
||||
result = ctx.invoke(wallet_cmd, ['create', wallet_name])
|
||||
if result.exit_code == 0:
|
||||
success(f"✅ Wallet '{wallet_name}' created successfully")
|
||||
else:
|
||||
error(f"❌ Wallet creation failed: {result.output}")
|
||||
return
|
||||
except Exception as e:
|
||||
error(f"❌ Wallet creation error: {str(e)}")
|
||||
return
|
||||
|
||||
if test_operations:
|
||||
# Test wallet balance
|
||||
try:
|
||||
result = ctx.invoke(wallet_cmd, ['balance'])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Wallet balance check successful")
|
||||
output(f"Balance output: {result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Wallet balance check failed: {result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Wallet balance check error: {str(e)}")
|
||||
|
||||
# Test wallet info
|
||||
try:
|
||||
result = ctx.invoke(wallet_cmd, ['info'])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Wallet info check successful")
|
||||
output(f"Info output: {result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Wallet info check failed: {result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Wallet info check error: {str(e)}")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--job-type', default='ml_inference', help='Type of job to test')
|
||||
@click.option('--test-data', default='{"model": "test-model", "input": "test-data"}', help='Test job data')
|
||||
@click.pass_context
|
||||
def job(ctx, job_type, test_data):
|
||||
"""Test job submission and management"""
|
||||
from commands.client import client as client_cmd
|
||||
|
||||
output(f"Testing job submission with type: {job_type}")
|
||||
|
||||
try:
|
||||
# Parse test data
|
||||
job_data = json.loads(test_data)
|
||||
job_data['type'] = job_type
|
||||
|
||||
# Test job submission
|
||||
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
|
||||
json.dump(job_data, f)
|
||||
temp_file = f.name
|
||||
|
||||
try:
|
||||
result = ctx.invoke(client_cmd, ['submit', '--job-file', temp_file])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Job submission successful")
|
||||
output(f"Submission output: {result.output}")
|
||||
|
||||
# Extract job ID if present
|
||||
if 'job_id' in result.output:
|
||||
import re
|
||||
job_id_match = re.search(r'job[_\s-]?id[:\s]+(\w+)', result.output, re.IGNORECASE)
|
||||
if job_id_match:
|
||||
job_id = job_id_match.group(1)
|
||||
output(f"Extracted job ID: {job_id}")
|
||||
|
||||
# Test job status
|
||||
try:
|
||||
status_result = ctx.invoke(client_cmd, ['status', job_id])
|
||||
if status_result.exit_code == 0:
|
||||
success("✅ Job status check successful")
|
||||
output(f"Status output: {status_result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Job status check failed: {status_result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Job status check error: {str(e)}")
|
||||
else:
|
||||
error(f"❌ Job submission failed: {result.output}")
|
||||
finally:
|
||||
# Clean up temp file
|
||||
Path(temp_file).unlink(missing_ok=True)
|
||||
|
||||
except json.JSONDecodeError:
|
||||
error(f"❌ Invalid test data JSON: {test_data}")
|
||||
except Exception as e:
|
||||
error(f"❌ Job test failed: {str(e)}")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--gpu-type', default='RTX 3080', help='GPU type to test')
|
||||
@click.option('--price', type=float, default=0.1, help='Price to test')
|
||||
@click.pass_context
|
||||
def marketplace(ctx, gpu_type, price):
|
||||
"""Test marketplace functionality"""
|
||||
from commands.marketplace import marketplace as marketplace_cmd
|
||||
|
||||
output(f"Testing marketplace functionality for {gpu_type} at {price} AITBC/hour")
|
||||
|
||||
# Test marketplace offers listing
|
||||
try:
|
||||
result = ctx.invoke(marketplace_cmd, ['offers', 'list'])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Marketplace offers list successful")
|
||||
output(f"Offers output: {result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Marketplace offers list failed: {result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Marketplace offers list error: {str(e)}")
|
||||
|
||||
# Test marketplace pricing
|
||||
try:
|
||||
result = ctx.invoke(marketplace_cmd, ['pricing', gpu_type])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Marketplace pricing check successful")
|
||||
output(f"Pricing output: {result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Marketplace pricing check failed: {result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Marketplace pricing check error: {str(e)}")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--test-endpoints', is_flag=True, default=True, help='Test blockchain endpoints')
|
||||
@click.pass_context
|
||||
def blockchain(ctx, test_endpoints):
|
||||
"""Test blockchain functionality"""
|
||||
from commands.blockchain import blockchain as blockchain_cmd
|
||||
|
||||
output("Testing blockchain functionality")
|
||||
|
||||
if test_endpoints:
|
||||
# Test blockchain info
|
||||
try:
|
||||
result = ctx.invoke(blockchain_cmd, ['info'])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Blockchain info successful")
|
||||
output(f"Info output: {result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Blockchain info failed: {result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Blockchain info error: {str(e)}")
|
||||
|
||||
# Test chain status
|
||||
try:
|
||||
result = ctx.invoke(blockchain_cmd, ['status'])
|
||||
if result.exit_code == 0:
|
||||
success("✅ Blockchain status successful")
|
||||
output(f"Status output: {result.output}")
|
||||
else:
|
||||
warning(f"⚠️ Blockchain status failed: {result.output}")
|
||||
except Exception as e:
|
||||
warning(f"⚠️ Blockchain status error: {str(e)}")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--component', help='Specific component to test (wallet, job, marketplace, blockchain, api)')
|
||||
@click.option('--verbose', is_flag=True, help='Verbose test output')
|
||||
@click.pass_context
|
||||
def integration(ctx, component, verbose):
|
||||
"""Run integration tests"""
|
||||
|
||||
if component:
|
||||
output(f"Running integration tests for: {component}")
|
||||
|
||||
if component == 'wallet':
|
||||
ctx.invoke(wallet, ['--test-operations'])
|
||||
elif component == 'job':
|
||||
ctx.invoke(job, [])
|
||||
elif component == 'marketplace':
|
||||
ctx.invoke(marketplace)
|
||||
elif component == 'blockchain':
|
||||
ctx.invoke(blockchain, [])
|
||||
elif component == 'api':
|
||||
ctx.invoke(api, endpoint='health')
|
||||
else:
|
||||
error(f"Unknown component: {component}")
|
||||
return
|
||||
else:
|
||||
output("Running full integration test suite...")
|
||||
|
||||
# Test API connectivity first
|
||||
output("1. Testing API connectivity...")
|
||||
ctx.invoke(api, endpoint='health')
|
||||
|
||||
# Test wallet functionality
|
||||
output("2. Testing wallet functionality...")
|
||||
ctx.invoke(wallet, ['--wallet-name', 'integration-test-wallet'])
|
||||
|
||||
# Test marketplace functionality
|
||||
output("3. Testing marketplace functionality...")
|
||||
ctx.invoke(marketplace)
|
||||
|
||||
# Test blockchain functionality
|
||||
output("4. Testing blockchain functionality...")
|
||||
ctx.invoke(blockchain, [])
|
||||
|
||||
# Test job functionality
|
||||
output("5. Testing job functionality...")
|
||||
ctx.invoke(job, [])
|
||||
|
||||
success("✅ Integration test suite completed")
|
||||
|
||||
|
||||
@test.command()
|
||||
@click.option('--output-file', help='Save test results to file')
|
||||
@click.pass_context
|
||||
def diagnostics(ctx, output_file):
|
||||
"""Run comprehensive diagnostics"""
|
||||
|
||||
diagnostics_data = {
|
||||
'timestamp': time.time(),
|
||||
'test_mode': ctx.obj['test_mode'],
|
||||
'dry_run': ctx.obj['dry_run'],
|
||||
'config': {
|
||||
'coordinator_url': ctx.obj['config'].coordinator_url,
|
||||
'api_key_present': bool(ctx.obj['config'].api_key),
|
||||
'output_format': ctx.obj['output_format']
|
||||
}
|
||||
}
|
||||
|
||||
output("Running comprehensive diagnostics...")
|
||||
|
||||
# Test 1: Environment
|
||||
output("1. Testing environment...")
|
||||
try:
|
||||
ctx.invoke(environment, format='json')
|
||||
diagnostics_data['environment'] = 'PASS'
|
||||
except Exception as e:
|
||||
diagnostics_data['environment'] = f'FAIL: {str(e)}'
|
||||
error(f"Environment test failed: {str(e)}")
|
||||
|
||||
# Test 2: API Connectivity
|
||||
output("2. Testing API connectivity...")
|
||||
try:
|
||||
ctx.invoke(api, endpoint='health')
|
||||
diagnostics_data['api_connectivity'] = 'PASS'
|
||||
except Exception as e:
|
||||
diagnostics_data['api_connectivity'] = f'FAIL: {str(e)}'
|
||||
error(f"API connectivity test failed: {str(e)}")
|
||||
|
||||
# Test 3: Wallet Creation
|
||||
output("3. Testing wallet creation...")
|
||||
try:
|
||||
ctx.invoke(wallet, wallet_name='diagnostics-test', test_operations=True)
|
||||
diagnostics_data['wallet_creation'] = 'PASS'
|
||||
except Exception as e:
|
||||
diagnostics_data['wallet_creation'] = f'FAIL: {str(e)}'
|
||||
error(f"Wallet creation test failed: {str(e)}")
|
||||
|
||||
# Test 4: Marketplace
|
||||
output("4. Testing marketplace...")
|
||||
try:
|
||||
ctx.invoke(marketplace)
|
||||
diagnostics_data['marketplace'] = 'PASS'
|
||||
except Exception as e:
|
||||
diagnostics_data['marketplace'] = f'FAIL: {str(e)}'
|
||||
error(f"Marketplace test failed: {str(e)}")
|
||||
|
||||
# Generate summary
|
||||
passed_tests = sum(1 for v in diagnostics_data.values() if isinstance(v, str) and v == 'PASS')
|
||||
total_tests = len([k for k in diagnostics_data.keys() if k in ['environment', 'api_connectivity', 'wallet_creation', 'marketplace']])
|
||||
|
||||
diagnostics_data['summary'] = {
|
||||
'total_tests': total_tests,
|
||||
'passed_tests': passed_tests,
|
||||
'failed_tests': total_tests - passed_tests,
|
||||
'success_rate': (passed_tests / total_tests * 100) if total_tests > 0 else 0
|
||||
}
|
||||
|
||||
# Display results
|
||||
output("\n" + "="*50)
|
||||
output("DIAGNOSTICS SUMMARY")
|
||||
output("="*50)
|
||||
output(f"Total Tests: {diagnostics_data['summary']['total_tests']}")
|
||||
output(f"Passed: {diagnostics_data['summary']['passed_tests']}")
|
||||
output(f"Failed: {diagnostics_data['summary']['failed_tests']}")
|
||||
output(f"Success Rate: {diagnostics_data['summary']['success_rate']:.1f}%")
|
||||
|
||||
if diagnostics_data['summary']['success_rate'] == 100:
|
||||
success("✅ All diagnostics passed!")
|
||||
else:
|
||||
warning(f"⚠️ {diagnostics_data['summary']['failed_tests']} test(s) failed")
|
||||
|
||||
# Save to file if requested
|
||||
if output_file:
|
||||
with open(output_file, 'w') as f:
|
||||
json.dump(diagnostics_data, f, indent=2)
|
||||
output(f"Diagnostics saved to: {output_file}")
|
||||
|
||||
|
||||
@test.command()
|
||||
def mock():
|
||||
"""Generate mock data for testing"""
|
||||
|
||||
mock_data = {
|
||||
'wallet': {
|
||||
'name': 'test-wallet',
|
||||
'address': 'aitbc1test123456789abcdef',
|
||||
'balance': 1000.0,
|
||||
'transactions': []
|
||||
},
|
||||
'job': {
|
||||
'id': 'test-job-123',
|
||||
'type': 'ml_inference',
|
||||
'status': 'pending',
|
||||
'requirements': {
|
||||
'gpu_type': 'RTX 3080',
|
||||
'memory_gb': 8,
|
||||
'duration_minutes': 30
|
||||
}
|
||||
},
|
||||
'marketplace': {
|
||||
'offers': [
|
||||
{
|
||||
'id': 'offer-1',
|
||||
'provider': 'test-provider',
|
||||
'gpu_type': 'RTX 3080',
|
||||
'price_per_hour': 0.1,
|
||||
'available': True
|
||||
}
|
||||
]
|
||||
},
|
||||
'blockchain': {
|
||||
'chain_id': 'aitbc-testnet',
|
||||
'block_height': 1000,
|
||||
'network_status': 'active'
|
||||
}
|
||||
}
|
||||
|
||||
output("Mock data for testing:")
|
||||
output(json.dumps(mock_data, indent=2))
|
||||
|
||||
# Save to temp file
|
||||
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
|
||||
json.dump(mock_data, f, indent=2)
|
||||
temp_file = f.name
|
||||
|
||||
output(f"Mock data saved to: {temp_file}")
|
||||
return temp_file
|
||||
498
cli/commands/transfer_control.py
Executable file
498
cli/commands/transfer_control.py
Executable file
@@ -0,0 +1,498 @@
|
||||
"""Advanced transfer control commands for AITBC CLI"""
|
||||
|
||||
import click
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
from datetime import datetime, timedelta
|
||||
from utils import output, error, success, warning
|
||||
|
||||
|
||||
@click.group()
|
||||
def transfer_control():
|
||||
"""Advanced transfer control and limit management commands"""
|
||||
pass
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.option("--wallet", required=True, help="Wallet name or address")
|
||||
@click.option("--max-daily", type=float, help="Maximum daily transfer amount")
|
||||
@click.option("--max-weekly", type=float, help="Maximum weekly transfer amount")
|
||||
@click.option("--max-monthly", type=float, help="Maximum monthly transfer amount")
|
||||
@click.option("--max-single", type=float, help="Maximum single transfer amount")
|
||||
@click.option("--whitelist", help="Comma-separated list of whitelisted addresses")
|
||||
@click.option("--blacklist", help="Comma-separated list of blacklisted addresses")
|
||||
@click.pass_context
|
||||
def set_limit(ctx, wallet: str, max_daily: Optional[float], max_weekly: Optional[float], max_monthly: Optional[float], max_single: Optional[float], whitelist: Optional[str], blacklist: Optional[str]):
|
||||
"""Set transfer limits for a wallet"""
|
||||
|
||||
# Load existing limits
|
||||
limits_file = Path.home() / ".aitbc" / "transfer_limits.json"
|
||||
limits_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
limits = {}
|
||||
if limits_file.exists():
|
||||
with open(limits_file, 'r') as f:
|
||||
limits = json.load(f)
|
||||
|
||||
# Create or update wallet limits
|
||||
wallet_limits = limits.get(wallet, {
|
||||
"wallet": wallet,
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"updated_at": datetime.utcnow().isoformat(),
|
||||
"status": "active"
|
||||
})
|
||||
|
||||
# Update limits
|
||||
if max_daily is not None:
|
||||
wallet_limits["max_daily"] = max_daily
|
||||
if max_weekly is not None:
|
||||
wallet_limits["max_weekly"] = max_weekly
|
||||
if max_monthly is not None:
|
||||
wallet_limits["max_monthly"] = max_monthly
|
||||
if max_single is not None:
|
||||
wallet_limits["max_single"] = max_single
|
||||
|
||||
# Update whitelist and blacklist
|
||||
if whitelist:
|
||||
wallet_limits["whitelist"] = [addr.strip() for addr in whitelist.split(',')]
|
||||
if blacklist:
|
||||
wallet_limits["blacklist"] = [addr.strip() for addr in blacklist.split(',')]
|
||||
|
||||
wallet_limits["updated_at"] = datetime.utcnow().isoformat()
|
||||
|
||||
# Initialize usage tracking
|
||||
if "usage" not in wallet_limits:
|
||||
wallet_limits["usage"] = {
|
||||
"daily": {"amount": 0.0, "count": 0, "reset_at": datetime.utcnow().isoformat()},
|
||||
"weekly": {"amount": 0.0, "count": 0, "reset_at": datetime.utcnow().isoformat()},
|
||||
"monthly": {"amount": 0.0, "count": 0, "reset_at": datetime.utcnow().isoformat()}
|
||||
}
|
||||
|
||||
# Save limits
|
||||
limits[wallet] = wallet_limits
|
||||
with open(limits_file, 'w') as f:
|
||||
json.dump(limits, f, indent=2)
|
||||
|
||||
success(f"Transfer limits set for wallet '{wallet}'")
|
||||
output({
|
||||
"wallet": wallet,
|
||||
"limits": {
|
||||
"max_daily": wallet_limits.get("max_daily"),
|
||||
"max_weekly": wallet_limits.get("max_weekly"),
|
||||
"max_monthly": wallet_limits.get("max_monthly"),
|
||||
"max_single": wallet_limits.get("max_single")
|
||||
},
|
||||
"whitelist_count": len(wallet_limits.get("whitelist", [])),
|
||||
"blacklist_count": len(wallet_limits.get("blacklist", [])),
|
||||
"updated_at": wallet_limits["updated_at"]
|
||||
})
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.option("--wallet", required=True, help="Wallet name or address")
|
||||
@click.option("--amount", type=float, required=True, help="Amount to time-lock")
|
||||
@click.option("--duration", type=int, required=True, help="Lock duration in days")
|
||||
@click.option("--recipient", required=True, help="Recipient address")
|
||||
@click.option("--description", help="Lock description")
|
||||
@click.pass_context
|
||||
def time_lock(ctx, wallet: str, amount: float, duration: int, recipient: str, description: Optional[str]):
|
||||
"""Create a time-locked transfer"""
|
||||
|
||||
# Generate lock ID
|
||||
lock_id = f"lock_{str(int(datetime.utcnow().timestamp()))[-8:]}"
|
||||
|
||||
# Calculate release time
|
||||
release_time = datetime.utcnow() + timedelta(days=duration)
|
||||
|
||||
# Create time lock
|
||||
time_lock = {
|
||||
"lock_id": lock_id,
|
||||
"wallet": wallet,
|
||||
"recipient": recipient,
|
||||
"amount": amount,
|
||||
"duration_days": duration,
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"release_time": release_time.isoformat(),
|
||||
"status": "locked",
|
||||
"description": description or f"Time-locked transfer of {amount} to {recipient}",
|
||||
"released_at": None,
|
||||
"released_amount": 0.0
|
||||
}
|
||||
|
||||
# Store time lock
|
||||
timelocks_file = Path.home() / ".aitbc" / "time_locks.json"
|
||||
timelocks_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
timelocks = {}
|
||||
if timelocks_file.exists():
|
||||
with open(timelocks_file, 'r') as f:
|
||||
timelocks = json.load(f)
|
||||
|
||||
timelocks[lock_id] = time_lock
|
||||
|
||||
with open(timelocks_file, 'w') as f:
|
||||
json.dump(timelocks, f, indent=2)
|
||||
|
||||
success(f"Time-locked transfer created: {lock_id}")
|
||||
output({
|
||||
"lock_id": lock_id,
|
||||
"wallet": wallet,
|
||||
"recipient": recipient,
|
||||
"amount": amount,
|
||||
"duration_days": duration,
|
||||
"release_time": time_lock["release_time"],
|
||||
"status": "locked"
|
||||
})
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.option("--wallet", required=True, help="Wallet name or address")
|
||||
@click.option("--total-amount", type=float, required=True, help="Total amount to vest")
|
||||
@click.option("--duration", type=int, required=True, help="Vesting duration in days")
|
||||
@click.option("--cliff-period", type=int, default=0, help="Cliff period in days before any release")
|
||||
@click.option("--release-interval", type=int, default=30, help="Release interval in days")
|
||||
@click.option("--recipient", required=True, help="Recipient address")
|
||||
@click.option("--description", help="Vesting schedule description")
|
||||
@click.pass_context
|
||||
def vesting_schedule(ctx, wallet: str, total_amount: float, duration: int, cliff_period: int, release_interval: int, recipient: str, description: Optional[str]):
|
||||
"""Create a vesting schedule for token release"""
|
||||
|
||||
# Generate schedule ID
|
||||
schedule_id = f"vest_{str(int(datetime.utcnow().timestamp()))[-8:]}"
|
||||
|
||||
# Calculate vesting schedule
|
||||
start_time = datetime.utcnow() + timedelta(days=cliff_period)
|
||||
end_time = datetime.utcnow() + timedelta(days=duration)
|
||||
|
||||
# Create release events
|
||||
releases = []
|
||||
current_time = start_time
|
||||
remaining_amount = total_amount
|
||||
|
||||
while current_time <= end_time and remaining_amount > 0:
|
||||
releases.append({
|
||||
"release_time": current_time.isoformat(),
|
||||
"amount": total_amount / max(1, (duration - cliff_period) // release_interval),
|
||||
"released": False,
|
||||
"released_at": None
|
||||
})
|
||||
current_time += timedelta(days=release_interval)
|
||||
|
||||
# Create vesting schedule
|
||||
vesting_schedule = {
|
||||
"schedule_id": schedule_id,
|
||||
"wallet": wallet,
|
||||
"recipient": recipient,
|
||||
"total_amount": total_amount,
|
||||
"duration_days": duration,
|
||||
"cliff_period_days": cliff_period,
|
||||
"release_interval_days": release_interval,
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"start_time": start_time.isoformat(),
|
||||
"end_time": end_time.isoformat(),
|
||||
"status": "active",
|
||||
"description": description or f"Vesting {total_amount} over {duration} days",
|
||||
"releases": releases,
|
||||
"total_released": 0.0,
|
||||
"released_count": 0
|
||||
}
|
||||
|
||||
# Store vesting schedule
|
||||
vesting_file = Path.home() / ".aitbc" / "vesting_schedules.json"
|
||||
vesting_file.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
vesting_schedules = {}
|
||||
if vesting_file.exists():
|
||||
with open(vesting_file, 'r') as f:
|
||||
vesting_schedules = json.load(f)
|
||||
|
||||
vesting_schedules[schedule_id] = vesting_schedule
|
||||
|
||||
with open(vesting_file, 'w') as f:
|
||||
json.dump(vesting_schedules, f, indent=2)
|
||||
|
||||
success(f"Vesting schedule created: {schedule_id}")
|
||||
output({
|
||||
"schedule_id": schedule_id,
|
||||
"wallet": wallet,
|
||||
"recipient": recipient,
|
||||
"total_amount": total_amount,
|
||||
"duration_days": duration,
|
||||
"cliff_period_days": cliff_period,
|
||||
"release_count": len(releases),
|
||||
"start_time": vesting_schedule["start_time"],
|
||||
"end_time": vesting_schedule["end_time"]
|
||||
})
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.option("--wallet", help="Filter by wallet")
|
||||
@click.option("--status", help="Filter by status")
|
||||
@click.pass_context
|
||||
def audit_trail(ctx, wallet: Optional[str], status: Optional[str]):
|
||||
"""View complete transfer audit trail"""
|
||||
|
||||
# Collect all transfer-related data
|
||||
audit_data = {
|
||||
"limits": {},
|
||||
"time_locks": {},
|
||||
"vesting_schedules": {},
|
||||
"transfers": {},
|
||||
"generated_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
# Load transfer limits
|
||||
limits_file = Path.home() / ".aitbc" / "transfer_limits.json"
|
||||
if limits_file.exists():
|
||||
with open(limits_file, 'r') as f:
|
||||
limits = json.load(f)
|
||||
|
||||
for wallet_id, limit_data in limits.items():
|
||||
if wallet and wallet_id != wallet:
|
||||
continue
|
||||
|
||||
audit_data["limits"][wallet_id] = {
|
||||
"limits": {
|
||||
"max_daily": limit_data.get("max_daily"),
|
||||
"max_weekly": limit_data.get("max_weekly"),
|
||||
"max_monthly": limit_data.get("max_monthly"),
|
||||
"max_single": limit_data.get("max_single")
|
||||
},
|
||||
"usage": limit_data.get("usage", {}),
|
||||
"whitelist": limit_data.get("whitelist", []),
|
||||
"blacklist": limit_data.get("blacklist", []),
|
||||
"created_at": limit_data.get("created_at"),
|
||||
"updated_at": limit_data.get("updated_at")
|
||||
}
|
||||
|
||||
# Load time locks
|
||||
timelocks_file = Path.home() / ".aitbc" / "time_locks.json"
|
||||
if timelocks_file.exists():
|
||||
with open(timelocks_file, 'r') as f:
|
||||
timelocks = json.load(f)
|
||||
|
||||
for lock_id, lock_data in timelocks.items():
|
||||
if wallet and lock_data.get("wallet") != wallet:
|
||||
continue
|
||||
if status and lock_data.get("status") != status:
|
||||
continue
|
||||
|
||||
audit_data["time_locks"][lock_id] = lock_data
|
||||
|
||||
# Load vesting schedules
|
||||
vesting_file = Path.home() / ".aitbc" / "vesting_schedules.json"
|
||||
if vesting_file.exists():
|
||||
with open(vesting_file, 'r') as f:
|
||||
vesting_schedules = json.load(f)
|
||||
|
||||
for schedule_id, schedule_data in vesting_schedules.items():
|
||||
if wallet and schedule_data.get("wallet") != wallet:
|
||||
continue
|
||||
if status and schedule_data.get("status") != status:
|
||||
continue
|
||||
|
||||
audit_data["vesting_schedules"][schedule_id] = schedule_data
|
||||
|
||||
# Generate summary
|
||||
audit_data["summary"] = {
|
||||
"total_wallets_with_limits": len(audit_data["limits"]),
|
||||
"total_time_locks": len(audit_data["time_locks"]),
|
||||
"total_vesting_schedules": len(audit_data["vesting_schedules"]),
|
||||
"filter_criteria": {
|
||||
"wallet": wallet or "all",
|
||||
"status": status or "all"
|
||||
}
|
||||
}
|
||||
|
||||
output(audit_data)
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.option("--wallet", help="Filter by wallet")
|
||||
@click.pass_context
|
||||
def status(ctx, wallet: Optional[str]):
|
||||
"""Get transfer control status"""
|
||||
|
||||
status_data = {
|
||||
"wallet_limits": {},
|
||||
"active_time_locks": {},
|
||||
"active_vesting_schedules": {},
|
||||
"generated_at": datetime.utcnow().isoformat()
|
||||
}
|
||||
|
||||
# Load and filter limits
|
||||
limits_file = Path.home() / ".aitbc" / "transfer_limits.json"
|
||||
if limits_file.exists():
|
||||
with open(limits_file, 'r') as f:
|
||||
limits = json.load(f)
|
||||
|
||||
for wallet_id, limit_data in limits.items():
|
||||
if wallet and wallet_id != wallet:
|
||||
continue
|
||||
|
||||
# Check usage against limits
|
||||
daily_usage = limit_data.get("usage", {}).get("daily", {})
|
||||
weekly_usage = limit_data.get("usage", {}).get("weekly", {})
|
||||
monthly_usage = limit_data.get("usage", {}).get("monthly", {})
|
||||
|
||||
status_data["wallet_limits"][wallet_id] = {
|
||||
"limits": {
|
||||
"max_daily": limit_data.get("max_daily"),
|
||||
"max_weekly": limit_data.get("max_weekly"),
|
||||
"max_monthly": limit_data.get("max_monthly"),
|
||||
"max_single": limit_data.get("max_single")
|
||||
},
|
||||
"current_usage": {
|
||||
"daily": daily_usage,
|
||||
"weekly": weekly_usage,
|
||||
"monthly": monthly_usage
|
||||
},
|
||||
"status": limit_data.get("status"),
|
||||
"whitelist_count": len(limit_data.get("whitelist", [])),
|
||||
"blacklist_count": len(limit_data.get("blacklist", []))
|
||||
}
|
||||
|
||||
# Load active time locks
|
||||
timelocks_file = Path.home() / ".aitbc" / "time_locks.json"
|
||||
if timelocks_file.exists():
|
||||
with open(timelocks_file, 'r') as f:
|
||||
timelocks = json.load(f)
|
||||
|
||||
for lock_id, lock_data in timelocks.items():
|
||||
if wallet and lock_data.get("wallet") != wallet:
|
||||
continue
|
||||
if lock_data.get("status") == "locked":
|
||||
status_data["active_time_locks"][lock_id] = lock_data
|
||||
|
||||
# Load active vesting schedules
|
||||
vesting_file = Path.home() / ".aitbc" / "vesting_schedules.json"
|
||||
if vesting_file.exists():
|
||||
with open(vesting_file, 'r') as f:
|
||||
vesting_schedules = json.load(f)
|
||||
|
||||
for schedule_id, schedule_data in vesting_schedules.items():
|
||||
if wallet and schedule_data.get("wallet") != wallet:
|
||||
continue
|
||||
if schedule_data.get("status") == "active":
|
||||
status_data["active_vesting_schedules"][schedule_id] = schedule_data
|
||||
|
||||
# Calculate totals
|
||||
status_data["summary"] = {
|
||||
"wallets_with_limits": len(status_data["wallet_limits"]),
|
||||
"active_time_locks": len(status_data["active_time_locks"]),
|
||||
"active_vesting_schedules": len(status_data["active_vesting_schedules"]),
|
||||
"filter_wallet": wallet or "all"
|
||||
}
|
||||
|
||||
output(status_data)
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.argument("lock_id")
|
||||
@click.pass_context
|
||||
def release_time_lock(ctx, lock_id: str):
|
||||
"""Release a time-locked transfer (if time has passed)"""
|
||||
|
||||
timelocks_file = Path.home() / ".aitbc" / "time_locks.json"
|
||||
if not timelocks_file.exists():
|
||||
error("No time-locked transfers found.")
|
||||
return
|
||||
|
||||
with open(timelocks_file, 'r') as f:
|
||||
timelocks = json.load(f)
|
||||
|
||||
if lock_id not in timelocks:
|
||||
error(f"Time lock '{lock_id}' not found.")
|
||||
return
|
||||
|
||||
lock_data = timelocks[lock_id]
|
||||
|
||||
# Check if lock can be released
|
||||
release_time = datetime.fromisoformat(lock_data["release_time"])
|
||||
current_time = datetime.utcnow()
|
||||
|
||||
if current_time < release_time:
|
||||
error(f"Time lock cannot be released until {release_time.isoformat()}")
|
||||
return
|
||||
|
||||
# Release the lock
|
||||
lock_data["status"] = "released"
|
||||
lock_data["released_at"] = current_time.isoformat()
|
||||
lock_data["released_amount"] = lock_data["amount"]
|
||||
|
||||
# Save updated timelocks
|
||||
with open(timelocks_file, 'w') as f:
|
||||
json.dump(timelocks, f, indent=2)
|
||||
|
||||
success(f"Time lock '{lock_id}' released")
|
||||
output({
|
||||
"lock_id": lock_id,
|
||||
"status": "released",
|
||||
"released_at": lock_data["released_at"],
|
||||
"released_amount": lock_data["released_amount"],
|
||||
"recipient": lock_data["recipient"]
|
||||
})
|
||||
|
||||
|
||||
@transfer_control.command()
|
||||
@click.argument("schedule_id")
|
||||
@click.pass_context
|
||||
def release_vesting(ctx, schedule_id: str):
|
||||
"""Release available vesting amounts"""
|
||||
|
||||
vesting_file = Path.home() / ".aitbc" / "vesting_schedules.json"
|
||||
if not vesting_file.exists():
|
||||
error("No vesting schedules found.")
|
||||
return
|
||||
|
||||
with open(vesting_file, 'r') as f:
|
||||
vesting_schedules = json.load(f)
|
||||
|
||||
if schedule_id not in vesting_schedules:
|
||||
error(f"Vesting schedule '{schedule_id}' not found.")
|
||||
return
|
||||
|
||||
schedule = vesting_schedules[schedule_id]
|
||||
current_time = datetime.utcnow()
|
||||
|
||||
# Find available releases
|
||||
available_releases = []
|
||||
total_available = 0.0
|
||||
|
||||
for release in schedule["releases"]:
|
||||
if not release["released"]:
|
||||
release_time = datetime.fromisoformat(release["release_time"])
|
||||
if current_time >= release_time:
|
||||
available_releases.append(release)
|
||||
total_available += release["amount"]
|
||||
|
||||
if not available_releases:
|
||||
warning("No vesting amounts available for release at this time.")
|
||||
return
|
||||
|
||||
# Mark releases as released
|
||||
for release in available_releases:
|
||||
release["released"] = True
|
||||
release["released_at"] = current_time.isoformat()
|
||||
|
||||
# Update schedule totals
|
||||
schedule["total_released"] += total_available
|
||||
schedule["released_count"] += len(available_releases)
|
||||
|
||||
# Check if schedule is complete
|
||||
if schedule["released_count"] == len(schedule["releases"]):
|
||||
schedule["status"] = "completed"
|
||||
|
||||
# Save updated schedules
|
||||
with open(vesting_file, 'w') as f:
|
||||
json.dump(vesting_schedules, f, indent=2)
|
||||
|
||||
success(f"Released {total_available} from vesting schedule '{schedule_id}'")
|
||||
output({
|
||||
"schedule_id": schedule_id,
|
||||
"released_amount": total_available,
|
||||
"releases_count": len(available_releases),
|
||||
"total_released": schedule["total_released"],
|
||||
"schedule_status": schedule["status"]
|
||||
})
|
||||
2385
cli/commands/wallet.py
Executable file
2385
cli/commands/wallet.py
Executable file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user