Release v0.1.0 - Early Testing Phase
- Agent-first architecture implementation - Complete agent documentation and workflows - GitHub Packages publishing infrastructure - Debian 13 + Python 3.13 support - NVIDIA GPU resource sharing capabilities - Swarm intelligence coordination - Zero-knowledge proof verification - Automated onboarding and monitoring
This commit is contained in:
473
scripts/onboarding/auto-onboard.py
Executable file
473
scripts/onboarding/auto-onboard.py
Executable file
@@ -0,0 +1,473 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
auto-onboard.py - Automated onboarding for AITBC agents
|
||||
|
||||
This script provides automated onboarding for new agents joining the AITBC network.
|
||||
It handles capability assessment, agent type recommendation, registration, and swarm integration.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class AgentOnboarder:
    """Automated agent onboarding system.

    Runs the eight-step onboarding flow for a new AITBC agent
    (environment check, capability assessment, type recommendation,
    creation, registration, swarm integration, participation, report)
    and records progress and errors in ``self.session``.
    """

    def __init__(self):
        # Mutable per-run session state; inspected by the report step.
        self.session = {
            'start_time': self._utcnow(),
            'steps_completed': [],
            'errors': [],
            'agent': None
        }

    @staticmethod
    def _utcnow():
        """Return a timezone-aware UTC timestamp.

        ``datetime.utcnow()`` is deprecated since Python 3.12, and this
        script itself requires Python 3.13+, so use an aware datetime.
        """
        from datetime import timezone
        return datetime.now(timezone.utc)

    async def run_auto_onboarding(self):
        """Run the complete automated onboarding flow.

        Returns:
            bool: True when every step succeeded, False on failure
            (the error is also appended to ``self.session['errors']``).
        """
        try:
            logger.info("🤖 Starting AITBC Agent Network Automated Onboarding")
            logger.info("=" * 60)

            # Step 1: Environment Check
            await self.check_environment()

            # Step 2: Capability Assessment
            capabilities = await self.assess_capabilities()

            # Step 3: Agent Type Recommendation
            agent_type = await self.recommend_agent_type(capabilities)

            # Step 4: Agent Creation
            agent = await self.create_agent(agent_type, capabilities)

            # Step 5: Network Registration
            await self.register_agent(agent)

            # Step 6: Swarm Integration (best effort — see join_swarm)
            await self.join_swarm(agent, agent_type)

            # Step 7: Start Participation (best effort)
            await self.start_participation(agent)

            # Step 8: Generate Report
            report = await self.generate_onboarding_report(agent)

            logger.info("🎉 Automated onboarding completed successfully!")
            self.print_success_summary(agent, report)

            return True

        except Exception as e:
            logger.error(f"❌ Onboarding failed: {e}")
            self.session['errors'].append(str(e))
            return False

    async def check_environment(self):
        """Verify Python version, required packages, and API reachability.

        Raises:
            Exception: when the interpreter is older than 3.13, a
            package install fails, or the health endpoint is unreachable.
        """
        logger.info("📋 Step 1: Checking environment requirements...")

        try:
            # Check Python version
            python_version = sys.version_info
            if python_version < (3, 13):
                raise Exception(f"Python 3.13+ required, found {python_version.major}.{python_version.minor}")

            # Check required packages. psutil is included because the
            # capability-assessment step imports it unconditionally.
            # NOTE(review): auto-installing via pip mutates the host
            # environment; acceptable for an onboarding script.
            required_packages = ['torch', 'numpy', 'requests', 'psutil']
            for package in required_packages:
                try:
                    __import__(package)
                except ImportError:
                    logger.warning(f"⚠️ Package {package} not found, installing...")
                    subprocess.run([sys.executable, '-m', 'pip', 'install', package], check=True)

            # Check network connectivity
            import requests
            try:
                response = requests.get('https://api.aitbc.bubuit.net/v1/health', timeout=10)
                if response.status_code != 200:
                    raise Exception("Network connectivity check failed")
            except Exception as e:
                raise Exception(f"Network connectivity issue: {e}")

            logger.info("✅ Environment check passed")
            self.session['steps_completed'].append('environment_check')

        except Exception as e:
            logger.error(f"❌ Environment check failed: {e}")
            raise

    async def assess_capabilities(self):
        """Probe GPU/CPU/memory/disk/network and derive specializations.

        Returns:
            dict: capability flags and figures consumed by
            ``recommend_agent_type`` (always includes ``cpu_count``,
            ``memory_total``, ``disk_space``, ``network_latency``,
            ``gpu_available`` and ``specializations``).
        """
        logger.info("🔍 Step 2: Assessing agent capabilities...")

        capabilities = {}

        # Check GPU capabilities via PyTorch if it is importable.
        try:
            import torch
            if torch.cuda.is_available():
                capabilities['gpu_available'] = True
                capabilities['gpu_memory'] = torch.cuda.get_device_properties(0).total_memory // 1024 // 1024
                capabilities['gpu_count'] = torch.cuda.device_count()
                capabilities['cuda_version'] = torch.version.cuda
                logger.info(f"✅ GPU detected: {capabilities['gpu_memory']}MB memory")
            else:
                capabilities['gpu_available'] = False
                logger.info("ℹ️ No GPU detected")
        except ImportError:
            capabilities['gpu_available'] = False
            logger.warning("⚠️ PyTorch not available for GPU detection")

        # Check CPU capabilities
        import psutil
        capabilities['cpu_count'] = psutil.cpu_count()
        capabilities['memory_total'] = psutil.virtual_memory().total // 1024 // 1024  # MB
        logger.info(f"✅ CPU: {capabilities['cpu_count']} cores, Memory: {capabilities['memory_total']}MB")

        # Check storage
        capabilities['disk_space'] = psutil.disk_usage('/').free // 1024 // 1024  # MB
        logger.info(f"✅ Available disk space: {capabilities['disk_space']}MB")

        # Check network bandwidth (simplified round-trip latency probe).
        # FIX: `requests` was never imported in this function (only
        # locally inside check_environment), which raised NameError here.
        import requests
        try:
            start_time = self._utcnow()
            requests.get('https://api.aitbc.bubuit.net/v1/health', timeout=5)
            latency = (self._utcnow() - start_time).total_seconds()
            capabilities['network_latency'] = latency
            logger.info(f"✅ Network latency: {latency:.2f}s")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.
            capabilities['network_latency'] = None
            logger.warning("⚠️ Could not measure network latency")

        # Determine specialization
        capabilities['specializations'] = []
        if capabilities.get('gpu_available'):
            capabilities['specializations'].append('gpu_computing')
        if capabilities['memory_total'] > 8192:  # >8GB
            capabilities['specializations'].append('large_models')
        if capabilities['cpu_count'] >= 8:
            capabilities['specializations'].append('parallel_processing')

        logger.info(f"✅ Capabilities assessed: {len(capabilities['specializations'])} specializations")
        self.session['steps_completed'].append('capability_assessment')

        return capabilities

    async def recommend_agent_type(self, capabilities):
        """Score each agent type against the capabilities and pick the best.

        Args:
            capabilities: dict produced by ``assess_capabilities``.

        Returns:
            str: one of 'compute_provider', 'compute_consumer',
            'platform_builder', 'swarm_coordinator'.
        """
        logger.info("🎯 Step 3: Determining optimal agent type...")

        # Decision logic: additive scores out of a nominal 100 per type.
        score = {}

        # Compute Provider Score — dominated by GPU presence and size.
        provider_score = 0
        if capabilities.get('gpu_available'):
            provider_score += 40
            if capabilities['gpu_memory'] >= 8192:  # >=8GB
                provider_score += 20
            if capabilities['gpu_memory'] >= 16384:  # >=16GB
                provider_score += 20
        if capabilities['network_latency'] and capabilities['network_latency'] < 0.1:
            provider_score += 10
        score['compute_provider'] = provider_score

        # Compute Consumer Score
        consumer_score = 30  # Base score for being able to consume
        if capabilities['memory_total'] >= 4096:
            consumer_score += 20
        if capabilities['network_latency'] and capabilities['network_latency'] < 0.2:
            consumer_score += 10
        score['compute_consumer'] = consumer_score

        # Platform Builder Score
        builder_score = 20  # Base score
        if capabilities['disk_space'] >= 10240:  # >=10GB
            builder_score += 20
        if capabilities['memory_total'] >= 4096:
            builder_score += 15
        if capabilities['cpu_count'] >= 4:
            builder_score += 15
        score['platform_builder'] = builder_score

        # Swarm Coordinator Score — latency-sensitive role.
        coordinator_score = 25  # Base score
        if capabilities['network_latency'] and capabilities['network_latency'] < 0.15:
            coordinator_score += 25
        if capabilities['cpu_count'] >= 4:
            coordinator_score += 15
        if capabilities['memory_total'] >= 2048:
            coordinator_score += 10
        score['swarm_coordinator'] = coordinator_score

        # Find best match
        best_type = max(score, key=score.get)
        confidence = score[best_type] / 100

        logger.info(f"✅ Recommended agent type: {best_type} (confidence: {confidence:.2%})")
        logger.info(f"   Scores: {score}")

        self.session['steps_completed'].append('agent_type_recommendation')
        return best_type

    async def create_agent(self, agent_type, capabilities):
        """Instantiate an SDK agent of the recommended type.

        Args:
            agent_type: type string from ``recommend_agent_type``.
            capabilities: dict from ``assess_capabilities``.

        Returns:
            The created SDK agent instance.

        Raises:
            Exception: for an unknown agent type or SDK failure.
        """
        logger.info(f"🔐 Step 4: Creating {agent_type} agent...")

        try:
            # Import here to avoid circular imports.
            # TODO(review): hard-coded developer path — make configurable.
            sys.path.append('/home/oib/windsurf/aitbc/packages/py/aitbc-agent-sdk')

            if agent_type == 'compute_provider':
                from aitbc_agent import ComputeProvider
                agent = ComputeProvider.register(
                    agent_name=f"auto-provider-{self._utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "compute_type": "inference",
                        "gpu_memory": capabilities.get('gpu_memory', 0),
                        "performance_score": 0.9
                    },
                    pricing_model={"base_rate": 0.1}
                )

            elif agent_type == 'compute_consumer':
                from aitbc_agent import ComputeConsumer
                agent = ComputeConsumer.create(
                    agent_name=f"auto-consumer-{self._utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "compute_type": "inference",
                        "task_requirements": {"min_performance": 0.8}
                    }
                )

            elif agent_type == 'platform_builder':
                from aitbc_agent import PlatformBuilder
                agent = PlatformBuilder.create(
                    agent_name=f"auto-builder-{self._utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "specializations": capabilities.get('specializations', [])
                    }
                )

            elif agent_type == 'swarm_coordinator':
                from aitbc_agent import SwarmCoordinator
                agent = SwarmCoordinator.create(
                    agent_name=f"auto-coordinator-{self._utcnow().strftime('%Y%m%d%H%M%S')}",
                    capabilities={
                        "specialization": "load_balancing",
                        "analytical_skills": "high"
                    }
                )
            else:
                raise Exception(f"Unknown agent type: {agent_type}")

            logger.info(f"✅ Agent created: {agent.identity.id}")
            self.session['agent'] = agent
            self.session['steps_completed'].append('agent_creation')

            return agent

        except Exception as e:
            logger.error(f"❌ Agent creation failed: {e}")
            raise

    async def register_agent(self, agent):
        """Register the agent on the AITBC network.

        Raises:
            Exception: when the SDK reports a failed registration.
        """
        logger.info("🌐 Step 5: Registering on AITBC network...")

        try:
            success = await agent.register()
            if not success:
                raise Exception("Registration failed")

            logger.info(f"✅ Agent registered successfully")
            self.session['steps_completed'].append('network_registration')

        except Exception as e:
            logger.error(f"❌ Registration failed: {e}")
            raise

    async def join_swarm(self, agent, agent_type):
        """Join the swarm matching the agent type (best effort).

        Failures are logged but not raised: the agent can still
        function without swarm membership.
        """
        logger.info("🐝 Step 6: Joining swarm intelligence...")

        try:
            # Per-type swarm selection and membership configuration.
            swarm_config = {
                'compute_provider': {
                    'swarm_type': 'load_balancing',
                    'config': {
                        'role': 'resource_provider',
                        'contribution_level': 'medium',
                        'data_sharing': True
                    }
                },
                'compute_consumer': {
                    'swarm_type': 'pricing',
                    'config': {
                        'role': 'market_participant',
                        'contribution_level': 'low',
                        'data_sharing': True
                    }
                },
                'platform_builder': {
                    'swarm_type': 'innovation',
                    'config': {
                        'role': 'contributor',
                        'contribution_level': 'medium',
                        'data_sharing': True
                    }
                },
                'swarm_coordinator': {
                    'swarm_type': 'load_balancing',
                    'config': {
                        'role': 'coordinator',
                        'contribution_level': 'high',
                        'data_sharing': True
                    }
                }
            }

            swarm_info = swarm_config.get(agent_type)
            if not swarm_info:
                raise Exception(f"No swarm configuration for agent type: {agent_type}")

            joined = await agent.join_swarm(swarm_info['swarm_type'], swarm_info['config'])
            if not joined:
                raise Exception("Swarm join failed")

            logger.info(f"✅ Joined {swarm_info['swarm_type']} swarm")
            self.session['steps_completed'].append('swarm_integration')

        except Exception as e:
            logger.error(f"❌ Swarm integration failed: {e}")
            # Don't fail completely - agent can still function without swarm
            logger.warning("⚠️ Continuing without swarm integration")

    async def start_participation(self, agent):
        """Start contributing to the network (best effort, never raises)."""
        logger.info("🚀 Step 7: Starting network participation...")

        try:
            await agent.start_contribution()
            logger.info("✅ Agent participation started")
            self.session['steps_completed'].append('participation_started')

        except Exception as e:
            logger.error(f"❌ Failed to start participation: {e}")
            # Don't fail completely
            logger.warning("⚠️ Agent can still function manually")

    async def generate_onboarding_report(self, agent):
        """Build the onboarding report dict and persist it to /tmp.

        Returns:
            dict: the report (also written to
            ``/tmp/aitbc-onboarding-<agent_id>.json`` for the monitor).
        """
        logger.info("📊 Step 8: Generating onboarding report...")

        report = {
            'onboarding': {
                'timestamp': self._utcnow().isoformat(),
                'duration_minutes': (self._utcnow() - self.session['start_time']).total_seconds() / 60,
                'status': 'success',
                'agent_id': agent.identity.id,
                'agent_name': agent.identity.name,
                'agent_address': agent.identity.address,
                'steps_completed': self.session['steps_completed'],
                'errors': self.session['errors']
            },
            'agent_capabilities': {
                'gpu_available': agent.capabilities.gpu_memory > 0,
                'specialization': agent.capabilities.compute_type,
                'performance_score': agent.capabilities.performance_score
            },
            'network_status': {
                'registered': agent.registered,
                'swarm_joined': len(agent.joined_swarms) > 0 if hasattr(agent, 'joined_swarms') else False,
                'participating': True
            }
        }

        # Save report to file (consumed by onboarding-monitor.py).
        report_file = f"/tmp/aitbc-onboarding-{agent.identity.id}.json"
        with open(report_file, 'w') as f:
            json.dump(report, f, indent=2)

        logger.info(f"✅ Report saved to: {report_file}")
        self.session['steps_completed'].append('report_generated')

        return report

    def print_success_summary(self, agent, report):
        """Print a human-readable success summary to stdout."""
        print("\n" + "=" * 60)
        print("🎉 AUTOMATED ONBOARDING COMPLETED SUCCESSFULLY!")
        print("=" * 60)
        print()
        print("🤖 AGENT INFORMATION:")
        print(f"   ID: {agent.identity.id}")
        print(f"   Name: {agent.identity.name}")
        print(f"   Address: {agent.identity.address}")
        print(f"   Type: {agent.capabilities.compute_type}")
        print()
        print("📊 ONBOARDING SUMMARY:")
        print(f"   Duration: {report['onboarding']['duration_minutes']:.1f} minutes")
        # FIX: the flow tracks 8 steps, not 7 (the old "/7" denominator
        # made a fully successful run look incomplete).
        print(f"   Steps Completed: {len(report['onboarding']['steps_completed'])}/8")
        print(f"   Status: {report['onboarding']['status']}")
        print()
        print("🌐 NETWORK STATUS:")
        print(f"   Registered: {'✅' if report['network_status']['registered'] else '❌'}")
        print(f"   Swarm Joined: {'✅' if report['network_status']['swarm_joined'] else '❌'}")
        print(f"   Participating: {'✅' if report['network_status']['participating'] else '❌'}")
        print()
        print("🔗 USEFUL LINKS:")
        print(f"   Agent Dashboard: https://aitbc.bubuit.net/agents/{agent.identity.id}")
        print(f"   Documentation: https://aitbc.bubuit.net/docs/11_agents/")
        print(f"   API Reference: https://aitbc.bubuit.net/docs/agents/agent-api-spec.json")
        print(f"   Community: https://discord.gg/aitbc-agents")
        print()
        print("🚀 NEXT STEPS:")

        if agent.capabilities.compute_type == 'inference' and agent.capabilities.gpu_memory > 0:
            print("   1. Monitor your GPU utilization and earnings")
            print("   2. Adjust pricing based on market demand")
            print("   3. Build reputation through reliability")
        else:
            print("   1. Submit your first computational job")
            print("   2. Monitor job completion and costs")
            print("   3. Participate in swarm intelligence")

        print("   4. Check your agent dashboard regularly")
        print("   5. Join the community Discord for support")
        print()
        print("💾 Session data saved to local files")
        print("   📊 Report: /tmp/aitbc-onboarding-*.json")
        print("   🔐 Keys: ~/.aitbc/agent_keys/")
        print()
        print("🎊 Welcome to the AITBC Agent Network!")
|
||||
|
||||
def main():
    """CLI entry point: run the automated onboarding flow.

    Exits with status 0 on success and 1 on failure or interruption.
    """
    onboarder = AgentOnboarder()
    exit_code = 1

    try:
        if asyncio.run(onboarder.run_auto_onboarding()):
            exit_code = 0
    except KeyboardInterrupt:
        print("\n⚠️ Onboarding interrupted by user")
    except Exception as e:
        logger.error(f"Fatal error: {e}")

    sys.exit(exit_code)


if __name__ == "__main__":
    main()
|
||||
424
scripts/onboarding/onboarding-monitor.py
Executable file
424
scripts/onboarding/onboarding-monitor.py
Executable file
@@ -0,0 +1,424 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
onboarding-monitor.py - Monitor agent onboarding success and performance
|
||||
|
||||
This script monitors the success rate of agent onboarding, tracks metrics,
|
||||
and provides insights for improving the onboarding process.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
import requests
|
||||
from collections import defaultdict
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class OnboardingMonitor:
    """Monitor agent onboarding metrics and performance.

    Aggregates per-run onboarding reports (written by auto-onboard.py
    to /tmp) into success/failure metrics, persists them as JSON, and
    renders a text dashboard / JSON report / CSV export.
    """

    def __init__(self):
        # Aggregated counters. defaultdicts let analysis code increment
        # unseen keys without pre-initialising them.
        self.metrics = {
            'total_onboardings': 0,
            'successful_onboardings': 0,
            'failed_onboardings': 0,
            'agent_type_distribution': defaultdict(int),
            'completion_times': [],
            'failure_points': defaultdict(int),
            'daily_stats': defaultdict(dict),
            'error_patterns': defaultdict(int)
        }
        # Agent ids already folded into the metrics, so re-scanning the
        # same report files on later monitoring cycles is idempotent.
        self._seen_agent_ids = set()

    def load_existing_data(self):
        """Load previously persisted metrics from disk, if present."""
        data_file = Path('/tmp/aitbc-onboarding-metrics.json')
        if data_file.exists():
            try:
                with open(data_file, 'r') as f:
                    data = json.load(f)
                self.metrics.update(data)
                # FIX: JSON round-trips defaultdicts as plain dicts, and
                # `update` used to replace them, so later `+= 1` updates
                # on unseen keys raised KeyError. Re-wrap after loading.
                for key in ('agent_type_distribution', 'failure_points', 'error_patterns'):
                    self.metrics[key] = defaultdict(int, self.metrics[key])
                self.metrics['daily_stats'] = defaultdict(dict, self.metrics['daily_stats'])
                logger.info(f"Loaded existing metrics: {data.get('total_onboardings', 0)} onboardings")
            except Exception as e:
                logger.error(f"Failed to load existing data: {e}")

    def save_metrics(self):
        """Persist current metrics to disk as JSON (best effort)."""
        try:
            data_file = Path('/tmp/aitbc-onboarding-metrics.json')
            with open(data_file, 'w') as f:
                json.dump(dict(self.metrics), f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save metrics: {e}")

    def scan_onboarding_reports(self):
        """Read every onboarding report file from /tmp.

        Returns:
            list[dict]: parsed report payloads (unreadable files are
            skipped with an error log).
        """
        reports = []
        report_dir = Path('/tmp')

        for report_file in report_dir.glob('aitbc-onboarding-*.json'):
            try:
                with open(report_file, 'r') as f:
                    report = json.load(f)
                reports.append(report)
            except Exception as e:
                logger.error(f"Failed to read report {report_file}: {e}")

        return reports

    def analyze_reports(self, reports):
        """Fold onboarding reports into the aggregated metrics.

        Reports carrying an agent_id that was already counted are
        skipped, making repeated scans of the same files idempotent.
        """
        for report in reports:
            try:
                onboarding = report.get('onboarding', {})

                # FIX: the monitor loop re-reads every report file each
                # cycle, which previously double-counted all metrics.
                agent_id = onboarding.get('agent_id')
                if agent_id is not None:
                    if agent_id in self._seen_agent_ids:
                        continue
                    self._seen_agent_ids.add(agent_id)

                # Update basic metrics
                self.metrics['total_onboardings'] += 1

                if onboarding.get('status') == 'success':
                    self.metrics['successful_onboardings'] += 1

                    # Track completion time
                    duration = onboarding.get('duration_minutes', 0)
                    self.metrics['completion_times'].append(duration)

                    # Track agent type distribution
                    agent_type = self.extract_agent_type(report)
                    if agent_type:
                        self.metrics['agent_type_distribution'][agent_type] += 1

                    # Track daily stats. FIX: use ISO date strings as
                    # keys — datetime.date keys are not JSON-serializable
                    # and made every save_metrics call fail.
                    date = datetime.fromisoformat(onboarding['timestamp']).date().isoformat()
                    self.metrics['daily_stats'][date]['successful'] = \
                        self.metrics['daily_stats'][date].get('successful', 0) + 1
                    self.metrics['daily_stats'][date]['total'] = \
                        self.metrics['daily_stats'][date].get('total', 0) + 1

                else:
                    self.metrics['failed_onboardings'] += 1

                    # Track which expected steps never completed.
                    steps_completed = onboarding.get('steps_completed', [])
                    expected_steps = ['environment_check', 'capability_assessment',
                                      'agent_type_recommendation', 'agent_creation',
                                      'network_registration', 'swarm_integration',
                                      'participation_started', 'report_generated']

                    for step in expected_steps:
                        if step not in steps_completed:
                            self.metrics['failure_points'][step] += 1

                    # Track errors
                    for error in onboarding.get('errors', []):
                        self.metrics['error_patterns'][error] += 1

                    # Track daily failures (ISO string keys, see above).
                    date = datetime.fromisoformat(onboarding['timestamp']).date().isoformat()
                    self.metrics['daily_stats'][date]['failed'] = \
                        self.metrics['daily_stats'][date].get('failed', 0) + 1
                    self.metrics['daily_stats'][date]['total'] = \
                        self.metrics['daily_stats'][date].get('total', 0) + 1

            except Exception as e:
                logger.error(f"Failed to analyze report: {e}")

    def extract_agent_type(self, report):
        """Map a report's compute specialization to an agent type.

        Returns:
            str: the agent type, or 'unknown' when it cannot be derived.
        """
        try:
            agent_capabilities = report.get('agent_capabilities', {})
            compute_type = agent_capabilities.get('specialization')

            # Map specialization to agent type
            type_mapping = {
                'inference': 'compute_provider',
                'training': 'compute_provider',
                'processing': 'compute_consumer',
                'coordination': 'swarm_coordinator',
                'development': 'platform_builder'
            }

            return type_mapping.get(compute_type, 'unknown')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.
            return 'unknown'

    def calculate_metrics(self):
        """Compute derived metrics from the raw counters.

        Returns:
            dict: success_rate (%), avg_completion_time (minutes),
            most_common_failure, most_common_error, and
            agent_type_percentages.
        """
        metrics = {}

        # Success rate
        if self.metrics['total_onboardings'] > 0:
            metrics['success_rate'] = (self.metrics['successful_onboardings'] /
                                       self.metrics['total_onboardings']) * 100
        else:
            metrics['success_rate'] = 0

        # Average completion time
        if self.metrics['completion_times']:
            metrics['avg_completion_time'] = sum(self.metrics['completion_times']) / len(self.metrics['completion_times'])
        else:
            metrics['avg_completion_time'] = 0

        # Most common failure point
        if self.metrics['failure_points']:
            metrics['most_common_failure'] = max(self.metrics['failure_points'],
                                                 key=self.metrics['failure_points'].get)
        else:
            metrics['most_common_failure'] = 'none'

        # Most common error
        if self.metrics['error_patterns']:
            metrics['most_common_error'] = max(self.metrics['error_patterns'],
                                               key=self.metrics['error_patterns'].get)
        else:
            metrics['most_common_error'] = 'none'

        # Agent type distribution percentages
        total_agents = sum(self.metrics['agent_type_distribution'].values())
        if total_agents > 0:
            metrics['agent_type_percentages'] = {
                agent_type: (count / total_agents) * 100
                for agent_type, count in self.metrics['agent_type_distribution'].items()
            }
        else:
            metrics['agent_type_percentages'] = {}

        return metrics

    def generate_report(self):
        """Build a comprehensive JSON-serializable onboarding report."""
        metrics = self.calculate_metrics()

        report = {
            'timestamp': datetime.utcnow().isoformat(),
            'summary': {
                'total_onboardings': self.metrics['total_onboardings'],
                'successful_onboardings': self.metrics['successful_onboardings'],
                'failed_onboardings': self.metrics['failed_onboardings'],
                'success_rate': metrics['success_rate'],
                'avg_completion_time_minutes': metrics['avg_completion_time']
            },
            'agent_type_distribution': dict(self.metrics['agent_type_distribution']),
            'agent_type_percentages': metrics['agent_type_percentages'],
            'failure_analysis': {
                'most_common_failure_point': metrics['most_common_failure'],
                'failure_points': dict(self.metrics['failure_points']),
                'most_common_error': metrics['most_common_error'],
                'error_patterns': dict(self.metrics['error_patterns'])
            },
            'daily_stats': dict(self.metrics['daily_stats']),
            'recommendations': self.generate_recommendations(metrics)
        }

        return report

    def generate_recommendations(self, metrics):
        """Generate prioritized improvement recommendations.

        Args:
            metrics: derived metrics from ``calculate_metrics``.

        Returns:
            list[dict]: each with priority, issue, recommendation, action.
        """
        recommendations = []

        # Success rate recommendations
        if metrics['success_rate'] < 80:
            recommendations.append({
                'priority': 'high',
                'issue': 'Low success rate',
                'recommendation': 'Review onboarding process for common failure points',
                'action': 'Focus on fixing: ' + metrics['most_common_failure']
            })
        elif metrics['success_rate'] < 95:
            recommendations.append({
                'priority': 'medium',
                'issue': 'Moderate success rate',
                'recommendation': 'Optimize onboarding for better success rate',
                'action': 'Monitor and improve failure points'
            })

        # Completion time recommendations
        if metrics['avg_completion_time'] > 20:
            recommendations.append({
                'priority': 'medium',
                'issue': 'Slow onboarding process',
                'recommendation': 'Optimize onboarding steps for faster completion',
                'action': 'Reduce time in capability assessment and registration'
            })

        # Agent type distribution recommendations
        if 'compute_provider' not in metrics['agent_type_percentages'] or \
           metrics['agent_type_percentages'].get('compute_provider', 0) < 20:
            recommendations.append({
                'priority': 'low',
                'issue': 'Low compute provider adoption',
                'recommendation': 'Improve compute provider onboarding experience',
                'action': 'Simplify GPU setup and resource offering process'
            })

        # Error pattern recommendations
        if metrics['most_common_error'] != 'none':
            recommendations.append({
                'priority': 'high',
                'issue': f'Recurring error: {metrics["most_common_error"]}',
                'recommendation': 'Fix common error pattern',
                'action': 'Add better error handling and user guidance'
            })

        return recommendations

    def print_dashboard(self):
        """Print a dashboard view of current metrics to stdout."""
        metrics = self.calculate_metrics()

        print("🤖 AITBC Agent Onboarding Dashboard")
        print("=" * 50)
        print()

        # Summary stats
        print("📊 SUMMARY:")
        print(f"   Total Onboardings: {self.metrics['total_onboardings']}")
        print(f"   Success Rate: {metrics['success_rate']:.1f}%")
        print(f"   Avg Completion Time: {metrics['avg_completion_time']:.1f} minutes")
        print()

        # Agent type distribution
        print("🎯 AGENT TYPE DISTRIBUTION:")
        for agent_type, count in self.metrics['agent_type_distribution'].items():
            percentage = metrics['agent_type_percentages'].get(agent_type, 0)
            print(f"   {agent_type}: {count} ({percentage:.1f}%)")
        print()

        # Recent performance. daily_stats keys are ISO date strings,
        # which order chronologically under plain string comparison.
        print("📈 RECENT PERFORMANCE (Last 7 Days):")
        recent_cutoff = (datetime.now().date() - timedelta(days=7)).isoformat()
        recent_successful = 0
        recent_total = 0

        for date, stats in self.metrics['daily_stats'].items():
            if str(date) >= recent_cutoff:
                recent_total += stats.get('total', 0)
                recent_successful += stats.get('successful', 0)

        if recent_total > 0:
            recent_success_rate = (recent_successful / recent_total) * 100
            print(f"   Success Rate: {recent_success_rate:.1f}% ({recent_successful}/{recent_total})")
        else:
            print("   No recent data available")
        print()

        # Issues
        if metrics['most_common_failure'] != 'none':
            print("⚠️ COMMON ISSUES:")
            print(f"   Most Common Failure: {metrics['most_common_failure']}")
            if metrics['most_common_error'] != 'none':
                print(f"   Most Common Error: {metrics['most_common_error']}")
            print()

        # Recommendations
        recommendations = self.generate_recommendations(metrics)
        if recommendations:
            print("💡 RECOMMENDATIONS:")
            for rec in recommendations[:3]:  # Show top 3
                priority_emoji = "🔴" if rec['priority'] == 'high' else "🟡" if rec['priority'] == 'medium' else "🟢"
                print(f"   {priority_emoji} {rec['issue']}")
                print(f"      {rec['recommendation']}")
            print()

    def export_csv(self):
        """Export daily metrics to /tmp/aitbc-onboarding-metrics.csv."""
        import csv
        from io import StringIO

        output = StringIO()
        writer = csv.writer(output)

        # Write header
        writer.writerow(['Date', 'Total', 'Successful', 'Failed', 'Success Rate', 'Avg Time'])

        # Write daily stats (ISO string keys sort chronologically).
        for date, stats in sorted(self.metrics['daily_stats'].items()):
            total = stats.get('total', 0)
            successful = stats.get('successful', 0)
            failed = stats.get('failed', 0)
            success_rate = (successful / total * 100) if total > 0 else 0

            writer.writerow([
                date,
                total,
                successful,
                failed,
                f"{success_rate:.1f}%",
                "N/A"  # Would need to calculate daily average
            ])

        csv_content = output.getvalue()

        # Save to file
        csv_file = Path('/tmp/aitbc-onboarding-metrics.csv')
        with open(csv_file, 'w') as f:
            f.write(csv_content)

        print(f"📊 Metrics exported to: {csv_file}")

    def run_monitoring(self):
        """Run continuous monitoring until interrupted.

        Every 5 minutes: reload persisted metrics, scan for report
        files, fold in any not-yet-counted reports, persist, and
        redraw the dashboard. NOTE(review): deduplication is in-memory,
        so restarting the process while old report files remain in /tmp
        will still re-count them.
        """
        print("🔍 Starting onboarding monitoring...")
        print("Press Ctrl+C to stop monitoring")
        print()

        try:
            while True:
                # Load existing data
                self.load_existing_data()

                # Scan for new reports (already-counted ones are
                # skipped inside analyze_reports).
                reports = self.scan_onboarding_reports()
                if reports:
                    print(f"📊 Processing {len(reports)} new onboarding reports...")
                    self.analyze_reports(reports)
                    self.save_metrics()

                # Print updated dashboard
                self.print_dashboard()

                # Wait before next scan
                time.sleep(300)  # 5 minutes

        except KeyboardInterrupt:
            print("\n👋 Monitoring stopped by user")
        except Exception as e:
            logger.error(f"Monitoring error: {e}")
|
||||
|
||||
def main():
    """CLI entry point: dispatch the requested onboarding-monitor command."""
    monitor = OnboardingMonitor()

    def _dashboard():
        monitor.load_existing_data()
        monitor.print_dashboard()

    def _export():
        monitor.load_existing_data()
        monitor.export_csv()

    def _report():
        monitor.load_existing_data()
        print(json.dumps(monitor.generate_report(), indent=2))

    # Command-name -> handler dispatch table.
    handlers = {
        'dashboard': _dashboard,
        'export': _export,
        'report': _report,
        'monitor': monitor.run_monitoring,
    }

    if len(sys.argv) > 1:
        handler = handlers.get(sys.argv[1])
        if handler is None:
            print("Usage: python3 onboarding-monitor.py [dashboard|export|report|monitor]")
            sys.exit(1)
        handler()
    else:
        # No argument given: show the dashboard by default.
        _dashboard()


if __name__ == "__main__":
    main()
|
||||
180
scripts/onboarding/quick-start.sh
Executable file
180
scripts/onboarding/quick-start.sh
Executable file
@@ -0,0 +1,180 @@
|
||||
#!/bin/bash
# quick-start.sh - Quick start for AITBC agents
#
# Interactive bootstrap: installs dependencies, asks for an agent type,
# then hands off to the automated onboarding script.

# Abort immediately if any command exits non-zero.
set -e

# ANSI colour codes used by the status-printing helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
||||
# Shared helper: emit a coloured status line and reset the colour.
_say() {
    echo -e "$1$2${NC}"
}

# Green check mark for completed steps.
print_status() {
    _say "${GREEN}" "✅ $1"
}

# Yellow warning for recoverable problems.
print_warning() {
    _say "${YELLOW}" "⚠️ $1"
}

# Blue informational message.
print_info() {
    _say "${BLUE}" "ℹ️ $1"
}

# Red error message for fatal problems.
print_error() {
    _say "${RED}" "❌ $1"
}
|
||||
|
||||
echo "🤖 AITBC Agent Network - Quick Start"
|
||||
echo "=================================="
|
||||
echo
|
||||
|
||||
# Check if running in correct directory
|
||||
if [ ! -f "pyproject.toml" ] || [ ! -d "docs/11_agents" ]; then
|
||||
print_error "Please run this script from the AITBC repository root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
print_status "Repository validation passed"
|
||||
|
||||
# Step 1: Install dependencies
echo "📦 Step 1: Installing dependencies..."
if command -v python3 &> /dev/null; then
    print_status "Python 3 found"
else
    print_error "Python 3 is required"
    exit 1
fi

# Install AITBC agent SDK.
# Use "python3 -m pip" so packages land in the interpreter we just
# verified — a bare "pip" may belong to a different Python installation.
print_info "Installing AITBC agent SDK..."
python3 -m pip install -e packages/py/aitbc-agent-sdk/ > /dev/null 2>&1 || {
    print_error "Failed to install agent SDK"
    exit 1
}
print_status "Agent SDK installed"

# PyTorch is optional: the CUDA wheels may not install everywhere, so a
# failure only downgrades the agent to CPU-only mode instead of aborting.
print_info "Installing additional dependencies..."
python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 > /dev/null 2>&1 || {
    print_warning "PyTorch installation failed (CPU-only mode)"
}
python3 -m pip install requests psutil > /dev/null 2>&1 || {
    print_error "Failed to install additional dependencies"
    exit 1
}
print_status "Dependencies installed"
|
||||
|
||||
# Step 2: Choose agent type
echo ""
echo "🎯 Step 2: Choose your agent type:"
echo "1) Compute Provider - Sell GPU resources to other agents"
echo "2) Compute Consumer - Rent computational resources for tasks"
echo "3) Platform Builder - Contribute code and improvements"
echo "4) Swarm Coordinator - Participate in collective intelligence"
echo

# Map menu numbers onto agent-type identifiers; re-prompt on anything else.
AGENT_TYPES=(compute_provider compute_consumer platform_builder swarm_coordinator)
while true; do
    read -p "Enter your choice (1-4): " choice
    if [[ "$choice" =~ ^[1-4]$ ]]; then
        AGENT_TYPE="${AGENT_TYPES[choice - 1]}"
        break
    fi
    print_error "Invalid choice. Please enter 1-4."
done

print_status "Agent type selected: $AGENT_TYPE"
|
||||
|
||||
# Step 3: Run automated onboarding
echo ""
echo "🚀 Step 3: Running automated onboarding..."
echo "This will:"
echo " - Assess your system capabilities"
echo " - Create your agent identity"
echo " - Register on the AITBC network"
echo " - Join appropriate swarm"
echo " - Start network participation"
echo

if [ ! -f "scripts/onboarding/auto-onboard.py" ]; then
    print_error "Automated onboarding script not found"
    exit 1
fi

# BUG FIX: this script runs under "set -e", so a failing python3 used to
# terminate the whole script before the old 'if [ $? -eq 0 ]' check could
# ever see a non-zero status — the troubleshooting branch was unreachable.
# Running the command as the condition of "if" suspends errexit for it and
# makes the failure branch actually reachable.
if python3 scripts/onboarding/auto-onboard.py; then
    print_status "Automated onboarding completed successfully!"

    # Show next steps
    echo ""
    echo "🎉 Congratulations! Your agent is now part of the AITBC network!"
    echo ""
    echo "📋 Next Steps:"
    echo "1. Check your agent dashboard: https://aitbc.bubuit.net/agents/"
    echo "2. Read the documentation: https://aitbc.bubuit.net/docs/11_agents/"
    echo "3. Join the community: https://discord.gg/aitbc-agents"
    echo ""
    echo "🔗 Quick Commands:"

    # Per-type command cheat sheet
    case $AGENT_TYPE in
        compute_provider)
            echo " - Monitor earnings: aitbc agent earnings"
            echo " - Check utilization: aitbc agent status"
            echo " - Adjust pricing: aitbc agent pricing --rate 0.15"
            ;;
        compute_consumer)
            echo " - Submit job: aitbc agent submit --task 'text analysis'"
            echo " - Check status: aitbc agent status"
            echo " - View history: aitbc agent history"
            ;;
        platform_builder)
            echo " - Contribute code: aitbc agent contribute --type optimization"
            echo " - Check contributions: aitbc agent contributions"
            echo " - View reputation: aitbc agent reputation"
            ;;
        swarm_coordinator)
            echo " - Swarm status: aitbc swarm status"
            echo " - Coordinate tasks: aitbc swarm coordinate --task optimization"
            echo " - View metrics: aitbc swarm metrics"
            ;;
    esac

    echo ""
    echo "📚 Documentation:"
    echo " - Getting Started: https://aitbc.bubuit.net/docs/11_agents/getting-started.md"
    echo " - Agent Guide: https://aitbc.bubuit.net/docs/11_agents/${AGENT_TYPE}.md"
    echo " - API Reference: https://aitbc.bubuit.net/docs/agents/agent-api-spec.json"
    echo ""
    print_info "Your agent is ready to earn tokens and participate in the network!"

else
    print_error "Automated onboarding failed"
    echo ""
    echo "🔧 Troubleshooting:"
    echo "1. Check your internet connection"
    echo "2. Verify AITBC network status: curl https://api.aitbc.bubuit.net/v1/health"
    echo "3. Check logs in /tmp/aitbc-onboarding-*.json"
    echo "4. Run manual onboarding: python3 scripts/onboarding/manual-onboard.py"
fi

echo ""
echo "🤖 Welcome to the AITBC Agent Network!"
|
||||
Reference in New Issue
Block a user