feat: add marketplace metrics, privacy features, and service registry endpoints
- Add Prometheus metrics for marketplace API throughput and error rates with new dashboard panels - Implement confidential transaction models with encryption support and access control - Add key management system with registration, rotation, and audit logging - Create services and registry routers for service discovery and management - Integrate ZK proof generation for privacy-preserving receipts - Add metrics instrumentation
This commit is contained in:
628
ecosystem-analytics/analytics_service.py
Normal file
628
ecosystem-analytics/analytics_service.py
Normal file
@ -0,0 +1,628 @@
|
||||
"""
|
||||
Ecosystem Analytics Service for AITBC
|
||||
|
||||
Tracks and analyzes ecosystem metrics including:
|
||||
- Hackathon participation and outcomes
|
||||
- Grant program effectiveness
|
||||
- Extension adoption and usage
|
||||
- Developer engagement
|
||||
- Network effects and cross-chain activity
|
||||
"""
|
||||
|
||||
import asyncio
import io
import json
import logging
import os
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
from decimal import Decimal
from typing import Dict, List, Any, Optional

import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from sqlalchemy import create_engine, select, func
from sqlalchemy.orm import sessionmaker
|
||||
|
||||
# Configuration - in production, this would come from environment variables or config file
|
||||
# Configuration - in production, this would come from environment variables or config file
class Settings:
    """Runtime configuration for the analytics service.

    Reads the database URL from the ``DATABASE_URL`` environment variable so
    credentials are not hard-coded in source; falls back to a local
    development default when unset.
    """
    DATABASE_URL = os.environ.get(
        "DATABASE_URL", "postgresql://user:pass@localhost/aitbc"
    )


# Shared settings instance used by the service below.
settings = Settings()
|
||||
|
||||
|
||||
@dataclass
class EcosystemMetric:
    """Base class for ecosystem metrics.

    A single generic measurement; specialised metric dataclasses below carry
    richer, domain-specific fields.
    """
    timestamp: datetime  # when the measurement was taken — presumably UTC, TODO confirm
    metric_name: str  # identifier for the metric being measured
    value: float  # the measured value, expressed in `unit`
    unit: str  # unit of measure for `value`
    dimensions: Dict[str, Any]  # extra labels/tags for slicing the metric
    source: str  # subsystem or service that produced the measurement
|
||||
|
||||
|
||||
@dataclass
class HackathonMetric:
    """Hackathon-specific metrics.

    Aggregated per-event figures, produced by
    ``EcosystemAnalyticsService.collect_hackathon_metrics``.
    """
    event_id: str  # unique identifier of the hackathon event
    event_name: str  # human-readable event name
    start_date: datetime  # event start
    end_date: datetime  # event end
    participants: int  # number of active participants
    submissions: int  # number of submitted projects
    winners: int  # number of winning projects
    projects_deployed: int  # submissions that were actually deployed
    github_stars: int  # total GitHub stars across all event projects
    community_engagement: float  # engagement score (see _calculate_community_engagement)
    technical_score: float  # mean technical score over projects (0 when no projects)
    innovation_score: float  # mean innovation score over projects (0 when no projects)
|
||||
|
||||
|
||||
@dataclass
class GrantMetric:
    """Grant program metrics.

    Aggregated per-grant figures, produced by
    ``EcosystemAnalyticsService.collect_grant_metrics``.
    """
    grant_id: str  # unique identifier of the grant
    project_name: str  # name of the funded project
    amount_awarded: Decimal  # total grant amount awarded
    amount_disbursed: Decimal  # amount actually paid out so far
    milestones_completed: int  # milestones delivered to date
    total_milestones: int  # total milestones in the grant agreement
    users_acquired: int  # users acquired by the funded project
    github_contributors: int  # contributors on the project repository
    code_commits: int  # commit count on the project repository
    documentation_score: float  # documentation quality score
    community_score: float  # community health score
|
||||
|
||||
|
||||
@dataclass
class ExtensionMetric:
    """Extension/connector metrics.

    Aggregated per-extension figures, produced by
    ``EcosystemAnalyticsService.collect_extension_metrics``.
    """
    extension_id: str  # unique identifier of the extension
    extension_name: str  # human-readable extension name
    downloads: int  # cumulative download count
    active_installations: int  # currently active installations
    api_calls: int  # API calls over the measurement window (30 days at collection)
    error_rate: float  # fraction of failed calls over the window
    avg_response_time: float  # average response time — presumably milliseconds, TODO confirm
    user_satisfaction: float  # user satisfaction score
    integration_count: int  # number of integrations using this extension
    revenue_generated: Decimal  # revenue attributed to the extension over the window
|
||||
|
||||
|
||||
class EcosystemAnalyticsService:
    """Main analytics service for ecosystem metrics.

    Collects hackathon, grant, and extension metrics, persists them, and
    builds dashboards/reports with plotly visualizations.  Many private
    helpers currently return hard-coded sample values until their real data
    sources are wired in (each is marked with a "Sample" comment).
    """

    def __init__(self):
        # create_engine() is lazy: no database connection is opened here.
        self.engine = create_engine(settings.DATABASE_URL)
        self.Session = sessionmaker(bind=self.engine)
        # Standard module logger (replaces the previous __import__('logging') hack).
        self.logger = logging.getLogger(__name__)

    async def collect_hackathon_metrics(self, event_id: str) -> HackathonMetric:
        """Collect metrics for a specific hackathon.

        Raises:
            ValueError: if `event_id` does not match a known event.
        """
        with self.Session() as db:
            # Get event details
            event = self._get_hackathon_event(db, event_id)
            if not event:
                raise ValueError(f"Hackathon {event_id} not found")

            # Collect participant metrics
            participants = await self._count_participants(event_id)
            submissions = await self._count_submissions(event_id)

            # Get project metrics
            projects = await self._get_hackathon_projects(event_id)
            projects_deployed = sum(1 for p in projects if p.get('deployed', False))

            # Calculate engagement scores
            community_engagement = await self._calculate_community_engagement(event_id)
            technical_scores = [p.get('technical_score', 0) for p in projects]
            innovation_scores = [p.get('innovation_score', 0) for p in projects]

            # Get GitHub metrics
            github_stars = sum(p.get('github_stars', 0) for p in projects)

            metric = HackathonMetric(
                event_id=event_id,
                event_name=event['name'],
                start_date=event['start_date'],
                end_date=event['end_date'],
                participants=participants,
                submissions=submissions,
                winners=len([p for p in projects if p.get('winner', False)]),
                projects_deployed=projects_deployed,
                github_stars=github_stars,
                community_engagement=community_engagement,
                # Averages fall back to 0 when there are no projects.
                technical_score=sum(technical_scores) / len(technical_scores) if technical_scores else 0,
                innovation_score=sum(innovation_scores) / len(innovation_scores) if innovation_scores else 0
            )

            # Store metrics
            await self._store_metric(metric)

            return metric

    async def collect_grant_metrics(self, grant_id: str) -> GrantMetric:
        """Collect metrics for a specific grant.

        Raises:
            ValueError: if `grant_id` does not match a known grant.
        """
        with self.Session() as db:
            # Get grant details
            grant = self._get_grant_details(db, grant_id)
            if not grant:
                raise ValueError(f"Grant {grant_id} not found")

            # Get project metrics
            project = await self._get_grant_project(grant_id)

            # Calculate completion metrics
            milestones_completed = await self._count_completed_milestones(grant_id)
            total_milestones = grant.get('total_milestones', 1)

            # Get adoption metrics
            users_acquired = await self._count_project_users(grant_id)
            github_contributors = await self._count_github_contributors(project.get('repo_url'))
            code_commits = await self._count_code_commits(project.get('repo_url'))

            # Calculate quality scores
            documentation_score = await self._evaluate_documentation(project.get('docs_url'))
            community_score = await self._evaluate_community_health(project.get('repo_url'))

            metric = GrantMetric(
                grant_id=grant_id,
                project_name=grant['project_name'],
                # Route through str() so float inputs don't pick up binary
                # representation error in the Decimal.
                amount_awarded=Decimal(str(grant.get('amount_awarded', 0))),
                amount_disbursed=Decimal(str(grant.get('amount_disbursed', 0))),
                milestones_completed=milestones_completed,
                total_milestones=total_milestones,
                users_acquired=users_acquired,
                github_contributors=github_contributors,
                code_commits=code_commits,
                documentation_score=documentation_score,
                community_score=community_score
            )

            # Store metrics
            await self._store_metric(metric)

            return metric

    async def collect_extension_metrics(self, extension_id: str) -> ExtensionMetric:
        """Collect metrics for a specific extension.

        Raises:
            ValueError: if `extension_id` does not match a known extension.
        """
        with self.Session() as db:
            # Get extension details
            extension = self._get_extension_details(db, extension_id)
            if not extension:
                raise ValueError(f"Extension {extension_id} not found")

            # Get usage metrics
            downloads = await self._count_downloads(extension_id)
            active_installations = await self._count_active_installations(extension_id)

            # Get performance metrics (over a fixed 30-day window)
            api_calls = await self._count_api_calls(extension_id, days=30)
            error_rate = await self._calculate_error_rate(extension_id, days=30)
            avg_response_time = await self._calculate_avg_response_time(extension_id, days=30)

            # Get quality metrics
            user_satisfaction = await self._calculate_user_satisfaction(extension_id)
            integration_count = await self._count_integrations(extension_id)

            # Get business metrics
            revenue_generated = await self._calculate_revenue(extension_id, days=30)

            metric = ExtensionMetric(
                extension_id=extension_id,
                extension_name=extension['name'],
                downloads=downloads,
                active_installations=active_installations,
                api_calls=api_calls,
                error_rate=error_rate,
                avg_response_time=avg_response_time,
                user_satisfaction=user_satisfaction,
                integration_count=integration_count,
                revenue_generated=Decimal(str(revenue_generated))
            )

            # Store metrics
            await self._store_metric(metric)

            return metric

    async def generate_ecosystem_dashboard(self, days: int = 30) -> Dict[str, Any]:
        """Generate comprehensive ecosystem dashboard for the last `days` days."""
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=days)

        dashboard = {
            "period": {
                "start": start_date.isoformat(),
                "end": end_date.isoformat(),
                "days": days
            },
            "summary": await self._generate_summary_metrics(start_date, end_date),
            "hackathons": await self._generate_hackathon_section(start_date, end_date),
            "grants": await self._generate_grant_section(start_date, end_date),
            "extensions": await self._generate_extension_section(start_date, end_date),
            "network_effects": await self._generate_network_effects(start_date, end_date)
        }

        return dashboard

    async def generate_hackathon_report(self, event_id: str) -> Dict[str, Any]:
        """Generate detailed hackathon report with figures, insights, and recommendations."""
        metric = await self.collect_hackathon_metrics(event_id)

        # Generate visualizations (serialized to JSON for transport)
        figures = {}

        # Participation funnel
        fig_funnel = go.Figure(go.Funnel(
            y=["Registrations", "Active Participants", "Submissions", "Deployed Projects", "Winners"],
            x=[
                metric.participants * 1.5,  # Estimated registrations
                metric.participants,
                metric.submissions,
                metric.projects_deployed,
                metric.winners
            ]
        ))
        fig_funnel.update_layout(title="Hackathon Participation Funnel")
        figures['funnel'] = fig_funnel.to_json()

        # Score distribution
        fig_scores = go.Figure()
        fig_scores.add_trace(go.Scatter(
            x=list(range(metric.submissions)),
            # FIX: the original passed a list of dicts as y-values, which plotly
            # cannot plot.  Use the aggregate score as a numeric placeholder
            # until real per-project scores are available.
            y=[metric.technical_score] * metric.submissions,  # Sample data
            mode='markers',
            name='Projects'
        ))
        fig_scores.update_layout(title="Project Scores Distribution")
        figures['scores'] = fig_scores.to_json()

        # Project categories
        categories = ['DeFi', 'Enterprise', 'Developer Tools', 'Analytics', 'Other']
        counts = [15, 20, 10, 8, 12]  # Sample data

        fig_categories = px.pie(
            values=counts,
            names=categories,
            title="Project Categories"
        )
        figures['categories'] = fig_categories.to_json()

        report = {
            "event": asdict(metric),
            "figures": figures,
            "insights": await self._generate_hackathon_insights(metric),
            "recommendations": await self._generate_hackathon_recommendations(metric)
        }

        return report

    async def generate_grant_impact_report(self, grant_id: str) -> Dict[str, Any]:
        """Generate grant impact report with ROI analysis and adoption curve."""
        metric = await self.collect_grant_metrics(grant_id)

        # Generate ROI analysis
        roi_analysis = await self._calculate_grant_roi(metric)

        # Generate adoption curve
        adoption_data = await self._get_adoption_curve(grant_id)

        fig_adoption = px.line(
            x=[d['date'] for d in adoption_data],
            y=[d['users'] for d in adoption_data],
            title="User Adoption Over Time"
        )

        report = {
            "grant": asdict(metric),
            "roi_analysis": roi_analysis,
            "adoption_chart": fig_adoption.to_json(),
            "milestone_progress": {
                "completed": metric.milestones_completed,
                "total": metric.total_milestones,
                # Guard against a zero-milestone grant definition.
                "percentage": (metric.milestones_completed / metric.total_milestones * 100) if metric.total_milestones > 0 else 0
            },
            "quality_metrics": {
                "documentation": metric.documentation_score,
                "community": metric.community_score,
                "overall": (metric.documentation_score + metric.community_score) / 2
            }
        }

        return report

    async def export_metrics(self, metric_type: str, format: str = "csv") -> bytes:
        """Export metrics in the specified format.

        Args:
            metric_type: one of "hackathons", "grants", "extensions".
            format: one of "csv", "json", "excel".

        Returns:
            Serialized metrics as bytes.

        Raises:
            ValueError: for an unknown metric type or unsupported format.
        """
        # Get metrics data
        if metric_type == "hackathons":
            data = await self._get_all_hackathon_metrics()
        elif metric_type == "grants":
            data = await self._get_all_grant_metrics()
        elif metric_type == "extensions":
            data = await self._get_all_extension_metrics()
        else:
            raise ValueError(f"Unknown metric type: {metric_type}")

        # Convert to DataFrame
        df = pd.DataFrame([asdict(m) for m in data])

        # Export in requested format
        if format == "csv":
            return df.to_csv(index=False).encode('utf-8')
        elif format == "json":
            return df.to_json(orient='records', indent=2).encode('utf-8')
        elif format == "excel":
            # FIX: DataFrame.to_excel() writes to a path/buffer and returns
            # None, so the original `.encode('utf-8')` raised AttributeError.
            # Write into an in-memory buffer and return its bytes.
            buffer = io.BytesIO()
            df.to_excel(buffer, index=False)
            return buffer.getvalue()
        else:
            raise ValueError(f"Unsupported format: {format}")

    # Private helper methods

    async def _store_metric(self, metric: Any):
        """Store metric in database"""
        # Implementation would store in metrics table
        pass

    async def _count_participants(self, event_id: str) -> int:
        """Count hackathon participants"""
        # Implementation would query participant data
        return 150  # Sample

    async def _count_submissions(self, event_id: str) -> int:
        """Count hackathon submissions"""
        return 45  # Sample

    async def _get_hackathon_projects(self, event_id: str) -> List[Dict]:
        """Get all projects from hackathon"""
        # Implementation would query project data
        return []  # Sample

    async def _calculate_community_engagement(self, event_id: str) -> float:
        """Calculate community engagement score"""
        return 85.5  # Sample

    async def _count_completed_milestones(self, grant_id: str) -> int:
        """Count completed grant milestones"""
        return 3  # Sample

    async def _count_project_users(self, grant_id: str) -> int:
        """Count users of grant project"""
        return 500  # Sample

    async def _count_github_contributors(self, repo_url: str) -> int:
        """Count GitHub contributors"""
        return 12  # Sample

    async def _count_code_commits(self, repo_url: str) -> int:
        """Count code commits"""
        return 234  # Sample

    async def _evaluate_documentation(self, docs_url: str) -> float:
        """Evaluate documentation quality"""
        return 90.0  # Sample

    async def _evaluate_community_health(self, repo_url: str) -> float:
        """Evaluate community health"""
        return 75.5  # Sample

    async def _count_downloads(self, extension_id: str) -> int:
        """Count extension downloads"""
        return 1250  # Sample

    async def _count_active_installations(self, extension_id: str) -> int:
        """Count active installations"""
        return 350  # Sample

    async def _count_api_calls(self, extension_id: str, days: int) -> int:
        """Count API calls to extension"""
        return 15000  # Sample

    async def _calculate_error_rate(self, extension_id: str, days: int) -> float:
        """Calculate error rate"""
        return 0.02  # Sample

    async def _calculate_avg_response_time(self, extension_id: str, days: int) -> float:
        """Calculate average response time"""
        return 125.5  # Sample

    async def _calculate_user_satisfaction(self, extension_id: str) -> float:
        """Calculate user satisfaction score"""
        return 4.5  # Sample

    async def _count_integrations(self, extension_id: str) -> int:
        """Count integrations using extension"""
        return 25  # Sample

    async def _calculate_revenue(self, extension_id: str, days: int) -> float:
        """Calculate revenue generated"""
        return 5000.0  # Sample

    async def _generate_summary_metrics(self, start_date: datetime, end_date: datetime) -> Dict:
        """Generate summary metrics for dashboard"""
        return {
            "total_hackathons": 4,
            "total_participants": 600,
            "total_grants_awarded": 12,
            "total_grant_amount": 500000,
            "active_extensions": 25,
            "total_downloads": 50000,
            "github_stars": 2500,
            "community_members": 1500
        }

    async def _generate_hackathon_section(self, start_date: datetime, end_date: datetime) -> Dict:
        """Generate hackathon section of dashboard"""
        return {
            "upcoming": [],
            "recent": [],
            "top_projects": [],
            "participation_trend": []
        }

    async def _generate_grant_section(self, start_date: datetime, end_date: datetime) -> Dict:
        """Generate grant section of dashboard"""
        return {
            "active_grants": 8,
            "completed_grants": 4,
            "total_disbursed": 350000,
            "roi_average": 2.5,
            "success_rate": 0.85
        }

    async def _generate_extension_section(self, start_date: datetime, end_date: datetime) -> Dict:
        """Generate extension section of dashboard"""
        return {
            "total_extensions": 25,
            "new_extensions": 3,
            "most_popular": [],
            "growth_rate": 0.15
        }

    async def _generate_network_effects(self, start_date: datetime, end_date: datetime) -> Dict:
        """Generate network effects analysis"""
        return {
            "cross_chain_volume": 1000000,
            "interoperability_score": 85.5,
            "network_value": 25000000,
            "metcalfe_coefficient": 1.2
        }

    async def _generate_hackathon_insights(self, metric: HackathonMetric) -> List[str]:
        """Generate insights from hackathon metrics"""
        insights = []

        # FIX: guard against ZeroDivisionError when there were no submissions.
        deployment_rate = (
            metric.projects_deployed / metric.submissions if metric.submissions else 0.0
        )

        if deployment_rate > 0.5:
            insights.append("High deployment rate indicates strong technical execution")

        if metric.community_engagement > 80:
            insights.append("Excellent community engagement and participation")

        if metric.github_stars > 100:
            insights.append("Strong GitHub community interest")

        return insights

    async def _generate_hackathon_recommendations(self, metric: HackathonMetric) -> List[str]:
        """Generate recommendations for improvement"""
        recommendations = []

        # FIX: guard against ZeroDivisionError when there were no submissions;
        # zero submissions counts as a low deployment rate.
        deployment_rate = (
            metric.projects_deployed / metric.submissions if metric.submissions else 0.0
        )

        if deployment_rate < 0.3:
            recommendations.append("Provide more deployment support and infrastructure")

        if metric.technical_score < 70:
            recommendations.append("Offer technical workshops and mentorship")

        if metric.innovation_score < 70:
            recommendations.append("Encourage more innovative and ambitious projects")

        return recommendations

    async def _calculate_grant_roi(self, metric: GrantMetric) -> Dict:
        """Calculate grant ROI"""
        if metric.amount_disbursed == 0:
            return {"roi": 0, "payback_period": None}

        # Simplified ROI calculation
        estimated_value = metric.users_acquired * 100  # $100 per user
        roi = (estimated_value - float(metric.amount_disbursed)) / float(metric.amount_disbursed)

        return {
            "roi": roi,
            "payback_period": "12 months" if roi > 0 else None,
            "value_created": estimated_value
        }

    async def _get_adoption_curve(self, grant_id: str) -> List[Dict]:
        """Get user adoption over time"""
        # Sample data
        return [
            {"date": "2024-01-01", "users": 50},
            {"date": "2024-02-01", "users": 120},
            {"date": "2024-03-01", "users": 200},
            {"date": "2024-04-01", "users": 350},
            {"date": "2024-05-01", "users": 500}
        ]

    def _get_hackathon_event(self, db, event_id: str) -> Optional[Dict]:
        """Get hackathon event details"""
        # Implementation would query database
        return {
            "name": "DeFi Innovation Hackathon",
            "start_date": datetime(2024, 1, 15),
            "end_date": datetime(2024, 1, 22)
        }

    def _get_grant_details(self, db, grant_id: str) -> Optional[Dict]:
        """Get grant details"""
        # Implementation would query database
        return {
            "project_name": "Advanced Analytics Platform",
            "amount_awarded": 50000,
            "amount_disbursed": 25000,
            "total_milestones": 4
        }

    def _get_extension_details(self, db, extension_id: str) -> Optional[Dict]:
        """Get extension details"""
        # Implementation would query database
        return {
            "name": "SAP ERP Connector"
        }

    async def _get_grant_project(self, grant_id: str) -> Dict:
        """Get grant project details"""
        return {
            "repo_url": "https://github.com/example/project",
            "docs_url": "https://docs.example.com"
        }

    async def _get_all_hackathon_metrics(self) -> List[HackathonMetric]:
        """Get all hackathon metrics"""
        # Implementation would query database
        return []

    async def _get_all_grant_metrics(self) -> List[GrantMetric]:
        """Get all grant metrics"""
        # Implementation would query database
        return []

    async def _get_all_extension_metrics(self) -> List[ExtensionMetric]:
        """Get all extension metrics"""
        # Implementation would query database
        return []
|
||||
|
||||
|
||||
# CLI interface for analytics service
|
||||
# CLI interface for analytics service
async def main():
    """CLI entry point: parse flags, dispatch to the analytics service, print JSON."""
    import argparse

    arg_parser = argparse.ArgumentParser(description="AITBC Ecosystem Analytics")
    arg_parser.add_argument("--dashboard", action="store_true", help="Generate ecosystem dashboard")
    arg_parser.add_argument("--hackathon", help="Generate hackathon report for event ID")
    arg_parser.add_argument("--grant", help="Generate grant impact report for grant ID")
    arg_parser.add_argument("--export", choices=["hackathons", "grants", "extensions"], help="Export metrics")
    arg_parser.add_argument("--format", choices=["csv", "json", "excel"], default="json", help="Export format")
    arg_parser.add_argument("--days", type=int, default=30, help="Number of days for dashboard")

    options = arg_parser.parse_args()
    analytics = EcosystemAnalyticsService()

    def emit(payload):
        # All report commands share the same JSON-to-stdout output path.
        print(json.dumps(payload, indent=2, default=str))

    if options.dashboard:
        emit(await analytics.generate_ecosystem_dashboard(options.days))
    elif options.hackathon:
        emit(await analytics.generate_hackathon_report(options.hackathon))
    elif options.grant:
        emit(await analytics.generate_grant_impact_report(options.grant))
    elif options.export:
        exported = await analytics.export_metrics(options.export, options.format)
        print(exported.decode())
    else:
        arg_parser.print_help()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the async CLI entry point on a fresh event loop.
    asyncio.run(main())
|
||||
927
ecosystem-analytics/kpi_tracker.py
Normal file
927
ecosystem-analytics/kpi_tracker.py
Normal file
@ -0,0 +1,927 @@
|
||||
"""
|
||||
Ecosystem KPI Tracker for AITBC
|
||||
Tracks key performance indicators for ecosystem health and strategy reviews
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Any, Optional
|
||||
from dataclasses import dataclass, asdict
|
||||
from decimal import Decimal
|
||||
import pandas as pd
|
||||
import plotly.graph_objects as go
|
||||
import plotly.express as px
|
||||
from sqlalchemy import create_engine, select, func, and_, or_
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from enum import Enum
|
||||
|
||||
from ..config import settings
|
||||
from ..database import get_db
|
||||
|
||||
|
||||
class KPICategory(Enum):
    """Categories of KPIs.

    Each tracked KPI definition is tagged with exactly one of these
    categories (see KPIDefinition.category).
    """
    MARKETPLACE = "marketplace"
    CROSS_CHAIN = "cross_chain"
    DEVELOPER = "developer"
    USER = "user"
    FINANCIAL = "financial"
    TECHNICAL = "technical"
|
||||
|
||||
|
||||
@dataclass
class KPIDefinition:
    """Definition of a KPI: what it measures, how, from where, and how often."""
    name: str  # unique KPI identifier (also the registry key)
    category: KPICategory  # which KPICategory this KPI belongs to
    description: str  # human-readable description
    unit: str  # unit of measure (e.g. "count", "USD", "percent")
    target: Optional[float]  # target value, or None when no target is set
    calculation_method: str  # name of the tracker method that computes the value
    data_sources: List[str]  # systems the calculation draws data from
    frequency: str  # daily, weekly, monthly
    importance: str  # high, medium, low
|
||||
|
||||
|
||||
@dataclass
class KPIValue:
    """A single KPI measurement at a point in time."""
    timestamp: datetime  # when the KPI was measured — presumably UTC, TODO confirm
    kpi_name: str  # name of the KPIDefinition this value belongs to
    value: float  # measured value, in `unit`
    unit: str  # unit of measure, copied from the definition
    category: str  # KPICategory.value of the definition
    metadata: Dict[str, Any]  # extra context (e.g. target, importance)
|
||||
|
||||
|
||||
class EcosystemKPITracker:
|
||||
"""Main KPI tracking system"""
|
||||
|
||||
def __init__(self):
|
||||
self.engine = create_engine(settings.DATABASE_URL)
|
||||
self.Session = sessionmaker(bind=self.engine)
|
||||
self.logger = __import__('logging').getLogger(__name__)
|
||||
|
||||
# Define all tracked KPIs
|
||||
self.kpi_definitions = self._initialize_kpi_definitions()
|
||||
|
||||
def _initialize_kpi_definitions(self) -> Dict[str, KPIDefinition]:
|
||||
"""Initialize all KPI definitions"""
|
||||
return {
|
||||
# Marketplace KPIs
|
||||
"active_marketplaces": KPIDefinition(
|
||||
name="active_marketplaces",
|
||||
category=KPICategory.MARKETPLACE,
|
||||
description="Number of active marketplaces on the platform",
|
||||
unit="count",
|
||||
target=50.0,
|
||||
calculation_method="count_active_marketplaces",
|
||||
data_sources=["marketplace_service", "tenant_db"],
|
||||
frequency="daily",
|
||||
importance="high"
|
||||
),
|
||||
"total_volume_usd": KPIDefinition(
|
||||
name="total_volume_usd",
|
||||
category=KPICategory.MARKETPLACE,
|
||||
description="Total transaction volume in USD",
|
||||
unit="USD",
|
||||
target=10000000.0,
|
||||
calculation_method="sum_transaction_volume",
|
||||
data_sources=["transaction_db", "price_oracle"],
|
||||
frequency="daily",
|
||||
importance="high"
|
||||
),
|
||||
"marketplace_utilization": KPIDefinition(
|
||||
name="marketplace_utilization",
|
||||
category=KPICategory.MARKETPLACE,
|
||||
description="Percentage of utilized marketplace capacity",
|
||||
unit="percent",
|
||||
target=75.0,
|
||||
calculation_method="calculate_utilization",
|
||||
data_sources=["marketplace_service", "usage_metrics"],
|
||||
frequency="hourly",
|
||||
importance="medium"
|
||||
),
|
||||
|
||||
# Cross-Chain KPIs
|
||||
"cross_chain_volume": KPIDefinition(
|
||||
name="cross_chain_volume",
|
||||
category=KPICategory.CROSS_CHAIN,
|
||||
description="Total cross-chain transaction volume",
|
||||
unit="USD",
|
||||
target=5000000.0,
|
||||
calculation_method="sum_cross_chain_volume",
|
||||
data_sources=["bridge_service", "transaction_db"],
|
||||
frequency="daily",
|
||||
importance="high"
|
||||
),
|
||||
"active_bridges": KPIDefinition(
|
||||
name="active_bridges",
|
||||
category=KPICategory.CROSS_CHAIN,
|
||||
description="Number of active cross-chain bridges",
|
||||
unit="count",
|
||||
target=10.0,
|
||||
calculation_method="count_active_bridges",
|
||||
data_sources=["bridge_service"],
|
||||
frequency="daily",
|
||||
importance="medium"
|
||||
),
|
||||
"bridge_success_rate": KPIDefinition(
|
||||
name="bridge_success_rate",
|
||||
category=KPICategory.CROSS_CHAIN,
|
||||
description="Success rate of cross-chain transactions",
|
||||
unit="percent",
|
||||
target=95.0,
|
||||
calculation_method="calculate_bridge_success_rate",
|
||||
data_sources=["bridge_service", "transaction_db"],
|
||||
frequency="hourly",
|
||||
importance="high"
|
||||
),
|
||||
|
||||
# Developer KPIs
|
||||
"active_developers": KPIDefinition(
|
||||
name="active_developers",
|
||||
category=KPICategory.DEVELOPER,
|
||||
description="Number of active developers in ecosystem",
|
||||
unit="count",
|
||||
target=1000.0,
|
||||
calculation_method="count_active_developers",
|
||||
data_sources=["github_api", "developer_db"],
|
||||
frequency="weekly",
|
||||
importance="high"
|
||||
),
|
||||
"new_extensions": KPIDefinition(
|
||||
name="new_extensions",
|
||||
category=KPICategory.DEVELOPER,
|
||||
description="Number of new marketplace extensions created",
|
||||
unit="count",
|
||||
target=25.0,
|
||||
calculation_method="count_new_extensions",
|
||||
data_sources=["extension_registry", "github_api"],
|
||||
frequency="weekly",
|
||||
importance="medium"
|
||||
),
|
||||
"developer_satisfaction": KPIDefinition(
|
||||
name="developer_satisfaction",
|
||||
category=KPICategory.DEVELOPER,
|
||||
description="Developer satisfaction score (1-5)",
|
||||
unit="score",
|
||||
target=4.5,
|
||||
calculation_method="calculate_satisfaction_score",
|
||||
data_sources=["surveys", "github_issues", "discord_sentiment"],
|
||||
frequency="monthly",
|
||||
importance="medium"
|
||||
),
|
||||
|
||||
# User KPIs
|
||||
"active_users": KPIDefinition(
|
||||
name="active_users",
|
||||
category=KPICategory.USER,
|
||||
description="Number of active users (30-day)",
|
||||
unit="count",
|
||||
target=10000.0,
|
||||
calculation_method="count_active_users",
|
||||
data_sources=["user_db", "auth_service"],
|
||||
frequency="daily",
|
||||
importance="high"
|
||||
),
|
||||
"user_retention": KPIDefinition(
|
||||
name="user_retention",
|
||||
category=KPICategory.USER,
|
||||
description="30-day user retention rate",
|
||||
unit="percent",
|
||||
target=80.0,
|
||||
calculation_method="calculate_retention_rate",
|
||||
data_sources=["user_db", "analytics_service"],
|
||||
frequency="weekly",
|
||||
importance="high"
|
||||
),
|
||||
"net_promoter_score": KPIDefinition(
|
||||
name="net_promoter_score",
|
||||
category=KPICategory.USER,
|
||||
description="Net Promoter Score",
|
||||
unit="score",
|
||||
target=50.0,
|
||||
calculation_method="calculate_nps",
|
||||
data_sources=["surveys", "feedback_service"],
|
||||
frequency="monthly",
|
||||
importance="medium"
|
||||
),
|
||||
|
||||
# Financial KPIs
|
||||
"revenue": KPIDefinition(
|
||||
name="revenue",
|
||||
category=KPICategory.FINANCIAL,
|
||||
description="Total platform revenue",
|
||||
unit="USD",
|
||||
target=1000000.0,
|
||||
calculation_method="calculate_revenue",
|
||||
data_sources=["billing_service", "payment_processor"],
|
||||
frequency="monthly",
|
||||
importance="high"
|
||||
),
|
||||
"cost_per_transaction": KPIDefinition(
|
||||
name="cost_per_transaction",
|
||||
category=KPICategory.FINANCIAL,
|
||||
description="Average cost per transaction",
|
||||
unit="USD",
|
||||
target=0.10,
|
||||
calculation_method="calculate_cost_per_tx",
|
||||
data_sources=["billing_service", "metrics_service"],
|
||||
frequency="monthly",
|
||||
importance="medium"
|
||||
),
|
||||
"profit_margin": KPIDefinition(
|
||||
name="profit_margin",
|
||||
category=KPICategory.FINANCIAL,
|
||||
description="Platform profit margin",
|
||||
unit="percent",
|
||||
target=20.0,
|
||||
calculation_method="calculate_profit_margin",
|
||||
data_sources=["billing_service", "financial_db"],
|
||||
frequency="quarterly",
|
||||
importance="high"
|
||||
),
|
||||
|
||||
# Technical KPIs
|
||||
"network_hash_rate": KPIDefinition(
|
||||
name="network_hash_rate",
|
||||
category=KPICategory.TECHNICAL,
|
||||
description="Network hash rate",
|
||||
unit="H/s",
|
||||
target=1000000000.0,
|
||||
calculation_method="get_hash_rate",
|
||||
data_sources=["blockchain_node", "metrics_service"],
|
||||
frequency="hourly",
|
||||
importance="high"
|
||||
),
|
||||
"block_time": KPIDefinition(
|
||||
name="block_time",
|
||||
category=KPICategory.TECHNICAL,
|
||||
description="Average block time",
|
||||
unit="seconds",
|
||||
target=12.0,
|
||||
calculation_method="calculate_average_block_time",
|
||||
data_sources=["blockchain_node", "block_db"],
|
||||
frequency="hourly",
|
||||
importance="high"
|
||||
),
|
||||
"uptime": KPIDefinition(
|
||||
name="uptime",
|
||||
category=KPICategory.TECHNICAL,
|
||||
description="Platform uptime percentage",
|
||||
unit="percent",
|
||||
target=99.9,
|
||||
calculation_method="calculate_uptime",
|
||||
data_sources=["monitoring_service", "health_checks"],
|
||||
frequency="daily",
|
||||
importance="high"
|
||||
),
|
||||
}
|
||||
|
||||
async def collect_all_kpis(self, period: str = "daily") -> List[KPIValue]:
|
||||
"""Collect all KPIs for a given period"""
|
||||
kpi_values = []
|
||||
|
||||
for kpi_name, kpi_def in self.kpi_definitions.items():
|
||||
if kpi_def.frequency == period or period == "all":
|
||||
try:
|
||||
value = await self._calculate_kpi(kpi_name, kpi_def)
|
||||
kpi_value = KPIValue(
|
||||
timestamp=datetime.utcnow(),
|
||||
kpi_name=kpi_name,
|
||||
value=value,
|
||||
unit=kpi_def.unit,
|
||||
category=kpi_def.category.value,
|
||||
metadata={
|
||||
"target": kpi_def.target,
|
||||
"importance": kpi_def.importance,
|
||||
}
|
||||
)
|
||||
kpi_values.append(kpi_value)
|
||||
except Exception as e:
|
||||
self.logger.error(f"Failed to calculate KPI {kpi_name}: {e}")
|
||||
|
||||
# Store KPIs
|
||||
await self._store_kpis(kpi_values)
|
||||
|
||||
return kpi_values
|
||||
|
||||
async def _calculate_kpi(self, kpi_name: str, kpi_def: KPIDefinition) -> float:
|
||||
"""Calculate a specific KPI"""
|
||||
method_name = kpi_def.calculation_method
|
||||
method = getattr(self, method_name, None)
|
||||
|
||||
if method is None:
|
||||
raise ValueError(f"Unknown calculation method: {method_name}")
|
||||
|
||||
return await method()
|
||||
|
||||
    async def _store_kpis(self, kpi_values: List[KPIValue]):
        """Store KPI values in database"""
        # NOTE(review): persistence is not implemented yet — a session is
        # opened and each value is visited, but nothing is written.
        with self.Session() as db:
            for kpi in kpi_values:
                # Implementation would store in KPI table
                pass
|
||||
# KPI Calculation Methods
|
||||
|
||||
async def count_active_marketplaces(self) -> float:
|
||||
"""Count active marketplaces"""
|
||||
with self.Session() as db:
|
||||
# Query active tenants with marketplace enabled
|
||||
count = db.execute(
|
||||
select(func.count(Tenant.id))
|
||||
.where(
|
||||
and_(
|
||||
Tenant.status == "active",
|
||||
Tenant.features.contains(["marketplace"])
|
||||
)
|
||||
)
|
||||
).scalar()
|
||||
return float(count)
|
||||
|
||||
async def sum_transaction_volume(self) -> float:
|
||||
"""Sum total transaction volume in USD"""
|
||||
with self.Session() as db:
|
||||
# Get transactions in last 24 hours
|
||||
total = db.execute(
|
||||
select(func.sum(Transaction.amount_usd))
|
||||
.where(
|
||||
Transaction.timestamp >= datetime.utcnow() - timedelta(days=1)
|
||||
)
|
||||
).scalar()
|
||||
return float(total or 0)
|
||||
|
||||
async def calculate_utilization(self) -> float:
|
||||
"""Calculate marketplace utilization percentage"""
|
||||
# Get total capacity and used capacity
|
||||
total_capacity = await self._get_total_capacity()
|
||||
used_capacity = await self._get_used_capacity()
|
||||
|
||||
if total_capacity == 0:
|
||||
return 0.0
|
||||
|
||||
return (used_capacity / total_capacity) * 100
|
||||
|
||||
async def sum_cross_chain_volume(self) -> float:
|
||||
"""Sum cross-chain transaction volume"""
|
||||
with self.Session() as db:
|
||||
total = db.execute(
|
||||
select(func.sum(CrossChainTransaction.amount_usd))
|
||||
.where(
|
||||
CrossChainTransaction.timestamp >= datetime.utcnow() - timedelta(days=1)
|
||||
)
|
||||
).scalar()
|
||||
return float(total or 0)
|
||||
|
||||
async def count_active_bridges(self) -> float:
|
||||
"""Count active cross-chain bridges"""
|
||||
# Query bridge service
|
||||
bridges = await self._query_bridge_service("/bridges?status=active")
|
||||
return float(len(bridges))
|
||||
|
||||
async def calculate_bridge_success_rate(self) -> float:
|
||||
"""Calculate bridge transaction success rate"""
|
||||
with self.Session() as db:
|
||||
total = db.execute(
|
||||
select(func.count(CrossChainTransaction.id))
|
||||
.where(
|
||||
CrossChainTransaction.timestamp >= datetime.utcnow() - timedelta(hours=24)
|
||||
)
|
||||
).scalar()
|
||||
|
||||
successful = db.execute(
|
||||
select(func.count(CrossChainTransaction.id))
|
||||
.where(
|
||||
and_(
|
||||
CrossChainTransaction.timestamp >= datetime.utcnow() - timedelta(hours=24),
|
||||
CrossChainTransaction.status == "completed"
|
||||
)
|
||||
)
|
||||
).scalar()
|
||||
|
||||
if total == 0:
|
||||
return 100.0
|
||||
|
||||
return (successful / total) * 100
|
||||
|
||||
async def count_active_developers(self) -> float:
|
||||
"""Count active developers (last 30 days)"""
|
||||
# Query GitHub API and local records
|
||||
github_contributors = await self._query_github_api("/contributors")
|
||||
local_developers = await self._count_local_developers()
|
||||
|
||||
# Combine and deduplicate
|
||||
all_developers = set(github_contributors + local_developers)
|
||||
return float(len(all_developers))
|
||||
|
||||
async def count_new_extensions(self) -> float:
|
||||
"""Count new extensions this week"""
|
||||
with self.Session() as db:
|
||||
count = db.execute(
|
||||
select(func.count(Extension.id))
|
||||
.where(
|
||||
Extension.created_at >= datetime.utcnow() - timedelta(weeks=1)
|
||||
)
|
||||
).scalar()
|
||||
return float(count)
|
||||
|
||||
async def calculate_satisfaction_score(self) -> float:
|
||||
"""Calculate developer satisfaction score"""
|
||||
# Aggregate from multiple sources
|
||||
survey_scores = await self._get_survey_scores()
|
||||
issue_sentiment = await self._analyze_issue_sentiment()
|
||||
discord_sentiment = await self._analyze_discord_sentiment()
|
||||
|
||||
# Weighted average
|
||||
weights = {"survey": 0.5, "issues": 0.25, "discord": 0.25}
|
||||
|
||||
score = (
|
||||
survey_scores * weights["survey"] +
|
||||
issue_sentiment * weights["issues"] +
|
||||
discord_sentiment * weights["discord"]
|
||||
)
|
||||
|
||||
return score
|
||||
|
||||
async def count_active_users(self) -> float:
|
||||
"""Count active users (last 30 days)"""
|
||||
with self.Session() as db:
|
||||
count = db.execute(
|
||||
select(func.count(User.id))
|
||||
.where(
|
||||
User.last_active >= datetime.utcnow() - timedelta(days=30)
|
||||
)
|
||||
).scalar()
|
||||
return float(count)
|
||||
|
||||
async def calculate_retention_rate(self) -> float:
|
||||
"""Calculate 30-day user retention rate"""
|
||||
# Cohort analysis
|
||||
cohort_users = await self._get_cohort_users(30) # Users from 30 days ago
|
||||
retained_users = await self._count_retained_users(cohort_users)
|
||||
|
||||
if not cohort_users:
|
||||
return 0.0
|
||||
|
||||
return (retained_users / len(cohort_users)) * 100
|
||||
|
||||
async def calculate_nps(self) -> float:
|
||||
"""Calculate Net Promoter Score"""
|
||||
responses = await self._get_nps_responses()
|
||||
|
||||
if not responses:
|
||||
return 0.0
|
||||
|
||||
promoters = sum(1 for r in responses if r >= 9)
|
||||
detractors = sum(1 for r in responses if r <= 6)
|
||||
|
||||
nps = ((promoters - detractors) / len(responses)) * 100
|
||||
return nps
|
||||
|
||||
async def calculate_revenue(self) -> float:
|
||||
"""Calculate total platform revenue"""
|
||||
with self.Session() as db:
|
||||
total = db.execute(
|
||||
select(func.sum(Revenue.amount))
|
||||
.where(
|
||||
Revenue.period == "monthly"
|
||||
)
|
||||
).scalar()
|
||||
return float(total or 0)
|
||||
|
||||
async def calculate_cost_per_tx(self) -> float:
|
||||
"""Calculate cost per transaction"""
|
||||
total_cost = await self._get_monthly_costs()
|
||||
tx_count = await self._get_monthly_tx_count()
|
||||
|
||||
if tx_count == 0:
|
||||
return 0.0
|
||||
|
||||
return total_cost / tx_count
|
||||
|
||||
async def calculate_profit_margin(self) -> float:
|
||||
"""Calculate profit margin percentage"""
|
||||
revenue = await self.calculate_revenue()
|
||||
costs = await self._get_monthly_costs()
|
||||
|
||||
if revenue == 0:
|
||||
return 0.0
|
||||
|
||||
profit = revenue - costs
|
||||
return (profit / revenue) * 100
|
||||
|
||||
async def get_hash_rate(self) -> float:
|
||||
"""Get current network hash rate"""
|
||||
# Query blockchain node metrics
|
||||
metrics = await self._query_blockchain_metrics()
|
||||
return float(metrics.get("hash_rate", 0))
|
||||
|
||||
async def calculate_average_block_time(self) -> float:
|
||||
"""Calculate average block time"""
|
||||
with self.Session() as db:
|
||||
avg_time = db.execute(
|
||||
select(func.avg(Block.timestamp_diff))
|
||||
.where(
|
||||
Block.timestamp >= datetime.utcnow() - timedelta(hours=1)
|
||||
)
|
||||
).scalar()
|
||||
return float(avg_time or 0)
|
||||
|
||||
async def calculate_uptime(self) -> float:
|
||||
"""Calculate platform uptime percentage"""
|
||||
# Get uptime from monitoring service
|
||||
uptime_data = await self._query_monitoring_service("/uptime")
|
||||
return float(uptime_data.get("uptime_percentage", 0))
|
||||
|
||||
# Helper methods for data collection
|
||||
|
||||
    async def _get_total_capacity(self) -> float:
        """Get total marketplace capacity"""
        # NOTE(review): stubbed — returns a hard-coded sample value until
        # the marketplace-service query is implemented.
        # Implementation would query marketplace service
        return 10000.0  # Sample
||||
    async def _get_used_capacity(self) -> float:
        """Get used marketplace capacity"""
        # NOTE(review): stubbed — returns a hard-coded sample value until
        # the usage-metrics query is implemented.
        # Implementation would query usage metrics
        return 7500.0  # Sample
||||
    async def _query_bridge_service(self, endpoint: str) -> List[Dict]:
        """Query bridge service API"""
        # NOTE(review): stubbed — always returns an empty list; the HTTP
        # call to the bridge service is not implemented yet.
        # Implementation would make HTTP request
        return []  # Sample
||||
    async def _query_github_api(self, endpoint: str) -> List[str]:
        """Query GitHub API"""
        # NOTE(review): stubbed — always returns an empty list; the GitHub
        # API integration is not implemented yet.
        # Implementation would use GitHub API
        return []  # Sample
||||
async def _count_local_developers(self) -> List[str]:
|
||||
"""Count local developers"""
|
||||
with self.Session() as db:
|
||||
developers = db.execute(
|
||||
select(Developer.github_username)
|
||||
.where(
|
||||
Developer.last_active >= datetime.utcnow() - timedelta(days=30)
|
||||
)
|
||||
).all()
|
||||
return [d[0] for d in developers]
|
||||
|
||||
    async def _get_survey_scores(self) -> float:
        """Get survey satisfaction scores"""
        # NOTE(review): stubbed — returns a hard-coded sample score until
        # the survey-service query is implemented.
        # Implementation would query survey service
        return 4.2  # Sample
||||
    async def _analyze_issue_sentiment(self) -> float:
        """Analyze GitHub issue sentiment"""
        # NOTE(review): stubbed — returns a hard-coded sample score;
        # sentiment analysis is not implemented yet.
        # Implementation would use sentiment analysis
        return 3.8  # Sample
||||
    async def _analyze_discord_sentiment(self) -> float:
        """Analyze Discord message sentiment"""
        # NOTE(review): stubbed — returns a hard-coded sample score;
        # sentiment analysis is not implemented yet.
        # Implementation would use sentiment analysis
        return 4.0  # Sample
||||
async def _get_cohort_users(self, days_ago: int) -> List[str]:
|
||||
"""Get users from a specific cohort"""
|
||||
with self.Session() as db:
|
||||
cohort_date = datetime.utcnow() - timedelta(days=days_ago)
|
||||
users = db.execute(
|
||||
select(User.id)
|
||||
.where(
|
||||
and_(
|
||||
User.created_at >= cohort_date,
|
||||
User.created_at < cohort_date + timedelta(days=1)
|
||||
)
|
||||
)
|
||||
).all()
|
||||
return [u[0] for u in users]
|
||||
|
||||
async def _count_retained_users(self, user_ids: List[str]) -> int:
|
||||
"""Count how many users are still active"""
|
||||
with self.Session() as db:
|
||||
count = db.execute(
|
||||
select(func.count(User.id))
|
||||
.where(
|
||||
and_(
|
||||
User.id.in_(user_ids),
|
||||
User.last_active >= datetime.utcnow() - timedelta(days=30)
|
||||
)
|
||||
)
|
||||
).scalar()
|
||||
return count
|
||||
|
||||
    async def _get_nps_responses(self) -> List[int]:
        """Get NPS survey responses"""
        # NOTE(review): stubbed — returns fixed sample responses until the
        # survey-service query is implemented.
        # Implementation would query survey service
        return [9, 10, 8, 7, 9, 10, 6, 9]  # Sample
||||
    async def _get_monthly_costs(self) -> float:
        """Get monthly operational costs"""
        # NOTE(review): stubbed — returns a hard-coded sample figure until
        # the financial-service query is implemented.
        # Implementation would query financial service
        return 800000.0  # Sample
||||
async def _get_monthly_tx_count(self) -> int:
|
||||
"""Get monthly transaction count"""
|
||||
with self.Session() as db:
|
||||
count = db.execute(
|
||||
select(func.count(Transaction.id))
|
||||
.where(
|
||||
Transaction.timestamp >= datetime.utcnow() - timedelta(days=30)
|
||||
)
|
||||
).scalar()
|
||||
return count
|
||||
|
||||
    async def _query_blockchain_metrics(self) -> Dict[str, float]:
        """Query blockchain node metrics"""
        # NOTE(review): stubbed — returns a fixed sample hash rate until
        # the blockchain-node query is implemented.
        # Implementation would query blockchain node
        return {"hash_rate": 1000000000.0}  # Sample
||||
    async def _query_monitoring_service(self, endpoint: str) -> Dict[str, float]:
        """Query monitoring service"""
        # NOTE(review): stubbed — returns a fixed sample uptime until the
        # monitoring-service query is implemented.
        # Implementation would query monitoring service
        return {"uptime_percentage": 99.95}  # Sample
||||
async def generate_kpi_dashboard(self, period: str = "monthly") -> Dict[str, Any]:
|
||||
"""Generate comprehensive KPI dashboard"""
|
||||
# Collect all KPIs
|
||||
kpis = await self.collect_all_kpis("all")
|
||||
|
||||
# Group by category
|
||||
by_category = {}
|
||||
for kpi in kpis:
|
||||
if kpi.category not in by_category:
|
||||
by_category[kpi.category] = []
|
||||
by_category[kpi.category].append(kpi)
|
||||
|
||||
# Calculate health scores
|
||||
health_scores = await self._calculate_health_scores(by_category)
|
||||
|
||||
# Generate insights
|
||||
insights = await self._generate_insights(kpis)
|
||||
|
||||
# Create visualizations
|
||||
charts = await self._create_charts(kpis)
|
||||
|
||||
return {
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
"period": period,
|
||||
"kpis": [asdict(kpi) for kpi in kpis],
|
||||
"by_category": {
|
||||
cat: [asdict(kpi) for kpi in kpis]
|
||||
for cat, kpis in by_category.items()
|
||||
},
|
||||
"health_scores": health_scores,
|
||||
"insights": insights,
|
||||
"charts": charts,
|
||||
}
|
||||
|
||||
async def _calculate_health_scores(self, by_category: Dict[str, List[KPIValue]]) -> Dict[str, float]:
|
||||
"""Calculate health scores for each category"""
|
||||
scores = {}
|
||||
|
||||
for category, kpis in by_category.items():
|
||||
if not kpis:
|
||||
scores[category] = 0.0
|
||||
continue
|
||||
|
||||
# Weight by importance
|
||||
total_score = 0.0
|
||||
total_weight = 0.0
|
||||
|
||||
for kpi in kpis:
|
||||
target = kpi.metadata.get("target", 0)
|
||||
if target == 0:
|
||||
continue
|
||||
|
||||
# Calculate score as percentage of target
|
||||
score = min((kpi.value / target) * 100, 100)
|
||||
|
||||
# Apply importance weight
|
||||
weight = {"high": 3, "medium": 2, "low": 1}.get(
|
||||
kpi.metadata.get("importance", "medium"), 2
|
||||
)
|
||||
|
||||
total_score += score * weight
|
||||
total_weight += weight
|
||||
|
||||
if total_weight > 0:
|
||||
scores[category] = total_score / total_weight
|
||||
else:
|
||||
scores[category] = 0.0
|
||||
|
||||
return scores
|
||||
|
||||
async def _generate_insights(self, kpis: List[KPIValue]) -> List[str]:
|
||||
"""Generate insights from KPI data"""
|
||||
insights = []
|
||||
|
||||
# Analyze trends
|
||||
for kpi in kpis:
|
||||
if kpi.value < (kpi.metadata.get("target", 0) * 0.8):
|
||||
insights.append(
|
||||
f"⚠️ {kpi.kpi_name} is below target ({kpi.value:.2f} vs {kpi.metadata.get('target')})"
|
||||
)
|
||||
elif kpi.value > (kpi.metadata.get("target", 0) * 1.2):
|
||||
insights.append(
|
||||
f"🎉 {kpi.kpi_name} exceeds target ({kpi.value:.2f} vs {kpi.metadata.get('target')})"
|
||||
)
|
||||
|
||||
# Cross-category insights
|
||||
marketplace_kpis = [k for k in kpis if k.category == "marketplace"]
|
||||
if marketplace_kpis:
|
||||
volume_kpi = next((k for k in marketplace_kpis if k.kpi_name == "total_volume_usd"), None)
|
||||
utilization_kpi = next((k for k in marketplace_kpis if k.kpi_name == "marketplace_utilization"), None)
|
||||
|
||||
if volume_kpi and utilization_kpi:
|
||||
if volume_kpi.value > 1000000 and utilization_kpi.value < 50:
|
||||
insights.append(
|
||||
"💡 High volume but low utilization - consider increasing capacity"
|
||||
)
|
||||
|
||||
return insights[:10] # Limit to top 10 insights
|
||||
|
||||
async def _create_charts(self, kpis: List[KPIValue]) -> Dict[str, str]:
|
||||
"""Create chart visualizations"""
|
||||
charts = {}
|
||||
|
||||
# KPI gauge charts
|
||||
for kpi in kpis[:5]: # Limit to top 5
|
||||
fig = go.Figure(go.Indicator(
|
||||
mode = "gauge+number+delta",
|
||||
value = kpi.value,
|
||||
domain = {'x': [0, 1], 'y': [0, 1]},
|
||||
title = {'text': kpi.kpi_name},
|
||||
delta = {'reference': kpi.metadata.get('target', 0)},
|
||||
gauge = {
|
||||
'axis': {'range': [None, kpi.metadata.get('target', 100) * 1.5]},
|
||||
'bar': {'color': "darkblue"},
|
||||
'steps': [
|
||||
{'range': [0, kpi.metadata.get('target', 100) * 0.5], 'color': "lightgray"},
|
||||
{'range': [kpi.metadata.get('target', 100) * 0.5, kpi.metadata.get('target', 100)], 'color': "gray"}
|
||||
],
|
||||
'threshold': {
|
||||
'line': {'color': "red", 'width': 4},
|
||||
'thickness': 0.75,
|
||||
'value': kpi.metadata.get('target', 100) * 0.9
|
||||
}
|
||||
}
|
||||
))
|
||||
|
||||
charts[f"gauge_{kpi.kpi_name}"] = fig.to_json()
|
||||
|
||||
# Category comparison chart
|
||||
categories = {}
|
||||
for kpi in kpis:
|
||||
if kpi.category not in categories:
|
||||
categories[kpi.category] = []
|
||||
categories[kpi.category].append(kpi.value / (kpi.metadata.get('target', 1) * 100))
|
||||
|
||||
fig = px.bar(
|
||||
x=list(categories.keys()),
|
||||
y=[sum(v)/len(v) for v in categories.values()],
|
||||
title="KPI Performance by Category",
|
||||
labels={"x": "Category", "y": "Average % of Target"}
|
||||
)
|
||||
charts["category_comparison"] = fig.to_json()
|
||||
|
||||
return charts
|
||||
|
||||
async def export_kpis(self, format: str = "csv", period: str = "monthly") -> bytes:
|
||||
"""Export KPI data"""
|
||||
kpis = await self.collect_all_kpis(period)
|
||||
|
||||
# Convert to DataFrame
|
||||
df = pd.DataFrame([asdict(kpi) for kpi in kpis])
|
||||
|
||||
if format == "csv":
|
||||
return df.to_csv(index=False).encode('utf-8')
|
||||
elif format == "json":
|
||||
return df.to_json(orient='records', indent=2).encode('utf-8')
|
||||
elif format == "excel":
|
||||
return df.to_excel(index=False).encode('utf-8')
|
||||
else:
|
||||
raise ValueError(f"Unsupported format: {format}")
|
||||
|
||||
async def generate_strategy_review(self, quarter: str) -> Dict[str, Any]:
|
||||
"""Generate quarterly strategy review document"""
|
||||
# Get KPI data for the quarter
|
||||
kpis = await self.collect_all_kpis("all")
|
||||
|
||||
# Compare with previous quarter
|
||||
previous_kpis = await self._get_previous_quarter_kpis(quarter)
|
||||
|
||||
# Generate analysis
|
||||
analysis = {
|
||||
"quarter": quarter,
|
||||
"executive_summary": await self._generate_executive_summary(kpis, previous_kpis),
|
||||
"key_achievements": await self._identify_achievements(kpis),
|
||||
"challenges": await self._identify_challenges(kpis),
|
||||
"recommendations": await self._generate_recommendations(kpis, previous_kpis),
|
||||
"next_quarter_goals": await self._set_next_quarter_goals(kpis),
|
||||
}
|
||||
|
||||
return analysis
|
||||
|
||||
    async def _get_previous_quarter_kpis(self, quarter: str) -> List[KPIValue]:
        """Get KPIs from previous quarter"""
        # NOTE(review): stubbed — always returns an empty list; historical
        # KPI lookup is not implemented yet, so quarter-over-quarter
        # comparisons currently compare against nothing.
        # Implementation would query historical KPI data
        return []  # Sample
||||
    async def _generate_executive_summary(self, kpis: List[KPIValue], previous: List[KPIValue]) -> str:
        """Generate executive summary"""
        # NOTE(review): stubbed — returns fixed placeholder text; the
        # quoted growth figures are NOT derived from the kpis/previous
        # arguments and should not be treated as real data.
        # Implementation would analyze KPI trends
        return "Ecosystem shows strong growth with 25% increase in active users and 40% growth in transaction volume."
||||
async def _identify_achievements(self, kpis: List[KPIValue]) -> List[str]:
|
||||
"""Identify key achievements"""
|
||||
achievements = []
|
||||
|
||||
for kpi in kpis:
|
||||
if kpi.value >= kpi.metadata.get("target", 0):
|
||||
achievements.append(
|
||||
f"Exceeded {kpi.kpi_name} target with {kpi.value:.2f} (target: {kpi.metadata.get('target')})"
|
||||
)
|
||||
|
||||
return achievements
|
||||
|
||||
async def _identify_challenges(self, kpis: List[KPIValue]) -> List[str]:
|
||||
"""Identify challenges and areas for improvement"""
|
||||
challenges = []
|
||||
|
||||
for kpi in kpis:
|
||||
if kpi.value < (kpi.metadata.get("target", 0) * 0.7):
|
||||
challenges.append(
|
||||
f"{kpi.kpi_name} below target at {kpi.value:.2f} (target: {kpi.metadata.get('target')})"
|
||||
)
|
||||
|
||||
return challenges
|
||||
|
||||
async def _generate_recommendations(self, kpis: List[KPIValue], previous: List[KPIValue]) -> List[str]:
|
||||
"""Generate strategic recommendations"""
|
||||
recommendations = []
|
||||
|
||||
# Analyze trends and generate recommendations
|
||||
recommendations.extend([
|
||||
"Focus on improving developer onboarding to increase extension creation",
|
||||
"Invest in cross-chain infrastructure to support growing volume",
|
||||
"Enhance user retention programs to improve 30-day retention rate",
|
||||
])
|
||||
|
||||
return recommendations
|
||||
|
||||
async def _set_next_quarter_goals(self, kpis: List[KPIValue]) -> Dict[str, float]:
|
||||
"""Set goals for next quarter"""
|
||||
goals = {}
|
||||
|
||||
for kpi in kpis:
|
||||
# Set goals 10-20% higher than current performance
|
||||
current_target = kpi.metadata.get("target", kpi.value)
|
||||
next_target = current_target * 1.15
|
||||
goals[kpi.kpi_name] = next_target
|
||||
|
||||
return goals
|
||||
|
||||
|
||||
# CLI interface
|
||||
async def main():
    """CLI entry point for the KPI tracker.

    Supports collecting KPIs, rendering the dashboard, exporting data,
    and generating quarterly strategy reviews.
    """
    import argparse

    parser = argparse.ArgumentParser(description="AITBC Ecosystem KPI Tracker")
    parser.add_argument("--collect", action="store_true", help="Collect all KPIs")
    parser.add_argument("--dashboard", action="store_true", help="Generate KPI dashboard")
    parser.add_argument("--export", choices=["csv", "json", "excel"], help="Export KPIs")
    parser.add_argument("--period", default="daily", help="Period for KPI collection")
    parser.add_argument("--strategy-review", help="Generate strategy review for quarter")
    args = parser.parse_args()

    tracker = EcosystemKPITracker()

    if args.collect:
        collected = await tracker.collect_all_kpis(args.period)
        print(f"Collected {len(collected)} KPIs")
        for item in collected:
            print(f"{item.kpi_name}: {item.value:.2f} {item.unit}")
    elif args.dashboard:
        dashboard = await tracker.generate_kpi_dashboard()
        print(json.dumps(dashboard, indent=2, default=str))
    elif args.export:
        payload = await tracker.export_kpis(args.export, args.period)
        print(payload.decode())
    elif args.strategy_review:
        review = await tracker.generate_strategy_review(args.strategy_review)
        print(json.dumps(review, indent=2, default=str))
    else:
        parser.print_help()
||||
|
||||
# Run the CLI entry point when this module is executed as a script.
if __name__ == "__main__":
    asyncio.run(main())
Reference in New Issue
Block a user