feat: add marketplace metrics, privacy features, and service registry endpoints
- Add Prometheus metrics for marketplace API throughput and error rates with new dashboard panels
- Implement confidential transaction models with encryption support and access control
- Add key management system with registration, rotation, and audit logging
- Create services and registry routers for service discovery and management
- Integrate ZK proof generation for privacy-preserving receipts
- Add metrics instru
ecosystem-analytics/kpi_tracker.py (new file)
@@ -0,0 +1,927 @@
"""
Ecosystem KPI Tracker for AITBC
Tracks key performance indicators for ecosystem health and strategy reviews
"""

import asyncio
import io
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, asdict
from decimal import Decimal
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine, select, func, and_, or_
from sqlalchemy.orm import sessionmaker
from enum import Enum

from ..config import settings
from ..database import get_db
# ORM models referenced by the KPI queries below; assumed to live in the
# package's models module (adjust the import path to match the project)
from ..models import (
    Block,
    CrossChainTransaction,
    Developer,
    Extension,
    Revenue,
    Tenant,
    Transaction,
    User,
)


class KPICategory(Enum):
    """Categories of KPIs"""
    MARKETPLACE = "marketplace"
    CROSS_CHAIN = "cross_chain"
    DEVELOPER = "developer"
    USER = "user"
    FINANCIAL = "financial"
    TECHNICAL = "technical"


@dataclass
class KPIDefinition:
    """Definition of a KPI"""
    name: str
    category: KPICategory
    description: str
    unit: str
    target: Optional[float]
    calculation_method: str
    data_sources: List[str]
    frequency: str  # daily, weekly, monthly
    importance: str  # high, medium, low


@dataclass
class KPIValue:
    """A single KPI measurement"""
    timestamp: datetime
    kpi_name: str
    value: float
    unit: str
    category: str
    metadata: Dict[str, Any]


class EcosystemKPITracker:
    """Main KPI tracking system"""

    def __init__(self):
        self.engine = create_engine(settings.DATABASE_URL)
        self.Session = sessionmaker(bind=self.engine)
        self.logger = logging.getLogger(__name__)

        # Define all tracked KPIs
        self.kpi_definitions = self._initialize_kpi_definitions()

    def _initialize_kpi_definitions(self) -> Dict[str, KPIDefinition]:
        """Initialize all KPI definitions"""
        return {
            # Marketplace KPIs
            "active_marketplaces": KPIDefinition(
                name="active_marketplaces",
                category=KPICategory.MARKETPLACE,
                description="Number of active marketplaces on the platform",
                unit="count",
                target=50.0,
                calculation_method="count_active_marketplaces",
                data_sources=["marketplace_service", "tenant_db"],
                frequency="daily",
                importance="high"
            ),
            "total_volume_usd": KPIDefinition(
                name="total_volume_usd",
                category=KPICategory.MARKETPLACE,
                description="Total transaction volume in USD",
                unit="USD",
                target=10000000.0,
                calculation_method="sum_transaction_volume",
                data_sources=["transaction_db", "price_oracle"],
                frequency="daily",
                importance="high"
            ),
            "marketplace_utilization": KPIDefinition(
                name="marketplace_utilization",
                category=KPICategory.MARKETPLACE,
                description="Percentage of utilized marketplace capacity",
                unit="percent",
                target=75.0,
                calculation_method="calculate_utilization",
                data_sources=["marketplace_service", "usage_metrics"],
                frequency="hourly",
                importance="medium"
            ),

            # Cross-Chain KPIs
            "cross_chain_volume": KPIDefinition(
                name="cross_chain_volume",
                category=KPICategory.CROSS_CHAIN,
                description="Total cross-chain transaction volume",
                unit="USD",
                target=5000000.0,
                calculation_method="sum_cross_chain_volume",
                data_sources=["bridge_service", "transaction_db"],
                frequency="daily",
                importance="high"
            ),
            "active_bridges": KPIDefinition(
                name="active_bridges",
                category=KPICategory.CROSS_CHAIN,
                description="Number of active cross-chain bridges",
                unit="count",
                target=10.0,
                calculation_method="count_active_bridges",
                data_sources=["bridge_service"],
                frequency="daily",
                importance="medium"
            ),
            "bridge_success_rate": KPIDefinition(
                name="bridge_success_rate",
                category=KPICategory.CROSS_CHAIN,
                description="Success rate of cross-chain transactions",
                unit="percent",
                target=95.0,
                calculation_method="calculate_bridge_success_rate",
                data_sources=["bridge_service", "transaction_db"],
                frequency="hourly",
                importance="high"
            ),

            # Developer KPIs
            "active_developers": KPIDefinition(
                name="active_developers",
                category=KPICategory.DEVELOPER,
                description="Number of active developers in ecosystem",
                unit="count",
                target=1000.0,
                calculation_method="count_active_developers",
                data_sources=["github_api", "developer_db"],
                frequency="weekly",
                importance="high"
            ),
            "new_extensions": KPIDefinition(
                name="new_extensions",
                category=KPICategory.DEVELOPER,
                description="Number of new marketplace extensions created",
                unit="count",
                target=25.0,
                calculation_method="count_new_extensions",
                data_sources=["extension_registry", "github_api"],
                frequency="weekly",
                importance="medium"
            ),
            "developer_satisfaction": KPIDefinition(
                name="developer_satisfaction",
                category=KPICategory.DEVELOPER,
                description="Developer satisfaction score (1-5)",
                unit="score",
                target=4.5,
                calculation_method="calculate_satisfaction_score",
                data_sources=["surveys", "github_issues", "discord_sentiment"],
                frequency="monthly",
                importance="medium"
            ),

            # User KPIs
            "active_users": KPIDefinition(
                name="active_users",
                category=KPICategory.USER,
                description="Number of active users (30-day)",
                unit="count",
                target=10000.0,
                calculation_method="count_active_users",
                data_sources=["user_db", "auth_service"],
                frequency="daily",
                importance="high"
            ),
            "user_retention": KPIDefinition(
                name="user_retention",
                category=KPICategory.USER,
                description="30-day user retention rate",
                unit="percent",
                target=80.0,
                calculation_method="calculate_retention_rate",
                data_sources=["user_db", "analytics_service"],
                frequency="weekly",
                importance="high"
            ),
            "net_promoter_score": KPIDefinition(
                name="net_promoter_score",
                category=KPICategory.USER,
                description="Net Promoter Score",
                unit="score",
                target=50.0,
                calculation_method="calculate_nps",
                data_sources=["surveys", "feedback_service"],
                frequency="monthly",
                importance="medium"
            ),

            # Financial KPIs
            "revenue": KPIDefinition(
                name="revenue",
                category=KPICategory.FINANCIAL,
                description="Total platform revenue",
                unit="USD",
                target=1000000.0,
                calculation_method="calculate_revenue",
                data_sources=["billing_service", "payment_processor"],
                frequency="monthly",
                importance="high"
            ),
            "cost_per_transaction": KPIDefinition(
                name="cost_per_transaction",
                category=KPICategory.FINANCIAL,
                description="Average cost per transaction",
                unit="USD",
                target=0.10,
                calculation_method="calculate_cost_per_tx",
                data_sources=["billing_service", "metrics_service"],
                frequency="monthly",
                importance="medium"
            ),
            "profit_margin": KPIDefinition(
                name="profit_margin",
                category=KPICategory.FINANCIAL,
                description="Platform profit margin",
                unit="percent",
                target=20.0,
                calculation_method="calculate_profit_margin",
                data_sources=["billing_service", "financial_db"],
                frequency="quarterly",
                importance="high"
            ),

            # Technical KPIs
            "network_hash_rate": KPIDefinition(
                name="network_hash_rate",
                category=KPICategory.TECHNICAL,
                description="Network hash rate",
                unit="H/s",
                target=1000000000.0,
                calculation_method="get_hash_rate",
                data_sources=["blockchain_node", "metrics_service"],
                frequency="hourly",
                importance="high"
            ),
            "block_time": KPIDefinition(
                name="block_time",
                category=KPICategory.TECHNICAL,
                description="Average block time",
                unit="seconds",
                target=12.0,
                calculation_method="calculate_average_block_time",
                data_sources=["blockchain_node", "block_db"],
                frequency="hourly",
                importance="high"
            ),
            "uptime": KPIDefinition(
                name="uptime",
                category=KPICategory.TECHNICAL,
                description="Platform uptime percentage",
                unit="percent",
                target=99.9,
                calculation_method="calculate_uptime",
                data_sources=["monitoring_service", "health_checks"],
                frequency="daily",
                importance="high"
            ),
        }

    async def collect_all_kpis(self, period: str = "daily") -> List[KPIValue]:
        """Collect all KPIs for a given period"""
        kpi_values = []

        for kpi_name, kpi_def in self.kpi_definitions.items():
            if kpi_def.frequency == period or period == "all":
                try:
                    value = await self._calculate_kpi(kpi_name, kpi_def)
                    kpi_value = KPIValue(
                        timestamp=datetime.utcnow(),
                        kpi_name=kpi_name,
                        value=value,
                        unit=kpi_def.unit,
                        category=kpi_def.category.value,
                        metadata={
                            "target": kpi_def.target,
                            "importance": kpi_def.importance,
                        }
                    )
                    kpi_values.append(kpi_value)
                except Exception as e:
                    self.logger.error(f"Failed to calculate KPI {kpi_name}: {e}")

        # Store KPIs
        await self._store_kpis(kpi_values)

        return kpi_values

    async def _calculate_kpi(self, kpi_name: str, kpi_def: KPIDefinition) -> float:
        """Calculate a specific KPI"""
        method_name = kpi_def.calculation_method
        method = getattr(self, method_name, None)

        if method is None:
            raise ValueError(f"Unknown calculation method: {method_name}")

        return await method()

    async def _store_kpis(self, kpi_values: List[KPIValue]):
        """Store KPI values in database"""
        with self.Session() as db:
            for kpi in kpi_values:
                # Implementation would store in KPI table
                pass
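
    # Illustrative storage sketch for the stub above (not part of this commit).
    # It assumes a hypothetical KPIRecord ORM model whose columns match the
    # KPIValue fields:
    #
    #     with self.Session() as db:
    #         db.add_all(KPIRecord(**asdict(kpi)) for kpi in kpi_values)
    #         db.commit()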

    # KPI Calculation Methods

    async def count_active_marketplaces(self) -> float:
        """Count active marketplaces"""
        with self.Session() as db:
            # Query active tenants with marketplace enabled
            count = db.execute(
                select(func.count(Tenant.id))
                .where(
                    and_(
                        Tenant.status == "active",
                        Tenant.features.contains(["marketplace"])
                    )
                )
            ).scalar()
            return float(count)

    async def sum_transaction_volume(self) -> float:
        """Sum total transaction volume in USD"""
        with self.Session() as db:
            # Get transactions in last 24 hours
            total = db.execute(
                select(func.sum(Transaction.amount_usd))
                .where(
                    Transaction.timestamp >= datetime.utcnow() - timedelta(days=1)
                )
            ).scalar()
            return float(total or 0)

    async def calculate_utilization(self) -> float:
        """Calculate marketplace utilization percentage"""
        # Get total capacity and used capacity
        total_capacity = await self._get_total_capacity()
        used_capacity = await self._get_used_capacity()

        if total_capacity == 0:
            return 0.0

        return (used_capacity / total_capacity) * 100

    async def sum_cross_chain_volume(self) -> float:
        """Sum cross-chain transaction volume"""
        with self.Session() as db:
            total = db.execute(
                select(func.sum(CrossChainTransaction.amount_usd))
                .where(
                    CrossChainTransaction.timestamp >= datetime.utcnow() - timedelta(days=1)
                )
            ).scalar()
            return float(total or 0)

    async def count_active_bridges(self) -> float:
        """Count active cross-chain bridges"""
        # Query bridge service
        bridges = await self._query_bridge_service("/bridges?status=active")
        return float(len(bridges))

    async def calculate_bridge_success_rate(self) -> float:
        """Calculate bridge transaction success rate"""
        with self.Session() as db:
            total = db.execute(
                select(func.count(CrossChainTransaction.id))
                .where(
                    CrossChainTransaction.timestamp >= datetime.utcnow() - timedelta(hours=24)
                )
            ).scalar()

            successful = db.execute(
                select(func.count(CrossChainTransaction.id))
                .where(
                    and_(
                        CrossChainTransaction.timestamp >= datetime.utcnow() - timedelta(hours=24),
                        CrossChainTransaction.status == "completed"
                    )
                )
            ).scalar()

            if total == 0:
                return 100.0

            return (successful / total) * 100

    async def count_active_developers(self) -> float:
        """Count active developers (last 30 days)"""
        # Query GitHub API and local records
        github_contributors = await self._query_github_api("/contributors")
        local_developers = await self._get_local_developers()

        # Combine and deduplicate
        all_developers = set(github_contributors + local_developers)
        return float(len(all_developers))

    async def count_new_extensions(self) -> float:
        """Count new extensions this week"""
        with self.Session() as db:
            count = db.execute(
                select(func.count(Extension.id))
                .where(
                    Extension.created_at >= datetime.utcnow() - timedelta(weeks=1)
                )
            ).scalar()
            return float(count)

    async def calculate_satisfaction_score(self) -> float:
        """Calculate developer satisfaction score"""
        # Aggregate from multiple sources
        survey_scores = await self._get_survey_scores()
        issue_sentiment = await self._analyze_issue_sentiment()
        discord_sentiment = await self._analyze_discord_sentiment()

        # Weighted average
        weights = {"survey": 0.5, "issues": 0.25, "discord": 0.25}

        score = (
            survey_scores * weights["survey"] +
            issue_sentiment * weights["issues"] +
            discord_sentiment * weights["discord"]
        )

        return score
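
    # Worked example for the weighted average above, using the sample values
    # returned by the helper stubs (survey 4.2, issues 3.8, discord 4.0):
    #     0.5 * 4.2 + 0.25 * 3.8 + 0.25 * 4.0 = 4.05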

    async def count_active_users(self) -> float:
        """Count active users (last 30 days)"""
        with self.Session() as db:
            count = db.execute(
                select(func.count(User.id))
                .where(
                    User.last_active >= datetime.utcnow() - timedelta(days=30)
                )
            ).scalar()
            return float(count)

    async def calculate_retention_rate(self) -> float:
        """Calculate 30-day user retention rate"""
        # Cohort analysis
        cohort_users = await self._get_cohort_users(30)  # Users from 30 days ago
        retained_users = await self._count_retained_users(cohort_users)

        if not cohort_users:
            return 0.0

        return (retained_users / len(cohort_users)) * 100

    async def calculate_nps(self) -> float:
        """Calculate Net Promoter Score"""
        responses = await self._get_nps_responses()

        if not responses:
            return 0.0

        promoters = sum(1 for r in responses if r >= 9)
        detractors = sum(1 for r in responses if r <= 6)

        nps = ((promoters - detractors) / len(responses)) * 100
        return nps
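
    # Worked example for the NPS formula, using the sample responses from
    # _get_nps_responses ([9, 10, 8, 7, 9, 10, 6, 9]): promoters (>= 9) = 5,
    # detractors (<= 6) = 1, so NPS = ((5 - 1) / 8) * 100 = 50.0.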

    async def calculate_revenue(self) -> float:
        """Calculate total platform revenue"""
        with self.Session() as db:
            total = db.execute(
                select(func.sum(Revenue.amount))
                .where(
                    Revenue.period == "monthly"
                )
            ).scalar()
            return float(total or 0)

    async def calculate_cost_per_tx(self) -> float:
        """Calculate cost per transaction"""
        total_cost = await self._get_monthly_costs()
        tx_count = await self._get_monthly_tx_count()

        if tx_count == 0:
            return 0.0

        return total_cost / tx_count

    async def calculate_profit_margin(self) -> float:
        """Calculate profit margin percentage"""
        revenue = await self.calculate_revenue()
        costs = await self._get_monthly_costs()

        if revenue == 0:
            return 0.0

        profit = revenue - costs
        return (profit / revenue) * 100

    async def get_hash_rate(self) -> float:
        """Get current network hash rate"""
        # Query blockchain node metrics
        metrics = await self._query_blockchain_metrics()
        return float(metrics.get("hash_rate", 0))

    async def calculate_average_block_time(self) -> float:
        """Calculate average block time"""
        with self.Session() as db:
            avg_time = db.execute(
                select(func.avg(Block.timestamp_diff))
                .where(
                    Block.timestamp >= datetime.utcnow() - timedelta(hours=1)
                )
            ).scalar()
            return float(avg_time or 0)

    async def calculate_uptime(self) -> float:
        """Calculate platform uptime percentage"""
        # Get uptime from monitoring service
        uptime_data = await self._query_monitoring_service("/uptime")
        return float(uptime_data.get("uptime_percentage", 0))

    # Helper methods for data collection

    async def _get_total_capacity(self) -> float:
        """Get total marketplace capacity"""
        # Implementation would query marketplace service
        return 10000.0  # Sample

    async def _get_used_capacity(self) -> float:
        """Get used marketplace capacity"""
        # Implementation would query usage metrics
        return 7500.0  # Sample

    async def _query_bridge_service(self, endpoint: str) -> List[Dict]:
        """Query bridge service API"""
        # Implementation would make HTTP request
        return []  # Sample

    async def _query_github_api(self, endpoint: str) -> List[str]:
        """Query GitHub API"""
        # Implementation would use GitHub API
        return []  # Sample

    async def _get_local_developers(self) -> List[str]:
        """Get usernames of local developers active in the last 30 days"""
        with self.Session() as db:
            developers = db.execute(
                select(Developer.github_username)
                .where(
                    Developer.last_active >= datetime.utcnow() - timedelta(days=30)
                )
            ).all()
            return [d[0] for d in developers]

    async def _get_survey_scores(self) -> float:
        """Get survey satisfaction scores"""
        # Implementation would query survey service
        return 4.2  # Sample

    async def _analyze_issue_sentiment(self) -> float:
        """Analyze GitHub issue sentiment"""
        # Implementation would use sentiment analysis
        return 3.8  # Sample

    async def _analyze_discord_sentiment(self) -> float:
        """Analyze Discord message sentiment"""
        # Implementation would use sentiment analysis
        return 4.0  # Sample

    async def _get_cohort_users(self, days_ago: int) -> List[str]:
        """Get users from a specific cohort"""
        with self.Session() as db:
            cohort_date = datetime.utcnow() - timedelta(days=days_ago)
            users = db.execute(
                select(User.id)
                .where(
                    and_(
                        User.created_at >= cohort_date,
                        User.created_at < cohort_date + timedelta(days=1)
                    )
                )
            ).all()
            return [u[0] for u in users]

    async def _count_retained_users(self, user_ids: List[str]) -> int:
        """Count how many users are still active"""
        with self.Session() as db:
            count = db.execute(
                select(func.count(User.id))
                .where(
                    and_(
                        User.id.in_(user_ids),
                        User.last_active >= datetime.utcnow() - timedelta(days=30)
                    )
                )
            ).scalar()
            return count

    async def _get_nps_responses(self) -> List[int]:
        """Get NPS survey responses"""
        # Implementation would query survey service
        return [9, 10, 8, 7, 9, 10, 6, 9]  # Sample

    async def _get_monthly_costs(self) -> float:
        """Get monthly operational costs"""
        # Implementation would query financial service
        return 800000.0  # Sample

    async def _get_monthly_tx_count(self) -> int:
        """Get monthly transaction count"""
        with self.Session() as db:
            count = db.execute(
                select(func.count(Transaction.id))
                .where(
                    Transaction.timestamp >= datetime.utcnow() - timedelta(days=30)
                )
            ).scalar()
            return count

    async def _query_blockchain_metrics(self) -> Dict[str, float]:
        """Query blockchain node metrics"""
        # Implementation would query blockchain node
        return {"hash_rate": 1000000000.0}  # Sample

    async def _query_monitoring_service(self, endpoint: str) -> Dict[str, float]:
        """Query monitoring service"""
        # Implementation would query monitoring service
        return {"uptime_percentage": 99.95}  # Sample

    async def generate_kpi_dashboard(self, period: str = "monthly") -> Dict[str, Any]:
        """Generate comprehensive KPI dashboard"""
        # Collect all KPIs
        kpis = await self.collect_all_kpis("all")

        # Group by category
        by_category = {}
        for kpi in kpis:
            if kpi.category not in by_category:
                by_category[kpi.category] = []
            by_category[kpi.category].append(kpi)

        # Calculate health scores
        health_scores = await self._calculate_health_scores(by_category)

        # Generate insights
        insights = await self._generate_insights(kpis)

        # Create visualizations
        charts = await self._create_charts(kpis)

        return {
            "timestamp": datetime.utcnow().isoformat(),
            "period": period,
            "kpis": [asdict(kpi) for kpi in kpis],
            "by_category": {
                cat: [asdict(kpi) for kpi in cat_kpis]
                for cat, cat_kpis in by_category.items()
            },
            "health_scores": health_scores,
            "insights": insights,
            "charts": charts,
        }

    async def _calculate_health_scores(self, by_category: Dict[str, List[KPIValue]]) -> Dict[str, float]:
        """Calculate health scores for each category"""
        scores = {}

        for category, kpis in by_category.items():
            if not kpis:
                scores[category] = 0.0
                continue

            # Weight by importance
            total_score = 0.0
            total_weight = 0.0

            for kpi in kpis:
                target = kpi.metadata.get("target")
                if not target:
                    # Skip KPIs with a missing or zero target
                    continue

                # Calculate score as percentage of target, capped at 100
                score = min((kpi.value / target) * 100, 100)

                # Apply importance weight
                weight = {"high": 3, "medium": 2, "low": 1}.get(
                    kpi.metadata.get("importance", "medium"), 2
                )

                total_score += score * weight
                total_weight += weight

            if total_weight > 0:
                scores[category] = total_score / total_weight
            else:
                scores[category] = 0.0

        return scores
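
    # Worked example for the weighting above (hypothetical numbers): a category
    # with one high-importance KPI at 90% of target (weight 3) and one
    # medium-importance KPI at 60% of target (weight 2) scores
    #     (90 * 3 + 60 * 2) / (3 + 2) = 78.0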

    async def _generate_insights(self, kpis: List[KPIValue]) -> List[str]:
        """Generate insights from KPI data"""
        insights = []

        # Compare each KPI against its target
        for kpi in kpis:
            target = kpi.metadata.get("target")
            if not target:
                continue
            if kpi.value < target * 0.8:
                insights.append(
                    f"⚠️ {kpi.kpi_name} is below target ({kpi.value:.2f} vs {target})"
                )
            elif kpi.value > target * 1.2:
                insights.append(
                    f"🎉 {kpi.kpi_name} exceeds target ({kpi.value:.2f} vs {target})"
                )

        # Cross-category insights
        marketplace_kpis = [k for k in kpis if k.category == "marketplace"]
        if marketplace_kpis:
            volume_kpi = next((k for k in marketplace_kpis if k.kpi_name == "total_volume_usd"), None)
            utilization_kpi = next((k for k in marketplace_kpis if k.kpi_name == "marketplace_utilization"), None)

            if volume_kpi and utilization_kpi:
                if volume_kpi.value > 1000000 and utilization_kpi.value < 50:
                    insights.append(
                        "💡 High volume but low utilization - consider increasing capacity"
                    )

        return insights[:10]  # Limit to top 10 insights

    async def _create_charts(self, kpis: List[KPIValue]) -> Dict[str, str]:
        """Create chart visualizations"""
        charts = {}

        # KPI gauge charts
        for kpi in kpis[:5]:  # Limit to top 5
            fig = go.Figure(go.Indicator(
                mode="gauge+number+delta",
                value=kpi.value,
                domain={'x': [0, 1], 'y': [0, 1]},
                title={'text': kpi.kpi_name},
                delta={'reference': kpi.metadata.get('target', 0)},
                gauge={
                    'axis': {'range': [None, kpi.metadata.get('target', 100) * 1.5]},
                    'bar': {'color': "darkblue"},
                    'steps': [
                        {'range': [0, kpi.metadata.get('target', 100) * 0.5], 'color': "lightgray"},
                        {'range': [kpi.metadata.get('target', 100) * 0.5, kpi.metadata.get('target', 100)], 'color': "gray"}
                    ],
                    'threshold': {
                        'line': {'color': "red", 'width': 4},
                        'thickness': 0.75,
                        'value': kpi.metadata.get('target', 100) * 0.9
                    }
                }
            ))

            charts[f"gauge_{kpi.kpi_name}"] = fig.to_json()

        # Category comparison chart
        categories = {}
        for kpi in kpis:
            if kpi.category not in categories:
                categories[kpi.category] = []
            # Express each KPI as a percentage of its target (guard against a
            # missing or zero target)
            categories[kpi.category].append(
                (kpi.value / (kpi.metadata.get('target') or 1)) * 100
            )

        fig = px.bar(
            x=list(categories.keys()),
            y=[sum(v) / len(v) for v in categories.values()],
            title="KPI Performance by Category",
            labels={"x": "Category", "y": "Average % of Target"}
        )
        charts["category_comparison"] = fig.to_json()

        return charts

    async def export_kpis(self, format: str = "csv", period: str = "monthly") -> bytes:
        """Export KPI data"""
        kpis = await self.collect_all_kpis(period)

        # Convert to DataFrame
        df = pd.DataFrame([asdict(kpi) for kpi in kpis])

        if format == "csv":
            return df.to_csv(index=False).encode('utf-8')
        elif format == "json":
            return df.to_json(orient='records', indent=2).encode('utf-8')
        elif format == "excel":
            # .xlsx is a binary format and to_excel writes to a path or buffer
            # (returning None), so collect the bytes via BytesIO; requires an
            # Excel engine such as openpyxl
            buffer = io.BytesIO()
            df.to_excel(buffer, index=False)
            return buffer.getvalue()
        else:
            raise ValueError(f"Unsupported format: {format}")
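
    # Example (illustrative): write a monthly CSV export to disk.
    #
    #     data = asyncio.run(tracker.export_kpis("csv", "monthly"))
    #     with open("kpis.csv", "wb") as fh:
    #         fh.write(data)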

    async def generate_strategy_review(self, quarter: str) -> Dict[str, Any]:
        """Generate quarterly strategy review document"""
        # Get KPI data for the quarter
        kpis = await self.collect_all_kpis("all")

        # Compare with previous quarter
        previous_kpis = await self._get_previous_quarter_kpis(quarter)

        # Generate analysis
        analysis = {
            "quarter": quarter,
            "executive_summary": await self._generate_executive_summary(kpis, previous_kpis),
            "key_achievements": await self._identify_achievements(kpis),
            "challenges": await self._identify_challenges(kpis),
            "recommendations": await self._generate_recommendations(kpis, previous_kpis),
            "next_quarter_goals": await self._set_next_quarter_goals(kpis),
        }

        return analysis

    async def _get_previous_quarter_kpis(self, quarter: str) -> List[KPIValue]:
        """Get KPIs from previous quarter"""
        # Implementation would query historical KPI data
        return []  # Sample

    async def _generate_executive_summary(self, kpis: List[KPIValue], previous: List[KPIValue]) -> str:
        """Generate executive summary"""
        # Implementation would analyze KPI trends
        return "Ecosystem shows strong growth with 25% increase in active users and 40% growth in transaction volume."

    async def _identify_achievements(self, kpis: List[KPIValue]) -> List[str]:
        """Identify key achievements"""
        achievements = []

        for kpi in kpis:
            target = kpi.metadata.get("target")
            if target is None:
                continue
            if kpi.value >= target:
                achievements.append(
                    f"Exceeded {kpi.kpi_name} target with {kpi.value:.2f} (target: {target})"
                )

        return achievements

    async def _identify_challenges(self, kpis: List[KPIValue]) -> List[str]:
        """Identify challenges and areas for improvement"""
        challenges = []

        for kpi in kpis:
            target = kpi.metadata.get("target")
            if target is None:
                continue
            if kpi.value < target * 0.7:
                challenges.append(
                    f"{kpi.kpi_name} below target at {kpi.value:.2f} (target: {target})"
                )

        return challenges

    async def _generate_recommendations(self, kpis: List[KPIValue], previous: List[KPIValue]) -> List[str]:
        """Generate strategic recommendations"""
        recommendations = []

        # Analyze trends and generate recommendations
        recommendations.extend([
            "Focus on improving developer onboarding to increase extension creation",
            "Invest in cross-chain infrastructure to support growing volume",
            "Enhance user retention programs to improve 30-day retention rate",
        ])

        return recommendations

    async def _set_next_quarter_goals(self, kpis: List[KPIValue]) -> Dict[str, float]:
        """Set goals for next quarter"""
        goals = {}

        for kpi in kpis:
            # Raise each target by 15% for next quarter; fall back to the
            # measured value when no target is defined
            current_target = kpi.metadata.get("target") or kpi.value
            goals[kpi.kpi_name] = current_target * 1.15

        return goals
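
    # Example programmatic usage (illustrative; assumes settings.DATABASE_URL
    # points at a reachable database and the ORM models imported above exist):
    #
    #     tracker = EcosystemKPITracker()
    #     kpis = asyncio.run(tracker.collect_all_kpis("daily"))
    #     dashboard = asyncio.run(tracker.generate_kpi_dashboard())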


# CLI interface
async def main():
    """CLI entry point"""
    import argparse

    parser = argparse.ArgumentParser(description="AITBC Ecosystem KPI Tracker")
    parser.add_argument("--collect", action="store_true", help="Collect all KPIs")
    parser.add_argument("--dashboard", action="store_true", help="Generate KPI dashboard")
    parser.add_argument("--export", choices=["csv", "json", "excel"], help="Export KPIs")
    parser.add_argument("--period", default="daily", help="Period for KPI collection")
    parser.add_argument("--strategy-review", help="Generate strategy review for quarter")

    args = parser.parse_args()

    tracker = EcosystemKPITracker()

    if args.collect:
        kpis = await tracker.collect_all_kpis(args.period)
        print(f"Collected {len(kpis)} KPIs")
        for kpi in kpis:
            print(f"{kpi.kpi_name}: {kpi.value:.2f} {kpi.unit}")

    elif args.dashboard:
        dashboard = await tracker.generate_kpi_dashboard()
        print(json.dumps(dashboard, indent=2, default=str))

    elif args.export:
        data = await tracker.export_kpis(args.export, args.period)
        print(data.decode())

    elif args.strategy_review:
        review = await tracker.generate_strategy_review(args.strategy_review)
        print(json.dumps(review, indent=2, default=str))

    else:
        parser.print_help()


if __name__ == "__main__":
    asyncio.run(main())
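
# Example CLI usage (illustrative; the flag names match the parser above, while
# the exact invocation path depends on how the parent package is installed):
#
#   $ python -m <package>.kpi_tracker --collect --period daily
#   $ python -m <package>.kpi_tracker --export csv --period monthly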