Update database paths and fix foreign key references across coordinator API

- Change SQLite database path from `/home/oib/windsurf/aitbc/data/` to `/opt/data/`
- Fix foreign key references to use correct table names (users, wallets, gpu_registry)
- Replace governance router with new governance and community routers
- Add multi-modal RL router to main application
- Simplify DEPLOYMENT_READINESS_REPORT.md to focus on production deployment status
- Update governance router with decentralized DAO voting
This commit is contained in:
oib
2026-02-26 19:32:06 +01:00
parent 1e2ea0bb9d
commit 7bb2905cca
89 changed files with 38245 additions and 1260 deletions

View File

@@ -27,7 +27,7 @@ class DatabaseConfig(BaseSettings):
# Default SQLite path
if self.adapter == "sqlite":
return "sqlite:///../data/coordinator.db"
return "sqlite:////opt/data/coordinator.db"
# Default PostgreSQL connection string
return f"{self.adapter}://localhost:5432/coordinator"
@@ -118,7 +118,7 @@ class Settings(BaseSettings):
if self.database.url:
return self.database.url
# Default SQLite path for backward compatibility
return "sqlite:////home/oib/windsurf/aitbc/data/coordinator.db"
return "sqlite:////opt/data/coordinator.db"
@database_url.setter
def database_url(self, value: str):

View File

@@ -0,0 +1,481 @@
"""
Advanced Agent Performance Domain Models
Implements SQLModel definitions for meta-learning, resource management, and performance optimization
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Any
from uuid import uuid4
from enum import Enum
from sqlmodel import SQLModel, Field, Column, JSON
from sqlalchemy import DateTime, Float, Integer, Text
class LearningStrategy(str, Enum):
    """Learning strategy enumeration.

    str-backed so members compare equal to their string values and
    serialize cleanly into JSON columns and API payloads.
    """
    META_LEARNING = "meta_learning"
    TRANSFER_LEARNING = "transfer_learning"
    REINFORCEMENT_LEARNING = "reinforcement_learning"
    SUPERVISED_LEARNING = "supervised_learning"
    UNSUPERVISED_LEARNING = "unsupervised_learning"
    FEDERATED_LEARNING = "federated_learning"
class PerformanceMetric(str, Enum):
    """Performance metric enumeration.

    Used as the `target_metric` of PerformanceOptimization records;
    str-backed for direct JSON/DB serialization.
    """
    ACCURACY = "accuracy"
    PRECISION = "precision"
    RECALL = "recall"
    F1_SCORE = "f1_score"
    LATENCY = "latency"
    THROUGHPUT = "throughput"
    RESOURCE_EFFICIENCY = "resource_efficiency"
    COST_EFFICIENCY = "cost_efficiency"
    ADAPTATION_SPEED = "adaptation_speed"
    GENERALIZATION = "generalization"
class ResourceType(str, Enum):
    """Resource type enumeration.

    Kinds of compute resources tracked by the performance models;
    str-backed for direct JSON/DB serialization.
    """
    CPU = "cpu"
    GPU = "gpu"
    MEMORY = "memory"
    STORAGE = "storage"
    NETWORK = "network"
    CACHE = "cache"
class OptimizationTarget(str, Enum):
    """Optimization target enumeration.

    Used as the required `optimization_target` of ResourceAllocation;
    str-backed for direct JSON/DB serialization.
    """
    SPEED = "speed"
    ACCURACY = "accuracy"
    EFFICIENCY = "efficiency"
    COST = "cost"
    SCALABILITY = "scalability"
    RELIABILITY = "reliability"
class AgentPerformanceProfile(SQLModel, table=True):
    """Agent performance profiles and metrics.

    One row per performance profile of an agent (table
    `agent_performance_profiles`).  Aggregated scores are plain numeric
    columns; free-form metric breakdowns are stored in JSON columns.
    """
    __tablename__ = "agent_performance_profiles"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "perf_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"perf_{uuid4().hex[:8]}", primary_key=True)
    profile_id: str = Field(unique=True, index=True)
    # Agent identification
    agent_id: str = Field(index=True)
    agent_type: str = Field(default="openclaw")
    agent_version: str = Field(default="1.0.0")
    # Performance metrics
    overall_score: float = Field(default=0.0, ge=0, le=100)  # 0-100 composite score
    performance_metrics: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Learning capabilities (normalized 0-1 scores)
    learning_strategies: List[str] = Field(default=[], sa_column=Column(JSON))
    adaptation_rate: float = Field(default=0.0, ge=0, le=1.0)
    generalization_score: float = Field(default=0.0, ge=0, le=1.0)
    # Resource utilization
    resource_efficiency: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    cost_per_task: float = Field(default=0.0)
    throughput: float = Field(default=0.0)
    average_latency: float = Field(default=0.0)
    # Specialization areas
    specialization_areas: List[str] = Field(default=[], sa_column=Column(JSON))
    expertise_levels: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Performance history (append-only lists of snapshots / trend values)
    performance_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    improvement_trends: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Benchmarking
    benchmark_scores: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    ranking_position: Optional[int] = None
    percentile_rank: Optional[float] = None
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_assessed: Optional[datetime] = None
    # Additional data
    profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    performance_notes: str = Field(default="", max_length=1000)
class MetaLearningModel(SQLModel, table=True):
    """Meta-learning models and configurations.

    One row per trained/deployable meta-learning model (table
    `meta_learning_models`).  `meta_strategy` is a required enum field;
    everything list/dict shaped lives in JSON columns.
    """
    __tablename__ = "meta_learning_models"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "meta_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"meta_{uuid4().hex[:8]}", primary_key=True)
    model_id: str = Field(unique=True, index=True)
    # Model identification
    model_name: str = Field(max_length=100)
    model_type: str = Field(default="meta_learning")
    model_version: str = Field(default="1.0.0")
    # Learning configuration
    base_algorithms: List[str] = Field(default=[], sa_column=Column(JSON))
    meta_strategy: LearningStrategy  # required, no default
    adaptation_targets: List[str] = Field(default=[], sa_column=Column(JSON))
    # Training data
    training_tasks: List[str] = Field(default=[], sa_column=Column(JSON))
    task_distributions: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    meta_features: List[str] = Field(default=[], sa_column=Column(JSON))
    # Model performance (normalized 0-1 scores)
    meta_accuracy: float = Field(default=0.0, ge=0, le=1.0)
    adaptation_speed: float = Field(default=0.0, ge=0, le=1.0)
    generalization_ability: float = Field(default=0.0, ge=0, le=1.0)
    # Resource requirements
    training_time: Optional[float] = None  # hours
    computational_cost: Optional[float] = None  # cost units
    memory_requirement: Optional[float] = None  # GB
    gpu_requirement: Optional[bool] = Field(default=False)
    # Deployment status
    status: str = Field(default="training")  # training, ready, deployed, deprecated
    deployment_count: int = Field(default=0)
    success_rate: float = Field(default=0.0, ge=0, le=1.0)
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    trained_at: Optional[datetime] = None
    deployed_at: Optional[datetime] = None
    # Additional data
    model_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    training_logs: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class ResourceAllocation(SQLModel, table=True):
    """Resource allocation and optimization records.

    One row per resource grant made to an agent, optionally tied to a
    task/session (table `resource_allocations`).  Requirement columns are
    plain numerics; measured results live in JSON columns.  `status` is a
    string state machine: pending -> allocated -> active -> completed/failed.
    """
    __tablename__ = "resource_allocations"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "alloc_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"alloc_{uuid4().hex[:8]}", primary_key=True)
    allocation_id: str = Field(unique=True, index=True)
    # Allocation details
    agent_id: str = Field(index=True)
    task_id: Optional[str] = None
    session_id: Optional[str] = None
    # Resource requirements
    cpu_cores: float = Field(default=1.0)
    memory_gb: float = Field(default=2.0)
    gpu_count: float = Field(default=0.0)
    gpu_memory_gb: float = Field(default=0.0)
    storage_gb: float = Field(default=10.0)
    network_bandwidth: float = Field(default=100.0)  # Mbps
    # Optimization targets
    optimization_target: OptimizationTarget  # required, no default
    priority_level: str = Field(default="normal")  # low, normal, high, critical
    # Performance metrics (normalized 0-1 scores)
    actual_performance: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    efficiency_score: float = Field(default=0.0, ge=0, le=1.0)
    cost_efficiency: float = Field(default=0.0, ge=0, le=1.0)
    # Allocation status
    status: str = Field(default="pending")  # pending, allocated, active, completed, failed
    allocated_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    # Optimization results
    optimization_applied: bool = Field(default=False)
    optimization_savings: float = Field(default=0.0)
    performance_improvement: float = Field(default=0.0)
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    # BUG FIX: was `default_factory=datetime.utcnow()` — the factory was
    # *called* at class-definition time, handing pydantic a datetime instead
    # of a callable; instantiating the model would then fail trying to call
    # it.  Pass the callable itself, matching created_at and every other
    # model in this module.
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional data
    allocation_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    resource_utilization: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
class PerformanceOptimization(SQLModel, table=True):
    """Performance optimization records and results.

    One row per optimization run for an agent (table
    `performance_optimizations`), capturing the baseline, the configuration
    searched, the optimized result, and derived improvement deltas.
    """
    __tablename__ = "performance_optimizations"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "opt_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"opt_{uuid4().hex[:8]}", primary_key=True)
    optimization_id: str = Field(unique=True, index=True)
    # Optimization details
    agent_id: str = Field(index=True)
    optimization_type: str = Field(max_length=50)  # resource, algorithm, hyperparameter, architecture
    target_metric: PerformanceMetric  # required, no default
    # Before optimization
    baseline_performance: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    baseline_resources: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    baseline_cost: float = Field(default=0.0)
    # Optimization configuration
    optimization_parameters: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    optimization_algorithm: str = Field(default="auto")
    search_space: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # After optimization
    optimized_performance: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    optimized_resources: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    optimized_cost: float = Field(default=0.0)
    # Improvement metrics (deltas derived from baseline vs optimized)
    performance_improvement: float = Field(default=0.0)
    resource_savings: float = Field(default=0.0)
    cost_savings: float = Field(default=0.0)
    overall_efficiency_gain: float = Field(default=0.0)
    # Optimization process
    optimization_duration: Optional[float] = None  # seconds
    iterations_required: int = Field(default=0)
    convergence_achieved: bool = Field(default=False)
    # Status and deployment
    status: str = Field(default="pending")  # pending, running, completed, failed, deployed
    applied_at: Optional[datetime] = None
    rollback_available: bool = Field(default=True)
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    completed_at: Optional[datetime] = None
    # Additional data
    optimization_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    performance_logs: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class AgentCapability(SQLModel, table=True):
    """Agent capabilities and skill assessments.

    One row per named capability of an agent (table `agent_capabilities`),
    with skill scoring, learning-rate tracking, and certification state.
    """
    __tablename__ = "agent_capabilities"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "cap_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"cap_{uuid4().hex[:8]}", primary_key=True)
    capability_id: str = Field(unique=True, index=True)
    # Capability details
    agent_id: str = Field(index=True)
    capability_name: str = Field(max_length=100)
    capability_type: str = Field(max_length=50)  # cognitive, creative, analytical, technical
    domain_area: str = Field(max_length=50)
    # Skill level assessment (note the differing scales: 0-10, 0-1, years)
    skill_level: float = Field(default=0.0, ge=0, le=10.0)
    proficiency_score: float = Field(default=0.0, ge=0, le=1.0)
    experience_years: float = Field(default=0.0)
    # Capability metrics
    performance_metrics: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    success_rate: float = Field(default=0.0, ge=0, le=1.0)
    average_quality: float = Field(default=0.0, ge=0, le=5.0)  # 0-5 rating scale
    # Learning and adaptation (normalized 0-1 scores)
    learning_rate: float = Field(default=0.0, ge=0, le=1.0)
    adaptation_speed: float = Field(default=0.0, ge=0, le=1.0)
    knowledge_retention: float = Field(default=0.0, ge=0, le=1.0)
    # Specialization
    specializations: List[str] = Field(default=[], sa_column=Column(JSON))
    sub_capabilities: List[str] = Field(default=[], sa_column=Column(JSON))
    tool_proficiency: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Development history
    acquired_at: datetime = Field(default_factory=datetime.utcnow)
    last_improved: Optional[datetime] = None
    improvement_count: int = Field(default=0)
    # Certification and validation
    certified: bool = Field(default=False)
    certification_level: Optional[str] = None
    last_validated: Optional[datetime] = None
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional data
    capability_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    training_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class FusionModel(SQLModel, table=True):
    """Multi-modal agent fusion models.

    One row per model that combines several base models/modalities (table
    `fusion_models`).  Component lists and per-component weights are JSON
    columns keyed by model/modality name.
    """
    __tablename__ = "fusion_models"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "fusion_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"fusion_{uuid4().hex[:8]}", primary_key=True)
    fusion_id: str = Field(unique=True, index=True)
    # Model identification
    model_name: str = Field(max_length=100)
    fusion_type: str = Field(max_length=50)  # ensemble, hybrid, multi_modal, cross_domain
    model_version: str = Field(default="1.0.0")
    # Component models
    base_models: List[str] = Field(default=[], sa_column=Column(JSON))
    model_weights: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    fusion_strategy: str = Field(default="weighted_average")
    # Input modalities
    input_modalities: List[str] = Field(default=[], sa_column=Column(JSON))
    modality_weights: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Performance metrics (normalized 0-1 scores)
    fusion_performance: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    synergy_score: float = Field(default=0.0, ge=0, le=1.0)
    robustness_score: float = Field(default=0.0, ge=0, le=1.0)
    # Resource requirements
    computational_complexity: str = Field(default="medium")  # low, medium, high, very_high
    memory_requirement: float = Field(default=0.0)  # GB
    inference_time: float = Field(default=0.0)  # seconds
    # Training data
    training_datasets: List[str] = Field(default=[], sa_column=Column(JSON))
    data_requirements: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Deployment status
    status: str = Field(default="training")  # training, ready, deployed, deprecated
    deployment_count: int = Field(default=0)
    performance_stability: float = Field(default=0.0, ge=0, le=1.0)
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    trained_at: Optional[datetime] = None
    deployed_at: Optional[datetime] = None
    # Additional data
    fusion_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    training_logs: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class ReinforcementLearningConfig(SQLModel, table=True):
    """Reinforcement learning configurations and policies.

    One row per RL training configuration for an agent (table
    `rl_configurations`): hyperparameters, network shape, training limits,
    and accumulated reward/success histories in JSON columns.
    """
    __tablename__ = "rl_configurations"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "rl_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"rl_{uuid4().hex[:8]}", primary_key=True)
    config_id: str = Field(unique=True, index=True)
    # Configuration details
    agent_id: str = Field(index=True)
    environment_type: str = Field(max_length=50)
    algorithm: str = Field(default="ppo")  # ppo, a2c, dqn, sac, td3
    # Learning parameters
    learning_rate: float = Field(default=0.001)
    discount_factor: float = Field(default=0.99)
    exploration_rate: float = Field(default=0.1)
    batch_size: int = Field(default=64)
    # Network architecture (per-layer widths and matching activations)
    network_layers: List[int] = Field(default=[256, 256, 128], sa_column=Column(JSON))
    activation_functions: List[str] = Field(default=["relu", "relu", "tanh"], sa_column=Column(JSON))
    # Training configuration
    max_episodes: int = Field(default=1000)
    max_steps_per_episode: int = Field(default=1000)
    save_frequency: int = Field(default=100)
    # Performance metrics
    reward_history: List[float] = Field(default=[], sa_column=Column(JSON))
    success_rate_history: List[float] = Field(default=[], sa_column=Column(JSON))
    convergence_episode: Optional[int] = None
    # Policy details
    policy_type: str = Field(default="stochastic")  # stochastic, deterministic
    action_space: List[str] = Field(default=[], sa_column=Column(JSON))
    state_space: List[str] = Field(default=[], sa_column=Column(JSON))
    # Status and deployment
    status: str = Field(default="training")  # training, ready, deployed, deprecated
    training_progress: float = Field(default=0.0, ge=0, le=1.0)
    deployment_performance: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    trained_at: Optional[datetime] = None
    deployed_at: Optional[datetime] = None
    # Additional data
    rl_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    training_logs: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class CreativeCapability(SQLModel, table=True):
    """Creative and specialized AI capabilities.

    One row per creative-domain capability of an agent (table
    `creative_capabilities`): originality/quality scoring, generation
    configuration, and evaluation history in JSON columns.
    """
    __tablename__ = "creative_capabilities"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "creative_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"creative_{uuid4().hex[:8]}", primary_key=True)
    capability_id: str = Field(unique=True, index=True)
    # Capability details
    agent_id: str = Field(index=True)
    creative_domain: str = Field(max_length=50)  # art, music, writing, design, innovation
    capability_type: str = Field(max_length=50)  # generative, compositional, analytical, innovative
    # Creative metrics (0-1 scores except aesthetic_quality: 0-5 rating)
    originality_score: float = Field(default=0.0, ge=0, le=1.0)
    novelty_score: float = Field(default=0.0, ge=0, le=1.0)
    aesthetic_quality: float = Field(default=0.0, ge=0, le=5.0)
    coherence_score: float = Field(default=0.0, ge=0, le=1.0)
    # Generation capabilities
    generation_models: List[str] = Field(default=[], sa_column=Column(JSON))
    style_variety: int = Field(default=1)
    output_quality: float = Field(default=0.0, ge=0, le=5.0)  # 0-5 rating scale
    # Learning and adaptation (normalized 0-1 scores)
    creative_learning_rate: float = Field(default=0.0, ge=0, le=1.0)
    style_adaptation: float = Field(default=0.0, ge=0, le=1.0)
    cross_domain_transfer: float = Field(default=0.0, ge=0, le=1.0)
    # Specialization
    creative_specializations: List[str] = Field(default=[], sa_column=Column(JSON))
    tool_proficiency: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    domain_knowledge: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    # Performance tracking
    creations_generated: int = Field(default=0)
    user_ratings: List[float] = Field(default=[], sa_column=Column(JSON))
    expert_evaluations: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    # Status and certification
    status: str = Field(default="developing")  # developing, ready, certified, deprecated
    certification_level: Optional[str] = None
    last_evaluation: Optional[datetime] = None
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional data
    creative_profile_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    portfolio_samples: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))

View File

@@ -0,0 +1,440 @@
"""
Marketplace Analytics Domain Models
Implements SQLModel definitions for analytics, insights, and reporting
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Any
from uuid import uuid4
from enum import Enum
from sqlmodel import SQLModel, Field, Column, JSON
from sqlalchemy import DateTime, Float, Integer, Text
class AnalyticsPeriod(str, Enum):
    """Analytics period enumeration.

    Aggregation granularity for metrics and reports; str-backed for
    direct JSON/DB serialization.
    """
    REALTIME = "realtime"
    HOURLY = "hourly"
    DAILY = "daily"
    WEEKLY = "weekly"
    MONTHLY = "monthly"
    QUARTERLY = "quarterly"
    YEARLY = "yearly"
class MetricType(str, Enum):
    """Metric type enumeration.

    How a MarketMetric value should be interpreted; str-backed for
    direct JSON/DB serialization.
    """
    VOLUME = "volume"
    COUNT = "count"
    AVERAGE = "average"
    PERCENTAGE = "percentage"
    RATIO = "ratio"
    RATE = "rate"
    VALUE = "value"
class InsightType(str, Enum):
    """Insight type enumeration.

    Category of a MarketInsight record; str-backed for direct JSON/DB
    serialization.
    """
    TREND = "trend"
    ANOMALY = "anomaly"
    OPPORTUNITY = "opportunity"
    WARNING = "warning"
    PREDICTION = "prediction"
    RECOMMENDATION = "recommendation"
class ReportType(str, Enum):
    """Report type enumeration.

    Category of an AnalyticsReport; str-backed for direct JSON/DB
    serialization.
    """
    MARKET_OVERVIEW = "market_overview"
    AGENT_PERFORMANCE = "agent_performance"
    ECONOMIC_ANALYSIS = "economic_analysis"
    GEOGRAPHIC_ANALYSIS = "geographic_analysis"
    COMPETITIVE_ANALYSIS = "competitive_analysis"
    RISK_ASSESSMENT = "risk_assessment"
class MarketMetric(SQLModel, table=True):
    """Market metrics and KPIs.

    One row per (metric, period) observation (table `market_metrics`).
    `period_start`/`period_end` are required; detail breakdowns live in
    JSON columns.
    """
    __tablename__ = "market_metrics"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "metric_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"metric_{uuid4().hex[:8]}", primary_key=True)
    metric_name: str = Field(index=True)
    metric_type: MetricType  # required, no default
    period_type: AnalyticsPeriod  # required, no default
    # Metric values (previous/change populated when prior period exists)
    value: float = Field(default=0.0)
    previous_value: Optional[float] = None
    change_percentage: Optional[float] = None
    # Contextual data
    unit: str = Field(default="")
    category: str = Field(default="general")
    subcategory: str = Field(default="")
    # Geographic and temporal context
    geographic_region: Optional[str] = None
    agent_tier: Optional[str] = None
    trade_type: Optional[str] = None
    # Metadata
    metric_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Timestamps (period bounds are required; recorded_at defaults to naive UTC now)
    recorded_at: datetime = Field(default_factory=datetime.utcnow)
    period_start: datetime
    period_end: datetime
    # Additional data
    breakdown: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    comparisons: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class MarketInsight(SQLModel, table=True):
    """Market insights and analysis.

    One row per generated insight (table `market_insights`), including
    confidence/impact scoring, recommendations, and an
    acknowledge/resolve workflow.
    """
    __tablename__ = "market_insights"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "insight_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"insight_{uuid4().hex[:8]}", primary_key=True)
    insight_type: InsightType  # required, no default
    title: str = Field(max_length=200)
    description: str = Field(default="", max_length=1000)
    # Insight data
    confidence_score: float = Field(default=0.0, ge=0, le=1.0)
    impact_level: str = Field(default="medium")  # low, medium, high, critical
    urgency_level: str = Field(default="normal")  # low, normal, high, urgent
    # Related metrics and context
    related_metrics: List[str] = Field(default=[], sa_column=Column(JSON))
    affected_entities: List[str] = Field(default=[], sa_column=Column(JSON))
    time_horizon: str = Field(default="short_term")  # immediate, short_term, medium_term, long_term
    # Analysis details
    analysis_method: str = Field(default="statistical")
    data_sources: List[str] = Field(default=[], sa_column=Column(JSON))
    assumptions: List[str] = Field(default=[], sa_column=Column(JSON))
    # Recommendations and actions
    recommendations: List[str] = Field(default=[], sa_column=Column(JSON))
    suggested_actions: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    # Status and tracking (acknowledge/resolve audit fields)
    status: str = Field(default="active")  # active, resolved, expired
    acknowledged_by: Optional[str] = None
    acknowledged_at: Optional[datetime] = None
    resolved_by: Optional[str] = None
    resolved_at: Optional[datetime] = None
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    # Additional data
    insight_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    visualization_config: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class AnalyticsReport(SQLModel, table=True):
    """Generated analytics reports.

    One row per report (table `analytics_reports`): parameters, rendered
    content sections/charts/tables as JSON, plus generation and delivery
    bookkeeping.
    """
    __tablename__ = "analytics_reports"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "report_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"report_{uuid4().hex[:8]}", primary_key=True)
    report_id: str = Field(unique=True, index=True)
    # Report details
    report_type: ReportType  # required, no default
    title: str = Field(max_length=200)
    description: str = Field(default="", max_length=1000)
    # Report parameters (date range is required)
    period_type: AnalyticsPeriod  # required, no default
    start_date: datetime
    end_date: datetime
    filters: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Report content
    summary: str = Field(default="", max_length=2000)
    key_findings: List[str] = Field(default=[], sa_column=Column(JSON))
    recommendations: List[str] = Field(default=[], sa_column=Column(JSON))
    # Report data
    data_sections: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    charts: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    tables: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    # Generation details
    generated_by: str = Field(default="system")  # system, user, scheduled
    generation_time: float = Field(default=0.0)  # seconds
    data_points_analyzed: int = Field(default=0)
    # Status and delivery
    status: str = Field(default="generated")  # generating, generated, failed, delivered
    delivery_method: str = Field(default="api")  # api, email, dashboard
    recipients: List[str] = Field(default=[], sa_column=Column(JSON))
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    generated_at: datetime = Field(default_factory=datetime.utcnow)
    delivered_at: Optional[datetime] = None
    # Additional data
    report_metric_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    template_used: Optional[str] = None
class DashboardConfig(SQLModel, table=True):
    """Analytics dashboard configurations.

    One row per dashboard (table `dashboard_configs`): layout/widget JSON,
    refresh behavior, and owner/viewer/editor access lists.
    """
    __tablename__ = "dashboard_configs"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "dashboard_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"dashboard_{uuid4().hex[:8]}", primary_key=True)
    dashboard_id: str = Field(unique=True, index=True)
    # Dashboard details
    name: str = Field(max_length=100)
    description: str = Field(default="", max_length=500)
    dashboard_type: str = Field(default="custom")  # default, custom, executive, operational
    # Layout and configuration
    layout: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    widgets: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    filters: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    # Data sources and refresh
    data_sources: List[str] = Field(default=[], sa_column=Column(JSON))
    refresh_interval: int = Field(default=300)  # seconds
    auto_refresh: bool = Field(default=True)
    # Access and permissions (viewer/editor entries are user ids — presumably; confirm against caller)
    owner_id: str = Field(index=True)
    viewers: List[str] = Field(default=[], sa_column=Column(JSON))
    editors: List[str] = Field(default=[], sa_column=Column(JSON))
    is_public: bool = Field(default=False)
    # Status and versioning
    status: str = Field(default="active")  # active, inactive, archived
    version: int = Field(default=1)
    last_modified_by: Optional[str] = None
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_viewed_at: Optional[datetime] = None
    # Additional data
    dashboard_settings: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    theme_config: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class DataCollectionJob(SQLModel, table=True):
    """Data collection and processing jobs.

    One row per analytics job run or schedule (table
    `data_collection_jobs`): parameters, cron schedule, execution state and
    progress, and result/performance counters.
    """
    __tablename__ = "data_collection_jobs"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "job_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"job_{uuid4().hex[:8]}", primary_key=True)
    job_id: str = Field(unique=True, index=True)
    # Job details
    job_type: str = Field(max_length=50)  # metrics_collection, insight_generation, report_generation
    job_name: str = Field(max_length=100)
    description: str = Field(default="", max_length=500)
    # Job parameters
    parameters: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    data_sources: List[str] = Field(default=[], sa_column=Column(JSON))
    target_metrics: List[str] = Field(default=[], sa_column=Column(JSON))
    # Schedule and execution
    schedule_type: str = Field(default="manual")  # manual, scheduled, triggered
    cron_expression: Optional[str] = None  # only meaningful for scheduled jobs
    next_run: Optional[datetime] = None
    # Execution details (progress is a percentage, 0-100)
    status: str = Field(default="pending")  # pending, running, completed, failed, cancelled
    progress: float = Field(default=0.0, ge=0, le=100.0)
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    # Results and output
    records_processed: int = Field(default=0)
    records_generated: int = Field(default=0)
    errors: List[str] = Field(default=[], sa_column=Column(JSON))
    output_files: List[str] = Field(default=[], sa_column=Column(JSON))
    # Performance metrics
    execution_time: float = Field(default=0.0)  # seconds
    memory_usage: float = Field(default=0.0)  # MB
    cpu_usage: float = Field(default=0.0)  # percentage
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional data
    job_metric_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    execution_log: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class AlertRule(SQLModel, table=True):
    """Analytics alert rules and notifications.

    One row per configured alert rule (table `alert_rules`): trigger
    conditions, targets, severity/cooldown, and notification routing.
    Fired alerts are stored separately in AnalyticsAlert.
    """
    __tablename__ = "alert_rules"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key, e.g. "alert_1a2b3c4d"
    id: str = Field(default_factory=lambda: f"alert_{uuid4().hex[:8]}", primary_key=True)
    rule_id: str = Field(unique=True, index=True)
    # Rule details
    name: str = Field(max_length=100)
    description: str = Field(default="", max_length=500)
    rule_type: str = Field(default="threshold")  # threshold, anomaly, trend, pattern
    # Conditions and triggers
    conditions: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    threshold_value: Optional[float] = None
    comparison_operator: str = Field(default="greater_than")  # greater_than, less_than, equals, contains
    # Target metrics and entities
    target_metrics: List[str] = Field(default=[], sa_column=Column(JSON))
    target_entities: List[str] = Field(default=[], sa_column=Column(JSON))
    geographic_scope: List[str] = Field(default=[], sa_column=Column(JSON))
    # Alert configuration
    severity: str = Field(default="medium")  # low, medium, high, critical
    cooldown_period: int = Field(default=300)  # seconds between repeat triggers
    auto_resolve: bool = Field(default=False)
    resolve_conditions: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    # Notification settings
    notification_channels: List[str] = Field(default=[], sa_column=Column(JSON))
    notification_recipients: List[str] = Field(default=[], sa_column=Column(JSON))
    message_template: str = Field(default="", max_length=1000)
    # Status and scheduling
    status: str = Field(default="active")  # active, inactive, disabled
    created_by: str = Field(index=True)
    last_triggered: Optional[datetime] = None
    trigger_count: int = Field(default=0)
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional data
    rule_metric_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    test_results: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class AnalyticsAlert(SQLModel, table=True):
    """Generated analytics alerts.

    One row per fired alert (table `analytics_alerts`), linked to its
    AlertRule via `rule_id` (plain indexed string, not a DB foreign key).
    Carries trigger data, affected context, and a resolution workflow.
    """
    __tablename__ = "analytics_alerts"
    # extend_existing avoids re-definition errors when the module is imported twice
    __table_args__ = {"extend_existing": True}
    # Surrogate primary key — NOTE: shares the "alert_" prefix with AlertRule.id
    id: str = Field(default_factory=lambda: f"alert_{uuid4().hex[:8]}", primary_key=True)
    alert_id: str = Field(unique=True, index=True)
    # Alert details
    rule_id: str = Field(index=True)  # references AlertRule.rule_id
    alert_type: str = Field(max_length=50)
    title: str = Field(max_length=200)
    message: str = Field(default="", max_length=1000)
    # Alert data
    severity: str = Field(default="medium")
    confidence: float = Field(default=0.0, ge=0, le=1.0)
    impact_assessment: str = Field(default="", max_length=500)
    # Trigger data
    trigger_value: Optional[float] = None
    threshold_value: Optional[float] = None
    deviation_percentage: Optional[float] = None
    affected_metrics: List[str] = Field(default=[], sa_column=Column(JSON))
    # Context and entities
    geographic_regions: List[str] = Field(default=[], sa_column=Column(JSON))
    affected_agents: List[str] = Field(default=[], sa_column=Column(JSON))
    time_period: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Status and resolution (acknowledge/resolve audit fields)
    status: str = Field(default="active")  # active, acknowledged, resolved, false_positive
    acknowledged_by: Optional[str] = None
    acknowledged_at: Optional[datetime] = None
    resolved_by: Optional[str] = None
    resolved_at: Optional[datetime] = None
    resolution_notes: str = Field(default="", max_length=1000)
    # Notifications (delivery_status maps channel/recipient -> status string)
    notifications_sent: List[str] = Field(default=[], sa_column=Column(JSON))
    delivery_status: Dict[str, str] = Field(default={}, sa_column=Column(JSON))
    # Timestamps (naive UTC via datetime.utcnow)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    # Additional data
    alert_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    related_insights: List[str] = Field(default=[], sa_column=Column(JSON))
class UserPreference(SQLModel, table=True):
    """User analytics preferences and settings.

    One row per user; groups notification, dashboard, metric, alert,
    report and privacy preferences.
    """
    __tablename__ = "user_preferences"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"pref_{uuid4().hex[:8]}", primary_key=True)
    user_id: str = Field(index=True)
    # Notification preferences
    email_notifications: bool = Field(default=True)
    alert_notifications: bool = Field(default=True)
    report_notifications: bool = Field(default=False)
    notification_frequency: str = Field(default="daily")  # immediate, daily, weekly, monthly
    # Dashboard preferences
    default_dashboard: Optional[str] = None
    preferred_timezone: str = Field(default="UTC")
    date_format: str = Field(default="YYYY-MM-DD")
    time_format: str = Field(default="24h")
    # Metric preferences (default_factory so containers are per-row, not shared)
    favorite_metrics: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    metric_units: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON))
    default_period: AnalyticsPeriod = Field(default=AnalyticsPeriod.DAILY)
    # Alert preferences
    alert_severity_threshold: str = Field(default="medium")  # low, medium, high, critical
    quiet_hours: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    alert_channels: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Report preferences
    auto_subscribe_reports: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    report_format: str = Field(default="json")  # json, csv, pdf, html
    include_charts: bool = Field(default=True)
    # Privacy and security
    data_retention_days: int = Field(default=90)
    share_analytics: bool = Field(default=False)
    anonymous_usage: bool = Field(default=False)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_login: Optional[datetime] = None
    # Additional preferences
    custom_settings: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    ui_preferences: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))

View File

@@ -0,0 +1,453 @@
"""
Agent Certification and Partnership Domain Models
Implements SQLModel definitions for certification, verification, and partnership programs
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Any
from uuid import uuid4
from enum import Enum
from sqlmodel import SQLModel, Field, Column, JSON
from sqlalchemy import DateTime, Float, Integer, Text
class CertificationLevel(str, Enum):
    """Certification level enumeration.

    The ``str`` mixin makes members compare equal to, and persist as,
    their plain string values.
    """
    BASIC = "basic"
    INTERMEDIATE = "intermediate"
    ADVANCED = "advanced"
    ENTERPRISE = "enterprise"
    PREMIUM = "premium"
class CertificationStatus(str, Enum):
    """Certification status enumeration (lifecycle state of a certification)."""
    PENDING = "pending"
    ACTIVE = "active"
    EXPIRED = "expired"
    REVOKED = "revoked"
    SUSPENDED = "suspended"
class VerificationType(str, Enum):
    """Verification type enumeration (what aspect of an agent is verified)."""
    IDENTITY = "identity"
    PERFORMANCE = "performance"
    RELIABILITY = "reliability"
    SECURITY = "security"
    COMPLIANCE = "compliance"
    CAPABILITY = "capability"
class PartnershipType(str, Enum):
    """Partnership type enumeration (category of a partnership program)."""
    TECHNOLOGY = "technology"
    SERVICE = "service"
    RESELLER = "reseller"
    INTEGRATION = "integration"
    STRATEGIC = "strategic"
    AFFILIATE = "affiliate"
class BadgeType(str, Enum):
    """Badge type enumeration (category of an achievement badge)."""
    ACHIEVEMENT = "achievement"
    MILESTONE = "milestone"
    RECOGNITION = "recognition"
    SPECIALIZATION = "specialization"
    EXCELLENCE = "excellence"
    CONTRIBUTION = "contribution"
class AgentCertification(SQLModel, table=True):
    """Agent certification record.

    Tracks a certification issued to an agent: its level, issuer,
    expiry/renewal state, the requirements it satisfied, and the
    privileges it grants.
    """
    __tablename__ = "agent_certifications"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"cert_{uuid4().hex[:8]}", primary_key=True)
    certification_id: str = Field(unique=True, index=True)
    # Certification details
    agent_id: str = Field(index=True)
    certification_level: CertificationLevel
    certification_type: str = Field(default="standard")  # standard, specialized, enterprise
    # Issuance information
    issued_by: str = Field(index=True)  # Who issued the certification
    issued_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    verification_hash: str = Field(max_length=64)  # Blockchain verification hash
    # Status and metadata
    status: CertificationStatus = Field(default=CertificationStatus.ACTIVE)
    renewal_count: int = Field(default=0)
    last_renewed_at: Optional[datetime] = None
    # Requirements and verification (default_factory: per-row containers)
    requirements_met: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    verification_results: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    supporting_documents: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Benefits and privileges
    granted_privileges: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    access_levels: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    special_capabilities: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Audit trail
    audit_log: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    last_verified_at: Optional[datetime] = None
    # Additional data
    cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    notes: str = Field(default="", max_length=1000)
class CertificationRequirement(SQLModel, table=True):
    """Certification requirement / criterion.

    One row per criterion an agent must meet to earn a given
    certification level, including thresholds, verification method,
    dependencies and activation window.
    """
    __tablename__ = "certification_requirements"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"req_{uuid4().hex[:8]}", primary_key=True)
    # Requirement details
    certification_level: CertificationLevel
    requirement_type: VerificationType
    requirement_name: str = Field(max_length=100)
    description: str = Field(default="", max_length=500)
    # Criteria and thresholds (default_factory: per-row containers)
    criteria: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    minimum_threshold: Optional[float] = None
    maximum_threshold: Optional[float] = None
    required_values: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Verification method
    verification_method: str = Field(default="automated")  # automated, manual, hybrid
    verification_frequency: str = Field(default="once")  # once, monthly, quarterly, annually
    # Dependencies and prerequisites
    prerequisites: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    depends_on: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Status and configuration
    is_active: bool = Field(default=True)
    is_mandatory: bool = Field(default=True)
    weight: float = Field(default=1.0)  # Importance weight
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    effective_date: datetime = Field(default_factory=datetime.utcnow)
    expiry_date: Optional[datetime] = None
    # Additional data
    cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
class VerificationRecord(SQLModel, table=True):
    """Agent verification record and result.

    Captures a single verification run: who requested it, timing,
    pass/fail outcome, evidence, review/approval trail and compliance
    assessment.
    """
    __tablename__ = "verification_records"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"verify_{uuid4().hex[:8]}", primary_key=True)
    verification_id: str = Field(unique=True, index=True)
    # Verification details
    agent_id: str = Field(index=True)
    verification_type: VerificationType
    verification_method: str = Field(default="automated")
    # Request information
    requested_by: str = Field(index=True)
    requested_at: datetime = Field(default_factory=datetime.utcnow)
    priority: str = Field(default="normal")  # low, normal, high, urgent
    # Verification process
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    processing_time: Optional[float] = None  # seconds
    # Results and outcomes (default_factory: per-row containers)
    status: str = Field(default="pending")  # pending, in_progress, passed, failed, cancelled
    result_score: Optional[float] = None
    result_details: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    failure_reasons: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Verification data
    input_data: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    output_data: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    evidence: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    # Review and approval
    reviewed_by: Optional[str] = None
    reviewed_at: Optional[datetime] = None
    approved_by: Optional[str] = None
    approved_at: Optional[datetime] = None
    # Audit and compliance
    compliance_score: Optional[float] = None
    risk_assessment: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    audit_trail: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    # Additional data
    cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    notes: str = Field(default="", max_length=1000)
class PartnershipProgram(SQLModel, table=True):
    """Partnership program / alliance definition.

    Describes a program agents can join: tier structure, eligibility,
    benefits, agreement terms, capacity and lifecycle status.
    """
    __tablename__ = "partnership_programs"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"partner_{uuid4().hex[:8]}", primary_key=True)
    program_id: str = Field(unique=True, index=True)
    # Program details
    program_name: str = Field(max_length=200)
    program_type: PartnershipType
    description: str = Field(default="", max_length=1000)
    # Program configuration (default_factory: per-row containers)
    tier_levels: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    benefits_by_tier: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    requirements_by_tier: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    # Eligibility criteria
    eligibility_requirements: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    minimum_criteria: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    exclusion_criteria: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Program benefits
    financial_benefits: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    non_financial_benefits: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    exclusive_access: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Partnership terms
    agreement_terms: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    commission_structure: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    performance_metrics: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Status and management
    status: str = Field(default="active")  # active, inactive, suspended, terminated
    max_participants: Optional[int] = None
    current_participants: int = Field(default=0)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    launched_at: Optional[datetime] = None
    expires_at: Optional[datetime] = None
    # Additional data
    program_cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    contact_info: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
class AgentPartnership(SQLModel, table=True):
    """Agent participation in a partnership program.

    Joins an agent to a program and tracks application/approval,
    performance, earnings, tier progress and agreement state.
    """
    __tablename__ = "agent_partnerships"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"agent_partner_{uuid4().hex[:8]}", primary_key=True)
    partnership_id: str = Field(unique=True, index=True)
    # Partnership details
    agent_id: str = Field(index=True)
    program_id: str = Field(index=True)
    partnership_type: PartnershipType
    current_tier: str = Field(default="basic")
    # Application and approval (default_factory: per-row containers)
    applied_at: datetime = Field(default_factory=datetime.utcnow)
    approved_by: Optional[str] = None
    approved_at: Optional[datetime] = None
    rejection_reasons: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Performance and metrics
    performance_score: float = Field(default=0.0)
    performance_metrics: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    contribution_value: float = Field(default=0.0)
    # Benefits and compensation
    earned_benefits: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    total_earnings: float = Field(default=0.0)
    pending_payments: float = Field(default=0.0)
    # Status and lifecycle
    status: str = Field(default="active")  # active, inactive, suspended, terminated
    tier_progress: float = Field(default=0.0, ge=0, le=100.0)
    next_tier_eligible: bool = Field(default=False)
    # Agreement details
    agreement_signed: bool = Field(default=False)
    agreement_signed_at: Optional[datetime] = None
    agreement_expires_at: Optional[datetime] = None
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_activity: Optional[datetime] = None
    # Additional data
    partnership_cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    notes: str = Field(default="", max_length=1000)
class AchievementBadge(SQLModel, table=True):
    """Achievement / recognition badge definition.

    Defines a badge type: earning criteria, rarity/points, visual
    properties, and availability window / award caps.
    """
    __tablename__ = "achievement_badges"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"badge_{uuid4().hex[:8]}", primary_key=True)
    badge_id: str = Field(unique=True, index=True)
    # Badge details
    badge_name: str = Field(max_length=100)
    badge_type: BadgeType
    description: str = Field(default="", max_length=500)
    badge_icon: str = Field(default="", max_length=200)  # Icon identifier or URL
    # Badge criteria (default_factory: per-row containers)
    achievement_criteria: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    required_metrics: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    threshold_values: Dict[str, float] = Field(default_factory=dict, sa_column=Column(JSON))
    # Badge properties
    rarity: str = Field(default="common")  # common, uncommon, rare, epic, legendary
    point_value: int = Field(default=0)
    category: str = Field(default="general")  # performance, contribution, specialization, excellence
    # Visual design
    color_scheme: Dict[str, str] = Field(default_factory=dict, sa_column=Column(JSON))
    display_properties: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    # Status and availability
    is_active: bool = Field(default=True)
    is_limited: bool = Field(default=False)
    max_awards: Optional[int] = None
    current_awards: int = Field(default=0)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    available_from: datetime = Field(default_factory=datetime.utcnow)
    available_until: Optional[datetime] = None
    # Additional data
    badge_cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    requirements_text: str = Field(default="", max_length=1000)
class AgentBadge(SQLModel, table=True):
    """Badge earned by an agent.

    Links an agent to a badge definition and records award context,
    display settings, progress toward milestones, expiry and simple
    social counters.
    """
    __tablename__ = "agent_badges"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"agent_badge_{uuid4().hex[:8]}", primary_key=True)
    # Badge relationship
    agent_id: str = Field(index=True)
    badge_id: str = Field(index=True)
    # Award details
    awarded_by: str = Field(index=True)  # System or user who awarded the badge
    awarded_at: datetime = Field(default_factory=datetime.utcnow)
    award_reason: str = Field(default="", max_length=500)
    # Achievement context (default_factory: per-row containers)
    achievement_context: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    metrics_at_award: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    supporting_evidence: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Badge status
    is_displayed: bool = Field(default=True)
    is_featured: bool = Field(default=False)
    display_order: int = Field(default=0)
    # Progress tracking (for progressive badges)
    current_progress: float = Field(default=0.0, ge=0, le=100.0)
    next_milestone: Optional[str] = None
    # Expiration and renewal
    expires_at: Optional[datetime] = None
    is_permanent: bool = Field(default=True)
    renewal_criteria: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    # Social features
    share_count: int = Field(default=0)
    view_count: int = Field(default=0)
    congratulation_count: int = Field(default=0)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_viewed_at: Optional[datetime] = None
    # Additional data
    badge_cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    notes: str = Field(default="", max_length=1000)
class CertificationAudit(SQLModel, table=True):
    """Certification audit / compliance record.

    Tracks an audit from scheduling through execution to outcome:
    findings, violations, recommendations, corrective actions and
    report artifacts.
    """
    __tablename__ = "certification_audits"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"audit_{uuid4().hex[:8]}", primary_key=True)
    audit_id: str = Field(unique=True, index=True)
    # Audit details
    audit_type: str = Field(max_length=50)  # routine, investigation, compliance, security
    audit_scope: str = Field(max_length=100)  # individual, program, system
    target_entity_id: str = Field(index=True)  # agent_id, certification_id, etc.
    # Audit scheduling
    scheduled_by: str = Field(index=True)
    scheduled_at: datetime = Field(default_factory=datetime.utcnow)
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    # Audit execution (default_factory: per-row containers)
    auditor_id: str = Field(index=True)
    audit_methodology: str = Field(default="", max_length=500)
    checklists: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Findings and results
    overall_score: Optional[float] = None
    compliance_score: Optional[float] = None
    risk_score: Optional[float] = None
    findings: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    violations: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    recommendations: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Actions and resolutions
    corrective_actions: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    follow_up_required: bool = Field(default=False)
    follow_up_date: Optional[datetime] = None
    # Status and outcome
    status: str = Field(default="scheduled")  # scheduled, in_progress, completed, failed, cancelled
    outcome: str = Field(default="pending")  # pass, fail, conditional, pending_review
    # Reporting and documentation
    report_generated: bool = Field(default=False)
    report_url: Optional[str] = None
    evidence_documents: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional data
    audit_cert_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    notes: str = Field(default="", max_length=2000)

View File

@@ -0,0 +1,153 @@
"""
Community and Developer Ecosystem Models
Database models for OpenClaw agent community, third-party solutions, and innovation labs
"""
from typing import Optional, List, Dict, Any
from sqlmodel import Field, SQLModel, Column, JSON, Relationship
from datetime import datetime
from enum import Enum
import uuid
class DeveloperTier(str, Enum):
    # Developer tier in the OpenClaw community; persisted as its string value.
    NOVICE = "novice"
    BUILDER = "builder"
    EXPERT = "expert"
    MASTER = "master"
    PARTNER = "partner"
class SolutionStatus(str, Enum):
    # Lifecycle state of a marketplace agent solution.
    DRAFT = "draft"
    REVIEW = "review"
    PUBLISHED = "published"
    DEPRECATED = "deprecated"
    REJECTED = "rejected"
class LabStatus(str, Enum):
    # Lifecycle state of an innovation lab / research program.
    PROPOSED = "proposed"
    FUNDING = "funding"
    ACTIVE = "active"
    COMPLETED = "completed"
    ARCHIVED = "archived"
class HackathonStatus(str, Enum):
    # Lifecycle state of a hackathon event.
    ANNOUNCED = "announced"
    REGISTRATION = "registration"
    ONGOING = "ongoing"
    JUDGING = "judging"
    COMPLETED = "completed"
class DeveloperProfile(SQLModel, table=True):
    """Profile for a developer in the OpenClaw community.

    One row per developer; linked to the platform account via user_id.
    """
    __tablename__ = "developer_profiles"
    # Generated id like "dev_1a2b3c4d"
    developer_id: str = Field(primary_key=True, default_factory=lambda: f"dev_{uuid.uuid4().hex[:8]}")
    user_id: str = Field(index=True)  # owning platform user account
    username: str = Field(unique=True)
    bio: Optional[str] = None
    tier: DeveloperTier = Field(default=DeveloperTier.NOVICE)
    reputation_score: float = Field(default=0.0)
    # Cumulative earnings; unit presumably AITBC — confirm against payout code
    total_earnings: float = Field(default=0.0)
    skills: List[str] = Field(default_factory=list, sa_column=Column(JSON))  # free-form skill tags
    github_handle: Optional[str] = None
    website: Optional[str] = None
    joined_at: datetime = Field(default_factory=datetime.utcnow)
    last_active: datetime = Field(default_factory=datetime.utcnow)
class AgentSolution(SQLModel, table=True):
    """A third-party agent solution available in the developer marketplace.

    Owned by a developer profile; tracks pricing model, review/publishing
    lifecycle and aggregate marketplace stats.
    """
    __tablename__ = "agent_solutions"
    # Generated id like "sol_1a2b3c4d"
    solution_id: str = Field(primary_key=True, default_factory=lambda: f"sol_{uuid.uuid4().hex[:8]}")
    developer_id: str = Field(foreign_key="developer_profiles.developer_id")
    title: str
    description: str
    version: str = Field(default="1.0.0")
    capabilities: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    frameworks: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    price_model: str = Field(default="free")  # free, one_time, subscription, usage_based
    price_amount: float = Field(default=0.0)
    currency: str = Field(default="AITBC")
    status: SolutionStatus = Field(default=SolutionStatus.DRAFT)
    # Aggregate marketplace stats (maintained by application code)
    downloads: int = Field(default=0)
    average_rating: float = Field(default=0.0)
    review_count: int = Field(default=0)
    solution_metadata: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    published_at: Optional[datetime] = None  # set when status becomes PUBLISHED — confirm in service layer
class InnovationLab(SQLModel, table=True):
    """Research program or innovation lab for agent development.

    Led by a developer profile; tracks membership, funding progress,
    milestones and publications.
    """
    __tablename__ = "innovation_labs"
    # Generated id like "lab_1a2b3c4d"
    lab_id: str = Field(primary_key=True, default_factory=lambda: f"lab_{uuid.uuid4().hex[:8]}")
    title: str
    description: str
    research_area: str
    lead_researcher_id: str = Field(foreign_key="developer_profiles.developer_id")
    members: List[str] = Field(default_factory=list, sa_column=Column(JSON))  # List of developer_ids
    status: LabStatus = Field(default=LabStatus.PROPOSED)
    funding_goal: float = Field(default=0.0)
    current_funding: float = Field(default=0.0)
    # Milestones/publications stored as free-form JSON dicts
    milestones: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    publications: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    created_at: datetime = Field(default_factory=datetime.utcnow)
    target_completion: Optional[datetime] = None
class CommunityPost(SQLModel, table=True):
    """A post in the community support/collaboration platform.

    Replies are modeled via the self-referencing parent_post_id, so
    threads form a tree rooted at posts with parent_post_id=None.
    """
    __tablename__ = "community_posts"
    # Generated id like "post_1a2b3c4d"
    post_id: str = Field(primary_key=True, default_factory=lambda: f"post_{uuid.uuid4().hex[:8]}")
    author_id: str = Field(foreign_key="developer_profiles.developer_id")
    title: str
    content: str
    category: str = Field(default="discussion")  # discussion, question, showcase, tutorial
    tags: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    upvotes: int = Field(default=0)
    views: int = Field(default=0)
    is_resolved: bool = Field(default=False)  # meaningful for "question" posts
    # Self-referencing FK: set for replies, None for top-level posts
    parent_post_id: Optional[str] = Field(default=None, foreign_key="community_posts.post_id")
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
class Hackathon(SQLModel, table=True):
    """Innovation challenge or hackathon.

    Registration and event windows are required (no defaults) and must be
    supplied at creation time.
    """
    __tablename__ = "hackathons"
    # Generated id like "hack_1a2b3c4d"
    hackathon_id: str = Field(primary_key=True, default_factory=lambda: f"hack_{uuid.uuid4().hex[:8]}")
    title: str
    description: str
    theme: str
    sponsor: str = Field(default="AITBC Foundation")
    prize_pool: float = Field(default=0.0)
    prize_currency: str = Field(default="AITBC")
    status: HackathonStatus = Field(default=HackathonStatus.ANNOUNCED)
    participants: List[str] = Field(default_factory=list, sa_column=Column(JSON))  # List of developer_ids
    submissions: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    # Required scheduling fields — caller must provide all four
    registration_start: datetime
    registration_end: datetime
    event_start: datetime
    event_end: datetime
    created_at: datetime = Field(default_factory=datetime.utcnow)

View File

@@ -0,0 +1,127 @@
"""
Decentralized Governance Models
Database models for OpenClaw DAO, voting, proposals, and governance analytics
"""
from typing import Optional, List, Dict, Any
from sqlmodel import Field, SQLModel, Column, JSON, Relationship
from datetime import datetime
from enum import Enum
import uuid
class ProposalStatus(str, Enum):
    # Lifecycle state of a governance proposal; persisted as its string value.
    DRAFT = "draft"
    ACTIVE = "active"
    SUCCEEDED = "succeeded"
    DEFEATED = "defeated"
    EXECUTED = "executed"
    CANCELLED = "cancelled"
class VoteType(str, Enum):
    # Direction of a cast vote.
    FOR = "for"
    AGAINST = "against"
    ABSTAIN = "abstain"
class GovernanceRole(str, Enum):
    # Role of a participant in the DAO.
    MEMBER = "member"
    DELEGATE = "delegate"
    COUNCIL = "council"
    ADMIN = "admin"
class GovernanceProfile(SQLModel, table=True):
    """Profile for a participant in the AITBC DAO.

    Tracks voting power (own plus delegated), participation counters
    and optional vote delegation to another profile.
    """
    __tablename__ = "governance_profiles"
    # Generated id like "gov_1a2b3c4d"
    profile_id: str = Field(primary_key=True, default_factory=lambda: f"gov_{uuid.uuid4().hex[:8]}")
    user_id: str = Field(unique=True, index=True)  # one governance profile per user
    role: GovernanceRole = Field(default=GovernanceRole.MEMBER)
    voting_power: float = Field(default=0.0)  # Calculated based on staked AITBC and reputation
    delegated_power: float = Field(default=0.0)  # Power delegated to them by others
    # Participation counters (maintained by application code)
    total_votes_cast: int = Field(default=0)
    proposals_created: int = Field(default=0)
    proposals_passed: int = Field(default=0)
    delegate_to: Optional[str] = Field(default=None)  # Profile ID they delegate their vote to
    joined_at: datetime = Field(default_factory=datetime.utcnow)
    last_voted_at: Optional[datetime] = None
class Proposal(SQLModel, table=True):
    """A governance proposal submitted to the DAO.

    Vote tallies are denominated in voting power (floats), not ballot
    counts. voting_starts / voting_ends are required fields.
    """
    __tablename__ = "proposals"
    # Generated id like "prop_1a2b3c4d"
    proposal_id: str = Field(primary_key=True, default_factory=lambda: f"prop_{uuid.uuid4().hex[:8]}")
    proposer_id: str = Field(foreign_key="governance_profiles.profile_id")
    title: str
    description: str
    category: str = Field(default="general")  # parameters, funding, protocol, marketplace
    # Machine-executable action to run if the proposal passes
    execution_payload: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
    status: ProposalStatus = Field(default=ProposalStatus.DRAFT)
    # Running tallies in units of voting power
    votes_for: float = Field(default=0.0)
    votes_against: float = Field(default=0.0)
    votes_abstain: float = Field(default=0.0)
    quorum_required: float = Field(default=0.0)
    passing_threshold: float = Field(default=0.5)  # Usually 50%
    # Snapshot of voting power at a fixed point — confirm how block/timestamp are chosen
    snapshot_block: Optional[int] = Field(default=None)
    snapshot_timestamp: Optional[datetime] = Field(default=None)
    created_at: datetime = Field(default_factory=datetime.utcnow)
    # Required voting window — caller must provide both
    voting_starts: datetime
    voting_ends: datetime
    executed_at: Optional[datetime] = None
class Vote(SQLModel, table=True):
    """A vote cast on a specific proposal.

    Records the power used plus the voter's power at the proposal
    snapshot, allowing later auditing of the tally.
    """
    __tablename__ = "votes"
    # Generated id like "vote_1a2b3c4d"
    vote_id: str = Field(primary_key=True, default_factory=lambda: f"vote_{uuid.uuid4().hex[:8]}")
    proposal_id: str = Field(foreign_key="proposals.proposal_id", index=True)
    voter_id: str = Field(foreign_key="governance_profiles.profile_id")
    vote_type: VoteType
    voting_power_used: float  # required: power applied to this ballot
    reason: Optional[str] = None  # optional free-text justification
    # Power snapshots for auditability
    power_at_snapshot: float = Field(default=0.0)
    delegated_power_at_snapshot: float = Field(default=0.0)
    created_at: datetime = Field(default_factory=datetime.utcnow)
class DaoTreasury(SQLModel, table=True):
    """Record of the DAO's treasury funds and allocations.

    Effectively a singleton row: the primary key defaults to
    "main_treasury" rather than a generated id.
    """
    __tablename__ = "dao_treasury"
    treasury_id: str = Field(primary_key=True, default="main_treasury")
    total_balance: float = Field(default=0.0)
    allocated_funds: float = Field(default=0.0)  # portion of total_balance already committed
    # Per-asset balances keyed by asset symbol — confirm key format with treasury service
    asset_breakdown: Dict[str, float] = Field(default_factory=dict, sa_column=Column(JSON))
    last_updated: datetime = Field(default_factory=datetime.utcnow)
class TransparencyReport(SQLModel, table=True):
    """Automated transparency and analytics report for the governance system.

    One row per reporting period; aggregate counters are required fields
    computed by the report generator.
    """
    __tablename__ = "transparency_reports"
    # Generated id like "rep_1a2b3c4d"
    report_id: str = Field(primary_key=True, default_factory=lambda: f"rep_{uuid.uuid4().hex[:8]}")
    period: str  # e.g., "2026-Q1", "2026-02"
    # Required aggregates for the period
    total_proposals: int
    passed_proposals: int
    active_voters: int
    total_voting_power_participated: float
    treasury_inflow: float
    treasury_outflow: float
    metrics: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))  # extra ad-hoc metrics
    generated_at: datetime = Field(default_factory=datetime.utcnow)

View File

@@ -93,7 +93,7 @@ class EdgeGPUMetrics(SQLModel, table=True):
__table_args__ = {"extend_existing": True}
id: str = Field(default_factory=lambda: f"egm_{uuid4().hex[:8]}", primary_key=True)
gpu_id: str = Field(foreign_key="gpuregistry.id")
gpu_id: str = Field(foreign_key="gpu_registry.id")
# Latency metrics
network_latency_ms: float = Field()

View File

@@ -0,0 +1,255 @@
"""
Agent Reputation and Trust System Domain Models
Implements SQLModel definitions for agent reputation, trust scores, and economic metrics
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Any
from uuid import uuid4
from enum import Enum
from sqlmodel import SQLModel, Field, Column, JSON
from sqlalchemy import DateTime, Float, Integer, Text
class ReputationLevel(str, Enum):
    """Agent reputation level enumeration; persisted as plain string values."""
    BEGINNER = "beginner"
    INTERMEDIATE = "intermediate"
    ADVANCED = "advanced"
    EXPERT = "expert"
    MASTER = "master"
class TrustScoreCategory(str, Enum):
    """Trust score calculation categories (dimensions that feed the composite score)."""
    PERFORMANCE = "performance"
    RELIABILITY = "reliability"
    COMMUNITY = "community"
    SECURITY = "security"
    ECONOMIC = "economic"
class AgentReputation(SQLModel, table=True):
    """Agent reputation profile and metrics.

    Aggregates an agent's trust score (0-1000), star ratings, economic
    and activity metrics, plus history/achievement JSON blobs.
    """
    __tablename__ = "agent_reputation"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"rep_{uuid4().hex[:8]}", primary_key=True)
    # NOTE(review): FK targets "ai_agent_workflows" — confirm this matches
    # the agent table name used elsewhere in the coordinator schema.
    agent_id: str = Field(index=True, foreign_key="ai_agent_workflows.id")
    # Core reputation metrics
    trust_score: float = Field(default=500.0, ge=0, le=1000)  # 0-1000 scale
    reputation_level: ReputationLevel = Field(default=ReputationLevel.BEGINNER)
    performance_rating: float = Field(default=3.0, ge=1.0, le=5.0)  # 1-5 stars
    reliability_score: float = Field(default=50.0, ge=0, le=100.0)  # 0-100%
    community_rating: float = Field(default=3.0, ge=1.0, le=5.0)  # 1-5 stars
    # Economic metrics
    total_earnings: float = Field(default=0.0)  # Total AITBC earned
    transaction_count: int = Field(default=0)  # Total transactions
    success_rate: float = Field(default=0.0, ge=0, le=100.0)  # Success percentage
    dispute_count: int = Field(default=0)  # Number of disputes
    dispute_won_count: int = Field(default=0)  # Disputes won
    # Activity metrics
    jobs_completed: int = Field(default=0)
    jobs_failed: int = Field(default=0)
    average_response_time: float = Field(default=0.0)  # milliseconds
    uptime_percentage: float = Field(default=0.0, ge=0, le=100.0)
    # Geographic and service info (default_factory: per-row containers)
    geographic_region: str = Field(default="", max_length=50)
    service_categories: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    specialization_tags: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_activity: datetime = Field(default_factory=datetime.utcnow)
    # Additional metadata
    reputation_history: List[Dict[str, Any]] = Field(default_factory=list, sa_column=Column(JSON))
    achievements: List[str] = Field(default_factory=list, sa_column=Column(JSON))
    certifications: List[str] = Field(default_factory=list, sa_column=Column(JSON))
class TrustScoreCalculation(SQLModel, table=True):
    """Trust score calculation record.

    One row per score computation: the category, base vs adjusted score,
    the per-dimension factors that contributed, and calculation metadata.
    """
    __tablename__ = "trust_score_calculations"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"trust_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reputation.id")
    # Calculation details
    category: TrustScoreCategory
    base_score: float = Field(ge=0, le=1000)
    weight_factor: float = Field(default=1.0, ge=0, le=10)
    adjusted_score: float = Field(ge=0, le=1000)
    # Contributing factors (multipliers per trust dimension)
    performance_factor: float = Field(default=1.0)
    reliability_factor: float = Field(default=1.0)
    community_factor: float = Field(default=1.0)
    security_factor: float = Field(default=1.0)
    economic_factor: float = Field(default=1.0)
    # Calculation metadata
    calculation_method: str = Field(default="weighted_average")
    confidence_level: float = Field(default=0.8, ge=0, le=1.0)
    # Timestamps
    calculated_at: datetime = Field(default_factory=datetime.utcnow)
    effective_period: int = Field(default=86400)  # seconds
    # Additional data (default_factory: per-row container)
    calculation_details: Dict[str, Any] = Field(default_factory=dict, sa_column=Column(JSON))
class ReputationEvent(SQLModel, table=True):
    """Reputation-changing events and transactions.

    Records a single event's before/after trust score and level so the
    reputation history can be replayed or audited.
    """
    __tablename__ = "reputation_events"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"event_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reputation.id")
    # Event details
    event_type: str = Field(max_length=50)  # "job_completed", "dispute_resolved", etc.
    event_subtype: str = Field(default="", max_length=50)
    impact_score: float = Field(ge=-100, le=100)  # Positive or negative impact
    # Scoring details (snapshot before/after the event was applied)
    trust_score_before: float = Field(ge=0, le=1000)
    trust_score_after: float = Field(ge=0, le=1000)
    reputation_level_before: Optional[ReputationLevel] = None
    reputation_level_after: Optional[ReputationLevel] = None
    # Event context (optional links to the originating records)
    related_transaction_id: Optional[str] = None
    related_job_id: Optional[str] = None
    related_dispute_id: Optional[str] = None
    # Event metadata
    event_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    verification_status: str = Field(default="pending")  # pending, verified, rejected
    # Timestamps
    occurred_at: datetime = Field(default_factory=datetime.utcnow)
    processed_at: Optional[datetime] = None
    expires_at: Optional[datetime] = None
class AgentEconomicProfile(SQLModel, table=True):
    """Detailed economic profile for agents.

    Aggregated earnings, market-position, and risk metrics for one agent
    as of ``profile_date``; rolling history lives in the JSON columns.
    """
    __tablename__ = "agent_economic_profiles"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"econ_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reputation.id")
    # Earnings breakdown (rolling windows; units presumably AITBC -- TODO confirm)
    daily_earnings: float = Field(default=0.0)
    weekly_earnings: float = Field(default=0.0)
    monthly_earnings: float = Field(default=0.0)
    yearly_earnings: float = Field(default=0.0)
    # Performance metrics
    average_job_value: float = Field(default=0.0)
    peak_hourly_rate: float = Field(default=0.0)
    utilization_rate: float = Field(default=0.0, ge=0, le=100.0)  # percent
    # Market position
    market_share: float = Field(default=0.0, ge=0, le=100.0)  # percent
    competitive_ranking: int = Field(default=0)
    price_tier: str = Field(default="standard")  # budget, standard, premium
    # Risk metrics (0-100 scores)
    default_risk_score: float = Field(default=0.0, ge=0, le=100.0)
    volatility_score: float = Field(default=0.0, ge=0, le=100.0)
    liquidity_score: float = Field(default=0.0, ge=0, le=100.0)
    # Timestamps
    profile_date: datetime = Field(default_factory=datetime.utcnow)
    last_updated: datetime = Field(default_factory=datetime.utcnow)
    # Historical data
    earnings_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    performance_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class CommunityFeedback(SQLModel, table=True):
    """Community feedback and ratings for agents.

    One review per row; all ratings use a 1.0-5.0 scale.
    """
    __tablename__ = "community_feedback"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"feedback_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reputation.id")
    # Feedback details
    reviewer_id: str = Field(index=True)
    reviewer_type: str = Field(default="client")  # client, provider, peer
    # Ratings (1.0-5.0)
    overall_rating: float = Field(ge=1.0, le=5.0)
    performance_rating: float = Field(ge=1.0, le=5.0)
    communication_rating: float = Field(ge=1.0, le=5.0)
    reliability_rating: float = Field(ge=1.0, le=5.0)
    value_rating: float = Field(ge=1.0, le=5.0)
    # Feedback content
    feedback_text: str = Field(default="", max_length=1000)
    feedback_tags: List[str] = Field(default=[], sa_column=Column(JSON))
    # Verification -- verified reviews can carry extra weight in scoring
    verified_transaction: bool = Field(default=False)
    verification_weight: float = Field(default=1.0, ge=0.1, le=10.0)
    # Moderation
    moderation_status: str = Field(default="approved")  # approved, pending, rejected
    moderator_notes: str = Field(default="", max_length=500)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    helpful_votes: int = Field(default=0)
    # Additional metadata
    feedback_context: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class ReputationLevelThreshold(SQLModel, table=True):
    """Configuration for reputation level thresholds.

    One row per :class:`ReputationLevel` defining the minimum metrics an
    agent must meet and the benefits granted at that level.
    """
    __tablename__ = "reputation_level_thresholds"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"threshold_{uuid4().hex[:8]}", primary_key=True)
    level: ReputationLevel
    # Threshold requirements
    min_trust_score: float = Field(ge=0, le=1000)
    min_performance_rating: float = Field(ge=1.0, le=5.0)
    min_reliability_score: float = Field(ge=0, le=100.0)
    min_transactions: int = Field(default=0)
    min_success_rate: float = Field(ge=0, le=100.0)  # percent
    # Benefits and restrictions
    max_concurrent_jobs: int = Field(default=1)
    priority_boost: float = Field(default=1.0)
    fee_discount: float = Field(default=0.0, ge=0, le=100.0)  # percent
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    is_active: bool = Field(default=True)
    # Additional configuration
    level_requirements: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    level_benefits: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))

View File

@@ -0,0 +1,319 @@
"""
Agent Reward System Domain Models
Implements SQLModel definitions for performance-based rewards, incentives, and distributions
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Any
from uuid import uuid4
from enum import Enum
from sqlmodel import SQLModel, Field, Column, JSON
from sqlalchemy import DateTime, Float, Integer, Text
class RewardTier(str, Enum):
    """Reward tier enumeration, ordered lowest to highest."""
    BRONZE = "bronze"
    SILVER = "silver"
    GOLD = "gold"
    PLATINUM = "platinum"
    DIAMOND = "diamond"
class RewardType(str, Enum):
    """Reward type enumeration -- categorizes why a reward was granted."""
    PERFORMANCE_BONUS = "performance_bonus"
    LOYALTY_BONUS = "loyalty_bonus"
    REFERRAL_BONUS = "referral_bonus"
    MILESTONE_BONUS = "milestone_bonus"
    COMMUNITY_BONUS = "community_bonus"
    SPECIAL_BONUS = "special_bonus"
class RewardStatus(str, Enum):
    """Reward status enumeration -- lifecycle of a reward distribution."""
    PENDING = "pending"
    APPROVED = "approved"
    DISTRIBUTED = "distributed"
    EXPIRED = "expired"
    CANCELLED = "cancelled"
class RewardTierConfig(SQLModel, table=True):
    """Reward tier configuration and thresholds.

    One row per :class:`RewardTier`: entry requirements, payout
    multipliers, and tier benefits.
    """
    __tablename__ = "reward_tier_configs"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"tier_{uuid4().hex[:8]}", primary_key=True)
    tier: RewardTier
    # Threshold requirements to qualify for this tier
    min_trust_score: float = Field(ge=0, le=1000)
    min_performance_rating: float = Field(ge=1.0, le=5.0)
    min_monthly_earnings: float = Field(ge=0)
    min_transaction_count: int = Field(ge=0)
    min_success_rate: float = Field(ge=0, le=100.0)  # percent
    # Reward multipliers and benefits (>= 1.0; 1.0 = no boost)
    base_multiplier: float = Field(default=1.0, ge=1.0)
    performance_bonus_multiplier: float = Field(default=1.0, ge=1.0)
    loyalty_bonus_multiplier: float = Field(default=1.0, ge=1.0)
    referral_bonus_multiplier: float = Field(default=1.0, ge=1.0)
    # Tier benefits
    max_concurrent_jobs: int = Field(default=1)
    priority_boost: float = Field(default=1.0)
    fee_discount: float = Field(default=0.0, ge=0, le=100.0)  # percent
    support_level: str = Field(default="basic")
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    is_active: bool = Field(default=True)
    # Additional configuration
    tier_requirements: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    tier_benefits: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class AgentRewardProfile(SQLModel, table=True):
    """Agent reward profile and earnings tracking.

    Per-agent state for the reward system: current tier, cumulative
    earnings, and streak/achievement counters.
    """
    __tablename__ = "agent_reward_profiles"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"reward_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reputation.id")
    # Current tier and status
    current_tier: RewardTier = Field(default=RewardTier.BRONZE)
    tier_progress: float = Field(default=0.0, ge=0, le=100.0)  # Progress to next tier
    # Earnings tracking
    base_earnings: float = Field(default=0.0)
    bonus_earnings: float = Field(default=0.0)
    total_earnings: float = Field(default=0.0)
    lifetime_earnings: float = Field(default=0.0)
    # Performance metrics for rewards
    performance_score: float = Field(default=0.0)
    loyalty_score: float = Field(default=0.0)
    referral_count: int = Field(default=0)
    community_contributions: int = Field(default=0)
    # Reward history
    rewards_distributed: int = Field(default=0)
    last_reward_date: Optional[datetime] = None
    current_streak: int = Field(default=0)  # Consecutive reward periods
    longest_streak: int = Field(default=0)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    last_activity: datetime = Field(default_factory=datetime.utcnow)
    # Additional metadata
    reward_preferences: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    achievement_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class RewardCalculation(SQLModel, table=True):
    """Reward calculation records and factors.

    Audit record of how ``total_reward`` was derived from ``base_amount``
    plus tier multiplier and individual bonus components.
    """
    __tablename__ = "reward_calculations"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"calc_{uuid4().hex[:8]}", primary_key=True)
    # NOTE(review): FK targets agent_reward_profiles.id but is named agent_id -- confirm intent
    agent_id: str = Field(index=True, foreign_key="agent_reward_profiles.id")
    # Calculation details
    reward_type: RewardType
    base_amount: float = Field(ge=0)
    tier_multiplier: float = Field(default=1.0, ge=1.0)
    # Bonus factors (flat amounts added on top of the base)
    performance_bonus: float = Field(default=0.0)
    loyalty_bonus: float = Field(default=0.0)
    referral_bonus: float = Field(default=0.0)
    community_bonus: float = Field(default=0.0)
    special_bonus: float = Field(default=0.0)
    # Final calculation
    total_reward: float = Field(ge=0)
    effective_multiplier: float = Field(default=1.0, ge=1.0)
    # Calculation metadata
    calculation_period: str = Field(default="daily")  # daily, weekly, monthly
    reference_date: datetime = Field(default_factory=datetime.utcnow)
    trust_score_at_calculation: float = Field(ge=0, le=1000)
    performance_metrics: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Timestamps
    calculated_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    # Additional data
    calculation_details: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class RewardDistribution(SQLModel, table=True):
    """Reward distribution records and transactions.

    Tracks the payout of a :class:`RewardCalculation`, including on-chain
    transaction references, retries, and batch scheduling.
    """
    __tablename__ = "reward_distributions"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"dist_{uuid4().hex[:8]}", primary_key=True)
    calculation_id: str = Field(index=True, foreign_key="reward_calculations.id")
    agent_id: str = Field(index=True, foreign_key="agent_reward_profiles.id")
    # Distribution details
    reward_amount: float = Field(ge=0)
    reward_type: RewardType
    distribution_method: str = Field(default="automatic")  # automatic, manual, batch
    # Transaction details (populated once the payout is submitted)
    transaction_id: Optional[str] = None
    transaction_hash: Optional[str] = None
    transaction_status: str = Field(default="pending")
    # Status tracking
    status: RewardStatus = Field(default=RewardStatus.PENDING)
    processed_at: Optional[datetime] = None
    confirmed_at: Optional[datetime] = None
    # Distribution metadata
    batch_id: Optional[str] = None
    priority: int = Field(default=5, ge=1, le=10)  # 1 = highest priority
    retry_count: int = Field(default=0)
    error_message: Optional[str] = None
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    scheduled_at: Optional[datetime] = None
    # Additional data
    distribution_details: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class RewardEvent(SQLModel, table=True):
    """Reward-related events and triggers.

    Records events (tier upgrades, milestones, etc.) that caused or may
    cause reward activity, with links back to the originating records.
    """
    __tablename__ = "reward_events"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"event_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reward_profiles.id")
    # Event details
    event_type: str = Field(max_length=50)  # "tier_upgrade", "milestone_reached", etc.
    event_subtype: str = Field(default="", max_length=50)
    trigger_source: str = Field(max_length=50)  # "system", "manual", "automatic"
    # Event impact
    reward_impact: float = Field(ge=0)  # Total reward amount from this event
    tier_impact: Optional[RewardTier] = None
    # Event context (optional links to related records)
    related_transaction_id: Optional[str] = None
    related_calculation_id: Optional[str] = None
    related_distribution_id: Optional[str] = None
    # Event metadata
    event_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    verification_status: str = Field(default="pending")  # pending, verified, rejected
    # Timestamps
    occurred_at: datetime = Field(default_factory=datetime.utcnow)
    processed_at: Optional[datetime] = None
    expires_at: Optional[datetime] = None
    # Additional metadata
    event_context: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class RewardMilestone(SQLModel, table=True):
    """Reward milestones and achievements.

    Tracks an agent's progress toward a target value and the bonus paid
    when the milestone is completed and claimed.
    """
    __tablename__ = "reward_milestones"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"milestone_{uuid4().hex[:8]}", primary_key=True)
    agent_id: str = Field(index=True, foreign_key="agent_reward_profiles.id")
    # Milestone details
    milestone_type: str = Field(max_length=50)  # "earnings", "jobs", "reputation", etc.
    milestone_name: str = Field(max_length=100)
    milestone_description: str = Field(default="", max_length=500)
    # Threshold and progress
    target_value: float = Field(ge=0)
    current_value: float = Field(default=0.0, ge=0)
    progress_percentage: float = Field(default=0.0, ge=0, le=100.0)
    # Rewards
    reward_amount: float = Field(default=0.0, ge=0)
    reward_type: RewardType = Field(default=RewardType.MILESTONE_BONUS)
    # Status -- completion and claiming are tracked separately
    is_completed: bool = Field(default=False)
    is_claimed: bool = Field(default=False)
    completed_at: Optional[datetime] = None
    claimed_at: Optional[datetime] = None
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    # Additional data
    milestone_config: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class RewardAnalytics(SQLModel, table=True):
    """Reward system analytics and metrics.

    Aggregated totals for one reporting period, broken down by tier and
    reward type.
    """
    __tablename__ = "reward_analytics"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"analytics_{uuid4().hex[:8]}", primary_key=True)
    # Analytics period
    period_type: str = Field(default="daily")  # daily, weekly, monthly
    period_start: datetime
    period_end: datetime
    # Aggregate metrics
    total_rewards_distributed: float = Field(default=0.0)
    total_agents_rewarded: int = Field(default=0)
    average_reward_per_agent: float = Field(default=0.0)
    # Tier distribution (reward totals per tier)
    bronze_rewards: float = Field(default=0.0)
    silver_rewards: float = Field(default=0.0)
    gold_rewards: float = Field(default=0.0)
    platinum_rewards: float = Field(default=0.0)
    diamond_rewards: float = Field(default=0.0)
    # Reward type distribution (reward totals per RewardType)
    performance_rewards: float = Field(default=0.0)
    loyalty_rewards: float = Field(default=0.0)
    referral_rewards: float = Field(default=0.0)
    milestone_rewards: float = Field(default=0.0)
    community_rewards: float = Field(default=0.0)
    special_rewards: float = Field(default=0.0)
    # Performance metrics
    calculation_count: int = Field(default=0)
    distribution_count: int = Field(default=0)
    success_rate: float = Field(default=0.0, ge=0, le=100.0)  # percent
    average_processing_time: float = Field(default=0.0)  # milliseconds
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional analytics data
    analytics_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))

View File

@@ -0,0 +1,426 @@
"""
Agent-to-Agent Trading Protocol Domain Models
Implements SQLModel definitions for P2P trading, matching, negotiation, and settlement
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Any
from uuid import uuid4
from enum import Enum
from sqlmodel import SQLModel, Field, Column, JSON
from sqlalchemy import DateTime, Float, Integer, Text
class TradeStatus(str, Enum):
    """Trade status enumeration -- lifecycle states of a P2P trade."""
    OPEN = "open"
    MATCHING = "matching"
    NEGOTIATING = "negotiating"
    AGREED = "agreed"
    SETTLING = "settling"
    COMPLETED = "completed"
    CANCELLED = "cancelled"
    FAILED = "failed"
class TradeType(str, Enum):
    """Trade type enumeration -- categories of tradable services."""
    AI_POWER = "ai_power"
    COMPUTE_RESOURCES = "compute_resources"
    DATA_SERVICES = "data_services"
    MODEL_SERVICES = "model_services"
    INFERENCE_TASKS = "inference_tasks"
    TRAINING_TASKS = "training_tasks"
class NegotiationStatus(str, Enum):
    """Negotiation status enumeration -- states of a trade negotiation."""
    PENDING = "pending"
    ACTIVE = "active"
    ACCEPTED = "accepted"
    REJECTED = "rejected"
    COUNTERED = "countered"
    EXPIRED = "expired"
class SettlementType(str, Enum):
    """Settlement type enumeration -- how trade payment is released."""
    IMMEDIATE = "immediate"
    ESCROW = "escrow"
    MILESTONE = "milestone"
    SUBSCRIPTION = "subscription"
class TradeRequest(SQLModel, table=True):
    """P2P trade request from buyer agent.

    A buyer's open request describing what service is wanted, under what
    constraints and budget; matching/negotiation counters are updated as
    sellers respond.
    """
    __tablename__ = "trade_requests"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"req_{uuid4().hex[:8]}", primary_key=True)
    # business key referenced by trade_matches.request_id
    request_id: str = Field(unique=True, index=True)
    # Request details
    buyer_agent_id: str = Field(index=True)
    trade_type: TradeType
    title: str = Field(max_length=200)
    description: str = Field(default="", max_length=1000)
    # Requirements and specifications
    requirements: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    specifications: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    constraints: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Pricing and terms
    budget_range: Dict[str, float] = Field(default={}, sa_column=Column(JSON))  # min, max
    preferred_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    negotiation_flexible: bool = Field(default=True)
    # Timing and duration
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    duration_hours: Optional[int] = None
    urgency_level: str = Field(default="normal")  # low, normal, high, urgent
    # Geographic and service constraints
    preferred_regions: List[str] = Field(default=[], sa_column=Column(JSON))
    excluded_regions: List[str] = Field(default=[], sa_column=Column(JSON))
    service_level_required: str = Field(default="standard")  # basic, standard, premium
    # Status and metadata
    status: TradeStatus = Field(default=TradeStatus.OPEN)
    priority: int = Field(default=5, ge=1, le=10)  # 1 = highest priority
    # Matching and negotiation (denormalized counters)
    match_count: int = Field(default=0)
    negotiation_count: int = Field(default=0)
    best_match_score: float = Field(default=0.0)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    last_activity: datetime = Field(default_factory=datetime.utcnow)
    # Additional metadata
    tags: List[str] = Field(default=[], sa_column=Column(JSON))
    trading_metadata: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class TradeMatch(SQLModel, table=True):
    """Trade match between buyer request and seller offer.

    Scores one buyer/seller pairing for a request; per-dimension
    compatibility columns explain the overall ``match_score``.
    """
    __tablename__ = "trade_matches"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"match_{uuid4().hex[:8]}", primary_key=True)
    match_id: str = Field(unique=True, index=True)
    # Match participants
    request_id: str = Field(index=True, foreign_key="trade_requests.request_id")
    buyer_agent_id: str = Field(index=True)
    seller_agent_id: str = Field(index=True)
    # Matching details
    match_score: float = Field(ge=0, le=100)  # 0-100 compatibility score
    confidence_level: float = Field(ge=0, le=1)  # 0-1 confidence in match
    # Compatibility factors (0-100 per dimension)
    price_compatibility: float = Field(ge=0, le=100)
    timing_compatibility: float = Field(ge=0, le=100)
    specification_compatibility: float = Field(ge=0, le=100)
    reputation_compatibility: float = Field(ge=0, le=100)
    geographic_compatibility: float = Field(ge=0, le=100)
    # Seller offer details
    seller_offer: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    proposed_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Status and interaction
    status: TradeStatus = Field(default=TradeStatus.MATCHING)
    buyer_response: Optional[str] = None  # interested, not_interested, negotiating
    seller_response: Optional[str] = None  # accepted, rejected, countered
    # Negotiation initiation
    negotiation_initiated: bool = Field(default=False)
    negotiation_initiator: Optional[str] = None  # buyer, seller
    initial_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = None
    last_interaction: Optional[datetime] = None
    # Additional data
    match_factors: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    interaction_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class TradeNegotiation(SQLModel, table=True):
    """Negotiation process between buyer and seller.

    Round-based negotiation state for a match, including term snapshots
    (initial/current/final) and optional AI-assisted strategy settings.
    """
    __tablename__ = "trade_negotiations"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"neg_{uuid4().hex[:8]}", primary_key=True)
    negotiation_id: str = Field(unique=True, index=True)
    # Negotiation participants
    match_id: str = Field(index=True, foreign_key="trade_matches.match_id")
    buyer_agent_id: str = Field(index=True)
    seller_agent_id: str = Field(index=True)
    # Negotiation details
    status: NegotiationStatus = Field(default=NegotiationStatus.PENDING)
    negotiation_round: int = Field(default=1)
    max_rounds: int = Field(default=5)
    # Terms and conditions (snapshots over the negotiation's lifetime)
    current_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    initial_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    final_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Negotiation parameters
    price_range: Dict[str, float] = Field(default={}, sa_column=Column(JSON))
    service_level_agreements: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    delivery_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    payment_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Negotiation metrics
    concession_count: int = Field(default=0)
    counter_offer_count: int = Field(default=0)
    agreement_score: float = Field(default=0.0, ge=0, le=100)
    # AI negotiation assistance
    ai_assisted: bool = Field(default=True)
    negotiation_strategy: str = Field(default="balanced")  # aggressive, balanced, cooperative
    auto_accept_threshold: float = Field(default=85.0, ge=0, le=100)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    expires_at: Optional[datetime] = None
    last_offer_at: Optional[datetime] = None
    # Additional data
    negotiation_history: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    ai_recommendations: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class TradeAgreement(SQLModel, table=True):
    """Final trade agreement between buyer and seller.

    The signed contract resulting from a negotiation: agreed terms,
    pricing, SLA, compliance, and execution progress.
    """
    __tablename__ = "trade_agreements"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"agree_{uuid4().hex[:8]}", primary_key=True)
    agreement_id: str = Field(unique=True, index=True)
    # Agreement participants
    negotiation_id: str = Field(index=True, foreign_key="trade_negotiations.negotiation_id")
    buyer_agent_id: str = Field(index=True)
    seller_agent_id: str = Field(index=True)
    # Agreement details
    trade_type: TradeType
    title: str = Field(max_length=200)
    description: str = Field(default="", max_length=1000)
    # Final terms and conditions
    agreed_terms: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    specifications: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    service_level_agreement: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Pricing and payment
    total_price: float = Field(ge=0)
    currency: str = Field(default="AITBC")
    payment_schedule: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    settlement_type: SettlementType
    # Delivery and performance
    delivery_timeline: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    performance_metrics: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    quality_standards: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Legal and compliance
    terms_and_conditions: str = Field(default="", max_length=5000)
    compliance_requirements: List[str] = Field(default=[], sa_column=Column(JSON))
    dispute_resolution: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Status and execution
    status: TradeStatus = Field(default=TradeStatus.AGREED)
    execution_status: str = Field(default="pending")  # pending, active, completed, failed
    completion_percentage: float = Field(default=0.0, ge=0, le=100)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    signed_at: datetime = Field(default_factory=datetime.utcnow)
    starts_at: Optional[datetime] = None
    ends_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    # Additional data
    agreement_document: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    attachments: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class TradeSettlement(SQLModel, table=True):
    """Trade settlement and payment processing.

    Tracks payment execution for an agreement, with optional escrow and
    milestone schedules, fee breakdown, and dispute/audit data.
    """
    __tablename__ = "trade_settlements"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"settle_{uuid4().hex[:8]}", primary_key=True)
    settlement_id: str = Field(unique=True, index=True)
    # Settlement reference
    agreement_id: str = Field(index=True, foreign_key="trade_agreements.agreement_id")
    buyer_agent_id: str = Field(index=True)
    seller_agent_id: str = Field(index=True)
    # Settlement details
    settlement_type: SettlementType
    total_amount: float = Field(ge=0)
    currency: str = Field(default="AITBC")
    # Payment processing (on-chain references filled in as payment progresses)
    payment_status: str = Field(default="pending")  # pending, processing, completed, failed
    transaction_id: Optional[str] = None
    transaction_hash: Optional[str] = None
    block_number: Optional[int] = None
    # Escrow details (if applicable)
    escrow_enabled: bool = Field(default=False)
    escrow_address: Optional[str] = None
    escrow_release_conditions: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Milestone payments (if applicable)
    milestone_payments: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
    completed_milestones: List[str] = Field(default=[], sa_column=Column(JSON))
    # Fees and deductions
    platform_fee: float = Field(default=0.0)
    processing_fee: float = Field(default=0.0)
    gas_fee: float = Field(default=0.0)
    net_amount_seller: float = Field(ge=0)  # total_amount minus fees -- TODO confirm
    # Status and timestamps
    status: TradeStatus = Field(default=TradeStatus.SETTLING)
    initiated_at: datetime = Field(default_factory=datetime.utcnow)
    processed_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    refunded_at: Optional[datetime] = None
    # Dispute and resolution
    dispute_raised: bool = Field(default=False)
    dispute_details: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    resolution_details: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    # Additional data
    settlement_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    audit_trail: List[Dict[str, Any]] = Field(default=[], sa_column=Column(JSON))
class TradeFeedback(SQLModel, table=True):
    """Trade feedback and rating system.

    One post-trade review per row; either party may review the other
    (``reviewer_role``). All ratings use a 1.0-5.0 scale.
    """
    __tablename__ = "trade_feedback"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"feedback_{uuid4().hex[:8]}", primary_key=True)
    # Feedback reference
    agreement_id: str = Field(index=True, foreign_key="trade_agreements.agreement_id")
    reviewer_agent_id: str = Field(index=True)
    reviewed_agent_id: str = Field(index=True)
    reviewer_role: str = Field(default="buyer")  # buyer, seller
    # Ratings (1.0-5.0)
    overall_rating: float = Field(ge=1.0, le=5.0)
    communication_rating: float = Field(ge=1.0, le=5.0)
    performance_rating: float = Field(ge=1.0, le=5.0)
    timeliness_rating: float = Field(ge=1.0, le=5.0)
    value_rating: float = Field(ge=1.0, le=5.0)
    # Feedback content
    feedback_text: str = Field(default="", max_length=1000)
    feedback_tags: List[str] = Field(default=[], sa_column=Column(JSON))
    # Trade specifics
    trade_category: str = Field(default="general")
    trade_complexity: str = Field(default="medium")  # simple, medium, complex
    trade_duration: Optional[int] = None  # in hours
    # Verification and moderation
    verified_trade: bool = Field(default=True)
    moderation_status: str = Field(default="approved")  # approved, pending, rejected
    moderator_notes: str = Field(default="", max_length=500)
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    trade_completed_at: datetime  # required: when the reviewed trade finished
    # Additional data
    feedback_context: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    performance_metrics: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
class TradingAnalytics(SQLModel, table=True):
    """P2P trading system analytics and metrics.

    Aggregated trading statistics for one reporting period: volumes,
    financials, agent activity, timing, and quality metrics.
    """
    __tablename__ = "trading_analytics"
    __table_args__ = {"extend_existing": True}
    id: str = Field(default_factory=lambda: f"analytics_{uuid4().hex[:8]}", primary_key=True)
    # Analytics period
    period_type: str = Field(default="daily")  # daily, weekly, monthly
    period_start: datetime
    period_end: datetime
    # Trade volume metrics
    total_trades: int = Field(default=0)
    completed_trades: int = Field(default=0)
    failed_trades: int = Field(default=0)
    cancelled_trades: int = Field(default=0)
    # Financial metrics
    total_trade_volume: float = Field(default=0.0)
    average_trade_value: float = Field(default=0.0)
    total_platform_fees: float = Field(default=0.0)
    # Trade type distribution (TradeType value -> count)
    trade_type_distribution: Dict[str, int] = Field(default={}, sa_column=Column(JSON))
    # Agent metrics
    active_buyers: int = Field(default=0)
    active_sellers: int = Field(default=0)
    new_agents: int = Field(default=0)
    # Performance metrics
    average_matching_time: float = Field(default=0.0)  # minutes
    average_negotiation_time: float = Field(default=0.0)  # minutes
    average_settlement_time: float = Field(default=0.0)  # minutes
    success_rate: float = Field(default=0.0, ge=0, le=100.0)  # percent
    # Geographic distribution (region name -> trade count)
    regional_distribution: Dict[str, int] = Field(default={}, sa_column=Column(JSON))
    # Quality metrics
    # BUGFIX: lower bound was ge=1.0, which the default of 0.0 violated;
    # relaxed to ge=0.0 so 0.0 is a valid "no ratings this period" sentinel
    # while actual ratings remain on the 1.0-5.0 scale.
    average_rating: float = Field(default=0.0, ge=0.0, le=5.0)
    dispute_rate: float = Field(default=0.0, ge=0, le=100.0)  # percent
    repeat_trade_rate: float = Field(default=0.0, ge=0, le=100.0)  # percent
    # Timestamps
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    # Additional analytics data
    analytics_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))
    trends_data: Dict[str, Any] = Field(default={}, sa_column=Column(JSON))

View File

@@ -32,7 +32,7 @@ class Wallet(SQLModel, table=True):
__table_args__ = {"extend_existing": True}
id: Optional[int] = Field(default=None, primary_key=True)
user_id: str = Field(foreign_key="user.id")
user_id: str = Field(foreign_key="users.id")
address: str = Field(unique=True, index=True)
balance: float = Field(default=0.0)
created_at: datetime = Field(default_factory=datetime.utcnow)
@@ -49,8 +49,8 @@ class Transaction(SQLModel, table=True):
__table_args__ = {"extend_existing": True}
id: str = Field(primary_key=True)
user_id: str = Field(foreign_key="user.id")
wallet_id: Optional[int] = Field(foreign_key="wallet.id")
user_id: str = Field(foreign_key="users.id")
wallet_id: Optional[int] = Field(foreign_key="wallets.id")
type: str = Field(max_length=20)
status: str = Field(default="pending", max_length=20)
amount: float
@@ -71,7 +71,7 @@ class UserSession(SQLModel, table=True):
__table_args__ = {"extend_existing": True}
id: Optional[int] = Field(default=None, primary_key=True)
user_id: str = Field(foreign_key="user.id")
user_id: str = Field(foreign_key="users.id")
token: str = Field(unique=True, index=True)
expires_at: datetime
created_at: datetime = Field(default_factory=datetime.utcnow)

View File

@@ -23,11 +23,13 @@ from .routers import (
edge_gpu
)
from .routers.ml_zk_proofs import router as ml_zk_proofs
from .routers.governance import router as governance
from .routers.community import router as community_router
from .routers.governance import router as new_governance_router
from .routers.partners import router as partners
from .routers.marketplace_enhanced_simple import router as marketplace_enhanced
from .routers.openclaw_enhanced_simple import router as openclaw_enhanced
from .routers.monitoring_dashboard import router as monitoring_dashboard
from .routers.multi_modal_rl import router as multi_modal_rl_router
from .storage.models_governance import GovernanceProposal, ProposalVote, TreasuryTransaction, GovernanceParameter
from .exceptions import AITBCError, ErrorResponse
from .logging import get_logger
@@ -79,7 +81,8 @@ def create_app() -> FastAPI:
app.include_router(payments, prefix="/v1")
app.include_router(marketplace_offers, prefix="/v1")
app.include_router(zk_applications.router, prefix="/v1")
app.include_router(governance, prefix="/v1")
app.include_router(new_governance_router, prefix="/v1")
app.include_router(community_router, prefix="/v1")
app.include_router(partners, prefix="/v1")
app.include_router(explorer, prefix="/v1")
app.include_router(web_vitals, prefix="/v1")
@@ -88,6 +91,7 @@ def create_app() -> FastAPI:
app.include_router(marketplace_enhanced, prefix="/v1")
app.include_router(openclaw_enhanced, prefix="/v1")
app.include_router(monitoring_dashboard, prefix="/v1")
app.include_router(multi_modal_rl_router, prefix="/v1")
# Add Prometheus metrics endpoint
metrics_app = make_asgi_app()

View File

@@ -0,0 +1,190 @@
"""
Agent Creativity API Endpoints
REST API for agent creativity enhancement, ideation, and cross-domain synthesis
"""
from datetime import datetime
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query, Body
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.creative_capabilities_service import (
CreativityEnhancementEngine, IdeationAlgorithm, CrossDomainCreativeIntegrator
)
from ..domain.agent_performance import CreativeCapability
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/agent-creativity", tags=["agent-creativity"])
# Models
class CreativeCapabilityCreate(BaseModel):
    """Request payload for initializing a creative capability on an agent."""
    agent_id: str
    creative_domain: str = Field(..., description="e.g., artistic, design, innovation, scientific, narrative")
    capability_type: str = Field(..., description="e.g., generative, compositional, analytical, innovative")
    generation_models: List[str]
    # Starting creativity score; validated into the [0.0, 1.0] range.
    initial_score: float = Field(0.5, ge=0.0, le=1.0)

class CreativeCapabilityResponse(BaseModel):
    """Response shape describing a stored creative capability and its scores."""
    capability_id: str
    agent_id: str
    creative_domain: str
    capability_type: str
    originality_score: float
    novelty_score: float
    aesthetic_quality: float
    coherence_score: float
    style_variety: int
    creative_specializations: List[str]
    status: str

class EnhanceCreativityRequest(BaseModel):
    """Parameters for a creativity-enhancement training run."""
    algorithm: str = Field("divergent_thinking", description="divergent_thinking, conceptual_blending, morphological_analysis, lateral_thinking, bisociation")
    # Number of enhancement iterations; bounded to keep a single run cheap.
    training_cycles: int = Field(100, ge=1, le=1000)

class EvaluateCreationRequest(BaseModel):
    """A creative output to score, optionally with expert feedback values."""
    creation_data: Dict[str, Any]
    expert_feedback: Optional[Dict[str, float]] = None

class IdeationRequest(BaseModel):
    """Request for idea generation against a problem statement."""
    problem_statement: str
    domain: str
    technique: str = Field("scamper", description="scamper, triz, six_thinking_hats, first_principles, biomimicry")
    num_ideas: int = Field(5, ge=1, le=20)
    # Free-form constraints forwarded to the ideation engine; may be omitted.
    constraints: Optional[Dict[str, Any]] = None

class SynthesisRequest(BaseModel):
    """Request to blend a primary domain with one or more secondary domains."""
    agent_id: str
    primary_domain: str
    secondary_domains: List[str]
    synthesis_goal: str
# Endpoints
@router.post("/capabilities", response_model=CreativeCapabilityResponse)
async def create_creative_capability(
    request: CreativeCapabilityCreate,
    session: SessionDep
):
    """Initialize a new creative capability for an agent.

    Delegates to the enhancement engine and returns its persisted capability.
    """
    enhancement_engine = CreativityEnhancementEngine()
    try:
        return await enhancement_engine.create_creative_capability(
            session=session,
            agent_id=request.agent_id,
            creative_domain=request.creative_domain,
            capability_type=request.capability_type,
            generation_models=request.generation_models,
            initial_score=request.initial_score,
        )
    except Exception as e:
        # Any engine failure is logged and surfaced as a 500 with its message.
        logger.error(f"Error creating creative capability: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/capabilities/{capability_id}/enhance")
async def enhance_creativity(
    capability_id: str,
    request: EnhanceCreativityRequest,
    session: SessionDep
):
    """Enhance a specific creative capability using the requested algorithm."""
    enhancement_engine = CreativityEnhancementEngine()
    try:
        return await enhancement_engine.enhance_creativity(
            session=session,
            capability_id=capability_id,
            algorithm=request.algorithm,
            training_cycles=request.training_cycles,
        )
    except ValueError as e:
        # The engine raises ValueError for an unknown capability id -> 404.
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error enhancing creativity: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/capabilities/{capability_id}/evaluate")
async def evaluate_creation(
    capability_id: str,
    request: EvaluateCreationRequest,
    session: SessionDep
):
    """Evaluate a creative output and update the capability's metrics."""
    creativity_engine = CreativityEnhancementEngine()
    try:
        return await creativity_engine.evaluate_creation(
            session=session,
            capability_id=capability_id,
            creation_data=request.creation_data,
            expert_feedback=request.expert_feedback,
        )
    except ValueError as e:
        # Unknown capability id -> 404 rather than a generic server error.
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error evaluating creation: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/ideation/generate")
async def generate_ideas(request: IdeationRequest):
    """Generate innovative ideas using the selected ideation technique."""
    ideation = IdeationAlgorithm()
    try:
        return await ideation.generate_ideas(
            problem_statement=request.problem_statement,
            domain=request.domain,
            technique=request.technique,
            num_ideas=request.num_ideas,
            constraints=request.constraints,
        )
    except Exception as e:
        logger.error(f"Error generating ideas: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/synthesis/cross-domain")
async def synthesize_cross_domain(
    request: SynthesisRequest,
    session: SessionDep
):
    """Synthesize concepts from multiple domains into a novel output."""
    synthesis_integrator = CrossDomainCreativeIntegrator()
    try:
        return await synthesis_integrator.generate_cross_domain_synthesis(
            session=session,
            agent_id=request.agent_id,
            primary_domain=request.primary_domain,
            secondary_domains=request.secondary_domains,
            synthesis_goal=request.synthesis_goal,
        )
    except ValueError as e:
        # Invalid domain combination or request data -> client error.
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Error in cross-domain synthesis: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/capabilities/{agent_id}")
async def list_agent_creative_capabilities(
    agent_id: str,
    session: SessionDep
):
    """List all creative capabilities for a specific agent.

    Returns every CreativeCapability row matching ``agent_id``; an unknown
    agent yields an empty list rather than an error.
    """
    # `select` is not in this module's imports; without this local import the
    # endpoint raised NameError on every request.
    from sqlmodel import select

    try:
        capabilities = session.exec(
            select(CreativeCapability).where(CreativeCapability.agent_id == agent_id)
        ).all()
        return capabilities
    except Exception as e:
        logger.error(f"Error fetching creative capabilities: {e}")
        raise HTTPException(status_code=500, detail=str(e))

View File

@@ -0,0 +1,721 @@
"""
Advanced Agent Performance API Endpoints
REST API for meta-learning, resource optimization, and performance enhancement
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.agent_performance_service import (
AgentPerformanceService, MetaLearningEngine, ResourceManager, PerformanceOptimizer
)
from ..domain.agent_performance import (
AgentPerformanceProfile, MetaLearningModel, ResourceAllocation,
PerformanceOptimization, AgentCapability, FusionModel,
ReinforcementLearningConfig, CreativeCapability,
LearningStrategy, PerformanceMetric, ResourceType,
OptimizationTarget
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/agent-performance", tags=["agent-performance"])
# Pydantic models for API requests/responses
class PerformanceProfileRequest(BaseModel):
    """Request model for performance profile creation"""
    agent_id: str
    # Agent flavor; defaults to the platform's "openclaw" agent type.
    agent_type: str = Field(default="openclaw")
    initial_metrics: Dict[str, float] = Field(default_factory=dict)

class PerformanceProfileResponse(BaseModel):
    """Response model for performance profile"""
    profile_id: str
    agent_id: str
    agent_type: str
    overall_score: float
    performance_metrics: Dict[str, float]
    learning_strategies: List[str]
    specialization_areas: List[str]
    expertise_levels: Dict[str, float]
    resource_efficiency: Dict[str, float]
    cost_per_task: float
    throughput: float
    average_latency: float
    # Timestamps are serialized as ISO-8601 strings by the endpoints.
    last_assessed: Optional[str]
    created_at: str
    updated_at: str

class MetaLearningRequest(BaseModel):
    """Request model for meta-learning model creation"""
    model_name: str
    base_algorithms: List[str]
    # Validated against the LearningStrategy enum by pydantic.
    meta_strategy: LearningStrategy
    adaptation_targets: List[str]

class MetaLearningResponse(BaseModel):
    """Response model for meta-learning model"""
    model_id: str
    model_name: str
    model_type: str
    # Enum values are flattened to their string form (e.g. "meta_learning").
    meta_strategy: str
    adaptation_targets: List[str]
    meta_accuracy: float
    adaptation_speed: float
    generalization_ability: float
    status: str
    created_at: str
    trained_at: Optional[str]

class ResourceAllocationRequest(BaseModel):
    """Request model for resource allocation"""
    agent_id: str
    task_requirements: Dict[str, Any]
    optimization_target: OptimizationTarget = Field(default=OptimizationTarget.EFFICIENCY)
    priority_level: str = Field(default="normal")

class ResourceAllocationResponse(BaseModel):
    """Response model for resource allocation"""
    allocation_id: str
    agent_id: str
    cpu_cores: float
    memory_gb: float
    gpu_count: float
    gpu_memory_gb: float
    storage_gb: float
    network_bandwidth: float
    optimization_target: str
    status: str
    allocated_at: str

class PerformanceOptimizationRequest(BaseModel):
    """Request model for performance optimization"""
    agent_id: str
    target_metric: PerformanceMetric
    current_performance: Dict[str, float]
    optimization_type: str = Field(default="comprehensive")

class PerformanceOptimizationResponse(BaseModel):
    """Response model for performance optimization"""
    optimization_id: str
    agent_id: str
    optimization_type: str
    target_metric: str
    status: str
    performance_improvement: float
    resource_savings: float
    cost_savings: float
    overall_efficiency_gain: float
    created_at: str
    completed_at: Optional[str]

class CapabilityRequest(BaseModel):
    """Request model for agent capability"""
    agent_id: str
    capability_name: str
    capability_type: str
    domain_area: str
    # Skill on a 0-10 scale; the endpoint derives proficiency as level / 10.
    skill_level: float = Field(ge=0, le=10.0)
    specialization_areas: List[str] = Field(default_factory=list)

class CapabilityResponse(BaseModel):
    """Response model for agent capability"""
    capability_id: str
    agent_id: str
    capability_name: str
    capability_type: str
    domain_area: str
    skill_level: float
    proficiency_score: float
    specialization_areas: List[str]
    status: str
    created_at: str
# API Endpoints
@router.post("/profiles", response_model=PerformanceProfileResponse)
async def create_performance_profile(
    profile_request: PerformanceProfileRequest,
    session: SessionDep
) -> PerformanceProfileResponse:
    """Create an agent performance profile and return its API projection."""
    service = AgentPerformanceService(session)
    try:
        profile = await service.create_performance_profile(
            agent_id=profile_request.agent_id,
            agent_type=profile_request.agent_type,
            initial_metrics=profile_request.initial_metrics,
        )
        # Flatten the stored profile into the response schema; timestamps
        # are rendered as ISO-8601 strings.
        payload = {
            "profile_id": profile.profile_id,
            "agent_id": profile.agent_id,
            "agent_type": profile.agent_type,
            "overall_score": profile.overall_score,
            "performance_metrics": profile.performance_metrics,
            "learning_strategies": profile.learning_strategies,
            "specialization_areas": profile.specialization_areas,
            "expertise_levels": profile.expertise_levels,
            "resource_efficiency": profile.resource_efficiency,
            "cost_per_task": profile.cost_per_task,
            "throughput": profile.throughput,
            "average_latency": profile.average_latency,
            "last_assessed": profile.last_assessed.isoformat() if profile.last_assessed else None,
            "created_at": profile.created_at.isoformat(),
            "updated_at": profile.updated_at.isoformat(),
        }
        return PerformanceProfileResponse(**payload)
    except Exception as e:
        logger.error(f"Error creating performance profile: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/profiles/{agent_id}", response_model=Dict[str, Any])
async def get_performance_profile(
    agent_id: str,
    session: SessionDep
) -> Dict[str, Any]:
    """Fetch the comprehensive performance profile for one agent."""
    service = AgentPerformanceService(session)
    try:
        profile = await service.get_comprehensive_profile(agent_id)
        if 'error' not in profile:
            return profile
        # The service signals "not found" by embedding an 'error' key.
        raise HTTPException(status_code=404, detail=profile['error'])
    except HTTPException:
        # Re-raise the 404 unchanged instead of masking it as a 500.
        raise
    except Exception as e:
        logger.error(f"Error getting performance profile for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/profiles/{agent_id}/metrics")
async def update_performance_metrics(
    agent_id: str,
    metrics: Dict[str, float],
    session: SessionDep,
    task_context: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Update agent performance metrics and return the refreshed summary.

    Note: ``session`` is declared before ``task_context`` because parameters
    without defaults must precede defaulted ones; the original order was a
    SyntaxError at import time.
    """
    performance_service = AgentPerformanceService(session)
    try:
        profile = await performance_service.update_performance_metrics(
            agent_id=agent_id,
            new_metrics=metrics,
            task_context=task_context
        )
        return {
            "success": True,
            "profile_id": profile.profile_id,
            "overall_score": profile.overall_score,
            "updated_at": profile.updated_at.isoformat(),
            "improvement_trends": profile.improvement_trends
        }
    except Exception as e:
        logger.error(f"Error updating performance metrics for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/meta-learning/models", response_model=MetaLearningResponse)
async def create_meta_learning_model(
    model_request: MetaLearningRequest,
    session: SessionDep
) -> MetaLearningResponse:
    """Create a meta-learning model and return its API projection."""
    engine = MetaLearningEngine()
    try:
        model = await engine.create_meta_learning_model(
            session=session,
            model_name=model_request.model_name,
            base_algorithms=model_request.base_algorithms,
            meta_strategy=model_request.meta_strategy,
            adaptation_targets=model_request.adaptation_targets,
        )
        # Enum fields are flattened to strings, timestamps to ISO-8601.
        payload = {
            "model_id": model.model_id,
            "model_name": model.model_name,
            "model_type": model.model_type,
            "meta_strategy": model.meta_strategy.value,
            "adaptation_targets": model.adaptation_targets,
            "meta_accuracy": model.meta_accuracy,
            "adaptation_speed": model.adaptation_speed,
            "generalization_ability": model.generalization_ability,
            "status": model.status,
            "created_at": model.created_at.isoformat(),
            "trained_at": model.trained_at.isoformat() if model.trained_at else None,
        }
        return MetaLearningResponse(**payload)
    except Exception as e:
        logger.error(f"Error creating meta-learning model: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/meta-learning/models/{model_id}/adapt")
async def adapt_model_to_task(
    model_id: str,
    task_data: Dict[str, Any],
    session: SessionDep,
    adaptation_steps: int = Query(default=10, ge=1, le=50),
) -> Dict[str, Any]:
    """Adapt a meta-learning model to a new task.

    Note: ``session`` is declared before ``adaptation_steps`` because
    parameters without defaults must precede defaulted ones; the original
    order was a SyntaxError at import time.
    """
    meta_learning_engine = MetaLearningEngine()
    try:
        results = await meta_learning_engine.adapt_to_new_task(
            session=session,
            model_id=model_id,
            task_data=task_data,
            adaptation_steps=adaptation_steps
        )
        return {
            "success": True,
            "model_id": model_id,
            "adaptation_results": results,
            "adapted_at": datetime.utcnow().isoformat()
        }
    except ValueError as e:
        # Unknown model id -> 404 rather than a generic server error.
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error adapting model {model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/meta-learning/models")
async def list_meta_learning_models(
    session: SessionDep,
    status: Optional[str] = Query(default=None, description="Filter by status"),
    meta_strategy: Optional[str] = Query(default=None, description="Filter by meta strategy"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
) -> List[Dict[str, Any]]:
    """List meta-learning models, newest first, with optional filters.

    Fixes vs. the original: ``session`` moved ahead of the defaulted query
    parameters (non-default after default was a SyntaxError), and ``select``
    is imported locally because it is missing from the module imports.
    """
    from sqlmodel import select

    try:
        query = select(MetaLearningModel)
        if status:
            query = query.where(MetaLearningModel.status == status)
        if meta_strategy:
            # An unknown strategy name raises ValueError and surfaces as 500.
            query = query.where(MetaLearningModel.meta_strategy == LearningStrategy(meta_strategy))
        models = session.exec(
            query.order_by(MetaLearningModel.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "model_id": model.model_id,
                "model_name": model.model_name,
                "model_type": model.model_type,
                "meta_strategy": model.meta_strategy.value,
                "adaptation_targets": model.adaptation_targets,
                "meta_accuracy": model.meta_accuracy,
                "adaptation_speed": model.adaptation_speed,
                "generalization_ability": model.generalization_ability,
                "status": model.status,
                "deployment_count": model.deployment_count,
                "success_rate": model.success_rate,
                "created_at": model.created_at.isoformat(),
                "trained_at": model.trained_at.isoformat() if model.trained_at else None
            }
            for model in models
        ]
    except Exception as e:
        logger.error(f"Error listing meta-learning models: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/resources/allocate", response_model=ResourceAllocationResponse)
async def allocate_resources(
    allocation_request: ResourceAllocationRequest,
    session: SessionDep
) -> ResourceAllocationResponse:
    """Allocate compute resources for an agent task."""
    manager = ResourceManager()
    try:
        allocation = await manager.allocate_resources(
            session=session,
            agent_id=allocation_request.agent_id,
            task_requirements=allocation_request.task_requirements,
            optimization_target=allocation_request.optimization_target,
        )
        # Project the allocation record into the response schema.
        payload = {
            "allocation_id": allocation.allocation_id,
            "agent_id": allocation.agent_id,
            "cpu_cores": allocation.cpu_cores,
            "memory_gb": allocation.memory_gb,
            "gpu_count": allocation.gpu_count,
            "gpu_memory_gb": allocation.gpu_memory_gb,
            "storage_gb": allocation.storage_gb,
            "network_bandwidth": allocation.network_bandwidth,
            "optimization_target": allocation.optimization_target.value,
            "status": allocation.status,
            "allocated_at": allocation.allocated_at.isoformat(),
        }
        return ResourceAllocationResponse(**payload)
    except Exception as e:
        logger.error(f"Error allocating resources: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/resources/{agent_id}")
async def get_resource_allocations(
    agent_id: str,
    session: SessionDep,
    status: Optional[str] = Query(default=None, description="Filter by status"),
    limit: int = Query(default=20, ge=1, le=100, description="Number of results"),
) -> List[Dict[str, Any]]:
    """Get resource allocations for an agent, newest first.

    Fixes vs. the original: ``session`` moved ahead of the defaulted query
    parameters (non-default after default was a SyntaxError), and ``select``
    is imported locally because it is missing from the module imports.
    """
    from sqlmodel import select

    try:
        query = select(ResourceAllocation).where(ResourceAllocation.agent_id == agent_id)
        if status:
            query = query.where(ResourceAllocation.status == status)
        allocations = session.exec(
            query.order_by(ResourceAllocation.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "allocation_id": allocation.allocation_id,
                "agent_id": allocation.agent_id,
                "task_id": allocation.task_id,
                "cpu_cores": allocation.cpu_cores,
                "memory_gb": allocation.memory_gb,
                "gpu_count": allocation.gpu_count,
                "gpu_memory_gb": allocation.gpu_memory_gb,
                "storage_gb": allocation.storage_gb,
                "network_bandwidth": allocation.network_bandwidth,
                "optimization_target": allocation.optimization_target.value,
                "priority_level": allocation.priority_level,
                "status": allocation.status,
                "efficiency_score": allocation.efficiency_score,
                "cost_efficiency": allocation.cost_efficiency,
                "allocated_at": allocation.allocated_at.isoformat() if allocation.allocated_at else None,
                "started_at": allocation.started_at.isoformat() if allocation.started_at else None,
                "completed_at": allocation.completed_at.isoformat() if allocation.completed_at else None
            }
            for allocation in allocations
        ]
    except Exception as e:
        logger.error(f"Error getting resource allocations for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/optimization/optimize", response_model=PerformanceOptimizationResponse)
async def optimize_performance(
    optimization_request: PerformanceOptimizationRequest,
    session: SessionDep
) -> PerformanceOptimizationResponse:
    """Run a performance-optimization pass for an agent."""
    optimizer = PerformanceOptimizer()
    try:
        optimization = await optimizer.optimize_agent_performance(
            session=session,
            agent_id=optimization_request.agent_id,
            target_metric=optimization_request.target_metric,
            current_performance=optimization_request.current_performance,
        )
        # Project the optimization record into the response schema.
        payload = {
            "optimization_id": optimization.optimization_id,
            "agent_id": optimization.agent_id,
            "optimization_type": optimization.optimization_type,
            "target_metric": optimization.target_metric.value,
            "status": optimization.status,
            "performance_improvement": optimization.performance_improvement,
            "resource_savings": optimization.resource_savings,
            "cost_savings": optimization.cost_savings,
            "overall_efficiency_gain": optimization.overall_efficiency_gain,
            "created_at": optimization.created_at.isoformat(),
            "completed_at": optimization.completed_at.isoformat() if optimization.completed_at else None,
        }
        return PerformanceOptimizationResponse(**payload)
    except Exception as e:
        logger.error(f"Error optimizing performance: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/optimization/{agent_id}")
async def get_optimization_history(
    agent_id: str,
    session: SessionDep,
    status: Optional[str] = Query(default=None, description="Filter by status"),
    target_metric: Optional[str] = Query(default=None, description="Filter by target metric"),
    limit: int = Query(default=20, ge=1, le=100, description="Number of results"),
) -> List[Dict[str, Any]]:
    """Get optimization history for an agent, newest first.

    Fixes vs. the original: ``session`` moved ahead of the defaulted query
    parameters (non-default after default was a SyntaxError), and ``select``
    is imported locally because it is missing from the module imports.
    """
    from sqlmodel import select

    try:
        query = select(PerformanceOptimization).where(PerformanceOptimization.agent_id == agent_id)
        if status:
            query = query.where(PerformanceOptimization.status == status)
        if target_metric:
            # An unknown metric name raises ValueError and surfaces as 500.
            query = query.where(PerformanceOptimization.target_metric == PerformanceMetric(target_metric))
        optimizations = session.exec(
            query.order_by(PerformanceOptimization.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "optimization_id": optimization.optimization_id,
                "agent_id": optimization.agent_id,
                "optimization_type": optimization.optimization_type,
                "target_metric": optimization.target_metric.value,
                "status": optimization.status,
                "baseline_performance": optimization.baseline_performance,
                "optimized_performance": optimization.optimized_performance,
                "baseline_cost": optimization.baseline_cost,
                "optimized_cost": optimization.optimized_cost,
                "performance_improvement": optimization.performance_improvement,
                "resource_savings": optimization.resource_savings,
                "cost_savings": optimization.cost_savings,
                "overall_efficiency_gain": optimization.overall_efficiency_gain,
                "optimization_duration": optimization.optimization_duration,
                "iterations_required": optimization.iterations_required,
                "convergence_achieved": optimization.convergence_achieved,
                "created_at": optimization.created_at.isoformat(),
                "completed_at": optimization.completed_at.isoformat() if optimization.completed_at else None
            }
            for optimization in optimizations
        ]
    except Exception as e:
        logger.error(f"Error getting optimization history for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/capabilities", response_model=CapabilityResponse)
async def create_capability(
    capability_request: CapabilityRequest,
    session: SessionDep
) -> CapabilityResponse:
    """Create and persist an agent capability.

    Proficiency is derived from the 0-10 skill level as ``skill_level / 10``,
    capped at 1.0.
    """
    # `uuid4` is not in this module's imports; without this local import the
    # endpoint raised NameError when generating the capability id.
    from uuid import uuid4

    try:
        capability_id = f"cap_{uuid4().hex[:8]}"
        capability = AgentCapability(
            capability_id=capability_id,
            agent_id=capability_request.agent_id,
            capability_name=capability_request.capability_name,
            capability_type=capability_request.capability_type,
            domain_area=capability_request.domain_area,
            skill_level=capability_request.skill_level,
            specialization_areas=capability_request.specialization_areas,
            proficiency_score=min(1.0, capability_request.skill_level / 10.0),
            created_at=datetime.utcnow()
        )
        session.add(capability)
        session.commit()
        session.refresh(capability)
        return CapabilityResponse(
            capability_id=capability.capability_id,
            agent_id=capability.agent_id,
            capability_name=capability.capability_name,
            capability_type=capability.capability_type,
            domain_area=capability.domain_area,
            skill_level=capability.skill_level,
            proficiency_score=capability.proficiency_score,
            specialization_areas=capability.specialization_areas,
            status=capability.status,
            created_at=capability.created_at.isoformat()
        )
    except Exception as e:
        logger.error(f"Error creating capability: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/capabilities/{agent_id}")
async def get_agent_capabilities(
    agent_id: str,
    session: SessionDep,
    capability_type: Optional[str] = Query(default=None, description="Filter by capability type"),
    domain_area: Optional[str] = Query(default=None, description="Filter by domain area"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
) -> List[Dict[str, Any]]:
    """Get an agent's capabilities, highest skill level first.

    Fixes vs. the original: ``session`` moved ahead of the defaulted query
    parameters (non-default after default was a SyntaxError), and ``select``
    is imported locally because it is missing from the module imports.
    """
    from sqlmodel import select

    try:
        query = select(AgentCapability).where(AgentCapability.agent_id == agent_id)
        if capability_type:
            query = query.where(AgentCapability.capability_type == capability_type)
        if domain_area:
            query = query.where(AgentCapability.domain_area == domain_area)
        capabilities = session.exec(
            query.order_by(AgentCapability.skill_level.desc()).limit(limit)
        ).all()
        return [
            {
                "capability_id": capability.capability_id,
                "agent_id": capability.agent_id,
                "capability_name": capability.capability_name,
                "capability_type": capability.capability_type,
                "domain_area": capability.domain_area,
                "skill_level": capability.skill_level,
                "proficiency_score": capability.proficiency_score,
                "experience_years": capability.experience_years,
                "success_rate": capability.success_rate,
                "average_quality": capability.average_quality,
                "learning_rate": capability.learning_rate,
                "adaptation_speed": capability.adaptation_speed,
                "specialization_areas": capability.specialization_areas,
                "sub_capabilities": capability.sub_capabilities,
                "tool_proficiency": capability.tool_proficiency,
                "certified": capability.certified,
                "certification_level": capability.certification_level,
                "status": capability.status,
                "acquired_at": capability.acquired_at.isoformat(),
                "last_improved": capability.last_improved.isoformat() if capability.last_improved else None
            }
            for capability in capabilities
        ]
    except Exception as e:
        logger.error(f"Error getting capabilities for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics/performance-summary")
async def get_performance_summary(
    session: SessionDep,
    agent_ids: List[str] = Query(default=[], description="List of agent IDs"),
    metric: Optional[str] = Query(default="overall_score", description="Metric to summarize"),
    period: str = Query(default="7d", description="Time period"),
) -> Dict[str, Any]:
    """Get an aggregate performance summary for a set of agents.

    Fixes vs. the original:
    - ``session`` moved ahead of the defaulted query parameters (non-default
      after default was a SyntaxError);
    - ``select`` imported locally (missing from the module imports);
    - the specialization-distribution helper is a module-level function, so it
      is called directly instead of via an undefined ``self``.
    NOTE(review): the ``metric`` parameter is accepted but currently unused by
    the aggregation below; kept for interface compatibility.
    """
    from sqlmodel import select

    try:
        if not agent_ids:
            # No explicit filter: summarize every profiled agent.
            profiles = session.exec(select(AgentPerformanceProfile)).all()
            agent_ids = [p.agent_id for p in profiles]
        summaries = []
        for agent_id in agent_ids:
            profile = session.exec(
                select(AgentPerformanceProfile).where(AgentPerformanceProfile.agent_id == agent_id)
            ).first()
            if profile:
                summaries.append({
                    "agent_id": agent_id,
                    "overall_score": profile.overall_score,
                    "performance_metrics": profile.performance_metrics,
                    "resource_efficiency": profile.resource_efficiency,
                    "cost_per_task": profile.cost_per_task,
                    "throughput": profile.throughput,
                    "average_latency": profile.average_latency,
                    "specialization_areas": profile.specialization_areas,
                    "last_assessed": profile.last_assessed.isoformat() if profile.last_assessed else None
                })
        if summaries:
            overall_scores = [s["overall_score"] for s in summaries]
            avg_score = sum(overall_scores) / len(overall_scores)
            return {
                "period": period,
                "agent_count": len(summaries),
                "average_score": avg_score,
                "top_performers": sorted(summaries, key=lambda x: x["overall_score"], reverse=True)[:10],
                "performance_distribution": {
                    "excellent": len([s for s in summaries if s["overall_score"] >= 80]),
                    "good": len([s for s in summaries if 60 <= s["overall_score"] < 80]),
                    "average": len([s for s in summaries if 40 <= s["overall_score"] < 60]),
                    "below_average": len([s for s in summaries if s["overall_score"] < 40])
                },
                # Module-level helper; the original `self.` call was a NameError.
                "specialization_distribution": calculate_specialization_distribution(summaries)
            }
        return {
            "period": period,
            "agent_count": 0,
            "average_score": 0.0,
            "top_performers": [],
            "performance_distribution": {},
            "specialization_distribution": {}
        }
    except Exception as e:
        logger.error(f"Error getting performance summary: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
def calculate_specialization_distribution(summaries: List[Dict[str, Any]]) -> Dict[str, int]:
    """Count how many agent summaries list each specialization area."""
    tally: Dict[str, int] = {}
    # Flatten all specialization areas across summaries, then count.
    flattened = (area for entry in summaries for area in entry["specialization_areas"])
    for area in flattened:
        tally[area] = tally.get(area, 0) + 1
    return tally
@router.get("/health")
async def health_check() -> Dict[str, Any]:
    """Report liveness of the agent performance service and its components."""
    # Every sub-service is reported as operational; this endpoint performs
    # no probing and always answers "healthy" (same as the original).
    component_states = {
        name: "operational"
        for name in (
            "meta_learning_engine",
            "resource_manager",
            "performance_optimizer",
            "performance_service",
        )
    }
    return {
        "status": "healthy",
        "timestamp": datetime.utcnow().isoformat(),
        "version": "1.0.0",
        "services": component_states,
    }

View File

@@ -0,0 +1,804 @@
"""
Marketplace Analytics API Endpoints
REST API for analytics, insights, reporting, and dashboards
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.analytics_service import MarketplaceAnalytics
from ..domain.analytics import (
MarketMetric, MarketInsight, AnalyticsReport, DashboardConfig,
AnalyticsPeriod, MetricType, InsightType, ReportType
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/analytics", tags=["analytics"])
# Pydantic models for API requests/responses
class MetricResponse(BaseModel):
"""Response model for market metric"""
metric_name: str
metric_type: str
period_type: str
value: float
previous_value: Optional[float]
change_percentage: Optional[float]
unit: str
category: str
recorded_at: str
period_start: str
period_end: str
breakdown: Dict[str, Any]
comparisons: Dict[str, Any]
class InsightResponse(BaseModel):
"""Response model for market insight"""
id: str
insight_type: str
title: str
description: str
confidence_score: float
impact_level: str
related_metrics: List[str]
time_horizon: str
recommendations: List[str]
suggested_actions: List[Dict[str, Any]]
created_at: str
expires_at: Optional[str]
insight_data: Dict[str, Any]
class DashboardResponse(BaseModel):
"""Response model for dashboard configuration"""
dashboard_id: str
name: str
description: str
dashboard_type: str
layout: Dict[str, Any]
widgets: List[Dict[str, Any]]
filters: List[Dict[str, Any]]
refresh_interval: int
auto_refresh: bool
owner_id: str
status: str
created_at: str
updated_at: str
class ReportRequest(BaseModel):
"""Request model for generating analytics report"""
report_type: ReportType
period_type: AnalyticsPeriod
start_date: str
end_date: str
filters: Dict[str, Any] = Field(default_factory=dict)
include_charts: bool = Field(default=True)
format: str = Field(default="json")
class MarketOverviewResponse(BaseModel):
    """Response model for market overview.

    Aggregated snapshot returned by the ``/overview`` endpoint; populated
    from ``MarketplaceAnalytics.get_market_overview``.
    """
    timestamp: str  # ISO-8601 timestamp of the snapshot
    period: str
    metrics: Dict[str, Any]
    insights: List[Dict[str, Any]]
    alerts: List[Dict[str, Any]]
    summary: Dict[str, Any]
class AnalyticsSummaryResponse(BaseModel):
    """Response model for analytics summary.

    Summary of one data-collection run as returned by
    ``MarketplaceAnalytics.collect_market_data``.
    """
    period_type: str  # AnalyticsPeriod enum value
    start_time: str  # ISO-8601 timestamp
    end_time: str  # ISO-8601 timestamp
    metrics_collected: int
    insights_generated: int
    market_data: Dict[str, Any]
# API Endpoints
@router.post("/data-collection", response_model=AnalyticsSummaryResponse)
async def collect_market_data(
period_type: AnalyticsPeriod = Query(default=AnalyticsPeriod.DAILY, description="Collection period"),
session: SessionDep
) -> AnalyticsSummaryResponse:
"""Collect market data for analytics"""
analytics_service = MarketplaceAnalytics(session)
try:
result = await analytics_service.collect_market_data(period_type)
return AnalyticsSummaryResponse(**result)
except Exception as e:
logger.error(f"Error collecting market data: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/insights", response_model=Dict[str, Any])
async def get_market_insights(
time_period: str = Query(default="daily", description="Time period: daily, weekly, monthly"),
insight_type: Optional[str] = Query(default=None, description="Filter by insight type"),
impact_level: Optional[str] = Query(default=None, description="Filter by impact level"),
limit: int = Query(default=20, ge=1, le=100, description="Number of results"),
session: SessionDep
) -> Dict[str, Any]:
"""Get market insights and analysis"""
analytics_service = MarketplaceAnalytics(session)
try:
result = await analytics_service.generate_insights(time_period)
# Apply filters if provided
if insight_type or impact_level:
filtered_insights = {}
for type_name, insights in result["insight_groups"].items():
filtered = insights
if insight_type:
filtered = [i for i in filtered if i["type"] == insight_type]
if impact_level:
filtered = [i for i in filtered if i["impact"] == impact_level]
if filtered:
filtered_insights[type_name] = filtered[:limit]
result["insight_groups"] = filtered_insights
result["total_insights"] = sum(len(insights) for insights in filtered_insights.values())
return result
except Exception as e:
logger.error(f"Error getting market insights: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/metrics", response_model=List[MetricResponse])
async def get_market_metrics(
period_type: AnalyticsPeriod = Query(default=AnalyticsPeriod.DAILY, description="Period type"),
metric_name: Optional[str] = Query(default=None, description="Filter by metric name"),
category: Optional[str] = Query(default=None, description="Filter by category"),
geographic_region: Optional[str] = Query(default=None, description="Filter by region"),
limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
session: SessionDep
) -> List[MetricResponse]:
"""Get market metrics with filters"""
try:
query = select(MarketMetric).where(MarketMetric.period_type == period_type)
if metric_name:
query = query.where(MarketMetric.metric_name == metric_name)
if category:
query = query.where(MarketMetric.category == category)
if geographic_region:
query = query.where(MarketMetric.geographic_region == geographic_region)
metrics = session.exec(
query.order_by(MarketMetric.recorded_at.desc()).limit(limit)
).all()
return [
MetricResponse(
metric_name=metric.metric_name,
metric_type=metric.metric_type.value,
period_type=metric.period_type.value,
value=metric.value,
previous_value=metric.previous_value,
change_percentage=metric.change_percentage,
unit=metric.unit,
category=metric.category,
recorded_at=metric.recorded_at.isoformat(),
period_start=metric.period_start.isoformat(),
period_end=metric.period_end.isoformat(),
breakdown=metric.breakdown,
comparisons=metric.comparisons
)
for metric in metrics
]
except Exception as e:
logger.error(f"Error getting market metrics: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/overview", response_model=MarketOverviewResponse)
async def get_market_overview(
session: SessionDep
) -> MarketOverviewResponse:
"""Get comprehensive market overview"""
analytics_service = MarketplaceAnalytics(session)
try:
overview = await analytics_service.get_market_overview()
return MarketOverviewResponse(**overview)
except Exception as e:
logger.error(f"Error getting market overview: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/dashboards", response_model=DashboardResponse)
async def create_dashboard(
owner_id: str,
dashboard_type: str = Query(default="default", description="Dashboard type: default, executive"),
name: Optional[str] = Query(default=None, description="Custom dashboard name"),
session: SessionDep
) -> DashboardResponse:
"""Create analytics dashboard"""
analytics_service = MarketplaceAnalytics(session)
try:
result = await analytics_service.create_dashboard(owner_id, dashboard_type)
# Get the created dashboard details
dashboard = session.exec(
select(DashboardConfig).where(DashboardConfig.dashboard_id == result["dashboard_id"])
).first()
if not dashboard:
raise HTTPException(status_code=404, detail="Dashboard not found after creation")
return DashboardResponse(
dashboard_id=dashboard.dashboard_id,
name=dashboard.name,
description=dashboard.description,
dashboard_type=dashboard.dashboard_type,
layout=dashboard.layout,
widgets=dashboard.widgets,
filters=dashboard.filters,
refresh_interval=dashboard.refresh_interval,
auto_refresh=dashboard.auto_refresh,
owner_id=dashboard.owner_id,
status=dashboard.status,
created_at=dashboard.created_at.isoformat(),
updated_at=dashboard.updated_at.isoformat()
)
except Exception as e:
logger.error(f"Error creating dashboard: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/dashboards/{dashboard_id}", response_model=DashboardResponse)
async def get_dashboard(
dashboard_id: str,
session: SessionDep
) -> DashboardResponse:
"""Get dashboard configuration"""
try:
dashboard = session.exec(
select(DashboardConfig).where(DashboardConfig.dashboard_id == dashboard_id)
).first()
if not dashboard:
raise HTTPException(status_code=404, detail="Dashboard not found")
return DashboardResponse(
dashboard_id=dashboard.dashboard_id,
name=dashboard.name,
description=dashboard.description,
dashboard_type=dashboard.dashboard_type,
layout=dashboard.layout,
widgets=dashboard.widgets,
filters=dashboard.filters,
refresh_interval=dashboard.refresh_interval,
auto_refresh=dashboard.auto_refresh,
owner_id=dashboard.owner_id,
status=dashboard.status,
created_at=dashboard.created_at.isoformat(),
updated_at=dashboard.updated_at.isoformat()
)
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting dashboard {dashboard_id}: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/dashboards")
async def list_dashboards(
owner_id: Optional[str] = Query(default=None, description="Filter by owner ID"),
dashboard_type: Optional[str] = Query(default=None, description="Filter by dashboard type"),
status: Optional[str] = Query(default=None, description="Filter by status"),
limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
session: SessionDep
) -> List[DashboardResponse]:
"""List analytics dashboards with filters"""
try:
query = select(DashboardConfig)
if owner_id:
query = query.where(DashboardConfig.owner_id == owner_id)
if dashboard_type:
query = query.where(DashboardConfig.dashboard_type == dashboard_type)
if status:
query = query.where(DashboardConfig.status == status)
dashboards = session.exec(
query.order_by(DashboardConfig.created_at.desc()).limit(limit)
).all()
return [
DashboardResponse(
dashboard_id=dashboard.dashboard_id,
name=dashboard.name,
description=dashboard.description,
dashboard_type=dashboard.dashboard_type,
layout=dashboard.layout,
widgets=dashboard.widgets,
filters=dashboard.filters,
refresh_interval=dashboard.refresh_interval,
auto_refresh=dashboard.auto_refresh,
owner_id=dashboard.owner_id,
status=dashboard.status,
created_at=dashboard.created_at.isoformat(),
updated_at=dashboard.updated_at.isoformat()
)
for dashboard in dashboards
]
except Exception as e:
logger.error(f"Error listing dashboards: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/reports", response_model=Dict[str, Any])
async def generate_report(
report_request: ReportRequest,
session: SessionDep
) -> Dict[str, Any]:
"""Generate analytics report"""
try:
# Parse dates
start_date = datetime.fromisoformat(report_request.start_date)
end_date = datetime.fromisoformat(report_request.end_date)
# Create report record
report = AnalyticsReport(
report_id=f"report_{uuid4().hex[:8]}",
report_type=report_request.report_type,
title=f"{report_request.report_type.value.title()} Report",
description=f"Analytics report for {report_request.period_type.value} period",
period_type=report_request.period_type,
start_date=start_date,
end_date=end_date,
filters=report_request.filters,
generated_by="api",
status="generated"
)
session.add(report)
session.commit()
session.refresh(report)
# Generate report content based on type
if report_request.report_type == ReportType.MARKET_OVERVIEW:
content = await self.generate_market_overview_report(
session, report_request.period_type, start_date, end_date, report_request.filters
)
elif report_request.report_type == ReportType.AGENT_PERFORMANCE:
content = await self.generate_agent_performance_report(
session, report_request.period_type, start_date, end_date, report_request.filters
)
elif report_request.report_type == ReportType.ECONOMIC_ANALYSIS:
content = await self.generate_economic_analysis_report(
session, report_request.period_type, start_date, end_date, report_request.filters
)
else:
content = {"error": "Report type not implemented yet"}
# Update report with content
report.summary = content.get("summary", "")
report.key_findings = content.get("key_findings", [])
report.recommendations = content.get("recommendations", [])
report.data_sections = content.get("data_sections", [])
report.charts = content.get("charts", []) if report_request.include_charts else []
report.tables = content.get("tables", [])
session.commit()
return {
"report_id": report.report_id,
"report_type": report.report_type.value,
"title": report.title,
"period": f"{report_request.period_type.value} from {report_request.start_date} to {report_request.end_date}",
"summary": report.summary,
"key_findings": report.key_findings,
"recommendations": report.recommendations,
"generated_at": report.generated_at.isoformat(),
"format": report_request.format
}
except Exception as e:
logger.error(f"Error generating report: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/reports/{report_id}")
async def get_report(
report_id: str,
format: str = Query(default="json", description="Response format: json, csv, pdf"),
session: SessionDep
) -> Dict[str, Any]:
"""Get generated analytics report"""
try:
report = session.exec(
select(AnalyticsReport).where(AnalyticsReport.report_id == report_id)
).first()
if not report:
raise HTTPException(status_code=404, detail="Report not found")
response_data = {
"report_id": report.report_id,
"report_type": report.report_type.value,
"title": report.title,
"description": report.description,
"period_type": report.period_type.value,
"start_date": report.start_date.isoformat(),
"end_date": report.end_date.isoformat(),
"summary": report.summary,
"key_findings": report.key_findings,
"recommendations": report.recommendations,
"data_sections": report.data_sections,
"charts": report.charts,
"tables": report.tables,
"generated_at": report.generated_at.isoformat(),
"status": report.status
}
# Format response based on requested format
if format == "json":
return response_data
elif format == "csv":
# Convert to CSV format (simplified)
return {"csv_data": self.convert_to_csv(response_data)}
elif format == "pdf":
# Convert to PDF format (simplified)
return {"pdf_url": f"/api/v1/analytics/reports/{report_id}/pdf"}
else:
return response_data
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting report {report_id}: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/alerts")
async def get_analytics_alerts(
severity: Optional[str] = Query(default=None, description="Filter by severity level"),
status: Optional[str] = Query(default="active", description="Filter by status"),
limit: int = Query(default=20, ge=1, le=100, description="Number of results"),
session: SessionDep
) -> List[Dict[str, Any]]:
"""Get analytics alerts"""
try:
from ..domain.analytics import AnalyticsAlert
query = select(AnalyticsAlert)
if severity:
query = query.where(AnalyticsAlert.severity == severity)
if status:
query = query.where(AnalyticsAlert.status == status)
alerts = session.exec(
query.order_by(AnalyticsAlert.created_at.desc()).limit(limit)
).all()
return [
{
"alert_id": alert.alert_id,
"rule_id": alert.rule_id,
"alert_type": alert.alert_type,
"title": alert.title,
"message": alert.message,
"severity": alert.severity,
"confidence": alert.confidence,
"trigger_value": alert.trigger_value,
"threshold_value": alert.threshold_value,
"affected_metrics": alert.affected_metrics,
"status": alert.status,
"created_at": alert.created_at.isoformat(),
"expires_at": alert.expires_at.isoformat() if alert.expires_at else None
}
for alert in alerts
]
except Exception as e:
logger.error(f"Error getting analytics alerts: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/kpi")
async def get_key_performance_indicators(
period_type: AnalyticsPeriod = Query(default=AnalyticsPeriod.DAILY, description="Period type"),
session: SessionDep
) -> Dict[str, Any]:
"""Get key performance indicators"""
try:
# Get latest metrics for KPIs
end_time = datetime.utcnow()
if period_type == AnalyticsPeriod.DAILY:
start_time = end_time - timedelta(days=1)
elif period_type == AnalyticsPeriod.WEEKLY:
start_time = end_time - timedelta(weeks=1)
elif period_type == AnalyticsPeriod.MONTHLY:
start_time = end_time - timedelta(days=30)
else:
start_time = end_time - timedelta(hours=1)
metrics = session.exec(
select(MarketMetric).where(
and_(
MarketMetric.period_type == period_type,
MarketMetric.period_start >= start_time,
MarketMetric.period_end <= end_time
)
).order_by(MarketMetric.recorded_at.desc())
).all()
# Calculate KPIs
kpis = {}
for metric in metrics:
if metric.metric_name in ["transaction_volume", "active_agents", "average_price", "success_rate"]:
kpis[metric.metric_name] = {
"value": metric.value,
"unit": metric.unit,
"change_percentage": metric.change_percentage,
"trend": "up" if metric.change_percentage and metric.change_percentage > 0 else "down",
"status": self.get_kpi_status(metric.metric_name, metric.value, metric.change_percentage)
}
return {
"period_type": period_type.value,
"start_time": start_time.isoformat(),
"end_time": end_time.isoformat(),
"kpis": kpis,
"overall_health": self.calculate_overall_health(kpis)
}
except Exception as e:
logger.error(f"Error getting KPIs: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
# Helper methods
async def generate_market_overview_report(
    session: Session,
    period_type: AnalyticsPeriod,
    start_date: datetime,
    end_date: datetime,
    filters: Dict[str, Any]
) -> Dict[str, Any]:
    """Build the content dict for a market-overview report.

    Queries ``MarketMetric`` rows whose period falls inside
    [start_date, end_date] and ``MarketInsight`` rows created in that
    window, then assembles summary text, key findings, recommendations,
    data sections, and chart payloads consumed by ``generate_report``.

    NOTE(review): ``filters`` is accepted but never applied -- confirm
    whether report filters should narrow these queries.
    """
    # Get metrics for the period
    metrics = session.exec(
        select(MarketMetric).where(
            and_(
                MarketMetric.period_type == period_type,
                MarketMetric.period_start >= start_date,
                MarketMetric.period_end <= end_date
            )
        ).order_by(MarketMetric.recorded_at.desc())
    ).all()
    # Get insights for the period
    insights = session.exec(
        select(MarketInsight).where(
            and_(
                MarketInsight.created_at >= start_date,
                MarketInsight.created_at <= end_date
            )
        ).order_by(MarketInsight.created_at.desc())
    ).all()
    return {
        "summary": f"Market overview for {period_type.value} period from {start_date.date()} to {end_date.date()}",
        # Each finding pulls the first matching metric, defaulting to 0 when absent
        "key_findings": [
            f"Total transaction volume: {next((m.value for m in metrics if m.metric_name == 'transaction_volume'), 0):.2f} AITBC",
            f"Active agents: {next((int(m.value) for m in metrics if m.metric_name == 'active_agents'), 0)}",
            f"Average success rate: {next((m.value for m in metrics if m.metric_name == 'success_rate'), 0):.1f}%",
            f"Total insights generated: {len(insights)}"
        ],
        "recommendations": [
            "Monitor transaction volume trends for growth opportunities",
            "Focus on improving agent success rates",
            "Analyze geographic distribution for market expansion"
        ],
        # Metrics grouped by their category field for tabular display
        "data_sections": [
            {
                "title": "Transaction Metrics",
                "data": {
                    metric.metric_name: metric.value
                    for metric in metrics
                    if metric.category == "financial"
                }
            },
            {
                "title": "Agent Metrics",
                "data": {
                    metric.metric_name: metric.value
                    for metric in metrics
                    if metric.category == "agents"
                }
            }
        ],
        "charts": [
            {
                "type": "line",
                "title": "Transaction Volume Trend",
                "data": [m.value for m in metrics if m.metric_name == "transaction_volume"]
            },
            {
                "type": "pie",
                "title": "Agent Distribution by Tier",
                "data": next((m.breakdown.get("by_tier", {}) for m in metrics if m.metric_name == "active_agents"), {})
            }
        ]
    }
async def generate_agent_performance_report(
    session: Session,
    period_type: AnalyticsPeriod,
    start_date: datetime,
    end_date: datetime,
    filters: Dict[str, Any]
) -> Dict[str, Any]:
    """Build the content dict for an agent-performance report.

    Currently a mock: returns fixed findings/recommendations and does not
    query the database. The ``session``, date, and ``filters`` parameters
    are accepted for signature parity with the other report generators.
    """
    # Mock implementation - would query actual agent performance data
    return {
        "summary": f"Agent performance report for {period_type.value} period",
        "key_findings": [
            "Top performing agents show 20% higher success rates",
            "Agent retention rate improved by 5%",
            "Average agent earnings increased by 10%"
        ],
        "recommendations": [
            "Provide additional training for lower-performing agents",
            "Implement recognition programs for top performers",
            "Optimize agent matching algorithms"
        ],
        "data_sections": [
            {
                "title": "Performance Metrics",
                # Placeholder figures pending a real data source
                "data": {
                    "top_performers": 25,
                    "average_success_rate": 87.5,
                    "retention_rate": 92.0
                }
            }
        ]
    }
async def generate_economic_analysis_report(
    session: Session,
    period_type: AnalyticsPeriod,
    start_date: datetime,
    end_date: datetime,
    filters: Dict[str, Any]
) -> Dict[str, Any]:
    """Build the content dict for an economic-analysis report.

    Currently a mock: returns fixed findings/recommendations and does not
    query the database. The ``session``, date, and ``filters`` parameters
    are accepted for signature parity with the other report generators.
    """
    # Mock implementation - would query actual economic data
    return {
        "summary": f"Economic analysis for {period_type.value} period",
        "key_findings": [
            "Market showed 15% growth in transaction volume",
            "Price stability maintained across all regions",
            "Supply/demand balance improved by 10%"
        ],
        "recommendations": [
            "Continue current pricing strategies",
            "Focus on market expansion in high-growth regions",
            "Monitor supply/demand ratios for optimization"
        ],
        "data_sections": [
            {
                "title": "Economic Indicators",
                # Placeholder figures pending a real data source
                "data": {
                    "market_growth": 15.0,
                    "price_stability": 95.0,
                    "supply_demand_balance": 1.1
                }
            }
        ]
    }
def get_kpi_status(metric_name: str, value: float, change_percentage: Optional[float]) -> str:
    """Classify a KPI as "excellent"/"good"/"fair"/"poor".

    Success rate is graded on absolute value; transaction volume on its
    change percentage (a missing or zero change reads as "fair"); every
    other metric defaults to "good".
    """
    if metric_name == "success_rate":
        # Absolute thresholds, highest first
        for floor, label in ((90, "excellent"), (80, "good"), (70, "fair")):
            if value >= floor:
                return label
        return "poor"
    if metric_name == "transaction_volume":
        # None and 0 both fall through to "fair", matching truthiness checks
        if not change_percentage:
            return "fair"
        if change_percentage > 10:
            return "excellent"
        if change_percentage > 0:
            return "good"
        if change_percentage < -10:
            return "poor"
        return "fair"
    return "good"
def calculate_overall_health(kpis: Dict[str, Any]) -> str:
    """Aggregate per-KPI statuses into one overall market health label.

    "excellent" when >=60% of KPIs are excellent, "good" when >=70% are
    excellent or good, "poor" when >=30% are poor, otherwise "fair";
    "unknown" when no KPIs were supplied.
    """
    if not kpis:
        return "unknown"
    # Tally each status label; missing statuses count as "fair"
    tally: Dict[str, int] = {}
    for entry in kpis.values():
        label = entry.get("status", "fair")
        tally[label] = tally.get(label, 0) + 1
    total = len(kpis)
    excellent = tally.get("excellent", 0)
    if excellent >= total * 0.6:
        return "excellent"
    if excellent + tally.get("good", 0) >= total * 0.7:
        return "good"
    if tally.get("poor", 0) >= total * 0.3:
        return "poor"
    return "fair"
def convert_to_csv(data: Dict[str, Any]) -> str:
    """Render the ``kpis`` section of a report/KPI dict as a CSV string.

    Emits a fixed header row; inputs without a ``kpis`` key produce the
    header only. Values are not escaped -- this is a simplified export.
    """
    rows = ["Metric,Value,Unit,Change,Trend,Status"]
    for name, kpi in data.get("kpis", {}).items():
        rows.append(
            f"{name},{kpi.get('value', '')},{kpi.get('unit', '')},"
            f"{kpi.get('change_percentage', '')}%,{kpi.get('trend', '')},"
            f"{kpi.get('status', '')}"
        )
    return "\n".join(rows)

View File

@@ -0,0 +1,843 @@
"""
Certification and Partnership API Endpoints
REST API for agent certification, partnership programs, and badge system
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.certification_service import (
CertificationAndPartnershipService, CertificationSystem, PartnershipManager, BadgeSystem
)
from ..domain.certification import (
AgentCertification, CertificationRequirement, VerificationRecord,
PartnershipProgram, AgentPartnership, AchievementBadge, AgentBadge,
CertificationLevel, CertificationStatus, VerificationType,
PartnershipType, BadgeType
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/certification", tags=["certification"])
# Pydantic models for API requests/responses
class CertificationRequest(BaseModel):
    """Request model for agent certification (POST /certify)."""
    agent_id: str
    level: CertificationLevel  # target certification level
    certification_type: str = Field(default="standard", description="Certification type")
    issued_by: str = Field(description="Who is issuing the certification")
class CertificationResponse(BaseModel):
    """Response model for agent certification.

    Serialized form of an ``AgentCertification`` row; enum members are
    rendered as plain strings and timestamps as ISO-8601 strings.
    """
    certification_id: str
    agent_id: str
    certification_level: str  # CertificationLevel enum value
    certification_type: str
    status: str  # CertificationStatus enum value
    issued_by: str
    issued_at: str  # ISO-8601 timestamp
    expires_at: Optional[str]  # ISO-8601 timestamp, if the certification expires
    verification_hash: str
    requirements_met: List[str]
    granted_privileges: List[str]
    access_levels: List[str]
class PartnershipApplicationRequest(BaseModel):
    """Request model for partnership application (POST /partnerships/apply)."""
    agent_id: str
    program_id: str  # identifier of an existing PartnershipProgram
    application_data: Dict[str, Any] = Field(default_factory=dict, description="Application data")
class PartnershipResponse(BaseModel):
    """Response model for partnership.

    Serialized form of an ``AgentPartnership`` row; enum members are
    rendered as plain strings and timestamps as ISO-8601 strings.
    """
    partnership_id: str
    agent_id: str
    program_id: str
    partnership_type: str  # PartnershipType enum value
    current_tier: str
    status: str
    applied_at: str  # ISO-8601 timestamp
    approved_at: Optional[str]  # ISO-8601 timestamp, once approved
    performance_score: float
    total_earnings: float
    earned_benefits: List[str]
class BadgeCreationRequest(BaseModel):
    """Request model for badge creation (POST /badges)."""
    badge_name: str
    badge_type: BadgeType
    description: str
    criteria: Dict[str, Any] = Field(description="Badge criteria and thresholds")
    created_by: str
class BadgeAwardRequest(BaseModel):
    """Request model for awarding an existing badge to an agent."""
    agent_id: str
    badge_id: str  # identifier of an existing AchievementBadge
    awarded_by: str
    award_reason: str = Field(default="", description="Reason for awarding badge")
    context: Dict[str, Any] = Field(default_factory=dict, description="Award context")
class BadgeResponse(BaseModel):
    """Response model for badge.

    Combined view of a badge definition and its award to an agent; the
    ``awarded_at`` timestamp is an ISO-8601 string.
    """
    badge_id: str
    badge_name: str
    badge_type: str  # BadgeType enum value
    description: str
    rarity: str
    point_value: int
    category: str
    awarded_at: str  # ISO-8601 timestamp
    is_featured: bool
    badge_icon: str
class AgentCertificationSummary(BaseModel):
    """Aggregated certification/partnership/badge summary for one agent."""
    agent_id: str
    certifications: Dict[str, Any]
    partnerships: Dict[str, Any]
    badges: Dict[str, Any]
    verifications: Dict[str, Any]
# API Endpoints
@router.post("/certify", response_model=CertificationResponse)
async def certify_agent(
certification_request: CertificationRequest,
session: SessionDep
) -> CertificationResponse:
"""Certify an agent at a specific level"""
certification_service = CertificationAndPartnershipService(session)
try:
success, certification, errors = await certification_service.certification_system.certify_agent(
session=session,
agent_id=certification_request.agent_id,
level=certification_request.level,
issued_by=certification_request.issued_by,
certification_type=certification_request.certification_type
)
if not success:
raise HTTPException(status_code=400, detail=f"Certification failed: {'; '.join(errors)}")
return CertificationResponse(
certification_id=certification.certification_id,
agent_id=certification.agent_id,
certification_level=certification.certification_level.value,
certification_type=certification.certification_type,
status=certification.status.value,
issued_by=certification.issued_by,
issued_at=certification.issued_at.isoformat(),
expires_at=certification.expires_at.isoformat() if certification.expires_at else None,
verification_hash=certification.verification_hash,
requirements_met=certification.requirements_met,
granted_privileges=certification.granted_privileges,
access_levels=certification.access_levels
)
except HTTPException:
raise
except Exception as e:
logger.error(f"Error certifying agent: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/certifications/{certification_id}/renew")
async def renew_certification(
certification_id: str,
renewed_by: str,
session: SessionDep
) -> Dict[str, Any]:
"""Renew an existing certification"""
certification_service = CertificationAndPartnershipService(session)
try:
success, message = await certification_service.certification_system.renew_certification(
session=session,
certification_id=certification_id,
renewed_by=renewed_by
)
if not success:
raise HTTPException(status_code=400, detail=message)
return {
"success": True,
"message": message,
"certification_id": certification_id
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error renewing certification: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/certifications/{agent_id}")
async def get_agent_certifications(
agent_id: str,
status: Optional[str] = Query(default=None, description="Filter by status"),
session: SessionDep
) -> List[CertificationResponse]:
"""Get certifications for an agent"""
try:
query = select(AgentCertification).where(AgentCertification.agent_id == agent_id)
if status:
query = query.where(AgentCertification.status == CertificationStatus(status))
certifications = session.exec(
query.order_by(AgentCertification.issued_at.desc())
).all()
return [
CertificationResponse(
certification_id=cert.certification_id,
agent_id=cert.agent_id,
certification_level=cert.certification_level.value,
certification_type=cert.certification_type,
status=cert.status.value,
issued_by=cert.issued_by,
issued_at=cert.issued_at.isoformat(),
expires_at=cert.expires_at.isoformat() if cert.expires_at else None,
verification_hash=cert.verification_hash,
requirements_met=cert.requirements_met,
granted_privileges=cert.granted_privileges,
access_levels=cert.access_levels
)
for cert in certifications
]
except Exception as e:
logger.error(f"Error getting certifications for agent {agent_id}: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/partnerships/programs")
async def create_partnership_program(
program_name: str,
program_type: PartnershipType,
description: str,
created_by: str,
tier_levels: List[str] = Field(default_factory=lambda: ["basic", "premium"]),
max_participants: Optional[int] = Field(default=None, description="Maximum participants"),
launch_immediately: bool = Field(default=False, description="Launch program immediately"),
session: SessionDep
) -> Dict[str, Any]:
"""Create a new partnership program"""
partnership_manager = PartnershipManager()
try:
program = await partnership_manager.create_partnership_program(
session=session,
program_name=program_name,
program_type=program_type,
description=description,
created_by=created_by,
tier_levels=tier_levels,
max_participants=max_participants,
launch_immediately=launch_immediately
)
return {
"program_id": program.program_id,
"program_name": program.program_name,
"program_type": program.program_type.value,
"status": program.status,
"tier_levels": program.tier_levels,
"max_participants": program.max_participants,
"current_participants": program.current_participants,
"created_at": program.created_at.isoformat(),
"launched_at": program.launched_at.isoformat() if program.launched_at else None
}
except Exception as e:
logger.error(f"Error creating partnership program: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/partnerships/apply", response_model=PartnershipResponse)
async def apply_for_partnership(
application: PartnershipApplicationRequest,
session: SessionDep
) -> PartnershipResponse:
"""Apply for a partnership program"""
partnership_manager = PartnershipManager()
try:
success, partnership, errors = await partnership_manager.apply_for_partnership(
session=session,
agent_id=application.agent_id,
program_id=application.program_id,
application_data=application.application_data
)
if not success:
raise HTTPException(status_code=400, detail=f"Application failed: {'; '.join(errors)}")
return PartnershipResponse(
partnership_id=partnership.partnership_id,
agent_id=partnership.agent_id,
program_id=partnership.program_id,
partnership_type=partnership.partnership_type.value,
current_tier=partnership.current_tier,
status=partnership.status,
applied_at=partnership.applied_at.isoformat(),
approved_at=partnership.approved_at.isoformat() if partnership.approved_at else None,
performance_score=partnership.performance_score,
total_earnings=partnership.total_earnings,
earned_benefits=partnership.earned_benefits
)
except HTTPException:
raise
except Exception as e:
logger.error(f"Error applying for partnership: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/partnerships/{agent_id}")
async def get_agent_partnerships(
agent_id: str,
status: Optional[str] = Query(default=None, description="Filter by status"),
partnership_type: Optional[str] = Query(default=None, description="Filter by partnership type"),
session: SessionDep
) -> List[PartnershipResponse]:
"""Get partnerships for an agent"""
try:
query = select(AgentPartnership).where(AgentPartnership.agent_id == agent_id)
if status:
query = query.where(AgentPartnership.status == status)
if partnership_type:
query = query.where(AgentPartnership.partnership_type == PartnershipType(partnership_type))
partnerships = session.exec(
query.order_by(AgentPartnership.applied_at.desc())
).all()
return [
PartnershipResponse(
partnership_id=partner.partnership_id,
agent_id=partner.agent_id,
program_id=partner.program_id,
partnership_type=partner.partnership_type.value,
current_tier=partner.current_tier,
status=partner.status,
applied_at=partner.applied_at.isoformat(),
approved_at=partner.approved_at.isoformat() if partner.approved_at else None,
performance_score=partner.performance_score,
total_earnings=partner.total_earnings,
earned_benefits=partner.earned_benefits
)
for partner in partnerships
]
except Exception as e:
logger.error(f"Error getting partnerships for agent {agent_id}: {str(e)}")
raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/partnerships/programs")
async def list_partnership_programs(
    session: SessionDep,
    partnership_type: Optional[str] = Query(default=None, description="Filter by partnership type"),
    status: Optional[str] = Query(default="active", description="Filter by status"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """List available partnership programs.

    Fix: `session` moved before the defaulted Query parameters; the previous
    ordering was a Python SyntaxError.

    NOTE(review): this route is registered AFTER `/partnerships/{agent_id}`,
    so FastAPI matches GET /partnerships/programs against the dynamic route
    (agent_id="programs") and this handler is unreachable. Declaring this
    route before the dynamic one requires moving it in the file — confirm
    and reorder.
    """
    try:
        query = select(PartnershipProgram)
        if partnership_type:
            query = query.where(PartnershipProgram.program_type == PartnershipType(partnership_type))
        if status:
            query = query.where(PartnershipProgram.status == status)
        programs = session.exec(
            query.order_by(PartnershipProgram.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "program_id": program.program_id,
                "program_name": program.program_name,
                "program_type": program.program_type.value,
                "description": program.description,
                "status": program.status,
                "tier_levels": program.tier_levels,
                "max_participants": program.max_participants,
                "current_participants": program.current_participants,
                "created_at": program.created_at.isoformat(),
                "launched_at": program.launched_at.isoformat() if program.launched_at else None,
                "expires_at": program.expires_at.isoformat() if program.expires_at else None
            }
            for program in programs
        ]
    except Exception as e:
        logger.error(f"Error listing partnership programs: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/badges")
async def create_badge(
    badge_request: BadgeCreationRequest,
    session: SessionDep
) -> Dict[str, Any]:
    """Create a new achievement badge.

    Delegates creation to BadgeSystem.create_badge and returns a plain dict
    of the persisted badge's fields. Any failure is logged and mapped to a
    generic HTTP 500 (details are kept server-side).
    """
    badge_system = BadgeSystem()
    try:
        badge = await badge_system.create_badge(
            session=session,
            badge_name=badge_request.badge_name,
            badge_type=badge_request.badge_type,
            description=badge_request.description,
            criteria=badge_request.criteria,
            created_by=badge_request.created_by
        )
        # Serialize datetimes to ISO-8601; available_until is optional.
        return {
            "badge_id": badge.badge_id,
            "badge_name": badge.badge_name,
            "badge_type": badge.badge_type.value,
            "description": badge.description,
            "rarity": badge.rarity,
            "point_value": badge.point_value,
            "category": badge.category,
            "is_active": badge.is_active,
            "created_at": badge.created_at.isoformat(),
            "available_from": badge.available_from.isoformat(),
            "available_until": badge.available_until.isoformat() if badge.available_until else None
        }
    except Exception as e:
        logger.error(f"Error creating badge: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/badges/award", response_model=BadgeResponse)
async def award_badge(
    badge_request: BadgeAwardRequest,
    session: SessionDep
) -> BadgeResponse:
    """Award a badge to an agent.

    BadgeSystem.award_badge returns (success, agent_badge, message); a
    False success is mapped to HTTP 400 with the system's message, other
    failures to a generic HTTP 500.
    """
    badge_system = BadgeSystem()
    try:
        success, agent_badge, message = await badge_system.award_badge(
            session=session,
            agent_id=badge_request.agent_id,
            badge_id=badge_request.badge_id,
            awarded_by=badge_request.awarded_by,
            award_reason=badge_request.award_reason,
            context=badge_request.context
        )
        if not success:
            raise HTTPException(status_code=400, detail=message)
        # Get badge details
        # NOTE(review): assumes the badge row exists after a successful award;
        # if .first() returned None the attribute access below would raise and
        # be reported as a 500 — confirm BadgeSystem guarantees this.
        badge = session.exec(
            select(AchievementBadge).where(AchievementBadge.badge_id == badge_request.badge_id)
        ).first()
        return BadgeResponse(
            badge_id=badge.badge_id,
            badge_name=badge.badge_name,
            badge_type=badge.badge_type.value,
            description=badge.description,
            rarity=badge.rarity,
            point_value=badge.point_value,
            category=badge.category,
            awarded_at=agent_badge.awarded_at.isoformat(),
            is_featured=agent_badge.is_featured,
            badge_icon=badge.badge_icon
        )
    except HTTPException:
        # Re-raise deliberate 400s untouched.
        raise
    except Exception as e:
        logger.error(f"Error awarding badge: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/badges/{agent_id}")
async def get_agent_badges(
    agent_id: str,
    session: SessionDep,
    badge_type: Optional[str] = Query(default=None, description="Filter by badge type"),
    category: Optional[str] = Query(default=None, description="Filter by category"),
    featured_only: bool = Query(default=False, description="Only featured badges"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[BadgeResponse]:
    """Get badges for an agent.

    Fixes:
    - `session` moved before the defaulted Query parameters (previous
      ordering was a Python SyntaxError).
    - The badge table is now joined exactly once even when both
      `badge_type` and `category` are supplied; the old code called
      `.join(AchievementBadge)` twice in that case, which SQLAlchemy
      rejects as a duplicate join.
    """
    try:
        query = select(AgentBadge).where(AgentBadge.agent_id == agent_id)
        # Join once if any badge-table filter is requested.
        if badge_type or category:
            query = query.join(AchievementBadge)
        if badge_type:
            query = query.where(AchievementBadge.badge_type == BadgeType(badge_type))
        if category:
            query = query.where(AchievementBadge.category == category)
        if featured_only:
            query = query.where(AgentBadge.is_featured == True)
        agent_badges = session.exec(
            query.order_by(AgentBadge.awarded_at.desc()).limit(limit)
        ).all()
        # Bulk-load badge metadata for response assembly (avoids N+1 queries).
        badge_ids = [ab.badge_id for ab in agent_badges]
        badges = session.exec(
            select(AchievementBadge).where(AchievementBadge.badge_id.in_(badge_ids))
        ).all()
        badge_map = {badge.badge_id: badge for badge in badges}
        return [
            BadgeResponse(
                badge_id=ab.badge_id,
                badge_name=badge_map[ab.badge_id].badge_name,
                badge_type=badge_map[ab.badge_id].badge_type.value,
                description=badge_map[ab.badge_id].description,
                rarity=badge_map[ab.badge_id].rarity,
                point_value=badge_map[ab.badge_id].point_value,
                category=badge_map[ab.badge_id].category,
                awarded_at=ab.awarded_at.isoformat(),
                is_featured=ab.is_featured,
                badge_icon=badge_map[ab.badge_id].badge_icon
            )
            for ab in agent_badges if ab.badge_id in badge_map
        ]
    except Exception as e:
        logger.error(f"Error getting badges for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/badges")
async def list_available_badges(
    session: SessionDep,
    badge_type: Optional[str] = Query(default=None, description="Filter by badge type"),
    category: Optional[str] = Query(default=None, description="Filter by category"),
    rarity: Optional[str] = Query(default=None, description="Filter by rarity"),
    active_only: bool = Query(default=True, description="Only active badges"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """List available badges.

    Fix: `session` moved before the defaulted Query parameters; the previous
    ordering was a Python SyntaxError. FastAPI injects by name, so HTTP
    callers are unaffected.
    """
    try:
        query = select(AchievementBadge)
        if badge_type:
            query = query.where(AchievementBadge.badge_type == BadgeType(badge_type))
        if category:
            query = query.where(AchievementBadge.category == category)
        if rarity:
            query = query.where(AchievementBadge.rarity == rarity)
        if active_only:
            query = query.where(AchievementBadge.is_active == True)
        badges = session.exec(
            query.order_by(AchievementBadge.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "badge_id": badge.badge_id,
                "badge_name": badge.badge_name,
                "badge_type": badge.badge_type.value,
                "description": badge.description,
                "rarity": badge.rarity,
                "point_value": badge.point_value,
                "category": badge.category,
                "is_active": badge.is_active,
                "is_limited": badge.is_limited,
                "max_awards": badge.max_awards,
                "current_awards": badge.current_awards,
                "created_at": badge.created_at.isoformat(),
                "available_from": badge.available_from.isoformat(),
                "available_until": badge.available_until.isoformat() if badge.available_until else None
            }
            for badge in badges
        ]
    except Exception as e:
        logger.error(f"Error listing available badges: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/badges/{agent_id}/check-automatic")
async def check_automatic_badges(
    agent_id: str,
    session: SessionDep
) -> Dict[str, Any]:
    """Check and award automatic badges for an agent"""
    badge_system = BadgeSystem()
    try:
        awarded = await badge_system.check_and_award_automatic_badges(session, agent_id)
    except Exception as e:
        logger.error(f"Error checking automatic badges for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
    # Report what was awarded, how many, and when the check ran.
    return {
        "agent_id": agent_id,
        "badges_awarded": awarded,
        "total_awarded": len(awarded),
        "checked_at": datetime.utcnow().isoformat()
    }
@router.get("/summary/{agent_id}", response_model=AgentCertificationSummary)
async def get_agent_summary(
    agent_id: str,
    session: SessionDep
) -> AgentCertificationSummary:
    """Get comprehensive certification and partnership summary for an agent"""
    service = CertificationAndPartnershipService(session)
    try:
        data = await service.get_agent_certification_summary(agent_id)
        # The service returns a dict shaped like the response model.
        return AgentCertificationSummary(**data)
    except Exception as e:
        logger.error(f"Error getting certification summary for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/verification/{agent_id}")
async def get_verification_records(
    agent_id: str,
    session: SessionDep,
    verification_type: Optional[str] = Query(default=None, description="Filter by verification type"),
    status: Optional[str] = Query(default=None, description="Filter by status"),
    limit: int = Query(default=20, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """Get verification records for an agent.

    Fix: `session` moved before the defaulted Query parameters; the previous
    ordering was a Python SyntaxError.
    """
    try:
        query = select(VerificationRecord).where(VerificationRecord.agent_id == agent_id)
        if verification_type:
            query = query.where(VerificationRecord.verification_type == VerificationType(verification_type))
        if status:
            query = query.where(VerificationRecord.status == status)
        verifications = session.exec(
            query.order_by(VerificationRecord.requested_at.desc()).limit(limit)
        ).all()
        return [
            {
                "verification_id": verification.verification_id,
                "verification_type": verification.verification_type.value,
                "verification_method": verification.verification_method,
                "status": verification.status,
                "requested_by": verification.requested_by,
                "requested_at": verification.requested_at.isoformat(),
                "started_at": verification.started_at.isoformat() if verification.started_at else None,
                "completed_at": verification.completed_at.isoformat() if verification.completed_at else None,
                "result_score": verification.result_score,
                "failure_reasons": verification.failure_reasons,
                "processing_time": verification.processing_time
            }
            for verification in verifications
        ]
    except Exception as e:
        logger.error(f"Error getting verification records for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/levels")
async def get_certification_levels(
    session: SessionDep
) -> List[Dict[str, Any]]:
    """Get available certification levels and requirements"""
    try:
        system = CertificationSystem()
        # Flatten the configured levels into serializable entries.
        entries = [
            {
                "level": lvl.value,
                "requirements": cfg['requirements'],
                "privileges": cfg['privileges'],
                "validity_days": cfg['validity_days'],
                "renewal_requirements": cfg['renewal_requirements']
            }
            for lvl, cfg in system.certification_levels.items()
        ]
        # Order from lowest to highest tier.
        rank = ['basic', 'intermediate', 'advanced', 'enterprise', 'premium']
        return sorted(entries, key=lambda entry: rank.index(entry['level']))
    except Exception as e:
        logger.error(f"Error getting certification levels: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/requirements")
async def get_certification_requirements(
    session: SessionDep,
    level: Optional[str] = Query(default=None, description="Filter by certification level"),
    verification_type: Optional[str] = Query(default=None, description="Filter by verification type")
) -> List[Dict[str, Any]]:
    """Get certification requirements.

    Fix: `session` moved before the defaulted Query parameters; the previous
    ordering was a Python SyntaxError.
    """
    try:
        query = select(CertificationRequirement)
        if level:
            query = query.where(CertificationRequirement.certification_level == CertificationLevel(level))
        if verification_type:
            query = query.where(CertificationRequirement.verification_type == VerificationType(verification_type))
        requirements = session.exec(
            query.order_by(CertificationRequirement.certification_level, CertificationRequirement.requirement_name)
        ).all()
        return [
            {
                "id": requirement.id,
                "certification_level": requirement.certification_level.value,
                "verification_type": requirement.verification_type.value,
                "requirement_name": requirement.requirement_name,
                "description": requirement.description,
                "criteria": requirement.criteria,
                "minimum_threshold": requirement.minimum_threshold,
                "maximum_threshold": requirement.maximum_threshold,
                "required_values": requirement.required_values,
                "verification_method": requirement.verification_method,
                "is_mandatory": requirement.is_mandatory,
                "weight": requirement.weight,
                "is_active": requirement.is_active
            }
            for requirement in requirements
        ]
    except Exception as e:
        logger.error(f"Error getting certification requirements: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/leaderboard")
async def get_certification_leaderboard(
    session: SessionDep,
    category: str = Query(default="highest_level", description="Leaderboard category"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """Get certification leaderboard.

    Fixes:
    - `session` moved before the defaulted Query parameters (previous
      ordering was a Python SyntaxError).
    - The three per-category query branches were byte-identical; collapsed
      into one query. Only the final sort key differs by category.
    - The level-order list is defined once instead of twice.
    """
    level_order = ['basic', 'intermediate', 'advanced', 'enterprise', 'premium']
    try:
        # All categories aggregate over the same set of active certifications.
        query = select(AgentCertification).where(
            AgentCertification.status == CertificationStatus.ACTIVE
        )
        certifications = session.exec(
            query.order_by(AgentCertification.issued_at.desc()).limit(limit * 2)  # Get more to account for duplicates
        ).all()
        # Group by agent and accumulate per-agent statistics.
        agent_scores: Dict[str, Dict[str, Any]] = {}
        for cert in certifications:
            entry = agent_scores.setdefault(cert.agent_id, {
                'agent_id': cert.agent_id,
                'highest_level': cert.certification_level.value,
                'certification_count': 0,
                'total_privileges': 0,
                'latest_certification': cert.issued_at
            })
            entry['certification_count'] += 1
            entry['total_privileges'] += len(cert.granted_privileges)
            # Track the agent's highest certification level.
            if level_order.index(cert.certification_level.value) > level_order.index(entry['highest_level']):
                entry['highest_level'] = cert.certification_level.value
            # Track the most recent certification timestamp.
            if cert.issued_at > entry['latest_certification']:
                entry['latest_certification'] = cert.issued_at
        # Choose the sort key for the requested category (default: privileges).
        if category == "highest_level":
            sort_key = lambda x: level_order.index(x['highest_level'])
        elif category == "most_certifications":
            sort_key = lambda x: x['certification_count']
        else:
            sort_key = lambda x: x['total_privileges']
        sorted_agents = sorted(agent_scores.values(), key=sort_key, reverse=True)
        return [
            {
                'rank': rank + 1,
                'agent_id': agent['agent_id'],
                'highest_level': agent['highest_level'],
                'certification_count': agent['certification_count'],
                'total_privileges': agent['total_privileges'],
                'latest_certification': agent['latest_certification'].isoformat()
            }
            for rank, agent in enumerate(sorted_agents[:limit])
        ]
    except Exception as e:
        logger.error(f"Error getting certification leaderboard: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")

View File

@@ -0,0 +1,225 @@
"""
Community and Developer Ecosystem API Endpoints
REST API for managing OpenClaw developer profiles, SDKs, solutions, and hackathons
"""
from datetime import datetime
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query, Body
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.community_service import (
DeveloperEcosystemService, ThirdPartySolutionService,
InnovationLabService, CommunityPlatformService
)
from ..domain.community import (
DeveloperProfile, AgentSolution, InnovationLab,
CommunityPost, Hackathon, DeveloperTier, SolutionStatus, LabStatus
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/community", tags=["community"])
# Models
class DeveloperProfileCreate(BaseModel):
    """Request payload for registering a developer profile."""
    user_id: str  # platform user this profile belongs to
    username: str  # public handle shown in the community
    bio: Optional[str] = None  # optional free-form biography
    skills: List[str] = Field(default_factory=list)  # skill tags
class SolutionPublishRequest(BaseModel):
    """Request payload for publishing a third-party agent solution."""
    developer_id: str  # publishing developer's id
    title: str
    description: str
    version: str = "1.0.0"  # semantic version of the solution
    capabilities: List[str] = Field(default_factory=list)
    frameworks: List[str] = Field(default_factory=list)
    price_model: str = "free"  # pricing scheme identifier
    price_amount: float = 0.0  # price; meaningful only for paid models
    metadata: Dict[str, Any] = Field(default_factory=dict)  # free-form extras
class LabProposalRequest(BaseModel):
    """Request payload for proposing an innovation lab."""
    title: str
    description: str
    research_area: str  # topic/domain of the research program
    funding_goal: float = 0.0  # target funding amount
    milestones: List[Dict[str, Any]] = Field(default_factory=list)  # planned deliverables
class PostCreateRequest(BaseModel):
    """Request payload for creating a community forum post."""
    title: str
    content: str
    category: str = "discussion"  # forum category
    tags: List[str] = Field(default_factory=list)
    parent_post_id: Optional[str] = None  # set when replying to another post
class HackathonCreateRequest(BaseModel):
    """Request payload for creating a hackathon.

    The four timestamp fields are plain strings; the expected format is not
    validated here — presumably ISO-8601, consumed by the service layer.
    """
    title: str
    description: str
    theme: str  # hackathon theme/topic
    sponsor: str = "AITBC Foundation"
    prize_pool: float = 0.0  # total prize amount
    registration_start: str  # registration window open
    registration_end: str  # registration window close
    event_start: str  # event begins
    event_end: str  # event ends
# Endpoints - Developer Ecosystem
@router.post("/developers", response_model=DeveloperProfile)
async def create_developer_profile(request: DeveloperProfileCreate, session: SessionDep):
    """Register a new developer in the OpenClaw ecosystem.

    Delegates to DeveloperEcosystemService; failures are logged and
    surfaced as HTTP 500 with the exception message.
    """
    service = DeveloperEcosystemService(session)
    try:
        profile = await service.create_developer_profile(
            user_id=request.user_id,
            username=request.username,
            bio=request.bio,
            skills=request.skills
        )
        return profile
    except Exception as e:
        logger.error(f"Error creating developer profile: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/developers/{developer_id}", response_model=DeveloperProfile)
async def get_developer_profile(developer_id: str, session: SessionDep):
    """Get a developer's profile and reputation"""
    ecosystem = DeveloperEcosystemService(session)
    result = await ecosystem.get_developer_profile(developer_id)
    # A falsy result means no such developer.
    if not result:
        raise HTTPException(status_code=404, detail="Developer not found")
    return result
@router.get("/sdk/latest")
async def get_latest_sdk(session: SessionDep):
    """Get information about the latest OpenClaw SDK releases"""
    ecosystem = DeveloperEcosystemService(session)
    info = await ecosystem.get_sdk_release_info()
    return info
# Endpoints - Marketplace Solutions
@router.post("/solutions/publish", response_model=AgentSolution)
async def publish_solution(request: SolutionPublishRequest, session: SessionDep):
    """Publish a new third-party agent solution to the marketplace.

    The request body minus `developer_id` is forwarded as the solution
    payload; failures are logged and mapped to HTTP 500.
    """
    service = ThirdPartySolutionService(session)
    try:
        solution = await service.publish_solution(request.developer_id, request.dict(exclude={'developer_id'}))
        return solution
    except Exception as e:
        logger.error(f"Error publishing solution: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/solutions", response_model=List[AgentSolution])
async def list_solutions(
    session: SessionDep,
    category: Optional[str] = None,
    limit: int = 50,
):
    """List available third-party agent solutions.

    Fix: the handler used `session` without declaring it, which raised a
    NameError on every request. It is now injected via SessionDep, placed
    before the defaulted parameters as Python requires.
    """
    service = ThirdPartySolutionService(session)
    return await service.list_published_solutions(category, limit)
@router.post("/solutions/{solution_id}/purchase")
async def purchase_solution(solution_id: str, session: SessionDep, buyer_id: str = Body(embed=True)):
    """Purchase or install a third-party solution.

    A ValueError from the service (unknown solution) maps to HTTP 404;
    any other failure to HTTP 500.
    """
    service = ThirdPartySolutionService(session)
    try:
        result = await service.purchase_solution(buyer_id, solution_id)
        return result
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
# Endpoints - Innovation Labs
@router.post("/labs/propose", response_model=InnovationLab)
async def propose_innovation_lab(
    session: SessionDep,
    researcher_id: str = Query(...),
    request: LabProposalRequest = Body(...),
):
    """Propose a new agent innovation lab or research program.

    Fix: the handler used `session` without declaring it (NameError on every
    request). It is now injected via SessionDep, placed before the defaulted
    parameters as Python requires.
    """
    service = InnovationLabService(session)
    try:
        lab = await service.propose_lab(researcher_id, request.dict())
        return lab
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/labs/{lab_id}/join")
async def join_innovation_lab(lab_id: str, session: SessionDep, developer_id: str = Body(embed=True)):
    """Join an active innovation lab.

    ValueError (unknown lab) maps to HTTP 404; other exceptions propagate
    to FastAPI's default 500 handler.
    """
    service = InnovationLabService(session)
    try:
        lab = await service.join_lab(lab_id, developer_id)
        return lab
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
@router.post("/labs/{lab_id}/fund")
async def fund_innovation_lab(lab_id: str, session: SessionDep, amount: float = Body(embed=True)):
    """Provide funding to a proposed innovation lab.

    ValueError (unknown lab) maps to HTTP 404; other exceptions propagate
    to FastAPI's default 500 handler. `amount` is not range-checked here —
    presumably the service validates it; confirm.
    """
    service = InnovationLabService(session)
    try:
        lab = await service.fund_lab(lab_id, amount)
        return lab
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
# Endpoints - Community Platform
@router.post("/platform/posts", response_model=CommunityPost)
async def create_community_post(
    session: SessionDep,
    author_id: str = Query(...),
    request: PostCreateRequest = Body(...),
):
    """Create a new post in the community forum.

    Fix: the handler used `session` without declaring it (NameError on every
    request). It is now injected via SessionDep, placed before the defaulted
    parameters as Python requires.
    """
    service = CommunityPlatformService(session)
    try:
        post = await service.create_post(author_id, request.dict())
        return post
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.get("/platform/feed", response_model=List[CommunityPost])
async def get_community_feed(
    session: SessionDep,
    category: Optional[str] = None,
    limit: int = 20,
):
    """Get the latest community posts and discussions.

    Fix: the handler used `session` without declaring it (NameError on every
    request). It is now injected via SessionDep.
    """
    service = CommunityPlatformService(session)
    return await service.get_feed(category, limit)
@router.post("/platform/posts/{post_id}/upvote")
async def upvote_community_post(post_id: str, session: SessionDep):
    """Upvote a community post (rewards author reputation).

    ValueError (unknown post) maps to HTTP 404. No per-user deduplication is
    visible here — presumably handled in the service; confirm.
    """
    service = CommunityPlatformService(session)
    try:
        post = await service.upvote_post(post_id)
        return post
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
# Endpoints - Hackathons
@router.post("/hackathons/create", response_model=Hackathon)
async def create_hackathon(
    session: SessionDep,
    organizer_id: str = Query(...),
    request: HackathonCreateRequest = Body(...),
):
    """Create a new agent innovation hackathon (requires high reputation).

    Fix: the handler used `session` without declaring it (NameError on every
    request). It is now injected via SessionDep, placed before the defaulted
    parameters as Python requires. A ValueError from the service (insufficient
    reputation) maps to HTTP 403; other failures to HTTP 500.
    """
    service = CommunityPlatformService(session)
    try:
        hackathon = await service.create_hackathon(organizer_id, request.dict())
        return hackathon
    except ValueError as e:
        raise HTTPException(status_code=403, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/hackathons/{hackathon_id}/register")
async def register_for_hackathon(hackathon_id: str, session: SessionDep, developer_id: str = Body(embed=True)):
    """Register for an upcoming or ongoing hackathon.

    ValueError from the service (unknown hackathon, closed registration, etc.)
    maps to HTTP 400; other exceptions propagate to FastAPI's default handler.
    """
    service = CommunityPlatformService(session)
    try:
        hackathon = await service.register_for_hackathon(hackathon_id, developer_id)
        return hackathon
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))

View File

@@ -1,384 +1,147 @@
"""
Governance Router - Proposal voting and parameter changes
Decentralized Governance API Endpoints
REST API for OpenClaw DAO voting, proposals, and governance analytics
"""
from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks
from datetime import datetime
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query, Body
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any, List
from datetime import datetime, timedelta
import json
import logging
from ..storage import SessionDep
from ..services.governance_service import GovernanceService
from ..domain.governance import (
GovernanceProfile, Proposal, Vote, DaoTreasury, TransparencyReport,
ProposalStatus, VoteType, GovernanceRole
)
logger = logging.getLogger(__name__)
from ..schemas import UserProfile
from ..storage import SessionDep
from ..storage.models_governance import GovernanceProposal, ProposalVote
from sqlmodel import select, func
router = APIRouter(prefix="/governance", tags=["governance"])
router = APIRouter(tags=["governance"])
# Models
class ProfileInitRequest(BaseModel):
user_id: str
initial_voting_power: float = 0.0
class DelegationRequest(BaseModel):
delegatee_id: str
class ProposalCreate(BaseModel):
"""Create a new governance proposal"""
title: str = Field(..., min_length=10, max_length=200)
description: str = Field(..., min_length=50, max_length=5000)
type: str = Field(..., pattern="^(parameter_change|protocol_upgrade|fund_allocation|policy_change)$")
target: Optional[Dict[str, Any]] = Field(default_factory=dict)
voting_period: int = Field(default=7, ge=1, le=30) # days
quorum_threshold: float = Field(default=0.1, ge=0.01, le=1.0) # 10% default
approval_threshold: float = Field(default=0.5, ge=0.01, le=1.0) # 50% default
class ProposalResponse(BaseModel):
"""Governance proposal response"""
id: str
class ProposalCreateRequest(BaseModel):
title: str
description: str
type: str
target: Dict[str, Any]
proposer: str
status: str
created_at: datetime
voting_deadline: datetime
quorum_threshold: float
approval_threshold: float
current_quorum: float
current_approval: float
votes_for: int
votes_against: int
votes_abstain: int
total_voting_power: int
category: str = "general"
execution_payload: Dict[str, Any] = Field(default_factory=dict)
quorum_required: float = 1000.0
voting_starts: Optional[str] = None
voting_ends: Optional[str] = None
class VoteRequest(BaseModel):
vote_type: VoteType
reason: Optional[str] = None
class VoteSubmit(BaseModel):
"""Submit a vote on a proposal"""
proposal_id: str
vote: str = Field(..., pattern="^(for|against|abstain)$")
reason: Optional[str] = Field(max_length=500)
# Endpoints - Profile & Delegation
@router.post("/profiles", response_model=GovernanceProfile)
async def init_governance_profile(request: ProfileInitRequest, session: SessionDep):
"""Initialize a governance profile for a user"""
service = GovernanceService(session)
try:
profile = await service.get_or_create_profile(request.user_id, request.initial_voting_power)
return profile
except Exception as e:
logger.error(f"Error creating governance profile: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/profiles/{profile_id}/delegate", response_model=GovernanceProfile)
async def delegate_voting_power(profile_id: str, request: DelegationRequest, session: SessionDep):
"""Delegate your voting power to another DAO member"""
service = GovernanceService(session)
try:
profile = await service.delegate_votes(profile_id, request.delegatee_id)
return profile
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/governance/proposals", response_model=ProposalResponse)
# Endpoints - Proposals
@router.post("/proposals", response_model=Proposal)
async def create_proposal(
proposal: ProposalCreate,
user: UserProfile,
session: SessionDep
) -> ProposalResponse:
"""Create a new governance proposal"""
# Check if user has voting power
voting_power = await get_user_voting_power(user.user_id, session)
if voting_power == 0:
raise HTTPException(403, "You must have voting power to create proposals")
# Create proposal
db_proposal = GovernanceProposal(
title=proposal.title,
description=proposal.description,
type=proposal.type,
target=proposal.target,
proposer=user.user_id,
status="active",
created_at=datetime.utcnow(),
voting_deadline=datetime.utcnow() + timedelta(days=proposal.voting_period),
quorum_threshold=proposal.quorum_threshold,
approval_threshold=proposal.approval_threshold
)
session.add(db_proposal)
session.commit()
session.refresh(db_proposal)
# Return response
return await format_proposal_response(db_proposal, session)
session: SessionDep,
proposer_id: str = Query(...),
request: ProposalCreateRequest = Body(...)
):
"""Submit a new governance proposal to the DAO"""
service = GovernanceService(session)
try:
proposal = await service.create_proposal(proposer_id, request.dict())
return proposal
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/governance/proposals", response_model=List[ProposalResponse])
async def list_proposals(
status: Optional[str] = None,
limit: int = 20,
offset: int = 0,
session: SessionDep = None
) -> List[ProposalResponse]:
"""List governance proposals"""
query = select(GovernanceProposal)
if status:
query = query.where(GovernanceProposal.status == status)
query = query.order_by(GovernanceProposal.created_at.desc())
query = query.offset(offset).limit(limit)
proposals = session.exec(query).all()
responses = []
for proposal in proposals:
formatted = await format_proposal_response(proposal, session)
responses.append(formatted)
return responses
@router.get("/governance/proposals/{proposal_id}", response_model=ProposalResponse)
async def get_proposal(
@router.post("/proposals/{proposal_id}/vote", response_model=Vote)
async def cast_vote(
proposal_id: str,
session: SessionDep
) -> ProposalResponse:
"""Get a specific proposal"""
proposal = session.get(GovernanceProposal, proposal_id)
if not proposal:
raise HTTPException(404, "Proposal not found")
return await format_proposal_response(proposal, session)
@router.post("/governance/vote")
async def submit_vote(
vote: VoteSubmit,
user: UserProfile,
session: SessionDep
) -> Dict[str, str]:
"""Submit a vote on a proposal"""
# Check proposal exists and is active
proposal = session.get(GovernanceProposal, vote.proposal_id)
if not proposal:
raise HTTPException(404, "Proposal not found")
if proposal.status != "active":
raise HTTPException(400, "Proposal is not active for voting")
if datetime.utcnow() > proposal.voting_deadline:
raise HTTPException(400, "Voting period has ended")
# Check user voting power
voting_power = await get_user_voting_power(user.user_id, session)
if voting_power == 0:
raise HTTPException(403, "You have no voting power")
# Check if already voted
existing = session.exec(
select(ProposalVote).where(
ProposalVote.proposal_id == vote.proposal_id,
ProposalVote.voter_id == user.user_id
session: SessionDep,
voter_id: str = Query(...),
request: VoteRequest = Body(...)
):
"""Cast a vote on an active proposal"""
service = GovernanceService(session)
try:
vote = await service.cast_vote(
proposal_id=proposal_id,
voter_id=voter_id,
vote_type=request.vote_type,
reason=request.reason
)
).first()
if existing:
# Update existing vote
existing.vote = vote.vote
existing.reason = vote.reason
existing.voted_at = datetime.utcnow()
else:
# Create new vote
db_vote = ProposalVote(
proposal_id=vote.proposal_id,
voter_id=user.user_id,
vote=vote.vote,
voting_power=voting_power,
reason=vote.reason,
voted_at=datetime.utcnow()
)
session.add(db_vote)
session.commit()
# Check if proposal should be finalized
if datetime.utcnow() >= proposal.voting_deadline:
await finalize_proposal(proposal, session)
return {"message": "Vote submitted successfully"}
return vote
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/proposals/{proposal_id}/process", response_model=Proposal)
async def process_proposal(proposal_id: str, session: SessionDep):
"""Manually trigger the lifecycle check of a proposal (e.g., tally votes when time ends)"""
service = GovernanceService(session)
try:
proposal = await service.process_proposal_lifecycle(proposal_id)
return proposal
except ValueError as e:
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.get("/governance/voting-power/{user_id}")
async def get_voting_power(
user_id: str,
session: SessionDep
) -> Dict[str, int]:
"""Get a user's voting power"""
power = await get_user_voting_power(user_id, session)
return {"user_id": user_id, "voting_power": power}
@router.get("/governance/parameters")
async def get_governance_parameters(
    session: SessionDep
) -> Dict[str, Any]:
    """Get current governance parameters."""
    # Static defaults for now; these would typically be stored in a config table.
    parameters: Dict[str, Any] = {
        "min_proposal_voting_power": 1000,
        "max_proposal_title_length": 200,
        "max_proposal_description_length": 5000,
        "default_voting_period_days": 7,
        "max_voting_period_days": 30,
        "min_quorum_threshold": 0.01,
        "max_quorum_threshold": 1.0,
        "min_approval_threshold": 0.01,
        "max_approval_threshold": 1.0,
        "execution_delay_hours": 24
    }
    return parameters
@router.post("/governance/execute/{proposal_id}")
async def execute_proposal(
    proposal_id: str,
    background_tasks: BackgroundTasks,
    session: SessionDep
) -> Dict[str, str]:
    """Execute a passed proposal after its 24-hour timelock.

    Args:
        proposal_id: Identifier of the proposal to execute.
        background_tasks: Queue handed to the type-specific executors.
        session: Database session dependency.

    Raises:
        HTTPException: 404 if unknown, 400 if not passed or still timelocked.

    NOTE(review): this block contained two interleaved implementations of the
    same endpoint (a merge artifact, including a stray
    `@router.post("/proposals/{proposal_id}/execute")` decorator and a
    GovernanceService-based fragment after the return). The complete direct
    implementation is kept; the dangling fragment was removed.
    """
    proposal = session.get(GovernanceProposal, proposal_id)
    if not proposal:
        raise HTTPException(404, "Proposal not found")
    if proposal.status != "passed":
        raise HTTPException(400, "Proposal must be passed to execute")
    # 24h execution delay after voting ends (matches execution_delay_hours).
    if datetime.utcnow() < proposal.voting_deadline + timedelta(hours=24):
        raise HTTPException(400, "Must wait 24 hours after voting ends to execute")
    # Dispatch to the executor matching the proposal type.
    if proposal.type == "parameter_change":
        await execute_parameter_change(proposal.target, background_tasks)
    elif proposal.type == "protocol_upgrade":
        await execute_protocol_upgrade(proposal.target, background_tasks)
    elif proposal.type == "fund_allocation":
        await execute_fund_allocation(proposal.target, background_tasks)
    elif proposal.type == "policy_change":
        await execute_policy_change(proposal.target, background_tasks)
    # Record execution.
    proposal.status = "executed"
    proposal.executed_at = datetime.utcnow()
    session.commit()
    return {"message": "Proposal executed successfully"}
# Helper functions
async def get_user_voting_power(user_id: str, session) -> int:
    """Calculate a user's voting power based on AITBC holdings.

    NOTE: placeholder — a real implementation would query the blockchain
    for the user's token balance.
    """
    MOCK_VOTING_POWER = 10000
    return MOCK_VOTING_POWER
async def format_proposal_response(proposal: GovernanceProposal, session) -> ProposalResponse:
    """Format a proposal for API response, attaching live vote tallies."""
    ballots = session.exec(
        select(ProposalVote).where(ProposalVote.proposal_id == proposal.id)
    ).all()

    # Tally ballot counts and voting-power weights in a single pass.
    counts = {"for": 0, "against": 0, "abstain": 0}
    weight_total = 0
    weight_for = 0
    for ballot in ballots:
        counts[ballot.vote] = counts.get(ballot.vote, 0) + 1
        weight_total += ballot.voting_power
        if ballot.vote == "for":
            weight_for += ballot.voting_power

    # Quorum is measured against the system-wide power; approval against cast power.
    system_power = await get_total_voting_power(session)
    current_quorum = weight_total / system_power if system_power > 0 else 0
    current_approval = weight_for / weight_total if weight_total > 0 else 0

    return ProposalResponse(
        id=proposal.id,
        title=proposal.title,
        description=proposal.description,
        type=proposal.type,
        target=proposal.target,
        proposer=proposal.proposer,
        status=proposal.status,
        created_at=proposal.created_at,
        voting_deadline=proposal.voting_deadline,
        quorum_threshold=proposal.quorum_threshold,
        approval_threshold=proposal.approval_threshold,
        current_quorum=current_quorum,
        current_approval=current_approval,
        votes_for=counts["for"],
        votes_against=counts["against"],
        votes_abstain=counts["abstain"],
        total_voting_power=system_power
    )
async def get_total_voting_power(session) -> int:
    """Get total voting power in the system.

    NOTE: placeholder — a real implementation would sum all AITBC tokens.
    """
    MOCK_TOTAL_SUPPLY = 1000000
    return MOCK_TOTAL_SUPPLY
async def finalize_proposal(proposal: GovernanceProposal, session):
    """Finalize a proposal after voting ends by applying quorum/approval thresholds."""
    ballots = session.exec(
        select(ProposalVote).where(ProposalVote.proposal_id == proposal.id)
    ).all()

    weight_total = sum(b.voting_power for b in ballots)
    weight_for = sum(b.voting_power for b in ballots if b.vote == "for")
    system_power = await get_total_voting_power(session)

    quorum = weight_total / system_power if system_power > 0 else 0
    approval = weight_for / weight_total if weight_total > 0 else 0

    # Quorum is checked before approval: a proposal with too little
    # participation is rejected regardless of how the votes split.
    if quorum < proposal.quorum_threshold:
        proposal.status = "rejected"
        proposal.rejection_reason = "Quorum not met"
    elif approval < proposal.approval_threshold:
        proposal.status = "rejected"
        proposal.rejection_reason = "Approval threshold not met"
    else:
        proposal.status = "passed"
    session.commit()
async def execute_parameter_change(target: Dict[str, Any], background_tasks):
    """Execute a parameter change proposal.

    Args:
        target: Proposal payload describing the parameters to change.
        background_tasks: FastAPI background-task queue (currently unused).
    """
    # This would update system parameters
    logger.info("Executing parameter change: %s", target)
    # Implementation would depend on the specific parameters
async def execute_protocol_upgrade(target: Dict[str, Any], background_tasks):
    """Execute a protocol upgrade proposal.

    Args:
        target: Proposal payload describing the upgrade.
        background_tasks: FastAPI background-task queue (currently unused).
    """
    # This would trigger a protocol upgrade
    logger.info("Executing protocol upgrade: %s", target)
    # Implementation would involve coordinating with nodes
async def execute_fund_allocation(target: Dict[str, Any], background_tasks):
    """Execute a fund allocation proposal.

    Args:
        target: Proposal payload describing the allocation.
        background_tasks: FastAPI background-task queue (currently unused).
    """
    # This would transfer funds from treasury
    logger.info("Executing fund allocation: %s", target)
    # Implementation would involve treasury management
async def execute_policy_change(target: Dict[str, Any], background_tasks):
    """Execute a policy change proposal.

    Args:
        target: Proposal payload describing the policy update.
        background_tasks: FastAPI background-task queue (currently unused).
    """
    # This would update system policies
    logger.info("Executing policy change: %s", target)
    # Implementation would depend on the specific policy
# Export the router
# Only the FastAPI router is part of this module's public API.
__all__ = ["router"]
# Endpoints - Analytics
@router.post("/analytics/reports", response_model=TransparencyReport)
async def generate_transparency_report(
    session: SessionDep,
    period: str = Query(..., description="e.g., 2026-Q1")
):
    """Generate a governance analytics and transparency report for a period."""
    try:
        # Report construction is delegated entirely to the governance service.
        return await GovernanceService(session).generate_transparency_report(period)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))

# ---------------------------------------------------------------------------
# (extraction artifact: diff-viewer markers — start of a new source file in
#  the original commit: Marketplace Performance Optimization API, 196 lines)
# ---------------------------------------------------------------------------
"""
Marketplace Performance Optimization API Endpoints
REST API for managing distributed processing, GPU optimization, caching, and scaling
"""
import asyncio
from datetime import datetime
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query, BackgroundTasks
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../../../gpu_acceleration"))
from marketplace_gpu_optimizer import MarketplaceGPUOptimizer
from aitbc.gpu_acceleration.parallel_processing.distributed_framework import DistributedProcessingCoordinator, DistributedTask, WorkerStatus
from aitbc.gpu_acceleration.parallel_processing.marketplace_cache_optimizer import MarketplaceDataOptimizer
from aitbc.gpu_acceleration.parallel_processing.marketplace_monitor import monitor as marketplace_monitor
from aitbc.gpu_acceleration.parallel_processing.marketplace_scaler import ResourceScaler, ScalingPolicy
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/marketplace/performance", tags=["marketplace-performance"])
# Global instances (in a real app these might be injected or application state)
# Module-level singletons shared by every request handler below; started and
# stopped by the router's startup/shutdown events.
gpu_optimizer = MarketplaceGPUOptimizer()
distributed_coordinator = DistributedProcessingCoordinator()
cache_optimizer = MarketplaceDataOptimizer()
resource_scaler = ResourceScaler()
# Startup event handler for background tasks
@router.on_event("startup")
async def startup_event():
    """Start the shared background services when the application boots."""
    # NOTE(review): router-level on_event is deprecated in recent FastAPI;
    # consider migrating to a lifespan handler.
    await marketplace_monitor.start()
    await distributed_coordinator.start()
    await resource_scaler.start()
    await cache_optimizer.connect()
@router.on_event("shutdown")
async def shutdown_event():
    """Stop background services and disconnect the cache on application shutdown."""
    await marketplace_monitor.stop()
    await distributed_coordinator.stop()
    await resource_scaler.stop()
    await cache_optimizer.disconnect()
# Models
class GPUAllocationRequest(BaseModel):
    """Resource requirements for a GPU allocation request."""
    # Optional caller-supplied identifier; released later via /gpu/release.
    job_id: Optional[str] = None
    memory_bytes: int = Field(1024 * 1024 * 1024, description="Memory needed in bytes")
    compute_units: float = Field(1.0, description="Relative compute requirement")
    max_latency_ms: int = Field(1000, description="Max acceptable latency")
    priority: int = Field(1, ge=1, le=10, description="Job priority 1-10")
class GPUReleaseRequest(BaseModel):
    """Identifies a previously allocated job whose GPU resources should be freed."""
    job_id: str
class DistributedTaskRequest(BaseModel):
    """Payload for submitting a task to the distributed processing framework."""
    agent_id: str
    payload: Dict[str, Any]
    priority: int = Field(1, ge=1, le=100)
    requires_gpu: bool = Field(False)
    # Milliseconds before the coordinator may abandon the task.
    timeout_ms: int = Field(30000)
class WorkerRegistrationRequest(BaseModel):
    """Registration details for a new worker node joining the cluster."""
    worker_id: str
    capabilities: List[str]
    has_gpu: bool = Field(False)
    max_concurrent_tasks: int = Field(4)
class ScalingPolicyUpdate(BaseModel):
    """Partial update of the auto-scaling policy; None fields are left unchanged."""
    min_nodes: Optional[int] = None
    max_nodes: Optional[int] = None
    target_utilization: Optional[float] = None
    scale_up_threshold: Optional[float] = None
    predictive_scaling: Optional[bool] = None
# Endpoints: GPU Optimization
@router.post("/gpu/allocate")
async def allocate_gpu_resources(request: GPUAllocationRequest):
    """Request optimal GPU resource allocation for a marketplace task.

    Records API latency in the marketplace monitor. Returns 503 when the
    optimizer cannot satisfy the request, 500 on unexpected errors.
    """
    # BUGFIX: `time` was used below but never imported in this module,
    # causing a NameError on the first request.
    import time

    try:
        start_time = time.time()
        result = await gpu_optimizer.optimize_resource_allocation(request.dict())
        # Latency is reported in milliseconds.
        marketplace_monitor.record_api_call((time.time() - start_time) * 1000)
        if not result.get("success"):
            raise HTTPException(status_code=503, detail=result.get("reason", "Resources unavailable"))
        return result
    except HTTPException:
        raise
    except Exception as e:
        marketplace_monitor.record_api_call(0, is_error=True)
        logger.error(f"Error in GPU allocation: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@router.post("/gpu/release")
async def release_gpu_resources(request: GPUReleaseRequest):
    """Release previously allocated GPU resources."""
    if not gpu_optimizer.release_resources(request.job_id):
        # Unknown job id — nothing to free.
        raise HTTPException(status_code=404, detail="Job ID not found")
    return {"success": True, "message": f"Resources for {request.job_id} released"}
@router.get("/gpu/status")
async def get_gpu_status():
    """Get overall GPU fleet status and optimization metrics."""
    fleet_status = gpu_optimizer.get_system_status()
    return fleet_status
# Endpoints: Distributed Processing
@router.post("/distributed/task")
async def submit_distributed_task(request: DistributedTaskRequest):
    """Submit a task to the distributed processing framework."""
    # task_id=None lets the coordinator assign an identifier on submission.
    new_task = DistributedTask(
        task_id=None,
        agent_id=request.agent_id,
        payload=request.payload,
        priority=request.priority,
        requires_gpu=request.requires_gpu,
        timeout_ms=request.timeout_ms
    )
    assigned_id = await distributed_coordinator.submit_task(new_task)
    return {"task_id": assigned_id, "status": "submitted"}
@router.get("/distributed/task/{task_id}")
async def get_distributed_task_status(task_id: str):
    """Check the status and get results of a distributed task."""
    task_state = await distributed_coordinator.get_task_status(task_id)
    # A falsy status (unknown id) maps to 404.
    if not task_state:
        raise HTTPException(status_code=404, detail="Task not found")
    return task_state
@router.post("/distributed/worker/register")
async def register_worker(request: WorkerRegistrationRequest):
    """Register a new worker node in the cluster."""
    # Map the API field names onto the coordinator's keyword arguments.
    registration = {
        "worker_id": request.worker_id,
        "capabilities": request.capabilities,
        "has_gpu": request.has_gpu,
        "max_tasks": request.max_concurrent_tasks,
    }
    distributed_coordinator.register_worker(**registration)
    return {"success": True, "message": f"Worker {request.worker_id} registered"}
@router.get("/distributed/status")
async def get_cluster_status():
    """Get overall distributed cluster health and load."""
    cluster_report = distributed_coordinator.get_cluster_status()
    return cluster_report
# Endpoints: Caching
@router.get("/cache/stats")
async def get_cache_stats():
    """Get current caching performance statistics."""
    connection_state = "connected" if cache_optimizer.is_connected else "local_only"
    return {
        "status": connection_state,
        "l1_cache_size": len(cache_optimizer.l1_cache.cache),
        "namespaces_tracked": list(cache_optimizer.ttls.keys()),
    }
@router.post("/cache/invalidate/{namespace}")
async def invalidate_cache_namespace(namespace: str, background_tasks: BackgroundTasks):
    """Invalidate a specific cache namespace (e.g., 'order_book')."""
    # The invalidation runs after the response has been sent.
    background_tasks.add_task(cache_optimizer.invalidate_namespace, namespace)
    return {"success": True, "message": f"Invalidation for {namespace} queued"}
# Endpoints: Monitoring
@router.get("/monitor/dashboard")
async def get_monitoring_dashboard():
    """Get real-time performance dashboard data."""
    dashboard_data = marketplace_monitor.get_realtime_dashboard_data()
    return dashboard_data
# Endpoints: Auto-scaling
@router.get("/scaler/status")
async def get_scaler_status():
    """Get current auto-scaler status and active rules."""
    scaler_report = resource_scaler.get_status()
    return scaler_report
@router.post("/scaler/policy")
async def update_scaling_policy(policy_update: ScalingPolicyUpdate):
    """Update auto-scaling thresholds and parameters dynamically."""
    policy = resource_scaler.policy
    # Field names of ScalingPolicyUpdate match the policy attributes, so we
    # can apply only the fields the caller supplied (None means "keep").
    for attr, value in policy_update.dict().items():
        if value is not None:
            setattr(policy, attr, value)
    return {"success": True, "message": "Scaling policy updated successfully"}

# ---------------------------------------------------------------------------
# (extraction artifact: diff-viewer markers — start of a new source file in
#  the original commit: Multi-Modal Fusion and Advanced RL API, 822 lines)
# ---------------------------------------------------------------------------
"""
Multi-Modal Fusion and Advanced RL API Endpoints
REST API for multi-modal agent fusion and advanced reinforcement learning
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks, WebSocket, WebSocketDisconnect
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.multi_modal_fusion import MultiModalFusionEngine
from ..services.advanced_reinforcement_learning import AdvancedReinforcementLearningEngine, MarketplaceStrategyOptimizer, CrossDomainCapabilityIntegrator
from ..domain.agent_performance import (
FusionModel, ReinforcementLearningConfig, AgentCapability,
CreativeCapability
)
# Module-level logger and router shared by all endpoints below.
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/multi-modal-rl", tags=["multi-modal-rl"])
# Pydantic models for API requests/responses
class FusionModelRequest(BaseModel):
    """Request model for fusion model creation."""
    model_name: str
    # Passed through to the fusion engine unchanged.
    fusion_type: str = Field(default="cross_domain")
    base_models: List[str]
    input_modalities: List[str]
    fusion_strategy: str = Field(default="ensemble_fusion")
class FusionModelResponse(BaseModel):
    """Response model for fusion model."""
    fusion_id: str
    model_name: str
    fusion_type: str
    base_models: List[str]
    input_modalities: List[str]
    fusion_strategy: str
    status: str
    fusion_performance: Dict[str, float]
    synergy_score: float
    robustness_score: float
    # ISO-8601 strings; trained_at stays None until the model has been trained.
    created_at: str
    trained_at: Optional[str]
class FusionRequest(BaseModel):
    """Request model for fusion inference."""
    fusion_id: str
    # Raw per-modality input payload passed through to the fusion engine.
    input_data: Dict[str, Any]
class FusionResponse(BaseModel):
    """Response model for fusion result."""
    fusion_type: str
    combined_result: Dict[str, Any]
    confidence: float
    # Carries modality contributions, attention weights and optimization gain.
    metadata: Dict[str, Any]
class RLAgentRequest(BaseModel):
    """Request model for RL agent creation."""
    agent_id: str
    environment_type: str
    # RL algorithm identifier; defaults to PPO.
    algorithm: str = Field(default="ppo")
    training_config: Dict[str, Any] = Field(default_factory=dict)
class RLAgentResponse(BaseModel):
    """Response model for RL agent."""
    config_id: str
    agent_id: str
    environment_type: str
    algorithm: str
    status: str
    learning_rate: float
    discount_factor: float
    exploration_rate: float
    max_episodes: int
    # ISO-8601 strings; trained_at is None until training has run.
    created_at: str
    trained_at: Optional[str]
class RLTrainingResponse(BaseModel):
    """Response model for RL training."""
    config_id: str
    # Summary statistics reported after a training run.
    final_performance: float
    convergence_episode: int
    training_episodes: int
    success_rate: float
    training_time: float
class StrategyOptimizationRequest(BaseModel):
    """Request model for strategy optimization."""
    agent_id: str
    strategy_type: str
    algorithm: str = Field(default="ppo")
    # Number of training episodes requested from the optimizer.
    training_episodes: int = Field(default=500)
class StrategyOptimizationResponse(BaseModel):
    """Response model for strategy optimization."""
    # True when the optimizer reports a successful run.
    success: bool
    config_id: str
    strategy_type: str
    algorithm: str
    final_performance: float
    convergence_episode: int
    training_episodes: int
    success_rate: float
class CapabilityIntegrationRequest(BaseModel):
    """Request model for capability integration."""
    agent_id: str
    capabilities: List[str]
    # Strategy identifier forwarded to the capability integrator.
    integration_strategy: str = Field(default="adaptive")
class CapabilityIntegrationResponse(BaseModel):
    """Response model for capability integration."""
    agent_id: str
    integration_strategy: str
    # Mapping: domain name -> list of capability summaries for that domain.
    domain_capabilities: Dict[str, List[Dict[str, Any]]]
    synergy_score: float
    enhanced_capabilities: List[str]
    fusion_model_id: str
    integration_result: Dict[str, Any]
# API Endpoints
@router.post("/fusion/models", response_model=FusionModelResponse)
async def create_fusion_model(
    fusion_request: FusionModelRequest,
    session: SessionDep
) -> FusionModelResponse:
    """Create multi-modal fusion model and return its initial state."""
    engine = MultiModalFusionEngine()
    try:
        model = await engine.create_fusion_model(
            session=session,
            model_name=fusion_request.model_name,
            fusion_type=fusion_request.fusion_type,
            base_models=fusion_request.base_models,
            input_modalities=fusion_request.input_modalities,
            fusion_strategy=fusion_request.fusion_strategy
        )
        # trained_at is only set once training completes.
        trained_iso = model.trained_at.isoformat() if model.trained_at else None
        return FusionModelResponse(
            fusion_id=model.fusion_id,
            model_name=model.model_name,
            fusion_type=model.fusion_type,
            base_models=model.base_models,
            input_modalities=model.input_modalities,
            fusion_strategy=model.fusion_strategy,
            status=model.status,
            fusion_performance=model.fusion_performance,
            synergy_score=model.synergy_score,
            robustness_score=model.robustness_score,
            created_at=model.created_at.isoformat(),
            trained_at=trained_iso
        )
    except Exception as e:
        logger.error(f"Error creating fusion model: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/fusion/{fusion_id}/infer", response_model=FusionResponse)
async def fuse_modalities(
    fusion_id: str,
    fusion_request: FusionRequest,
    session: SessionDep
) -> FusionResponse:
    """Fuse modalities using trained model."""
    engine = MultiModalFusionEngine()
    try:
        outcome = await engine.fuse_modalities(
            session=session,
            fusion_id=fusion_id,
            input_data=fusion_request.input_data
        )
        # Collect the optional diagnostics into the metadata envelope.
        diagnostics = {
            'modality_contributions': outcome.get('modality_contributions', {}),
            'attention_weights': outcome.get('attention_weights', {}),
            'optimization_gain': outcome.get('optimization_gain', 0.0)
        }
        return FusionResponse(
            fusion_type=outcome['fusion_type'],
            combined_result=outcome['combined_result'],
            confidence=outcome.get('confidence', 0.0),
            metadata=diagnostics
        )
    except ValueError as e:
        # The engine raises ValueError for unknown fusion ids.
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error during fusion: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/fusion/models")
async def list_fusion_models(
    session: SessionDep,
    status: Optional[str] = Query(default=None, description="Filter by status"),
    fusion_type: Optional[str] = Query(default=None, description="Filter by fusion type"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """List fusion models, optionally filtered by status and fusion type."""
    # BUGFIX: `select` was used but never imported in this module (NameError
    # on first request); imported locally from sqlmodel, matching the
    # session.exec(...) usage pattern.
    from sqlmodel import select
    try:
        query = select(FusionModel)
        if status:
            query = query.where(FusionModel.status == status)
        if fusion_type:
            query = query.where(FusionModel.fusion_type == fusion_type)
        models = session.exec(
            query.order_by(FusionModel.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "fusion_id": model.fusion_id,
                "model_name": model.model_name,
                "fusion_type": model.fusion_type,
                "base_models": model.base_models,
                "input_modalities": model.input_modalities,
                "fusion_strategy": model.fusion_strategy,
                "status": model.status,
                "fusion_performance": model.fusion_performance,
                "synergy_score": model.synergy_score,
                "robustness_score": model.robustness_score,
                "computational_complexity": model.computational_complexity,
                "memory_requirement": model.memory_requirement,
                "inference_time": model.inference_time,
                "deployment_count": model.deployment_count,
                "performance_stability": model.performance_stability,
                "created_at": model.created_at.isoformat(),
                "trained_at": model.trained_at.isoformat() if model.trained_at else None
            }
            for model in models
        ]
    except Exception as e:
        logger.error(f"Error listing fusion models: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/rl/agents", response_model=RLAgentResponse)
async def create_rl_agent(
    agent_request: RLAgentRequest,
    session: SessionDep
) -> RLAgentResponse:
    """Create RL agent for marketplace strategies."""
    engine = AdvancedReinforcementLearningEngine()
    try:
        config = await engine.create_rl_agent(
            session=session,
            agent_id=agent_request.agent_id,
            environment_type=agent_request.environment_type,
            algorithm=agent_request.algorithm,
            training_config=agent_request.training_config
        )
        # trained_at is None until a training run has completed.
        trained_iso = config.trained_at.isoformat() if config.trained_at else None
        return RLAgentResponse(
            config_id=config.config_id,
            agent_id=config.agent_id,
            environment_type=config.environment_type,
            algorithm=config.algorithm,
            status=config.status,
            learning_rate=config.learning_rate,
            discount_factor=config.discount_factor,
            exploration_rate=config.exploration_rate,
            max_episodes=config.max_episodes,
            created_at=config.created_at.isoformat(),
            trained_at=trained_iso
        )
    except Exception as e:
        logger.error(f"Error creating RL agent: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.websocket("/fusion/{fusion_id}/stream")
async def fuse_modalities_stream(
    websocket: WebSocket,
    fusion_id: str,
    session: SessionDep
):
    """Stream modalities and receive fusion results via WebSocket for high performance.

    Each received JSON message is fused and answered with a result message
    that includes server-side processing time. On error, a best-effort error
    frame is sent and the socket is closed with code 1011.
    """
    await websocket.accept()
    fusion_engine = MultiModalFusionEngine()
    try:
        while True:
            # Receive text data (JSON) containing input modalities
            data = await websocket.receive_json()
            start_time = datetime.utcnow()
            fusion_result = await fusion_engine.fuse_modalities(
                session=session,
                fusion_id=fusion_id,
                input_data=data
            )
            processing_time = (datetime.utcnow() - start_time).total_seconds()
            await websocket.send_json({
                "fusion_type": fusion_result['fusion_type'],
                "combined_result": fusion_result['combined_result'],
                "confidence": fusion_result.get('confidence', 0.0),
                "metadata": {
                    "processing_time": processing_time,
                    "fusion_strategy": fusion_result.get('strategy', 'unknown'),
                    "protocol": "websocket"
                }
            })
    except WebSocketDisconnect:
        logger.info(f"WebSocket client disconnected from fusion stream {fusion_id}")
    except Exception as e:
        logger.error(f"Error in fusion stream: {str(e)}")
        try:
            await websocket.send_json({"error": str(e)})
            await websocket.close(code=1011, reason=str(e))
        except Exception:
            # BUGFIX: was a bare `except:` — that would also swallow
            # KeyboardInterrupt/CancelledError during best-effort cleanup.
            pass
@router.get("/rl/agents/{agent_id}")
async def get_rl_agents(
    agent_id: str,
    session: SessionDep,
    status: Optional[str] = Query(default=None, description="Filter by status"),
    algorithm: Optional[str] = Query(default=None, description="Filter by algorithm"),
    limit: int = Query(default=20, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """Get RL agent configurations for an agent, newest first."""
    # BUGFIX: `select` was used but never imported in this module (NameError
    # on first request); imported locally from sqlmodel.
    from sqlmodel import select
    try:
        query = select(ReinforcementLearningConfig).where(ReinforcementLearningConfig.agent_id == agent_id)
        if status:
            query = query.where(ReinforcementLearningConfig.status == status)
        if algorithm:
            query = query.where(ReinforcementLearningConfig.algorithm == algorithm)
        configs = session.exec(
            query.order_by(ReinforcementLearningConfig.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "config_id": config.config_id,
                "agent_id": config.agent_id,
                "environment_type": config.environment_type,
                "algorithm": config.algorithm,
                "status": config.status,
                "learning_rate": config.learning_rate,
                "discount_factor": config.discount_factor,
                "exploration_rate": config.exploration_rate,
                "batch_size": config.batch_size,
                "network_layers": config.network_layers,
                "activation_functions": config.activation_functions,
                "max_episodes": config.max_episodes,
                "max_steps_per_episode": config.max_steps_per_episode,
                "action_space": config.action_space,
                "state_space": config.state_space,
                "reward_history": config.reward_history,
                "success_rate_history": config.success_rate_history,
                "convergence_episode": config.convergence_episode,
                "training_progress": config.training_progress,
                "deployment_performance": config.deployment_performance,
                "deployment_count": config.deployment_count,
                "created_at": config.created_at.isoformat(),
                "trained_at": config.trained_at.isoformat() if config.trained_at else None,
                "deployed_at": config.deployed_at.isoformat() if config.deployed_at else None
            }
            for config in configs
        ]
    except Exception as e:
        logger.error(f"Error getting RL agents for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/rl/optimize-strategy", response_model=StrategyOptimizationResponse)
async def optimize_strategy(
    optimization_request: StrategyOptimizationRequest,
    session: SessionDep
) -> StrategyOptimizationResponse:
    """Optimize agent strategy using RL."""
    optimizer = MarketplaceStrategyOptimizer()
    try:
        outcome = await optimizer.optimize_agent_strategy(
            session=session,
            agent_id=optimization_request.agent_id,
            strategy_type=optimization_request.strategy_type,
            algorithm=optimization_request.algorithm,
            training_episodes=optimization_request.training_episodes
        )
        # NOTE(review): on a failed run the optimizer result may omit
        # config_id/strategy_type/algorithm, which the response model declares
        # as required strings — confirm the service contract.
        return StrategyOptimizationResponse(
            success=outcome['success'],
            config_id=outcome.get('config_id'),
            strategy_type=outcome.get('strategy_type'),
            algorithm=outcome.get('algorithm'),
            final_performance=outcome.get('final_performance', 0.0),
            convergence_episode=outcome.get('convergence_episode', 0),
            training_episodes=outcome.get('training_episodes', 0),
            success_rate=outcome.get('success_rate', 0.0)
        )
    except Exception as e:
        logger.error(f"Error optimizing strategy: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/rl/deploy-strategy")
async def deploy_strategy(
    config_id: str,
    deployment_context: Dict[str, Any],
    session: SessionDep
) -> Dict[str, Any]:
    """Deploy trained strategy."""
    optimizer = MarketplaceStrategyOptimizer()
    try:
        # The optimizer's deployment result is passed through unchanged.
        return await optimizer.deploy_strategy(
            session=session,
            config_id=config_id,
            deployment_context=deployment_context
        )
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error deploying strategy: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/capabilities/integrate", response_model=CapabilityIntegrationResponse)
async def integrate_capabilities(
    integration_request: CapabilityIntegrationRequest,
    session: SessionDep
) -> CapabilityIntegrationResponse:
    """Integrate capabilities across domains."""
    integrator = CrossDomainCapabilityIntegrator()
    try:
        result = await integrator.integrate_cross_domain_capabilities(
            session=session,
            agent_id=integration_request.agent_id,
            capabilities=integration_request.capabilities,
            integration_strategy=integration_request.integration_strategy
        )
        # Flatten the capability objects into plain dicts per domain.
        formatted_domain_caps = {
            domain: [
                {
                    "capability_id": cap.capability_id,
                    "capability_name": cap.capability_name,
                    "capability_type": cap.capability_type,
                    "skill_level": cap.skill_level,
                    "proficiency_score": cap.proficiency_score
                }
                for cap in caps
            ]
            for domain, caps in result['domain_capabilities'].items()
        }
        return CapabilityIntegrationResponse(
            agent_id=result['agent_id'],
            integration_strategy=result['integration_strategy'],
            domain_capabilities=formatted_domain_caps,
            synergy_score=result['synergy_score'],
            enhanced_capabilities=result['enhanced_capabilities'],
            fusion_model_id=result['fusion_model_id'],
            integration_result=result['integration_result']
        )
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error integrating capabilities: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/capabilities/{agent_id}/domains")
async def get_agent_domain_capabilities(
    agent_id: str,
    session: SessionDep,
    domain: Optional[str] = Query(default=None, description="Filter by domain"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """Get agent capabilities grouped by domain, with per-domain skill stats."""
    # BUGFIX: `select` was used but never imported in this module (NameError
    # on first request); imported locally from sqlmodel.
    from sqlmodel import select
    try:
        query = select(AgentCapability).where(AgentCapability.agent_id == agent_id)
        if domain:
            query = query.where(AgentCapability.domain_area == domain)
        capabilities = session.exec(
            query.order_by(AgentCapability.skill_level.desc()).limit(limit)
        ).all()
        # Group by domain area.
        domain_capabilities: Dict[str, List[Dict[str, Any]]] = {}
        for cap in capabilities:
            domain_capabilities.setdefault(cap.domain_area, []).append({
                "capability_id": cap.capability_id,
                "capability_name": cap.capability_name,
                "capability_type": cap.capability_type,
                "skill_level": cap.skill_level,
                "proficiency_score": cap.proficiency_score,
                "specialization_areas": cap.specialization_areas,
                "learning_rate": cap.learning_rate,
                "adaptation_speed": cap.adaptation_speed,
                "certified": cap.certified,
                "certification_level": cap.certification_level,
                "status": cap.status,
                "acquired_at": cap.acquired_at.isoformat(),
                "last_improved": cap.last_improved.isoformat() if cap.last_improved else None
            })
        # Loop variable renamed so it does not shadow the `domain` query param.
        return [
            {
                "domain": domain_name,
                "capabilities": caps,
                "total_capabilities": len(caps),
                "average_skill_level": sum(c["skill_level"] for c in caps) / len(caps) if caps else 0.0,
                "highest_skill_level": max(c["skill_level"] for c in caps) if caps else 0.0
            }
            for domain_name, caps in domain_capabilities.items()
        ]
    except Exception as e:
        logger.error(f"Error getting domain capabilities for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/creative-capabilities/{agent_id}")
async def get_creative_capabilities(
    agent_id: str,
    session: SessionDep,
    creative_domain: Optional[str] = Query(default=None, description="Filter by creative domain"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """Get creative capabilities for an agent, most original first."""
    # BUGFIX: `select` was used but never imported in this module (NameError
    # on first request); imported locally from sqlmodel.
    from sqlmodel import select
    try:
        query = select(CreativeCapability).where(CreativeCapability.agent_id == agent_id)
        if creative_domain:
            query = query.where(CreativeCapability.creative_domain == creative_domain)
        capabilities = session.exec(
            query.order_by(CreativeCapability.originality_score.desc()).limit(limit)
        ).all()
        return [
            {
                "capability_id": cap.capability_id,
                "agent_id": cap.agent_id,
                "creative_domain": cap.creative_domain,
                "capability_type": cap.capability_type,
                "originality_score": cap.originality_score,
                "novelty_score": cap.novelty_score,
                "aesthetic_quality": cap.aesthetic_quality,
                "coherence_score": cap.coherence_score,
                "generation_models": cap.generation_models,
                "style_variety": cap.style_variety,
                "output_quality": cap.output_quality,
                "creative_learning_rate": cap.creative_learning_rate,
                "style_adaptation": cap.style_adaptation,
                "cross_domain_transfer": cap.cross_domain_transfer,
                "creative_specializations": cap.creative_specializations,
                "tool_proficiency": cap.tool_proficiency,
                "domain_knowledge": cap.domain_knowledge,
                "creations_generated": cap.creations_generated,
                "user_ratings": cap.user_ratings,
                "expert_evaluations": cap.expert_evaluations,
                "status": cap.status,
                "certification_level": cap.certification_level,
                "created_at": cap.created_at.isoformat(),
                "last_evaluation": cap.last_evaluation.isoformat() if cap.last_evaluation else None
            }
            for cap in capabilities
        ]
    except Exception as e:
        logger.error(f"Error getting creative capabilities for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics/fusion-performance")
async def get_fusion_performance_analytics(
    session: SessionDep,
    agent_ids: Optional[List[str]] = Query(default=[], description="List of agent IDs"),
    fusion_type: Optional[str] = Query(default=None, description="Filter by fusion type"),
    period: str = Query(default="7d", description="Time period")
) -> Dict[str, Any]:
    """Aggregate fusion-model analytics (synergy, robustness, strategy mix).

    NOTE(review): `period` is echoed back but never used to bound the query
    date range — confirm whether time filtering was intended here.

    Raises:
        HTTPException: 500 on any database/aggregation failure.
    """
    try:
        query = select(FusionModel)
        if fusion_type:
            query = query.where(FusionModel.fusion_type == fusion_type)
        models = session.exec(query).all()
        # Filter by agent IDs if provided (by checking base models)
        if agent_ids:
            filtered_models = []
            for model in models:
                # Check if any base model belongs to specified agents
                # NOTE(review): substring match on str(base_model) — agent IDs that
                # are prefixes of other IDs will produce false positives; verify.
                if any(agent_id in str(base_model) for base_model in model.base_models for agent_id in agent_ids):
                    filtered_models.append(model)
            models = filtered_models
        # Calculate analytics
        total_models = len(models)
        ready_models = len([m for m in models if m.status == "ready"])
        if models:
            avg_synergy = sum(m.synergy_score for m in models) / len(models)
            avg_robustness = sum(m.robustness_score for m in models) / len(models)
            # Performance metrics: collect per-metric samples across all models.
            performance_metrics = {}
            for model in models:
                if model.fusion_performance:
                    for metric, value in model.fusion_performance.items():
                        if metric not in performance_metrics:
                            performance_metrics[metric] = []
                        performance_metrics[metric].append(value)
            avg_performance = {}
            for metric, values in performance_metrics.items():
                avg_performance[metric] = sum(values) / len(values)
            # Fusion strategy distribution (histogram of fusion_strategy values).
            strategy_distribution = {}
            for model in models:
                strategy = model.fusion_strategy
                strategy_distribution[strategy] = strategy_distribution.get(strategy, 0) + 1
        else:
            # No models matched: report zeroed aggregates rather than failing.
            avg_synergy = 0.0
            avg_robustness = 0.0
            avg_performance = {}
            strategy_distribution = {}
        return {
            "period": period,
            "total_models": total_models,
            "ready_models": ready_models,
            "readiness_rate": ready_models / total_models if total_models > 0 else 0.0,
            "average_synergy_score": avg_synergy,
            "average_robustness_score": avg_robustness,
            "average_performance": avg_performance,
            "strategy_distribution": strategy_distribution,
            # Top 10 models ranked by synergy score, descending.
            "top_performing_models": sorted(
                [
                    {
                        "fusion_id": model.fusion_id,
                        "model_name": model.model_name,
                        "synergy_score": model.synergy_score,
                        "robustness_score": model.robustness_score,
                        "deployment_count": model.deployment_count
                    }
                    for model in models
                ],
                key=lambda x: x["synergy_score"],
                reverse=True
            )[:10]
        }
    except Exception as e:
        logger.error(f"Error getting fusion performance analytics: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics/rl-performance")
async def get_rl_performance_analytics(
    session: SessionDep,
    agent_ids: Optional[List[str]] = Query(default=[], description="List of agent IDs"),
    algorithm: Optional[str] = Query(default=None, description="Filter by algorithm"),
    environment_type: Optional[str] = Query(default=None, description="Filter by environment type"),
    period: str = Query(default="7d", description="Time period")
) -> Dict[str, Any]:
    """Aggregate reinforcement-learning analytics across agent RL configs.

    "Final performance" is the mean reward over the last 10 recorded episodes.
    NOTE(review): `period` is echoed back but not applied as a date filter,
    and `np` (numpy) is assumed to be imported at module level — confirm.

    Raises:
        HTTPException: 500 on any database/aggregation failure.
    """
    try:
        query = select(ReinforcementLearningConfig)
        if agent_ids:
            query = query.where(ReinforcementLearningConfig.agent_id.in_(agent_ids))
        if algorithm:
            query = query.where(ReinforcementLearningConfig.algorithm == algorithm)
        if environment_type:
            query = query.where(ReinforcementLearningConfig.environment_type == environment_type)
        configs = session.exec(query).all()
        # Calculate analytics
        total_configs = len(configs)
        ready_configs = len([c for c in configs if c.status == "ready"])
        if configs:
            # Algorithm distribution (histogram).
            algorithm_distribution = {}
            for config in configs:
                alg = config.algorithm
                algorithm_distribution[alg] = algorithm_distribution.get(alg, 0) + 1
            # Environment distribution (histogram).
            environment_distribution = {}
            for config in configs:
                env = config.environment_type
                environment_distribution[env] = environment_distribution.get(env, 0) + 1
            # Performance metrics: tail-average reward/success, convergence point.
            final_performances = []
            success_rates = []
            convergence_episodes = []
            for config in configs:
                if config.reward_history:
                    final_performances.append(np.mean(config.reward_history[-10:]))
                if config.success_rate_history:
                    success_rates.append(np.mean(config.success_rate_history[-10:]))
                if config.convergence_episode:
                    convergence_episodes.append(config.convergence_episode)
            avg_performance = np.mean(final_performances) if final_performances else 0.0
            avg_success_rate = np.mean(success_rates) if success_rates else 0.0
            avg_convergence = np.mean(convergence_episodes) if convergence_episodes else 0.0
        else:
            # No configs matched: zeroed aggregates.
            algorithm_distribution = {}
            environment_distribution = {}
            avg_performance = 0.0
            avg_success_rate = 0.0
            avg_convergence = 0.0
        return {
            "period": period,
            "total_agents": len(set(c.agent_id for c in configs)),
            "total_configs": total_configs,
            "ready_configs": ready_configs,
            "readiness_rate": ready_configs / total_configs if total_configs > 0 else 0.0,
            "average_performance": avg_performance,
            "average_success_rate": avg_success_rate,
            "average_convergence_episode": avg_convergence,
            "algorithm_distribution": algorithm_distribution,
            "environment_distribution": environment_distribution,
            # Top 10 configs ranked by tail-average reward, descending.
            "top_performing_agents": sorted(
                [
                    {
                        "agent_id": config.agent_id,
                        "algorithm": config.algorithm,
                        "environment_type": config.environment_type,
                        "final_performance": np.mean(config.reward_history[-10:]) if config.reward_history else 0.0,
                        "convergence_episode": config.convergence_episode,
                        "deployment_count": config.deployment_count
                    }
                    for config in configs
                ],
                key=lambda x: x["final_performance"],
                reverse=True
            )[:10]
        }
    except Exception as e:
        logger.error(f"Error getting RL performance analytics: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/health")
async def health_check() -> Dict[str, Any]:
    """Report liveness of the multi-modal / RL subsystem and its engines."""
    engine_names = (
        "multi_modal_fusion_engine",
        "advanced_rl_engine",
        "marketplace_strategy_optimizer",
        "cross_domain_capability_integrator",
    )
    # All engines are reported as operational; this endpoint carries no
    # per-engine probing logic, it only signals that the router is up.
    return {
        "status": "healthy",
        "timestamp": datetime.utcnow().isoformat(),
        "version": "1.0.0",
        "services": {name: "operational" for name in engine_names},
    }

View File

@@ -0,0 +1,524 @@
"""
Reputation Management API Endpoints
REST API for agent reputation, trust scores, and economic profiles
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.reputation_service import ReputationService
from ..domain.reputation import (
AgentReputation, CommunityFeedback, ReputationLevel,
TrustScoreCategory
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/reputation", tags=["reputation"])
# Pydantic models for API requests/responses
class ReputationProfileResponse(BaseModel):
    """Full reputation profile returned by GET /v1/reputation/profile/{agent_id}.

    Mirrors the dict produced by ReputationService.get_reputation_summary.
    Timestamps are ISO-8601 strings; earnings are denominated in AITBC.
    """
    agent_id: str
    trust_score: float  # composite score — scale looks like 0-1000 (see tier thresholds); TODO confirm
    reputation_level: str
    performance_rating: float
    reliability_score: float
    community_rating: float
    total_earnings: float
    transaction_count: int
    success_rate: float
    jobs_completed: int
    jobs_failed: int
    average_response_time: float  # presumably milliseconds, matching JobCompletionRequest.response_time
    dispute_count: int
    certifications: List[str]
    specialization_tags: List[str]
    geographic_region: str
    last_activity: str
    recent_events: List[Dict[str, Any]]
    recent_feedback: List[Dict[str, Any]]
class FeedbackRequest(BaseModel):
    """Request body for submitting community feedback about an agent.

    `ratings` is a mapping of rating-category name to numeric value; the
    service expects overall/performance/communication/reliability/value keys.
    """
    reviewer_id: str
    ratings: Dict[str, float] = Field(..., description="Overall, performance, communication, reliability, value ratings")
    feedback_text: str = Field(default="", max_length=1000)
    tags: List[str] = Field(default_factory=list)
class FeedbackResponse(BaseModel):
    """Serialised view of one CommunityFeedback row.

    `created_at` is an ISO-8601 string; `moderation_status` gates whether the
    feedback is shown publicly (only "approved" entries are listed).
    """
    id: str
    agent_id: str
    reviewer_id: str
    overall_rating: float
    performance_rating: float
    communication_rating: float
    reliability_rating: float
    value_rating: float
    feedback_text: str
    feedback_tags: List[str]
    created_at: str
    moderation_status: str
class JobCompletionRequest(BaseModel):
    """Request body for recording a finished job against an agent's reputation."""
    agent_id: str
    job_id: str
    success: bool  # True counts toward success_rate; False increments jobs_failed
    response_time: float = Field(..., gt=0, description="Response time in milliseconds")
    earnings: float = Field(..., ge=0, description="Earnings in AITBC")
class TrustScoreResponse(BaseModel):
    """Per-component trust-score breakdown plus the composite and its level.

    Component scores are computed independently by the reputation calculator;
    `calculated_at` is the ISO-8601 time the breakdown was produced.
    """
    agent_id: str
    composite_score: float
    performance_score: float
    reliability_score: float
    community_score: float
    security_score: float
    economic_score: float
    reputation_level: str
    calculated_at: str
class LeaderboardEntry(BaseModel):
    """One ranked row of the reputation leaderboard (rank is 1-based)."""
    rank: int
    agent_id: str
    trust_score: float
    reputation_level: str
    performance_rating: float
    reliability_score: float
    community_rating: float
    total_earnings: float
    transaction_count: int
    geographic_region: str
    specialization_tags: List[str]
class ReputationMetricsResponse(BaseModel):
    """System-wide reputation metrics for GET /v1/reputation/metrics.

    `level_distribution` maps reputation-level name to agent count;
    `recent_activity` summarises the trailing 24 hours.
    """
    total_agents: int
    average_trust_score: float
    level_distribution: Dict[str, int]
    top_regions: List[Dict[str, Any]]
    recent_activity: Dict[str, Any]
# API Endpoints
@router.get("/profile/{agent_id}", response_model=ReputationProfileResponse)
async def get_reputation_profile(
    agent_id: str,
    session: SessionDep
) -> ReputationProfileResponse:
    """Get comprehensive reputation profile for an agent.

    Raises:
        HTTPException: 404 when no profile exists, 500 on unexpected errors.
    """
    reputation_service = ReputationService(session)
    try:
        profile_data = await reputation_service.get_reputation_summary(agent_id)
        if "error" in profile_data:
            raise HTTPException(status_code=404, detail=profile_data["error"])
        return ReputationProfileResponse(**profile_data)
    except HTTPException:
        # Bug fix: the bare `except Exception` below used to swallow the 404
        # raised above and re-report it as a 500. Re-raise it untouched.
        raise
    except Exception as e:
        logger.error(f"Error getting reputation profile for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/profile/{agent_id}")
async def create_reputation_profile(
    agent_id: str,
    session: SessionDep
) -> Dict[str, Any]:
    """Create a fresh reputation profile for `agent_id` and echo its initial state."""
    service = ReputationService(session)
    try:
        rep = await service.create_reputation_profile(agent_id)
        payload: Dict[str, Any] = {
            "message": "Reputation profile created successfully",
            "agent_id": rep.agent_id,
            "trust_score": rep.trust_score,
            "reputation_level": rep.reputation_level.value,
            "created_at": rep.created_at.isoformat(),
        }
        return payload
    except Exception as e:
        logger.error(f"Error creating reputation profile for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/feedback/{agent_id}", response_model=FeedbackResponse)
async def add_community_feedback(
    agent_id: str,
    feedback_request: FeedbackRequest,
    session: SessionDep
) -> FeedbackResponse:
    """Persist community feedback for an agent via the reputation service.

    The service is responsible for validation, moderation status assignment
    and any resulting trust-score update; this handler only marshals the
    stored row back into the response model.

    Raises:
        HTTPException: 500 on any service/database failure.
    """
    reputation_service = ReputationService(session)
    try:
        feedback = await reputation_service.add_community_feedback(
            agent_id=agent_id,
            reviewer_id=feedback_request.reviewer_id,
            ratings=feedback_request.ratings,
            feedback_text=feedback_request.feedback_text,
            tags=feedback_request.tags
        )
        return FeedbackResponse(
            id=feedback.id,
            agent_id=feedback.agent_id,
            reviewer_id=feedback.reviewer_id,
            overall_rating=feedback.overall_rating,
            performance_rating=feedback.performance_rating,
            communication_rating=feedback.communication_rating,
            reliability_rating=feedback.reliability_rating,
            value_rating=feedback.value_rating,
            feedback_text=feedback.feedback_text,
            feedback_tags=feedback.feedback_tags,
            created_at=feedback.created_at.isoformat(),
            moderation_status=feedback.moderation_status
        )
    except Exception as e:
        logger.error(f"Error adding feedback for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/job-completion")
async def record_job_completion(
    job_request: JobCompletionRequest,
    session: SessionDep
) -> Dict[str, Any]:
    """Record a job outcome and return the agent's updated reputation summary.

    The reputation service applies the success/failure, response time and
    earnings to the agent's profile and recomputes the trust score.

    Raises:
        HTTPException: 500 on any service/database failure.
    """
    reputation_service = ReputationService(session)
    try:
        reputation = await reputation_service.record_job_completion(
            agent_id=job_request.agent_id,
            job_id=job_request.job_id,
            success=job_request.success,
            response_time=job_request.response_time,
            earnings=job_request.earnings
        )
        # Echo back the post-update state so callers can display the effect.
        return {
            "message": "Job completion recorded successfully",
            "agent_id": reputation.agent_id,
            "new_trust_score": reputation.trust_score,
            "reputation_level": reputation.reputation_level.value,
            "jobs_completed": reputation.jobs_completed,
            "success_rate": reputation.success_rate,
            "total_earnings": reputation.total_earnings
        }
    except Exception as e:
        logger.error(f"Error recording job completion: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/trust-score/{agent_id}", response_model=TrustScoreResponse)
async def get_trust_score_breakdown(
    agent_id: str,
    session: SessionDep
) -> TrustScoreResponse:
    """Compute and return the per-component trust-score breakdown for an agent.

    Each component (performance, reliability, community, security, economic)
    is calculated independently by the service's calculator, then combined
    into the composite score that determines the reputation level.

    Raises:
        HTTPException: 500 on any calculation/database failure.
    """
    reputation_service = ReputationService(session)
    calculator = reputation_service.calculator
    try:
        # Calculate individual components
        performance_score = calculator.calculate_performance_score(agent_id, session)
        reliability_score = calculator.calculate_reliability_score(agent_id, session)
        community_score = calculator.calculate_community_score(agent_id, session)
        security_score = calculator.calculate_security_score(agent_id, session)
        economic_score = calculator.calculate_economic_score(agent_id, session)
        # Calculate composite score
        # NOTE(review): the composite is recomputed from scratch rather than
        # combined from the components above — confirm both paths agree.
        composite_score = calculator.calculate_composite_trust_score(agent_id, session)
        reputation_level = calculator.determine_reputation_level(composite_score)
        return TrustScoreResponse(
            agent_id=agent_id,
            composite_score=composite_score,
            performance_score=performance_score,
            reliability_score=reliability_score,
            community_score=community_score,
            security_score=security_score,
            economic_score=economic_score,
            reputation_level=reputation_level.value,
            calculated_at=datetime.utcnow().isoformat()
        )
    except Exception as e:
        logger.error(f"Error getting trust score breakdown for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/leaderboard", response_model=List[LeaderboardEntry])
async def get_reputation_leaderboard(
    session: SessionDep,
    category: str = Query(default="trust_score", description="Category to rank by"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
    region: Optional[str] = Query(default=None, description="Filter by region")
) -> List[LeaderboardEntry]:
    """Get reputation leaderboard.

    Bug fix: `session` (which has no default) previously came after the
    defaulted Query parameters, which is a Python SyntaxError. It is moved
    first, matching the other endpoints in this module; FastAPI binds
    parameters by name/annotation, so the HTTP API is unchanged.

    Raises:
        HTTPException: 500 on any service/database failure.
    """
    reputation_service = ReputationService(session)
    try:
        leaderboard_data = await reputation_service.get_leaderboard(
            category=category,
            limit=limit,
            region=region
        )
        return [LeaderboardEntry(**entry) for entry in leaderboard_data]
    except Exception as e:
        logger.error(f"Error getting leaderboard: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/metrics", response_model=ReputationMetricsResponse)
async def get_reputation_metrics(
    session: SessionDep
) -> ReputationMetricsResponse:
    """Get overall reputation system metrics.

    Aggregates all reputation profiles into level/region histograms plus a
    24-hour activity snapshot.

    Raises:
        HTTPException: 500 on any database/aggregation failure.
    """
    # Bug fix: `select`, `func` and `ReputationEvent` are not imported at
    # module level, so this endpoint raised NameError at runtime. Import them
    # locally, matching the function-scope import style used elsewhere here.
    from sqlmodel import select
    from sqlalchemy import func
    from ..domain.reputation import ReputationEvent
    try:
        # Get all reputation profiles
        reputations = session.exec(
            select(AgentReputation)
        ).all()
        if not reputations:
            # Empty system: return zeroed metrics instead of failing.
            return ReputationMetricsResponse(
                total_agents=0,
                average_trust_score=0.0,
                level_distribution={},
                top_regions=[],
                recent_activity={}
            )
        # Calculate metrics
        total_agents = len(reputations)
        average_trust_score = sum(r.trust_score for r in reputations) / total_agents
        # Level distribution (histogram of reputation levels).
        level_counts = {}
        for reputation in reputations:
            level = reputation.reputation_level.value
            level_counts[level] = level_counts.get(level, 0) + 1
        # Top regions by agent count (max 10).
        region_counts = {}
        for reputation in reputations:
            region = reputation.geographic_region or "Unknown"
            region_counts[region] = region_counts.get(region, 0) + 1
        top_regions = [
            {"region": region, "count": count}
            for region, count in sorted(region_counts.items(), key=lambda x: x[1], reverse=True)[:10]
        ]
        # Recent activity (last 24 hours)
        recent_cutoff = datetime.utcnow() - timedelta(days=1)
        recent_events = session.exec(
            select(func.count(ReputationEvent.id)).where(
                ReputationEvent.occurred_at >= recent_cutoff
            )
        ).first()
        # Bug fix: SQLModel's exec() returns scalars for single-column
        # selects, so `.first()` yields an int (or None) — the previous
        # `recent_events[0]` subscripting raised TypeError for nonzero counts.
        recent_activity = {
            "events_last_24h": recent_events or 0,
            "active_agents": len([
                r for r in reputations
                if r.last_activity and r.last_activity >= recent_cutoff
            ])
        }
        return ReputationMetricsResponse(
            total_agents=total_agents,
            average_trust_score=average_trust_score,
            level_distribution=level_counts,
            top_regions=top_regions,
            recent_activity=recent_activity
        )
    except Exception as e:
        logger.error(f"Error getting reputation metrics: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/feedback/{agent_id}")
async def get_agent_feedback(
    agent_id: str,
    session: SessionDep,
    limit: int = Query(default=10, ge=1, le=50)
) -> List[FeedbackResponse]:
    """Get approved community feedback for an agent, newest first.

    Bug fixes: `session` (no default) previously followed the defaulted
    `limit`, a Python SyntaxError — it now precedes it, which leaves the
    HTTP API unchanged. `select`/`and_` were also never imported at module
    level; they are imported locally, matching this module's style.

    Raises:
        HTTPException: 500 on any database failure.
    """
    from sqlmodel import select
    from sqlalchemy import and_
    try:
        feedbacks = session.exec(
            select(CommunityFeedback)
            .where(
                and_(
                    CommunityFeedback.agent_id == agent_id,
                    # Only surface moderated-approved feedback publicly.
                    CommunityFeedback.moderation_status == "approved"
                )
            )
            .order_by(CommunityFeedback.created_at.desc())
            .limit(limit)
        ).all()
        return [
            FeedbackResponse(
                id=feedback.id,
                agent_id=feedback.agent_id,
                reviewer_id=feedback.reviewer_id,
                overall_rating=feedback.overall_rating,
                performance_rating=feedback.performance_rating,
                communication_rating=feedback.communication_rating,
                reliability_rating=feedback.reliability_rating,
                value_rating=feedback.value_rating,
                feedback_text=feedback.feedback_text,
                feedback_tags=feedback.feedback_tags,
                created_at=feedback.created_at.isoformat(),
                moderation_status=feedback.moderation_status
            )
            for feedback in feedbacks
        ]
    except Exception as e:
        logger.error(f"Error getting feedback for agent {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/events/{agent_id}")
async def get_reputation_events(
    agent_id: str,
    session: SessionDep,
    limit: int = Query(default=20, ge=1, le=100)
) -> List[Dict[str, Any]]:
    """Get reputation change events for an agent, newest first.

    Bug fixes: `session` (no default) previously followed the defaulted
    `limit` — a Python SyntaxError — and `select`/`ReputationEvent` were
    never imported at module level. Both are fixed here without changing
    the HTTP API.

    Raises:
        HTTPException: 500 on any database failure.
    """
    from sqlmodel import select
    from ..domain.reputation import ReputationEvent
    try:
        events = session.exec(
            select(ReputationEvent)
            .where(ReputationEvent.agent_id == agent_id)
            .order_by(ReputationEvent.occurred_at.desc())
            .limit(limit)
        ).all()
        return [
            {
                "id": event.id,
                "event_type": event.event_type,
                "event_subtype": event.event_subtype,
                "impact_score": event.impact_score,
                "trust_score_before": event.trust_score_before,
                "trust_score_after": event.trust_score_after,
                # Levels may be unset on the very first event for an agent.
                "reputation_level_before": event.reputation_level_before.value if event.reputation_level_before else None,
                "reputation_level_after": event.reputation_level_after.value if event.reputation_level_after else None,
                "occurred_at": event.occurred_at.isoformat(),
                "event_data": event.event_data
            }
            for event in events
        ]
    except Exception as e:
        logger.error(f"Error getting reputation events for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.put("/profile/{agent_id}/specialization")
async def update_specialization(
    agent_id: str,
    specialization_tags: List[str],
    session: SessionDep
) -> Dict[str, Any]:
    """Replace an agent's specialization tags.

    Bug fix: `select` is not imported at module level in this file, so this
    endpoint raised NameError at runtime; it is imported locally, matching
    the function-scope import style used elsewhere in the module.

    Raises:
        HTTPException: 404 when no profile exists, 500 on unexpected errors.
    """
    from sqlmodel import select
    try:
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            raise HTTPException(status_code=404, detail="Reputation profile not found")
        reputation.specialization_tags = specialization_tags
        reputation.updated_at = datetime.utcnow()
        session.commit()
        session.refresh(reputation)
        return {
            "message": "Specialization tags updated successfully",
            "agent_id": agent_id,
            "specialization_tags": reputation.specialization_tags,
            "updated_at": reputation.updated_at.isoformat()
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error updating specialization for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.put("/profile/{agent_id}/region")
async def update_region(
    agent_id: str,
    region: str,
    session: SessionDep
) -> Dict[str, Any]:
    """Update an agent's geographic region.

    Bug fix: `select` is not imported at module level in this file, so this
    endpoint raised NameError at runtime; it is imported locally, matching
    the function-scope import style used elsewhere in the module.

    Raises:
        HTTPException: 404 when no profile exists, 500 on unexpected errors.
    """
    from sqlmodel import select
    try:
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            raise HTTPException(status_code=404, detail="Reputation profile not found")
        reputation.geographic_region = region
        reputation.updated_at = datetime.utcnow()
        session.commit()
        session.refresh(reputation)
        return {
            "message": "Geographic region updated successfully",
            "agent_id": agent_id,
            "geographic_region": reputation.geographic_region,
            "updated_at": reputation.updated_at.isoformat()
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error updating region for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")

View File

@@ -0,0 +1,565 @@
"""
Reward System API Endpoints
REST API for agent rewards, incentives, and performance-based earnings
"""
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
import logging
from ..storage import SessionDep
from ..services.reward_service import RewardEngine
from ..domain.rewards import (
AgentRewardProfile, RewardTier, RewardType, RewardStatus
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/rewards", tags=["rewards"])
# Pydantic models for API requests/responses
class RewardProfileResponse(BaseModel):
    """Full reward profile returned by GET /v1/rewards/profile/{agent_id}.

    Mirrors RewardEngine.get_reward_summary; amounts are in AITBC and
    `last_reward_date` is an ISO-8601 string or None.
    """
    agent_id: str
    current_tier: str  # one of the RewardTier values (bronze..diamond)
    tier_progress: float
    base_earnings: float
    bonus_earnings: float
    total_earnings: float
    lifetime_earnings: float
    rewards_distributed: int
    current_streak: int
    longest_streak: int
    performance_score: float
    loyalty_score: float
    referral_count: int
    community_contributions: int
    last_reward_date: Optional[str]
    recent_calculations: List[Dict[str, Any]]
    recent_distributions: List[Dict[str, Any]]
class RewardRequest(BaseModel):
    """Request body for calculating and distributing a reward.

    `reference_date`, when given, must be an ISO-8601 datetime string.
    """
    agent_id: str
    reward_type: RewardType
    base_amount: float = Field(..., gt=0, description="Base reward amount in AITBC")
    performance_metrics: Dict[str, Any] = Field(..., description="Performance metrics for bonus calculation")
    reference_date: Optional[str] = Field(default=None, description="Reference date for calculation")
class RewardResponse(BaseModel):
    """Outcome of a reward calculation + distribution pass.

    `reward_amount` is the final amount after tier multiplier and bonuses.
    """
    calculation_id: str
    distribution_id: str
    reward_amount: float
    reward_type: str
    tier_multiplier: float
    total_bonus: float
    status: str
class RewardAnalyticsResponse(BaseModel):
    """Aggregated reward-system analytics for one reporting period.

    `tier_distribution` maps tier name to agent count; dates are ISO-8601.
    """
    period_type: str
    start_date: str
    end_date: str
    total_rewards_distributed: float
    total_agents_rewarded: int
    average_reward_per_agent: float
    tier_distribution: Dict[str, int]
    total_distributions: int
class TierProgressResponse(BaseModel):
    """Progress of an agent toward the next reward tier.

    `next_tier` is None when the agent already holds the top (diamond) tier;
    `requirements_met` maps requirement name to whether it is satisfied.
    """
    agent_id: str
    current_tier: str
    next_tier: Optional[str]
    tier_progress: float
    trust_score: float
    requirements_met: Dict[str, bool]
    benefits: Dict[str, Any]
class BatchProcessResponse(BaseModel):
    """Tally of a batch reward-distribution run (total = processed + failed)."""
    processed: int
    failed: int
    total: int
class MilestoneResponse(BaseModel):
    """Serialised view of one RewardMilestone row.

    Timestamps are ISO-8601 strings and remain None until the milestone is
    completed / its reward claimed.
    """
    id: str
    agent_id: str
    milestone_type: str
    milestone_name: str
    target_value: float
    current_value: float
    progress_percentage: float
    reward_amount: float
    is_completed: bool
    is_claimed: bool
    completed_at: Optional[str]
    claimed_at: Optional[str]
# API Endpoints
@router.get("/profile/{agent_id}", response_model=RewardProfileResponse)
async def get_reward_profile(
    agent_id: str,
    session: SessionDep
) -> RewardProfileResponse:
    """Get comprehensive reward profile for an agent.

    Raises:
        HTTPException: 404 when no profile exists, 500 on unexpected errors.
    """
    reward_engine = RewardEngine(session)
    try:
        profile_data = await reward_engine.get_reward_summary(agent_id)
        if "error" in profile_data:
            raise HTTPException(status_code=404, detail=profile_data["error"])
        return RewardProfileResponse(**profile_data)
    except HTTPException:
        # Bug fix: the bare `except Exception` below used to swallow the 404
        # raised above and re-report it as a 500. Re-raise it untouched.
        raise
    except Exception as e:
        logger.error(f"Error getting reward profile for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/profile/{agent_id}")
async def create_reward_profile(
    agent_id: str,
    session: SessionDep
) -> Dict[str, Any]:
    """Create a fresh reward profile for `agent_id` and echo its initial state."""
    engine = RewardEngine(session)
    try:
        created = await engine.create_reward_profile(agent_id)
        payload: Dict[str, Any] = {
            "message": "Reward profile created successfully",
            "agent_id": created.agent_id,
            "current_tier": created.current_tier.value,
            "tier_progress": created.tier_progress,
            "created_at": created.created_at.isoformat(),
        }
        return payload
    except Exception as e:
        logger.error(f"Error creating reward profile for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/calculate-and-distribute", response_model=RewardResponse)
async def calculate_and_distribute_reward(
    reward_request: RewardRequest,
    session: SessionDep
) -> RewardResponse:
    """Calculate and distribute a reward for an agent.

    Raises:
        HTTPException: 400 when `reference_date` is not valid ISO-8601
            (bug fix: this client error was previously reported as a 500),
            500 on unexpected engine/database failures.
    """
    reward_engine = RewardEngine(session)
    try:
        # Parse reference date if provided; reject malformed input explicitly.
        reference_date = None
        if reward_request.reference_date:
            try:
                reference_date = datetime.fromisoformat(reward_request.reference_date)
            except ValueError:
                raise HTTPException(
                    status_code=400,
                    detail="reference_date must be an ISO-8601 datetime string"
                )
        # Calculate and distribute reward
        result = await reward_engine.calculate_and_distribute_reward(
            agent_id=reward_request.agent_id,
            reward_type=reward_request.reward_type,
            base_amount=reward_request.base_amount,
            performance_metrics=reward_request.performance_metrics,
            reference_date=reference_date
        )
        return RewardResponse(
            calculation_id=result["calculation_id"],
            distribution_id=result["distribution_id"],
            reward_amount=result["reward_amount"],
            reward_type=result["reward_type"],
            tier_multiplier=result["tier_multiplier"],
            total_bonus=result["total_bonus"],
            status=result["status"]
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error calculating and distributing reward: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/tier-progress/{agent_id}", response_model=TierProgressResponse)
async def get_tier_progress(
    agent_id: str,
    session: SessionDep
) -> TierProgressResponse:
    """Get tier progress information for an agent.

    Bug fixes: `select` is not imported at module level (NameError at
    runtime) — imported locally to match this module's style — and the
    unused `RewardEngine` instantiation is removed. The tier ladder and
    per-tier benefits are expressed as lookup tables instead of if/elif
    chains; the values are unchanged.

    Raises:
        HTTPException: 404 when no reward profile exists, 500 otherwise.
    """
    from sqlmodel import select
    try:
        # Get reward profile
        profile = session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if not profile:
            raise HTTPException(status_code=404, detail="Reward profile not found")
        # Get reputation for trust score
        from ..domain.reputation import AgentReputation
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        # Fall back to a neutral score when no reputation profile exists.
        trust_score = reputation.trust_score if reputation else 500.0
        # Tier ladder: bronze -> silver -> gold -> platinum -> diamond.
        next_tier_map = {
            RewardTier.BRONZE: RewardTier.SILVER,
            RewardTier.SILVER: RewardTier.GOLD,
            RewardTier.GOLD: RewardTier.PLATINUM,
            RewardTier.PLATINUM: RewardTier.DIAMOND,
        }
        current_tier = profile.current_tier
        next_tier = next_tier_map.get(current_tier)  # None at the top tier
        # Calculate requirements met
        requirements_met = {
            "minimum_trust_score": trust_score >= 400,
            "minimum_performance": profile.performance_score >= 3.0,
            "minimum_activity": profile.rewards_distributed >= 1,
            "minimum_earnings": profile.total_earnings >= 0.1
        }
        # Per-tier benefit overrides applied on top of the bronze baseline.
        tier_overrides = {
            RewardTier.SILVER: {
                "max_concurrent_jobs": 2,
                "priority_boost": 1.1,
                "fee_discount": 5.0,
                "support_level": "priority"
            },
            RewardTier.GOLD: {
                "max_concurrent_jobs": 3,
                "priority_boost": 1.2,
                "fee_discount": 10.0,
                "support_level": "priority"
            },
            RewardTier.PLATINUM: {
                "max_concurrent_jobs": 5,
                "priority_boost": 1.5,
                "fee_discount": 15.0,
                "support_level": "premium"
            },
            RewardTier.DIAMOND: {
                "max_concurrent_jobs": 10,
                "priority_boost": 2.0,
                "fee_discount": 20.0,
                "support_level": "premium"
            },
        }
        tier_benefits = {
            "max_concurrent_jobs": 1,
            "priority_boost": 1.0,
            "fee_discount": 0.0,
            "support_level": "basic"
        }
        tier_benefits.update(tier_overrides.get(current_tier, {}))
        return TierProgressResponse(
            agent_id=agent_id,
            current_tier=current_tier.value,
            next_tier=next_tier.value if next_tier else None,
            tier_progress=profile.tier_progress,
            trust_score=trust_score,
            requirements_met=requirements_met,
            benefits=tier_benefits
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting tier progress for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/batch-process", response_model=BatchProcessResponse)
async def batch_process_pending_rewards(
    session: SessionDep,
    limit: int = Query(default=100, ge=1, le=1000, description="Maximum number of rewards to process")
) -> BatchProcessResponse:
    """Process pending reward distributions in batch.

    Bug fix: `session` (no default) previously followed the defaulted
    `limit` Query parameter — a Python SyntaxError. It is moved first;
    FastAPI binds by name/annotation, so the HTTP API is unchanged.

    Raises:
        HTTPException: 500 on any engine/database failure.
    """
    reward_engine = RewardEngine(session)
    try:
        result = await reward_engine.batch_process_pending_rewards(limit)
        return BatchProcessResponse(
            processed=result["processed"],
            failed=result["failed"],
            total=result["total"]
        )
    except Exception as e:
        logger.error(f"Error batch processing rewards: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics", response_model=RewardAnalyticsResponse)
async def get_reward_analytics(
    session: SessionDep,
    period_type: str = Query(default="daily", description="Period type: daily, weekly, monthly"),
    start_date: Optional[str] = Query(default=None, description="Start date (ISO format)"),
    end_date: Optional[str] = Query(default=None, description="End date (ISO format)")
) -> RewardAnalyticsResponse:
    """Get reward system analytics.

    Bug fixes: `session` (no default) previously followed the defaulted
    Query parameters — a Python SyntaxError — and malformed ISO dates were
    reported as 500 instead of a 400 client error.

    Raises:
        HTTPException: 400 for invalid date strings, 500 on engine failures.
    """
    reward_engine = RewardEngine(session)
    try:
        # Parse dates if provided; reject malformed input explicitly.
        start_dt = None
        end_dt = None
        try:
            if start_date:
                start_dt = datetime.fromisoformat(start_date)
            if end_date:
                end_dt = datetime.fromisoformat(end_date)
        except ValueError:
            raise HTTPException(
                status_code=400,
                detail="start_date/end_date must be ISO-8601 datetime strings"
            )
        analytics_data = await reward_engine.get_reward_analytics(
            period_type=period_type,
            start_date=start_dt,
            end_date=end_dt
        )
        return RewardAnalyticsResponse(**analytics_data)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting reward analytics: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/leaderboard")
async def get_reward_leaderboard(
    session: SessionDep,
    tier: Optional[str] = Query(default=None, description="Filter by tier"),
    period: str = Query(default="weekly", description="Period: daily, weekly, monthly"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results")
) -> List[Dict[str, Any]]:
    """Get reward leaderboard ranked by total earnings over a recent period.

    Bug fixes: `session` (no default) previously followed the defaulted
    Query parameters — a Python SyntaxError — and `select` is not imported
    at module level (NameError at runtime); it is imported locally to match
    this module's style.

    Raises:
        HTTPException: 500 on any database failure.
    """
    from sqlmodel import select
    try:
        # Calculate date range based on period (unknown values fall back to weekly).
        if period == "daily":
            start_date = datetime.utcnow() - timedelta(days=1)
        elif period == "weekly":
            start_date = datetime.utcnow() - timedelta(days=7)
        elif period == "monthly":
            start_date = datetime.utcnow() - timedelta(days=30)
        else:
            start_date = datetime.utcnow() - timedelta(days=7)
        # Query reward profiles active within the window.
        query = select(AgentRewardProfile).where(
            AgentRewardProfile.last_activity >= start_date
        )
        if tier:
            query = query.where(AgentRewardProfile.current_tier == tier)
        profiles = session.exec(
            query.order_by(AgentRewardProfile.total_earnings.desc()).limit(limit)
        ).all()
        leaderboard = []
        for rank, profile in enumerate(profiles, 1):
            leaderboard.append({
                "rank": rank,
                "agent_id": profile.agent_id,
                "current_tier": profile.current_tier.value,
                "total_earnings": profile.total_earnings,
                "lifetime_earnings": profile.lifetime_earnings,
                "rewards_distributed": profile.rewards_distributed,
                "current_streak": profile.current_streak,
                "performance_score": profile.performance_score
            })
        return leaderboard
    except Exception as e:
        logger.error(f"Error getting reward leaderboard: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/tiers")
async def get_reward_tiers(
    session: SessionDep
) -> List[Dict[str, Any]]:
    """Get active reward tier configurations, sorted by minimum trust score.

    Bug fix: `select` is not imported at module level in this file, so this
    endpoint raised NameError at runtime; it is imported locally, matching
    the function-scope import style already used for RewardTierConfig.

    Raises:
        HTTPException: 500 on any database failure.
    """
    from sqlmodel import select
    try:
        from ..domain.rewards import RewardTierConfig
        tier_configs = session.exec(
            select(RewardTierConfig).where(RewardTierConfig.is_active == True)
        ).all()
        tiers = []
        for config in tier_configs:
            tiers.append({
                "tier": config.tier.value,
                "min_trust_score": config.min_trust_score,
                "base_multiplier": config.base_multiplier,
                "performance_bonus_multiplier": config.performance_bonus_multiplier,
                "max_concurrent_jobs": config.max_concurrent_jobs,
                "priority_boost": config.priority_boost,
                "fee_discount": config.fee_discount,
                "support_level": config.support_level,
                "tier_requirements": config.tier_requirements,
                "tier_benefits": config.tier_benefits
            })
        # Lowest-entry tier first.
        return sorted(tiers, key=lambda x: x["min_trust_score"])
    except Exception as e:
        logger.error(f"Error getting reward tiers: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/milestones/{agent_id}")
async def get_agent_milestones(
    agent_id: str,
    session: SessionDep,
    include_completed: bool = Query(default=True, description="Include completed milestones"),
) -> List[MilestoneResponse]:
    """Return the milestones tracked for an agent, newest first.

    Args:
        agent_id: Agent whose milestones to list (path parameter).
        session: Injected database session.
        include_completed: When False, only unfinished milestones are returned.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    # NOTE: `session` was moved ahead of `include_completed`; a non-default
    # parameter after a defaulted one is a Python SyntaxError. FastAPI
    # injects by name, so the HTTP interface is unchanged.
    try:
        from ..domain.rewards import RewardMilestone
        query = select(RewardMilestone).where(RewardMilestone.agent_id == agent_id)
        if not include_completed:
            query = query.where(RewardMilestone.is_completed == False)
        milestones = session.exec(
            query.order_by(RewardMilestone.created_at.desc())
        ).all()
        return [
            MilestoneResponse(
                id=milestone.id,
                agent_id=milestone.agent_id,
                milestone_type=milestone.milestone_type,
                milestone_name=milestone.milestone_name,
                target_value=milestone.target_value,
                current_value=milestone.current_value,
                progress_percentage=milestone.progress_percentage,
                reward_amount=milestone.reward_amount,
                is_completed=milestone.is_completed,
                is_claimed=milestone.is_claimed,
                # Optional timestamps are serialized as ISO strings or null.
                completed_at=milestone.completed_at.isoformat() if milestone.completed_at else None,
                claimed_at=milestone.claimed_at.isoformat() if milestone.claimed_at else None
            )
            for milestone in milestones
        ]
    except Exception as e:
        logger.error(f"Error getting milestones for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/distributions/{agent_id}")
async def get_reward_distributions(
    agent_id: str,
    session: SessionDep,
    limit: int = Query(default=20, ge=1, le=100),
    status: Optional[str] = Query(default=None, description="Filter by status"),
) -> List[Dict[str, Any]]:
    """Return the reward distribution history for an agent, newest first.

    Args:
        agent_id: Agent whose distributions to list (path parameter).
        session: Injected database session.
        limit: Maximum number of rows to return (1-100).
        status: Optional distribution status to filter by.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    # NOTE: `session` was moved ahead of the defaulted query parameters;
    # a non-default parameter after a defaulted one is a Python SyntaxError.
    # FastAPI injects by name, so the HTTP interface is unchanged.
    try:
        from ..domain.rewards import RewardDistribution
        query = select(RewardDistribution).where(RewardDistribution.agent_id == agent_id)
        if status:
            query = query.where(RewardDistribution.status == status)
        distributions = session.exec(
            query.order_by(RewardDistribution.created_at.desc()).limit(limit)
        ).all()
        return [
            {
                "id": distribution.id,
                "reward_amount": distribution.reward_amount,
                "reward_type": distribution.reward_type.value,
                "status": distribution.status.value,
                "distribution_method": distribution.distribution_method,
                "transaction_id": distribution.transaction_id,
                "transaction_status": distribution.transaction_status,
                "created_at": distribution.created_at.isoformat(),
                "processed_at": distribution.processed_at.isoformat() if distribution.processed_at else None,
                "error_message": distribution.error_message
            }
            for distribution in distributions
        ]
    except Exception as e:
        logger.error(f"Error getting distributions for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/simulate-reward")
async def simulate_reward_calculation(
    reward_request: RewardRequest,
    session: SessionDep
) -> Dict[str, Any]:
    """Compute what a reward *would* be without distributing anything.

    Runs the same calculator as a real payout and returns the full
    breakdown (multipliers and bonuses) with ``"simulation": True``.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    reward_engine = RewardEngine(session)
    try:
        # Ensure reward profile exists
        # (presumably idempotent when the profile already exists — the
        # engine is defined elsewhere; verify if this can double-create.)
        await reward_engine.create_reward_profile(reward_request.agent_id)
        # Calculate reward only (no distribution)
        reward_calculation = reward_engine.calculator.calculate_total_reward(
            reward_request.agent_id,
            reward_request.base_amount,
            reward_request.performance_metrics,
            session
        )
        # Echo the inputs plus every component of the calculation so the
        # caller can see how the total was derived.
        return {
            "agent_id": reward_request.agent_id,
            "reward_type": reward_request.reward_type.value,
            "base_amount": reward_request.base_amount,
            "tier_multiplier": reward_calculation["tier_multiplier"],
            "performance_bonus": reward_calculation["performance_bonus"],
            "loyalty_bonus": reward_calculation["loyalty_bonus"],
            "referral_bonus": reward_calculation["referral_bonus"],
            "milestone_bonus": reward_calculation["milestone_bonus"],
            "effective_multiplier": reward_calculation["effective_multiplier"],
            "total_reward": reward_calculation["total_reward"],
            "trust_score": reward_calculation["trust_score"],
            "simulation": True
        }
    except Exception as e:
        logger.error(f"Error simulating reward calculation: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")

View File

@@ -0,0 +1,722 @@
"""
P2P Trading Protocol API Endpoints
REST API for agent-to-agent trading, matching, negotiation, and settlement
"""
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from uuid import uuid4

from fastapi import APIRouter, HTTPException, Depends, Query
from pydantic import BaseModel, Field
from sqlalchemy import or_
from sqlmodel import select

from ..storage import SessionDep
from ..services.trading_service import P2PTradingProtocol
from ..domain.trading import (
    TradeRequest, TradeMatch, TradeNegotiation, TradeAgreement, TradeSettlement,
    TradeStatus, TradeType, NegotiationStatus, SettlementType
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/v1/trading", tags=["trading"])
# Pydantic models for API requests/responses
class TradeRequestRequest(BaseModel):
    """Request payload for creating a trade request.

    Timestamp fields (``start_time``, ``end_time``, ``expires_at``) are
    ISO-8601 strings parsed server-side; ``budget_range`` carries the
    min/max budget bounds as a mapping of floats.
    """
    buyer_agent_id: str
    trade_type: TradeType
    title: str = Field(..., max_length=200)
    description: str = Field(default="", max_length=1000)
    requirements: Dict[str, Any] = Field(..., description="Trade requirements and specifications")
    budget_range: Dict[str, float] = Field(..., description="Budget range with min and max")
    start_time: Optional[str] = Field(default=None, description="Start time (ISO format)")
    end_time: Optional[str] = Field(default=None, description="End time (ISO format)")
    duration_hours: Optional[int] = Field(default=None, description="Duration in hours")
    urgency_level: str = Field(default="normal", description="urgency level")
    preferred_regions: List[str] = Field(default_factory=list, description="Preferred regions")
    excluded_regions: List[str] = Field(default_factory=list, description="Excluded regions")
    service_level_required: str = Field(default="standard", description="Service level required")
    tags: List[str] = Field(default_factory=list, description="Trade tags")
    expires_at: Optional[str] = Field(default=None, description="Expiration time (ISO format)")
class TradeRequestResponse(BaseModel):
    """Serialized trade request as returned by the API.

    Timestamps are ISO-8601 strings; ``expires_at`` may be null.
    """
    request_id: str
    buyer_agent_id: str
    trade_type: str
    title: str
    description: str
    requirements: Dict[str, Any]
    budget_range: Dict[str, float]
    status: str
    match_count: int
    best_match_score: float
    created_at: str
    updated_at: str
    expires_at: Optional[str]
class TradeMatchResponse(BaseModel):
    """Serialized buyer/seller match, including the per-dimension
    compatibility scores that make up the overall ``match_score``.
    """
    match_id: str
    request_id: str
    buyer_agent_id: str
    seller_agent_id: str
    match_score: float
    confidence_level: float
    price_compatibility: float
    specification_compatibility: float
    timing_compatibility: float
    reputation_compatibility: float
    geographic_compatibility: float
    seller_offer: Dict[str, Any]
    proposed_terms: Dict[str, Any]
    status: str
    created_at: str
    expires_at: Optional[str]
class NegotiationRequest(BaseModel):
    """Request payload for starting a negotiation over an existing match."""
    match_id: str
    initiator: str = Field(..., description="negotiation initiator: buyer or seller")
    strategy: str = Field(default="balanced", description="negotiation strategy")
class NegotiationResponse(BaseModel):
    """Serialized negotiation state; ``started_at``/``expires_at`` are
    ISO-8601 strings or null."""
    negotiation_id: str
    match_id: str
    buyer_agent_id: str
    seller_agent_id: str
    status: str
    negotiation_round: int
    current_terms: Dict[str, Any]
    negotiation_strategy: str
    auto_accept_threshold: float
    created_at: str
    started_at: Optional[str]
    expires_at: Optional[str]
class AgreementResponse(BaseModel):
    """Serialized trade agreement produced by a successful negotiation."""
    agreement_id: str
    negotiation_id: str
    buyer_agent_id: str
    seller_agent_id: str
    trade_type: str
    title: str
    agreed_terms: Dict[str, Any]
    total_price: float
    settlement_type: str
    status: str
    created_at: str
    signed_at: str
    starts_at: Optional[str]
    ends_at: Optional[str]
class SettlementResponse(BaseModel):
    """Serialized settlement record for an agreement, including the
    platform fee taken and the net amount payable to the seller."""
    settlement_id: str
    agreement_id: str
    settlement_type: str
    total_amount: float
    currency: str
    payment_status: str
    transaction_id: Optional[str]
    platform_fee: float
    net_amount_seller: float
    status: str
    initiated_at: str
    processed_at: Optional[str]
    completed_at: Optional[str]
class TradingSummaryResponse(BaseModel):
    """Aggregated trading-activity summary for a single agent."""
    agent_id: str
    trade_requests: int
    trade_matches: int
    negotiations: int
    agreements: int
    success_rate: float
    average_match_score: float
    total_trade_volume: float
    recent_activity: Dict[str, Any]
# API Endpoints
@router.post("/requests", response_model=TradeRequestResponse)
async def create_trade_request(
    request_data: TradeRequestRequest,
    session: SessionDep
) -> TradeRequestResponse:
    """Create a new trade request and return its serialized form.

    Args:
        request_data: Trade request payload; timestamp fields are
            ISO-8601 strings.
        session: Injected database session.

    Raises:
        HTTPException: 400 for malformed timestamp fields, 500 otherwise.
    """
    trading_protocol = P2PTradingProtocol(session)

    def _parse_ts(field_name: str, raw: Optional[str]) -> Optional[datetime]:
        # Malformed client input used to surface as a 500 because the
        # ValueError from fromisoformat was swallowed by the generic
        # handler below; report it as a 400 instead.
        if not raw:
            return None
        try:
            return datetime.fromisoformat(raw)
        except ValueError:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid {field_name}: expected an ISO-8601 timestamp"
            )

    start_time = _parse_ts("start_time", request_data.start_time)
    end_time = _parse_ts("end_time", request_data.end_time)
    expires_at = _parse_ts("expires_at", request_data.expires_at)
    try:
        # Create trade request
        trade_request = await trading_protocol.create_trade_request(
            buyer_agent_id=request_data.buyer_agent_id,
            trade_type=request_data.trade_type,
            title=request_data.title,
            description=request_data.description,
            requirements=request_data.requirements,
            budget_range=request_data.budget_range,
            start_time=start_time,
            end_time=end_time,
            duration_hours=request_data.duration_hours,
            urgency_level=request_data.urgency_level,
            preferred_regions=request_data.preferred_regions,
            excluded_regions=request_data.excluded_regions,
            service_level_required=request_data.service_level_required,
            tags=request_data.tags,
            expires_at=expires_at
        )
        return TradeRequestResponse(
            request_id=trade_request.request_id,
            buyer_agent_id=trade_request.buyer_agent_id,
            trade_type=trade_request.trade_type.value,
            title=trade_request.title,
            description=trade_request.description,
            requirements=trade_request.requirements,
            budget_range=trade_request.budget_range,
            status=trade_request.status.value,
            match_count=trade_request.match_count,
            best_match_score=trade_request.best_match_score,
            created_at=trade_request.created_at.isoformat(),
            updated_at=trade_request.updated_at.isoformat(),
            expires_at=trade_request.expires_at.isoformat() if trade_request.expires_at else None
        )
    except Exception as e:
        logger.error(f"Error creating trade request: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/requests/{request_id}", response_model=TradeRequestResponse)
async def get_trade_request(
    request_id: str,
    session: SessionDep
) -> TradeRequestResponse:
    """Fetch a single trade request by id.

    Raises:
        HTTPException: 404 when the request does not exist, 500 otherwise.
    """
    try:
        trade_request = session.exec(
            select(TradeRequest).where(TradeRequest.request_id == request_id)
        ).first()
        if not trade_request:
            raise HTTPException(status_code=404, detail="Trade request not found")
        return TradeRequestResponse(
            request_id=trade_request.request_id,
            buyer_agent_id=trade_request.buyer_agent_id,
            trade_type=trade_request.trade_type.value,
            title=trade_request.title,
            description=trade_request.description,
            requirements=trade_request.requirements,
            budget_range=trade_request.budget_range,
            status=trade_request.status.value,
            match_count=trade_request.match_count,
            best_match_score=trade_request.best_match_score,
            created_at=trade_request.created_at.isoformat(),
            updated_at=trade_request.updated_at.isoformat(),
            expires_at=trade_request.expires_at.isoformat() if trade_request.expires_at else None
        )
    except HTTPException:
        # Re-raise the 404 untouched so it is not converted into a 500 below.
        raise
    except Exception as e:
        logger.error(f"Error getting trade request {request_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/requests/{request_id}/matches")
async def find_matches(
    request_id: str,
    session: SessionDep
) -> List[str]:
    """Run matchmaking for a trade request and return the resulting ids."""
    protocol = P2PTradingProtocol(session)
    try:
        return await protocol.find_matches(request_id)
    except ValueError as e:
        # The protocol signals an unknown request id with ValueError.
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error finding matches for request {request_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/requests/{request_id}/matches")
async def get_trade_matches(
    request_id: str,
    session: SessionDep
) -> List[TradeMatchResponse]:
    """List the matches found for a trade request, best score first.

    Returns an empty list (not 404) when the request has no matches.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    try:
        matches = session.exec(
            select(TradeMatch).where(TradeMatch.request_id == request_id)
            .order_by(TradeMatch.match_score.desc())
        ).all()
        return [
            TradeMatchResponse(
                match_id=match.match_id,
                request_id=match.request_id,
                buyer_agent_id=match.buyer_agent_id,
                seller_agent_id=match.seller_agent_id,
                match_score=match.match_score,
                confidence_level=match.confidence_level,
                price_compatibility=match.price_compatibility,
                specification_compatibility=match.specification_compatibility,
                timing_compatibility=match.timing_compatibility,
                reputation_compatibility=match.reputation_compatibility,
                geographic_compatibility=match.geographic_compatibility,
                seller_offer=match.seller_offer,
                proposed_terms=match.proposed_terms,
                status=match.status.value,
                created_at=match.created_at.isoformat(),
                expires_at=match.expires_at.isoformat() if match.expires_at else None
            )
            for match in matches
        ]
    except Exception as e:
        logger.error(f"Error getting trade matches for request {request_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/negotiations", response_model=NegotiationResponse)
async def initiate_negotiation(
    negotiation_data: NegotiationRequest,
    session: SessionDep
) -> NegotiationResponse:
    """Start a negotiation over an existing match.

    Raises:
        HTTPException: 404 when the match id is unknown (the protocol
            raises ValueError), 500 otherwise.
    """
    trading_protocol = P2PTradingProtocol(session)
    try:
        negotiation = await trading_protocol.initiate_negotiation(
            match_id=negotiation_data.match_id,
            initiator=negotiation_data.initiator,
            strategy=negotiation_data.strategy
        )
        return NegotiationResponse(
            negotiation_id=negotiation.negotiation_id,
            match_id=negotiation.match_id,
            buyer_agent_id=negotiation.buyer_agent_id,
            seller_agent_id=negotiation.seller_agent_id,
            status=negotiation.status.value,
            negotiation_round=negotiation.negotiation_round,
            current_terms=negotiation.current_terms,
            negotiation_strategy=negotiation.negotiation_strategy,
            auto_accept_threshold=negotiation.auto_accept_threshold,
            created_at=negotiation.created_at.isoformat(),
            started_at=negotiation.started_at.isoformat() if negotiation.started_at else None,
            expires_at=negotiation.expires_at.isoformat() if negotiation.expires_at else None
        )
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))
    except Exception as e:
        logger.error(f"Error initiating negotiation: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/negotiations/{negotiation_id}", response_model=NegotiationResponse)
async def get_negotiation(
    negotiation_id: str,
    session: SessionDep
) -> NegotiationResponse:
    """Fetch a single negotiation by id.

    Raises:
        HTTPException: 404 when the negotiation does not exist, 500 otherwise.
    """
    try:
        negotiation = session.exec(
            select(TradeNegotiation).where(TradeNegotiation.negotiation_id == negotiation_id)
        ).first()
        if not negotiation:
            raise HTTPException(status_code=404, detail="Negotiation not found")
        return NegotiationResponse(
            negotiation_id=negotiation.negotiation_id,
            match_id=negotiation.match_id,
            buyer_agent_id=negotiation.buyer_agent_id,
            seller_agent_id=negotiation.seller_agent_id,
            status=negotiation.status.value,
            negotiation_round=negotiation.negotiation_round,
            current_terms=negotiation.current_terms,
            negotiation_strategy=negotiation.negotiation_strategy,
            auto_accept_threshold=negotiation.auto_accept_threshold,
            created_at=negotiation.created_at.isoformat(),
            started_at=negotiation.started_at.isoformat() if negotiation.started_at else None,
            expires_at=negotiation.expires_at.isoformat() if negotiation.expires_at else None
        )
    except HTTPException:
        # Re-raise the 404 untouched so it is not converted into a 500 below.
        raise
    except Exception as e:
        logger.error(f"Error getting negotiation {negotiation_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/matches/{match_id}")
async def get_trade_match(
    match_id: str,
    session: SessionDep
) -> TradeMatchResponse:
    """Fetch a single trade match by id.

    Raises:
        HTTPException: 404 when the match does not exist, 500 otherwise.
    """
    try:
        match = session.exec(
            select(TradeMatch).where(TradeMatch.match_id == match_id)
        ).first()
        if not match:
            raise HTTPException(status_code=404, detail="Trade match not found")
        return TradeMatchResponse(
            match_id=match.match_id,
            request_id=match.request_id,
            buyer_agent_id=match.buyer_agent_id,
            seller_agent_id=match.seller_agent_id,
            match_score=match.match_score,
            confidence_level=match.confidence_level,
            price_compatibility=match.price_compatibility,
            specification_compatibility=match.specification_compatibility,
            timing_compatibility=match.timing_compatibility,
            reputation_compatibility=match.reputation_compatibility,
            geographic_compatibility=match.geographic_compatibility,
            seller_offer=match.seller_offer,
            proposed_terms=match.proposed_terms,
            status=match.status.value,
            created_at=match.created_at.isoformat(),
            expires_at=match.expires_at.isoformat() if match.expires_at else None
        )
    except HTTPException:
        # Re-raise the 404 untouched so it is not converted into a 500 below.
        raise
    except Exception as e:
        logger.error(f"Error getting trade match {match_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/agents/{agent_id}/summary", response_model=TradingSummaryResponse)
async def get_trading_summary(
    agent_id: str,
    session: SessionDep
) -> TradingSummaryResponse:
    """Return the aggregated trading-activity summary for one agent."""
    protocol = P2PTradingProtocol(session)
    try:
        summary_data = await protocol.get_trading_summary(agent_id)
        return TradingSummaryResponse(**summary_data)
    except Exception as e:
        logger.error(f"Error getting trading summary for {agent_id}: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/requests")
async def list_trade_requests(
    session: SessionDep,
    agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
    trade_type: Optional[str] = Query(default=None, description="Filter by trade type"),
    status: Optional[str] = Query(default=None, description="Filter by status"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
) -> List[TradeRequestResponse]:
    """List trade requests, newest first, with optional filters.

    Args:
        session: Injected database session.
        agent_id: Restrict to requests created by this buyer.
        trade_type: Restrict to a single trade type.
        status: Restrict to a single request status.
        limit: Maximum number of rows to return (1-100).

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    # NOTE: `session` is listed first because it has no default value;
    # placing it after the defaulted Query parameters (as before) is a
    # Python SyntaxError. FastAPI injects by name, so the HTTP interface
    # is unchanged.
    try:
        query = select(TradeRequest)
        if agent_id:
            query = query.where(TradeRequest.buyer_agent_id == agent_id)
        if trade_type:
            query = query.where(TradeRequest.trade_type == trade_type)
        if status:
            query = query.where(TradeRequest.status == status)
        requests = session.exec(
            query.order_by(TradeRequest.created_at.desc()).limit(limit)
        ).all()
        return [
            TradeRequestResponse(
                request_id=request.request_id,
                buyer_agent_id=request.buyer_agent_id,
                trade_type=request.trade_type.value,
                title=request.title,
                description=request.description,
                requirements=request.requirements,
                budget_range=request.budget_range,
                status=request.status.value,
                match_count=request.match_count,
                best_match_score=request.best_match_score,
                created_at=request.created_at.isoformat(),
                updated_at=request.updated_at.isoformat(),
                expires_at=request.expires_at.isoformat() if request.expires_at else None
            )
            for request in requests
        ]
    except Exception as e:
        logger.error(f"Error listing trade requests: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/matches")
async def list_trade_matches(
    session: SessionDep,
    agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
    min_score: Optional[float] = Query(default=None, description="Minimum match score"),
    status: Optional[str] = Query(default=None, description="Filter by status"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
) -> List[TradeMatchResponse]:
    """List trade matches, best score first, with optional filters.

    Args:
        session: Injected database session.
        agent_id: Match either side (buyer or seller) of the trade.
        min_score: Only return matches with at least this score.
        status: Restrict to a single match status.
        limit: Maximum number of rows to return (1-100).

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    # NOTE: `session` is listed first because it has no default value;
    # placing it after the defaulted Query parameters (as before) is a
    # Python SyntaxError. FastAPI injects by name, so the HTTP interface
    # is unchanged.
    try:
        query = select(TradeMatch)
        if agent_id:
            query = query.where(
                or_(
                    TradeMatch.buyer_agent_id == agent_id,
                    TradeMatch.seller_agent_id == agent_id
                )
            )
        # Explicit None check: the previous truthiness test silently
        # ignored a legitimate min_score of 0.0.
        if min_score is not None:
            query = query.where(TradeMatch.match_score >= min_score)
        if status:
            query = query.where(TradeMatch.status == status)
        matches = session.exec(
            query.order_by(TradeMatch.match_score.desc()).limit(limit)
        ).all()
        return [
            TradeMatchResponse(
                match_id=match.match_id,
                request_id=match.request_id,
                buyer_agent_id=match.buyer_agent_id,
                seller_agent_id=match.seller_agent_id,
                match_score=match.match_score,
                confidence_level=match.confidence_level,
                price_compatibility=match.price_compatibility,
                specification_compatibility=match.specification_compatibility,
                timing_compatibility=match.timing_compatibility,
                reputation_compatibility=match.reputation_compatibility,
                geographic_compatibility=match.geographic_compatibility,
                seller_offer=match.seller_offer,
                proposed_terms=match.proposed_terms,
                status=match.status.value,
                created_at=match.created_at.isoformat(),
                expires_at=match.expires_at.isoformat() if match.expires_at else None
            )
            for match in matches
        ]
    except Exception as e:
        logger.error(f"Error listing trade matches: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/negotiations")
async def list_negotiations(
    session: SessionDep,
    agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
    status: Optional[str] = Query(default=None, description="Filter by status"),
    strategy: Optional[str] = Query(default=None, description="Filter by strategy"),
    limit: int = Query(default=50, ge=1, le=100, description="Number of results"),
) -> List[NegotiationResponse]:
    """List negotiations, newest first, with optional filters.

    Args:
        session: Injected database session.
        agent_id: Match either side (buyer or seller) of the negotiation.
        status: Restrict to a single negotiation status.
        strategy: Restrict to a single negotiation strategy.
        limit: Maximum number of rows to return (1-100).

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    # NOTE: `session` is listed first because it has no default value;
    # placing it after the defaulted Query parameters (as before) is a
    # Python SyntaxError. FastAPI injects by name, so the HTTP interface
    # is unchanged.
    try:
        query = select(TradeNegotiation)
        if agent_id:
            query = query.where(
                or_(
                    TradeNegotiation.buyer_agent_id == agent_id,
                    TradeNegotiation.seller_agent_id == agent_id
                )
            )
        if status:
            query = query.where(TradeNegotiation.status == status)
        if strategy:
            query = query.where(TradeNegotiation.negotiation_strategy == strategy)
        negotiations = session.exec(
            query.order_by(TradeNegotiation.created_at.desc()).limit(limit)
        ).all()
        return [
            NegotiationResponse(
                negotiation_id=negotiation.negotiation_id,
                match_id=negotiation.match_id,
                buyer_agent_id=negotiation.buyer_agent_id,
                seller_agent_id=negotiation.seller_agent_id,
                status=negotiation.status.value,
                negotiation_round=negotiation.negotiation_round,
                current_terms=negotiation.current_terms,
                negotiation_strategy=negotiation.negotiation_strategy,
                auto_accept_threshold=negotiation.auto_accept_threshold,
                created_at=negotiation.created_at.isoformat(),
                started_at=negotiation.started_at.isoformat() if negotiation.started_at else None,
                expires_at=negotiation.expires_at.isoformat() if negotiation.expires_at else None
            )
            for negotiation in negotiations
        ]
    except Exception as e:
        logger.error(f"Error listing negotiations: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.get("/analytics")
async def get_trading_analytics(
    session: SessionDep,
    period_type: str = Query(default="daily", description="Period type: daily, weekly, monthly"),
    start_date: Optional[str] = Query(default=None, description="Start date (ISO format)"),
    end_date: Optional[str] = Query(default=None, description="End date (ISO format)"),
) -> Dict[str, Any]:
    """Return P2P trading analytics for the requested window.

    The window defaults to the last 30 days when no dates are supplied.
    NOTE: the payload below is currently mock data; a real implementation
    would query the TradingAnalytics table.

    Raises:
        HTTPException: 400 for malformed dates, 500 otherwise.
    """
    # NOTE: `session` is listed first because it has no default value;
    # placing it after the defaulted Query parameters (as before) is a
    # Python SyntaxError. FastAPI injects by name, so the HTTP interface
    # is unchanged.
    #
    # Parse user-supplied dates outside the generic handler so malformed
    # input yields a 400 instead of being masked as a 500.
    try:
        start_dt = datetime.fromisoformat(start_date) if start_date else None
        end_dt = datetime.fromisoformat(end_date) if end_date else None
    except ValueError:
        raise HTTPException(
            status_code=400,
            detail="start_date/end_date must be ISO-8601 timestamps"
        )
    if start_dt is None:
        start_dt = datetime.utcnow() - timedelta(days=30)
    if end_dt is None:
        end_dt = datetime.utcnow()
    try:
        # Get analytics data (mock implementation)
        # In real implementation, this would query TradingAnalytics table
        analytics = {
            "period_type": period_type,
            "start_date": start_dt.isoformat(),
            "end_date": end_dt.isoformat(),
            "total_trades": 150,
            "completed_trades": 120,
            "failed_trades": 15,
            "cancelled_trades": 15,
            "total_trade_volume": 7500.0,
            "average_trade_value": 50.0,
            "success_rate": 80.0,
            "trade_type_distribution": {
                "ai_power": 60,
                "compute_resources": 30,
                "data_services": 25,
                "model_services": 20,
                "inference_tasks": 15
            },
            "active_buyers": 45,
            "active_sellers": 38,
            "new_agents": 12,
            "average_matching_time": 15.5,  # minutes
            "average_negotiation_time": 45.2,  # minutes
            "average_settlement_time": 8.7,  # minutes
            "regional_distribution": {
                "us-east": 35,
                "us-west": 28,
                "eu-central": 22,
                "ap-southeast": 18,
                "ap-northeast": 15
            }
        }
        return analytics
    except Exception as e:
        logger.error(f"Error getting trading analytics: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
@router.post("/simulate-match")
async def simulate_trade_matching(
    request_data: TradeRequestRequest,
    session: SessionDep
) -> Dict[str, Any]:
    """Dry-run the matching engine against the current seller pool.

    Builds an in-memory trade request (never persisted), scores it
    against available sellers and returns the top matches.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    trading_protocol = P2PTradingProtocol(session)
    try:
        # Create temporary trade request for simulation
        # (prefixed "sim_" and never added to the session, so nothing
        # is written to the database).
        temp_request = TradeRequest(
            request_id=f"sim_{uuid4().hex[:8]}",
            buyer_agent_id=request_data.buyer_agent_id,
            trade_type=request_data.trade_type,
            title=request_data.title,
            description=request_data.description,
            requirements=request_data.requirements,
            specifications=request_data.requirements.get('specifications', {}),
            budget_range=request_data.budget_range,
            preferred_regions=request_data.preferred_regions,
            excluded_regions=request_data.excluded_regions,
            service_level_required=request_data.service_level_required
        )
        # Get available sellers
        seller_offers = await trading_protocol.get_available_sellers(temp_request)
        seller_reputations = await trading_protocol.get_seller_reputations(
            [offer['agent_id'] for offer in seller_offers]
        )
        # Find matches
        matches = trading_protocol.matching_engine.find_matches(
            temp_request, seller_offers, seller_reputations
        )
        return {
            "simulation": True,
            "request_details": {
                "trade_type": request_data.trade_type.value,
                "budget_range": request_data.budget_range,
                "requirements": request_data.requirements
            },
            "available_sellers": len(seller_offers),
            "matches_found": len(matches),
            "best_matches": matches[:5],  # Top 5 matches
            # Guarded division: 0.0 when no matches were found.
            "average_match_score": sum(m['match_score'] for m in matches) / len(matches) if matches else 0.0
        }
    except Exception as e:
        logger.error(f"Error simulating trade matching: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,307 @@
"""
Community and Developer Ecosystem Services
Services for managing OpenClaw developer tools, SDKs, and third-party solutions
"""
from typing import Optional, List, Dict, Any
from sqlmodel import Session, select
from datetime import datetime
import logging
from uuid import uuid4
from ..domain.community import (
DeveloperProfile, AgentSolution, InnovationLab,
CommunityPost, Hackathon, DeveloperTier, SolutionStatus, LabStatus
)
logger = logging.getLogger(__name__)
class DeveloperEcosystemService:
    """Service for managing the developer ecosystem and SDKs."""

    def __init__(self, session: Session):
        self.session = session

    async def create_developer_profile(self, user_id: str, username: str, bio: str = None, skills: List[str] = None) -> DeveloperProfile:
        """Create, persist and return a new developer profile."""
        new_profile = DeveloperProfile(
            user_id=user_id,
            username=username,
            bio=bio,
            skills=skills or [],
        )
        self.session.add(new_profile)
        self.session.commit()
        self.session.refresh(new_profile)
        return new_profile

    async def get_developer_profile(self, developer_id: str) -> Optional[DeveloperProfile]:
        """Look up a developer profile by id; None when absent."""
        statement = select(DeveloperProfile).where(
            DeveloperProfile.developer_id == developer_id
        )
        return self.session.exec(statement).first()

    async def get_sdk_release_info(self) -> Dict[str, Any]:
        """Return latest SDK release metadata (currently mocked)."""
        return {
            "latest_version": "v1.2.0",
            "release_date": datetime.utcnow().isoformat(),
            "supported_languages": ["python", "typescript", "rust"],
            "download_urls": {
                "python": "pip install aitbc-agent-sdk",
                "typescript": "npm install @aitbc/agent-sdk"
            },
            "features": [
                "Advanced Meta-Learning Integration",
                "Cross-Domain Capability Synthesizer",
                "Distributed Task Processing Client",
                "Decentralized Governance Modules"
            ]
        }

    async def update_developer_reputation(self, developer_id: str, score_delta: float) -> DeveloperProfile:
        """Apply a reputation delta and update the tier from the new score.

        Raises:
            ValueError: when the developer does not exist.
        """
        profile = await self.get_developer_profile(developer_id)
        if not profile:
            raise ValueError(f"Developer {developer_id} not found")
        profile.reputation_score += score_delta
        # Highest threshold wins; scores below 100 leave the tier untouched.
        tier_ladder = (
            (1000, DeveloperTier.MASTER),
            (500, DeveloperTier.EXPERT),
            (100, DeveloperTier.BUILDER),
        )
        for threshold, tier in tier_ladder:
            if profile.reputation_score >= threshold:
                profile.tier = tier
                break
        self.session.add(profile)
        self.session.commit()
        self.session.refresh(profile)
        return profile
class ThirdPartySolutionService:
    """Service for managing the third-party agent solutions marketplace."""

    def __init__(self, session: Session):
        self.session = session

    async def publish_solution(self, developer_id: str, data: Dict[str, Any]) -> AgentSolution:
        """Publish a new third-party agent solution.

        Free solutions are auto-published; paid ones remain in REVIEW
        until manually approved.
        """
        solution = AgentSolution(
            developer_id=developer_id,
            title=data.get('title'),
            description=data.get('description'),
            version=data.get('version', '1.0.0'),
            capabilities=data.get('capabilities', []),
            frameworks=data.get('frameworks', []),
            price_model=data.get('price_model', 'free'),
            price_amount=data.get('price_amount', 0.0),
            solution_metadata=data.get('metadata', {}),
            status=SolutionStatus.REVIEW
        )
        # Auto-publish if free, otherwise manual review required
        if solution.price_model == 'free':
            solution.status = SolutionStatus.PUBLISHED
            solution.published_at = datetime.utcnow()
        self.session.add(solution)
        self.session.commit()
        self.session.refresh(solution)
        return solution

    async def list_published_solutions(self, category: str = None, limit: int = 50) -> List[AgentSolution]:
        """List published solutions, optionally filtered by capability/category.

        The category filter runs in Python because capabilities live in a
        JSON column; a PostgreSQL deployment could push this into SQL.
        """
        query = select(AgentSolution).where(AgentSolution.status == SolutionStatus.PUBLISHED)
        solutions = self.session.exec(query).all()
        # BUG FIX: the limit used to be applied in SQL *before* the
        # in-Python category filter, so a filtered listing could return
        # fewer than `limit` rows even when more matches existed.
        # Filter first, then cap.
        if category:
            solutions = [s for s in solutions if category in s.capabilities]
        return solutions[:limit]

    async def purchase_solution(self, buyer_id: str, solution_id: str) -> Dict[str, Any]:
        """Purchase or download a third-party solution.

        Increments the download counter, credits the developer for paid
        solutions, and returns the install command plus access token.

        Raises:
            ValueError: when the solution is missing or not published.
        """
        solution = self.session.exec(
            select(AgentSolution).where(AgentSolution.solution_id == solution_id)
        ).first()
        if not solution or solution.status != SolutionStatus.PUBLISHED:
            raise ValueError("Solution not found or not available")
        # Update download count
        solution.downloads += 1
        self.session.add(solution)
        # Update developer earnings if paid
        if solution.price_amount > 0:
            dev = self.session.exec(
                select(DeveloperProfile).where(DeveloperProfile.developer_id == solution.developer_id)
            ).first()
            if dev:
                dev.total_earnings += solution.price_amount
                self.session.add(dev)
        self.session.commit()
        # BUG FIX: the returned access_token and the token embedded in
        # installation_cmd used to be two *different* uuid4 values, so the
        # issued token never matched the command. Generate once, reuse.
        access_token = f"acc_{uuid4().hex}"
        return {
            "success": True,
            "solution_id": solution_id,
            "access_token": access_token,
            "installation_cmd": f"aitbc install {solution_id} --token {access_token}"
        }
class InnovationLabService:
    """Service for managing agent innovation labs and research programs."""

    def __init__(self, session: Session):
        self.session = session

    async def propose_lab(self, researcher_id: str, data: Dict[str, Any]) -> InnovationLab:
        """Propose a new innovation lab / research program."""
        lab = InnovationLab(
            title=data.get('title'),
            description=data.get('description'),
            research_area=data.get('research_area'),
            lead_researcher_id=researcher_id,
            funding_goal=data.get('funding_goal', 0.0),
            milestones=data.get('milestones', [])
        )
        self.session.add(lab)
        self.session.commit()
        self.session.refresh(lab)
        return lab

    async def join_lab(self, lab_id: str, developer_id: str) -> InnovationLab:
        """Add a developer to a lab's member list (idempotent).

        Raises:
            ValueError: when the lab does not exist.
        """
        lab = self.session.exec(select(InnovationLab).where(InnovationLab.lab_id == lab_id)).first()
        if not lab:
            raise ValueError("Lab not found")
        if developer_id not in lab.members:
            # BUG FIX: appending in place to a JSON-typed column does not
            # mark the attribute dirty, so SQLAlchemy never persisted the
            # new member. Reassign a fresh list so the change is detected.
            lab.members = [*lab.members, developer_id]
        self.session.add(lab)
        self.session.commit()
        self.session.refresh(lab)
        return lab

    async def fund_lab(self, lab_id: str, amount: float) -> InnovationLab:
        """Credit funding to a lab; activate it once the goal is reached.

        Raises:
            ValueError: when the lab does not exist.
        """
        lab = self.session.exec(select(InnovationLab).where(InnovationLab.lab_id == lab_id)).first()
        if not lab:
            raise ValueError("Lab not found")
        lab.current_funding += amount
        # Only labs still in the FUNDING phase flip to ACTIVE.
        if lab.status == LabStatus.FUNDING and lab.current_funding >= lab.funding_goal:
            lab.status = LabStatus.ACTIVE
        self.session.add(lab)
        self.session.commit()
        self.session.refresh(lab)
        return lab
class CommunityPlatformService:
    """Service for managing the community support and collaboration platform."""

    def __init__(self, session: Session):
        self.session = session

    async def create_post(self, author_id: str, data: Dict[str, Any]) -> CommunityPost:
        """Create a new community post (question, tutorial, etc.).

        New top-level threads (no ``parent_post_id``) grant the author a
        small reputation reward.
        """
        post = CommunityPost(
            author_id=author_id,
            title=data.get('title', ''),
            content=data.get('content', ''),
            category=data.get('category', 'discussion'),
            tags=data.get('tags', []),
            parent_post_id=data.get('parent_post_id')
        )
        self.session.add(post)
        if not post.parent_post_id:  # new thread: reward participation
            dev_service = DeveloperEcosystemService(self.session)
            await dev_service.update_developer_reputation(author_id, 2.0)
        self.session.commit()
        self.session.refresh(post)
        return post

    async def get_feed(self, category: str = None, limit: int = 20) -> List[CommunityPost]:
        """Return the newest top-level posts, optionally filtered by category."""
        # `== None` is intentional: it compiles to SQL `IS NULL`.
        query = select(CommunityPost).where(CommunityPost.parent_post_id == None)  # noqa: E711
        if category:
            query = query.where(CommunityPost.category == category)
        query = query.order_by(CommunityPost.created_at.desc()).limit(limit)
        return self.session.exec(query).all()

    async def upvote_post(self, post_id: str) -> CommunityPost:
        """Upvote a post and grant its author a reputation reward.

        Raises:
            ValueError: If the post does not exist.
        """
        post = self.session.exec(select(CommunityPost).where(CommunityPost.post_id == post_id)).first()
        if not post:
            raise ValueError("Post not found")
        post.upvotes += 1
        self.session.add(post)
        dev_service = DeveloperEcosystemService(self.session)
        await dev_service.update_developer_reputation(post.author_id, 1.0)
        self.session.commit()
        self.session.refresh(post)
        return post

    async def create_hackathon(self, organizer_id: str, data: Dict[str, Any]) -> Hackathon:
        """Create a new agent innovation hackathon.

        Only EXPERT/MASTER/PARTNER tier developers may organize.

        Raises:
            ValueError: If the organizer's tier is too low, or a required
                schedule field is missing.

        Bug fixed: missing ``registration_end``/``event_start``/``event_end``
        previously crashed with ``TypeError`` from ``fromisoformat(None)``;
        they now raise a clear ``ValueError``.
        """
        dev = self.session.exec(select(DeveloperProfile).where(DeveloperProfile.developer_id == organizer_id)).first()
        if not dev or dev.tier not in [DeveloperTier.EXPERT, DeveloperTier.MASTER, DeveloperTier.PARTNER]:
            raise ValueError("Only high-tier developers can organize hackathons")

        def _required_dt(key: str) -> datetime:
            # Validate required ISO-8601 schedule fields up front.
            raw = data.get(key)
            if not raw:
                raise ValueError(f"Missing required hackathon field: {key}")
            return datetime.fromisoformat(raw)

        hackathon = Hackathon(
            title=data.get('title', ''),
            description=data.get('description', ''),
            theme=data.get('theme', ''),
            sponsor=data.get('sponsor', 'AITBC Foundation'),
            prize_pool=data.get('prize_pool', 0.0),
            registration_start=datetime.fromisoformat(data.get('registration_start', datetime.utcnow().isoformat())),
            registration_end=_required_dt('registration_end'),
            event_start=_required_dt('event_start'),
            event_end=_required_dt('event_end')
        )
        self.session.add(hackathon)
        self.session.commit()
        self.session.refresh(hackathon)
        return hackathon

    async def register_for_hackathon(self, hackathon_id: str, developer_id: str) -> Hackathon:
        """Register a developer for a hackathon that is open for registration.

        Raises:
            ValueError: If the hackathon is missing or registration is closed.
        """
        hackathon = self.session.exec(select(Hackathon).where(Hackathon.hackathon_id == hackathon_id)).first()
        if not hackathon:
            raise ValueError("Hackathon not found")
        if hackathon.status not in [HackathonStatus.ANNOUNCED, HackathonStatus.REGISTRATION]:
            raise ValueError("Registration is not open for this hackathon")
        if developer_id not in hackathon.participants:
            # Bug fixed: reassign rather than append — in-place mutation of a
            # JSON column is not change-tracked by SQLAlchemy and would not
            # persist the new participant.
            hackathon.participants = hackathon.participants + [developer_id]
        self.session.add(hackathon)
        self.session.commit()
        self.session.refresh(hackathon)
        return hackathon

View File

@@ -0,0 +1,511 @@
"""
Creative Capabilities Service
Implements advanced creativity enhancement systems and specialized AI capabilities
"""
import asyncio
import numpy as np
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import logging
import random
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.agent_performance import (
CreativeCapability, AgentCapability, AgentPerformanceProfile
)
logger = logging.getLogger(__name__)
class CreativityEnhancementEngine:
"""Advanced creativity enhancement system for OpenClaw agents"""
def __init__(self):
# Dispatch table: algorithm name -> enhancement coroutine; consumed by
# enhance_creativity(), which falls back to divergent thinking for
# unknown names.
self.enhancement_algorithms = {
'divergent_thinking': self.divergent_thinking_enhancement,
'conceptual_blending': self.conceptual_blending,
'morphological_analysis': self.morphological_analysis,
'lateral_thinking': self.lateral_thinking_stimulation,
'bisociation': self.bisociation_framework
}
# Creative domain -> default specialization list assigned to new
# capabilities in create_creative_capability().
self.creative_domains = {
'artistic': ['visual_arts', 'music_composition', 'literary_arts'],
'design': ['ui_ux', 'product_design', 'architectural'],
'innovation': ['problem_solving', 'product_innovation', 'process_innovation'],
'scientific': ['hypothesis_generation', 'experimental_design'],
'narrative': ['storytelling', 'world_building', 'character_development']
}
# Metric names scored by evaluate_creation() and the automated evaluator.
self.evaluation_metrics = [
'originality',
'fluency',
'flexibility',
'elaboration',
'aesthetic_value',
'utility'
]
async def create_creative_capability(
    self,
    session: Session,
    agent_id: str,
    creative_domain: str,
    capability_type: str,
    generation_models: List[str],
    initial_score: float = 0.5
) -> "CreativeCapability":
    """Register a brand-new creative capability for an agent.

    Seeds the quality metrics from ``initial_score`` (novelty slightly
    lower, aesthetic quality rescaled to the 0-5 range, coherence slightly
    higher) and persists the record in the "developing" state.
    """
    # Pick the specialization list for the domain, with a generic fallback.
    specializations = self.creative_domains.get(creative_domain, ['general_creativity'])
    record = CreativeCapability(
        capability_id=f"creative_{uuid4().hex[:8]}",
        agent_id=agent_id,
        creative_domain=creative_domain,
        capability_type=capability_type,
        originality_score=initial_score,
        novelty_score=initial_score * 0.9,
        aesthetic_quality=initial_score * 5.0,
        coherence_score=initial_score * 1.1,
        generation_models=generation_models,
        creative_learning_rate=0.05,
        creative_specializations=specializations,
        status="developing",
        created_at=datetime.utcnow()
    )
    session.add(record)
    session.commit()
    session.refresh(record)
    logger.info(f"Created creative capability {record.capability_id} for agent {agent_id}")
    return record
async def enhance_creativity(
    self,
    session: Session,
    capability_id: str,
    algorithm: str = "divergent_thinking",
    training_cycles: int = 100
) -> Dict[str, Any]:
    """Enhance a specific creative capability.

    Dispatches to the requested enhancement algorithm (falling back to
    divergent thinking for unknown names), applies the resulting gains to
    the capability's metrics (clamped to their scales), records the
    training run, and promotes the status when quality thresholds are met.

    Raises:
        ValueError: If the capability does not exist.
    """
    capability = session.exec(
        select(CreativeCapability).where(CreativeCapability.capability_id == capability_id)
    ).first()
    if not capability:
        raise ValueError(f"Creative capability {capability_id} not found")
    try:
        enhancement_func = self.enhancement_algorithms.get(
            algorithm,
            self.divergent_thinking_enhancement
        )
        enhancement_results = await enhancement_func(capability, training_cycles)
        # Apply gains, clamped to each metric's scale (0-1; aesthetic 0-5).
        capability.originality_score = min(1.0, capability.originality_score + enhancement_results['originality_gain'])
        capability.novelty_score = min(1.0, capability.novelty_score + enhancement_results['novelty_gain'])
        capability.aesthetic_quality = min(5.0, capability.aesthetic_quality + enhancement_results['aesthetic_gain'])
        capability.style_variety += enhancement_results['variety_gain']
        # Bug fixed: reassign a new dict instead of mutating in place —
        # SQLAlchemy does not change-track in-place mutation of plain JSON
        # columns, so the training history was never reliably persisted.
        capability.creative_metadata = {
            **capability.creative_metadata,
            'last_enhancement': {
                'algorithm': algorithm,
                'cycles': training_cycles,
                'results': enhancement_results,
                'timestamp': datetime.utcnow().isoformat()
            }
        }
        # Promote status once quality thresholds are crossed.
        if capability.originality_score > 0.8 and capability.aesthetic_quality > 4.0:
            capability.status = "certified"
        elif capability.originality_score > 0.6:
            capability.status = "ready"
        capability.updated_at = datetime.utcnow()
        session.commit()
        logger.info(f"Enhanced creative capability {capability_id} using {algorithm}")
        return {
            'success': True,
            'capability_id': capability_id,
            'algorithm': algorithm,
            'improvements': enhancement_results,
            'new_scores': {
                'originality': capability.originality_score,
                'novelty': capability.novelty_score,
                'aesthetic': capability.aesthetic_quality,
                'variety': capability.style_variety
            },
            'status': capability.status
        }
    except Exception as e:
        logger.error(f"Error enhancing creativity for {capability_id}: {str(e)}")
        raise
async def divergent_thinking_enhancement(self, capability: "CreativeCapability", cycles: int) -> Dict[str, float]:
    """Simulate a divergent-thinking training run and return metric gains.

    Gains scale with the capability's learning rate and the cycle count
    (normalised to a 100-cycle baseline), with a +/-20% noise band.
    """
    lr = capability.creative_learning_rate
    originality = lr * (cycles / 100) * random.uniform(0.8, 1.2)
    # At least one new style per run; longer runs unlock more.
    styles = int(max(1, cycles / 50) * random.uniform(0.5, 1.5))
    return {
        'originality_gain': originality,
        'novelty_gain': 0.8 * originality,
        'aesthetic_gain': 2.0 * originality,  # rescaled to the 0-5 aesthetic range
        'variety_gain': styles,
        'fluency_improvement': random.uniform(0.1, 0.3)
    }
async def conceptual_blending(self, capability: "CreativeCapability", cycles: int) -> Dict[str, float]:
    """Enhance conceptual blending (combining unrelated concepts).

    Blending drives novelty first (80-cycle baseline with noise); the
    other gains are fixed multiples of the novelty gain.
    """
    blend_gain = capability.creative_learning_rate * (cycles / 80) * random.uniform(0.9, 1.3)
    return {
        'originality_gain': 0.7 * blend_gain,
        'novelty_gain': blend_gain,
        'aesthetic_gain': 1.5 * blend_gain,
        'variety_gain': int(cycles / 60),
        'blending_efficiency': random.uniform(0.15, 0.35)
    }
async def morphological_analysis(self, capability: "CreativeCapability", cycles: int) -> Dict[str, float]:
    """Enhance morphological analysis (systematic exploration of possibilities).

    A systematic technique: the core gains are deterministic (no noise
    term), reflecting steady, predictable improvement.
    """
    steady = capability.creative_learning_rate * (cycles / 100)
    return {
        'originality_gain': steady * 0.9,
        'novelty_gain': steady * 1.1,
        'aesthetic_gain': steady * 1.0,
        'variety_gain': int(cycles / 40),
        'systematic_coverage': random.uniform(0.2, 0.4)
    }
async def lateral_thinking_stimulation(self, capability: "CreativeCapability", cycles: int) -> Dict[str, float]:
    """Enhance lateral thinking (approaching problems from new angles).

    Produces highly original but noisier results (wide 0.7-1.5 noise band);
    aesthetics benefit less than originality.
    """
    burst = capability.creative_learning_rate * (cycles / 90) * random.uniform(0.7, 1.5)
    return {
        'originality_gain': 1.3 * burst,
        'novelty_gain': 1.2 * burst,
        'aesthetic_gain': 0.8 * burst,
        'variety_gain': int(cycles / 50),
        'perspective_shifts': random.uniform(0.2, 0.5)
    }
async def bisociation_framework(self, capability: "CreativeCapability", cycles: int) -> Dict[str, float]:
    """Enhance bisociation (linking two previously unrelated frames of reference).

    Slowest baseline of the techniques (120 cycles), but with the highest
    originality multiplier.
    """
    link_gain = capability.creative_learning_rate * (cycles / 120) * random.uniform(0.8, 1.4)
    return {
        'originality_gain': 1.4 * link_gain,
        'novelty_gain': 1.3 * link_gain,
        'aesthetic_gain': 1.2 * link_gain,
        'variety_gain': int(cycles / 70),
        'cross_domain_links': random.uniform(0.1, 0.4)
    }
async def evaluate_creation(
    self,
    session: Session,
    capability_id: str,
    creation_data: Dict[str, Any],
    expert_feedback: Optional[Dict[str, float]] = None
) -> Dict[str, Any]:
    """Evaluate a creative output and fold the result into the capability.

    Combines an automated aesthetic evaluation with optional expert
    feedback (expert scores weighted 70/30), updates the capability's
    quality metrics with an exponential moving average, and appends the
    evaluation to a bounded (last 50) history.

    Raises:
        ValueError: If the capability does not exist.
    """
    capability = session.exec(
        select(CreativeCapability).where(CreativeCapability.capability_id == capability_id)
    ).first()
    if not capability:
        raise ValueError(f"Creative capability {capability_id} not found")
    auto_eval = self.automated_aesthetic_evaluation(creation_data, capability.creative_domain)
    # Blend automated scores with expert feedback where available.
    final_eval = {}
    for metric in self.evaluation_metrics:
        auto_score = auto_eval.get(metric, 0.5)
        if expert_feedback and metric in expert_feedback:
            # Expert opinion dominates the automated heuristic.
            final_eval[metric] = (auto_score * 0.3) + (expert_feedback[metric] * 0.7)
        else:
            final_eval[metric] = auto_score
    capability.creations_generated += 1
    # Exponential moving average so one evaluation nudges, not resets.
    alpha = 0.1
    capability.originality_score = (1 - alpha) * capability.originality_score + alpha * final_eval.get('originality', capability.originality_score)
    capability.aesthetic_quality = (1 - alpha) * capability.aesthetic_quality + alpha * (final_eval.get('aesthetic_value', 0.5) * 5.0)
    capability.coherence_score = (1 - alpha) * capability.coherence_score + alpha * final_eval.get('utility', capability.coherence_score)
    evaluation_record = {
        'timestamp': datetime.utcnow().isoformat(),
        'creation_id': creation_data.get('id', f"create_{uuid4().hex[:8]}"),
        'scores': final_eval
    }
    # Bug fixed: always assign a *new* list (trimmed to the latest 50
    # entries). The original appended in place and reassigned the same
    # object, which SQLAlchemy's plain-JSON column change detection
    # ignores, so the history was not reliably persisted.
    capability.expert_evaluations = (capability.expert_evaluations + [evaluation_record])[-50:]
    capability.last_evaluation = datetime.utcnow()
    session.commit()
    return {
        'success': True,
        'evaluation': final_eval,
        'capability_updated': True,
        'new_aesthetic_quality': capability.aesthetic_quality
    }
def automated_aesthetic_evaluation(self, creation_data: Dict[str, Any], domain: str) -> Dict[str, float]:
    """Heuristic, domain-aware scoring of a creative output.

    Placeholder for specialised evaluator models (art/text/music): most
    scores are drawn from domain-specific ranges, with fluency or
    elaboration tied to a simple length-based complexity proxy.
    """
    text = str(creation_data.get('content', ''))
    # Length proxy: saturates at 1000 characters.
    complexity = min(1.0, len(text) / 1000.0)
    structure_score = 0.5 + (random.uniform(-0.2, 0.3))
    if domain == 'artistic':
        scores = {
            'originality': random.uniform(0.6, 0.95),
            'fluency': complexity,
            'flexibility': random.uniform(0.5, 0.8),
            'elaboration': structure_score,
            'aesthetic_value': random.uniform(0.7, 0.9),
            'utility': random.uniform(0.4, 0.7)
        }
    elif domain == 'innovation':
        scores = {
            'originality': random.uniform(0.7, 0.9),
            'fluency': structure_score,
            'flexibility': random.uniform(0.6, 0.9),
            'elaboration': complexity,
            'aesthetic_value': random.uniform(0.5, 0.8),
            'utility': random.uniform(0.8, 0.95)
        }
    else:
        # Unknown domains: uniform mid-to-high scores across the board.
        scores = {metric: random.uniform(0.5, 0.9) for metric in (
            'originality', 'fluency', 'flexibility',
            'elaboration', 'aesthetic_value', 'utility')}
    return scores
class IdeationAlgorithm:
"""System for generating innovative ideas and solving complex problems"""
def __init__(self):
# Dispatch table: technique name -> generator method, used by
# generate_ideas(); unknown names fall back to first-principles reasoning.
self.ideation_techniques = {
'scamper': self.scamper_technique,
'triz': self.triz_inventive_principles,
'six_thinking_hats': self.six_thinking_hats,
'first_principles': self.first_principles_reasoning,
'biomimicry': self.biomimicry_mapping
}
async def generate_ideas(
    self,
    problem_statement: str,
    domain: str,
    technique: str = "scamper",
    num_ideas: int = 5,
    constraints: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """Generate and rank innovative ideas with the chosen technique.

    Unknown technique names fall back to first-principles reasoning.
    Returns the problem, technique, domain, the ranked idea list and a
    generation timestamp.
    """
    generator = self.ideation_techniques.get(technique, self.first_principles_reasoning)
    await asyncio.sleep(0.5)  # simulated processing latency
    candidates = [generator(problem_statement, domain, seed, constraints)
                  for seed in range(num_ideas)]
    return {
        'problem': problem_statement,
        'technique_used': technique,
        'domain': domain,
        'generated_ideas': self.rank_ideas(candidates),
        'generation_timestamp': datetime.utcnow().isoformat()
    }
def scamper_technique(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
    """SCAMPER: Substitute, Combine, Adapt, Modify, Put to other use, Eliminate, Reverse."""
    scamper_ops = ('Substitute', 'Combine', 'Adapt', 'Modify',
                   'Put to other use', 'Eliminate', 'Reverse')
    # Cycle deterministically through the operations by seed.
    chosen = scamper_ops[seed % len(scamper_ops)]
    return {
        'title': f"{chosen}-based innovation for {domain}",
        'description': f"Applying the {chosen} principle to solving: {problem[:30]}...",
        'technique_aspect': chosen,
        'novelty_score': random.uniform(0.6, 0.9),
        'feasibility_score': random.uniform(0.5, 0.85)
    }
def triz_inventive_principles(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
    """TRIZ: Theory of Inventive Problem Solving (inventive principles)."""
    triz_catalog = ('Segmentation', 'Extraction', 'Local Quality',
                    'Asymmetry', 'Consolidation', 'Universality')
    # Deterministic principle selection by seed.
    chosen = triz_catalog[seed % len(triz_catalog)]
    return {
        'title': f"TRIZ Principle: {chosen}",
        'description': f"Solving contradictions in {domain} using {chosen}.",
        'technique_aspect': chosen,
        'novelty_score': random.uniform(0.7, 0.95),
        'feasibility_score': random.uniform(0.4, 0.8)
    }
def six_thinking_hats(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
    """De Bono's Six Thinking Hats: view the problem from one hat's angle."""
    hat_cycle = ('White (Data)', 'Red (Emotion)', 'Black (Caution)',
                 'Yellow (Optimism)', 'Green (Creativity)', 'Blue (Process)')
    hat = hat_cycle[seed % len(hat_cycle)]
    return {
        'title': f"{hat} perspective",
        'description': f"Analyzing {problem[:20]} from the {hat} standpoint.",
        'technique_aspect': hat,
        'novelty_score': random.uniform(0.5, 0.8),
        'feasibility_score': random.uniform(0.6, 0.9)
    }
def first_principles_reasoning(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
    """Deconstruct the problem to fundamental truths and rebuild from there."""
    return {
        'title': f"Fundamental reconstruction {seed + 1}",
        'description': f"Breaking down assumptions in {domain} to fundamental physics/logic.",
        'technique_aspect': 'Deconstruction',
        # Very high novelty but lower feasibility: first-principles ideas
        # tend to be radical.
        'novelty_score': random.uniform(0.8, 0.99),
        'feasibility_score': random.uniform(0.3, 0.7)
    }
def biomimicry_mapping(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
    """Map the engineering/design problem onto a solution evolved in biology."""
    nature_models = ('Mycelium networks', 'Swarm intelligence', 'Photosynthesis',
                     'Lotus effect', 'Gecko adhesion')
    model = nature_models[seed % len(nature_models)]
    return {
        'title': f"Bio-inspired: {model}",
        'description': f"Applying principles from {model} to {domain} challenges.",
        'technique_aspect': model,
        'novelty_score': random.uniform(0.75, 0.95),
        'feasibility_score': random.uniform(0.4, 0.75)
    }
def rank_ideas(self, ideas: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Rank ideas by a 60/40 blend of novelty and feasibility (best first).

    Each idea dict is annotated in place with its 'composite_score'.
    """
    for candidate in ideas:
        candidate['composite_score'] = (
            candidate['novelty_score'] * 0.6 + candidate['feasibility_score'] * 0.4
        )
    return sorted(ideas, key=lambda c: c['composite_score'], reverse=True)
class CrossDomainCreativeIntegrator:
    """Integrates creativity across multiple domains for breakthrough innovations."""

    def __init__(self):
        # Stateless integrator: all inputs arrive per call.
        pass

    async def generate_cross_domain_synthesis(
        self,
        session: Session,
        agent_id: str,
        primary_domain: str,
        secondary_domains: List[str],
        synthesis_goal: str
    ) -> Dict[str, Any]:
        """Synthesize concepts from multiple domains into a novel output.

        The agent must hold a creative capability in the primary domain;
        capabilities in secondary domains raise the synergy potential.
        Every involved capability's cross-domain transfer metric is bumped
        (capped at 1.0).

        Raises:
            ValueError: If the agent has no capability in ``primary_domain``.
        """
        wanted_domains = [primary_domain] + secondary_domains
        capabilities = session.exec(
            select(CreativeCapability).where(
                and_(
                    CreativeCapability.agent_id == agent_id,
                    CreativeCapability.creative_domain.in_(wanted_domains)
                )
            )
        ).all()
        covered = [cap.creative_domain for cap in capabilities]
        if primary_domain not in covered:
            raise ValueError(f"Agent lacks primary creative domain: {primary_domain}")
        # Each covered domain contributes 0.2 to the synergy potential.
        synergy_potential = len(covered) * 0.2
        await asyncio.sleep(0.8)  # simulated synthesis latency
        result = {
            'goal': synthesis_goal,
            'primary_framework': primary_domain,
            'integrated_perspectives': secondary_domains,
            'synthesis_output': f"Novel integration of {primary_domain} principles with mechanisms from {', '.join(secondary_domains)}",
            'synergy_score': min(0.95, 0.4 + synergy_potential + random.uniform(0, 0.2)),
            'innovation_level': 'disruptive' if synergy_potential > 0.5 else 'incremental',
            'suggested_applications': [
                f"Cross-functional application in {primary_domain}",
                f"Novel methodology for {secondary_domains[0] if secondary_domains else 'general use'}"
            ]
        }
        # Reward each participating capability with a (capped) transfer bump.
        for cap in capabilities:
            cap.cross_domain_transfer = min(1.0, cap.cross_domain_transfer + 0.05)
            session.add(cap)
        session.commit()
        return result

View File

@@ -0,0 +1,275 @@
"""
Decentralized Governance Service
Implements the OpenClaw DAO, voting mechanisms, and proposal lifecycle
"""
from typing import Optional, List, Dict, Any
from sqlmodel import Session, select
from datetime import datetime, timedelta
import logging
from uuid import uuid4
from ..domain.governance import (
GovernanceProfile, Proposal, Vote, DaoTreasury, TransparencyReport,
ProposalStatus, VoteType, GovernanceRole
)
logger = logging.getLogger(__name__)
class GovernanceService:
"""Core service for managing DAO operations and voting"""
def __init__(self, session: Session):
self.session = session
async def get_or_create_profile(self, user_id: str, initial_voting_power: float = 0.0) -> GovernanceProfile:
"""Get an existing governance profile or create a new one"""
# Look up by user_id; each user is expected to have at most one profile.
profile = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.user_id == user_id)).first()
if not profile:
# First touch: create the profile with the caller-supplied starting power.
profile = GovernanceProfile(
user_id=user_id,
voting_power=initial_voting_power
)
self.session.add(profile)
self.session.commit()
self.session.refresh(profile)
return profile
async def delegate_votes(self, delegator_id: str, delegatee_id: str) -> GovernanceProfile:
    """Delegate voting power from one profile to another.

    Removes any existing delegation first (returning the delegator's power
    to the old delegatee's pool), then points the delegator at the new
    delegatee and credits them with the delegator's personal voting power.

    Raises:
        ValueError: If either profile is missing, or on self-delegation.

    Bug fixed: self-delegation was previously allowed, which would let a
    profile count its own power twice (personal + delegated) when voting.
    """
    delegator = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == delegator_id)).first()
    delegatee = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == delegatee_id)).first()
    if not delegator or not delegatee:
        raise ValueError("Delegator or Delegatee not found")
    if delegator.profile_id == delegatee.profile_id:
        raise ValueError("Cannot delegate voting power to yourself")
    # Undo the previous delegation, if any.
    if delegator.delegate_to:
        old_delegatee = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == delegator.delegate_to)).first()
        if old_delegatee:
            old_delegatee.delegated_power -= delegator.voting_power
            self.session.add(old_delegatee)
    # Record the new delegation.
    delegator.delegate_to = delegatee.profile_id
    delegatee.delegated_power += delegator.voting_power
    self.session.add(delegator)
    self.session.add(delegatee)
    self.session.commit()
    return delegator
async def create_proposal(self, proposer_id: str, data: Dict[str, Any]) -> Proposal:
    """Create a new governance proposal.

    The proposer must control at least 100 voting power (personal plus
    delegated). Voting defaults to opening one day from now and running
    for seven days; ISO-8601 strings are accepted for both bounds.

    Raises:
        ValueError: If the proposer is missing, lacks voting power, or the
            voting window is inverted.

    Bug fixed: an inverted window (``voting_ends <= voting_starts``)
    previously produced a proposal that could never be voted on.
    """
    proposer = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == proposer_id)).first()
    if not proposer:
        raise ValueError("Proposer not found")
    # Submission threshold keeps spam proposals out.
    total_power = proposer.voting_power + proposer.delegated_power
    if total_power < 100.0:
        raise ValueError("Insufficient voting power to submit a proposal")
    now = datetime.utcnow()
    voting_starts = data.get('voting_starts', now + timedelta(days=1))
    if isinstance(voting_starts, str):
        voting_starts = datetime.fromisoformat(voting_starts)
    voting_ends = data.get('voting_ends', voting_starts + timedelta(days=7))
    if isinstance(voting_ends, str):
        voting_ends = datetime.fromisoformat(voting_ends)
    if voting_ends <= voting_starts:
        raise ValueError("voting_ends must be after voting_starts")
    proposal = Proposal(
        proposer_id=proposer_id,
        title=data.get('title'),
        description=data.get('description'),
        category=data.get('category', 'general'),
        execution_payload=data.get('execution_payload', {}),
        quorum_required=data.get('quorum_required', 1000.0),  # default quorum
        voting_starts=voting_starts,
        voting_ends=voting_ends
    )
    # Open voting immediately when the window has already started.
    if voting_starts <= now:
        proposal.status = ProposalStatus.ACTIVE
    proposer.proposals_created += 1
    self.session.add(proposal)
    self.session.add(proposer)
    self.session.commit()
    self.session.refresh(proposal)
    return proposal
async def cast_vote(self, proposal_id: str, voter_id: str, vote_type: VoteType, reason: str = None) -> Vote:
    """Cast a vote on an active proposal.

    A voter may vote once per proposal. The power used is any power
    delegated *to* the voter plus — only if the voter has not delegated
    their own vote away — their personal voting power.

    Raises:
        ValueError: If the proposal/voter is missing, voting is closed,
            the voter already voted, or the voter has no usable power.

    Bug fixed: a voter who had delegated their personal power away could
    previously vote with it directly as well, so that power was counted
    twice (once here and once via the delegatee's delegated_power).
    """
    proposal = self.session.exec(select(Proposal).where(Proposal.proposal_id == proposal_id)).first()
    voter = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == voter_id)).first()
    if not proposal or not voter:
        raise ValueError("Proposal or Voter not found")
    now = datetime.utcnow()
    if proposal.status != ProposalStatus.ACTIVE or now < proposal.voting_starts or now > proposal.voting_ends:
        raise ValueError("Proposal is not currently active for voting")
    # One vote per (proposal, voter).
    existing_vote = self.session.exec(
        select(Vote).where(Vote.proposal_id == proposal_id).where(Vote.voter_id == voter_id)
    ).first()
    if existing_vote:
        raise ValueError("Voter has already cast a vote on this proposal")
    # Personal power only counts if it was not delegated away.
    personal_power = 0.0 if voter.delegate_to else voter.voting_power
    power_to_use = personal_power + voter.delegated_power
    if power_to_use <= 0:
        raise ValueError("Voter has no voting power")
    vote = Vote(
        proposal_id=proposal_id,
        voter_id=voter_id,
        vote_type=vote_type,
        voting_power_used=power_to_use,
        reason=reason
    )
    # Tally by vote type (abstain is the fall-through).
    if vote_type == VoteType.FOR:
        proposal.votes_for += power_to_use
    elif vote_type == VoteType.AGAINST:
        proposal.votes_against += power_to_use
    else:
        proposal.votes_abstain += power_to_use
    voter.total_votes_cast += 1
    voter.last_voted_at = now
    self.session.add(vote)
    self.session.add(proposal)
    self.session.add(voter)
    self.session.commit()
    self.session.refresh(vote)
    return vote
async def process_proposal_lifecycle(self, proposal_id: str) -> Proposal:
    """Advance a proposal's status based on the clock and vote tallies.

    Transitions: DRAFT -> ACTIVE once voting opens; ACTIVE -> SUCCEEDED or
    DEFEATED once voting closes. Quorum counts abstentions; the passing
    threshold is measured on FOR vs AGAINST only. A successful proposal
    also credits the proposer's pass counter.

    Raises:
        ValueError: If the proposal does not exist.
    """
    proposal = self.session.exec(select(Proposal).where(Proposal.proposal_id == proposal_id)).first()
    if not proposal:
        raise ValueError("Proposal not found")
    now = datetime.utcnow()
    if proposal.status == ProposalStatus.DRAFT and now >= proposal.voting_starts:
        # Voting window has opened.
        proposal.status = ProposalStatus.ACTIVE
    elif proposal.status == ProposalStatus.ACTIVE and now > proposal.voting_ends:
        # Voting closed: settle the outcome.
        turnout = proposal.votes_for + proposal.votes_against + proposal.votes_abstain
        decisive = proposal.votes_for + proposal.votes_against
        if turnout < proposal.quorum_required or decisive == 0:
            proposal.status = ProposalStatus.DEFEATED
        elif proposal.votes_for / decisive >= proposal.passing_threshold:
            proposal.status = ProposalStatus.SUCCEEDED
            # Credit the proposer's track record.
            proposer = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == proposal.proposer_id)).first()
            if proposer:
                proposer.proposals_passed += 1
                self.session.add(proposer)
        else:
            proposal.status = ProposalStatus.DEFEATED
    self.session.add(proposal)
    self.session.commit()
    self.session.refresh(proposal)
    return proposal
async def execute_proposal(self, proposal_id: str, executor_id: str) -> Proposal:
    """Execute a SUCCEEDED proposal's payload (council/admin only).

    Funding proposals allocate from the main DAO treasury; execution is
    rejected if the treasury is missing or lacks unallocated funds.

    Raises:
        ValueError: If lookup fails, the proposal is not SUCCEEDED, the
            executor lacks the required role, or the treasury check fails.

    Bug fixed: a *missing* treasury previously let a funding proposal be
    marked EXECUTED without the funds ever being allocated.
    """
    proposal = self.session.exec(select(Proposal).where(Proposal.proposal_id == proposal_id)).first()
    executor = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == executor_id)).first()
    if not proposal or not executor:
        raise ValueError("Proposal or Executor not found")
    if proposal.status != ProposalStatus.SUCCEEDED:
        raise ValueError("Only SUCCEEDED proposals can be executed")
    if executor.role not in [GovernanceRole.ADMIN, GovernanceRole.COUNCIL]:
        raise ValueError("Only Council or Admin members can trigger execution")
    # In a real system this would call smart contracts or internal service
    # APIs described by proposal.execution_payload.
    logger.info(f"Executing proposal {proposal_id} payload: {proposal.execution_payload}")
    if proposal.category == 'funding' and 'amount' in proposal.execution_payload:
        treasury = self.session.exec(select(DaoTreasury).where(DaoTreasury.treasury_id == "main_treasury")).first()
        if not treasury:
            raise ValueError("DAO Treasury not found; cannot execute funding proposal")
        amount = float(proposal.execution_payload['amount'])
        if treasury.total_balance - treasury.allocated_funds < amount:
            raise ValueError("Insufficient funds in DAO Treasury for execution")
        treasury.allocated_funds += amount
        self.session.add(treasury)
    proposal.status = ProposalStatus.EXECUTED
    proposal.executed_at = datetime.utcnow()
    self.session.add(proposal)
    self.session.commit()
    self.session.refresh(proposal)
    return proposal
async def generate_transparency_report(self, period: str) -> TransparencyReport:
    """Generate an automated governance analytics report for ``period``.

    NOTE(review): aggregates current totals rather than slicing records by
    the period's timestamps — a simplification carried over from the
    original implementation.

    Bug fixed: ``total_voting_power_participated`` previously summed the
    voting power of *every* profile; it now sums only profiles that have
    actually cast a vote, matching the field's name.
    """
    proposals = self.session.exec(select(Proposal)).all()
    profiles = self.session.exec(select(GovernanceProfile)).all()
    treasury = self.session.exec(select(DaoTreasury).where(DaoTreasury.treasury_id == "main_treasury")).first()
    total_proposals = len(proposals)
    passed_proposals = len([p for p in proposals if p.status in [ProposalStatus.SUCCEEDED, ProposalStatus.EXECUTED]])
    voters = [p for p in profiles if p.total_votes_cast > 0]
    active_voters = len(voters)
    participated_power = sum(p.voting_power for p in voters)
    report = TransparencyReport(
        period=period,
        total_proposals=total_proposals,
        passed_proposals=passed_proposals,
        active_voters=active_voters,
        total_voting_power_participated=participated_power,
        treasury_inflow=10000.0,  # Simulated inflow placeholder
        treasury_outflow=treasury.allocated_funds if treasury else 0.0,
        metrics={
            "voter_participation_rate": (active_voters / len(profiles)) if profiles else 0,
            "proposal_success_rate": (passed_proposals / total_proposals) if total_proposals else 0
        }
    )
    self.session.add(report)
    self.session.commit()
    self.session.refresh(report)
    return report

View File

@@ -0,0 +1,947 @@
"""
Multi-Modal Agent Fusion Service
Implements advanced fusion models and cross-domain capability integration
"""
import asyncio
import numpy as np
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import logging
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.agent_performance import (
FusionModel, AgentCapability, CreativeCapability,
ReinforcementLearningConfig, AgentPerformanceProfile
)
logger = logging.getLogger(__name__)
class MultiModalFusionEngine:
"""Advanced multi-modal agent fusion system"""
def __init__(self):
# Dispatch table: strategy name -> fusion coroutine defined on this
# class; create_fusion_model() records the chosen strategy name.
self.fusion_strategies = {
'ensemble_fusion': self.ensemble_fusion,
'attention_fusion': self.attention_fusion,
'cross_modal_attention': self.cross_modal_attention,
'neural_architecture_search': self.neural_architecture_search,
'transformer_fusion': self.transformer_fusion,
'graph_neural_fusion': self.graph_neural_fusion
}
# Per-modality default weight and encoder family; consumed when
# deriving modality weights for a new fusion model.
self.modality_types = {
'text': {'weight': 0.3, 'encoder': 'transformer'},
'image': {'weight': 0.25, 'encoder': 'cnn'},
'audio': {'weight': 0.2, 'encoder': 'wav2vec'},
'video': {'weight': 0.15, 'encoder': '3d_cnn'},
'structured': {'weight': 0.1, 'encoder': 'tabular'}
}
# Relative importance of each objective when scoring fusion outcomes
# (weights sum to 1.0).
self.fusion_objectives = {
'performance': 0.4,
'efficiency': 0.3,
'robustness': 0.2,
'adaptability': 0.1
}
async def create_fusion_model(
self,
session: Session,
model_name: str,
fusion_type: str,
base_models: List[str],
input_modalities: List[str],
fusion_strategy: str = "ensemble_fusion"
) -> FusionModel:
"""Create a new multi-modal fusion model"""
fusion_id = f"fusion_{uuid4().hex[:8]}"
# Calculate model weights based on modalities
modality_weights = self.calculate_modality_weights(input_modalities)
# Estimate computational requirements
computational_complexity = self.estimate_complexity(base_models, input_modalities)
# Set memory requirements
memory_requirement = self.estimate_memory_requirement(base_models, fusion_type)
fusion_model = FusionModel(
fusion_id=fusion_id,
model_name=model_name,
fusion_type=fusion_type,
base_models=base_models,
model_weights=self.calculate_model_weights(base_models),
fusion_strategy=fusion_strategy,
input_modalities=input_modalities,
modality_weights=modality_weights,
computational_complexity=computational_complexity,
memory_requirement=memory_requirement,
status="training"
)
session.add(fusion_model)
session.commit()
session.refresh(fusion_model)
# Start fusion training process
asyncio.create_task(self.train_fusion_model(session, fusion_id))
logger.info(f"Created fusion model {fusion_id} with strategy {fusion_strategy}")
return fusion_model
async def train_fusion_model(self, session: Session, fusion_id: str) -> Dict[str, Any]:
"""Train a fusion model"""
fusion_model = session.exec(
select(FusionModel).where(FusionModel.fusion_id == fusion_id)
).first()
if not fusion_model:
raise ValueError(f"Fusion model {fusion_id} not found")
try:
# Simulate fusion training process
training_results = await self.simulate_fusion_training(fusion_model)
# Update model with training results
fusion_model.fusion_performance = training_results['performance']
fusion_model.synergy_score = training_results['synergy']
fusion_model.robustness_score = training_results['robustness']
fusion_model.inference_time = training_results['inference_time']
fusion_model.status = "ready"
fusion_model.trained_at = datetime.utcnow()
session.commit()
logger.info(f"Fusion model {fusion_id} training completed")
return training_results
except Exception as e:
logger.error(f"Error training fusion model {fusion_id}: {str(e)}")
fusion_model.status = "failed"
session.commit()
raise
async def simulate_fusion_training(self, fusion_model: FusionModel) -> Dict[str, Any]:
"""Simulate fusion training process"""
# Calculate training time based on complexity
base_time = 4.0 # hours
complexity_multipliers = {
'low': 1.0,
'medium': 2.0,
'high': 4.0,
'very_high': 8.0
}
training_time = base_time * complexity_multipliers.get(fusion_model.computational_complexity, 2.0)
# Calculate fusion performance based on modalities and base models
modality_bonus = len(fusion_model.input_modalities) * 0.05
model_bonus = len(fusion_model.base_models) * 0.03
# Calculate synergy score (how well modalities complement each other)
synergy_score = self.calculate_synergy_score(fusion_model.input_modalities)
# Calculate robustness (ability to handle missing modalities)
robustness_score = min(1.0, 0.7 + (len(fusion_model.base_models) * 0.1))
# Calculate inference time
inference_time = 0.1 + (len(fusion_model.base_models) * 0.05) # seconds
# Calculate overall performance
base_performance = 0.75
fusion_performance = min(1.0, base_performance + modality_bonus + model_bonus + synergy_score * 0.1)
return {
'performance': {
'accuracy': fusion_performance,
'f1_score': fusion_performance * 0.95,
'precision': fusion_performance * 0.97,
'recall': fusion_performance * 0.93
},
'synergy': synergy_score,
'robustness': robustness_score,
'inference_time': inference_time,
'training_time': training_time,
'convergence_epoch': int(training_time * 5)
}
def calculate_modality_weights(self, modalities: List[str]) -> Dict[str, float]:
"""Calculate weights for different modalities"""
weights = {}
total_weight = 0.0
for modality in modalities:
weight = self.modality_types.get(modality, {}).get('weight', 0.1)
weights[modality] = weight
total_weight += weight
# Normalize weights
if total_weight > 0:
for modality in weights:
weights[modality] /= total_weight
return weights
def calculate_model_weights(self, base_models: List[str]) -> Dict[str, float]:
"""Calculate weights for base models in fusion"""
# Equal weighting by default, could be based on individual model performance
weight = 1.0 / len(base_models)
return {model: weight for model in base_models}
def estimate_complexity(self, base_models: List[str], modalities: List[str]) -> str:
"""Estimate computational complexity"""
model_complexity = len(base_models)
modality_complexity = len(modalities)
total_complexity = model_complexity * modality_complexity
if total_complexity <= 4:
return "low"
elif total_complexity <= 8:
return "medium"
elif total_complexity <= 16:
return "high"
else:
return "very_high"
def estimate_memory_requirement(self, base_models: List[str], fusion_type: str) -> float:
"""Estimate memory requirement in GB"""
base_memory = len(base_models) * 2.0 # 2GB per base model
fusion_multipliers = {
'ensemble': 1.0,
'hybrid': 1.5,
'multi_modal': 2.0,
'cross_domain': 2.5
}
multiplier = fusion_multipliers.get(fusion_type, 1.5)
return base_memory * multiplier
def calculate_synergy_score(self, modalities: List[str]) -> float:
"""Calculate synergy score between modalities"""
# Define synergy matrix between modalities
synergy_matrix = {
('text', 'image'): 0.8,
('text', 'audio'): 0.7,
('text', 'video'): 0.9,
('image', 'audio'): 0.6,
('image', 'video'): 0.85,
('audio', 'video'): 0.75,
('text', 'structured'): 0.6,
('image', 'structured'): 0.5,
('audio', 'structured'): 0.4,
('video', 'structured'): 0.7
}
total_synergy = 0.0
synergy_count = 0
# Calculate pairwise synergy
for i, mod1 in enumerate(modalities):
for j, mod2 in enumerate(modalities):
if i < j: # Avoid duplicate pairs
key = tuple(sorted([mod1, mod2]))
synergy = synergy_matrix.get(key, 0.5)
total_synergy += synergy
synergy_count += 1
# Average synergy score
if synergy_count > 0:
return total_synergy / synergy_count
else:
return 0.5 # Default synergy for single modality
async def fuse_modalities(
self,
session: Session,
fusion_id: str,
input_data: Dict[str, Any]
) -> Dict[str, Any]:
"""Fuse multiple modalities using trained fusion model"""
fusion_model = session.exec(
select(FusionModel).where(FusionModel.fusion_id == fusion_id)
).first()
if not fusion_model:
raise ValueError(f"Fusion model {fusion_id} not found")
if fusion_model.status != "ready":
raise ValueError(f"Fusion model {fusion_id} is not ready for inference")
try:
# Get fusion strategy
fusion_strategy = self.fusion_strategies.get(fusion_model.fusion_strategy)
if not fusion_strategy:
raise ValueError(f"Unknown fusion strategy: {fusion_model.fusion_strategy}")
# Apply fusion strategy
fusion_result = await fusion_strategy(input_data, fusion_model)
# Update deployment count
fusion_model.deployment_count += 1
session.commit()
logger.info(f"Fusion completed for model {fusion_id}")
return fusion_result
except Exception as e:
logger.error(f"Error during fusion with model {fusion_id}: {str(e)}")
raise
async def ensemble_fusion(
self,
input_data: Dict[str, Any],
fusion_model: FusionModel
) -> Dict[str, Any]:
"""Ensemble fusion strategy"""
# Simulate ensemble fusion
ensemble_results = {}
for modality in fusion_model.input_modalities:
if modality in input_data:
# Simulate modality-specific processing
modality_result = self.process_modality(input_data[modality], modality)
weight = fusion_model.modality_weights.get(modality, 0.1)
ensemble_results[modality] = {
'result': modality_result,
'weight': weight,
'confidence': 0.8 + (weight * 0.2)
}
# Combine results using weighted average
combined_result = self.weighted_combination(ensemble_results)
return {
'fusion_type': 'ensemble',
'combined_result': combined_result,
'modality_contributions': ensemble_results,
'confidence': self.calculate_ensemble_confidence(ensemble_results)
}
async def attention_fusion(
self,
input_data: Dict[str, Any],
fusion_model: FusionModel
) -> Dict[str, Any]:
"""Attention-based fusion strategy"""
# Calculate attention weights for each modality
attention_weights = self.calculate_attention_weights(input_data, fusion_model)
# Apply attention to each modality
attended_results = {}
for modality in fusion_model.input_modalities:
if modality in input_data:
modality_result = self.process_modality(input_data[modality], modality)
attention_weight = attention_weights.get(modality, 0.1)
attended_results[modality] = {
'result': modality_result,
'attention_weight': attention_weight,
'attended_result': self.apply_attention(modality_result, attention_weight)
}
# Combine attended results
combined_result = self.attended_combination(attended_results)
return {
'fusion_type': 'attention',
'combined_result': combined_result,
'attention_weights': attention_weights,
'attended_results': attended_results
}
async def cross_modal_attention(
self,
input_data: Dict[str, Any],
fusion_model: FusionModel
) -> Dict[str, Any]:
"""Cross-modal attention fusion strategy"""
# Build cross-modal attention matrix
attention_matrix = self.build_cross_modal_attention(input_data, fusion_model)
# Apply cross-modal attention
cross_modal_results = {}
for i, modality1 in enumerate(fusion_model.input_modalities):
if modality1 in input_data:
modality_result = self.process_modality(input_data[modality1], modality1)
# Get attention from other modalities
cross_attention = {}
for j, modality2 in enumerate(fusion_model.input_modalities):
if i != j and modality2 in input_data:
cross_attention[modality2] = attention_matrix[i][j]
cross_modal_results[modality1] = {
'result': modality_result,
'cross_attention': cross_attention,
'enhanced_result': self.enhance_with_cross_attention(modality_result, cross_attention)
}
# Combine cross-modal enhanced results
combined_result = self.cross_modal_combination(cross_modal_results)
return {
'fusion_type': 'cross_modal_attention',
'combined_result': combined_result,
'attention_matrix': attention_matrix,
'cross_modal_results': cross_modal_results
}
async def neural_architecture_search(
self,
input_data: Dict[str, Any],
fusion_model: FusionModel
) -> Dict[str, Any]:
"""Neural Architecture Search for fusion"""
# Search for optimal fusion architecture
optimal_architecture = await self.search_optimal_architecture(input_data, fusion_model)
# Apply optimal architecture
arch_results = {}
for modality in fusion_model.input_modalities:
if modality in input_data:
modality_result = self.process_modality(input_data[modality], modality)
arch_config = optimal_architecture.get(modality, {})
arch_results[modality] = {
'result': modality_result,
'architecture': arch_config,
'optimized_result': self.apply_architecture(modality_result, arch_config)
}
# Combine optimized results
combined_result = self.architecture_combination(arch_results)
return {
'fusion_type': 'neural_architecture_search',
'combined_result': combined_result,
'optimal_architecture': optimal_architecture,
'arch_results': arch_results
}
async def transformer_fusion(
self,
input_data: Dict[str, Any],
fusion_model: FusionModel
) -> Dict[str, Any]:
"""Transformer-based fusion strategy"""
# Convert modalities to transformer tokens
tokenized_modalities = {}
for modality in fusion_model.input_modalities:
if modality in input_data:
tokens = self.tokenize_modality(input_data[modality], modality)
tokenized_modalities[modality] = tokens
# Apply transformer fusion
fused_embeddings = self.transformer_fusion_embeddings(tokenized_modalities)
# Generate final result
combined_result = self.decode_transformer_output(fused_embeddings)
return {
'fusion_type': 'transformer',
'combined_result': combined_result,
'tokenized_modalities': tokenized_modalities,
'fused_embeddings': fused_embeddings
}
async def graph_neural_fusion(
self,
input_data: Dict[str, Any],
fusion_model: FusionModel
) -> Dict[str, Any]:
"""Graph Neural Network fusion strategy"""
# Build modality graph
modality_graph = self.build_modality_graph(input_data, fusion_model)
# Apply GNN fusion
graph_embeddings = self.gnn_fusion_embeddings(modality_graph)
# Generate final result
combined_result = self.decode_gnn_output(graph_embeddings)
return {
'fusion_type': 'graph_neural',
'combined_result': combined_result,
'modality_graph': modality_graph,
'graph_embeddings': graph_embeddings
}
def process_modality(self, data: Any, modality_type: str) -> Dict[str, Any]:
"""Process individual modality data"""
# Simulate modality-specific processing
if modality_type == 'text':
return {
'features': self.extract_text_features(data),
'embeddings': self.generate_text_embeddings(data),
'confidence': 0.85
}
elif modality_type == 'image':
return {
'features': self.extract_image_features(data),
'embeddings': self.generate_image_embeddings(data),
'confidence': 0.80
}
elif modality_type == 'audio':
return {
'features': self.extract_audio_features(data),
'embeddings': self.generate_audio_embeddings(data),
'confidence': 0.75
}
elif modality_type == 'video':
return {
'features': self.extract_video_features(data),
'embeddings': self.generate_video_embeddings(data),
'confidence': 0.78
}
elif modality_type == 'structured':
return {
'features': self.extract_structured_features(data),
'embeddings': self.generate_structured_embeddings(data),
'confidence': 0.90
}
else:
return {
'features': {},
'embeddings': [],
'confidence': 0.5
}
def weighted_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
"""Combine results using weighted average"""
combined_features = {}
combined_confidence = 0.0
total_weight = 0.0
for modality, result in results.items():
weight = result['weight']
features = result['result']['features']
confidence = result['confidence']
# Weight features
for feature, value in features.items():
if feature not in combined_features:
combined_features[feature] = 0.0
combined_features[feature] += value * weight
combined_confidence += confidence * weight
total_weight += weight
# Normalize
if total_weight > 0:
for feature in combined_features:
combined_features[feature] /= total_weight
combined_confidence /= total_weight
return {
'features': combined_features,
'confidence': combined_confidence
}
def calculate_attention_weights(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> Dict[str, float]:
"""Calculate attention weights for modalities"""
# Simulate attention weight calculation based on input quality and modality importance
attention_weights = {}
for modality in fusion_model.input_modalities:
if modality in input_data:
# Base weight from modality weights
base_weight = fusion_model.modality_weights.get(modality, 0.1)
# Adjust based on input quality (simulated)
quality_factor = 0.8 + (hash(str(input_data[modality])) % 20) / 100.0
attention_weights[modality] = base_weight * quality_factor
# Normalize attention weights
total_attention = sum(attention_weights.values())
if total_attention > 0:
for modality in attention_weights:
attention_weights[modality] /= total_attention
return attention_weights
def apply_attention(self, result: Dict[str, Any], attention_weight: float) -> Dict[str, Any]:
"""Apply attention weight to modality result"""
attended_result = result.copy()
# Scale features by attention weight
for feature, value in attended_result['features'].items():
attended_result['features'][feature] = value * attention_weight
# Adjust confidence
attended_result['confidence'] = result['confidence'] * (0.5 + attention_weight * 0.5)
return attended_result
def attended_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
"""Combine attended results"""
combined_features = {}
combined_confidence = 0.0
for modality, result in results.items():
features = result['attended_result']['features']
confidence = result['attended_result']['confidence']
# Add features
for feature, value in features.items():
if feature not in combined_features:
combined_features[feature] = 0.0
combined_features[feature] += value
combined_confidence += confidence
# Average confidence
if results:
combined_confidence /= len(results)
return {
'features': combined_features,
'confidence': combined_confidence
}
def build_cross_modal_attention(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> List[List[float]]:
"""Build cross-modal attention matrix"""
modalities = fusion_model.input_modalities
n_modalities = len(modalities)
# Initialize attention matrix
attention_matrix = [[0.0 for _ in range(n_modalities)] for _ in range(n_modalities)]
# Calculate cross-modal attention based on synergy
for i, mod1 in enumerate(modalities):
for j, mod2 in enumerate(modalities):
if i != j and mod1 in input_data and mod2 in input_data:
# Calculate attention based on synergy and input compatibility
synergy = self.calculate_synergy_score([mod1, mod2])
compatibility = self.calculate_modality_compatibility(input_data[mod1], input_data[mod2])
attention_matrix[i][j] = synergy * compatibility
# Normalize rows
for i in range(n_modalities):
row_sum = sum(attention_matrix[i])
if row_sum > 0:
for j in range(n_modalities):
attention_matrix[i][j] /= row_sum
return attention_matrix
def calculate_modality_compatibility(self, data1: Any, data2: Any) -> float:
"""Calculate compatibility between two modalities"""
# Simulate compatibility calculation
# In real implementation, would analyze actual data compatibility
return 0.6 + (hash(str(data1) + str(data2)) % 40) / 100.0
def enhance_with_cross_attention(self, result: Dict[str, Any], cross_attention: Dict[str, float]) -> Dict[str, Any]:
"""Enhance result with cross-attention from other modalities"""
enhanced_result = result.copy()
# Apply cross-attention enhancement
attention_boost = sum(cross_attention.values()) / len(cross_attention) if cross_attention else 0.0
# Boost features based on cross-attention
for feature, value in enhanced_result['features'].items():
enhanced_result['features'][feature] *= (1.0 + attention_boost * 0.2)
# Boost confidence
enhanced_result['confidence'] = min(1.0, result['confidence'] * (1.0 + attention_boost * 0.3))
return enhanced_result
def cross_modal_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
"""Combine cross-modal enhanced results"""
combined_features = {}
combined_confidence = 0.0
total_cross_attention = 0.0
for modality, result in results.items():
features = result['enhanced_result']['features']
confidence = result['enhanced_result']['confidence']
cross_attention_sum = sum(result['cross_attention'].values())
# Add features
for feature, value in features.items():
if feature not in combined_features:
combined_features[feature] = 0.0
combined_features[feature] += value
combined_confidence += confidence
total_cross_attention += cross_attention_sum
# Average values
if results:
combined_confidence /= len(results)
total_cross_attention /= len(results)
return {
'features': combined_features,
'confidence': combined_confidence,
'cross_attention_boost': total_cross_attention
}
async def search_optimal_architecture(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> Dict[str, Any]:
"""Search for optimal fusion architecture"""
optimal_arch = {}
for modality in fusion_model.input_modalities:
if modality in input_data:
# Simulate architecture search
arch_config = {
'layers': np.random.randint(2, 6).tolist(),
'units': [2**i for i in range(4, 9)],
'activation': np.random.choice(['relu', 'tanh', 'sigmoid']),
'dropout': np.random.uniform(0.1, 0.3),
'batch_norm': np.random.choice([True, False])
}
optimal_arch[modality] = arch_config
return optimal_arch
def apply_architecture(self, result: Dict[str, Any], arch_config: Dict[str, Any]) -> Dict[str, Any]:
"""Apply architecture configuration to result"""
optimized_result = result.copy()
# Simulate architecture optimization
optimization_factor = 1.0 + (arch_config.get('layers', 3) - 3) * 0.05
# Optimize features
for feature, value in optimized_result['features'].items():
optimized_result['features'][feature] *= optimization_factor
# Optimize confidence
optimized_result['confidence'] = min(1.0, result['confidence'] * optimization_factor)
return optimized_result
def architecture_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
"""Combine architecture-optimized results"""
combined_features = {}
combined_confidence = 0.0
optimization_gain = 0.0
for modality, result in results.items():
features = result['optimized_result']['features']
confidence = result['optimized_result']['confidence']
# Add features
for feature, value in features.items():
if feature not in combined_features:
combined_features[feature] = 0.0
combined_features[feature] += value
combined_confidence += confidence
# Calculate optimization gain
original_confidence = result['result']['confidence']
optimization_gain += (confidence - original_confidence) / original_confidence if original_confidence > 0 else 0
# Average values
if results:
combined_confidence /= len(results)
optimization_gain /= len(results)
return {
'features': combined_features,
'confidence': combined_confidence,
'optimization_gain': optimization_gain
}
def tokenize_modality(self, data: Any, modality_type: str) -> List[str]:
"""Tokenize modality data for transformer"""
# Simulate tokenization
if modality_type == 'text':
return str(data).split()[:100] # Limit to 100 tokens
elif modality_type == 'image':
return [f"img_token_{i}" for i in range(50)] # 50 image tokens
elif modality_type == 'audio':
return [f"audio_token_{i}" for i in range(75)] # 75 audio tokens
else:
return [f"token_{i}" for i in range(25)] # 25 generic tokens
def transformer_fusion_embeddings(self, tokenized_modalities: Dict[str, List[str]]) -> Dict[str, Any]:
"""Apply transformer fusion to tokenized modalities"""
# Simulate transformer fusion
all_tokens = []
modality_boundaries = []
for modality, tokens in tokenized_modalities.items():
modality_boundaries.append(len(all_tokens))
all_tokens.extend(tokens)
# Simulate transformer processing
embedding_dim = 768
fused_embeddings = np.random.rand(len(all_tokens), embedding_dim).tolist()
return {
'tokens': all_tokens,
'embeddings': fused_embeddings,
'modality_boundaries': modality_boundaries,
'embedding_dim': embedding_dim
}
def decode_transformer_output(self, fused_embeddings: Dict[str, Any]) -> Dict[str, Any]:
"""Decode transformer output to final result"""
# Simulate decoding
embeddings = fused_embeddings['embeddings']
# Pool embeddings (simple average)
pooled_embedding = np.mean(embeddings, axis=0) if embeddings else []
return {
'features': {
'pooled_embedding': pooled_embedding.tolist(),
'embedding_dim': fused_embeddings['embedding_dim']
},
'confidence': 0.88
}
def build_modality_graph(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> Dict[str, Any]:
"""Build modality relationship graph"""
# Simulate graph construction
nodes = list(fusion_model.input_modalities)
edges = []
# Create edges based on synergy
for i, mod1 in enumerate(nodes):
for j, mod2 in enumerate(nodes):
if i < j:
synergy = self.calculate_synergy_score([mod1, mod2])
if synergy > 0.5: # Only add edges for high synergy
edges.append({
'source': mod1,
'target': mod2,
'weight': synergy
})
return {
'nodes': nodes,
'edges': edges,
'node_features': {node: np.random.rand(64).tolist() for node in nodes}
}
def gnn_fusion_embeddings(self, modality_graph: Dict[str, Any]) -> Dict[str, Any]:
"""Apply Graph Neural Network fusion"""
# Simulate GNN processing
nodes = modality_graph['nodes']
edges = modality_graph['edges']
node_features = modality_graph['node_features']
# Simulate GNN layers
gnn_embeddings = {}
for node in nodes:
# Aggregate neighbor features
neighbor_features = []
for edge in edges:
if edge['target'] == node:
neighbor_features.extend(node_features[edge['source']])
elif edge['source'] == node:
neighbor_features.extend(node_features[edge['target']])
# Combine self and neighbor features
self_features = node_features[node]
if neighbor_features:
combined_features = np.mean([self_features] + [neighbor_features], axis=0).tolist()
else:
combined_features = self_features
gnn_embeddings[node] = combined_features
return {
'node_embeddings': gnn_embeddings,
'graph_embedding': np.mean(list(gnn_embeddings.values()), axis=0).tolist()
}
def decode_gnn_output(self, graph_embeddings: Dict[str, Any]) -> Dict[str, Any]:
"""Decode GNN output to final result"""
graph_embedding = graph_embeddings['graph_embedding']
return {
'features': {
'graph_embedding': graph_embedding,
'embedding_dim': len(graph_embedding)
},
'confidence': 0.82
}
# Helper methods for feature extraction (simulated)
def extract_text_features(self, data: Any) -> Dict[str, float]:
return {'length': len(str(data)), 'complexity': 0.7, 'sentiment': 0.8}
def generate_text_embeddings(self, data: Any) -> List[float]:
return np.random.rand(768).tolist()
def extract_image_features(self, data: Any) -> Dict[str, float]:
return {'brightness': 0.6, 'contrast': 0.7, 'sharpness': 0.8}
def generate_image_embeddings(self, data: Any) -> List[float]:
return np.random.rand(512).tolist()
def extract_audio_features(self, data: Any) -> Dict[str, float]:
return {'loudness': 0.7, 'pitch': 0.6, 'tempo': 0.8}
def generate_audio_embeddings(self, data: Any) -> List[float]:
return np.random.rand(256).tolist()
def extract_video_features(self, data: Any) -> Dict[str, float]:
return {'motion': 0.7, 'clarity': 0.8, 'duration': 0.6}
def generate_video_embeddings(self, data: Any) -> List[float]:
return np.random.rand(1024).tolist()
def extract_structured_features(self, data: Any) -> Dict[str, float]:
return {'completeness': 0.9, 'consistency': 0.8, 'quality': 0.85}
def generate_structured_embeddings(self, data: Any) -> List[float]:
return np.random.rand(128).tolist()
def calculate_ensemble_confidence(self, results: Dict[str, Any]) -> float:
"""Calculate overall confidence for ensemble fusion"""
confidences = [result['confidence'] for result in results.values()]
return np.mean(confidences) if confidences else 0.5

View File

@@ -0,0 +1,616 @@
"""
Agent Reputation and Trust Service
Implements reputation management, trust score calculations, and economic profiling
"""
import asyncio
import math
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import json
import logging
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.reputation import (
AgentReputation, TrustScoreCalculation, ReputationEvent,
AgentEconomicProfile, CommunityFeedback, ReputationLevelThreshold,
ReputationLevel, TrustScoreCategory
)
from ..domain.agent import AIAgentWorkflow, AgentStatus
from ..domain.payment import PaymentTransaction
logger = logging.getLogger(__name__)
class TrustScoreCalculator:
"""Advanced trust score calculation algorithms"""
def __init__(self):
# Weight factors for different categories
self.weights = {
TrustScoreCategory.PERFORMANCE: 0.35,
TrustScoreCategory.RELIABILITY: 0.25,
TrustScoreCategory.COMMUNITY: 0.20,
TrustScoreCategory.SECURITY: 0.10,
TrustScoreCategory.ECONOMIC: 0.10
}
# Decay factors for time-based scoring
self.decay_factors = {
'daily': 0.95,
'weekly': 0.90,
'monthly': 0.80,
'yearly': 0.60
}
def calculate_performance_score(
self,
agent_id: str,
session: Session,
time_window: timedelta = timedelta(days=30)
) -> float:
"""Calculate performance-based trust score component"""
# Get recent job completions
cutoff_date = datetime.utcnow() - time_window
# Query performance metrics
performance_query = select(func.count()).where(
and_(
AgentReputation.agent_id == agent_id,
AgentReputation.updated_at >= cutoff_date
)
)
# For now, use existing performance rating
# In real implementation, this would analyze actual job performance
reputation = session.exec(
select(AgentReputation).where(AgentReputation.agent_id == agent_id)
).first()
if not reputation:
return 500.0 # Neutral score
# Base performance score from rating (1-5 stars to 0-1000)
base_score = (reputation.performance_rating / 5.0) * 1000
# Apply success rate modifier
if reputation.transaction_count > 0:
success_modifier = reputation.success_rate / 100.0
base_score *= success_modifier
# Apply response time modifier (lower is better)
if reputation.average_response_time > 0:
# Normalize response time (assuming 5000ms as baseline)
response_modifier = max(0.5, 1.0 - (reputation.average_response_time / 10000.0))
base_score *= response_modifier
return min(1000.0, max(0.0, base_score))
def calculate_reliability_score(
self,
agent_id: str,
session: Session,
time_window: timedelta = timedelta(days=30)
) -> float:
"""Calculate reliability-based trust score component"""
reputation = session.exec(
select(AgentReputation).where(AgentReputation.agent_id == agent_id)
).first()
if not reputation:
return 500.0
# Base reliability score from reliability percentage
base_score = reputation.reliability_score * 10 # Convert 0-100 to 0-1000
# Apply uptime modifier
if reputation.uptime_percentage > 0:
uptime_modifier = reputation.uptime_percentage / 100.0
base_score *= uptime_modifier
# Apply job completion ratio
total_jobs = reputation.jobs_completed + reputation.jobs_failed
if total_jobs > 0:
completion_ratio = reputation.jobs_completed / total_jobs
base_score *= completion_ratio
return min(1000.0, max(0.0, base_score))
def calculate_community_score(
self,
agent_id: str,
session: Session,
time_window: timedelta = timedelta(days=90)
) -> float:
"""Calculate community-based trust score component"""
cutoff_date = datetime.utcnow() - time_window
# Get recent community feedback
feedback_query = select(CommunityFeedback).where(
and_(
CommunityFeedback.agent_id == agent_id,
CommunityFeedback.created_at >= cutoff_date,
CommunityFeedback.moderation_status == "approved"
)
)
feedbacks = session.exec(feedback_query).all()
if not feedbacks:
return 500.0 # Neutral score
# Calculate weighted average rating
total_weight = 0.0
weighted_sum = 0.0
for feedback in feedbacks:
weight = feedback.verification_weight
rating = feedback.overall_rating
weighted_sum += rating * weight
total_weight += weight
if total_weight > 0:
avg_rating = weighted_sum / total_weight
base_score = (avg_rating / 5.0) * 1000
else:
base_score = 500.0
# Apply feedback volume modifier
feedback_count = len(feedbacks)
if feedback_count > 0:
volume_modifier = min(1.2, 1.0 + (feedback_count / 100.0))
base_score *= volume_modifier
return min(1000.0, max(0.0, base_score))
def calculate_security_score(
self,
agent_id: str,
session: Session,
time_window: timedelta = timedelta(days=180)
) -> float:
"""Calculate security-based trust score component"""
reputation = session.exec(
select(AgentReputation).where(AgentReputation.agent_id == agent_id)
).first()
if not reputation:
return 500.0
# Base security score
base_score = 800.0 # Start with high base score
# Apply dispute history penalty
if reputation.transaction_count > 0:
dispute_ratio = reputation.dispute_count / reputation.transaction_count
dispute_penalty = dispute_ratio * 500 # Max 500 point penalty
base_score -= dispute_penalty
# Apply certifications boost
if reputation.certifications:
certification_boost = min(200.0, len(reputation.certifications) * 50.0)
base_score += certification_boost
return min(1000.0, max(0.0, base_score))
def calculate_economic_score(
self,
agent_id: str,
session: Session,
time_window: timedelta = timedelta(days=30)
) -> float:
"""Calculate economic-based trust score component"""
reputation = session.exec(
select(AgentReputation).where(AgentReputation.agent_id == agent_id)
).first()
if not reputation:
return 500.0
# Base economic score from earnings consistency
if reputation.total_earnings > 0 and reputation.transaction_count > 0:
avg_earning_per_transaction = reputation.total_earnings / reputation.transaction_count
# Higher average earnings indicate higher-value work
earning_modifier = min(2.0, avg_earning_per_transaction / 0.1) # 0.1 AITBC baseline
base_score = 500.0 * earning_modifier
else:
base_score = 500.0
# Apply success rate modifier
if reputation.success_rate > 0:
success_modifier = reputation.success_rate / 100.0
base_score *= success_modifier
return min(1000.0, max(0.0, base_score))
def calculate_composite_trust_score(
    self,
    agent_id: str,
    session: Session,
    time_window: timedelta = timedelta(days=30)
) -> float:
    """Blend every trust component into a single 0-1000 score.

    Each component is weighted via ``self.weights``; the result is then
    smoothed 70/30 against the agent's previous trust score (when one
    exists) to avoid abrupt swings.
    """
    components = {
        TrustScoreCategory.PERFORMANCE: self.calculate_performance_score(agent_id, session, time_window),
        TrustScoreCategory.RELIABILITY: self.calculate_reliability_score(agent_id, session, time_window),
        TrustScoreCategory.COMMUNITY: self.calculate_community_score(agent_id, session, time_window),
        TrustScoreCategory.SECURITY: self.calculate_security_score(agent_id, session, time_window),
        TrustScoreCategory.ECONOMIC: self.calculate_economic_score(agent_id, session, time_window),
    }
    blended = sum(self.weights[category] * value for category, value in components.items())

    rep = session.exec(
        select(AgentReputation).where(AgentReputation.agent_id == agent_id)
    ).first()
    if rep and rep.trust_score > 0:
        # 70% new score, 30% previous score for stability.
        blended = blended * 0.7 + rep.trust_score * 0.3
    return max(0.0, min(1000.0, blended))
def determine_reputation_level(self, trust_score: float) -> ReputationLevel:
    """Map a 0-1000 trust score onto a discrete reputation level."""
    # Ladder is scanned top-down; first floor the score clears wins.
    ladder = (
        (900, ReputationLevel.MASTER),
        (750, ReputationLevel.EXPERT),
        (600, ReputationLevel.ADVANCED),
        (400, ReputationLevel.INTERMEDIATE),
    )
    for floor, level in ladder:
        if trust_score >= floor:
            return level
    return ReputationLevel.BEGINNER
class ReputationService:
    """Main reputation management service.

    Owns the persistence of agent reputation profiles and coordinates the
    TrustScoreCalculator to keep trust scores, reputation levels and the
    audit trail of reputation events up to date.
    """

    def __init__(self, session: Session):
        self.session = session
        self.calculator = TrustScoreCalculator()

    async def create_reputation_profile(self, agent_id: str) -> AgentReputation:
        """Return the reputation profile for ``agent_id``, creating a neutral one if absent."""
        existing = self.session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if existing:
            return existing
        # New agents start at the neutral midpoint of every scale.
        reputation = AgentReputation(
            agent_id=agent_id,
            trust_score=500.0,  # neutral starting score
            reputation_level=ReputationLevel.BEGINNER,
            performance_rating=3.0,
            reliability_score=50.0,
            community_rating=3.0,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
        )
        self.session.add(reputation)
        self.session.commit()
        self.session.refresh(reputation)
        logger.info(f"Created reputation profile for agent {agent_id}")
        return reputation

    async def update_trust_score(
        self,
        agent_id: str,
        event_type: str,
        impact_data: Dict[str, Any]
    ) -> AgentReputation:
        """Recalculate the composite trust score after an event and persist it.

        Creates a ReputationEvent capturing the before/after scores and
        appends an entry to the profile's JSON reputation history.
        """
        reputation = await self.create_reputation_profile(agent_id)
        old_trust_score = reputation.trust_score
        old_reputation_level = reputation.reputation_level
        new_trust_score = self.calculator.calculate_composite_trust_score(agent_id, self.session)
        new_reputation_level = self.calculator.determine_reputation_level(new_trust_score)
        event = ReputationEvent(
            agent_id=agent_id,
            event_type=event_type,
            impact_score=new_trust_score - old_trust_score,
            trust_score_before=old_trust_score,
            trust_score_after=new_trust_score,
            reputation_level_before=old_reputation_level,
            reputation_level_after=new_reputation_level,
            event_data=impact_data,
            occurred_at=datetime.utcnow(),
            processed_at=datetime.utcnow()
        )
        self.session.add(event)
        reputation.trust_score = new_trust_score
        reputation.reputation_level = new_reputation_level
        reputation.updated_at = datetime.utcnow()
        reputation.last_activity = datetime.utcnow()
        history_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "event_type": event_type,
            "trust_score_change": new_trust_score - old_trust_score,
            "new_trust_score": new_trust_score,
            "reputation_level": new_reputation_level.value
        }
        # Fix: rebind the JSON column instead of mutating it in place.
        # SQLAlchemy does not detect in-place list mutation on JSON columns,
        # so appended history entries were silently dropped at commit time.
        reputation.reputation_history = (reputation.reputation_history or []) + [history_entry]
        self.session.commit()
        self.session.refresh(reputation)
        logger.info(f"Updated trust score for agent {agent_id}: {old_trust_score} -> {new_trust_score}")
        return reputation

    async def record_job_completion(
        self,
        agent_id: str,
        job_id: str,
        success: bool,
        response_time: float,
        earnings: float
    ) -> AgentReputation:
        """Record a finished job and fold its outcome into the agent's reputation.

        Args:
            agent_id: Agent that executed the job.
            job_id: Identifier of the completed job (stored in event data).
            success: Whether the job finished successfully.
            response_time: Job response time (milliseconds, per the thresholds below).
            earnings: AITBC earned for the job.
        """
        reputation = await self.create_reputation_profile(agent_id)
        # Number of jobs recorded *before* this one. Fix: the running
        # average previously used the post-increment completed count,
        # which over-weighted the historical average (and ignored failed
        # jobs that also carry a response time).
        prior_jobs = reputation.jobs_completed + reputation.jobs_failed
        if success:
            reputation.jobs_completed += 1
        else:
            reputation.jobs_failed += 1
        # Update response time as a running average over all recorded jobs.
        if reputation.average_response_time == 0:
            reputation.average_response_time = response_time
        else:
            reputation.average_response_time = (
                (reputation.average_response_time * prior_jobs + response_time) /
                (prior_jobs + 1)
            )
        # Update earnings and transaction tallies.
        reputation.total_earnings += earnings
        reputation.transaction_count += 1
        # Success rate over all jobs; reliability mirrors it directly.
        total_jobs = reputation.jobs_completed + reputation.jobs_failed
        reputation.success_rate = (reputation.jobs_completed / total_jobs) * 100.0 if total_jobs > 0 else 0.0
        reputation.reliability_score = reputation.success_rate
        # Nudge the 1-5 performance rating: reward fast successes,
        # penalise failures or very slow responses.
        if success and response_time < 5000:
            reputation.performance_rating = min(5.0, reputation.performance_rating + 0.1)
        elif not success or response_time > 10000:
            reputation.performance_rating = max(1.0, reputation.performance_rating - 0.1)
        reputation.updated_at = datetime.utcnow()
        reputation.last_activity = datetime.utcnow()
        impact_data = {
            "job_id": job_id,
            "success": success,
            "response_time": response_time,
            "earnings": earnings,
            "total_jobs": total_jobs,
            "success_rate": reputation.success_rate
        }
        # Recompute and persist the trust score (this also commits the
        # field changes made above).
        await self.update_trust_score(agent_id, "job_completed", impact_data)
        logger.info(f"Recorded job completion for agent {agent_id}: success={success}, earnings={earnings}")
        return reputation

    async def add_community_feedback(
        self,
        agent_id: str,
        reviewer_id: str,
        ratings: Dict[str, float],
        feedback_text: str = "",
        tags: List[str] = None
    ) -> CommunityFeedback:
        """Store a community review and refresh the agent's community rating.

        Missing rating dimensions default to a neutral 3.0.
        """
        feedback = CommunityFeedback(
            agent_id=agent_id,
            reviewer_id=reviewer_id,
            overall_rating=ratings.get("overall", 3.0),
            performance_rating=ratings.get("performance", 3.0),
            communication_rating=ratings.get("communication", 3.0),
            reliability_rating=ratings.get("reliability", 3.0),
            value_rating=ratings.get("value", 3.0),
            feedback_text=feedback_text,
            feedback_tags=tags or [],
            created_at=datetime.utcnow()
        )
        self.session.add(feedback)
        self.session.commit()
        self.session.refresh(feedback)
        await self._update_community_rating(agent_id)
        logger.info(f"Added community feedback for agent {agent_id} from reviewer {reviewer_id}")
        return feedback

    async def _update_community_rating(self, agent_id: str):
        """Recompute the community rating as a verification-weighted average
        of all approved feedback for the agent."""
        feedbacks = self.session.exec(
            select(CommunityFeedback).where(
                and_(
                    CommunityFeedback.agent_id == agent_id,
                    CommunityFeedback.moderation_status == "approved"
                )
            )
        ).all()
        if not feedbacks:
            return
        total_weight = 0.0
        weighted_sum = 0.0
        for feedback in feedbacks:
            weight = feedback.verification_weight
            weighted_sum += feedback.overall_rating * weight
            total_weight += weight
        if total_weight > 0:
            avg_rating = weighted_sum / total_weight
            reputation = self.session.exec(
                select(AgentReputation).where(AgentReputation.agent_id == agent_id)
            ).first()
            if reputation:
                reputation.community_rating = avg_rating
                reputation.updated_at = datetime.utcnow()
                self.session.commit()

    async def get_reputation_summary(self, agent_id: str) -> Dict[str, Any]:
        """Return a full reputation snapshot including recent events and feedback.

        Returns an ``{"error": ...}`` dict when no profile exists.
        """
        reputation = self.session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return {"error": "Reputation profile not found"}
        # Last 30 days of reputation events, newest first.
        recent_events = self.session.exec(
            select(ReputationEvent).where(
                and_(
                    ReputationEvent.agent_id == agent_id,
                    ReputationEvent.occurred_at >= datetime.utcnow() - timedelta(days=30)
                )
            ).order_by(ReputationEvent.occurred_at.desc()).limit(10)
        ).all()
        # Latest approved community feedback.
        recent_feedback = self.session.exec(
            select(CommunityFeedback).where(
                and_(
                    CommunityFeedback.agent_id == agent_id,
                    CommunityFeedback.moderation_status == "approved"
                )
            ).order_by(CommunityFeedback.created_at.desc()).limit(5)
        ).all()
        return {
            "agent_id": agent_id,
            "trust_score": reputation.trust_score,
            "reputation_level": reputation.reputation_level.value,
            "performance_rating": reputation.performance_rating,
            "reliability_score": reputation.reliability_score,
            "community_rating": reputation.community_rating,
            "total_earnings": reputation.total_earnings,
            "transaction_count": reputation.transaction_count,
            "success_rate": reputation.success_rate,
            "jobs_completed": reputation.jobs_completed,
            "jobs_failed": reputation.jobs_failed,
            "average_response_time": reputation.average_response_time,
            "dispute_count": reputation.dispute_count,
            "certifications": reputation.certifications,
            "specialization_tags": reputation.specialization_tags,
            "geographic_region": reputation.geographic_region,
            # Fix: last_activity is unset until the first recorded event;
            # guard against None before calling isoformat().
            "last_activity": reputation.last_activity.isoformat() if reputation.last_activity else None,
            "recent_events": [
                {
                    "event_type": event.event_type,
                    "impact_score": event.impact_score,
                    "occurred_at": event.occurred_at.isoformat()
                }
                for event in recent_events
            ],
            "recent_feedback": [
                {
                    "overall_rating": feedback.overall_rating,
                    "feedback_text": feedback.feedback_text,
                    "created_at": feedback.created_at.isoformat()
                }
                for feedback in recent_feedback
            ]
        }

    async def get_leaderboard(
        self,
        category: str = "trust_score",
        limit: int = 50,
        region: str = None
    ) -> List[Dict[str, Any]]:
        """Return the top agents ranked by ``category`` (an AgentReputation column).

        Raises AttributeError if ``category`` is not a column of AgentReputation.
        Fix: the optional region filter is now applied before order/limit
        (some SQLAlchemy versions reject .where() after .limit()).
        """
        query = select(AgentReputation)
        if region:
            query = query.where(AgentReputation.geographic_region == region)
        query = query.order_by(getattr(AgentReputation, category).desc()).limit(limit)
        reputations = self.session.exec(query).all()
        leaderboard = []
        for rank, reputation in enumerate(reputations, 1):
            leaderboard.append({
                "rank": rank,
                "agent_id": reputation.agent_id,
                "trust_score": reputation.trust_score,
                "reputation_level": reputation.reputation_level.value,
                "performance_rating": reputation.performance_rating,
                "reliability_score": reputation.reliability_score,
                "community_rating": reputation.community_rating,
                "total_earnings": reputation.total_earnings,
                "transaction_count": reputation.transaction_count,
                "geographic_region": reputation.geographic_region,
                "specialization_tags": reputation.specialization_tags
            })
        return leaderboard

View File

@@ -0,0 +1,656 @@
"""
Agent Reward Engine Service
Implements performance-based reward calculations, distributions, and tier management
"""
import asyncio
import math
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import json
import logging
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.rewards import (
AgentRewardProfile, RewardTierConfig, RewardCalculation, RewardDistribution,
RewardEvent, RewardMilestone, RewardAnalytics, RewardTier, RewardType, RewardStatus
)
from ..domain.reputation import AgentReputation, ReputationLevel
from ..domain.payment import PaymentTransaction
logger = logging.getLogger(__name__)
class RewardCalculator:
    """Computes reward amounts: tier multiplier plus performance,
    loyalty, referral and milestone bonuses."""

    def __init__(self):
        # Base reward rates (in AITBC).
        self.base_rates = {
            'job_completion': 0.01,  # base reward per job
            'high_performance': 0.005,  # additional for high performance
            'perfect_rating': 0.01,  # bonus for 5-star ratings
            'on_time_delivery': 0.002,  # bonus for on-time delivery
            'repeat_client': 0.003,  # bonus for repeat clients
        }
        # Thresholds used by the performance bonus calculation.
        self.performance_thresholds = {
            'excellent': 4.5,  # rating threshold for excellent performance
            'good': 4.0,  # rating threshold for good performance
            'response_time_fast': 2000,  # response time (ms) for fast
            'response_time_excellent': 1000,  # response time (ms) for excellent
        }

    def calculate_tier_multiplier(self, trust_score: float, session: Session) -> float:
        """Reward multiplier for the agent's tier, from config or the built-in ladder."""
        cfg = session.exec(
            select(RewardTierConfig).where(
                and_(
                    RewardTierConfig.min_trust_score <= trust_score,
                    RewardTierConfig.is_active == True
                )
            ).order_by(RewardTierConfig.min_trust_score.desc())
        ).first()
        if cfg is not None:
            return cfg.base_multiplier
        # No configured tiers -- fall back to the default ladder:
        # Diamond / Platinum / Gold / Silver, else Bronze.
        for floor, multiplier in ((900, 2.0), (750, 1.5), (600, 1.2), (400, 1.1)):
            if trust_score >= floor:
                return multiplier
        return 1.0

    def calculate_performance_bonus(
        self,
        performance_metrics: Dict[str, Any],
        session: Session
    ) -> float:
        """Bonus fraction earned from rating, latency, success rate and job volume."""
        thresholds = self.performance_thresholds
        extra = 0.0

        # Rating bonus: +50% excellent, +20% good.
        rating = performance_metrics.get('performance_rating', 3.0)
        if rating >= thresholds['excellent']:
            extra += 0.5
        elif rating >= thresholds['good']:
            extra += 0.2

        # Response-time bonus: +30% excellent, +10% fast.
        latency = performance_metrics.get('average_response_time', 5000)
        if latency <= thresholds['response_time_excellent']:
            extra += 0.3
        elif latency <= thresholds['response_time_fast']:
            extra += 0.1

        # Success-rate bonus: +20% at >=95%, +10% at >=90%.
        success_rate = performance_metrics.get('success_rate', 80.0)
        if success_rate >= 95.0:
            extra += 0.2
        elif success_rate >= 90.0:
            extra += 0.1

        # Volume bonus: +15% at >=100 jobs, +10% at >=50.
        completed = performance_metrics.get('jobs_completed', 0)
        if completed >= 100:
            extra += 0.15
        elif completed >= 50:
            extra += 0.1

        return extra

    def calculate_loyalty_bonus(self, agent_id: str, session: Session) -> float:
        """Bonus fraction from streaks, lifetime earnings, referrals and contributions."""
        profile = session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if profile is None:
            return 0.0

        def tiered(value, ladder):
            # First (threshold, bonus) pair the value clears, else 0.
            for threshold, amount in ladder:
                if value >= threshold:
                    return amount
            return 0.0

        return (
            tiered(profile.current_streak, ((30, 0.3), (14, 0.2), (7, 0.1)))
            + tiered(profile.lifetime_earnings, ((1000, 0.2), (500, 0.1)))
            + tiered(profile.referral_count, ((10, 0.2), (5, 0.1)))
            + tiered(profile.community_contributions, ((20, 0.15), (10, 0.1)))
        )

    def calculate_referral_bonus(self, referral_data: Dict[str, Any]) -> float:
        """Referral bonus: 0.05 AITBC per referral, scaled by quality."""
        count = referral_data.get('referral_count', 0)
        quality = referral_data.get('referral_quality', 1.0)  # 0-1 scale
        # Quality maps linearly onto a 0.5x-1.0x multiplier.
        return (0.05 * count) * (0.5 + quality * 0.5)

    def calculate_milestone_bonus(self, agent_id: str, session: Session) -> float:
        """Sum unclaimed completed milestones and mark them claimed.

        The claim flags are mutated on the session; committing is left to
        the caller.
        """
        unclaimed = session.exec(
            select(RewardMilestone).where(
                and_(
                    RewardMilestone.agent_id == agent_id,
                    RewardMilestone.is_completed == True,
                    RewardMilestone.is_claimed == False
                )
            )
        ).all()
        total = 0.0
        for milestone in unclaimed:
            total += milestone.reward_amount
            milestone.is_claimed = True
            milestone.claimed_at = datetime.utcnow()
        return total

    def calculate_total_reward(
        self,
        agent_id: str,
        base_amount: float,
        performance_metrics: Dict[str, Any],
        session: Session
    ) -> Dict[str, Any]:
        """Combine the base amount with the tier multiplier and every bonus.

        Returns a breakdown dict of all components plus the final amount.
        """
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        trust_score = reputation.trust_score if reputation else 500.0

        tier_multiplier = self.calculate_tier_multiplier(trust_score, session)
        performance_bonus = self.calculate_performance_bonus(performance_metrics, session)
        loyalty_bonus = self.calculate_loyalty_bonus(agent_id, session)
        referral_bonus = self.calculate_referral_bonus(performance_metrics.get('referral_data', {}))
        milestone_bonus = self.calculate_milestone_bonus(agent_id, session)

        # Performance and loyalty scale the tier multiplier; referral and
        # milestone bonuses are flat additions on top.
        effective_multiplier = tier_multiplier * (1 + performance_bonus + loyalty_bonus)
        total_reward = base_amount * effective_multiplier + referral_bonus + milestone_bonus

        return {
            'base_amount': base_amount,
            'tier_multiplier': tier_multiplier,
            'performance_bonus': performance_bonus,
            'loyalty_bonus': loyalty_bonus,
            'referral_bonus': referral_bonus,
            'milestone_bonus': milestone_bonus,
            'effective_multiplier': effective_multiplier,
            'total_reward': total_reward,
            'trust_score': trust_score
        }
class RewardEngine:
    """Main reward management and distribution engine.

    Calculates rewards via RewardCalculator, persists calculation and
    distribution records, pays them out, and maintains each agent's
    reward profile, tier and event history.
    """

    def __init__(self, session: Session):
        self.session = session
        self.calculator = RewardCalculator()

    async def create_reward_profile(self, agent_id: str) -> AgentRewardProfile:
        """Return the reward profile for ``agent_id``, creating a Bronze one if absent."""
        existing = self.session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if existing:
            return existing
        profile = AgentRewardProfile(
            agent_id=agent_id,
            current_tier=RewardTier.BRONZE,
            tier_progress=0.0,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
        )
        self.session.add(profile)
        self.session.commit()
        self.session.refresh(profile)
        logger.info(f"Created reward profile for agent {agent_id}")
        return profile

    async def calculate_and_distribute_reward(
        self,
        agent_id: str,
        reward_type: RewardType,
        base_amount: float,
        performance_metrics: Dict[str, Any],
        reference_date: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """Calculate a reward, persist calculation/distribution records and pay it out.

        Args:
            agent_id: Recipient agent.
            reward_type: Category of reward being granted.
            base_amount: Pre-multiplier reward amount in AITBC.
            performance_metrics: Metrics fed into the bonus calculations
                (may include a 'referral_data' sub-dict).
            reference_date: Period the reward refers to (defaults to now).

        Returns:
            Summary dict with record ids, final amount and status.
        """
        await self.create_reward_profile(agent_id)
        reward_calculation = self.calculator.calculate_total_reward(
            agent_id, base_amount, performance_metrics, self.session
        )
        # Persist the calculation breakdown for auditing.
        calculation = RewardCalculation(
            agent_id=agent_id,
            reward_type=reward_type,
            base_amount=base_amount,
            tier_multiplier=reward_calculation['tier_multiplier'],
            performance_bonus=reward_calculation['performance_bonus'],
            loyalty_bonus=reward_calculation['loyalty_bonus'],
            referral_bonus=reward_calculation['referral_bonus'],
            milestone_bonus=reward_calculation['milestone_bonus'],
            total_reward=reward_calculation['total_reward'],
            effective_multiplier=reward_calculation['effective_multiplier'],
            reference_date=reference_date or datetime.utcnow(),
            trust_score_at_calculation=reward_calculation['trust_score'],
            performance_metrics=performance_metrics,
            calculated_at=datetime.utcnow()
        )
        self.session.add(calculation)
        self.session.commit()
        self.session.refresh(calculation)
        # Queue the payout.
        distribution = RewardDistribution(
            calculation_id=calculation.id,
            agent_id=agent_id,
            reward_amount=reward_calculation['total_reward'],
            reward_type=reward_type,
            status=RewardStatus.PENDING,
            created_at=datetime.utcnow(),
            scheduled_at=datetime.utcnow()
        )
        self.session.add(distribution)
        self.session.commit()
        self.session.refresh(distribution)
        await self.process_reward_distribution(distribution.id)
        await self.update_agent_reward_profile(agent_id, reward_calculation)
        await self.create_reward_event(
            agent_id, "reward_distributed", reward_type, reward_calculation['total_reward'],
            calculation_id=calculation.id, distribution_id=distribution.id
        )
        return {
            "calculation_id": calculation.id,
            "distribution_id": distribution.id,
            "reward_amount": reward_calculation['total_reward'],
            "reward_type": reward_type,
            "tier_multiplier": reward_calculation['tier_multiplier'],
            "total_bonus": reward_calculation['performance_bonus'] + reward_calculation['loyalty_bonus'],
            "status": "distributed"
        }

    async def process_reward_distribution(self, distribution_id: str) -> RewardDistribution:
        """Execute a pending distribution; no-op for non-pending ones.

        Raises:
            ValueError: If the distribution does not exist.
        """
        distribution = self.session.exec(
            select(RewardDistribution).where(RewardDistribution.id == distribution_id)
        ).first()
        if not distribution:
            raise ValueError(f"Distribution {distribution_id} not found")
        if distribution.status != RewardStatus.PENDING:
            return distribution
        try:
            # Simulated blockchain transaction; a real implementation would
            # submit to the chain and await confirmation here.
            transaction_id = f"tx_{uuid4().hex[:8]}"
            transaction_hash = f"0x{uuid4().hex}"
            distribution.transaction_id = transaction_id
            distribution.transaction_hash = transaction_hash
            distribution.transaction_status = "confirmed"
            distribution.status = RewardStatus.DISTRIBUTED
            distribution.processed_at = datetime.utcnow()
            distribution.confirmed_at = datetime.utcnow()
            self.session.commit()
            self.session.refresh(distribution)
            logger.info(f"Processed reward distribution {distribution_id} for agent {distribution.agent_id}")
        except Exception as e:
            # Record the failure and surface it to the caller.
            distribution.status = RewardStatus.CANCELLED
            distribution.error_message = str(e)
            distribution.retry_count += 1
            self.session.commit()
            logger.error(f"Failed to process reward distribution {distribution_id}: {str(e)}")
            raise
        return distribution

    async def update_agent_reward_profile(self, agent_id: str, reward_calculation: Dict[str, Any]):
        """Fold a distributed reward into the agent's profile (earnings, streaks, tier)."""
        profile = self.session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if not profile:
            return
        profile.base_earnings += reward_calculation['base_amount']
        profile.bonus_earnings += (
            reward_calculation['total_reward'] - reward_calculation['base_amount']
        )
        profile.total_earnings += reward_calculation['total_reward']
        profile.lifetime_earnings += reward_calculation['total_reward']
        profile.rewards_distributed += 1
        profile.last_reward_date = datetime.utcnow()
        profile.current_streak += 1
        if profile.current_streak > profile.longest_streak:
            profile.longest_streak = profile.current_streak
        # Fix: the calculator output carries no 'performance_rating' key, so
        # the old unconditional assignment reset performance_score to 0.0 on
        # every reward. Only overwrite when a rating is actually supplied.
        if 'performance_rating' in reward_calculation:
            profile.performance_score = reward_calculation['performance_rating']
        await self.check_and_update_tier(agent_id)
        profile.updated_at = datetime.utcnow()
        profile.last_activity = datetime.utcnow()
        self.session.commit()

    async def check_and_update_tier(self, agent_id: str):
        """Re-derive the agent's reward tier from its trust score and record upgrades."""
        reputation = self.session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return
        profile = self.session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if not profile:
            return
        new_tier = self.determine_reward_tier(reputation.trust_score)
        old_tier = profile.current_tier
        if new_tier != old_tier:
            profile.current_tier = new_tier
            profile.updated_at = datetime.utcnow()
            await self.create_reward_event(
                agent_id, "tier_upgrade", RewardType.SPECIAL_BONUS, 0.0,
                tier_impact=new_tier
            )
            logger.info(f"Agent {agent_id} upgraded from {old_tier} to {new_tier}")

    def determine_reward_tier(self, trust_score: float) -> RewardTier:
        """Map a trust score onto a reward tier.

        NOTE(review): these thresholds (950/850/750/600) differ from the
        fallback ladder in RewardCalculator.calculate_tier_multiplier
        (900/750/600/400) -- confirm which set is authoritative.
        """
        if trust_score >= 950:
            return RewardTier.DIAMOND
        elif trust_score >= 850:
            return RewardTier.PLATINUM
        elif trust_score >= 750:
            return RewardTier.GOLD
        elif trust_score >= 600:
            return RewardTier.SILVER
        else:
            return RewardTier.BRONZE

    async def create_reward_event(
        self,
        agent_id: str,
        event_type: str,
        reward_type: RewardType,
        reward_impact: float,
        calculation_id: Optional[str] = None,
        distribution_id: Optional[str] = None,
        tier_impact: Optional[RewardTier] = None
    ):
        """Persist an audit event for a reward-related occurrence."""
        event = RewardEvent(
            agent_id=agent_id,
            event_type=event_type,
            trigger_source="automatic",
            reward_impact=reward_impact,
            tier_impact=tier_impact,
            related_calculation_id=calculation_id,
            related_distribution_id=distribution_id,
            occurred_at=datetime.utcnow(),
            processed_at=datetime.utcnow()
        )
        self.session.add(event)
        self.session.commit()

    async def get_reward_summary(self, agent_id: str) -> Dict[str, Any]:
        """Return the agent's reward profile plus its last 30 days of activity.

        Returns an ``{"error": ...}`` dict when no profile exists.
        """
        profile = self.session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if not profile:
            return {"error": "Reward profile not found"}
        recent_calculations = self.session.exec(
            select(RewardCalculation).where(
                and_(
                    RewardCalculation.agent_id == agent_id,
                    RewardCalculation.calculated_at >= datetime.utcnow() - timedelta(days=30)
                )
            ).order_by(RewardCalculation.calculated_at.desc()).limit(10)
        ).all()
        recent_distributions = self.session.exec(
            select(RewardDistribution).where(
                and_(
                    RewardDistribution.agent_id == agent_id,
                    RewardDistribution.created_at >= datetime.utcnow() - timedelta(days=30)
                )
            ).order_by(RewardDistribution.created_at.desc()).limit(10)
        ).all()
        return {
            "agent_id": agent_id,
            "current_tier": profile.current_tier.value,
            "tier_progress": profile.tier_progress,
            "base_earnings": profile.base_earnings,
            "bonus_earnings": profile.bonus_earnings,
            "total_earnings": profile.total_earnings,
            "lifetime_earnings": profile.lifetime_earnings,
            "rewards_distributed": profile.rewards_distributed,
            "current_streak": profile.current_streak,
            "longest_streak": profile.longest_streak,
            "performance_score": profile.performance_score,
            "loyalty_score": profile.loyalty_score,
            "referral_count": profile.referral_count,
            "community_contributions": profile.community_contributions,
            "last_reward_date": profile.last_reward_date.isoformat() if profile.last_reward_date else None,
            "recent_calculations": [
                {
                    "reward_type": calc.reward_type.value,
                    "total_reward": calc.total_reward,
                    "calculated_at": calc.calculated_at.isoformat()
                }
                for calc in recent_calculations
            ],
            "recent_distributions": [
                {
                    "reward_amount": dist.reward_amount,
                    "status": dist.status.value,
                    "created_at": dist.created_at.isoformat()
                }
                for dist in recent_distributions
            ]
        }

    async def batch_process_pending_rewards(self, limit: int = 100) -> Dict[str, Any]:
        """Process due pending distributions (priority order); returns counts."""
        pending_distributions = self.session.exec(
            select(RewardDistribution).where(
                and_(
                    RewardDistribution.status == RewardStatus.PENDING,
                    RewardDistribution.scheduled_at <= datetime.utcnow()
                )
            ).order_by(RewardDistribution.priority.asc(), RewardDistribution.created_at.asc())
            .limit(limit)
        ).all()
        processed = 0
        failed = 0
        for distribution in pending_distributions:
            try:
                await self.process_reward_distribution(distribution.id)
                processed += 1
            except Exception as e:
                # Keep going -- one failed payout must not block the batch.
                failed += 1
                logger.error(f"Failed to process distribution {distribution.id}: {str(e)}")
        return {
            "processed": processed,
            "failed": failed,
            "total": len(pending_distributions)
        }

    async def get_reward_analytics(
        self,
        period_type: str = "daily",
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """Aggregate distributed-reward statistics for a period (default: last 30 days)."""
        if not start_date:
            start_date = datetime.utcnow() - timedelta(days=30)
        if not end_date:
            end_date = datetime.utcnow()
        # Fix: .all() belongs on the exec() result, not on the Select
        # statement -- the original raised AttributeError at runtime.
        distributions = self.session.exec(
            select(RewardDistribution).where(
                and_(
                    RewardDistribution.created_at >= start_date,
                    RewardDistribution.created_at <= end_date,
                    RewardDistribution.status == RewardStatus.DISTRIBUTED
                )
            )
        ).all()
        if not distributions:
            return {
                "period_type": period_type,
                "start_date": start_date.isoformat(),
                "end_date": end_date.isoformat(),
                "total_rewards_distributed": 0.0,
                "total_agents_rewarded": 0,
                "average_reward_per_agent": 0.0
            }
        total_rewards = sum(d.reward_amount for d in distributions)
        unique_agents = len(set(d.agent_id for d in distributions))
        average_reward = total_rewards / unique_agents if unique_agents > 0 else 0.0
        # Tier breakdown over the rewarded agents.
        agent_ids = list(set(d.agent_id for d in distributions))
        profiles = self.session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id.in_(agent_ids))
        ).all()
        tier_distribution = {}
        for profile in profiles:
            tier = profile.current_tier.value
            tier_distribution[tier] = tier_distribution.get(tier, 0) + 1
        return {
            "period_type": period_type,
            "start_date": start_date.isoformat(),
            "end_date": end_date.isoformat(),
            "total_rewards_distributed": total_rewards,
            "total_agents_rewarded": unique_agents,
            "average_reward_per_agent": average_reward,
            "tier_distribution": tier_distribution,
            "total_distributions": len(distributions)
        }

File diff suppressed because it is too large Load Diff

View File

@@ -5,11 +5,11 @@ Wants=aitbc-coordinator-api.service
[Service]
Type=simple
User=debian
Group=debian
WorkingDirectory=/home/oib/aitbc/apps/coordinator-api
Environment=PATH=/home/oib/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.routers.marketplace_enhanced_app:app --host 127.0.0.1 --port 8006
User=oib
Group=oib
WorkingDirectory=/home/oib/windsurf/aitbc/apps/coordinator-api
Environment=PATH=/home/oib/windsurf/aitbc/apps/coordinator-api/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ExecStart=/home/oib/windsurf/aitbc/apps/coordinator-api/.venv/bin/python -m uvicorn src.app.routers.marketplace_enhanced_app:app --host 127.0.0.1 --port 8006
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
TimeoutStopSec=5
@@ -26,7 +26,7 @@ SyslogIdentifier=aitbc-marketplace-enhanced
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/home/oib/aitbc/apps/coordinator-api
ReadWritePaths=/home/oib/windsurf/aitbc/apps/coordinator-api
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,39 @@
"""Integration tests for marketplace health endpoints (skipped unless URLs provided).
Set env vars to run:
MARKETPLACE_HEALTH_URL=http://127.0.0.1:18000/v1/health
MARKETPLACE_HEALTH_URL_ALT=http://127.0.0.1:18001/v1/health
"""
import json
import os
import urllib.request
import pytest
def _check_health(url: str) -> None:
    """Assert that *url* answers HTTP 200 with a JSON body whose status is healthy."""
    resp = urllib.request.urlopen(url, timeout=5)  # nosec: B310 external URL controlled via env
    try:
        assert resp.status == 200
        data = resp.read().decode("utf-8")
    finally:
        resp.close()
    try:
        payload = json.loads(data)
    except json.JSONDecodeError:
        pytest.fail(f"Health response not JSON: {data}")
    # Accept any of the common "healthy" markers, case-insensitively.
    assert payload.get("status", "").lower() in {"ok", "healthy", "pass"}
@pytest.mark.skipif(
    not os.getenv("MARKETPLACE_HEALTH_URL"),
    reason="MARKETPLACE_HEALTH_URL not set; integration test skipped",
)
def test_marketplace_health_primary():
    """Health probe against the primary marketplace instance (opt-in via env var)."""
    _check_health(os.environ["MARKETPLACE_HEALTH_URL"])
@pytest.mark.skipif(
    not os.getenv("MARKETPLACE_HEALTH_URL_ALT"),
    reason="MARKETPLACE_HEALTH_URL_ALT not set; integration test skipped",
)
def test_marketplace_health_secondary():
    """Health probe against the secondary marketplace instance (opt-in via env var)."""
    _check_health(os.environ["MARKETPLACE_HEALTH_URL_ALT"])

View File

@@ -0,0 +1,38 @@
"""Optional integration checks for Phase 8 endpoints (skipped unless URLs are provided).
Env vars (set any that you want to exercise):
EXPLORER_API_URL # e.g., http://127.0.0.1:8000/v1/explorer/blocks/head
MARKET_STATS_URL # e.g., http://127.0.0.1:8000/v1/marketplace/stats
ECON_STATS_URL # e.g., http://127.0.0.1:8000/v1/economics/summary
"""
import json
import os
import urllib.request
import pytest
def _check_json(url: str) -> None:
    """Assert that *url* responds with HTTP 200 and a body that parses as JSON."""
    with urllib.request.urlopen(url, timeout=5) as resp:  # nosec: B310 external URL controlled via env
        assert resp.status == 200
        body = resp.read().decode("utf-8")
    try:
        json.loads(body)
    except json.JSONDecodeError:
        pytest.fail(f"Response not JSON from {url}: {body}")
@pytest.mark.skipif(not os.getenv("EXPLORER_API_URL"), reason="EXPLORER_API_URL not set; explorer check skipped")
def test_explorer_api_head():
    """Explorer head endpoint must return JSON (enabled via EXPLORER_API_URL)."""
    explorer_url = os.environ["EXPLORER_API_URL"]
    _check_json(explorer_url)
@pytest.mark.skipif(not os.getenv("MARKET_STATS_URL"), reason="MARKET_STATS_URL not set; market stats check skipped")
def test_market_stats():
    """Marketplace stats endpoint must return JSON (enabled via MARKET_STATS_URL)."""
    stats_url = os.environ["MARKET_STATS_URL"]
    _check_json(stats_url)
@pytest.mark.skipif(not os.getenv("ECON_STATS_URL"), reason="ECON_STATS_URL not set; economics stats check skipped")
def test_economics_stats():
    """Economics summary endpoint must return JSON (enabled via ECON_STATS_URL)."""
    econ_url = os.environ["ECON_STATS_URL"]
    _check_json(econ_url)

View File

@@ -0,0 +1,59 @@
"""Integration checks mapped to Phase 8 tasks (skipped unless URLs provided).
Environment variables to enable:
MARKETPLACE_HEALTH_URL # e.g., http://127.0.0.1:18000/v1/health (multi-region primary)
MARKETPLACE_HEALTH_URL_ALT # e.g., http://127.0.0.1:18001/v1/health (multi-region secondary)
BLOCKCHAIN_RPC_URL # e.g., http://127.0.0.1:9080/rpc/head (blockchain integration)
COORDINATOR_HEALTH_URL # e.g., http://127.0.0.1:8000/v1/health (agent economics / API health)
"""
import json
import os
import urllib.request
import pytest
def _check_health(url: str, expect_status_field: bool = True) -> None:
    """Fetch *url*, assert HTTP 200, and optionally validate a JSON status field.

    When *expect_status_field* is False the body is read (to confirm the
    endpoint responds) but not parsed — used for endpoints like raw RPC
    heads whose payload shape is not a health document.
    """
    with urllib.request.urlopen(url, timeout=5) as resp:  # nosec: B310 external URL controlled via env
        assert resp.status == 200
        data = resp.read().decode("utf-8")
    if not expect_status_field:
        return
    try:
        payload = json.loads(data)
    except json.JSONDecodeError:
        pytest.fail(f"Response not JSON: {data}")
    # str() coercion: a non-string "status" (e.g. bool/int/null) should
    # produce a clean assertion failure, not an AttributeError on .lower().
    assert str(payload.get("status", "")).lower() in {"ok", "healthy", "pass"}
@pytest.mark.skipif(
    not os.getenv("MARKETPLACE_HEALTH_URL"),
    reason="MARKETPLACE_HEALTH_URL not set; multi-region primary health skipped",
)
def test_multi_region_primary_health():
    """Primary-region marketplace health check (enabled via MARKETPLACE_HEALTH_URL)."""
    primary_url = os.environ["MARKETPLACE_HEALTH_URL"]
    _check_health(primary_url)
@pytest.mark.skipif(
    not os.getenv("MARKETPLACE_HEALTH_URL_ALT"),
    reason="MARKETPLACE_HEALTH_URL_ALT not set; multi-region secondary health skipped",
)
def test_multi_region_secondary_health():
    """Secondary-region marketplace health check (enabled via MARKETPLACE_HEALTH_URL_ALT)."""
    secondary_url = os.environ["MARKETPLACE_HEALTH_URL_ALT"]
    _check_health(secondary_url)
@pytest.mark.skipif(
    not os.getenv("BLOCKCHAIN_RPC_URL"),
    reason="BLOCKCHAIN_RPC_URL not set; blockchain RPC check skipped",
)
def test_blockchain_rpc_head():
    """Blockchain RPC head responds with HTTP 200; no status-field validation."""
    rpc_url = os.environ["BLOCKCHAIN_RPC_URL"]
    _check_health(rpc_url, expect_status_field=False)
@pytest.mark.skipif(
    not os.getenv("COORDINATOR_HEALTH_URL"),
    reason="COORDINATOR_HEALTH_URL not set; coordinator health skipped",
)
def test_agent_api_health():
    """Coordinator API health check (enabled via COORDINATOR_HEALTH_URL)."""
    coordinator_url = os.environ["COORDINATOR_HEALTH_URL"]
    _check_health(coordinator_url)