Update database paths and fix foreign key references across coordinator API

- Change SQLite database path from `/home/oib/windsurf/aitbc/data/` to `/opt/data/`
- Fix foreign key references to use correct table names (users, wallets, gpu_registry)
- Replace governance router with new governance and community routers
- Add multi-modal RL router to main application
- Simplify DEPLOYMENT_READINESS_REPORT.md to focus on production deployment status
- Update governance router with decentralized DAO voting
This commit is contained in:
oib
2026-02-26 19:32:06 +01:00
parent 1e2ea0bb9d
commit 7bb2905cca
89 changed files with 38245 additions and 1260 deletions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,307 @@
"""
Community and Developer Ecosystem Services
Services for managing OpenClaw developer tools, SDKs, and third-party solutions
"""
from typing import Optional, List, Dict, Any
from sqlmodel import Session, select
from datetime import datetime
import logging
from uuid import uuid4
from ..domain.community import (
    DeveloperProfile, AgentSolution, InnovationLab,
    CommunityPost, Hackathon, HackathonStatus,
    DeveloperTier, SolutionStatus, LabStatus
)
logger = logging.getLogger(__name__)
class DeveloperEcosystemService:
    """Service for managing the developer ecosystem and SDKs"""

    def __init__(self, session: Session):
        # Database session used for all persistence operations.
        self.session = session

    async def create_developer_profile(self, user_id: str, username: str, bio: str = None, skills: List[str] = None) -> DeveloperProfile:
        """Create a new developer profile"""
        new_profile = DeveloperProfile(
            user_id=user_id,
            username=username,
            bio=bio,
            skills=[] if skills is None else skills,
        )
        self.session.add(new_profile)
        self.session.commit()
        self.session.refresh(new_profile)
        return new_profile

    async def get_developer_profile(self, developer_id: str) -> Optional[DeveloperProfile]:
        """Get developer profile by ID"""
        statement = select(DeveloperProfile).where(
            DeveloperProfile.developer_id == developer_id
        )
        return self.session.exec(statement).first()

    async def get_sdk_release_info(self) -> Dict[str, Any]:
        """Get latest SDK information for developers"""
        # Mocked SDK release data; a real implementation would read this from
        # a release registry.
        return {
            "latest_version": "v1.2.0",
            "release_date": datetime.utcnow().isoformat(),
            "supported_languages": ["python", "typescript", "rust"],
            "download_urls": {
                "python": "pip install aitbc-agent-sdk",
                "typescript": "npm install @aitbc/agent-sdk"
            },
            "features": [
                "Advanced Meta-Learning Integration",
                "Cross-Domain Capability Synthesizer",
                "Distributed Task Processing Client",
                "Decentralized Governance Modules"
            ]
        }

    async def update_developer_reputation(self, developer_id: str, score_delta: float) -> DeveloperProfile:
        """Update a developer's reputation score and potentially tier"""
        profile = await self.get_developer_profile(developer_id)
        if profile is None:
            raise ValueError(f"Developer {developer_id} not found")
        profile.reputation_score += score_delta
        # Promote to the highest tier whose threshold is met; tiers are never
        # demoted here when the score drops.
        tier_ladder = (
            (1000, DeveloperTier.MASTER),
            (500, DeveloperTier.EXPERT),
            (100, DeveloperTier.BUILDER),
        )
        for threshold, tier in tier_ladder:
            if profile.reputation_score >= threshold:
                profile.tier = tier
                break
        self.session.add(profile)
        self.session.commit()
        self.session.refresh(profile)
        return profile
class ThirdPartySolutionService:
    """Service for managing the third-party agent solutions marketplace"""

    def __init__(self, session: Session):
        # Database session used for all persistence operations.
        self.session = session

    async def publish_solution(self, developer_id: str, data: Dict[str, Any]) -> AgentSolution:
        """Publish a new third-party agent solution.

        Free solutions are published immediately; paid ones remain in REVIEW
        until manually approved.
        """
        solution = AgentSolution(
            developer_id=developer_id,
            title=data.get('title'),
            description=data.get('description'),
            version=data.get('version', '1.0.0'),
            capabilities=data.get('capabilities', []),
            frameworks=data.get('frameworks', []),
            price_model=data.get('price_model', 'free'),
            price_amount=data.get('price_amount', 0.0),
            solution_metadata=data.get('metadata', {}),
            status=SolutionStatus.REVIEW
        )
        # Auto-publish if free, otherwise manual review required
        if solution.price_model == 'free':
            solution.status = SolutionStatus.PUBLISHED
            solution.published_at = datetime.utcnow()
        self.session.add(solution)
        self.session.commit()
        self.session.refresh(solution)
        return solution

    async def list_published_solutions(self, category: str = None, limit: int = 50) -> List[AgentSolution]:
        """List published solutions, optionally filtered by capability/category.

        NOTE(review): the limit is applied before the in-Python category
        filter, so a filtered call can return fewer than `limit` matches even
        when more exist. Pushing the capability filter into the database
        (e.g. PostgreSQL JSON operators) would fix this.
        """
        query = select(AgentSolution).where(AgentSolution.status == SolutionStatus.PUBLISHED)
        solutions = self.session.exec(query.limit(limit)).all()
        if category:
            # capabilities is a JSON list column, so filter in Python.
            solutions = [s for s in solutions if category in s.capabilities]
        return solutions

    async def purchase_solution(self, buyer_id: str, solution_id: str) -> Dict[str, Any]:
        """Purchase or download a third-party solution.

        Increments the download counter, credits the developer for paid
        solutions, and returns installation instructions.

        Raises:
            ValueError: if the solution does not exist or is not published.
        """
        solution = self.session.exec(
            select(AgentSolution).where(AgentSolution.solution_id == solution_id)
        ).first()
        if not solution or solution.status != SolutionStatus.PUBLISHED:
            raise ValueError("Solution not found or not available")
        # Update download count
        solution.downloads += 1
        self.session.add(solution)
        # Update developer earnings if paid
        if solution.price_amount > 0:
            dev = self.session.exec(
                select(DeveloperProfile).where(DeveloperProfile.developer_id == solution.developer_id)
            ).first()
            if dev:
                dev.total_earnings += solution.price_amount
                self.session.add(dev)
        self.session.commit()
        # Bug fix: generate the access token once. Previously two different
        # uuid4 values were produced, so the returned access_token never
        # matched the token embedded in the installation command.
        access_token = f"acc_{uuid4().hex}"
        return {
            "success": True,
            "solution_id": solution_id,
            "access_token": access_token,
            "installation_cmd": f"aitbc install {solution_id} --token {access_token}"
        }
class InnovationLabService:
    """Service for managing agent innovation labs and research programs"""

    def __init__(self, session: Session):
        # Database session used for all persistence operations.
        self.session = session

    async def propose_lab(self, researcher_id: str, data: Dict[str, Any]) -> InnovationLab:
        """Propose a new innovation lab/research program"""
        lab = InnovationLab(
            title=data.get('title'),
            description=data.get('description'),
            research_area=data.get('research_area'),
            lead_researcher_id=researcher_id,
            funding_goal=data.get('funding_goal', 0.0),
            milestones=data.get('milestones', [])
        )
        self.session.add(lab)
        self.session.commit()
        self.session.refresh(lab)
        return lab

    async def join_lab(self, lab_id: str, developer_id: str) -> InnovationLab:
        """Join an active innovation lab; joining twice is a no-op.

        Raises:
            ValueError: if the lab does not exist.
        """
        lab = self.session.exec(select(InnovationLab).where(InnovationLab.lab_id == lab_id)).first()
        if not lab:
            raise ValueError("Lab not found")
        if developer_id not in lab.members:
            # Bug fix: reassign the JSON column instead of appending in place.
            # In-place mutation of a JSON list is not reliably detected by
            # SQLAlchemy change tracking, so the new member could silently
            # fail to persist. (Assumes `members` is a plain JSON column, not
            # a MutableList - confirm against the model definition.)
            lab.members = [*lab.members, developer_id]
            self.session.add(lab)
            self.session.commit()
            self.session.refresh(lab)
        return lab

    async def fund_lab(self, lab_id: str, amount: float) -> InnovationLab:
        """Provide funding to an innovation lab.

        Flips the lab to ACTIVE once its funding goal is reached while in the
        FUNDING stage.

        Raises:
            ValueError: if the lab does not exist.
        """
        lab = self.session.exec(select(InnovationLab).where(InnovationLab.lab_id == lab_id)).first()
        if not lab:
            raise ValueError("Lab not found")
        lab.current_funding += amount
        if lab.status == LabStatus.FUNDING and lab.current_funding >= lab.funding_goal:
            lab.status = LabStatus.ACTIVE
        self.session.add(lab)
        self.session.commit()
        self.session.refresh(lab)
        return lab
class CommunityPlatformService:
    """Service for managing the community support and collaboration platform"""

    def __init__(self, session: Session):
        # Database session used for all persistence operations.
        self.session = session

    async def create_post(self, author_id: str, data: Dict[str, Any]) -> CommunityPost:
        """Create a new community post (question, tutorial, etc)"""
        post = CommunityPost(
            author_id=author_id,
            title=data.get('title', ''),
            content=data.get('content', ''),
            category=data.get('category', 'discussion'),
            tags=data.get('tags', []),
            parent_post_id=data.get('parent_post_id')
        )
        self.session.add(post)
        # Reward the developer only for starting a new thread; replies earn
        # nothing here.
        if not post.parent_post_id:
            dev_service = DeveloperEcosystemService(self.session)
            await dev_service.update_developer_reputation(author_id, 2.0)
        self.session.commit()
        self.session.refresh(post)
        return post

    async def get_feed(self, category: str = None, limit: int = 20) -> List[CommunityPost]:
        """Get the community feed of top-level posts, newest first."""
        # `== None` is intentional: it compiles to SQL IS NULL.
        query = select(CommunityPost).where(CommunityPost.parent_post_id == None)
        if category:
            query = query.where(CommunityPost.category == category)
        query = query.order_by(CommunityPost.created_at.desc()).limit(limit)
        return self.session.exec(query).all()

    async def upvote_post(self, post_id: str) -> CommunityPost:
        """Upvote a post and reward the author.

        Raises:
            ValueError: if the post does not exist.
        """
        post = self.session.exec(select(CommunityPost).where(CommunityPost.post_id == post_id)).first()
        if not post:
            raise ValueError("Post not found")
        post.upvotes += 1
        self.session.add(post)
        # Reward author
        dev_service = DeveloperEcosystemService(self.session)
        await dev_service.update_developer_reputation(post.author_id, 1.0)
        self.session.commit()
        self.session.refresh(post)
        return post

    async def create_hackathon(self, organizer_id: str, data: Dict[str, Any]) -> Hackathon:
        """Create a new agent innovation hackathon.

        `data` must contain ISO-8601 strings for `registration_end`,
        `event_start` and `event_end`; `registration_start` defaults to now.

        Raises:
            ValueError: if the organizer is not EXPERT/MASTER/PARTNER tier.
        """
        # Verify organizer is an expert or partner
        dev = self.session.exec(select(DeveloperProfile).where(DeveloperProfile.developer_id == organizer_id)).first()
        if not dev or dev.tier not in [DeveloperTier.EXPERT, DeveloperTier.MASTER, DeveloperTier.PARTNER]:
            raise ValueError("Only high-tier developers can organize hackathons")
        hackathon = Hackathon(
            title=data.get('title', ''),
            description=data.get('description', ''),
            theme=data.get('theme', ''),
            sponsor=data.get('sponsor', 'AITBC Foundation'),
            prize_pool=data.get('prize_pool', 0.0),
            registration_start=datetime.fromisoformat(data.get('registration_start', datetime.utcnow().isoformat())),
            registration_end=datetime.fromisoformat(data.get('registration_end')),
            event_start=datetime.fromisoformat(data.get('event_start')),
            event_end=datetime.fromisoformat(data.get('event_end'))
        )
        self.session.add(hackathon)
        self.session.commit()
        self.session.refresh(hackathon)
        return hackathon

    async def register_for_hackathon(self, hackathon_id: str, developer_id: str) -> Hackathon:
        """Register a developer for a hackathon; re-registering is a no-op.

        Raises:
            ValueError: if the hackathon is missing or registration is closed.
        """
        hackathon = self.session.exec(select(Hackathon).where(Hackathon.hackathon_id == hackathon_id)).first()
        if not hackathon:
            raise ValueError("Hackathon not found")
        # Bug fix: HackathonStatus was referenced without being imported,
        # which raised NameError on every call (import added at module top).
        if hackathon.status not in [HackathonStatus.ANNOUNCED, HackathonStatus.REGISTRATION]:
            raise ValueError("Registration is not open for this hackathon")
        if developer_id not in hackathon.participants:
            # Bug fix: reassign the JSON list so SQLAlchemy detects the change;
            # an in-place append may not be persisted.
            hackathon.participants = [*hackathon.participants, developer_id]
            self.session.add(hackathon)
            self.session.commit()
            self.session.refresh(hackathon)
        return hackathon

View File

@@ -0,0 +1,511 @@
"""
Creative Capabilities Service
Implements advanced creativity enhancement systems and specialized AI capabilities
"""
import asyncio
import numpy as np
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import logging
import random
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.agent_performance import (
CreativeCapability, AgentCapability, AgentPerformanceProfile
)
logger = logging.getLogger(__name__)
class CreativityEnhancementEngine:
    """Advanced creativity enhancement system for OpenClaw agents"""
    def __init__(self):
        # Maps algorithm name -> bound enhancement coroutine; enhance_creativity
        # falls back to divergent_thinking_enhancement for unknown names.
        self.enhancement_algorithms = {
            'divergent_thinking': self.divergent_thinking_enhancement,
            'conceptual_blending': self.conceptual_blending,
            'morphological_analysis': self.morphological_analysis,
            'lateral_thinking': self.lateral_thinking_stimulation,
            'bisociation': self.bisociation_framework
        }
        # Specializations assigned to a new capability per creative domain;
        # unknown domains default to ['general_creativity'] in
        # create_creative_capability.
        self.creative_domains = {
            'artistic': ['visual_arts', 'music_composition', 'literary_arts'],
            'design': ['ui_ux', 'product_design', 'architectural'],
            'innovation': ['problem_solving', 'product_innovation', 'process_innovation'],
            'scientific': ['hypothesis_generation', 'experimental_design'],
            'narrative': ['storytelling', 'world_building', 'character_development']
        }
        # Metric names scored when evaluating a creative output (see
        # evaluate_creation / automated_aesthetic_evaluation).
        self.evaluation_metrics = [
            'originality',
            'fluency',
            'flexibility',
            'elaboration',
            'aesthetic_value',
            'utility'
        ]
async def create_creative_capability(
self,
session: Session,
agent_id: str,
creative_domain: str,
capability_type: str,
generation_models: List[str],
initial_score: float = 0.5
) -> CreativeCapability:
"""Initialize a new creative capability for an agent"""
capability_id = f"creative_{uuid4().hex[:8]}"
# Determine specialized areas based on domain
specializations = self.creative_domains.get(creative_domain, ['general_creativity'])
capability = CreativeCapability(
capability_id=capability_id,
agent_id=agent_id,
creative_domain=creative_domain,
capability_type=capability_type,
originality_score=initial_score,
novelty_score=initial_score * 0.9,
aesthetic_quality=initial_score * 5.0,
coherence_score=initial_score * 1.1,
generation_models=generation_models,
creative_learning_rate=0.05,
creative_specializations=specializations,
status="developing",
created_at=datetime.utcnow()
)
session.add(capability)
session.commit()
session.refresh(capability)
logger.info(f"Created creative capability {capability_id} for agent {agent_id}")
return capability
async def enhance_creativity(
self,
session: Session,
capability_id: str,
algorithm: str = "divergent_thinking",
training_cycles: int = 100
) -> Dict[str, Any]:
"""Enhance a specific creative capability"""
capability = session.exec(
select(CreativeCapability).where(CreativeCapability.capability_id == capability_id)
).first()
if not capability:
raise ValueError(f"Creative capability {capability_id} not found")
try:
# Apply enhancement algorithm
enhancement_func = self.enhancement_algorithms.get(
algorithm,
self.divergent_thinking_enhancement
)
enhancement_results = await enhancement_func(capability, training_cycles)
# Update capability metrics
capability.originality_score = min(1.0, capability.originality_score + enhancement_results['originality_gain'])
capability.novelty_score = min(1.0, capability.novelty_score + enhancement_results['novelty_gain'])
capability.aesthetic_quality = min(5.0, capability.aesthetic_quality + enhancement_results['aesthetic_gain'])
capability.style_variety += enhancement_results['variety_gain']
# Track training history
capability.creative_metadata['last_enhancement'] = {
'algorithm': algorithm,
'cycles': training_cycles,
'results': enhancement_results,
'timestamp': datetime.utcnow().isoformat()
}
# Update status if ready
if capability.originality_score > 0.8 and capability.aesthetic_quality > 4.0:
capability.status = "certified"
elif capability.originality_score > 0.6:
capability.status = "ready"
capability.updated_at = datetime.utcnow()
session.commit()
logger.info(f"Enhanced creative capability {capability_id} using {algorithm}")
return {
'success': True,
'capability_id': capability_id,
'algorithm': algorithm,
'improvements': enhancement_results,
'new_scores': {
'originality': capability.originality_score,
'novelty': capability.novelty_score,
'aesthetic': capability.aesthetic_quality,
'variety': capability.style_variety
},
'status': capability.status
}
except Exception as e:
logger.error(f"Error enhancing creativity for {capability_id}: {str(e)}")
raise
async def divergent_thinking_enhancement(self, capability: CreativeCapability, cycles: int) -> Dict[str, float]:
"""Enhance divergent thinking capabilities"""
# Simulate divergent thinking training
base_learning_rate = capability.creative_learning_rate
originality_gain = base_learning_rate * (cycles / 100) * random.uniform(0.8, 1.2)
variety_gain = int(max(1, cycles / 50) * random.uniform(0.5, 1.5))
return {
'originality_gain': originality_gain,
'novelty_gain': originality_gain * 0.8,
'aesthetic_gain': originality_gain * 2.0, # Scale to 0-5
'variety_gain': variety_gain,
'fluency_improvement': random.uniform(0.1, 0.3)
}
async def conceptual_blending(self, capability: CreativeCapability, cycles: int) -> Dict[str, float]:
"""Enhance conceptual blending (combining unrelated concepts)"""
base_learning_rate = capability.creative_learning_rate
novelty_gain = base_learning_rate * (cycles / 80) * random.uniform(0.9, 1.3)
return {
'originality_gain': novelty_gain * 0.7,
'novelty_gain': novelty_gain,
'aesthetic_gain': novelty_gain * 1.5,
'variety_gain': int(cycles / 60),
'blending_efficiency': random.uniform(0.15, 0.35)
}
async def morphological_analysis(self, capability: CreativeCapability, cycles: int) -> Dict[str, float]:
"""Enhance morphological analysis (systematic exploration of possibilities)"""
base_learning_rate = capability.creative_learning_rate
# Morphological analysis is systematic, so steady gains
gain = base_learning_rate * (cycles / 100)
return {
'originality_gain': gain * 0.9,
'novelty_gain': gain * 1.1,
'aesthetic_gain': gain * 1.0,
'variety_gain': int(cycles / 40),
'systematic_coverage': random.uniform(0.2, 0.4)
}
async def lateral_thinking_stimulation(self, capability: CreativeCapability, cycles: int) -> Dict[str, float]:
"""Enhance lateral thinking (approaching problems from new angles)"""
base_learning_rate = capability.creative_learning_rate
# Lateral thinking produces highly original but sometimes less coherent results
gain = base_learning_rate * (cycles / 90) * random.uniform(0.7, 1.5)
return {
'originality_gain': gain * 1.3,
'novelty_gain': gain * 1.2,
'aesthetic_gain': gain * 0.8,
'variety_gain': int(cycles / 50),
'perspective_shifts': random.uniform(0.2, 0.5)
}
async def bisociation_framework(self, capability: CreativeCapability, cycles: int) -> Dict[str, float]:
"""Enhance bisociation (connecting two previously unrelated frames of reference)"""
base_learning_rate = capability.creative_learning_rate
gain = base_learning_rate * (cycles / 120) * random.uniform(0.8, 1.4)
return {
'originality_gain': gain * 1.4,
'novelty_gain': gain * 1.3,
'aesthetic_gain': gain * 1.2,
'variety_gain': int(cycles / 70),
'cross_domain_links': random.uniform(0.1, 0.4)
}
async def evaluate_creation(
self,
session: Session,
capability_id: str,
creation_data: Dict[str, Any],
expert_feedback: Optional[Dict[str, float]] = None
) -> Dict[str, Any]:
"""Evaluate a creative output and update capability"""
capability = session.exec(
select(CreativeCapability).where(CreativeCapability.capability_id == capability_id)
).first()
if not capability:
raise ValueError(f"Creative capability {capability_id} not found")
# Perform automated evaluation
auto_eval = self.automated_aesthetic_evaluation(creation_data, capability.creative_domain)
# Combine with expert feedback if available
final_eval = {}
for metric in self.evaluation_metrics:
auto_score = auto_eval.get(metric, 0.5)
if expert_feedback and metric in expert_feedback:
# Expert feedback is weighted more heavily
final_eval[metric] = (auto_score * 0.3) + (expert_feedback[metric] * 0.7)
else:
final_eval[metric] = auto_score
# Update capability based on evaluation
capability.creations_generated += 1
# Moving average update of quality metrics
alpha = 0.1 # Learning rate for metrics
capability.originality_score = (1 - alpha) * capability.originality_score + alpha * final_eval.get('originality', capability.originality_score)
capability.aesthetic_quality = (1 - alpha) * capability.aesthetic_quality + alpha * (final_eval.get('aesthetic_value', 0.5) * 5.0)
capability.coherence_score = (1 - alpha) * capability.coherence_score + alpha * final_eval.get('utility', capability.coherence_score)
# Record evaluation
evaluation_record = {
'timestamp': datetime.utcnow().isoformat(),
'creation_id': creation_data.get('id', f"create_{uuid4().hex[:8]}"),
'scores': final_eval
}
evaluations = capability.expert_evaluations
evaluations.append(evaluation_record)
# Keep only last 50 evaluations
if len(evaluations) > 50:
evaluations = evaluations[-50:]
capability.expert_evaluations = evaluations
capability.last_evaluation = datetime.utcnow()
session.commit()
return {
'success': True,
'evaluation': final_eval,
'capability_updated': True,
'new_aesthetic_quality': capability.aesthetic_quality
}
def automated_aesthetic_evaluation(self, creation_data: Dict[str, Any], domain: str) -> Dict[str, float]:
"""Automated evaluation of creative outputs based on domain heuristics"""
# Simulated automated evaluation logic
# In a real system, this would use specialized models to evaluate art, text, music, etc.
content = str(creation_data.get('content', ''))
complexity = min(1.0, len(content) / 1000.0)
structure_score = 0.5 + (random.uniform(-0.2, 0.3))
if domain == 'artistic':
return {
'originality': random.uniform(0.6, 0.95),
'fluency': complexity,
'flexibility': random.uniform(0.5, 0.8),
'elaboration': structure_score,
'aesthetic_value': random.uniform(0.7, 0.9),
'utility': random.uniform(0.4, 0.7)
}
elif domain == 'innovation':
return {
'originality': random.uniform(0.7, 0.9),
'fluency': structure_score,
'flexibility': random.uniform(0.6, 0.9),
'elaboration': complexity,
'aesthetic_value': random.uniform(0.5, 0.8),
'utility': random.uniform(0.8, 0.95)
}
else:
return {
'originality': random.uniform(0.5, 0.9),
'fluency': random.uniform(0.5, 0.9),
'flexibility': random.uniform(0.5, 0.9),
'elaboration': random.uniform(0.5, 0.9),
'aesthetic_value': random.uniform(0.5, 0.9),
'utility': random.uniform(0.5, 0.9)
}
class IdeationAlgorithm:
    """System for generating innovative ideas and solving complex problems"""

    def __init__(self):
        # Dispatch table: technique name -> bound idea-generator method.
        self.ideation_techniques = {
            'scamper': self.scamper_technique,
            'triz': self.triz_inventive_principles,
            'six_thinking_hats': self.six_thinking_hats,
            'first_principles': self.first_principles_reasoning,
            'biomimicry': self.biomimicry_mapping
        }

    async def generate_ideas(
        self,
        problem_statement: str,
        domain: str,
        technique: str = "scamper",
        num_ideas: int = 5,
        constraints: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Generate innovative ideas using specified technique"""
        # Unknown technique names fall back to first-principles reasoning.
        generator = self.ideation_techniques.get(technique, self.first_principles_reasoning)
        await asyncio.sleep(0.5)  # Simulated processing time
        raw_ideas = [
            generator(problem_statement, domain, seed, constraints)
            for seed in range(num_ideas)
        ]
        # Rank by composite novelty/feasibility before returning.
        return {
            'problem': problem_statement,
            'technique_used': technique,
            'domain': domain,
            'generated_ideas': self.rank_ideas(raw_ideas),
            'generation_timestamp': datetime.utcnow().isoformat()
        }

    def scamper_technique(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
        """Substitute, Combine, Adapt, Modify, Put to another use, Eliminate, Reverse"""
        scamper_ops = ['Substitute', 'Combine', 'Adapt', 'Modify', 'Put to other use', 'Eliminate', 'Reverse']
        chosen = scamper_ops[seed % len(scamper_ops)]
        return {
            'title': f"{chosen}-based innovation for {domain}",
            'description': f"Applying the {chosen} principle to solving: {problem[:30]}...",
            'technique_aspect': chosen,
            'novelty_score': random.uniform(0.6, 0.9),
            'feasibility_score': random.uniform(0.5, 0.85)
        }

    def triz_inventive_principles(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
        """Theory of Inventive Problem Solving"""
        triz_list = ['Segmentation', 'Extraction', 'Local Quality', 'Asymmetry', 'Consolidation', 'Universality']
        chosen = triz_list[seed % len(triz_list)]
        return {
            'title': f"TRIZ Principle: {chosen}",
            'description': f"Solving contradictions in {domain} using {chosen}.",
            'technique_aspect': chosen,
            'novelty_score': random.uniform(0.7, 0.95),
            'feasibility_score': random.uniform(0.4, 0.8)
        }

    def six_thinking_hats(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
        """De Bono's Six Thinking Hats"""
        hat_list = ['White (Data)', 'Red (Emotion)', 'Black (Caution)', 'Yellow (Optimism)', 'Green (Creativity)', 'Blue (Process)']
        chosen = hat_list[seed % len(hat_list)]
        return {
            'title': f"{chosen} perspective",
            'description': f"Analyzing {problem[:20]} from the {chosen} standpoint.",
            'technique_aspect': chosen,
            'novelty_score': random.uniform(0.5, 0.8),
            'feasibility_score': random.uniform(0.6, 0.9)
        }

    def first_principles_reasoning(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
        """Deconstruct to fundamental truths and build up"""
        return {
            'title': f"Fundamental reconstruction {seed+1}",
            'description': f"Breaking down assumptions in {domain} to fundamental physics/logic.",
            'technique_aspect': 'Deconstruction',
            'novelty_score': random.uniform(0.8, 0.99),
            'feasibility_score': random.uniform(0.3, 0.7)
        }

    def biomimicry_mapping(self, problem: str, domain: str, seed: int, constraints: Any) -> Dict[str, Any]:
        """Map engineering/design problems to biological solutions"""
        bio_systems = ['Mycelium networks', 'Swarm intelligence', 'Photosynthesis', 'Lotus effect', 'Gecko adhesion']
        chosen = bio_systems[seed % len(bio_systems)]
        return {
            'title': f"Bio-inspired: {chosen}",
            'description': f"Applying principles from {chosen} to {domain} challenges.",
            'technique_aspect': chosen,
            'novelty_score': random.uniform(0.75, 0.95),
            'feasibility_score': random.uniform(0.4, 0.75)
        }

    def rank_ideas(self, ideas: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Rank ideas based on a combined score of novelty and feasibility"""
        for candidate in ideas:
            # Composite score: 60% novelty, 40% feasibility.
            candidate['composite_score'] = (candidate['novelty_score'] * 0.6) + (candidate['feasibility_score'] * 0.4)
        return sorted(ideas, key=lambda item: item['composite_score'], reverse=True)
class CrossDomainCreativeIntegrator:
    """Integrates creativity across multiple domains for breakthrough innovations"""

    def __init__(self):
        # Stateless: all inputs arrive per call.
        pass

    async def generate_cross_domain_synthesis(
        self,
        session: Session,
        agent_id: str,
        primary_domain: str,
        secondary_domains: List[str],
        synthesis_goal: str
    ) -> Dict[str, Any]:
        """Synthesize concepts from multiple domains to create novel outputs"""
        # Fetch the agent's capabilities covering any of the requested domains.
        requested_domains = [primary_domain] + secondary_domains
        matching_caps = session.exec(
            select(CreativeCapability).where(
                and_(
                    CreativeCapability.agent_id == agent_id,
                    CreativeCapability.creative_domain.in_(requested_domains)
                )
            )
        ).all()
        covered = [cap.creative_domain for cap in matching_caps]
        if primary_domain not in covered:
            raise ValueError(f"Agent lacks primary creative domain: {primary_domain}")
        # Each covered domain contributes 0.2 to the synergy potential.
        synergy_potential = len(covered) * 0.2
        await asyncio.sleep(0.8)  # Simulated synthesis time
        fallback_target = secondary_domains[0] if secondary_domains else 'general use'
        synthesis_result = {
            'goal': synthesis_goal,
            'primary_framework': primary_domain,
            'integrated_perspectives': secondary_domains,
            'synthesis_output': f"Novel integration of {primary_domain} principles with mechanisms from {', '.join(secondary_domains)}",
            'synergy_score': min(0.95, 0.4 + synergy_potential + random.uniform(0, 0.2)),
            'innovation_level': 'disruptive' if synergy_potential > 0.5 else 'incremental',
            'suggested_applications': [
                f"Cross-functional application in {primary_domain}",
                f"Novel methodology for {fallback_target}"
            ]
        }
        # Reward cross-domain transfer on every capability involved (cap 1.0).
        for cap in matching_caps:
            cap.cross_domain_transfer = min(1.0, cap.cross_domain_transfer + 0.05)
            session.add(cap)
        session.commit()
        return synthesis_result

View File

@@ -0,0 +1,275 @@
"""
Decentralized Governance Service
Implements the OpenClaw DAO, voting mechanisms, and proposal lifecycle
"""
from typing import Optional, List, Dict, Any
from sqlmodel import Session, select
from datetime import datetime, timedelta
import logging
from uuid import uuid4
from ..domain.governance import (
GovernanceProfile, Proposal, Vote, DaoTreasury, TransparencyReport,
ProposalStatus, VoteType, GovernanceRole
)
logger = logging.getLogger(__name__)
class GovernanceService:
    """Core service for managing DAO operations and voting"""
    def __init__(self, session: Session):
        # Database session shared by all governance operations.
        self.session = session
async def get_or_create_profile(self, user_id: str, initial_voting_power: float = 0.0) -> GovernanceProfile:
"""Get an existing governance profile or create a new one"""
profile = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.user_id == user_id)).first()
if not profile:
profile = GovernanceProfile(
user_id=user_id,
voting_power=initial_voting_power
)
self.session.add(profile)
self.session.commit()
self.session.refresh(profile)
return profile
async def delegate_votes(self, delegator_id: str, delegatee_id: str) -> GovernanceProfile:
"""Delegate voting power from one profile to another"""
delegator = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == delegator_id)).first()
delegatee = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == delegatee_id)).first()
if not delegator or not delegatee:
raise ValueError("Delegator or Delegatee not found")
# Remove old delegation if exists
if delegator.delegate_to:
old_delegatee = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == delegator.delegate_to)).first()
if old_delegatee:
old_delegatee.delegated_power -= delegator.voting_power
self.session.add(old_delegatee)
# Set new delegation
delegator.delegate_to = delegatee.profile_id
delegatee.delegated_power += delegator.voting_power
self.session.add(delegator)
self.session.add(delegatee)
self.session.commit()
return delegator
async def create_proposal(self, proposer_id: str, data: Dict[str, Any]) -> Proposal:
"""Create a new governance proposal"""
proposer = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == proposer_id)).first()
if not proposer:
raise ValueError("Proposer not found")
# Ensure proposer meets minimum voting power requirement to submit
total_power = proposer.voting_power + proposer.delegated_power
if total_power < 100.0: # Arbitrary minimum threshold for example
raise ValueError("Insufficient voting power to submit a proposal")
now = datetime.utcnow()
voting_starts = data.get('voting_starts', now + timedelta(days=1))
if isinstance(voting_starts, str):
voting_starts = datetime.fromisoformat(voting_starts)
voting_ends = data.get('voting_ends', voting_starts + timedelta(days=7))
if isinstance(voting_ends, str):
voting_ends = datetime.fromisoformat(voting_ends)
proposal = Proposal(
proposer_id=proposer_id,
title=data.get('title'),
description=data.get('description'),
category=data.get('category', 'general'),
execution_payload=data.get('execution_payload', {}),
quorum_required=data.get('quorum_required', 1000.0), # Example default
voting_starts=voting_starts,
voting_ends=voting_ends
)
# If voting starts immediately
if voting_starts <= now:
proposal.status = ProposalStatus.ACTIVE
proposer.proposals_created += 1
self.session.add(proposal)
self.session.add(proposer)
self.session.commit()
self.session.refresh(proposal)
return proposal
async def cast_vote(self, proposal_id: str, voter_id: str, vote_type: VoteType, reason: str = None) -> Vote:
"""Cast a vote on an active proposal"""
proposal = self.session.exec(select(Proposal).where(Proposal.proposal_id == proposal_id)).first()
voter = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == voter_id)).first()
if not proposal or not voter:
raise ValueError("Proposal or Voter not found")
now = datetime.utcnow()
if proposal.status != ProposalStatus.ACTIVE or now < proposal.voting_starts or now > proposal.voting_ends:
raise ValueError("Proposal is not currently active for voting")
# Check if already voted
existing_vote = self.session.exec(
select(Vote).where(Vote.proposal_id == proposal_id).where(Vote.voter_id == voter_id)
).first()
if existing_vote:
raise ValueError("Voter has already cast a vote on this proposal")
# If voter has delegated their vote, they cannot vote directly (or it overrides)
# For this implementation, we'll say direct voting is allowed but we only use their personal power
power_to_use = voter.voting_power + voter.delegated_power
if power_to_use <= 0:
raise ValueError("Voter has no voting power")
vote = Vote(
proposal_id=proposal_id,
voter_id=voter_id,
vote_type=vote_type,
voting_power_used=power_to_use,
reason=reason
)
# Update proposal tallies
if vote_type == VoteType.FOR:
proposal.votes_for += power_to_use
elif vote_type == VoteType.AGAINST:
proposal.votes_against += power_to_use
else:
proposal.votes_abstain += power_to_use
voter.total_votes_cast += 1
voter.last_voted_at = now
self.session.add(vote)
self.session.add(proposal)
self.session.add(voter)
self.session.commit()
self.session.refresh(vote)
return vote
async def process_proposal_lifecycle(self, proposal_id: str) -> Proposal:
    """Advance a proposal's status based on the current time and tallies.

    Transitions:
        DRAFT  -> ACTIVE once the voting window opens.
        ACTIVE -> SUCCEEDED / DEFEATED once the window closes; quorum is
        checked first (abstentions count toward quorum), then the
        FOR / (FOR + AGAINST) ratio against ``passing_threshold``.

    Raises:
        ValueError: If the proposal does not exist.
    """
    proposal = self.session.exec(select(Proposal).where(Proposal.proposal_id == proposal_id)).first()
    if not proposal:
        raise ValueError("Proposal not found")
    now = datetime.utcnow()
    # Draft -> Active
    if proposal.status == ProposalStatus.DRAFT and now >= proposal.voting_starts:
        proposal.status = ProposalStatus.ACTIVE
    # Active -> Succeeded/Defeated
    elif proposal.status == ProposalStatus.ACTIVE and now > proposal.voting_ends:
        total_votes = proposal.votes_for + proposal.votes_against + proposal.votes_abstain
        # Check Quorum (abstentions included)
        if total_votes < proposal.quorum_required:
            proposal.status = ProposalStatus.DEFEATED
        else:
            # Check threshold (FOR vs AGAINST only; abstentions excluded)
            votes_cast = proposal.votes_for + proposal.votes_against
            if votes_cast == 0:
                proposal.status = ProposalStatus.DEFEATED
            else:
                ratio = proposal.votes_for / votes_cast
                if ratio >= proposal.passing_threshold:
                    proposal.status = ProposalStatus.SUCCEEDED
                    # Update proposer stats: credit a passed proposal.
                    proposer = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == proposal.proposer_id)).first()
                    if proposer:
                        proposer.proposals_passed += 1
                        self.session.add(proposer)
                else:
                    proposal.status = ProposalStatus.DEFEATED
    self.session.add(proposal)
    self.session.commit()
    self.session.refresh(proposal)
    return proposal
async def execute_proposal(self, proposal_id: str, executor_id: str) -> Proposal:
    """Execute a SUCCEEDED proposal's payload (council/admin only).

    For funding proposals, earmarks the requested amount from the DAO
    treasury (tracked via ``allocated_funds``); available balance is
    ``total_balance - allocated_funds``.

    Raises:
        ValueError: If proposal/executor is missing, the proposal has
            not succeeded, the executor lacks the role, or the treasury
            cannot cover a funding payload.
    """
    proposal = self.session.exec(select(Proposal).where(Proposal.proposal_id == proposal_id)).first()
    executor = self.session.exec(select(GovernanceProfile).where(GovernanceProfile.profile_id == executor_id)).first()
    if not proposal or not executor:
        raise ValueError("Proposal or Executor not found")
    if proposal.status != ProposalStatus.SUCCEEDED:
        raise ValueError("Only SUCCEEDED proposals can be executed")
    if executor.role not in [GovernanceRole.ADMIN, GovernanceRole.COUNCIL]:
        raise ValueError("Only Council or Admin members can trigger execution")
    # In a real system, this would interact with smart contracts or internal service APIs
    # based on proposal.execution_payload
    logger.info(f"Executing proposal {proposal_id} payload: {proposal.execution_payload}")
    # If it's a funding proposal, deduct from treasury
    if proposal.category == 'funding' and 'amount' in proposal.execution_payload:
        treasury = self.session.exec(select(DaoTreasury).where(DaoTreasury.treasury_id == "main_treasury")).first()
        if treasury:
            amount = float(proposal.execution_payload['amount'])
            # Only unallocated balance may be spent.
            if treasury.total_balance - treasury.allocated_funds >= amount:
                treasury.allocated_funds += amount
                self.session.add(treasury)
            else:
                raise ValueError("Insufficient funds in DAO Treasury for execution")
    proposal.status = ProposalStatus.EXECUTED
    proposal.executed_at = datetime.utcnow()
    self.session.add(proposal)
    self.session.commit()
    self.session.refresh(proposal)
    return proposal
async def generate_transparency_report(self, period: str) -> TransparencyReport:
    """Generate an automated governance analytics report for *period*.

    NOTE(review): figures are aggregated over ALL rows, not filtered to
    the requested period, and ``treasury_inflow`` is a simulated
    constant -- confirm before relying on these numbers.
    """
    # In reality, we would calculate this based on timestamps matching the period
    # For simplicity, we just aggregate current totals
    proposals = self.session.exec(select(Proposal)).all()
    profiles = self.session.exec(select(GovernanceProfile)).all()
    treasury = self.session.exec(select(DaoTreasury).where(DaoTreasury.treasury_id == "main_treasury")).first()
    total_proposals = len(proposals)
    passed_proposals = len([p for p in proposals if p.status in [ProposalStatus.SUCCEEDED, ProposalStatus.EXECUTED]])
    # "Active" means the profile has voted at least once, ever.
    active_voters = len([p for p in profiles if p.total_votes_cast > 0])
    total_power = sum(p.voting_power for p in profiles)
    report = TransparencyReport(
        period=period,
        total_proposals=total_proposals,
        passed_proposals=passed_proposals,
        active_voters=active_voters,
        total_voting_power_participated=total_power,
        treasury_inflow=10000.0,  # Simulated
        treasury_outflow=treasury.allocated_funds if treasury else 0.0,
        metrics={
            # Guard divisions against empty tables.
            "voter_participation_rate": (active_voters / len(profiles)) if profiles else 0,
            "proposal_success_rate": (passed_proposals / total_proposals) if total_proposals else 0
        }
    )
    self.session.add(report)
    self.session.commit()
    self.session.refresh(report)
    return report

View File

@@ -0,0 +1,947 @@
"""
Multi-Modal Agent Fusion Service
Implements advanced fusion models and cross-domain capability integration
"""
import asyncio
import numpy as np
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import logging
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.agent_performance import (
FusionModel, AgentCapability, CreativeCapability,
ReinforcementLearningConfig, AgentPerformanceProfile
)
logger = logging.getLogger(__name__)
class MultiModalFusionEngine:
    """Advanced multi-modal agent fusion system.

    Holds the strategy dispatch table plus static metadata about the
    supported modalities and fusion objectives. The heavy computation in
    the strategy methods is simulated.
    """
    def __init__(self):
        # Dispatch table: strategy name -> bound coroutine implementing it
        # (looked up by fuse_modalities via FusionModel.fusion_strategy).
        self.fusion_strategies = {
            'ensemble_fusion': self.ensemble_fusion,
            'attention_fusion': self.attention_fusion,
            'cross_modal_attention': self.cross_modal_attention,
            'neural_architecture_search': self.neural_architecture_search,
            'transformer_fusion': self.transformer_fusion,
            'graph_neural_fusion': self.graph_neural_fusion
        }
        # Per-modality default weight and (simulated) encoder backbone.
        self.modality_types = {
            'text': {'weight': 0.3, 'encoder': 'transformer'},
            'image': {'weight': 0.25, 'encoder': 'cnn'},
            'audio': {'weight': 0.2, 'encoder': 'wav2vec'},
            'video': {'weight': 0.15, 'encoder': '3d_cnn'},
            'structured': {'weight': 0.1, 'encoder': 'tabular'}
        }
        # Relative importance of optimization objectives (sums to 1.0).
        self.fusion_objectives = {
            'performance': 0.4,
            'efficiency': 0.3,
            'robustness': 0.2,
            'adaptability': 0.1
        }
async def create_fusion_model(
    self,
    session: Session,
    model_name: str,
    fusion_type: str,
    base_models: List[str],
    input_modalities: List[str],
    fusion_strategy: str = "ensemble_fusion"
) -> FusionModel:
    """Create and persist a multi-modal fusion model, then start training.

    Returns the model in "training" status; a background asyncio task
    flips it to "ready" or "failed" when simulated training completes.
    """
    fusion_id = f"fusion_{uuid4().hex[:8]}"
    # Calculate model weights based on modalities
    modality_weights = self.calculate_modality_weights(input_modalities)
    # Estimate computational requirements
    computational_complexity = self.estimate_complexity(base_models, input_modalities)
    # Set memory requirements
    memory_requirement = self.estimate_memory_requirement(base_models, fusion_type)
    fusion_model = FusionModel(
        fusion_id=fusion_id,
        model_name=model_name,
        fusion_type=fusion_type,
        base_models=base_models,
        model_weights=self.calculate_model_weights(base_models),
        fusion_strategy=fusion_strategy,
        input_modalities=input_modalities,
        modality_weights=modality_weights,
        computational_complexity=computational_complexity,
        memory_requirement=memory_requirement,
        status="training"
    )
    session.add(fusion_model)
    session.commit()
    session.refresh(fusion_model)
    # Start fusion training process.
    # NOTE(review): the task handle is not retained, so the task may be
    # garbage-collected before finishing, and the Session is shared with
    # the background task -- confirm both are safe in this runtime.
    asyncio.create_task(self.train_fusion_model(session, fusion_id))
    logger.info(f"Created fusion model {fusion_id} with strategy {fusion_strategy}")
    return fusion_model
async def train_fusion_model(self, session: Session, fusion_id: str) -> Dict[str, Any]:
    """Run (simulated) training for a fusion model and persist results.

    On success the row is marked "ready" with metric fields filled in;
    on any error it is marked "failed" and the exception is re-raised.

    Raises:
        ValueError: If the fusion model does not exist.
    """
    fusion_model = session.exec(
        select(FusionModel).where(FusionModel.fusion_id == fusion_id)
    ).first()
    if not fusion_model:
        raise ValueError(f"Fusion model {fusion_id} not found")
    try:
        # Simulate fusion training process
        training_results = await self.simulate_fusion_training(fusion_model)
        # Copy simulated metrics onto the persisted row.
        fusion_model.fusion_performance = training_results['performance']
        fusion_model.synergy_score = training_results['synergy']
        fusion_model.robustness_score = training_results['robustness']
        fusion_model.inference_time = training_results['inference_time']
        fusion_model.status = "ready"
        fusion_model.trained_at = datetime.utcnow()
        session.commit()
        logger.info(f"Fusion model {fusion_id} training completed")
        return training_results
    except Exception as e:
        logger.error(f"Error training fusion model {fusion_id}: {str(e)}")
        # Record the failure state but surface the original error.
        fusion_model.status = "failed"
        session.commit()
        raise
async def simulate_fusion_training(self, fusion_model: FusionModel) -> Dict[str, Any]:
    """Simulate a fusion training run and return synthetic metrics.

    All figures are deterministic functions of the model's configured
    modalities, base models, and complexity -- no actual training runs.
    """
    # Calculate training time based on complexity
    base_time = 4.0  # hours
    complexity_multipliers = {
        'low': 1.0,
        'medium': 2.0,
        'high': 4.0,
        'very_high': 8.0
    }
    training_time = base_time * complexity_multipliers.get(fusion_model.computational_complexity, 2.0)
    # Calculate fusion performance based on modalities and base models
    modality_bonus = len(fusion_model.input_modalities) * 0.05
    model_bonus = len(fusion_model.base_models) * 0.03
    # Calculate synergy score (how well modalities complement each other)
    synergy_score = self.calculate_synergy_score(fusion_model.input_modalities)
    # Calculate robustness (ability to handle missing modalities), capped at 1.0
    robustness_score = min(1.0, 0.7 + (len(fusion_model.base_models) * 0.1))
    # Calculate inference time
    inference_time = 0.1 + (len(fusion_model.base_models) * 0.05)  # seconds
    # Calculate overall performance, capped at 1.0
    base_performance = 0.75
    fusion_performance = min(1.0, base_performance + modality_bonus + model_bonus + synergy_score * 0.1)
    return {
        'performance': {
            'accuracy': fusion_performance,
            'f1_score': fusion_performance * 0.95,
            'precision': fusion_performance * 0.97,
            'recall': fusion_performance * 0.93
        },
        'synergy': synergy_score,
        'robustness': robustness_score,
        'inference_time': inference_time,
        'training_time': training_time,
        'convergence_epoch': int(training_time * 5)
    }
def calculate_modality_weights(self, modalities: List[str]) -> Dict[str, float]:
"""Calculate weights for different modalities"""
weights = {}
total_weight = 0.0
for modality in modalities:
weight = self.modality_types.get(modality, {}).get('weight', 0.1)
weights[modality] = weight
total_weight += weight
# Normalize weights
if total_weight > 0:
for modality in weights:
weights[modality] /= total_weight
return weights
def calculate_model_weights(self, base_models: List[str]) -> Dict[str, float]:
    """Assign fusion weights to the base models.

    Equal weighting by default; could be driven by individual model
    performance in a future revision.

    Returns an empty dict for an empty model list (the previous version
    raised ZeroDivisionError).
    """
    if not base_models:
        return {}
    weight = 1.0 / len(base_models)
    return {model: weight for model in base_models}
def estimate_complexity(self, base_models: List[str], modalities: List[str]) -> str:
    """Bucket computational complexity by model-count x modality-count."""
    load = len(base_models) * len(modalities)
    # Ascending ceilings; first bucket that fits wins.
    for ceiling, label in ((4, "low"), (8, "medium"), (16, "high")):
        if load <= ceiling:
            return label
    return "very_high"
def estimate_memory_requirement(self, base_models: List[str], fusion_type: str) -> float:
    """Estimate fusion-model memory footprint in GB.

    Baseline is 2 GB per base model, scaled by a per-fusion-type
    multiplier (unknown types use 1.5).
    """
    scale_by_type = {
        'ensemble': 1.0,
        'hybrid': 1.5,
        'multi_modal': 2.0,
        'cross_domain': 2.5,
    }
    per_model_gb = 2.0
    return len(base_models) * per_model_gb * scale_by_type.get(fusion_type, 1.5)
def calculate_synergy_score(self, modalities: List[str]) -> float:
    """Average pairwise synergy between modalities.

    The lookup table is normalised to alphabetically-sorted pair keys so
    every entry is reachable. (Previously the keys were stored in
    arbitrary order, e.g. ('text', 'image'), while lookups used
    tuple(sorted(...)) -- i.e. ('image', 'text') -- so most entries
    silently fell back to the 0.5 default.)

    Returns 0.5 for fewer than two modalities or unknown pairs.
    """
    raw_synergies = {
        ('text', 'image'): 0.8,
        ('text', 'audio'): 0.7,
        ('text', 'video'): 0.9,
        ('image', 'audio'): 0.6,
        ('image', 'video'): 0.85,
        ('audio', 'video'): 0.75,
        ('text', 'structured'): 0.6,
        ('image', 'structured'): 0.5,
        ('audio', 'structured'): 0.4,
        ('video', 'structured'): 0.7
    }
    # Normalise keys once so they match the sorted lookup keys below.
    synergy_matrix = {tuple(sorted(pair)): score for pair, score in raw_synergies.items()}
    total_synergy = 0.0
    synergy_count = 0
    # Calculate pairwise synergy over unique unordered pairs.
    for i, mod1 in enumerate(modalities):
        for j, mod2 in enumerate(modalities):
            if i < j:  # Avoid duplicate pairs
                key = tuple(sorted([mod1, mod2]))
                total_synergy += synergy_matrix.get(key, 0.5)
                synergy_count += 1
    if synergy_count > 0:
        return total_synergy / synergy_count
    return 0.5  # Default synergy for a single modality
async def fuse_modalities(
    self,
    session: Session,
    fusion_id: str,
    input_data: Dict[str, Any]
) -> Dict[str, Any]:
    """Fuse multiple modalities using a trained fusion model.

    Looks up the model's configured strategy in ``fusion_strategies``
    and dispatches to it; increments the model's deployment counter on
    success.

    Raises:
        ValueError: If the model is missing, not "ready", or configured
            with an unknown strategy name.
    """
    fusion_model = session.exec(
        select(FusionModel).where(FusionModel.fusion_id == fusion_id)
    ).first()
    if not fusion_model:
        raise ValueError(f"Fusion model {fusion_id} not found")
    if fusion_model.status != "ready":
        raise ValueError(f"Fusion model {fusion_id} is not ready for inference")
    try:
        # Get fusion strategy coroutine from the dispatch table.
        fusion_strategy = self.fusion_strategies.get(fusion_model.fusion_strategy)
        if not fusion_strategy:
            raise ValueError(f"Unknown fusion strategy: {fusion_model.fusion_strategy}")
        # Apply fusion strategy
        fusion_result = await fusion_strategy(input_data, fusion_model)
        # Update deployment count
        fusion_model.deployment_count += 1
        session.commit()
        logger.info(f"Fusion completed for model {fusion_id}")
        return fusion_result
    except Exception as e:
        logger.error(f"Error during fusion with model {fusion_id}: {str(e)}")
        raise
async def ensemble_fusion(
    self,
    input_data: Dict[str, Any],
    fusion_model: FusionModel
) -> Dict[str, Any]:
    """Ensemble fusion strategy: weighted average of per-modality results.

    Modalities configured on the model but absent from *input_data* are
    simply skipped.
    """
    # Simulate ensemble fusion
    ensemble_results = {}
    for modality in fusion_model.input_modalities:
        if modality in input_data:
            # Simulate modality-specific processing
            modality_result = self.process_modality(input_data[modality], modality)
            weight = fusion_model.modality_weights.get(modality, 0.1)
            ensemble_results[modality] = {
                'result': modality_result,
                'weight': weight,
                # Confidence grows with the modality's configured weight.
                'confidence': 0.8 + (weight * 0.2)
            }
    # Combine results using weighted average
    combined_result = self.weighted_combination(ensemble_results)
    return {
        'fusion_type': 'ensemble',
        'combined_result': combined_result,
        'modality_contributions': ensemble_results,
        'confidence': self.calculate_ensemble_confidence(ensemble_results)
    }
async def attention_fusion(
    self,
    input_data: Dict[str, Any],
    fusion_model: FusionModel
) -> Dict[str, Any]:
    """Attention-based fusion: scale each modality by its attention weight.

    Attention weights come from calculate_attention_weights (configured
    weight adjusted by a simulated input-quality factor), then each
    modality result is scaled and the scaled results are summed.
    """
    # Calculate attention weights for each modality
    attention_weights = self.calculate_attention_weights(input_data, fusion_model)
    # Apply attention to each modality
    attended_results = {}
    for modality in fusion_model.input_modalities:
        if modality in input_data:
            modality_result = self.process_modality(input_data[modality], modality)
            attention_weight = attention_weights.get(modality, 0.1)
            attended_results[modality] = {
                'result': modality_result,
                'attention_weight': attention_weight,
                'attended_result': self.apply_attention(modality_result, attention_weight)
            }
    # Combine attended results
    combined_result = self.attended_combination(attended_results)
    return {
        'fusion_type': 'attention',
        'combined_result': combined_result,
        'attention_weights': attention_weights,
        'attended_results': attended_results
    }
async def cross_modal_attention(
    self,
    input_data: Dict[str, Any],
    fusion_model: FusionModel
) -> Dict[str, Any]:
    """Cross-modal attention fusion strategy.

    Builds a pairwise attention matrix over the model's modalities, lets
    each modality attend to the others, then combines the enhanced
    per-modality results.
    """
    # Build cross-modal attention matrix (rows/cols follow input_modalities order)
    attention_matrix = self.build_cross_modal_attention(input_data, fusion_model)
    # Apply cross-modal attention
    cross_modal_results = {}
    for i, modality1 in enumerate(fusion_model.input_modalities):
        if modality1 in input_data:
            modality_result = self.process_modality(input_data[modality1], modality1)
            # Get attention from other modalities (off-diagonal entries only)
            cross_attention = {}
            for j, modality2 in enumerate(fusion_model.input_modalities):
                if i != j and modality2 in input_data:
                    cross_attention[modality2] = attention_matrix[i][j]
            cross_modal_results[modality1] = {
                'result': modality_result,
                'cross_attention': cross_attention,
                'enhanced_result': self.enhance_with_cross_attention(modality_result, cross_attention)
            }
    # Combine cross-modal enhanced results
    combined_result = self.cross_modal_combination(cross_modal_results)
    return {
        'fusion_type': 'cross_modal_attention',
        'combined_result': combined_result,
        'attention_matrix': attention_matrix,
        'cross_modal_results': cross_modal_results
    }
async def neural_architecture_search(
    self,
    input_data: Dict[str, Any],
    fusion_model: FusionModel
) -> Dict[str, Any]:
    """Neural-architecture-search fusion strategy.

    Samples a (simulated) per-modality architecture, applies it to each
    modality result, and combines the optimized results.
    """
    # Search for optimal fusion architecture (randomized simulation)
    optimal_architecture = await self.search_optimal_architecture(input_data, fusion_model)
    # Apply optimal architecture
    arch_results = {}
    for modality in fusion_model.input_modalities:
        if modality in input_data:
            modality_result = self.process_modality(input_data[modality], modality)
            arch_config = optimal_architecture.get(modality, {})
            arch_results[modality] = {
                'result': modality_result,
                'architecture': arch_config,
                'optimized_result': self.apply_architecture(modality_result, arch_config)
            }
    # Combine optimized results
    combined_result = self.architecture_combination(arch_results)
    return {
        'fusion_type': 'neural_architecture_search',
        'combined_result': combined_result,
        'optimal_architecture': optimal_architecture,
        'arch_results': arch_results
    }
async def transformer_fusion(
    self,
    input_data: Dict[str, Any],
    fusion_model: FusionModel
) -> Dict[str, Any]:
    """Transformer-based fusion: tokenize, jointly encode, then decode."""
    # Convert modalities to transformer tokens
    tokenized_modalities = {}
    for modality in fusion_model.input_modalities:
        if modality in input_data:
            tokens = self.tokenize_modality(input_data[modality], modality)
            tokenized_modalities[modality] = tokens
    # Apply transformer fusion (simulated joint encoding of all tokens)
    fused_embeddings = self.transformer_fusion_embeddings(tokenized_modalities)
    # Generate final result by pooling the joint embeddings.
    combined_result = self.decode_transformer_output(fused_embeddings)
    return {
        'fusion_type': 'transformer',
        'combined_result': combined_result,
        'tokenized_modalities': tokenized_modalities,
        'fused_embeddings': fused_embeddings
    }
async def graph_neural_fusion(
    self,
    input_data: Dict[str, Any],
    fusion_model: FusionModel
) -> Dict[str, Any]:
    """Graph-neural-network fusion: build a modality graph, propagate, decode."""
    # Build modality graph (nodes = modalities, edges = high-synergy pairs)
    modality_graph = self.build_modality_graph(input_data, fusion_model)
    # Apply GNN fusion (one simulated round of message passing)
    graph_embeddings = self.gnn_fusion_embeddings(modality_graph)
    # Generate final result from the pooled graph embedding.
    combined_result = self.decode_gnn_output(graph_embeddings)
    return {
        'fusion_type': 'graph_neural',
        'combined_result': combined_result,
        'modality_graph': modality_graph,
        'graph_embeddings': graph_embeddings
    }
def process_modality(self, data: Any, modality_type: str) -> Dict[str, Any]:
"""Process individual modality data"""
# Simulate modality-specific processing
if modality_type == 'text':
return {
'features': self.extract_text_features(data),
'embeddings': self.generate_text_embeddings(data),
'confidence': 0.85
}
elif modality_type == 'image':
return {
'features': self.extract_image_features(data),
'embeddings': self.generate_image_embeddings(data),
'confidence': 0.80
}
elif modality_type == 'audio':
return {
'features': self.extract_audio_features(data),
'embeddings': self.generate_audio_embeddings(data),
'confidence': 0.75
}
elif modality_type == 'video':
return {
'features': self.extract_video_features(data),
'embeddings': self.generate_video_embeddings(data),
'confidence': 0.78
}
elif modality_type == 'structured':
return {
'features': self.extract_structured_features(data),
'embeddings': self.generate_structured_embeddings(data),
'confidence': 0.90
}
else:
return {
'features': {},
'embeddings': [],
'confidence': 0.5
}
def weighted_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
    """Merge per-modality results into one feature map by weighted average.

    Each entry supplies ``weight``, ``confidence`` and nested
    ``result.features``; outputs are normalised by the total weight.
    """
    merged_features: Dict[str, float] = {}
    confidence_acc = 0.0
    weight_acc = 0.0
    for entry in results.values():
        weight = entry['weight']
        confidence_acc += entry['confidence'] * weight
        weight_acc += weight
        for name, value in entry['result']['features'].items():
            merged_features[name] = merged_features.get(name, 0.0) + value * weight
    # Normalise by the total weight when any weight was accumulated.
    if weight_acc > 0:
        merged_features = {name: value / weight_acc for name, value in merged_features.items()}
        confidence_acc /= weight_acc
    return {'features': merged_features, 'confidence': confidence_acc}
def calculate_attention_weights(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> Dict[str, float]:
    """Calculate normalised attention weights for the present modalities.

    NOTE(review): the simulated quality factor uses ``hash(str(...))``;
    Python string hashes are randomized per process (PYTHONHASHSEED), so
    these weights are NOT stable across runs -- confirm this is intended
    for a simulation-only path.
    """
    # Simulate attention weight calculation based on input quality and modality importance
    attention_weights = {}
    for modality in fusion_model.input_modalities:
        if modality in input_data:
            # Base weight from modality weights
            base_weight = fusion_model.modality_weights.get(modality, 0.1)
            # Adjust based on input quality (simulated, range [0.8, 1.0))
            quality_factor = 0.8 + (hash(str(input_data[modality])) % 20) / 100.0
            attention_weights[modality] = base_weight * quality_factor
    # Normalize attention weights so they sum to 1.
    total_attention = sum(attention_weights.values())
    if total_attention > 0:
        for modality in attention_weights:
            attention_weights[modality] /= total_attention
    return attention_weights
def apply_attention(self, result: Dict[str, Any], attention_weight: float) -> Dict[str, Any]:
    """Scale a modality result by its attention weight.

    Returns a new dict; the nested 'features' mapping is rebuilt rather
    than aliased (the previous shallow ``.copy()`` mutated the caller's
    features in place).
    """
    attended_result = dict(result)
    # Scale features by attention weight into a fresh dict.
    attended_result['features'] = {
        feature: value * attention_weight
        for feature, value in result['features'].items()
    }
    # Adjust confidence: interpolate between half and full confidence.
    attended_result['confidence'] = result['confidence'] * (0.5 + attention_weight * 0.5)
    return attended_result
def attended_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
    """Sum attended features and average attended confidences."""
    merged: Dict[str, float] = {}
    confidence_total = 0.0
    for entry in results.values():
        attended = entry['attended_result']
        for name, value in attended['features'].items():
            merged[name] = merged.get(name, 0.0) + value
        confidence_total += attended['confidence']
    # Mean confidence over contributing modalities (0.0 when empty).
    mean_confidence = confidence_total / len(results) if results else 0.0
    return {'features': merged, 'confidence': mean_confidence}
def build_cross_modal_attention(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> List[List[float]]:
    """Build a row-normalised cross-modal attention matrix.

    Rows/columns follow the order of ``fusion_model.input_modalities``;
    entry [i][j] is synergy(i, j) * simulated compatibility, zero on the
    diagonal and for modalities absent from *input_data*.
    """
    modalities = fusion_model.input_modalities
    n_modalities = len(modalities)
    # Initialize attention matrix
    attention_matrix = [[0.0 for _ in range(n_modalities)] for _ in range(n_modalities)]
    # Calculate cross-modal attention based on synergy
    for i, mod1 in enumerate(modalities):
        for j, mod2 in enumerate(modalities):
            if i != j and mod1 in input_data and mod2 in input_data:
                # Calculate attention based on synergy and input compatibility
                synergy = self.calculate_synergy_score([mod1, mod2])
                compatibility = self.calculate_modality_compatibility(input_data[mod1], input_data[mod2])
                attention_matrix[i][j] = synergy * compatibility
    # Normalize rows so each row sums to 1 (rows of all zeros stay zero).
    for i in range(n_modalities):
        row_sum = sum(attention_matrix[i])
        if row_sum > 0:
            for j in range(n_modalities):
                attention_matrix[i][j] /= row_sum
    return attention_matrix
def calculate_modality_compatibility(self, data1: Any, data2: Any) -> float:
"""Calculate compatibility between two modalities"""
# Simulate compatibility calculation
# In real implementation, would analyze actual data compatibility
return 0.6 + (hash(str(data1) + str(data2)) % 40) / 100.0
def enhance_with_cross_attention(self, result: Dict[str, Any], cross_attention: Dict[str, float]) -> Dict[str, Any]:
    """Boost a modality result using attention received from other modalities.

    Returns a new dict; the nested 'features' mapping is rebuilt rather
    than aliased (the previous shallow ``.copy()`` mutated the caller's
    features in place).
    """
    enhanced_result = dict(result)
    # Average attention received from the other modalities (0 when none).
    attention_boost = sum(cross_attention.values()) / len(cross_attention) if cross_attention else 0.0
    # Boost features based on cross-attention into a fresh dict.
    enhanced_result['features'] = {
        feature: value * (1.0 + attention_boost * 0.2)
        for feature, value in result['features'].items()
    }
    # Boost confidence, capped at 1.0.
    enhanced_result['confidence'] = min(1.0, result['confidence'] * (1.0 + attention_boost * 0.3))
    return enhanced_result
def cross_modal_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
    """Combine cross-modal enhanced results.

    Features are summed; confidence and received cross-attention are
    averaged over the contributing modalities.
    """
    merged: Dict[str, float] = {}
    confidence_total = 0.0
    attention_total = 0.0
    for entry in results.values():
        enhanced = entry['enhanced_result']
        for name, value in enhanced['features'].items():
            merged[name] = merged.get(name, 0.0) + value
        confidence_total += enhanced['confidence']
        attention_total += sum(entry['cross_attention'].values())
    count = len(results)
    if count:
        confidence_total /= count
        attention_total /= count
    return {
        'features': merged,
        'confidence': confidence_total,
        'cross_attention_boost': attention_total,
    }
async def search_optimal_architecture(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> Dict[str, Any]:
    """Sample a (simulated) per-modality architecture configuration.

    Only modalities present in *input_data* receive a config. Values
    are randomly sampled each call -- layers in [2, 6), dropout in
    [0.1, 0.3); 'units' is a fixed list [16, 32, 64, 128, 256].
    """
    optimal_arch = {}
    for modality in fusion_model.input_modalities:
        if modality in input_data:
            # Simulate architecture search
            arch_config = {
                # .tolist() converts the numpy scalar to a plain Python int.
                'layers': np.random.randint(2, 6).tolist(),
                'units': [2**i for i in range(4, 9)],
                'activation': np.random.choice(['relu', 'tanh', 'sigmoid']),
                'dropout': np.random.uniform(0.1, 0.3),
                'batch_norm': np.random.choice([True, False])
            }
            optimal_arch[modality] = arch_config
    return optimal_arch
def apply_architecture(self, result: Dict[str, Any], arch_config: Dict[str, Any]) -> Dict[str, Any]:
    """Scale a modality result by a factor derived from its architecture.

    Returns a new dict; the nested 'features' mapping is rebuilt rather
    than aliased (the previous shallow ``.copy()`` mutated the caller's
    features in place).
    """
    # Each layer above/below the 3-layer baseline shifts the factor by 5%.
    optimization_factor = 1.0 + (arch_config.get('layers', 3) - 3) * 0.05
    optimized_result = dict(result)
    # Optimize features into a fresh dict.
    optimized_result['features'] = {
        feature: value * optimization_factor
        for feature, value in result['features'].items()
    }
    # Optimize confidence, capped at 1.0.
    optimized_result['confidence'] = min(1.0, result['confidence'] * optimization_factor)
    return optimized_result
def architecture_combination(self, results: Dict[str, Any]) -> Dict[str, Any]:
    """Combine architecture-optimized results.

    Features are summed; confidence is averaged; optimization_gain is
    the mean relative confidence improvement over the unoptimized
    results (entries with non-positive baseline contribute zero).
    """
    merged: Dict[str, float] = {}
    confidence_total = 0.0
    gain_total = 0.0
    for entry in results.values():
        optimized = entry['optimized_result']
        for name, value in optimized['features'].items():
            merged[name] = merged.get(name, 0.0) + value
        confidence_total += optimized['confidence']
        baseline = entry['result']['confidence']
        if baseline > 0:
            gain_total += (optimized['confidence'] - baseline) / baseline
    if results:
        count = len(results)
        confidence_total /= count
        gain_total /= count
    return {
        'features': merged,
        'confidence': confidence_total,
        'optimization_gain': gain_total,
    }
def tokenize_modality(self, data: Any, modality_type: str) -> List[str]:
    """Produce a (simulated) token sequence for one modality.

    Text is whitespace-split and truncated to 100 tokens; other
    modalities yield fixed-size placeholder token lists.
    """
    if modality_type == 'text':
        return str(data).split()[:100]
    # (placeholder prefix, token count) per non-text modality.
    placeholder_spec = {'image': ('img_token', 50), 'audio': ('audio_token', 75)}
    prefix, count = placeholder_spec.get(modality_type, ('token', 25))
    return [f"{prefix}_{index}" for index in range(count)]
def transformer_fusion_embeddings(self, tokenized_modalities: Dict[str, List[str]]) -> Dict[str, Any]:
    """Simulate joint transformer encoding of the concatenated token streams.

    Returns the flat token list, one random 768-dim placeholder
    embedding per token, and the start offset of each modality's tokens
    within the flat list.
    """
    # Simulate transformer fusion
    all_tokens = []
    modality_boundaries = []
    for modality, tokens in tokenized_modalities.items():
        # Record where this modality's tokens begin in the flat stream.
        modality_boundaries.append(len(all_tokens))
        all_tokens.extend(tokens)
    # Simulate transformer processing
    embedding_dim = 768
    fused_embeddings = np.random.rand(len(all_tokens), embedding_dim).tolist()
    return {
        'tokens': all_tokens,
        'embeddings': fused_embeddings,
        'modality_boundaries': modality_boundaries,
        'embedding_dim': embedding_dim
    }
def decode_transformer_output(self, fused_embeddings: Dict[str, Any]) -> Dict[str, Any]:
    """Decode transformer output to a final result by mean-pooling tokens.

    An empty token set yields an empty pooled embedding. (Previously the
    empty branch produced a plain list and then crashed on ``.tolist()``.)
    """
    embeddings = fused_embeddings['embeddings']
    # Pool embeddings (simple average over the token axis).
    pooled_embedding = np.mean(embeddings, axis=0).tolist() if embeddings else []
    return {
        'features': {
            'pooled_embedding': pooled_embedding,
            'embedding_dim': fused_embeddings['embedding_dim']
        },
        'confidence': 0.88
    }
def build_modality_graph(self, input_data: Dict[str, Any], fusion_model: FusionModel) -> Dict[str, Any]:
    """Build the modality relationship graph for GNN fusion.

    Nodes are the model's configured modalities (note: *input_data* is
    not consulted here); edges connect pairs whose synergy exceeds 0.5;
    node features are random 64-dim placeholder vectors.
    """
    # Simulate graph construction
    nodes = list(fusion_model.input_modalities)
    edges = []
    # Create edges based on synergy
    for i, mod1 in enumerate(nodes):
        for j, mod2 in enumerate(nodes):
            if i < j:
                synergy = self.calculate_synergy_score([mod1, mod2])
                if synergy > 0.5:  # Only add edges for high synergy
                    edges.append({
                        'source': mod1,
                        'target': mod2,
                        'weight': synergy
                    })
    return {
        'nodes': nodes,
        'edges': edges,
        'node_features': {node: np.random.rand(64).tolist() for node in nodes}
    }
def gnn_fusion_embeddings(self, modality_graph: Dict[str, Any]) -> Dict[str, Any]:
    """Simulate one round of GNN message passing over the modality graph.

    Each node's embedding becomes the element-wise mean of its own
    feature vector and each neighbour's feature vector. (Previously all
    neighbour features were flattened into ONE list before averaging,
    which produced a ragged array -- and a crash in modern NumPy -- as
    soon as a node had more than one neighbour.)
    """
    nodes = modality_graph['nodes']
    edges = modality_graph['edges']
    node_features = modality_graph['node_features']
    gnn_embeddings = {}
    for node in nodes:
        # Collect each neighbour's feature vector separately (edges are undirected).
        neighbor_vectors = []
        for edge in edges:
            if edge['target'] == node:
                neighbor_vectors.append(node_features[edge['source']])
            elif edge['source'] == node:
                neighbor_vectors.append(node_features[edge['target']])
        # Element-wise mean over self + neighbours (just self when isolated).
        stacked = [node_features[node]] + neighbor_vectors
        gnn_embeddings[node] = np.mean(stacked, axis=0).tolist()
    return {
        'node_embeddings': gnn_embeddings,
        # Whole-graph embedding: mean over all node embeddings.
        'graph_embedding': np.mean(list(gnn_embeddings.values()), axis=0).tolist()
    }
def decode_gnn_output(self, graph_embeddings: Dict[str, Any]) -> Dict[str, Any]:
    """Wrap the pooled graph embedding as a standard fusion result."""
    embedding = graph_embeddings['graph_embedding']
    return {
        'features': {
            'graph_embedding': embedding,
            'embedding_dim': len(embedding),
        },
        'confidence': 0.82,
    }
# Helper methods for feature extraction (simulated).
# extract_* return fixed/derived scalar feature maps; generate_* return
# random placeholder embedding vectors of modality-specific dimension.
def extract_text_features(self, data: Any) -> Dict[str, float]:
    # Only 'length' is derived from the input; the rest are constants.
    return {'length': len(str(data)), 'complexity': 0.7, 'sentiment': 0.8}
def generate_text_embeddings(self, data: Any) -> List[float]:
    # Random 768-dim placeholder (data is ignored).
    return np.random.rand(768).tolist()
def extract_image_features(self, data: Any) -> Dict[str, float]:
    return {'brightness': 0.6, 'contrast': 0.7, 'sharpness': 0.8}
def generate_image_embeddings(self, data: Any) -> List[float]:
    # Random 512-dim placeholder (data is ignored).
    return np.random.rand(512).tolist()
def extract_audio_features(self, data: Any) -> Dict[str, float]:
    return {'loudness': 0.7, 'pitch': 0.6, 'tempo': 0.8}
def generate_audio_embeddings(self, data: Any) -> List[float]:
    # Random 256-dim placeholder (data is ignored).
    return np.random.rand(256).tolist()
def extract_video_features(self, data: Any) -> Dict[str, float]:
    return {'motion': 0.7, 'clarity': 0.8, 'duration': 0.6}
def generate_video_embeddings(self, data: Any) -> List[float]:
    # Random 1024-dim placeholder (data is ignored).
    return np.random.rand(1024).tolist()
def extract_structured_features(self, data: Any) -> Dict[str, float]:
    return {'completeness': 0.9, 'consistency': 0.8, 'quality': 0.85}
def generate_structured_embeddings(self, data: Any) -> List[float]:
    # Random 128-dim placeholder (data is ignored).
    return np.random.rand(128).tolist()
def calculate_ensemble_confidence(self, results: Dict[str, Any]) -> float:
    """Mean per-modality confidence for ensemble fusion (0.5 when empty)."""
    if not results:
        return 0.5
    return np.mean([entry['confidence'] for entry in results.values()])

View File

@@ -0,0 +1,616 @@
"""
Agent Reputation and Trust Service
Implements reputation management, trust score calculations, and economic profiling
"""
import asyncio
import math
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import json
import logging
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.reputation import (
AgentReputation, TrustScoreCalculation, ReputationEvent,
AgentEconomicProfile, CommunityFeedback, ReputationLevelThreshold,
ReputationLevel, TrustScoreCategory
)
from ..domain.agent import AIAgentWorkflow, AgentStatus
from ..domain.payment import PaymentTransaction
logger = logging.getLogger(__name__)
class TrustScoreCalculator:
    """Advanced trust score calculation algorithms.

    Each ``calculate_*_score`` method returns a component score on a 0-1000
    scale (500.0 is the neutral score used when no reputation row exists).
    ``calculate_composite_trust_score`` combines the components using the
    category weights configured in ``__init__``.
    """

    def __init__(self):
        # Weight factors for the composite score; values sum to 1.0.
        self.weights = {
            TrustScoreCategory.PERFORMANCE: 0.35,
            TrustScoreCategory.RELIABILITY: 0.25,
            TrustScoreCategory.COMMUNITY: 0.20,
            TrustScoreCategory.SECURITY: 0.10,
            TrustScoreCategory.ECONOMIC: 0.10
        }
        # Decay factors for time-based scoring.
        # NOTE(review): kept for interface compatibility; currently not
        # referenced by any method in this class.
        self.decay_factors = {
            'daily': 0.95,
            'weekly': 0.90,
            'monthly': 0.80,
            'yearly': 0.60
        }

    def calculate_performance_score(
        self,
        agent_id: str,
        session: Session,
        time_window: timedelta = timedelta(days=30)
    ) -> float:
        """Calculate the performance-based trust score component (0-1000).

        Derives a base score from the agent's 1-5 star rating, then scales
        it down by transaction success rate and average response time.

        NOTE(review): ``time_window`` is currently unused — a real
        implementation would analyse job performance inside the window.
        (A dead, never-executed count query was removed here.)
        """
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return 500.0  # Neutral score for agents with no reputation row
        # Map the 1-5 star rating onto the 0-1000 scale.
        base_score = (reputation.performance_rating / 5.0) * 1000
        # Scale by the fraction of successful transactions.
        if reputation.transaction_count > 0:
            success_modifier = reputation.success_rate / 100.0
            base_score *= success_modifier
        # Penalise slow agents; a 10s average response halves the score
        # at most (modifier is floored at 0.5).
        if reputation.average_response_time > 0:
            response_modifier = max(0.5, 1.0 - (reputation.average_response_time / 10000.0))
            base_score *= response_modifier
        return min(1000.0, max(0.0, base_score))

    def calculate_reliability_score(
        self,
        agent_id: str,
        session: Session,
        time_window: timedelta = timedelta(days=30)
    ) -> float:
        """Calculate the reliability-based trust score component (0-1000).

        Combines the stored reliability percentage with uptime and the
        ratio of completed to total jobs.

        NOTE(review): ``time_window`` is currently unused.
        """
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return 500.0
        # Convert the 0-100 reliability percentage to the 0-1000 scale.
        base_score = reputation.reliability_score * 10
        # Scale by uptime percentage.
        if reputation.uptime_percentage > 0:
            uptime_modifier = reputation.uptime_percentage / 100.0
            base_score *= uptime_modifier
        # Scale by the completed/total job ratio.
        total_jobs = reputation.jobs_completed + reputation.jobs_failed
        if total_jobs > 0:
            completion_ratio = reputation.jobs_completed / total_jobs
            base_score *= completion_ratio
        return min(1000.0, max(0.0, base_score))

    def calculate_community_score(
        self,
        agent_id: str,
        session: Session,
        time_window: timedelta = timedelta(days=90)
    ) -> float:
        """Calculate the community-based trust score component (0-1000).

        Uses approved community feedback inside ``time_window``, weighted by
        each reviewer's verification weight, with a small boost for feedback
        volume (capped at +20%).
        """
        cutoff_date = datetime.utcnow() - time_window
        # Only approved feedback inside the window counts.
        feedback_query = select(CommunityFeedback).where(
            and_(
                CommunityFeedback.agent_id == agent_id,
                CommunityFeedback.created_at >= cutoff_date,
                CommunityFeedback.moderation_status == "approved"
            )
        )
        feedbacks = session.exec(feedback_query).all()
        if not feedbacks:
            return 500.0  # Neutral score when there is no feedback
        # Weighted average rating: verified reviewers count for more.
        total_weight = 0.0
        weighted_sum = 0.0
        for feedback in feedbacks:
            weight = feedback.verification_weight
            rating = feedback.overall_rating
            weighted_sum += rating * weight
            total_weight += weight
        if total_weight > 0:
            avg_rating = weighted_sum / total_weight
            base_score = (avg_rating / 5.0) * 1000
        else:
            base_score = 500.0
        # Volume boost: +1% per review, capped at +20%.
        feedback_count = len(feedbacks)
        if feedback_count > 0:
            volume_modifier = min(1.2, 1.0 + (feedback_count / 100.0))
            base_score *= volume_modifier
        return min(1000.0, max(0.0, base_score))

    def calculate_security_score(
        self,
        agent_id: str,
        session: Session,
        time_window: timedelta = timedelta(days=180)
    ) -> float:
        """Calculate the security-based trust score component (0-1000).

        Starts from a high baseline, subtracts a dispute-ratio penalty and
        adds a boost for held certifications.

        NOTE(review): ``time_window`` is currently unused.
        """
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return 500.0
        # Trusting baseline; penalties are subtracted from here.
        base_score = 800.0
        # Dispute penalty: proportional to disputes per transaction,
        # at most 500 points.
        if reputation.transaction_count > 0:
            dispute_ratio = reputation.dispute_count / reputation.transaction_count
            dispute_penalty = dispute_ratio * 500
            base_score -= dispute_penalty
        # Certification boost: 50 points each, capped at 200.
        if reputation.certifications:
            certification_boost = min(200.0, len(reputation.certifications) * 50.0)
            base_score += certification_boost
        return min(1000.0, max(0.0, base_score))

    def calculate_economic_score(
        self,
        agent_id: str,
        session: Session,
        time_window: timedelta = timedelta(days=30)
    ) -> float:
        """Calculate the economic-based trust score component (0-1000).

        Higher average earnings per transaction (relative to a 0.1 AITBC
        baseline) indicate higher-value work; the result is scaled by
        success rate.

        NOTE(review): ``time_window`` is currently unused.
        """
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return 500.0
        if reputation.total_earnings > 0 and reputation.transaction_count > 0:
            avg_earning_per_transaction = reputation.total_earnings / reputation.transaction_count
            # 0.1 AITBC per transaction is the neutral baseline; the
            # modifier is capped at 2x.
            earning_modifier = min(2.0, avg_earning_per_transaction / 0.1)
            base_score = 500.0 * earning_modifier
        else:
            base_score = 500.0
        # Scale by success rate.
        if reputation.success_rate > 0:
            success_modifier = reputation.success_rate / 100.0
            base_score *= success_modifier
        return min(1000.0, max(0.0, base_score))

    def calculate_composite_trust_score(
        self,
        agent_id: str,
        session: Session,
        time_window: timedelta = timedelta(days=30)
    ) -> float:
        """Calculate the composite trust score (0-1000).

        Computes all five category components, combines them with the
        configured weights, then smooths against the previously stored
        score (70% new / 30% old) to avoid abrupt jumps.
        """
        performance_score = self.calculate_performance_score(agent_id, session, time_window)
        reliability_score = self.calculate_reliability_score(agent_id, session, time_window)
        community_score = self.calculate_community_score(agent_id, session, time_window)
        security_score = self.calculate_security_score(agent_id, session, time_window)
        economic_score = self.calculate_economic_score(agent_id, session, time_window)
        # Weighted combination of the five components.
        weighted_score = (
            performance_score * self.weights[TrustScoreCategory.PERFORMANCE] +
            reliability_score * self.weights[TrustScoreCategory.RELIABILITY] +
            community_score * self.weights[TrustScoreCategory.COMMUNITY] +
            security_score * self.weights[TrustScoreCategory.SECURITY] +
            economic_score * self.weights[TrustScoreCategory.ECONOMIC]
        )
        # Smooth against the previous stored score for stability.
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if reputation and reputation.trust_score > 0:
            final_score = (weighted_score * 0.7) + (reputation.trust_score * 0.3)
        else:
            final_score = weighted_score
        return min(1000.0, max(0.0, final_score))

    def determine_reputation_level(self, trust_score: float) -> ReputationLevel:
        """Map a trust score (0-1000) onto a discrete reputation level."""
        if trust_score >= 900:
            return ReputationLevel.MASTER
        elif trust_score >= 750:
            return ReputationLevel.EXPERT
        elif trust_score >= 600:
            return ReputationLevel.ADVANCED
        elif trust_score >= 400:
            return ReputationLevel.INTERMEDIATE
        else:
            return ReputationLevel.BEGINNER
class ReputationService:
    """Main reputation management service.

    Persists :class:`AgentReputation` rows via the injected session and
    delegates score math to :class:`TrustScoreCalculator`.
    """

    def __init__(self, session: Session):
        # Database session used for every read/write in this service.
        self.session = session
        # Pure-computation helper for trust score components.
        self.calculator = TrustScoreCalculator()

    async def create_reputation_profile(self, agent_id: str) -> AgentReputation:
        """Create a new reputation profile for an agent.

        Idempotent: returns the existing profile when one is already stored.
        """
        # Check if profile already exists
        existing = self.session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if existing:
            return existing
        # Create new reputation profile with neutral starting values.
        reputation = AgentReputation(
            agent_id=agent_id,
            trust_score=500.0,  # Neutral starting score
            reputation_level=ReputationLevel.BEGINNER,
            performance_rating=3.0,
            reliability_score=50.0,
            community_rating=3.0,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
        )
        self.session.add(reputation)
        self.session.commit()
        self.session.refresh(reputation)
        logger.info(f"Created reputation profile for agent {agent_id}")
        return reputation

    async def update_trust_score(
        self,
        agent_id: str,
        event_type: str,
        impact_data: Dict[str, Any]
    ) -> AgentReputation:
        """Update agent trust score based on an event.

        Recomputes the composite score, records a :class:`ReputationEvent`
        capturing before/after values, and appends an entry to the profile's
        reputation history.
        """
        # Get or create reputation profile
        reputation = await self.create_reputation_profile(agent_id)
        # Store previous scores so the event can record the delta.
        old_trust_score = reputation.trust_score
        old_reputation_level = reputation.reputation_level
        # Recompute the composite score from all category components.
        new_trust_score = self.calculator.calculate_composite_trust_score(agent_id, self.session)
        new_reputation_level = self.calculator.determine_reputation_level(new_trust_score)
        # Create an audit event with before/after values.
        event = ReputationEvent(
            agent_id=agent_id,
            event_type=event_type,
            impact_score=new_trust_score - old_trust_score,
            trust_score_before=old_trust_score,
            trust_score_after=new_trust_score,
            reputation_level_before=old_reputation_level,
            reputation_level_after=new_reputation_level,
            event_data=impact_data,
            occurred_at=datetime.utcnow(),
            processed_at=datetime.utcnow()
        )
        self.session.add(event)
        # Update reputation profile
        reputation.trust_score = new_trust_score
        reputation.reputation_level = new_reputation_level
        reputation.updated_at = datetime.utcnow()
        reputation.last_activity = datetime.utcnow()
        # Add to reputation history
        history_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "event_type": event_type,
            "trust_score_change": new_trust_score - old_trust_score,
            "new_trust_score": new_trust_score,
            "reputation_level": new_reputation_level.value
        }
        # NOTE(review): in-place mutation of a JSON column is not tracked by
        # SQLAlchemy unless the column uses MutableList (or flag_modified is
        # called) — verify this append actually persists.
        reputation.reputation_history.append(history_entry)
        self.session.commit()
        self.session.refresh(reputation)
        logger.info(f"Updated trust score for agent {agent_id}: {old_trust_score} -> {new_trust_score}")
        return reputation

    async def record_job_completion(
        self,
        agent_id: str,
        job_id: str,
        success: bool,
        response_time: float,
        earnings: float
    ) -> AgentReputation:
        """Record job completion and update reputation.

        Updates job counters, running-average response time, earnings and
        derived rates, then triggers a trust-score recalculation.
        """
        reputation = await self.create_reputation_profile(agent_id)
        # Update job metrics
        if success:
            reputation.jobs_completed += 1
        else:
            reputation.jobs_failed += 1
        # Update response time (running average)
        # NOTE(review): on success, jobs_completed was already incremented
        # above, so this average divides by (count + 1) relative to the
        # post-increment count — confirm the pre-increment count was not
        # intended here.
        if reputation.average_response_time == 0:
            reputation.average_response_time = response_time
        else:
            reputation.average_response_time = (
                (reputation.average_response_time * reputation.jobs_completed + response_time) /
                (reputation.jobs_completed + 1)
            )
        # Update earnings
        reputation.total_earnings += earnings
        reputation.transaction_count += 1
        # Update success rate
        total_jobs = reputation.jobs_completed + reputation.jobs_failed
        reputation.success_rate = (reputation.jobs_completed / total_jobs) * 100.0 if total_jobs > 0 else 0.0
        # Update reliability score based on success rate
        reputation.reliability_score = reputation.success_rate
        # Update performance rating based on response time and success;
        # adjustments are small (±0.1) and clamped to the 1.0-5.0 range.
        if success and response_time < 5000:  # Good performance
            reputation.performance_rating = min(5.0, reputation.performance_rating + 0.1)
        elif not success or response_time > 10000:  # Poor performance
            reputation.performance_rating = max(1.0, reputation.performance_rating - 0.1)
        reputation.updated_at = datetime.utcnow()
        reputation.last_activity = datetime.utcnow()
        # Create trust score update event; update_trust_score commits the
        # session, persisting the metric changes made above as well.
        impact_data = {
            "job_id": job_id,
            "success": success,
            "response_time": response_time,
            "earnings": earnings,
            "total_jobs": total_jobs,
            "success_rate": reputation.success_rate
        }
        await self.update_trust_score(agent_id, "job_completed", impact_data)
        logger.info(f"Recorded job completion for agent {agent_id}: success={success}, earnings={earnings}")
        return reputation

    async def add_community_feedback(
        self,
        agent_id: str,
        reviewer_id: str,
        ratings: Dict[str, float],
        feedback_text: str = "",
        tags: Optional[List[str]] = None
    ) -> CommunityFeedback:
        """Add community feedback for an agent.

        Missing rating categories default to a neutral 3.0. Recomputes the
        agent's aggregate community rating afterwards.
        """
        feedback = CommunityFeedback(
            agent_id=agent_id,
            reviewer_id=reviewer_id,
            overall_rating=ratings.get("overall", 3.0),
            performance_rating=ratings.get("performance", 3.0),
            communication_rating=ratings.get("communication", 3.0),
            reliability_rating=ratings.get("reliability", 3.0),
            value_rating=ratings.get("value", 3.0),
            feedback_text=feedback_text,
            feedback_tags=tags or [],
            created_at=datetime.utcnow()
        )
        self.session.add(feedback)
        self.session.commit()
        self.session.refresh(feedback)
        # Update agent's community rating
        await self._update_community_rating(agent_id)
        logger.info(f"Added community feedback for agent {agent_id} from reviewer {reviewer_id}")
        return feedback

    async def _update_community_rating(self, agent_id: str) -> None:
        """Update agent's community rating based on feedback.

        Uses a weighted average over all approved feedback, where each
        reviewer's verification weight scales their contribution.
        """
        # Get all approved feedback (pending/rejected reviews are ignored).
        feedbacks = self.session.exec(
            select(CommunityFeedback).where(
                and_(
                    CommunityFeedback.agent_id == agent_id,
                    CommunityFeedback.moderation_status == "approved"
                )
            )
        ).all()
        if not feedbacks:
            return
        # Calculate weighted average
        total_weight = 0.0
        weighted_sum = 0.0
        for feedback in feedbacks:
            weight = feedback.verification_weight
            rating = feedback.overall_rating
            weighted_sum += rating * weight
            total_weight += weight
        if total_weight > 0:
            avg_rating = weighted_sum / total_weight
            # Update reputation profile
            reputation = self.session.exec(
                select(AgentReputation).where(AgentReputation.agent_id == agent_id)
            ).first()
            if reputation:
                reputation.community_rating = avg_rating
                reputation.updated_at = datetime.utcnow()
                self.session.commit()

    async def get_reputation_summary(self, agent_id: str) -> Dict[str, Any]:
        """Get comprehensive reputation summary for an agent.

        Returns ``{"error": ...}`` when no profile exists; otherwise a dict
        of current metrics plus the 10 most recent events (30 days) and the
        5 most recent approved feedback entries.
        """
        reputation = self.session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        if not reputation:
            return {"error": "Reputation profile not found"}
        # Get recent events (last 30 days, newest first, capped at 10).
        recent_events = self.session.exec(
            select(ReputationEvent).where(
                and_(
                    ReputationEvent.agent_id == agent_id,
                    ReputationEvent.occurred_at >= datetime.utcnow() - timedelta(days=30)
                )
            ).order_by(ReputationEvent.occurred_at.desc()).limit(10)
        ).all()
        # Get recent approved feedback (newest first, capped at 5).
        recent_feedback = self.session.exec(
            select(CommunityFeedback).where(
                and_(
                    CommunityFeedback.agent_id == agent_id,
                    CommunityFeedback.moderation_status == "approved"
                )
            ).order_by(CommunityFeedback.created_at.desc()).limit(5)
        ).all()
        # NOTE(review): last_activity.isoformat() below raises if
        # last_activity is None — verify the column has a default or is
        # non-nullable.
        return {
            "agent_id": agent_id,
            "trust_score": reputation.trust_score,
            "reputation_level": reputation.reputation_level.value,
            "performance_rating": reputation.performance_rating,
            "reliability_score": reputation.reliability_score,
            "community_rating": reputation.community_rating,
            "total_earnings": reputation.total_earnings,
            "transaction_count": reputation.transaction_count,
            "success_rate": reputation.success_rate,
            "jobs_completed": reputation.jobs_completed,
            "jobs_failed": reputation.jobs_failed,
            "average_response_time": reputation.average_response_time,
            "dispute_count": reputation.dispute_count,
            "certifications": reputation.certifications,
            "specialization_tags": reputation.specialization_tags,
            "geographic_region": reputation.geographic_region,
            "last_activity": reputation.last_activity.isoformat(),
            "recent_events": [
                {
                    "event_type": event.event_type,
                    "impact_score": event.impact_score,
                    "occurred_at": event.occurred_at.isoformat()
                }
                for event in recent_events
            ],
            "recent_feedback": [
                {
                    "overall_rating": feedback.overall_rating,
                    "feedback_text": feedback.feedback_text,
                    "created_at": feedback.created_at.isoformat()
                }
                for feedback in recent_feedback
            ]
        }

    async def get_leaderboard(
        self,
        category: str = "trust_score",
        limit: int = 50,
        region: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """Get reputation leaderboard.

        Orders agents descending by *category* (any AgentReputation column
        name), optionally filtered by geographic region.
        """
        # NOTE(review): `category` is resolved via getattr without
        # validation — restrict to a whitelist of sortable columns before
        # exposing this through a public API.
        query = select(AgentReputation).order_by(
            getattr(AgentReputation, category).desc()
        ).limit(limit)
        if region:
            query = query.where(AgentReputation.geographic_region == region)
        reputations = self.session.exec(query).all()
        leaderboard = []
        for rank, reputation in enumerate(reputations, 1):
            leaderboard.append({
                "rank": rank,
                "agent_id": reputation.agent_id,
                "trust_score": reputation.trust_score,
                "reputation_level": reputation.reputation_level.value,
                "performance_rating": reputation.performance_rating,
                "reliability_score": reputation.reliability_score,
                "community_rating": reputation.community_rating,
                "total_earnings": reputation.total_earnings,
                "transaction_count": reputation.transaction_count,
                "geographic_region": reputation.geographic_region,
                "specialization_tags": reputation.specialization_tags
            })
        return leaderboard

# ---------------------------------------------------------------------------
# New file: agent reward engine service (diff header removed)
# ---------------------------------------------------------------------------
"""
Agent Reward Engine Service
Implements performance-based reward calculations, distributions, and tier management
"""
import asyncio
import math
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from uuid import uuid4
import json
import logging
from sqlmodel import Session, select, update, delete, and_, or_, func
from sqlalchemy.exc import SQLAlchemyError
from ..domain.rewards import (
AgentRewardProfile, RewardTierConfig, RewardCalculation, RewardDistribution,
RewardEvent, RewardMilestone, RewardAnalytics, RewardTier, RewardType, RewardStatus
)
from ..domain.reputation import AgentReputation, ReputationLevel
from ..domain.payment import PaymentTransaction
logger = logging.getLogger(__name__)
class RewardCalculator:
    """Advanced reward calculation algorithms.

    Pure-ish calculator: reads tier config, reputation and milestone rows
    from the supplied session but leaves committing to the caller.
    """

    def __init__(self):
        # Base reward rates (in AITBC).
        # NOTE(review): base_rates is not referenced by any method in this
        # class — callers pass base_amount explicitly. Confirm whether this
        # table is still needed.
        self.base_rates = {
            'job_completion': 0.01,  # Base reward per job
            'high_performance': 0.005,  # Additional for high performance
            'perfect_rating': 0.01,  # Bonus for 5-star ratings
            'on_time_delivery': 0.002,  # Bonus for on-time delivery
            'repeat_client': 0.003,  # Bonus for repeat clients
        }
        # Performance thresholds
        self.performance_thresholds = {
            'excellent': 4.5,  # Rating threshold for excellent performance
            'good': 4.0,  # Rating threshold for good performance
            'response_time_fast': 2000,  # Response time in ms for fast
            'response_time_excellent': 1000,  # Response time in ms for excellent
        }

    def calculate_tier_multiplier(self, trust_score: float, session: Session) -> float:
        """Calculate reward multiplier based on agent's tier.

        Picks the highest-threshold active RewardTierConfig the agent
        qualifies for; falls back to hard-coded thresholds when no config
        rows exist.
        """
        # Get tier configuration
        tier_config = session.exec(
            select(RewardTierConfig).where(
                and_(
                    RewardTierConfig.min_trust_score <= trust_score,
                    RewardTierConfig.is_active == True
                )
            ).order_by(RewardTierConfig.min_trust_score.desc())
        ).first()
        if tier_config:
            return tier_config.base_multiplier
        else:
            # Default tier calculation if no config found.
            # NOTE(review): these fallback thresholds (900/750/600/400) do
            # not match RewardEngine.determine_reward_tier (950/850/750/600)
            # — confirm which table is authoritative.
            if trust_score >= 900:
                return 2.0  # Diamond
            elif trust_score >= 750:
                return 1.5  # Platinum
            elif trust_score >= 600:
                return 1.2  # Gold
            elif trust_score >= 400:
                return 1.1  # Silver
            else:
                return 1.0  # Bronze

    def calculate_performance_bonus(
        self,
        performance_metrics: Dict[str, Any],
        session: Session
    ) -> float:
        """Calculate performance-based bonus multiplier.

        Returns an additive bonus fraction (e.g. 0.5 = +50%) built from
        rating, response time, success rate and job volume.

        NOTE(review): ``session`` is currently unused; kept for interface
        compatibility with the other calculate_* methods.
        """
        bonus = 0.0
        # Rating bonus
        rating = performance_metrics.get('performance_rating', 3.0)
        if rating >= self.performance_thresholds['excellent']:
            bonus += 0.5  # 50% bonus for excellent performance
        elif rating >= self.performance_thresholds['good']:
            bonus += 0.2  # 20% bonus for good performance
        # Response time bonus
        response_time = performance_metrics.get('average_response_time', 5000)
        if response_time <= self.performance_thresholds['response_time_excellent']:
            bonus += 0.3  # 30% bonus for excellent response time
        elif response_time <= self.performance_thresholds['response_time_fast']:
            bonus += 0.1  # 10% bonus for fast response time
        # Success rate bonus
        success_rate = performance_metrics.get('success_rate', 80.0)
        if success_rate >= 95.0:
            bonus += 0.2  # 20% bonus for excellent success rate
        elif success_rate >= 90.0:
            bonus += 0.1  # 10% bonus for good success rate
        # Job volume bonus
        job_count = performance_metrics.get('jobs_completed', 0)
        if job_count >= 100:
            bonus += 0.15  # 15% bonus for high volume
        elif job_count >= 50:
            bonus += 0.1  # 10% bonus for moderate volume
        return bonus

    def calculate_loyalty_bonus(self, agent_id: str, session: Session) -> float:
        """Calculate loyalty bonus based on agent history.

        Additive bonus fraction from streaks, lifetime earnings, referrals
        and community contributions; 0.0 when no reward profile exists.
        """
        # Get agent reward profile
        reward_profile = session.exec(
            select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
        ).first()
        if not reward_profile:
            return 0.0
        bonus = 0.0
        # Streak bonus
        if reward_profile.current_streak >= 30:  # 30+ day streak
            bonus += 0.3
        elif reward_profile.current_streak >= 14:  # 14+ day streak
            bonus += 0.2
        elif reward_profile.current_streak >= 7:  # 7+ day streak
            bonus += 0.1
        # Lifetime earnings bonus
        if reward_profile.lifetime_earnings >= 1000:  # 1000+ AITBC
            bonus += 0.2
        elif reward_profile.lifetime_earnings >= 500:  # 500+ AITBC
            bonus += 0.1
        # Referral bonus
        if reward_profile.referral_count >= 10:
            bonus += 0.2
        elif reward_profile.referral_count >= 5:
            bonus += 0.1
        # Community contributions bonus
        if reward_profile.community_contributions >= 20:
            bonus += 0.15
        elif reward_profile.community_contributions >= 10:
            bonus += 0.1
        return bonus

    def calculate_referral_bonus(self, referral_data: Dict[str, Any]) -> float:
        """Calculate referral bonus (a flat AITBC amount, not a multiplier).

        0.05 AITBC per referral, scaled by a 0-1 quality factor into the
        0.5x-1.0x range.
        """
        referral_count = referral_data.get('referral_count', 0)
        referral_quality = referral_data.get('referral_quality', 1.0)  # 0-1 scale
        base_bonus = 0.05 * referral_count  # 0.05 AITBC per referral
        # Quality multiplier
        quality_multiplier = 0.5 + (referral_quality * 0.5)  # 0.5 to 1.0
        return base_bonus * quality_multiplier

    def calculate_milestone_bonus(self, agent_id: str, session: Session) -> float:
        """Calculate milestone achievement bonus.

        Sums reward amounts of completed-but-unclaimed milestones and marks
        them claimed.

        NOTE(review): the claimed flags set here are persisted only when the
        caller later commits the session — verify every call path commits.
        """
        # Check for unclaimed milestones
        milestones = session.exec(
            select(RewardMilestone).where(
                and_(
                    RewardMilestone.agent_id == agent_id,
                    RewardMilestone.is_completed == True,
                    RewardMilestone.is_claimed == False
                )
            )
        ).all()
        total_bonus = 0.0
        for milestone in milestones:
            total_bonus += milestone.reward_amount
            # Mark as claimed
            milestone.is_claimed = True
            milestone.claimed_at = datetime.utcnow()
        return total_bonus

    def calculate_total_reward(
        self,
        agent_id: str,
        base_amount: float,
        performance_metrics: Dict[str, Any],
        session: Session
    ) -> Dict[str, Any]:
        """Calculate total reward with all bonuses and multipliers.

        total = base * tier_multiplier * (1 + performance + loyalty)
                + referral_bonus + milestone_bonus

        Returns a breakdown dict including the trust score used.
        """
        # Get agent's trust score; 500 (neutral) when no reputation exists.
        reputation = session.exec(
            select(AgentReputation).where(AgentReputation.agent_id == agent_id)
        ).first()
        trust_score = reputation.trust_score if reputation else 500.0
        # Calculate components
        tier_multiplier = self.calculate_tier_multiplier(trust_score, session)
        performance_bonus = self.calculate_performance_bonus(performance_metrics, session)
        loyalty_bonus = self.calculate_loyalty_bonus(agent_id, session)
        referral_bonus = self.calculate_referral_bonus(performance_metrics.get('referral_data', {}))
        milestone_bonus = self.calculate_milestone_bonus(agent_id, session)
        # Calculate effective multiplier
        effective_multiplier = tier_multiplier * (1 + performance_bonus + loyalty_bonus)
        # Calculate total reward
        total_reward = base_amount * effective_multiplier + referral_bonus + milestone_bonus
        return {
            'base_amount': base_amount,
            'tier_multiplier': tier_multiplier,
            'performance_bonus': performance_bonus,
            'loyalty_bonus': loyalty_bonus,
            'referral_bonus': referral_bonus,
            'milestone_bonus': milestone_bonus,
            'effective_multiplier': effective_multiplier,
            'total_reward': total_reward,
            'trust_score': trust_score
        }
class RewardEngine:
"""Main reward management and distribution engine"""
def __init__(self, session: Session):
    # Database session used for all reads/writes in this engine.
    self.session = session
    # Pure-ish calculator for reward math (bonuses, multipliers).
    self.calculator = RewardCalculator()
async def create_reward_profile(self, agent_id: str) -> AgentRewardProfile:
    """Create a new reward profile for an agent.

    Idempotent: returns the existing profile when one is already stored.
    New profiles start at the Bronze tier with zero progress.
    """
    # Check if profile already exists
    existing = self.session.exec(
        select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
    ).first()
    if existing:
        return existing
    # Create new reward profile
    profile = AgentRewardProfile(
        agent_id=agent_id,
        current_tier=RewardTier.BRONZE,
        tier_progress=0.0,
        created_at=datetime.utcnow(),
        updated_at=datetime.utcnow()
    )
    self.session.add(profile)
    self.session.commit()
    self.session.refresh(profile)
    logger.info(f"Created reward profile for agent {agent_id}")
    return profile
async def calculate_and_distribute_reward(
    self,
    agent_id: str,
    reward_type: RewardType,
    base_amount: float,
    performance_metrics: Dict[str, Any],
    reference_date: Optional[datetime] = None
) -> Dict[str, Any]:
    """Calculate and distribute reward for an agent.

    Full pipeline: calculate the reward breakdown, persist calculation and
    distribution records, process the distribution immediately, update the
    agent's reward profile/tier, and emit a reward event.
    """
    # Ensure reward profile exists
    await self.create_reward_profile(agent_id)
    # Calculate reward breakdown (base, multipliers, bonuses, total).
    reward_calculation = self.calculator.calculate_total_reward(
        agent_id, base_amount, performance_metrics, self.session
    )
    # Persist the calculation record for auditability.
    calculation = RewardCalculation(
        agent_id=agent_id,
        reward_type=reward_type,
        base_amount=base_amount,
        tier_multiplier=reward_calculation['tier_multiplier'],
        performance_bonus=reward_calculation['performance_bonus'],
        loyalty_bonus=reward_calculation['loyalty_bonus'],
        referral_bonus=reward_calculation['referral_bonus'],
        milestone_bonus=reward_calculation['milestone_bonus'],
        total_reward=reward_calculation['total_reward'],
        effective_multiplier=reward_calculation['effective_multiplier'],
        reference_date=reference_date or datetime.utcnow(),
        trust_score_at_calculation=reward_calculation['trust_score'],
        performance_metrics=performance_metrics,
        calculated_at=datetime.utcnow()
    )
    self.session.add(calculation)
    self.session.commit()
    self.session.refresh(calculation)
    # Create distribution record, scheduled for immediate processing.
    distribution = RewardDistribution(
        calculation_id=calculation.id,
        agent_id=agent_id,
        reward_amount=reward_calculation['total_reward'],
        reward_type=reward_type,
        status=RewardStatus.PENDING,
        created_at=datetime.utcnow(),
        scheduled_at=datetime.utcnow()
    )
    self.session.add(distribution)
    self.session.commit()
    self.session.refresh(distribution)
    # Process distribution synchronously (raises on failure).
    await self.process_reward_distribution(distribution.id)
    # Update agent profile (earnings, streaks, tier check).
    await self.update_agent_reward_profile(agent_id, reward_calculation)
    # Create reward event
    await self.create_reward_event(
        agent_id, "reward_distributed", reward_type, reward_calculation['total_reward'],
        calculation_id=calculation.id, distribution_id=distribution.id
    )
    return {
        "calculation_id": calculation.id,
        "distribution_id": distribution.id,
        "reward_amount": reward_calculation['total_reward'],
        "reward_type": reward_type,
        "tier_multiplier": reward_calculation['tier_multiplier'],
        "total_bonus": reward_calculation['performance_bonus'] + reward_calculation['loyalty_bonus'],
        "status": "distributed"
    }
async def process_reward_distribution(self, distribution_id: str) -> RewardDistribution:
    """Process a reward distribution.

    No-op for non-pending distributions. On failure, marks the record
    CANCELLED, increments retry_count, and re-raises.
    """
    distribution = self.session.exec(
        select(RewardDistribution).where(RewardDistribution.id == distribution_id)
    ).first()
    if not distribution:
        raise ValueError(f"Distribution {distribution_id} not found")
    if distribution.status != RewardStatus.PENDING:
        # Already processed (or cancelled); return unchanged.
        return distribution
    try:
        # Simulate blockchain transaction (in real implementation, this
        # would interact with the blockchain).
        transaction_id = f"tx_{uuid4().hex[:8]}"
        transaction_hash = f"0x{uuid4().hex}"
        # Update distribution
        distribution.transaction_id = transaction_id
        distribution.transaction_hash = transaction_hash
        distribution.transaction_status = "confirmed"
        distribution.status = RewardStatus.DISTRIBUTED
        distribution.processed_at = datetime.utcnow()
        distribution.confirmed_at = datetime.utcnow()
        self.session.commit()
        self.session.refresh(distribution)
        logger.info(f"Processed reward distribution {distribution_id} for agent {distribution.agent_id}")
    except Exception as e:
        # Handle distribution failure.
        # NOTE(review): CANCELLED is terminal, yet retry_count is
        # incremented — confirm whether failed distributions should be
        # retryable (a RETRY/FAILED status) instead.
        distribution.status = RewardStatus.CANCELLED
        distribution.error_message = str(e)
        distribution.retry_count += 1
        self.session.commit()
        logger.error(f"Failed to process reward distribution {distribution_id}: {str(e)}")
        raise
    return distribution
async def update_agent_reward_profile(self, agent_id: str, reward_calculation: Dict[str, Any]) -> None:
    """Update agent reward profile after reward distribution.

    Accumulates earnings, advances the reward streak, and checks for a
    tier upgrade. Silently returns when no profile exists.
    """
    profile = self.session.exec(
        select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
    ).first()
    if not profile:
        return
    # Update earnings: split the total into base vs. bonus portions.
    profile.base_earnings += reward_calculation['base_amount']
    profile.bonus_earnings += (
        reward_calculation['total_reward'] - reward_calculation['base_amount']
    )
    profile.total_earnings += reward_calculation['total_reward']
    profile.lifetime_earnings += reward_calculation['total_reward']
    # Update reward count and streak
    profile.rewards_distributed += 1
    profile.last_reward_date = datetime.utcnow()
    profile.current_streak += 1
    if profile.current_streak > profile.longest_streak:
        profile.longest_streak = profile.current_streak
    # Update performance score
    # NOTE(review): calculate_total_reward never emits a
    # 'performance_rating' key, so this always resets performance_score to
    # 0.0 — likely intended to come from performance_metrics instead.
    profile.performance_score = reward_calculation.get('performance_rating', 0.0)
    # Check for tier upgrade
    await self.check_and_update_tier(agent_id)
    profile.updated_at = datetime.utcnow()
    profile.last_activity = datetime.utcnow()
    self.session.commit()
async def check_and_update_tier(self, agent_id: str) -> None:
    """Check and update agent's reward tier.

    Recomputes the tier from the agent's current trust score; on change,
    updates the profile and emits a 'tier_upgrade' event. No-ops when
    either the reputation row or the reward profile is missing.
    """
    # Get agent reputation
    reputation = self.session.exec(
        select(AgentReputation).where(AgentReputation.agent_id == agent_id)
    ).first()
    if not reputation:
        return
    # Get reward profile
    profile = self.session.exec(
        select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
    ).first()
    if not profile:
        return
    # Determine new tier
    new_tier = self.determine_reward_tier(reputation.trust_score)
    old_tier = profile.current_tier
    if new_tier != old_tier:
        # Update tier (note: this fires on downgrades too, but the event
        # is still labelled 'tier_upgrade').
        profile.current_tier = new_tier
        profile.updated_at = datetime.utcnow()
        # Create tier upgrade event; create_reward_event commits the
        # session, persisting the tier change as well.
        await self.create_reward_event(
            agent_id, "tier_upgrade", RewardType.SPECIAL_BONUS, 0.0,
            tier_impact=new_tier
        )
        logger.info(f"Agent {agent_id} upgraded from {old_tier} to {new_tier}")
def determine_reward_tier(self, trust_score: float) -> RewardTier:
    """Map a trust score (0-1000) onto a reward tier.

    Floors: 950 Diamond, 850 Platinum, 750 Gold, 600 Silver; anything
    below 600 is Bronze.
    """
    tier_floors = (
        (950, RewardTier.DIAMOND),
        (850, RewardTier.PLATINUM),
        (750, RewardTier.GOLD),
        (600, RewardTier.SILVER),
    )
    for floor, tier in tier_floors:
        if trust_score >= floor:
            return tier
    return RewardTier.BRONZE
async def create_reward_event(
    self,
    agent_id: str,
    event_type: str,
    reward_type: RewardType,
    reward_impact: float,
    calculation_id: Optional[str] = None,
    distribution_id: Optional[str] = None,
    tier_impact: Optional[RewardTier] = None
) -> None:
    """Create a reward event record.

    Audit-trail helper: persists one RewardEvent row (trigger_source is
    always 'automatic') and commits immediately.
    """
    event = RewardEvent(
        agent_id=agent_id,
        event_type=event_type,
        trigger_source="automatic",
        reward_impact=reward_impact,
        tier_impact=tier_impact,
        related_calculation_id=calculation_id,
        related_distribution_id=distribution_id,
        occurred_at=datetime.utcnow(),
        processed_at=datetime.utcnow()
    )
    self.session.add(event)
    self.session.commit()
async def get_reward_summary(self, agent_id: str) -> Dict[str, Any]:
    """Get comprehensive reward summary for an agent.

    Returns ``{"error": ...}`` when no profile exists; otherwise a dict of
    profile metrics plus the 10 most recent calculations and distributions
    from the last 30 days.
    """
    profile = self.session.exec(
        select(AgentRewardProfile).where(AgentRewardProfile.agent_id == agent_id)
    ).first()
    if not profile:
        return {"error": "Reward profile not found"}
    # Get recent calculations (last 30 days, newest first, capped at 10).
    recent_calculations = self.session.exec(
        select(RewardCalculation).where(
            and_(
                RewardCalculation.agent_id == agent_id,
                RewardCalculation.calculated_at >= datetime.utcnow() - timedelta(days=30)
            )
        ).order_by(RewardCalculation.calculated_at.desc()).limit(10)
    ).all()
    # Get recent distributions (last 30 days, newest first, capped at 10).
    recent_distributions = self.session.exec(
        select(RewardDistribution).where(
            and_(
                RewardDistribution.agent_id == agent_id,
                RewardDistribution.created_at >= datetime.utcnow() - timedelta(days=30)
            )
        ).order_by(RewardDistribution.created_at.desc()).limit(10)
    ).all()
    return {
        "agent_id": agent_id,
        "current_tier": profile.current_tier.value,
        "tier_progress": profile.tier_progress,
        "base_earnings": profile.base_earnings,
        "bonus_earnings": profile.bonus_earnings,
        "total_earnings": profile.total_earnings,
        "lifetime_earnings": profile.lifetime_earnings,
        "rewards_distributed": profile.rewards_distributed,
        "current_streak": profile.current_streak,
        "longest_streak": profile.longest_streak,
        "performance_score": profile.performance_score,
        "loyalty_score": profile.loyalty_score,
        "referral_count": profile.referral_count,
        "community_contributions": profile.community_contributions,
        "last_reward_date": profile.last_reward_date.isoformat() if profile.last_reward_date else None,
        "recent_calculations": [
            {
                "reward_type": calc.reward_type.value,
                "total_reward": calc.total_reward,
                "calculated_at": calc.calculated_at.isoformat()
            }
            for calc in recent_calculations
        ],
        "recent_distributions": [
            {
                "reward_amount": dist.reward_amount,
                "status": dist.status.value,
                "created_at": dist.created_at.isoformat()
            }
            for dist in recent_distributions
        ]
    }
async def batch_process_pending_rewards(self, limit: int = 100) -> Dict[str, Any]:
"""Process pending reward distributions in batch"""
# Get pending distributions
pending_distributions = self.session.exec(
select(RewardDistribution).where(
and_(
RewardDistribution.status == RewardStatus.PENDING,
RewardDistribution.scheduled_at <= datetime.utcnow()
)
).order_by(RewardDistribution.priority.asc(), RewardDistribution.created_at.asc())
.limit(limit)
).all()
processed = 0
failed = 0
for distribution in pending_distributions:
try:
await self.process_reward_distribution(distribution.id)
processed += 1
except Exception as e:
failed += 1
logger.error(f"Failed to process distribution {distribution.id}: {str(e)}")
return {
"processed": processed,
"failed": failed,
"total": len(pending_distributions)
}
async def get_reward_analytics(
self,
period_type: str = "daily",
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None
) -> Dict[str, Any]:
"""Get reward system analytics"""
if not start_date:
start_date = datetime.utcnow() - timedelta(days=30)
if not end_date:
end_date = datetime.utcnow()
# Get distributions in period
distributions = self.session.exec(
select(RewardDistribution).where(
and_(
RewardDistribution.created_at >= start_date,
RewardDistribution.created_at <= end_date,
RewardDistribution.status == RewardStatus.DISTRIBUTED
)
).all()
)
if not distributions:
return {
"period_type": period_type,
"start_date": start_date.isoformat(),
"end_date": end_date.isoformat(),
"total_rewards_distributed": 0.0,
"total_agents_rewarded": 0,
"average_reward_per_agent": 0.0
}
# Calculate analytics
total_rewards = sum(d.reward_amount for d in distributions)
unique_agents = len(set(d.agent_id for d in distributions))
average_reward = total_rewards / unique_agents if unique_agents > 0 else 0.0
# Get agent profiles for tier distribution
agent_ids = list(set(d.agent_id for d in distributions))
profiles = self.session.exec(
select(AgentRewardProfile).where(AgentRewardProfile.agent_id.in_(agent_ids))
).all()
tier_distribution = {}
for profile in profiles:
tier = profile.current_tier.value
tier_distribution[tier] = tier_distribution.get(tier, 0) + 1
return {
"period_type": period_type,
"start_date": start_date.isoformat(),
"end_date": end_date.isoformat(),
"total_rewards_distributed": total_rewards,
"total_agents_rewarded": unique_agents,
"average_reward_per_agent": average_reward,
"tier_distribution": tier_distribution,
"total_distributions": len(distributions)
}

File diff suppressed because it is too large Load Diff