feat: add marketplace metrics, privacy features, and service registry endpoints

- Add Prometheus metrics for marketplace API throughput and error rates with new dashboard panels
- Implement confidential transaction models with encryption support and access control
- Add key management system with registration, rotation, and audit logging
- Create services and registry routers for service discovery and management
- Integrate ZK proof generation for privacy-preserving receipts
- Add metrics instrumentation
This commit is contained in:
oib
2025-12-22 10:33:23 +01:00
parent fa5a6fddf3
commit a4d4be4a1e
260 changed files with 59033 additions and 351 deletions

View File

@@ -0,0 +1,169 @@
"""
Database models for confidential transactions
"""
from datetime import datetime
from typing import Optional, Dict, Any, List
from sqlalchemy import Column, String, DateTime, Boolean, Text, JSON, Integer, LargeBinary
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.sql import func
import uuid
from ..database import Base
class ConfidentialTransactionDB(Base):
    """Database model for confidential transactions.

    Public routing fields (transaction/job ids, status) remain queryable in
    plaintext; the payload is stored as separate encrypted data/nonce/tag
    blobs (the nonce+tag columns suggest an AEAD cipher — confirm against
    the encryption service) plus per-participant wrapped keys.
    """
    __tablename__ = "confidential_transactions"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Public fields (always visible)
    transaction_id = Column(String(255), unique=True, nullable=False, index=True)
    job_id = Column(String(255), nullable=False, index=True)
    timestamp = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    status = Column(String(50), nullable=False, default="created")

    # Encryption metadata
    confidential = Column(Boolean, nullable=False, default=False)  # False => record stored in plaintext
    algorithm = Column(String(50), nullable=True)

    # Encrypted data (stored as binary)
    encrypted_data = Column(LargeBinary, nullable=True)
    encrypted_nonce = Column(LargeBinary, nullable=True)
    encrypted_tag = Column(LargeBinary, nullable=True)

    # Encrypted keys for participants (JSON encoded)
    encrypted_keys = Column(JSON, nullable=True)
    participants = Column(JSON, nullable=True)

    # Access policies
    access_policies = Column(JSON, nullable=True)

    # Audit fields
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
    created_by = Column(String(255), nullable=True)

    # Table options: no composite indexes are declared here (per-column
    # index=True flags above create the single-column ones); this only
    # places the table in the 'aitbc' schema.
    __table_args__ = (
        {'schema': 'aitbc'}
    )
class ParticipantKeyDB(Base):
    """Database model for participant encryption keys.

    One row per participant: an encrypted-at-rest private key, the matching
    public key, version/status tracking, and rotation/revocation timestamps.
    """
    __tablename__ = "participant_keys"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    participant_id = Column(String(255), unique=True, nullable=False, index=True)

    # Key data (private key is encrypted at rest; never stored in plaintext)
    encrypted_private_key = Column(LargeBinary, nullable=False)
    public_key = Column(LargeBinary, nullable=False)

    # Key metadata — version increments on rotation (see KeyRotationLogDB)
    algorithm = Column(String(50), nullable=False, default="X25519")
    version = Column(Integer, nullable=False, default=1)

    # Status — a revoked key keeps its row for audit purposes
    active = Column(Boolean, nullable=False, default=True)
    revoked_at = Column(DateTime(timezone=True), nullable=True)
    revoke_reason = Column(String(255), nullable=True)

    # Audit fields
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now())
    rotated_at = Column(DateTime(timezone=True), nullable=True)

    # Table options: place the table in the 'aitbc' schema
    __table_args__ = (
        {'schema': 'aitbc'}
    )
class ConfidentialAccessLogDB(Base):
    """Database model for confidential data access logs.

    Append-only audit trail: who (participant) did what (action/resource),
    why (purpose), and with what outcome, optionally tied to a transaction
    and an authorization.
    """
    __tablename__ = "confidential_access_logs"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Access details
    transaction_id = Column(String(255), nullable=True, index=True)
    participant_id = Column(String(255), nullable=False, index=True)
    purpose = Column(String(100), nullable=False)

    # Request details
    action = Column(String(100), nullable=False)
    resource = Column(String(100), nullable=False)
    outcome = Column(String(50), nullable=False)

    # Additional data
    details = Column(JSON, nullable=True)
    data_accessed = Column(JSON, nullable=True)

    # Metadata — 45 chars fits an IPv6 address in text form
    ip_address = Column(String(45), nullable=True)
    user_agent = Column(Text, nullable=True)
    authorization_id = Column(String(255), nullable=True)

    # Integrity — 128 hex chars matches a SHA-512 digest of the entry
    signature = Column(String(128), nullable=True)  # SHA-512 hash

    # Timestamps
    timestamp = Column(DateTime(timezone=True), server_default=func.now(), nullable=False, index=True)

    # Table options: place the table in the 'aitbc' schema
    __table_args__ = (
        {'schema': 'aitbc'}
    )
class KeyRotationLogDB(Base):
    """Database model for key rotation logs.

    Records each participant key rotation as an old-version -> new-version
    transition with timestamp, reason, and (optionally) the actor.
    """
    __tablename__ = "key_rotation_logs"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    participant_id = Column(String(255), nullable=False, index=True)
    old_version = Column(Integer, nullable=False)
    new_version = Column(Integer, nullable=False)

    # Rotation details
    rotated_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    reason = Column(String(255), nullable=False)

    # Who performed the rotation (nullable: may be an automated rotation)
    rotated_by = Column(String(255), nullable=True)

    # Table options: place the table in the 'aitbc' schema
    __table_args__ = (
        {'schema': 'aitbc'}
    )
class AuditAuthorizationDB(Base):
    """Database model for audit authorizations.

    A time-limited, signed grant from an issuer allowing a subject to
    access confidential data for a stated purpose.
    """
    __tablename__ = "audit_authorizations"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Authorization details
    issuer = Column(String(255), nullable=False)
    subject = Column(String(255), nullable=False)
    purpose = Column(String(100), nullable=False)

    # Validity period
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    expires_at = Column(DateTime(timezone=True), nullable=False, index=True)

    # Authorization data
    signature = Column(String(512), nullable=False)
    # "metadata" is reserved on SQLAlchemy declarative classes (it is the
    # MetaData registry) and raises InvalidRequestError at class-definition
    # time, so expose the column as extra_metadata while keeping the
    # physical column name "metadata".
    extra_metadata = Column("metadata", JSON, nullable=True)

    # Status
    active = Column(Boolean, nullable=False, default=True)
    revoked_at = Column(DateTime(timezone=True), nullable=True)
    used_at = Column(DateTime(timezone=True), nullable=True)

    # Table options: place the table in the 'aitbc' schema
    __table_args__ = (
        {'schema': 'aitbc'}
    )

View File

@@ -0,0 +1,340 @@
"""
Multi-tenant data models for AITBC coordinator
"""
from datetime import datetime, timedelta
from typing import Optional, Dict, Any, List
from enum import Enum
from sqlalchemy import Column, String, DateTime, Boolean, Integer, Text, JSON, ForeignKey, Index, Numeric
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship
import uuid
from ..database import Base
class TenantStatus(Enum):
    """Tenant lifecycle states.

    String values (not the member names) are what gets persisted — see
    Tenant.status, which defaults to TenantStatus.PENDING.value.
    """
    ACTIVE = "active"
    INACTIVE = "inactive"
    SUSPENDED = "suspended"
    PENDING = "pending"
    TRIAL = "trial"
class Tenant(Base):
    """Tenant model for multi-tenancy.

    Root entity of the tenancy hierarchy; users, quotas, and usage records
    hang off it via cascading relationships.
    """
    __tablename__ = "tenants"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Tenant information
    name = Column(String(255), nullable=False, index=True)
    slug = Column(String(100), unique=True, nullable=False, index=True)
    domain = Column(String(255), unique=True, nullable=True, index=True)

    # Status and configuration
    status = Column(String(50), nullable=False, default=TenantStatus.PENDING.value)
    plan = Column(String(50), nullable=False, default="trial")

    # Contact information
    contact_email = Column(String(255), nullable=False)
    billing_email = Column(String(255), nullable=True)

    # Configuration — callable defaults so every inserted row gets its own
    # fresh dict instead of all rows sharing one mutable {} instance.
    settings = Column(JSON, nullable=False, default=dict)
    features = Column(JSON, nullable=False, default=dict)

    # Timestamps
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)
    activated_at = Column(DateTime(timezone=True), nullable=True)
    deactivated_at = Column(DateTime(timezone=True), nullable=True)

    # Relationships — children are deleted with the tenant
    users = relationship("TenantUser", back_populates="tenant", cascade="all, delete-orphan")
    quotas = relationship("TenantQuota", back_populates="tenant", cascade="all, delete-orphan")
    usage_records = relationship("UsageRecord", back_populates="tenant", cascade="all, delete-orphan")

    # Indexes
    __table_args__ = (
        Index('idx_tenant_status', 'status'),
        Index('idx_tenant_plan', 'plan'),
        {'schema': 'aitbc'}
    )
class TenantUser(Base):
    """Association between users and tenants.

    Links an external auth-system user id to a tenant with a role,
    permission list, and membership status.
    """
    __tablename__ = "tenant_users"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign keys
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)
    user_id = Column(String(255), nullable=False)  # User ID from auth system

    # Role and permissions — callable default so each row gets its own list
    # rather than all rows sharing one mutable [] instance.
    role = Column(String(50), nullable=False, default="member")
    permissions = Column(JSON, nullable=False, default=list)

    # Status
    is_active = Column(Boolean, nullable=False, default=True)
    invited_at = Column(DateTime(timezone=True), nullable=True)
    joined_at = Column(DateTime(timezone=True), nullable=True)

    # Metadata — "metadata" is reserved on SQLAlchemy declarative classes
    # (raises InvalidRequestError at class-definition time), so expose it as
    # extra_metadata while keeping the physical column name "metadata".
    extra_metadata = Column("metadata", JSON, nullable=True)

    # Relationships
    tenant = relationship("Tenant", back_populates="users")

    # Indexes
    __table_args__ = (
        Index('idx_tenant_user', 'tenant_id', 'user_id'),
        Index('idx_user_tenants', 'user_id'),
        {'schema': 'aitbc'}
    )
class TenantQuota(Base):
    """Resource quotas for tenants.

    One row per (tenant, resource_type, period): a limit plus running usage
    for a rolling billing period.
    """
    __tablename__ = "tenant_quotas"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign key
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)

    # Quota definitions — Numeric(20, 4) keeps usage math exact (no float drift)
    resource_type = Column(String(100), nullable=False)  # gpu_hours, storage_gb, api_calls
    limit_value = Column(Numeric(20, 4), nullable=False)  # Maximum allowed
    used_value = Column(Numeric(20, 4), nullable=False, default=0)  # Current usage

    # Time period the quota applies to
    period_type = Column(String(50), nullable=False, default="monthly")  # daily, weekly, monthly
    period_start = Column(DateTime(timezone=True), nullable=False)
    period_end = Column(DateTime(timezone=True), nullable=False)

    # Status
    is_active = Column(Boolean, nullable=False, default=True)

    # Relationships
    tenant = relationship("Tenant", back_populates="quotas")

    # Indexes
    __table_args__ = (
        Index('idx_tenant_quota', 'tenant_id', 'resource_type', 'period_start'),
        Index('idx_quota_period', 'period_start', 'period_end'),
        {'schema': 'aitbc'}
    )
class UsageRecord(Base):
    """Usage tracking records for billing.

    One row per metered unit of consumption (resource, quantity, cost, and
    the time window it covers), optionally tied to a job.
    """
    __tablename__ = "usage_records"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign key
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)

    # Usage details
    resource_type = Column(String(100), nullable=False)  # gpu_hours, storage_gb, api_calls
    resource_id = Column(String(255), nullable=True)  # Specific resource ID
    quantity = Column(Numeric(20, 4), nullable=False)
    unit = Column(String(50), nullable=False)  # hours, gb, calls

    # Cost information — Numeric keeps monetary math exact
    unit_price = Column(Numeric(10, 4), nullable=False)
    total_cost = Column(Numeric(20, 4), nullable=False)
    currency = Column(String(10), nullable=False, default="USD")

    # Time tracking
    usage_start = Column(DateTime(timezone=True), nullable=False)
    usage_end = Column(DateTime(timezone=True), nullable=False)
    recorded_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)

    # Metadata — "metadata" is reserved on SQLAlchemy declarative classes
    # (raises InvalidRequestError at class-definition time), so expose it as
    # extra_metadata while keeping the physical column name "metadata".
    job_id = Column(String(255), nullable=True)  # Associated job if applicable
    extra_metadata = Column("metadata", JSON, nullable=True)

    # Relationships
    tenant = relationship("Tenant", back_populates="usage_records")

    # Indexes
    __table_args__ = (
        Index('idx_tenant_usage', 'tenant_id', 'usage_start'),
        Index('idx_usage_type', 'resource_type', 'usage_start'),
        Index('idx_usage_job', 'job_id'),
        {'schema': 'aitbc'}
    )
class Invoice(Base):
    """Billing invoices for tenants.

    Aggregates a billing period into line items with subtotal/tax/total
    amounts and payment tracking.
    """
    __tablename__ = "invoices"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign key
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)

    # Invoice details
    invoice_number = Column(String(100), unique=True, nullable=False, index=True)
    status = Column(String(50), nullable=False, default="draft")

    # Period covered and payment deadline
    period_start = Column(DateTime(timezone=True), nullable=False)
    period_end = Column(DateTime(timezone=True), nullable=False)
    due_date = Column(DateTime(timezone=True), nullable=False)

    # Amounts — Numeric keeps monetary math exact
    subtotal = Column(Numeric(20, 4), nullable=False)
    tax_amount = Column(Numeric(20, 4), nullable=False, default=0)
    total_amount = Column(Numeric(20, 4), nullable=False)
    currency = Column(String(10), nullable=False, default="USD")

    # Breakdown — callable default so every row gets its own fresh list
    # instead of all rows sharing one mutable [] instance.
    line_items = Column(JSON, nullable=False, default=list)

    # Payment
    paid_at = Column(DateTime(timezone=True), nullable=True)
    payment_method = Column(String(100), nullable=True)

    # Timestamps
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False)

    # Metadata — "metadata" is reserved on SQLAlchemy declarative classes
    # (raises InvalidRequestError at class-definition time), so expose it as
    # extra_metadata while keeping the physical column name "metadata".
    extra_metadata = Column("metadata", JSON, nullable=True)

    # Indexes
    __table_args__ = (
        Index('idx_invoice_tenant', 'tenant_id', 'period_start'),
        Index('idx_invoice_status', 'status'),
        Index('idx_invoice_due', 'due_date'),
        {'schema': 'aitbc'}
    )
class TenantApiKey(Base):
    """API keys for tenant authentication.

    Only a hash of the key material is stored (plus a short prefix for
    human identification); the full key is never persisted.
    """
    __tablename__ = "tenant_api_keys"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign key
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)

    # Key details
    key_id = Column(String(100), unique=True, nullable=False, index=True)
    key_hash = Column(String(255), unique=True, nullable=False, index=True)
    key_prefix = Column(String(20), nullable=False)  # First few characters for identification

    # Permissions and restrictions — callable default so each row gets its
    # own list rather than all rows sharing one mutable [] instance.
    permissions = Column(JSON, nullable=False, default=list)
    rate_limit = Column(Integer, nullable=True)  # Requests per minute
    allowed_ips = Column(JSON, nullable=True)  # IP whitelist

    # Status
    is_active = Column(Boolean, nullable=False, default=True)
    expires_at = Column(DateTime(timezone=True), nullable=True)
    last_used_at = Column(DateTime(timezone=True), nullable=True)

    # Metadata
    name = Column(String(255), nullable=False)
    description = Column(Text, nullable=True)
    created_by = Column(String(255), nullable=False)

    # Timestamps
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    revoked_at = Column(DateTime(timezone=True), nullable=True)

    # Indexes
    __table_args__ = (
        Index('idx_api_key_tenant', 'tenant_id', 'is_active'),
        Index('idx_api_key_hash', 'key_hash'),
        {'schema': 'aitbc'}
    )
class TenantAuditLog(Base):
    """Audit logs for tenant activities.

    Append-only event trail: actor, event type/category, the resource
    touched, and before/after values where applicable.
    """
    __tablename__ = "tenant_audit_logs"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign key
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)

    # Event details
    event_type = Column(String(100), nullable=False, index=True)
    event_category = Column(String(50), nullable=False, index=True)
    actor_id = Column(String(255), nullable=False)  # User who performed action
    actor_type = Column(String(50), nullable=False)  # user, api_key, system

    # Target information
    resource_type = Column(String(100), nullable=False)
    resource_id = Column(String(255), nullable=True)

    # Event data — "metadata" is reserved on SQLAlchemy declarative classes
    # (raises InvalidRequestError at class-definition time), so expose it as
    # extra_metadata while keeping the physical column name "metadata".
    old_values = Column(JSON, nullable=True)
    new_values = Column(JSON, nullable=True)
    extra_metadata = Column("metadata", JSON, nullable=True)

    # Request context — 45 chars fits an IPv6 address in text form
    ip_address = Column(String(45), nullable=True)
    user_agent = Column(Text, nullable=True)
    api_key_id = Column(String(100), nullable=True)

    # Timestamp
    created_at = Column(DateTime(timezone=True), server_default=func.now(), nullable=False, index=True)

    # Indexes
    __table_args__ = (
        Index('idx_audit_tenant', 'tenant_id', 'created_at'),
        Index('idx_audit_actor', 'actor_id', 'event_type'),
        Index('idx_audit_resource', 'resource_type', 'resource_id'),
        {'schema': 'aitbc'}
    )
class TenantMetric(Base):
    """Tenant-specific metrics and monitoring data.

    Time-series point: named metric with a numeric value, optional unit,
    and free-form dimensions for slicing.
    """
    __tablename__ = "tenant_metrics"

    # Primary key
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

    # Foreign key
    tenant_id = Column(UUID(as_uuid=True), ForeignKey('aitbc.tenants.id'), nullable=False)

    # Metric details
    metric_name = Column(String(100), nullable=False, index=True)
    metric_type = Column(String(50), nullable=False)  # counter, gauge, histogram

    # Value
    value = Column(Numeric(20, 4), nullable=False)
    unit = Column(String(50), nullable=True)

    # Dimensions — callable default so every row gets its own fresh dict
    # instead of all rows sharing one mutable {} instance.
    dimensions = Column(JSON, nullable=False, default=dict)

    # Time — no server default: callers supply the observation timestamp
    timestamp = Column(DateTime(timezone=True), nullable=False, index=True)

    # Indexes
    __table_args__ = (
        Index('idx_metric_tenant', 'tenant_id', 'metric_name', 'timestamp'),
        Index('idx_metric_time', 'timestamp'),
        {'schema': 'aitbc'}
    )

View File

@@ -0,0 +1,547 @@
"""
Dynamic service registry models for AITBC
"""
from typing import Dict, List, Any, Optional, Union
from datetime import datetime
from enum import Enum
from pydantic import BaseModel, Field, validator
class ServiceCategory(str, Enum):
    """Service categories.

    str-backed so members compare and serialize as their plain string
    values in JSON payloads and Pydantic models.
    """
    AI_ML = "ai_ml"
    MEDIA_PROCESSING = "media_processing"
    SCIENTIFIC_COMPUTING = "scientific_computing"
    DATA_ANALYTICS = "data_analytics"
    GAMING_ENTERTAINMENT = "gaming_entertainment"
    DEVELOPMENT_TOOLS = "development_tools"
class ParameterType(str, Enum):
    """Parameter types accepted by ParameterDefinition.type.

    str-backed so members compare and serialize as plain strings.
    """
    STRING = "string"
    INTEGER = "integer"
    FLOAT = "float"
    BOOLEAN = "boolean"
    ARRAY = "array"
    OBJECT = "object"
    FILE = "file"
    ENUM = "enum"  # closed set of choices; see ParameterDefinition.options
class PricingModel(str, Enum):
    """Pricing models used by PricingTier.model.

    str-backed so members compare and serialize as plain strings.
    """
    PER_UNIT = "per_unit"  # per image, per minute, per token
    PER_HOUR = "per_hour"
    PER_GB = "per_gb"
    PER_FRAME = "per_frame"
    FIXED = "fixed"
    CUSTOM = "custom"
class ParameterDefinition(BaseModel):
    """Parameter definition schema for a service input.

    NOTE(review): callers in this file also set min_value/max_value on
    STRING-typed parameters, presumably as length bounds — confirm how the
    job validator interprets them for non-numeric types.
    """
    name: str = Field(..., description="Parameter name")
    type: ParameterType = Field(..., description="Parameter type")
    required: bool = Field(True, description="Whether parameter is required")
    description: str = Field(..., description="Parameter description")
    default: Optional[Any] = Field(None, description="Default value")
    min_value: Optional[Union[int, float]] = Field(None, description="Minimum value")
    max_value: Optional[Union[int, float]] = Field(None, description="Maximum value")
    options: Optional[List[str]] = Field(None, description="Available options for enum type")
    validation: Optional[Dict[str, Any]] = Field(None, description="Custom validation rules")
class HardwareRequirement(BaseModel):
    """Hardware requirement definition for one component.

    min_value may be numeric (e.g. VRAM in GB) or a string constraint
    (e.g. a GPU vendor or CUDA version), depending on the component.
    """
    component: str = Field(..., description="Component type (gpu, cpu, ram, etc.)")
    min_value: Union[str, int, float] = Field(..., description="Minimum requirement")
    recommended: Optional[Union[str, int, float]] = Field(None, description="Recommended value")
    unit: Optional[str] = Field(None, description="Unit (GB, MB, cores, etc.)")
class PricingTier(BaseModel):
    """Pricing tier definition.

    unit_price and min_charge are validated non-negative; the billing unit
    itself is implied by the chosen PricingModel.
    """
    name: str = Field(..., description="Tier name")
    model: PricingModel = Field(..., description="Pricing model")
    unit_price: float = Field(..., ge=0, description="Price per unit")
    min_charge: Optional[float] = Field(None, ge=0, description="Minimum charge")
    currency: str = Field("AITBC", description="Currency code")
    description: Optional[str] = Field(None, description="Tier description")
class ServiceDefinition(BaseModel):
    """Complete service definition.

    Declarative description of one marketplace service: its inputs and
    output schema, hardware needs, pricing tiers, limits, and metadata.
    The id is validated and normalized to lowercase.
    """
    id: str = Field(..., description="Unique service identifier")
    name: str = Field(..., description="Human-readable service name")
    category: ServiceCategory = Field(..., description="Service category")
    description: str = Field(..., description="Service description")
    version: str = Field("1.0.0", description="Service version")
    icon: Optional[str] = Field(None, description="Icon emoji or URL")
    # Input/Output
    input_parameters: List[ParameterDefinition] = Field(..., description="Input parameters")
    output_schema: Dict[str, Any] = Field(..., description="Output schema")
    # Hardware requirements
    requirements: List[HardwareRequirement] = Field(..., description="Hardware requirements")
    # Pricing
    pricing: List[PricingTier] = Field(..., description="Available pricing tiers")
    # Capabilities
    capabilities: List[str] = Field(default_factory=list, description="Service capabilities")
    tags: List[str] = Field(default_factory=list, description="Search tags")
    # Limits
    max_concurrent: int = Field(1, ge=1, le=100, description="Max concurrent jobs")
    timeout_seconds: int = Field(3600, ge=60, description="Default timeout")
    # Metadata
    provider: Optional[str] = Field(None, description="Service provider")
    documentation_url: Optional[str] = Field(None, description="Documentation URL")
    example_usage: Optional[Dict[str, Any]] = Field(None, description="Example usage")

    @validator('id')
    def validate_id(cls, v: str) -> str:
        """Validate and normalize the service id.

        Rejects empty ids and any character outside [a-zA-Z0-9_-]:
        stripping '_' and '-' first lets isalnum() check the remainder.
        Returns the id lowercased.
        """
        if not v or not v.replace('_', '').replace('-', '').isalnum():
            raise ValueError('Service ID must contain only alphanumeric characters, hyphens, and underscores')
        return v.lower()
class ServiceRegistry(BaseModel):
    """Service registry containing all available services."""
    version: str = Field("1.0.0", description="Registry version")
    last_updated: datetime = Field(default_factory=datetime.utcnow, description="Last update time")
    services: Dict[str, ServiceDefinition] = Field(..., description="Service definitions by ID")

    def get_service(self, service_id: str) -> Optional[ServiceDefinition]:
        """Look up a single service definition by id, or None if absent."""
        return self.services.get(service_id)

    def get_services_by_category(self, category: ServiceCategory) -> List[ServiceDefinition]:
        """Return every registered service belonging to the given category."""
        return [
            definition
            for definition in self.services.values()
            if definition.category == category
        ]

    def search_services(self, query: str) -> List[ServiceDefinition]:
        """Case-insensitive substring search over name, description, and tags."""
        needle = query.lower()

        def _matches(definition: ServiceDefinition) -> bool:
            # A hit in any of name, description, or one of the tags counts.
            haystacks = [definition.name, definition.description, *definition.tags]
            return any(needle in text.lower() for text in haystacks)

        return [definition for definition in self.services.values() if _matches(definition)]
# Predefined service templates
#
# Static catalog of the built-in AI/ML service definitions, keyed by service
# id (each key matches the ServiceDefinition.id of its value). NOTE(review):
# several STRING parameters below use min_value/max_value, presumably as
# length bounds, and some INTEGER parameters pass `options` (typed List[str]
# on ParameterDefinition) — confirm how the validator treats both.
AI_ML_SERVICES = {
    # Text generation / chat completion on hosted LLMs
    "llm_inference": ServiceDefinition(
        id="llm_inference",
        name="LLM Inference",
        category=ServiceCategory.AI_ML,
        description="Run inference on large language models",
        icon="🤖",
        input_parameters=[
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Model to use for inference",
                options=["llama-7b", "llama-13b", "llama-70b", "mistral-7b", "mixtral-8x7b", "codellama-7b", "codellama-13b", "codellama-34b", "falcon-7b", "falcon-40b"]
            ),
            ParameterDefinition(
                name="prompt",
                type=ParameterType.STRING,
                required=True,
                description="Input prompt text",
                min_value=1,
                max_value=10000
            ),
            ParameterDefinition(
                name="max_tokens",
                type=ParameterType.INTEGER,
                required=False,
                description="Maximum tokens to generate",
                default=256,
                min_value=1,
                max_value=4096
            ),
            ParameterDefinition(
                name="temperature",
                type=ParameterType.FLOAT,
                required=False,
                description="Sampling temperature",
                default=0.7,
                min_value=0.0,
                max_value=2.0
            ),
            ParameterDefinition(
                name="stream",
                type=ParameterType.BOOLEAN,
                required=False,
                description="Stream response",
                default=False
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "text": {"type": "string"},
                "tokens_used": {"type": "integer"},
                "finish_reason": {"type": "string"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-4090"),
            HardwareRequirement(component="vram", min_value=8, recommended=24, unit="GB"),
            HardwareRequirement(component="cuda", min_value="11.8")
        ],
        pricing=[
            PricingTier(name="basic", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.01),
            PricingTier(name="premium", model=PricingModel.PER_UNIT, unit_price=0.002, min_charge=0.01)
        ],
        capabilities=["generate", "stream", "chat", "completion"],
        tags=["llm", "text", "generation", "ai", "nlp"],
        max_concurrent=2,
        timeout_seconds=300
    ),
    # Text-to-image via diffusion models
    "image_generation": ServiceDefinition(
        id="image_generation",
        name="Image Generation",
        category=ServiceCategory.AI_ML,
        description="Generate images from text prompts using diffusion models",
        icon="🎨",
        input_parameters=[
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Image generation model",
                options=["stable-diffusion-1.5", "stable-diffusion-2.1", "stable-diffusion-xl", "sdxl-turbo", "dall-e-2", "dall-e-3", "midjourney-v5"]
            ),
            ParameterDefinition(
                name="prompt",
                type=ParameterType.STRING,
                required=True,
                description="Text prompt for image generation",
                max_value=1000
            ),
            ParameterDefinition(
                name="negative_prompt",
                type=ParameterType.STRING,
                required=False,
                description="Negative prompt",
                max_value=1000
            ),
            ParameterDefinition(
                name="width",
                type=ParameterType.INTEGER,
                required=False,
                description="Image width",
                default=512,
                options=[256, 512, 768, 1024, 1536, 2048]
            ),
            ParameterDefinition(
                name="height",
                type=ParameterType.INTEGER,
                required=False,
                description="Image height",
                default=512,
                options=[256, 512, 768, 1024, 1536, 2048]
            ),
            ParameterDefinition(
                name="num_images",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of images to generate",
                default=1,
                min_value=1,
                max_value=4
            ),
            ParameterDefinition(
                name="steps",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of inference steps",
                default=20,
                min_value=1,
                max_value=100
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "images": {"type": "array", "items": {"type": "string"}},
                "parameters": {"type": "object"},
                "generation_time": {"type": "number"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-4090"),
            HardwareRequirement(component="vram", min_value=4, recommended=16, unit="GB"),
            HardwareRequirement(component="cuda", min_value="11.8")
        ],
        pricing=[
            PricingTier(name="standard", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.01),
            PricingTier(name="hd", model=PricingModel.PER_UNIT, unit_price=0.02, min_charge=0.02),
            PricingTier(name="4k", model=PricingModel.PER_UNIT, unit_price=0.05, min_charge=0.05)
        ],
        capabilities=["txt2img", "img2img", "inpainting", "outpainting"],
        tags=["image", "generation", "diffusion", "ai", "art"],
        max_concurrent=1,
        timeout_seconds=600
    ),
    # Text/image-to-video generation (heaviest hardware profile in this set)
    "video_generation": ServiceDefinition(
        id="video_generation",
        name="Video Generation",
        category=ServiceCategory.AI_ML,
        description="Generate videos from text or images",
        icon="🎬",
        input_parameters=[
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Video generation model",
                options=["sora", "runway-gen2", "pika-labs", "stable-video-diffusion", "make-a-video"]
            ),
            ParameterDefinition(
                name="prompt",
                type=ParameterType.STRING,
                required=True,
                description="Text prompt for video generation",
                max_value=500
            ),
            ParameterDefinition(
                name="duration_seconds",
                type=ParameterType.INTEGER,
                required=False,
                description="Video duration in seconds",
                default=4,
                min_value=1,
                max_value=30
            ),
            ParameterDefinition(
                name="fps",
                type=ParameterType.INTEGER,
                required=False,
                description="Frames per second",
                default=24,
                options=[12, 24, 30]
            ),
            ParameterDefinition(
                name="resolution",
                type=ParameterType.ENUM,
                required=False,
                description="Video resolution",
                default="720p",
                options=["480p", "720p", "1080p", "4k"]
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "video_url": {"type": "string"},
                "thumbnail_url": {"type": "string"},
                "duration": {"type": "number"},
                "resolution": {"type": "string"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="a100"),
            HardwareRequirement(component="vram", min_value=16, recommended=40, unit="GB"),
            HardwareRequirement(component="cuda", min_value="11.8")
        ],
        pricing=[
            PricingTier(name="short", model=PricingModel.PER_UNIT, unit_price=0.1, min_charge=0.1),
            PricingTier(name="medium", model=PricingModel.PER_UNIT, unit_price=0.25, min_charge=0.25),
            PricingTier(name="long", model=PricingModel.PER_UNIT, unit_price=0.5, min_charge=0.5)
        ],
        capabilities=["txt2video", "img2video", "video-editing"],
        tags=["video", "generation", "ai", "animation"],
        max_concurrent=1,
        timeout_seconds=1800
    ),
    # Audio transcription / translation (Whisper family)
    "speech_recognition": ServiceDefinition(
        id="speech_recognition",
        name="Speech Recognition",
        category=ServiceCategory.AI_ML,
        description="Transcribe audio to text using speech recognition models",
        icon="🎙️",
        input_parameters=[
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Speech recognition model",
                options=["whisper-tiny", "whisper-base", "whisper-small", "whisper-medium", "whisper-large", "whisper-large-v2", "whisper-large-v3"]
            ),
            ParameterDefinition(
                name="audio_file",
                type=ParameterType.FILE,
                required=True,
                description="Audio file to transcribe"
            ),
            ParameterDefinition(
                name="language",
                type=ParameterType.ENUM,
                required=False,
                description="Audio language",
                default="auto",
                options=["auto", "en", "es", "fr", "de", "it", "pt", "ru", "ja", "ko", "zh", "ar", "hi"]
            ),
            ParameterDefinition(
                name="task",
                type=ParameterType.ENUM,
                required=False,
                description="Task type",
                default="transcribe",
                options=["transcribe", "translate"]
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "text": {"type": "string"},
                "language": {"type": "string"},
                "segments": {"type": "array"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3060"),
            HardwareRequirement(component="vram", min_value=1, recommended=4, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_minute", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.01)
        ],
        capabilities=["transcribe", "translate", "timestamp", "speaker-diarization"],
        tags=["speech", "audio", "transcription", "whisper"],
        max_concurrent=2,
        timeout_seconds=600
    ),
    # Image analysis: detection, classification, segmentation, OCR
    "computer_vision": ServiceDefinition(
        id="computer_vision",
        name="Computer Vision",
        category=ServiceCategory.AI_ML,
        description="Analyze images with computer vision models",
        icon="👁️",
        input_parameters=[
            ParameterDefinition(
                name="task",
                type=ParameterType.ENUM,
                required=True,
                description="Vision task",
                options=["object-detection", "classification", "face-recognition", "segmentation", "ocr"]
            ),
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Vision model",
                options=["yolo-v8", "resnet-50", "efficientnet", "vit", "face-net", "tesseract"]
            ),
            ParameterDefinition(
                name="image",
                type=ParameterType.FILE,
                required=True,
                description="Input image"
            ),
            ParameterDefinition(
                name="confidence_threshold",
                type=ParameterType.FLOAT,
                required=False,
                description="Confidence threshold",
                default=0.5,
                min_value=0.0,
                max_value=1.0
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "detections": {"type": "array"},
                "labels": {"type": "array"},
                "confidence_scores": {"type": "array"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3060"),
            HardwareRequirement(component="vram", min_value=2, recommended=8, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_image", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.01)
        ],
        capabilities=["detection", "classification", "recognition", "segmentation", "ocr"],
        tags=["vision", "image", "analysis", "ai", "detection"],
        max_concurrent=4,
        timeout_seconds=120
    ),
    # Personalized recommendations over a supplied item catalog
    "recommendation_system": ServiceDefinition(
        id="recommendation_system",
        name="Recommendation System",
        category=ServiceCategory.AI_ML,
        description="Generate personalized recommendations",
        icon="🎯",
        input_parameters=[
            ParameterDefinition(
                name="model_type",
                type=ParameterType.ENUM,
                required=True,
                description="Recommendation model type",
                options=["collaborative", "content-based", "hybrid", "deep-learning"]
            ),
            ParameterDefinition(
                name="user_id",
                type=ParameterType.STRING,
                required=True,
                description="User identifier"
            ),
            ParameterDefinition(
                name="item_data",
                type=ParameterType.ARRAY,
                required=True,
                description="Item catalog data"
            ),
            ParameterDefinition(
                name="num_recommendations",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of recommendations",
                default=10,
                min_value=1,
                max_value=100
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "recommendations": {"type": "array"},
                "scores": {"type": "array"},
                "explanation": {"type": "string"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=4, recommended=12, unit="GB"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_request", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.01),
            PricingTier(name="bulk", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.1)
        ],
        capabilities=["personalization", "real-time", "batch", "ab-testing"],
        tags=["recommendation", "personalization", "ml", "ecommerce"],
        max_concurrent=10,
        timeout_seconds=60
    )
}

View File

@@ -0,0 +1,286 @@
"""
Data analytics service definitions
"""
from typing import Dict, List, Any, Union
from .registry import (
ServiceDefinition,
ServiceCategory,
ParameterDefinition,
ParameterType,
HardwareRequirement,
PricingTier,
PricingModel
)
# Catalog of data-analytics services offered on the marketplace, keyed by
# service id.  Each ServiceDefinition declares the user-facing input
# parameters, the JSON output schema, minimum/recommended hardware, and the
# pricing tiers under which the service can be billed.
DATA_ANALYTICS_SERVICES = {
    # GPU ETL / batch processing (RAPIDS-style SQL over large datasets).
    "big_data_processing": ServiceDefinition(
        id="big_data_processing",
        name="Big Data Processing",
        category=ServiceCategory.DATA_ANALYTICS,
        description="GPU-accelerated ETL and data processing with RAPIDS",
        icon="📊",
        input_parameters=[
            ParameterDefinition(
                name="operation",
                type=ParameterType.ENUM,
                required=True,
                description="Processing operation",
                options=["etl", "aggregate", "join", "filter", "transform", "clean"]
            ),
            ParameterDefinition(
                name="data_source",
                type=ParameterType.STRING,
                required=True,
                description="Data source URL or connection string"
            ),
            ParameterDefinition(
                name="query",
                type=ParameterType.STRING,
                required=True,
                description="SQL or data processing query"
            ),
            ParameterDefinition(
                name="output_format",
                type=ParameterType.ENUM,
                required=False,
                description="Output format",
                default="parquet",
                options=["parquet", "csv", "json", "delta", "orc"]
            ),
            ParameterDefinition(
                name="partition_by",
                type=ParameterType.ARRAY,
                required=False,
                description="Partition columns",
                items={"type": "string"}
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "output_url": {"type": "string"},
                "row_count": {"type": "integer"},
                "columns": {"type": "array"},
                "processing_stats": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="ram", min_value=32, recommended=128, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=1000, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_gb", model=PricingModel.PER_GB, unit_price=0.01, min_charge=0.1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1),
            PricingTier(name="enterprise", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.5)
        ],
        capabilities=["gpu-sql", "etl", "streaming", "distributed"],
        tags=["bigdata", "etl", "rapids", "spark", "sql"],
        max_concurrent=5,
        timeout_seconds=3600
    ),
    # Long-running stream processing (Kafka/Kinesis-style sources).
    "real_time_analytics": ServiceDefinition(
        id="real_time_analytics",
        name="Real-time Analytics",
        category=ServiceCategory.DATA_ANALYTICS,
        description="Stream processing and real-time analytics with GPU acceleration",
        # Fixed: icon was an empty string (likely a mis-encoded emoji) —
        # every other service in this catalog carries a display icon.
        icon="⚡",
        input_parameters=[
            ParameterDefinition(
                name="stream_source",
                type=ParameterType.STRING,
                required=True,
                description="Stream source (Kafka, Kinesis, etc.)"
            ),
            ParameterDefinition(
                name="query",
                type=ParameterType.STRING,
                required=True,
                description="Stream processing query"
            ),
            ParameterDefinition(
                name="window_size",
                type=ParameterType.STRING,
                required=False,
                description="Window size (e.g., 1m, 5m, 1h)",
                default="5m"
            ),
            ParameterDefinition(
                name="aggregations",
                type=ParameterType.ARRAY,
                required=True,
                description="Aggregation functions",
                items={"type": "string"}
            ),
            ParameterDefinition(
                name="output_sink",
                type=ParameterType.STRING,
                required=True,
                description="Output sink for results"
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "stream_id": {"type": "string"},
                "throughput": {"type": "number"},
                "latency_ms": {"type": "integer"},
                "metrics": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="a100"),
            HardwareRequirement(component="vram", min_value=16, recommended=40, unit="GB"),
            HardwareRequirement(component="network", min_value="10Gbps", recommended="100Gbps"),
            HardwareRequirement(component="ram", min_value=64, recommended=256, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=2, min_charge=2),
            PricingTier(name="per_million_events", model=PricingModel.PER_UNIT, unit_price=0.1, min_charge=1),
            PricingTier(name="high_throughput", model=PricingModel.PER_HOUR, unit_price=5, min_charge=5)
        ],
        capabilities=["streaming", "windowing", "aggregation", "cep"],
        tags=["streaming", "real-time", "analytics", "kafka", "flink"],
        max_concurrent=10,
        timeout_seconds=86400  # 24 hours
    ),
    # GPU graph algorithms (PageRank, community detection, ...).
    "graph_analytics": ServiceDefinition(
        id="graph_analytics",
        name="Graph Analytics",
        category=ServiceCategory.DATA_ANALYTICS,
        description="Network analysis and graph algorithms on GPU",
        icon="🕸️",
        input_parameters=[
            ParameterDefinition(
                name="algorithm",
                type=ParameterType.ENUM,
                required=True,
                description="Graph algorithm",
                options=["pagerank", "community-detection", "shortest-path", "triangles", "clustering", "centrality"]
            ),
            ParameterDefinition(
                name="graph_data",
                type=ParameterType.FILE,
                required=True,
                description="Graph data file (edges list, adjacency matrix, etc.)"
            ),
            ParameterDefinition(
                name="graph_format",
                type=ParameterType.ENUM,
                required=False,
                description="Graph format",
                default="edges",
                options=["edges", "adjacency", "csr", "metis"]
            ),
            ParameterDefinition(
                name="parameters",
                type=ParameterType.OBJECT,
                required=False,
                description="Algorithm-specific parameters"
            ),
            ParameterDefinition(
                name="num_vertices",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of vertices",
                min_value=1
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "results": {"type": "array"},
                "statistics": {"type": "object"},
                "graph_metrics": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3090"),
            HardwareRequirement(component="vram", min_value=8, recommended=24, unit="GB"),
            HardwareRequirement(component="ram", min_value=16, recommended=64, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_million_edges", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1),
            PricingTier(name="large_graph", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.5)
        ],
        capabilities=["gpu-graph", "algorithms", "network-analysis", "fraud-detection"],
        tags=["graph", "network", "analytics", "pagerank", "fraud"],
        max_concurrent=5,
        timeout_seconds=3600
    ),
    # Forecasting / anomaly detection over time-series data.
    "time_series_analysis": ServiceDefinition(
        id="time_series_analysis",
        name="Time Series Analysis",
        category=ServiceCategory.DATA_ANALYTICS,
        description="Analyze time series data with GPU-accelerated algorithms",
        icon="📈",
        input_parameters=[
            ParameterDefinition(
                name="analysis_type",
                type=ParameterType.ENUM,
                required=True,
                description="Analysis type",
                options=["forecasting", "anomaly-detection", "decomposition", "seasonality", "trend"]
            ),
            ParameterDefinition(
                name="time_series_data",
                type=ParameterType.FILE,
                required=True,
                description="Time series data file"
            ),
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Analysis model",
                options=["arima", "prophet", "lstm", "transformer", "holt-winters", "var"]
            ),
            ParameterDefinition(
                name="forecast_horizon",
                type=ParameterType.INTEGER,
                required=False,
                description="Forecast horizon",
                default=30,
                min_value=1,
                max_value=365
            ),
            ParameterDefinition(
                name="frequency",
                type=ParameterType.STRING,
                required=False,
                description="Data frequency (D, H, M, S)",
                default="D"
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "forecast": {"type": "array"},
                "confidence_intervals": {"type": "array"},
                "model_metrics": {"type": "object"},
                "anomalies": {"type": "array"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_1k_points", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.01),
            PricingTier(name="per_forecast", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.1),
            PricingTier(name="enterprise", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1)
        ],
        capabilities=["forecasting", "anomaly-detection", "decomposition", "seasonality"],
        tags=["time-series", "forecasting", "anomaly", "arima", "lstm"],
        max_concurrent=10,
        timeout_seconds=1800
    )
}

View File

@@ -0,0 +1,408 @@
"""
Development tools service definitions
"""
from typing import Dict, List, Any, Union
from .registry import (
ServiceDefinition,
ServiceCategory,
ParameterDefinition,
ParameterType,
HardwareRequirement,
PricingTier,
PricingModel
)
# Catalog of developer-tooling services offered on the marketplace, keyed by
# service id.  Each ServiceDefinition declares the user-facing input
# parameters, the JSON output schema, minimum/recommended hardware, and the
# pricing tiers under which the service can be billed.
DEVTOOLS_SERVICES = {
    # GPU-target compilation (CUDA/HIP/OpenCL) with parallel build jobs.
    "gpu_compilation": ServiceDefinition(
        id="gpu_compilation",
        name="GPU-Accelerated Compilation",
        category=ServiceCategory.DEVELOPMENT_TOOLS,
        description="Compile code with GPU acceleration (CUDA, HIP, OpenCL)",
        icon="⚙️",
        input_parameters=[
            ParameterDefinition(
                name="language",
                type=ParameterType.ENUM,
                required=True,
                description="Programming language",
                options=["cpp", "cuda", "hip", "opencl", "metal", "sycl"]
            ),
            ParameterDefinition(
                name="source_files",
                type=ParameterType.ARRAY,
                required=True,
                description="Source code files",
                items={"type": "string"}
            ),
            ParameterDefinition(
                name="build_type",
                type=ParameterType.ENUM,
                required=False,
                description="Build type",
                default="release",
                options=["debug", "release", "relwithdebinfo"]
            ),
            ParameterDefinition(
                name="target_arch",
                type=ParameterType.ENUM,
                required=False,
                description="Target architecture",
                default="sm_70",
                options=["sm_60", "sm_70", "sm_80", "sm_86", "sm_89", "sm_90"]
            ),
            ParameterDefinition(
                name="optimization_level",
                type=ParameterType.ENUM,
                required=False,
                description="Optimization level",
                default="O2",
                options=["O0", "O1", "O2", "O3", "Os"]
            ),
            ParameterDefinition(
                name="parallel_jobs",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of parallel compilation jobs",
                default=4,
                min_value=1,
                max_value=64
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "binary_url": {"type": "string"},
                "build_log": {"type": "string"},
                "compilation_time": {"type": "number"},
                "binary_size": {"type": "integer"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=4, recommended=8, unit="GB"),
            HardwareRequirement(component="cpu", min_value=8, recommended=16, unit="cores"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB"),
            HardwareRequirement(component="cuda", min_value="11.8")
        ],
        pricing=[
            PricingTier(name="per_minute", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.1),
            PricingTier(name="per_file", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.01),
            PricingTier(name="enterprise", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1)
        ],
        capabilities=["cuda", "hip", "parallel-compilation", "incremental"],
        tags=["compilation", "cuda", "gpu", "cpp", "build"],
        max_concurrent=5,
        timeout_seconds=1800
    ),
    # Training / fine-tuning of ML models on client-supplied datasets.
    "model_training": ServiceDefinition(
        id="model_training",
        name="ML Model Training",
        category=ServiceCategory.DEVELOPMENT_TOOLS,
        description="Fine-tune or train machine learning models on client data",
        icon="🧠",
        input_parameters=[
            ParameterDefinition(
                name="model_type",
                type=ParameterType.ENUM,
                required=True,
                description="Model type",
                options=["transformer", "cnn", "rnn", "gan", "diffusion", "custom"]
            ),
            ParameterDefinition(
                name="base_model",
                type=ParameterType.STRING,
                required=False,
                description="Base model to fine-tune"
            ),
            ParameterDefinition(
                name="training_data",
                type=ParameterType.FILE,
                required=True,
                description="Training dataset"
            ),
            ParameterDefinition(
                name="validation_data",
                type=ParameterType.FILE,
                required=False,
                description="Validation dataset"
            ),
            ParameterDefinition(
                name="epochs",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of training epochs",
                default=10,
                min_value=1,
                max_value=1000
            ),
            ParameterDefinition(
                name="batch_size",
                type=ParameterType.INTEGER,
                required=False,
                description="Batch size",
                default=32,
                min_value=1,
                max_value=1024
            ),
            ParameterDefinition(
                name="learning_rate",
                type=ParameterType.FLOAT,
                required=False,
                description="Learning rate",
                default=0.001,
                min_value=0.00001,
                max_value=1
            ),
            ParameterDefinition(
                name="hyperparameters",
                type=ParameterType.OBJECT,
                required=False,
                description="Additional hyperparameters"
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "model_url": {"type": "string"},
                "training_metrics": {"type": "object"},
                "loss_curves": {"type": "array"},
                "validation_scores": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="a100"),
            HardwareRequirement(component="vram", min_value=16, recommended=40, unit="GB"),
            HardwareRequirement(component="cpu", min_value=16, recommended=32, unit="cores"),
            HardwareRequirement(component="ram", min_value=32, recommended=128, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=1000, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_epoch", model=PricingModel.PER_UNIT, unit_price=0.1, min_charge=1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=2, min_charge=2),
            PricingTier(name="enterprise", model=PricingModel.PER_UNIT, unit_price=0.05, min_charge=0.5)
        ],
        capabilities=["fine-tuning", "training", "hyperparameter-tuning", "distributed"],
        tags=["ml", "training", "fine-tuning", "pytorch", "tensorflow"],
        max_concurrent=2,
        timeout_seconds=86400  # 24 hours
    ),
    # Chunked preprocessing/transformation of large datasets.
    "data_processing": ServiceDefinition(
        id="data_processing",
        name="Large Dataset Processing",
        category=ServiceCategory.DEVELOPMENT_TOOLS,
        description="Preprocess and transform large datasets",
        icon="📦",
        input_parameters=[
            ParameterDefinition(
                name="operation",
                type=ParameterType.ENUM,
                required=True,
                description="Processing operation",
                options=["clean", "transform", "normalize", "augment", "split", "encode"]
            ),
            ParameterDefinition(
                name="input_data",
                type=ParameterType.FILE,
                required=True,
                description="Input dataset"
            ),
            ParameterDefinition(
                name="output_format",
                type=ParameterType.ENUM,
                required=False,
                description="Output format",
                default="parquet",
                options=["csv", "json", "parquet", "hdf5", "feather", "pickle"]
            ),
            ParameterDefinition(
                name="chunk_size",
                type=ParameterType.INTEGER,
                required=False,
                description="Processing chunk size",
                default=10000,
                min_value=100,
                max_value=1000000
            ),
            ParameterDefinition(
                name="parameters",
                type=ParameterType.OBJECT,
                required=False,
                description="Operation-specific parameters"
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "output_url": {"type": "string"},
                "processing_stats": {"type": "object"},
                "data_quality": {"type": "object"},
                "row_count": {"type": "integer"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="any", recommended="nvidia"),
            HardwareRequirement(component="vram", min_value=4, recommended=16, unit="GB"),
            HardwareRequirement(component="ram", min_value=16, recommended=64, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=1000, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_gb", model=PricingModel.PER_GB, unit_price=0.01, min_charge=0.1),
            PricingTier(name="per_million_rows", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.1),
            PricingTier(name="enterprise", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1)
        ],
        capabilities=["gpu-processing", "parallel", "streaming", "validation"],
        tags=["data", "preprocessing", "etl", "cleaning", "transformation"],
        max_concurrent=5,
        timeout_seconds=3600
    ),
    # Long-running hardware/firmware test campaigns.
    "simulation_testing": ServiceDefinition(
        id="simulation_testing",
        name="Hardware-in-the-Loop Testing",
        category=ServiceCategory.DEVELOPMENT_TOOLS,
        description="Run hardware simulations and testing workflows",
        icon="🔬",
        input_parameters=[
            ParameterDefinition(
                name="test_type",
                type=ParameterType.ENUM,
                required=True,
                description="Test type",
                options=["hardware", "firmware", "software", "integration", "performance"]
            ),
            ParameterDefinition(
                name="test_suite",
                type=ParameterType.FILE,
                required=True,
                description="Test suite configuration"
            ),
            ParameterDefinition(
                name="hardware_config",
                type=ParameterType.OBJECT,
                required=True,
                description="Hardware configuration"
            ),
            ParameterDefinition(
                name="duration",
                # Fixed: was ParameterType.INTEGER with min_value=0.1 — a
                # fractional lower bound is invalid for an integer parameter.
                # FLOAT preserves the intended 0.1–168 hour range.
                type=ParameterType.FLOAT,
                required=False,
                description="Test duration in hours",
                default=1,
                min_value=0.1,
                max_value=168  # 1 week
            ),
            ParameterDefinition(
                name="parallel_tests",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of parallel tests",
                default=1,
                min_value=1,
                max_value=10
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "test_results": {"type": "array"},
                "performance_metrics": {"type": "object"},
                "failure_logs": {"type": "array"},
                "coverage_report": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="any", recommended="nvidia"),
            HardwareRequirement(component="cpu", min_value=16, recommended=32, unit="cores"),
            HardwareRequirement(component="ram", min_value=32, recommended=128, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=500, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=2, min_charge=1),
            PricingTier(name="per_test", model=PricingModel.PER_UNIT, unit_price=0.1, min_charge=0.5),
            PricingTier(name="continuous", model=PricingModel.PER_HOUR, unit_price=5, min_charge=5)
        ],
        capabilities=["hardware-simulation", "automated-testing", "performance", "debugging"],
        tags=["testing", "simulation", "hardware", "hil", "verification"],
        max_concurrent=3,
        timeout_seconds=604800  # 1 week
    ),
    # Natural-language → code generation with optional test generation.
    "code_generation": ServiceDefinition(
        id="code_generation",
        name="AI Code Generation",
        category=ServiceCategory.DEVELOPMENT_TOOLS,
        description="Generate code from natural language descriptions",
        icon="💻",
        input_parameters=[
            ParameterDefinition(
                name="language",
                type=ParameterType.ENUM,
                required=True,
                description="Target programming language",
                options=["python", "javascript", "cpp", "java", "go", "rust", "typescript", "sql"]
            ),
            ParameterDefinition(
                name="description",
                type=ParameterType.STRING,
                required=True,
                description="Natural language description of code to generate",
                # NOTE(review): max_value on a STRING parameter — presumably a
                # maximum character length; confirm the registry validator
                # interprets it that way.
                max_value=2000
            ),
            ParameterDefinition(
                name="framework",
                type=ParameterType.STRING,
                required=False,
                description="Target framework or library"
            ),
            ParameterDefinition(
                name="code_style",
                type=ParameterType.ENUM,
                required=False,
                description="Code style preferences",
                default="standard",
                options=["standard", "functional", "oop", "minimalist"]
            ),
            ParameterDefinition(
                name="include_comments",
                type=ParameterType.BOOLEAN,
                required=False,
                description="Include explanatory comments",
                default=True
            ),
            ParameterDefinition(
                name="include_tests",
                type=ParameterType.BOOLEAN,
                required=False,
                description="Generate unit tests",
                default=False
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "generated_code": {"type": "string"},
                "explanation": {"type": "string"},
                "usage_example": {"type": "string"},
                "test_code": {"type": "string"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="ram", min_value=8, recommended=16, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_generation", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.01),
            PricingTier(name="per_100_lines", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.01),
            PricingTier(name="with_tests", model=PricingModel.PER_UNIT, unit_price=0.02, min_charge=0.02)
        ],
        capabilities=["code-gen", "documentation", "test-gen", "refactoring"],
        tags=["code", "generation", "ai", "copilot", "automation"],
        max_concurrent=10,
        timeout_seconds=120
    )
}

View File

@@ -0,0 +1,307 @@
"""
Gaming & entertainment service definitions
"""
from typing import Dict, List, Any, Union
from .registry import (
ServiceDefinition,
ServiceCategory,
ParameterDefinition,
ParameterType,
HardwareRequirement,
PricingTier,
PricingModel
)
# Catalog of gaming & entertainment services offered on the marketplace,
# keyed by service id.  Each ServiceDefinition declares the user-facing input
# parameters, the JSON output schema, minimum/recommended hardware, and the
# pricing tiers under which the service can be billed.
GAMING_SERVICES = {
    # Interactive cloud-gaming session with GPU video streaming.
    "cloud_gaming": ServiceDefinition(
        id="cloud_gaming",
        name="Cloud Gaming Server",
        category=ServiceCategory.GAMING_ENTERTAINMENT,
        description="Host cloud gaming sessions with GPU streaming",
        icon="🎮",
        input_parameters=[
            ParameterDefinition(
                name="game",
                type=ParameterType.STRING,
                required=True,
                description="Game title or executable"
            ),
            ParameterDefinition(
                name="resolution",
                type=ParameterType.ENUM,
                required=True,
                description="Streaming resolution",
                options=["720p", "1080p", "1440p", "4k"]
            ),
            ParameterDefinition(
                name="fps",
                # NOTE(review): `options` on an INTEGER parameter — presumably
                # restricts fps to these values; confirm the registry supports
                # options for non-ENUM types.
                type=ParameterType.INTEGER,
                required=False,
                description="Target frame rate",
                default=60,
                options=[30, 60, 120, 144]
            ),
            ParameterDefinition(
                name="session_duration",
                type=ParameterType.INTEGER,
                required=True,
                description="Session duration in minutes",
                min_value=15,
                max_value=480
            ),
            ParameterDefinition(
                name="codec",
                type=ParameterType.ENUM,
                required=False,
                description="Streaming codec",
                default="h264",
                options=["h264", "h265", "av1", "vp9"]
            ),
            ParameterDefinition(
                name="region",
                type=ParameterType.STRING,
                required=False,
                description="Preferred server region"
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "stream_url": {"type": "string"},
                "session_id": {"type": "string"},
                "latency_ms": {"type": "integer"},
                "quality_metrics": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="network", min_value="100Mbps", recommended="1Gbps"),
            HardwareRequirement(component="cpu", min_value=8, recommended=16, unit="cores"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=1, min_charge=0.5),
            PricingTier(name="1080p", model=PricingModel.PER_HOUR, unit_price=1.5, min_charge=0.75),
            PricingTier(name="4k", model=PricingModel.PER_HOUR, unit_price=3, min_charge=1.5)
        ],
        capabilities=["low-latency", "game-streaming", "multiplayer", "saves"],
        tags=["gaming", "cloud", "streaming", "nvidia", "gamepass"],
        # Single session per provider at a time.
        max_concurrent=1,
        timeout_seconds=28800  # 8 hours
    ),
    # Batch optimization/baking of game assets for a target platform.
    "game_asset_baking": ServiceDefinition(
        id="game_asset_baking",
        name="Game Asset Baking",
        category=ServiceCategory.GAMING_ENTERTAINMENT,
        description="Optimize and bake game assets (textures, meshes, materials)",
        icon="🎨",
        input_parameters=[
            ParameterDefinition(
                name="asset_type",
                type=ParameterType.ENUM,
                required=True,
                description="Asset type",
                options=["texture", "mesh", "material", "animation", "terrain"]
            ),
            ParameterDefinition(
                name="input_assets",
                type=ParameterType.ARRAY,
                required=True,
                description="Input asset files",
                items={"type": "string"}
            ),
            ParameterDefinition(
                name="target_platform",
                type=ParameterType.ENUM,
                required=True,
                description="Target platform",
                options=["pc", "mobile", "console", "web", "vr"]
            ),
            ParameterDefinition(
                name="optimization_level",
                type=ParameterType.ENUM,
                required=False,
                description="Optimization level",
                default="balanced",
                options=["fast", "balanced", "maximum"]
            ),
            ParameterDefinition(
                name="texture_formats",
                type=ParameterType.ARRAY,
                required=False,
                description="Output texture formats",
                default=["dds", "astc"],
                items={"type": "string"}
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "baked_assets": {"type": "array"},
                "compression_stats": {"type": "object"},
                "optimization_report": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB"),
            HardwareRequirement(component="storage", min_value=50, recommended=500, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_asset", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.1),
            PricingTier(name="per_texture", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.05),
            PricingTier(name="per_mesh", model=PricingModel.PER_UNIT, unit_price=0.02, min_charge=0.1)
        ],
        capabilities=["texture-compression", "mesh-optimization", "lod-generation", "platform-specific"],
        tags=["gamedev", "assets", "optimization", "textures", "meshes"],
        max_concurrent=5,
        timeout_seconds=1800
    ),
    # Offline physics simulation runs for game development.
    "physics_simulation": ServiceDefinition(
        id="physics_simulation",
        name="Game Physics Simulation",
        category=ServiceCategory.GAMING_ENTERTAINMENT,
        description="Run physics simulations for game development",
        icon="⚛️",
        input_parameters=[
            ParameterDefinition(
                name="engine",
                type=ParameterType.ENUM,
                required=True,
                description="Physics engine",
                options=["physx", "havok", "bullet", "box2d", "chipmunk"]
            ),
            ParameterDefinition(
                name="simulation_type",
                type=ParameterType.ENUM,
                required=True,
                description="Simulation type",
                options=["rigid-body", "soft-body", "fluid", "cloth", "destruction"]
            ),
            ParameterDefinition(
                name="scene_file",
                type=ParameterType.FILE,
                required=False,
                description="Scene or level file"
            ),
            ParameterDefinition(
                name="parameters",
                type=ParameterType.OBJECT,
                required=True,
                description="Physics parameters"
            ),
            ParameterDefinition(
                name="simulation_time",
                type=ParameterType.FLOAT,
                required=True,
                description="Simulation duration in seconds",
                min_value=0.1
            ),
            ParameterDefinition(
                name="record_frames",
                type=ParameterType.BOOLEAN,
                required=False,
                description="Record animation frames",
                default=False
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "simulation_data": {"type": "array"},
                "animation_url": {"type": "string"},
                "physics_stats": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="cpu", min_value=8, recommended=16, unit="cores"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=1, min_charge=0.5),
            PricingTier(name="per_frame", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.1),
            PricingTier(name="complex", model=PricingModel.PER_HOUR, unit_price=2, min_charge=1)
        ],
        capabilities=["gpu-physics", "particle-systems", "destruction", "cloth"],
        tags=["physics", "gamedev", "simulation", "physx", "havok"],
        max_concurrent=3,
        timeout_seconds=3600
    ),
    # Real-time stereo rendering for VR/AR headsets and WebXR.
    "vr_ar_rendering": ServiceDefinition(
        id="vr_ar_rendering",
        name="VR/AR Rendering",
        category=ServiceCategory.GAMING_ENTERTAINMENT,
        description="Real-time 3D rendering for VR/AR applications",
        icon="🥽",
        input_parameters=[
            ParameterDefinition(
                name="platform",
                type=ParameterType.ENUM,
                required=True,
                description="Target platform",
                options=["oculus", "vive", "hololens", "magic-leap", "cardboard", "webxr"]
            ),
            ParameterDefinition(
                name="scene_file",
                type=ParameterType.FILE,
                required=True,
                description="3D scene file"
            ),
            ParameterDefinition(
                name="render_quality",
                type=ParameterType.ENUM,
                required=False,
                description="Render quality",
                default="high",
                options=["low", "medium", "high", "ultra"]
            ),
            ParameterDefinition(
                name="stereo_mode",
                type=ParameterType.BOOLEAN,
                required=False,
                description="Stereo rendering",
                default=True
            ),
            ParameterDefinition(
                name="target_fps",
                # NOTE(review): `options` on an INTEGER parameter — presumably
                # restricts fps to these values; confirm the registry supports
                # options for non-ENUM types.
                type=ParameterType.INTEGER,
                required=False,
                description="Target frame rate",
                default=90,
                options=[60, 72, 90, 120, 144]
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "rendered_frames": {"type": "array"},
                "performance_metrics": {"type": "object"},
                "vr_package": {"type": "string"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="cpu", min_value=8, recommended=16, unit="cores"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_minute", model=PricingModel.PER_UNIT, unit_price=0.02, min_charge=0.5),
            PricingTier(name="per_frame", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.1),
            PricingTier(name="real-time", model=PricingModel.PER_HOUR, unit_price=5, min_charge=1)
        ],
        capabilities=["stereo-rendering", "real-time", "low-latency", "tracking"],
        tags=["vr", "ar", "rendering", "3d", "immersive"],
        max_concurrent=2,
        timeout_seconds=3600
    )
}

View File

@@ -0,0 +1,412 @@
"""
Media processing service definitions
"""
from typing import Dict, List, Any, Union
from .registry import (
ServiceDefinition,
ServiceCategory,
ParameterDefinition,
ParameterType,
HardwareRequirement,
PricingTier,
PricingModel
)
# Catalog of media-processing services offered on the marketplace, keyed by
# service id.  Each entry declares inputs, output schema, hardware needs and
# pricing tiers using the shared registry models.
MEDIA_PROCESSING_SERVICES = {
    # GPU-accelerated FFmpeg transcoding between container formats/codecs.
    "video_transcoding": ServiceDefinition(
        id="video_transcoding",
        name="Video Transcoding",
        category=ServiceCategory.MEDIA_PROCESSING,
        description="Transcode videos between formats using FFmpeg with GPU acceleration",
        icon="🎬",
        input_parameters=[
            ParameterDefinition(
                name="input_video",
                type=ParameterType.FILE,
                required=True,
                description="Input video file"
            ),
            ParameterDefinition(
                name="output_format",
                type=ParameterType.ENUM,
                required=True,
                description="Output video format",
                options=["mp4", "webm", "avi", "mov", "mkv", "flv"]
            ),
            ParameterDefinition(
                name="codec",
                type=ParameterType.ENUM,
                required=False,
                description="Video codec",
                default="h264",
                options=["h264", "h265", "vp9", "av1", "mpeg4"]
            ),
            ParameterDefinition(
                name="resolution",
                type=ParameterType.STRING,
                required=False,
                description="Output resolution (e.g., 1920x1080)",
                validation={"pattern": r"^\d+x\d+$"}
            ),
            ParameterDefinition(
                name="bitrate",
                type=ParameterType.STRING,
                required=False,
                description="Target bitrate (e.g., 5M, 2500k)",
                validation={"pattern": r"^\d+[kM]?$"}
            ),
            ParameterDefinition(
                name="fps",
                type=ParameterType.INTEGER,
                required=False,
                description="Output frame rate",
                min_value=1,
                max_value=120
            ),
            ParameterDefinition(
                name="gpu_acceleration",
                type=ParameterType.BOOLEAN,
                required=False,
                description="Use GPU acceleration",
                default=True
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "output_url": {"type": "string"},
                "metadata": {"type": "object"},
                "duration": {"type": "number"},
                "file_size": {"type": "integer"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="any", recommended="nvidia"),
            HardwareRequirement(component="vram", min_value=2, recommended=8, unit="GB"),
            HardwareRequirement(component="ram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="storage", min_value=50, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_minute", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.01),
            PricingTier(name="per_gb", model=PricingModel.PER_GB, unit_price=0.01, min_charge=0.01),
            PricingTier(name="4k_premium", model=PricingModel.PER_UNIT, unit_price=0.02, min_charge=0.05)
        ],
        capabilities=["transcode", "compress", "resize", "format-convert"],
        tags=["video", "ffmpeg", "transcoding", "encoding", "gpu"],
        max_concurrent=2,
        timeout_seconds=3600
    ),
    # Real-time transcoding of a live input stream into adaptive-bitrate renditions.
    "video_streaming": ServiceDefinition(
        id="video_streaming",
        name="Live Video Streaming",
        category=ServiceCategory.MEDIA_PROCESSING,
        description="Real-time video transcoding for adaptive bitrate streaming",
        icon="📡",
        input_parameters=[
            ParameterDefinition(
                name="stream_url",
                type=ParameterType.STRING,
                required=True,
                description="Input stream URL"
            ),
            ParameterDefinition(
                name="output_formats",
                type=ParameterType.ARRAY,
                required=True,
                description="Output formats for adaptive streaming",
                default=["720p", "1080p", "4k"]
            ),
            ParameterDefinition(
                name="duration_minutes",
                type=ParameterType.INTEGER,
                required=False,
                description="Streaming duration in minutes",
                default=60,
                min_value=1,
                max_value=480
            ),
            ParameterDefinition(
                name="protocol",
                type=ParameterType.ENUM,
                required=False,
                description="Streaming protocol",
                default="hls",
                options=["hls", "dash", "rtmp", "webrtc"]
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "stream_url": {"type": "string"},
                "playlist_url": {"type": "string"},
                "bitrates": {"type": "array"},
                "duration": {"type": "number"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="network", min_value="1Gbps", recommended="10Gbps"),
            HardwareRequirement(component="ram", min_value=16, recommended=32, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_minute", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.5),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=0.5, min_charge=0.5)
        ],
        capabilities=["live-transcoding", "adaptive-bitrate", "multi-format", "low-latency"],
        tags=["streaming", "live", "transcoding", "real-time"],
        max_concurrent=5,
        timeout_seconds=28800  # 8 hours
    ),
    # Offline scene rendering (stills and animations) via common render engines.
    "3d_rendering": ServiceDefinition(
        id="3d_rendering",
        name="3D Rendering",
        category=ServiceCategory.MEDIA_PROCESSING,
        description="Render 3D scenes using Blender, Unreal Engine, or V-Ray",
        icon="🎭",
        input_parameters=[
            ParameterDefinition(
                name="engine",
                type=ParameterType.ENUM,
                required=True,
                description="Rendering engine",
                options=["blender-cycles", "blender-eevee", "unreal-engine", "v-ray", "octane"]
            ),
            ParameterDefinition(
                name="scene_file",
                type=ParameterType.FILE,
                required=True,
                description="3D scene file (.blend, .ueproject, etc)"
            ),
            ParameterDefinition(
                name="resolution_x",
                type=ParameterType.INTEGER,
                required=False,
                description="Output width",
                default=1920,
                min_value=1,
                max_value=8192
            ),
            ParameterDefinition(
                name="resolution_y",
                type=ParameterType.INTEGER,
                required=False,
                description="Output height",
                default=1080,
                min_value=1,
                max_value=8192
            ),
            ParameterDefinition(
                name="samples",
                type=ParameterType.INTEGER,
                required=False,
                description="Samples per pixel (path tracing)",
                default=128,
                min_value=1,
                max_value=10000
            ),
            ParameterDefinition(
                name="frame_start",
                type=ParameterType.INTEGER,
                required=False,
                description="Start frame for animation",
                default=1,
                min_value=1
            ),
            ParameterDefinition(
                name="frame_end",
                type=ParameterType.INTEGER,
                required=False,
                description="End frame for animation",
                default=1,
                min_value=1
            ),
            ParameterDefinition(
                name="output_format",
                type=ParameterType.ENUM,
                required=False,
                description="Output image format",
                default="png",
                options=["png", "jpg", "exr", "bmp", "tiff", "hdr"]
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "rendered_images": {"type": "array"},
                "metadata": {"type": "object"},
                "render_time": {"type": "number"},
                "frame_count": {"type": "integer"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-4090"),
            HardwareRequirement(component="vram", min_value=8, recommended=24, unit="GB"),
            HardwareRequirement(component="ram", min_value=16, recommended=64, unit="GB"),
            HardwareRequirement(component="cpu", min_value=8, recommended=16, unit="cores")
        ],
        pricing=[
            PricingTier(name="per_frame", model=PricingModel.PER_FRAME, unit_price=0.01, min_charge=0.1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=0.5, min_charge=0.5),
            PricingTier(name="4k_premium", model=PricingModel.PER_FRAME, unit_price=0.05, min_charge=0.5)
        ],
        capabilities=["path-tracing", "ray-tracing", "animation", "gpu-render"],
        tags=["3d", "rendering", "blender", "unreal", "v-ray"],
        max_concurrent=2,
        timeout_seconds=7200
    ),
    # Bulk image transformation pipeline (filters, resize, format conversion).
    "image_processing": ServiceDefinition(
        id="image_processing",
        name="Batch Image Processing",
        category=ServiceCategory.MEDIA_PROCESSING,
        description="Process images in bulk with filters, effects, and format conversion",
        icon="🖼️",
        input_parameters=[
            ParameterDefinition(
                name="images",
                type=ParameterType.ARRAY,
                required=True,
                description="Array of image files or URLs"
            ),
            ParameterDefinition(
                name="operations",
                type=ParameterType.ARRAY,
                required=True,
                description="Processing operations to apply",
                items={
                    "type": "object",
                    "properties": {
                        "type": {"type": "string"},
                        "params": {"type": "object"}
                    }
                }
            ),
            ParameterDefinition(
                name="output_format",
                type=ParameterType.ENUM,
                required=False,
                description="Output format",
                default="jpg",
                options=["jpg", "png", "webp", "avif", "tiff", "bmp"]
            ),
            ParameterDefinition(
                name="quality",
                type=ParameterType.INTEGER,
                required=False,
                description="Output quality (1-100)",
                default=90,
                min_value=1,
                max_value=100
            ),
            ParameterDefinition(
                name="resize",
                type=ParameterType.STRING,
                required=False,
                description="Resize dimensions (e.g., 1920x1080, 50%)",
                # Fix: both alternatives must be anchored as a whole.  The old
                # pattern r"^\d+x\d+|^\d+%$" left the first alternative without
                # a trailing anchor, so inputs like "1920x1080junk" validated.
                validation={"pattern": r"^(?:\d+x\d+|\d+%)$"}
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "processed_images": {"type": "array"},
                "count": {"type": "integer"},
                "total_size": {"type": "integer"},
                "processing_time": {"type": "number"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="any", recommended="nvidia"),
            HardwareRequirement(component="vram", min_value=1, recommended=4, unit="GB"),
            HardwareRequirement(component="ram", min_value=4, recommended=16, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_image", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.01),
            PricingTier(name="bulk_100", model=PricingModel.PER_UNIT, unit_price=0.0005, min_charge=0.05),
            PricingTier(name="bulk_1000", model=PricingModel.PER_UNIT, unit_price=0.0002, min_charge=0.2)
        ],
        capabilities=["resize", "filter", "format-convert", "batch", "watermark"],
        tags=["image", "processing", "batch", "filter", "conversion"],
        max_concurrent=10,
        timeout_seconds=600
    ),
    # Single-file audio pipeline: effects chain, noise reduction, re-encoding.
    "audio_processing": ServiceDefinition(
        id="audio_processing",
        name="Audio Processing",
        category=ServiceCategory.MEDIA_PROCESSING,
        description="Process audio files with effects, noise reduction, and format conversion",
        icon="🎵",
        input_parameters=[
            ParameterDefinition(
                name="audio_file",
                type=ParameterType.FILE,
                required=True,
                description="Input audio file"
            ),
            ParameterDefinition(
                name="operations",
                type=ParameterType.ARRAY,
                required=True,
                description="Audio operations to apply",
                items={
                    "type": "object",
                    "properties": {
                        "type": {"type": "string"},
                        "params": {"type": "object"}
                    }
                }
            ),
            ParameterDefinition(
                name="output_format",
                type=ParameterType.ENUM,
                required=False,
                description="Output format",
                default="mp3",
                options=["mp3", "wav", "flac", "aac", "ogg", "m4a"]
            ),
            ParameterDefinition(
                name="sample_rate",
                type=ParameterType.INTEGER,
                required=False,
                description="Output sample rate",
                default=44100,
                options=[22050, 44100, 48000, 96000, 192000]
            ),
            ParameterDefinition(
                name="bitrate",
                type=ParameterType.INTEGER,
                required=False,
                description="Output bitrate (kbps)",
                default=320,
                options=[128, 192, 256, 320, 512, 1024]
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "output_url": {"type": "string"},
                "metadata": {"type": "object"},
                "duration": {"type": "number"},
                "file_size": {"type": "integer"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="any", recommended="nvidia"),
            HardwareRequirement(component="ram", min_value=2, recommended=8, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_minute", model=PricingModel.PER_UNIT, unit_price=0.002, min_charge=0.01),
            PricingTier(name="per_effect", model=PricingModel.PER_UNIT, unit_price=0.005, min_charge=0.01)
        ],
        capabilities=["noise-reduction", "effects", "format-convert", "enhancement"],
        tags=["audio", "processing", "effects", "noise-reduction"],
        max_concurrent=5,
        timeout_seconds=300
    )
}

View File

@@ -0,0 +1,406 @@
"""
Scientific computing service definitions
"""
from typing import Dict, List, Any, Union
from .registry import (
ServiceDefinition,
ServiceCategory,
ParameterDefinition,
ParameterType,
HardwareRequirement,
PricingTier,
PricingModel
)
# Catalog of scientific-computing services offered on the marketplace, keyed
# by service id.  Each entry declares inputs, output schema, hardware needs
# and pricing tiers using the shared registry models.
SCIENTIFIC_COMPUTING_SERVICES = {
    # Classical MD runs via established packages (GROMACS, NAMD, ...).
    "molecular_dynamics": ServiceDefinition(
        id="molecular_dynamics",
        name="Molecular Dynamics Simulation",
        category=ServiceCategory.SCIENTIFIC_COMPUTING,
        description="Run molecular dynamics simulations using GROMACS or NAMD",
        icon="🧬",
        input_parameters=[
            ParameterDefinition(
                name="software",
                type=ParameterType.ENUM,
                required=True,
                description="MD software package",
                options=["gromacs", "namd", "amber", "lammps", "desmond"]
            ),
            ParameterDefinition(
                name="structure_file",
                type=ParameterType.FILE,
                required=True,
                description="Molecular structure file (PDB, MOL2, etc)"
            ),
            ParameterDefinition(
                name="topology_file",
                type=ParameterType.FILE,
                required=False,
                description="Topology file"
            ),
            ParameterDefinition(
                name="force_field",
                type=ParameterType.ENUM,
                required=True,
                description="Force field to use",
                options=["AMBER", "CHARMM", "OPLS", "GROMOS", "DREIDING"]
            ),
            ParameterDefinition(
                name="simulation_time_ns",
                type=ParameterType.FLOAT,
                required=True,
                description="Simulation time in nanoseconds",
                min_value=0.1,
                max_value=1000
            ),
            ParameterDefinition(
                name="temperature_k",
                type=ParameterType.FLOAT,
                required=False,
                description="Temperature in Kelvin",
                default=300,
                min_value=0,
                max_value=500
            ),
            ParameterDefinition(
                name="pressure_bar",
                type=ParameterType.FLOAT,
                required=False,
                description="Pressure in bar",
                default=1,
                min_value=0,
                max_value=1000
            ),
            ParameterDefinition(
                name="time_step_fs",
                type=ParameterType.FLOAT,
                required=False,
                description="Time step in femtoseconds",
                default=2,
                min_value=0.5,
                max_value=5
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "trajectory_url": {"type": "string"},
                "log_url": {"type": "string"},
                "energy_data": {"type": "array"},
                "simulation_stats": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="a100"),
            HardwareRequirement(component="vram", min_value=16, recommended=40, unit="GB"),
            HardwareRequirement(component="cpu", min_value=16, recommended=64, unit="cores"),
            HardwareRequirement(component="ram", min_value=32, recommended=256, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=1000, unit="GB")
        ],
        pricing=[
            # Billed per simulated nanosecond, per wall-clock hour, or at a
            # discounted bulk rate for long (100ns+) runs.
            PricingTier(name="per_ns", model=PricingModel.PER_UNIT, unit_price=0.1, min_charge=1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=2, min_charge=2),
            PricingTier(name="bulk_100ns", model=PricingModel.PER_UNIT, unit_price=0.05, min_charge=5)
        ],
        capabilities=["gpu-accelerated", "parallel", "ensemble", "free-energy"],
        tags=["molecular", "dynamics", "simulation", "biophysics", "chemistry"],
        max_concurrent=4,
        timeout_seconds=86400  # 24 hours
    ),
    # Regional numerical weather prediction over a caller-supplied bounding box.
    "weather_modeling": ServiceDefinition(
        id="weather_modeling",
        name="Weather Modeling",
        category=ServiceCategory.SCIENTIFIC_COMPUTING,
        description="Run weather prediction and climate simulations",
        icon="🌦️",
        input_parameters=[
            ParameterDefinition(
                name="model",
                type=ParameterType.ENUM,
                required=True,
                description="Weather model",
                options=["WRF", "MM5", "IFS", "GFS", "ECMWF"]
            ),
            ParameterDefinition(
                name="region",
                type=ParameterType.OBJECT,
                required=True,
                description="Geographic region bounds",
                properties={
                    "lat_min": {"type": "number"},
                    "lat_max": {"type": "number"},
                    "lon_min": {"type": "number"},
                    "lon_max": {"type": "number"}
                }
            ),
            ParameterDefinition(
                name="forecast_hours",
                type=ParameterType.INTEGER,
                required=True,
                description="Forecast length in hours",
                min_value=1,
                max_value=384  # 16 days
            ),
            ParameterDefinition(
                name="resolution_km",
                type=ParameterType.FLOAT,
                required=False,
                description="Spatial resolution in kilometers",
                default=10,
                options=[1, 3, 5, 10, 25, 50]
            ),
            ParameterDefinition(
                name="output_variables",
                type=ParameterType.ARRAY,
                required=False,
                description="Variables to output",
                default=["temperature", "precipitation", "wind", "pressure"],
                items={"type": "string"}
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "forecast_data": {"type": "array"},
                "visualization_urls": {"type": "array"},
                "metadata": {"type": "object"}
            }
        },
        requirements=[
            # CPU-bound workload: no GPU requirement, but heavy on cores,
            # memory, scratch storage and network for input data assimilation.
            HardwareRequirement(component="cpu", min_value=32, recommended=128, unit="cores"),
            HardwareRequirement(component="ram", min_value=64, recommended=512, unit="GB"),
            HardwareRequirement(component="storage", min_value=500, recommended=5000, unit="GB"),
            HardwareRequirement(component="network", min_value="10Gbps", recommended="100Gbps")
        ],
        pricing=[
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=5, min_charge=10),
            PricingTier(name="per_day", model=PricingModel.PER_UNIT, unit_price=100, min_charge=100),
            PricingTier(name="high_res", model=PricingModel.PER_HOUR, unit_price=10, min_charge=20)
        ],
        capabilities=["forecast", "climate", "ensemble", "data-assimilation"],
        tags=["weather", "climate", "forecast", "meteorology", "atmosphere"],
        max_concurrent=2,
        timeout_seconds=172800  # 48 hours
    ),
    # Monte Carlo style quantitative-finance workloads (pricing, VaR, etc.).
    "financial_modeling": ServiceDefinition(
        id="financial_modeling",
        name="Financial Modeling",
        category=ServiceCategory.SCIENTIFIC_COMPUTING,
        description="Run Monte Carlo simulations and risk analysis for financial models",
        icon="📊",
        input_parameters=[
            ParameterDefinition(
                name="model_type",
                type=ParameterType.ENUM,
                required=True,
                description="Financial model type",
                options=["monte-carlo", "option-pricing", "risk-var", "portfolio-optimization", "credit-risk"]
            ),
            ParameterDefinition(
                name="parameters",
                type=ParameterType.OBJECT,
                required=True,
                description="Model parameters"
            ),
            ParameterDefinition(
                name="num_simulations",
                type=ParameterType.INTEGER,
                required=True,
                description="Number of Monte Carlo simulations",
                default=10000,
                min_value=1000,
                max_value=10000000
            ),
            ParameterDefinition(
                name="time_steps",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of time steps",
                default=252,
                min_value=1,
                max_value=10000
            ),
            ParameterDefinition(
                name="confidence_levels",
                type=ParameterType.ARRAY,
                required=False,
                description="Confidence levels for VaR",
                default=[0.95, 0.99],
                items={"type": "number", "minimum": 0, "maximum": 1}
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "results": {"type": "array"},
                "statistics": {"type": "object"},
                "risk_metrics": {"type": "object"},
                "confidence_intervals": {"type": "array"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3080"),
            HardwareRequirement(component="vram", min_value=8, recommended=16, unit="GB"),
            HardwareRequirement(component="cpu", min_value=8, recommended=32, unit="cores"),
            HardwareRequirement(component="ram", min_value=16, recommended=64, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_simulation", model=PricingModel.PER_UNIT, unit_price=0.00001, min_charge=0.1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1),
            PricingTier(name="enterprise", model=PricingModel.PER_UNIT, unit_price=0.000005, min_charge=0.5)
        ],
        capabilities=["monte-carlo", "var", "option-pricing", "portfolio", "risk-analysis"],
        tags=["finance", "risk", "monte-carlo", "var", "options"],
        max_concurrent=10,
        timeout_seconds=3600
    ),
    # General physics solvers (particle transport, CFD, EM, astro, ...).
    "physics_simulation": ServiceDefinition(
        id="physics_simulation",
        name="Physics Simulation",
        category=ServiceCategory.SCIENTIFIC_COMPUTING,
        description="Run particle physics and fluid dynamics simulations",
        icon="⚛️",
        input_parameters=[
            ParameterDefinition(
                name="simulation_type",
                type=ParameterType.ENUM,
                required=True,
                description="Physics simulation type",
                options=["particle-physics", "fluid-dynamics", "electromagnetics", "quantum", "astrophysics"]
            ),
            ParameterDefinition(
                name="solver",
                type=ParameterType.ENUM,
                required=True,
                description="Simulation solver",
                options=["geant4", "fluent", "comsol", "openfoam", "lammps", "gadget"]
            ),
            ParameterDefinition(
                name="geometry_file",
                type=ParameterType.FILE,
                required=False,
                description="Geometry or mesh file"
            ),
            ParameterDefinition(
                name="initial_conditions",
                type=ParameterType.OBJECT,
                required=True,
                description="Initial conditions and parameters"
            ),
            ParameterDefinition(
                name="simulation_time",
                type=ParameterType.FLOAT,
                required=True,
                description="Simulation time",
                min_value=0.001
            ),
            ParameterDefinition(
                name="particles",
                type=ParameterType.INTEGER,
                required=False,
                description="Number of particles",
                default=1000000,
                min_value=1000,
                max_value=100000000
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "results_url": {"type": "string"},
                "data_arrays": {"type": "object"},
                "visualizations": {"type": "array"},
                "statistics": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="a100"),
            HardwareRequirement(component="vram", min_value=16, recommended=40, unit="GB"),
            HardwareRequirement(component="cpu", min_value=16, recommended=64, unit="cores"),
            HardwareRequirement(component="ram", min_value=32, recommended=256, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=1000, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=2, min_charge=2),
            PricingTier(name="per_particle", model=PricingModel.PER_UNIT, unit_price=0.000001, min_charge=1),
            PricingTier(name="hpc", model=PricingModel.PER_HOUR, unit_price=5, min_charge=5)
        ],
        capabilities=["gpu-accelerated", "parallel", "mpi", "large-scale"],
        tags=["physics", "simulation", "particle", "fluid", "cfd"],
        max_concurrent=4,
        timeout_seconds=86400
    ),
    # Sequence/structure analysis pipelines (alignment, folding, variants).
    "bioinformatics": ServiceDefinition(
        id="bioinformatics",
        name="Bioinformatics Analysis",
        category=ServiceCategory.SCIENTIFIC_COMPUTING,
        description="DNA sequencing, protein folding, and genomic analysis",
        icon="🧬",
        input_parameters=[
            ParameterDefinition(
                name="analysis_type",
                type=ParameterType.ENUM,
                required=True,
                description="Bioinformatics analysis type",
                options=["dna-sequencing", "protein-folding", "alignment", "phylogeny", "variant-calling"]
            ),
            ParameterDefinition(
                name="sequence_file",
                type=ParameterType.FILE,
                required=True,
                description="Input sequence file (FASTA, FASTQ, BAM, etc)"
            ),
            ParameterDefinition(
                name="reference_file",
                type=ParameterType.FILE,
                required=False,
                description="Reference genome or protein structure"
            ),
            ParameterDefinition(
                name="algorithm",
                type=ParameterType.ENUM,
                required=True,
                description="Analysis algorithm",
                options=["blast", "bowtie", "bwa", "alphafold", "gatk", "clustal"]
            ),
            ParameterDefinition(
                name="parameters",
                type=ParameterType.OBJECT,
                required=False,
                description="Algorithm-specific parameters"
            )
        ],
        output_schema={
            "type": "object",
            "properties": {
                "results_file": {"type": "string"},
                "alignment_file": {"type": "string"},
                "annotations": {"type": "array"},
                "statistics": {"type": "object"}
            }
        },
        requirements=[
            HardwareRequirement(component="gpu", min_value="nvidia", recommended="rtx-3090"),
            HardwareRequirement(component="vram", min_value=8, recommended=24, unit="GB"),
            HardwareRequirement(component="cpu", min_value=16, recommended=32, unit="cores"),
            HardwareRequirement(component="ram", min_value=32, recommended=128, unit="GB"),
            HardwareRequirement(component="storage", min_value=100, recommended=500, unit="GB")
        ],
        pricing=[
            PricingTier(name="per_mb", model=PricingModel.PER_UNIT, unit_price=0.001, min_charge=0.1),
            PricingTier(name="per_hour", model=PricingModel.PER_HOUR, unit_price=1, min_charge=1),
            PricingTier(name="protein_folding", model=PricingModel.PER_UNIT, unit_price=0.01, min_charge=0.5)
        ],
        capabilities=["sequencing", "alignment", "folding", "annotation", "variant-calling"],
        tags=["bioinformatics", "genomics", "proteomics", "dna", "sequencing"],
        max_concurrent=5,
        timeout_seconds=7200
    )
}

View File

@@ -0,0 +1,380 @@
"""
Service schemas for common GPU workloads
"""
from typing import Any, Dict, List, Optional, Union
from enum import Enum
from pydantic import BaseModel, Field, validator
import re
class ServiceType(str, Enum):
    """Supported service types.

    The string values double as the wire identifiers carried in
    ``ServiceRequest.service_type`` and in ``ServiceResponse``.
    """
    WHISPER = "whisper"
    STABLE_DIFFUSION = "stable_diffusion"
    LLM_INFERENCE = "llm_inference"
    FFMPEG = "ffmpeg"
    BLENDER = "blender"
# Whisper Service Schemas
class WhisperModel(str, Enum):
    """Supported Whisper models, roughly ordered smallest to largest."""
    TINY = "tiny"
    BASE = "base"
    SMALL = "small"
    MEDIUM = "medium"
    LARGE = "large"
    LARGE_V2 = "large-v2"
    LARGE_V3 = "large-v3"
class WhisperLanguage(str, Enum):
    """Supported source languages (ISO 639-1 codes).

    ``AUTO`` requests automatic language detection instead of a fixed code.
    """
    AUTO = "auto"
    EN = "en"
    ES = "es"
    FR = "fr"
    DE = "de"
    IT = "it"
    PT = "pt"
    RU = "ru"
    JA = "ja"
    KO = "ko"
    ZH = "zh"
class WhisperTask(str, Enum):
    """Whisper task types: transcribe in-language, or translate to English."""
    TRANSCRIBE = "transcribe"
    TRANSLATE = "translate"
class WhisperRequest(BaseModel):
    """Request payload for a Whisper speech-to-text job.

    Carries the decoding parameters plus a URL pointing at the audio to
    process; :meth:`get_constraints` translates the chosen model into the
    hardware a provider must offer.
    """
    audio_url: str = Field(..., description="URL of audio file to transcribe")
    model: WhisperModel = Field(WhisperModel.BASE, description="Whisper model to use")
    language: WhisperLanguage = Field(WhisperLanguage.AUTO, description="Source language")
    task: WhisperTask = Field(WhisperTask.TRANSCRIBE, description="Task to perform")
    temperature: float = Field(0.0, ge=0.0, le=1.0, description="Sampling temperature")
    best_of: int = Field(5, ge=1, le=10, description="Number of candidates")
    beam_size: int = Field(5, ge=1, le=10, description="Beam size for decoding")
    patience: float = Field(1.0, ge=0.0, le=2.0, description="Beam search patience")
    suppress_tokens: Optional[List[int]] = Field(None, description="Tokens to suppress")
    initial_prompt: Optional[str] = Field(None, description="Initial prompt for context")
    condition_on_previous_text: bool = Field(True, description="Condition on previous text")
    fp16: bool = Field(True, description="Use FP16 for faster inference")
    verbose: bool = Field(False, description="Include verbose output")

    def get_constraints(self) -> Dict[str, Any]:
        """Return the hardware constraints implied by the selected model."""
        # Approximate VRAM footprint in GB for each model size.
        model_vram_gb = {
            WhisperModel.TINY: 1,
            WhisperModel.BASE: 1,
            WhisperModel.SMALL: 2,
            WhisperModel.MEDIUM: 5,
            WhisperModel.LARGE: 10,
            WhisperModel.LARGE_V2: 10,
            WhisperModel.LARGE_V3: 10,
        }
        constraints: Dict[str, Any] = {
            "models": ["whisper"],
            "min_vram_gb": model_vram_gb[self.model],
            "gpu": "nvidia",  # Whisper requires CUDA
        }
        return constraints
# Stable Diffusion Service Schemas
class SDModel(str, Enum):
    """Supported Stable Diffusion model families."""
    SD_1_5 = "stable-diffusion-1.5"
    SD_2_1 = "stable-diffusion-2.1"
    SDXL = "stable-diffusion-xl"
    SDXL_TURBO = "sdxl-turbo"
    SDXL_REFINER = "sdxl-refiner"
class SDSize(str, Enum):
    """Standard image sizes, encoded as ``"<width>x<height>"`` strings."""
    SQUARE_512 = "512x512"
    PORTRAIT_512 = "512x768"
    LANDSCAPE_512 = "768x512"
    SQUARE_768 = "768x768"
    PORTRAIT_768 = "768x1024"
    LANDSCAPE_768 = "1024x768"
    SQUARE_1024 = "1024x1024"
    PORTRAIT_1024 = "1024x1536"
    LANDSCAPE_1024 = "1536x1024"
class StableDiffusionRequest(BaseModel):
    """Stable Diffusion image generation request.

    :meth:`get_constraints` maps the chosen model onto the minimum
    hardware a provider must offer.
    """
    prompt: str = Field(..., min_length=1, max_length=1000, description="Text prompt")
    negative_prompt: Optional[str] = Field(None, max_length=1000, description="Negative prompt")
    # Fix: the default must be referenced through the enum.  The bare name
    # `SD_1_5` was undefined and raised NameError at class-definition time.
    model: SDModel = Field(SDModel.SD_1_5, description="Model to use")
    size: SDSize = Field(SDSize.SQUARE_512, description="Image size")
    num_images: int = Field(1, ge=1, le=4, description="Number of images to generate")
    num_inference_steps: int = Field(20, ge=1, le=100, description="Number of inference steps")
    guidance_scale: float = Field(7.5, ge=1.0, le=20.0, description="Guidance scale")
    seed: Optional[Union[int, List[int]]] = Field(None, description="Random seed(s)")
    scheduler: str = Field("DPMSolverMultistepScheduler", description="Scheduler to use")
    enable_safety_checker: bool = Field(True, description="Enable safety checker")
    lora: Optional[str] = Field(None, description="LoRA model to use")
    lora_scale: float = Field(1.0, ge=0.0, le=2.0, description="LoRA strength")

    @validator('seed')
    def validate_seed(cls, v):
        """Cap seed lists at 4, matching the ``num_images`` upper bound."""
        if v is not None and isinstance(v, list):
            if len(v) > 4:
                raise ValueError("Maximum 4 seeds allowed")
        return v

    def get_constraints(self) -> Dict[str, Any]:
        """Return the hardware constraints implied by the selected model."""
        # Approximate VRAM footprint in GB for each model family.
        vram_requirements = {
            SDModel.SD_1_5: 4,
            SDModel.SD_2_1: 4,
            SDModel.SDXL: 8,
            SDModel.SDXL_TURBO: 8,
            SDModel.SDXL_REFINER: 8,
        }
        # NOTE: a previous revision computed an unused `max_dim` over every
        # SDSize member here (dead code) — removed; it never affected the
        # returned constraints.
        return {
            "models": ["stable-diffusion"],
            "min_vram_gb": vram_requirements[self.model],
            "gpu": "nvidia",  # SD requires CUDA
            "cuda": "11.8",  # Minimum CUDA version
        }
# LLM Inference Service Schemas
class LLMModel(str, Enum):
    """Supported LLM models (Llama, Mistral/Mixtral, CodeLlama variants)."""
    LLAMA_7B = "llama-7b"
    LLAMA_13B = "llama-13b"
    LLAMA_70B = "llama-70b"
    MISTRAL_7B = "mistral-7b"
    MIXTRAL_8X7B = "mixtral-8x7b"
    CODELLAMA_7B = "codellama-7b"
    CODELLAMA_13B = "codellama-13b"
    CODELLAMA_34B = "codellama-34b"
class LLMRequest(BaseModel):
    """Text-generation request for a hosted LLM.

    Bundles the prompt with the usual sampling knobs; the model choice
    drives the hardware constraints via :meth:`get_constraints`.
    """
    model: LLMModel = Field(..., description="Model to use")
    prompt: str = Field(..., min_length=1, max_length=10000, description="Input prompt")
    max_tokens: int = Field(256, ge=1, le=4096, description="Maximum tokens to generate")
    temperature: float = Field(0.7, ge=0.0, le=2.0, description="Sampling temperature")
    top_p: float = Field(0.9, ge=0.0, le=1.0, description="Top-p sampling")
    top_k: int = Field(40, ge=0, le=100, description="Top-k sampling")
    repetition_penalty: float = Field(1.1, ge=0.0, le=2.0, description="Repetition penalty")
    stop_sequences: Optional[List[str]] = Field(None, description="Stop sequences")
    stream: bool = Field(False, description="Stream response")

    def get_constraints(self) -> Dict[str, Any]:
        """Return the hardware constraints implied by the selected model."""
        # Rough VRAM footprint in GB per model (weights + KV cache headroom).
        model_vram_gb = {
            LLMModel.LLAMA_7B: 8,
            LLMModel.LLAMA_13B: 16,
            LLMModel.LLAMA_70B: 64,
            LLMModel.MISTRAL_7B: 8,
            LLMModel.MIXTRAL_8X7B: 48,
            LLMModel.CODELLAMA_7B: 8,
            LLMModel.CODELLAMA_13B: 16,
            LLMModel.CODELLAMA_34B: 32,
        }
        constraints: Dict[str, Any] = {
            "models": ["llm"],
            "min_vram_gb": model_vram_gb[self.model],
            "gpu": "nvidia",  # LLMs require CUDA
            "cuda": "11.8",
        }
        return constraints
# FFmpeg Service Schemas
class FFmpegCodec(str, Enum):
    """Supported video codecs (H.264/H.265 can use NVENC acceleration)."""
    H264 = "h264"
    H265 = "h265"
    VP9 = "vp9"
    AV1 = "av1"
class FFmpegPreset(str, Enum):
    """Encoding presets, fastest (lowest compression) to slowest."""
    ULTRAFAST = "ultrafast"
    SUPERFAST = "superfast"
    VERYFAST = "veryfast"
    FASTER = "faster"
    FAST = "fast"
    MEDIUM = "medium"
    SLOW = "slow"
    SLOWER = "slower"
    VERYSLOW = "veryslow"
class FFmpegRequest(BaseModel):
    """FFmpeg video processing request.

    Describes one input-to-output transcode; the codec choice decides
    whether an NVIDIA GPU (NVENC) is required via :meth:`get_constraints`.
    """
    input_url: str = Field(..., description="URL of input video")
    output_format: str = Field("mp4", description="Output format")
    codec: FFmpegCodec = Field(FFmpegCodec.H264, description="Video codec")
    preset: FFmpegPreset = Field(FFmpegPreset.MEDIUM, description="Encoding preset")
    crf: int = Field(23, ge=0, le=51, description="Constant rate factor")
    resolution: Optional[str] = Field(None, regex=r"^\d+x\d+$", description="Output resolution (e.g., 1920x1080)")
    bitrate: Optional[str] = Field(None, regex=r"^\d+[kM]?$", description="Target bitrate")
    fps: Optional[int] = Field(None, ge=1, le=120, description="Output frame rate")
    audio_codec: str = Field("aac", description="Audio codec")
    audio_bitrate: str = Field("128k", description="Audio bitrate")
    custom_args: Optional[List[str]] = Field(None, description="Custom FFmpeg arguments")

    def get_constraints(self) -> Dict[str, Any]:
        """Return the hardware constraints implied by the selected codec."""
        nvenc_codecs = (FFmpegCodec.H264, FFmpegCodec.H265)
        if self.codec not in nvenc_codecs:
            # No NVENC path for this codec; CPU encoding is acceptable.
            return {
                "models": ["ffmpeg"],
                "gpu": "any",  # CPU encoding possible
            }
        # NVENC support for H.264/H.265
        return {
            "models": ["ffmpeg"],
            "gpu": "nvidia",  # NVENC requires NVIDIA
            "min_vram_gb": 4,
        }
# Blender Service Schemas
class BlenderEngine(str, Enum):
    """Blender render engines (Cycles is the path tracer; EEVEE is rasterized)."""
    CYCLES = "cycles"
    EEVEE = "eevee"
    EEVEE_NEXT = "eevee-next"
class BlenderFormat(str, Enum):
    """Supported output image formats."""
    PNG = "png"
    JPG = "jpg"
    EXR = "exr"
    BMP = "bmp"
    TIFF = "tiff"
class BlenderRequest(BaseModel):
    """Blender rendering request for a single frame or an animation range."""
    blend_file_url: str = Field(..., description="URL of .blend file")
    engine: BlenderEngine = Field(BlenderEngine.CYCLES, description="Render engine")
    format: BlenderFormat = Field(BlenderFormat.PNG, description="Output format")
    resolution_x: int = Field(1920, ge=1, le=65536, description="Image width")
    resolution_y: int = Field(1080, ge=1, le=65536, description="Image height")
    resolution_percentage: int = Field(100, ge=1, le=100, description="Resolution scale")
    samples: int = Field(128, ge=1, le=10000, description="Samples (Cycles only)")
    frame_start: int = Field(1, ge=1, description="Start frame")
    frame_end: int = Field(1, ge=1, description="End frame")
    frame_step: int = Field(1, ge=1, description="Frame step")
    denoise: bool = Field(True, description="Enable denoising")
    transparent: bool = Field(False, description="Transparent background")
    custom_args: Optional[List[str]] = Field(None, description="Custom Blender arguments")
    @validator('frame_end')
    def validate_frame_range(cls, v, values):
        # Reject inverted animation ranges (frame_start validated first).
        if 'frame_start' in values and v < values['frame_start']:
            raise ValueError("frame_end must be >= frame_start")
        return v
    def get_constraints(self) -> Dict[str, Any]:
        """Get hardware constraints for this request"""
        # Calculate VRAM based on resolution and samples
        # NOTE(review): this is a rough heuristic — pixel count scaled by a
        # samples factor, divided by 1M.  For very large frames/sample counts
        # the estimate grows unrealistically; confirm against real renders.
        pixel_count = self.resolution_x * self.resolution_y
        # NOTE(review): only EEVEE skips the samples multiplier — EEVEE_NEXT
        # falls through to the Cycles-style branch; confirm that is intended.
        samples_multiplier = 1 if self.engine == BlenderEngine.EEVEE else self.samples / 100
        estimated_vram = int((pixel_count * samples_multiplier) / (1024 * 1024))
        return {
            "models": ["blender"],
            "min_vram_gb": max(4, estimated_vram),  # never below a 4 GB floor
            "gpu": "nvidia" if self.engine == BlenderEngine.CYCLES else "any",
        }
# Unified Service Request
class ServiceRequest(BaseModel):
    """Unified service request wrapper.

    Pairs a ServiceType discriminator with an untyped payload; use
    get_service_request() to parse the payload into the matching
    service-specific request model.
    """
    service_type: ServiceType = Field(..., description="Type of service")
    request_data: Dict[str, Any] = Field(..., description="Service-specific request data")
    def get_service_request(self) -> Union[
        WhisperRequest,
        StableDiffusionRequest,
        LLMRequest,
        FFmpegRequest,
        BlenderRequest
    ]:
        """Parse request_data into the typed request model for service_type.

        Raises:
            ValueError: if service_type has no registered request model
                (previously surfaced as an opaque KeyError).
            pydantic.ValidationError: if request_data does not match the
                target model's schema.
        """
        service_classes = {
            ServiceType.WHISPER: WhisperRequest,
            ServiceType.STABLE_DIFFUSION: StableDiffusionRequest,
            ServiceType.LLM_INFERENCE: LLMRequest,
            ServiceType.FFMPEG: FFmpegRequest,
            ServiceType.BLENDER: BlenderRequest,
        }
        service_class = service_classes.get(self.service_type)
        if service_class is None:
            raise ValueError(f"Unsupported service type: {self.service_type}")
        return service_class(**self.request_data)
# Service Response Schemas
class ServiceResponse(BaseModel):
    """Base service response returned when a job is submitted/queued.

    Service-specific result payloads are modeled separately (e.g.
    WhisperResponse); this carries only job tracking metadata.
    """
    job_id: str = Field(..., description="Job ID")
    service_type: ServiceType = Field(..., description="Service type")
    status: str = Field(..., description="Job status")
    # NOTE(review): string-typed rather than datetime — presumably an ISO
    # timestamp or human-readable estimate; confirm against producers.
    estimated_completion: Optional[str] = Field(None, description="Estimated completion time")
class WhisperResponse(BaseModel):
    """Whisper transcription response.

    Contains the full transcript, the detected language, and optionally
    per-segment details (free-form dicts; schema not enforced here).
    """
    text: str = Field(..., description="Transcribed text")
    language: str = Field(..., description="Detected language")
    segments: Optional[List[Dict[str, Any]]] = Field(None, description="Transcription segments")
class StableDiffusionResponse(BaseModel):
    """Stable Diffusion image generation response.

    `nsfw_content_detected` is a per-image flag list; presumably it is
    index-aligned with `images` — confirm against the producer.
    """
    images: List[str] = Field(..., description="Generated image URLs")
    parameters: Dict[str, Any] = Field(..., description="Generation parameters")
    nsfw_content_detected: List[bool] = Field(..., description="NSFW detection results")
class LLMResponse(BaseModel):
    """LLM inference response.

    `finish_reason` is a free-form string (e.g. why generation stopped);
    allowed values are not constrained by this schema.
    """
    text: str = Field(..., description="Generated text")
    finish_reason: str = Field(..., description="Reason for generation stop")
    tokens_used: int = Field(..., description="Number of tokens used")
class FFmpegResponse(BaseModel):
    """FFmpeg processing response.

    `metadata` is a free-form dict of probe/output info; its schema is
    not enforced here.
    """
    output_url: str = Field(..., description="URL of processed video")
    metadata: Dict[str, Any] = Field(..., description="Video metadata")
    # Duration of the output video; units not stated — presumably seconds.
    duration: float = Field(..., description="Video duration")
class BlenderResponse(BaseModel):
    """Blender rendering response.

    One URL per rendered frame in `images`; `metadata` is a free-form
    dict of render details (schema not enforced here).
    """
    images: List[str] = Field(..., description="Rendered image URLs")
    metadata: Dict[str, Any] = Field(..., description="Render metadata")
    render_time: float = Field(..., description="Render time in seconds")