feat: implement CLI blockchain features and pool hub enhancements
Some checks failed
API Endpoint Tests / test-api-endpoints (push) Successful in 11s
CLI Tests / test-cli (push) Failing after 7s
Documentation Validation / validate-docs (push) Successful in 8s
Documentation Validation / validate-policies-strict (push) Successful in 3s
Integration Tests / test-service-integration (push) Successful in 38s
Python Tests / test-python (push) Successful in 11s
Security Scanning / security-scan (push) Successful in 29s
Multi-Node Blockchain Health Monitoring / health-check (push) Successful in 1s
Some checks failed
API Endpoint Tests / test-api-endpoints (push) Successful in 11s
CLI Tests / test-cli (push) Failing after 7s
Documentation Validation / validate-docs (push) Successful in 8s
Documentation Validation / validate-policies-strict (push) Successful in 3s
Integration Tests / test-service-integration (push) Successful in 38s
Python Tests / test-python (push) Successful in 11s
Security Scanning / security-scan (push) Successful in 29s
Multi-Node Blockchain Health Monitoring / health-check (push) Successful in 1s
CLI Blockchain Features:
- Added block operations: import, export, import-chain, blocks-range
- Added messaging system commands (deploy, state, topics, create-topic, messages, post, vote, search, reputation, moderate)
- Added network force-sync operation
- Replaced marketplace handlers with actual RPC calls
- Replaced AI handlers with actual RPC calls
- Added account operations (account get)
- Added transaction query operations
- Added mempool query operations
- Created keystore_auth.py for authentication
- Removed extended-features interception
- All handlers use keystore credentials for authenticated endpoints

Pool Hub Enhancements:
- Added SLA monitoring and capacity tables
- Added billing integration service
- Added SLA collector service
- Added SLA router endpoints
- Updated pool hub models and settings
- Added integration tests for billing and SLA
- Updated documentation with SLA monitoring guide
This commit is contained in:
@@ -10,7 +10,6 @@ from __future__ import annotations
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = "a58c1f3b3e87"
|
||||
@@ -34,8 +33,8 @@ def upgrade() -> None:
|
||||
sa.Column("ram_gb", sa.Float()),
|
||||
sa.Column("max_parallel", sa.Integer()),
|
||||
sa.Column("base_price", sa.Float()),
|
||||
sa.Column("tags", postgresql.JSONB(astext_type=sa.Text())),
|
||||
sa.Column("capabilities", postgresql.JSONB(astext_type=sa.Text())),
|
||||
sa.Column("tags", sa.JSON()),
|
||||
sa.Column("capabilities", sa.JSON()),
|
||||
sa.Column("trust_score", sa.Float(), server_default="0.5"),
|
||||
sa.Column("region", sa.String(length=64)),
|
||||
)
|
||||
@@ -53,18 +52,18 @@ def upgrade() -> None:
|
||||
|
||||
op.create_table(
|
||||
"match_requests",
|
||||
sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
|
||||
sa.Column("id", sa.String(36), primary_key=True),
|
||||
sa.Column("job_id", sa.String(length=64), nullable=False),
|
||||
sa.Column("requirements", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
|
||||
sa.Column("hints", postgresql.JSONB(astext_type=sa.Text()), server_default=sa.text("'{}'::jsonb")),
|
||||
sa.Column("requirements", sa.JSON(), nullable=False),
|
||||
sa.Column("hints", sa.JSON(), server_default=sa.text("'{}'")),
|
||||
sa.Column("top_k", sa.Integer(), server_default="1"),
|
||||
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("NOW()")),
|
||||
)
|
||||
|
||||
op.create_table(
|
||||
"match_results",
|
||||
sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
|
||||
sa.Column("request_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("match_requests.id", ondelete="CASCADE"), nullable=False),
|
||||
sa.Column("id", sa.String(36), primary_key=True),
|
||||
sa.Column("request_id", sa.String(36), sa.ForeignKey("match_requests.id", ondelete="CASCADE"), nullable=False),
|
||||
sa.Column("miner_id", sa.String(length=64), nullable=False),
|
||||
sa.Column("score", sa.Float(), nullable=False),
|
||||
sa.Column("explain", sa.Text()),
|
||||
@@ -76,7 +75,7 @@ def upgrade() -> None:
|
||||
|
||||
op.create_table(
|
||||
"feedback",
|
||||
sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
|
||||
sa.Column("id", sa.String(36), primary_key=True),
|
||||
sa.Column("job_id", sa.String(length=64), nullable=False),
|
||||
sa.Column("miner_id", sa.String(length=64), sa.ForeignKey("miners.miner_id", ondelete="CASCADE"), nullable=False),
|
||||
sa.Column("outcome", sa.String(length=32), nullable=False),
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
"""add sla and capacity tables
|
||||
|
||||
Revision ID: b2a1c4d5e6f7
|
||||
Revises: a58c1f3b3e87
|
||||
Create Date: 2026-04-22 15:00:00.000000
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = "b2a1c4d5e6f7"
|
||||
down_revision = "a58c1f3b3e87"
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade() -> None:
    """Apply the SLA/capacity schema.

    Extends ``miner_status`` with availability-tracking columns, then creates
    the ``sla_metrics``, ``sla_violations`` and ``capacity_snapshots`` tables
    together with their lookup indexes.
    """
    # Availability tracking on the existing miner_status table.
    for new_col in (
        sa.Column("uptime_pct", sa.Float(), nullable=True),
        sa.Column("last_heartbeat_at", sa.DateTime(timezone=True), nullable=True),
    ):
        op.add_column("miner_status", new_col)

    # Raw per-miner SLA measurement samples.
    op.create_table(
        "sla_metrics",
        sa.Column("id", sa.String(36), primary_key=True),
        sa.Column(
            "miner_id",
            sa.String(length=64),
            sa.ForeignKey("miners.miner_id", ondelete="CASCADE"),
            nullable=False,
        ),
        sa.Column("metric_type", sa.String(length=32), nullable=False),
        sa.Column("metric_value", sa.Float(), nullable=False),
        sa.Column("threshold", sa.Float(), nullable=False),
        sa.Column("is_violation", sa.Boolean(), server_default=sa.text("false")),
        sa.Column("timestamp", sa.DateTime(timezone=True), server_default=sa.text("NOW()")),
        sa.Column("meta_data", sa.JSON(), server_default=sa.text("'{}'")),
    )
    for indexed in ("miner_id", "timestamp", "metric_type"):
        op.create_index(f"ix_sla_metrics_{indexed}", "sla_metrics", [indexed])

    # Recorded SLA breaches derived from the metrics above.
    op.create_table(
        "sla_violations",
        sa.Column("id", sa.String(36), primary_key=True),
        sa.Column(
            "miner_id",
            sa.String(length=64),
            sa.ForeignKey("miners.miner_id", ondelete="CASCADE"),
            nullable=False,
        ),
        sa.Column("violation_type", sa.String(length=32), nullable=False),
        sa.Column("severity", sa.String(length=16), nullable=False),
        sa.Column("metric_value", sa.Float(), nullable=False),
        sa.Column("threshold", sa.Float(), nullable=False),
        sa.Column("violation_duration_ms", sa.Integer(), nullable=True),
        sa.Column("resolved_at", sa.DateTime(timezone=True), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("NOW()")),
        sa.Column("meta_data", sa.JSON(), server_default=sa.text("'{}'")),
    )
    for indexed in ("miner_id", "created_at", "severity"):
        op.create_index(f"ix_sla_violations_{indexed}", "sla_violations", [indexed])

    # Periodic pool-wide capacity snapshots for scaling decisions.
    op.create_table(
        "capacity_snapshots",
        sa.Column("id", sa.String(36), primary_key=True),
        sa.Column("total_miners", sa.Integer(), nullable=False),
        sa.Column("active_miners", sa.Integer(), nullable=False),
        sa.Column("total_parallel_capacity", sa.Integer(), nullable=False),
        sa.Column("total_queue_length", sa.Integer(), nullable=False),
        sa.Column("capacity_utilization_pct", sa.Float(), nullable=False),
        sa.Column("forecast_capacity", sa.Integer(), nullable=False),
        sa.Column("recommended_scaling", sa.String(length=32), nullable=False),
        sa.Column("scaling_reason", sa.Text(), nullable=True),
        sa.Column("timestamp", sa.DateTime(timezone=True), server_default=sa.text("NOW()")),
        sa.Column("meta_data", sa.JSON(), server_default=sa.text("'{}'")),
    )
    op.create_index("ix_capacity_snapshots_timestamp", "capacity_snapshots", ["timestamp"])
|
||||
|
||||
|
||||
def downgrade() -> None:
    """Revert the SLA/capacity schema.

    Drops the tables added by :func:`upgrade` (indexes first, then the table)
    and removes the columns added to ``miner_status``, in reverse creation
    order.
    """
    # capacity_snapshots and its index.
    op.drop_index("ix_capacity_snapshots_timestamp", table_name="capacity_snapshots")
    op.drop_table("capacity_snapshots")

    # sla_violations: indexes in reverse creation order, then the table.
    for indexed in ("severity", "created_at", "miner_id"):
        op.drop_index(f"ix_sla_violations_{indexed}", table_name="sla_violations")
    op.drop_table("sla_violations")

    # sla_metrics: indexes in reverse creation order, then the table.
    for indexed in ("metric_type", "timestamp", "miner_id"):
        op.drop_index(f"ix_sla_metrics_{indexed}", table_name="sla_metrics")
    op.drop_table("sla_metrics")

    # Remove the availability columns added to miner_status.
    for col_name in ("last_heartbeat_at", "uptime_pct"):
        op.drop_column("miner_status", col_name)
|
||||
Reference in New Issue
Block a user