feat: implement CLI blockchain features and pool hub enhancements
Some checks failed
API Endpoint Tests / test-api-endpoints (push) Successful in 11s
CLI Tests / test-cli (push) Failing after 7s
Documentation Validation / validate-docs (push) Successful in 8s
Documentation Validation / validate-policies-strict (push) Successful in 3s
Integration Tests / test-service-integration (push) Successful in 38s
Python Tests / test-python (push) Successful in 11s
Security Scanning / security-scan (push) Successful in 29s
Multi-Node Blockchain Health Monitoring / health-check (push) Successful in 1s

CLI Blockchain Features:
- Added block operations: import, export, import-chain, blocks-range
- Added messaging system commands (deploy, state, topics, create-topic, messages, post, vote, search, reputation, moderate)
- Added network force-sync operation
- Replaced marketplace handlers with actual RPC calls
- Replaced AI handlers with actual RPC calls
- Added account operations (account get)
- Added transaction query operations
- Added mempool query operations
- Created keystore_auth.py for authentication
- Removed extended features interception
- All handlers use keystore credentials for authenticated endpoints

Pool Hub Enhancements:
- Added SLA monitoring and capacity tables
- Added billing integration service
- Added SLA collector service
- Added SLA router endpoints
- Updated pool hub models and settings
- Added integration tests for billing and SLA
- Updated documentation with SLA monitoring guide
This commit is contained in:
aitbc
2026-04-22 15:59:00 +02:00
parent 51920a15d7
commit e22d864944
28 changed files with 4783 additions and 358 deletions

View File

@@ -6,10 +6,14 @@ from pathlib import Path
import pytest
import pytest_asyncio
from dotenv import load_dotenv
from redis.asyncio import Redis
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker, create_async_engine
# Load environment variables from the repository-level .env file so tests
# pick up database/redis connection settings without manual exports.
BASE_DIR = Path(__file__).resolve().parents[2]
load_dotenv(BASE_DIR / ".env")
# Make the pool-hub sources importable as ``poolhub`` without installing the
# package; guard against duplicate sys.path entries on repeated imports.
POOLHUB_SRC = BASE_DIR / "pool-hub" / "src"
if str(POOLHUB_SRC) not in sys.path:
    sys.path.insert(0, str(POOLHUB_SRC))

View File

@@ -0,0 +1,192 @@
"""
Tests for Billing Integration Service
"""
import pytest
from datetime import datetime, timedelta
from decimal import Decimal
from unittest.mock import AsyncMock, patch
from sqlalchemy.orm import Session
from poolhub.models import Miner, MatchRequest, MatchResult
from poolhub.services.billing_integration import BillingIntegration
@pytest.fixture
def billing_integration(db_session: Session) -> BillingIntegration:
    """Provide a BillingIntegration bound to the test database session."""
    return BillingIntegration(db_session)


@pytest.fixture
def sample_miner(db_session: Session) -> Miner:
    """Persist and return one representative single-GPU miner row."""
    miner_attrs = {
        "miner_id": "test_miner_001",
        "api_key_hash": "hash123",
        "addr": "127.0.0.1:8080",
        "proto": "http",
        "gpu_vram_gb": 24.0,
        "gpu_name": "RTX 4090",
        "cpu_cores": 16,
        "ram_gb": 64.0,
        "max_parallel": 4,
        "base_price": 0.50,
    }
    miner = Miner(**miner_attrs)
    db_session.add(miner)
    db_session.commit()
    return miner
@pytest.mark.asyncio
async def test_record_usage(billing_integration: BillingIntegration):
    """Test recording usage data"""
    # Stub the outbound HTTP client so no real network call is made.
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        response = AsyncMock()
        response.json.return_value = {"status": "success", "id": "usage_123"}
        response.raise_for_status = AsyncMock()
        client = mock_client.return_value.__aenter__.return_value
        client.post = AsyncMock(return_value=response)

        result = await billing_integration.record_usage(
            tenant_id="tenant_001",
            resource_type="gpu_hours",
            quantity=Decimal("10.5"),
            unit_price=Decimal("0.50"),
            job_id="job_123",
        )

        assert result["status"] == "success"


@pytest.mark.asyncio
async def test_record_usage_with_fallback_pricing(billing_integration: BillingIntegration):
    """Test recording usage with fallback pricing when unit_price not provided"""
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        response = AsyncMock()
        response.json.return_value = {"status": "success", "id": "usage_123"}
        response.raise_for_status = AsyncMock()
        client = mock_client.return_value.__aenter__.return_value
        client.post = AsyncMock(return_value=response)

        # unit_price deliberately omitted: the service must fall back to its
        # configured default pricing.
        result = await billing_integration.record_usage(
            tenant_id="tenant_001",
            resource_type="gpu_hours",
            quantity=Decimal("10.5"),
        )

        assert result["status"] == "success"
@pytest.mark.asyncio
async def test_sync_miner_usage(billing_integration: BillingIntegration, sample_miner: Miner):
    """Test syncing usage for a specific miner"""
    end_date = datetime.utcnow()
    start_date = end_date - timedelta(hours=24)
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        response = AsyncMock()
        response.json.return_value = {"status": "success", "id": "usage_123"}
        response.raise_for_status = AsyncMock()
        mock_client.return_value.__aenter__.return_value.post = AsyncMock(return_value=response)

        summary = await billing_integration.sync_miner_usage(
            miner_id=sample_miner.miner_id,
            start_date=start_date,
            end_date=end_date,
        )

        assert summary["miner_id"] == sample_miner.miner_id
        # The miner is billed as its own tenant (tenant_id mirrors miner_id).
        assert summary["tenant_id"] == sample_miner.miner_id
        assert "usage_records" in summary


@pytest.mark.asyncio
async def test_sync_all_miners_usage(billing_integration: BillingIntegration, sample_miner: Miner):
    """Test syncing usage for all miners"""
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        response = AsyncMock()
        response.json.return_value = {"status": "success", "id": "usage_123"}
        response.raise_for_status = AsyncMock()
        mock_client.return_value.__aenter__.return_value.post = AsyncMock(return_value=response)

        summary = await billing_integration.sync_all_miners_usage(hours_back=24)

        # At least the sample miner must have been processed.
        assert summary["miners_processed"] >= 1
        assert "total_usage_records" in summary
@pytest.mark.asyncio
async def test_collect_miner_usage(billing_integration: BillingIntegration, sample_miner: Miner):
    """Test collecting usage data for a miner.

    Bug fix: ``AsyncSession.run_sync`` is a coroutine function, so the
    original synchronous test received an un-awaited coroutine and the
    membership assertions below raised ``TypeError`` instead of testing
    anything.  The test is now async and awaits the call.
    """
    end_date = datetime.utcnow()
    start_date = end_date - timedelta(hours=24)
    # NOTE(review): assumes ``db`` is an AsyncSession and
    # ``_collect_miner_usage`` is synchronous (it is invoked directly inside
    # the run_sync lambda) -- confirm against the service implementation.
    usage_data = await billing_integration.db.run_sync(
        lambda sess: billing_integration._collect_miner_usage(
            sample_miner.miner_id, start_date, end_date
        )
    )
    assert "gpu_hours" in usage_data
    assert "api_calls" in usage_data
    assert "compute_hours" in usage_data
@pytest.mark.asyncio
async def test_get_billing_metrics(billing_integration: BillingIntegration):
    """Test getting billing metrics from coordinator-api"""
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        response = AsyncMock()
        response.json.return_value = {
            "totals": {"cost": 100.0, "records": 50},
            "by_resource": {"gpu_hours": {"cost": 50.0}},
        }
        response.raise_for_status = AsyncMock()
        mock_client.return_value.__aenter__.return_value.get = AsyncMock(return_value=response)

        metrics = await billing_integration.get_billing_metrics(hours=24)

        assert "totals" in metrics


@pytest.mark.asyncio
async def test_trigger_invoice_generation(billing_integration: BillingIntegration):
    """Test triggering invoice generation"""
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        response = AsyncMock()
        response.json.return_value = {
            "invoice_number": "INV-001",
            "status": "draft",
            "total_amount": 100.0,
        }
        response.raise_for_status = AsyncMock()
        mock_client.return_value.__aenter__.return_value.post = AsyncMock(return_value=response)

        # Bill the trailing 30-day period.
        period_end = datetime.utcnow()
        period_start = period_end - timedelta(days=30)
        invoice = await billing_integration.trigger_invoice_generation(
            tenant_id="tenant_001",
            period_start=period_start,
            period_end=period_end,
        )

        assert invoice["invoice_number"] == "INV-001"
def test_resource_type_mapping(billing_integration: BillingIntegration):
    """Test resource type mapping"""
    mapping = billing_integration.resource_type_mapping
    assert "gpu_hours" in mapping
    assert "storage_gb" in mapping


def test_fallback_pricing(billing_integration: BillingIntegration):
    """Test fallback pricing configuration"""
    pricing = billing_integration.fallback_pricing
    assert "gpu_hours" in pricing
    assert pricing["gpu_hours"]["unit_price"] == Decimal("0.50")

View File

@@ -0,0 +1,212 @@
"""
Integration Tests for Pool-Hub with Coordinator-API
Tests the integration between pool-hub and coordinator-api's billing system.
"""
import pytest
from datetime import datetime, timedelta
from decimal import Decimal
from sqlalchemy.orm import Session
from poolhub.models import Miner, MinerStatus, SLAMetric, CapacitySnapshot
from poolhub.services.sla_collector import SLACollector
from poolhub.services.billing_integration import BillingIntegration
@pytest.fixture
def sla_collector(db_session: Session) -> SLACollector:
    """Provide an SLACollector bound to the test database session."""
    return SLACollector(db_session)


@pytest.fixture
def billing_integration(db_session: Session) -> BillingIntegration:
    """Provide a BillingIntegration bound to the test database session."""
    return BillingIntegration(db_session)


@pytest.fixture
def sample_miner(db_session: Session) -> Miner:
    """Persist and return one representative single-GPU miner row."""
    miner_attrs = {
        "miner_id": "test_miner_001",
        "api_key_hash": "hash123",
        "addr": "127.0.0.1:8080",
        "proto": "http",
        "gpu_vram_gb": 24.0,
        "gpu_name": "RTX 4090",
        "cpu_cores": 16,
        "ram_gb": 64.0,
        "max_parallel": 4,
        "base_price": 0.50,
    }
    miner = Miner(**miner_attrs)
    db_session.add(miner)
    db_session.commit()
    return miner
@pytest.mark.asyncio
async def test_end_to_end_sla_to_billing_workflow(
    sla_collector: SLACollector,
    billing_integration: BillingIntegration,
    sample_miner: Miner,
):
    """Test end-to-end workflow from SLA collection to billing.

    Bug fix: ``record_sla_metric`` and ``get_sla_metrics`` are coroutine
    functions (the unit tests await them), so wrapping them in ``run_sync``
    handed SQLAlchemy un-awaited coroutines and the test never exercised the
    services.  They are now awaited directly, and ``run_sync`` itself is
    awaited where it wraps the synchronous ``_collect_miner_usage`` helper.
    """
    # Step 1: Collect SLA metrics
    await sla_collector.record_sla_metric(
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=98.5,
    )
    # Step 2: Verify metric was recorded
    metrics = await sla_collector.get_sla_metrics(
        miner_id=sample_miner.miner_id, hours=1
    )
    assert len(metrics) > 0
    # Step 3: Collect usage data for billing
    end_date = datetime.utcnow()
    start_date = end_date - timedelta(hours=1)
    usage_data = await billing_integration.db.run_sync(
        lambda sess: billing_integration._collect_miner_usage(
            sample_miner.miner_id, start_date, end_date
        )
    )
    assert "gpu_hours" in usage_data
    assert "api_calls" in usage_data
@pytest.mark.asyncio
async def test_capacity_snapshot_creation(sla_collector: SLACollector, sample_miner: Miner):
    """Test capacity snapshot creation for capacity planning.

    Bug fixes: ``collect_capacity_availability`` is a coroutine function and
    must be awaited (the original un-awaited ``run_sync`` wrapper returned a
    coroutine, so the subscript assertions raised ``TypeError``).  The
    snapshot query also used ``sla_collector.db.query`` -- the async session
    has no ``query``; the synchronous session passed to the ``run_sync``
    callback must be used instead.
    """
    # Create capacity snapshot
    capacity = await sla_collector.collect_capacity_availability()
    assert capacity["total_miners"] >= 1
    assert "active_miners" in capacity
    assert "capacity_availability_pct" in capacity
    # Verify the snapshot was stored: query via the sync session handed to
    # the run_sync callback.
    snapshots = await sla_collector.db.run_sync(
        lambda sess: sess.query(CapacitySnapshot)
        .order_by(CapacitySnapshot.timestamp.desc())
        .limit(1)
        .all()
    )
    assert len(snapshots) > 0
@pytest.mark.asyncio
async def test_sla_violation_billing_correlation(
    sla_collector: SLACollector,
    billing_integration: BillingIntegration,
    sample_miner: Miner,
):
    """Test correlation between SLA violations and billing.

    Bug fix: the async service methods were wrapped in ``run_sync`` from a
    synchronous test, producing un-awaited coroutines instead of results.
    They are now awaited directly from an async test.
    """
    # Record a violation (80% uptime is below the 95% default threshold).
    await sla_collector.record_sla_metric(
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=80.0,  # Below threshold
    )
    # Check violation was recorded
    violations = await sla_collector.get_sla_violations(
        miner_id=sample_miner.miner_id, resolved=False
    )
    assert len(violations) > 0
    # Usage should still be recorded even with violations
    end_date = datetime.utcnow()
    start_date = end_date - timedelta(hours=1)
    usage_data = await billing_integration.db.run_sync(
        lambda sess: billing_integration._collect_miner_usage(
            sample_miner.miner_id, start_date, end_date
        )
    )
    assert usage_data is not None
@pytest.mark.asyncio
async def test_multi_miner_sla_collection(sla_collector: SLACollector, db_session: Session):
    """Test SLA collection across multiple miners.

    Bug fix: ``collect_all_miner_metrics`` was wrapped in an un-awaited
    ``run_sync`` call from a synchronous test, so ``results`` was a coroutine
    and the subscript assertion raised ``TypeError``.  The test is now async.
    """
    # Create multiple miners
    miners = []
    for i in range(3):
        miner = Miner(
            miner_id=f"test_miner_{i:03d}",
            api_key_hash=f"hash{i}",
            addr=f"127.0.0.{i}:8080",
            proto="http",
            gpu_vram_gb=24.0,
            gpu_name="RTX 4090",
            cpu_cores=16,
            ram_gb=64.0,
            max_parallel=4,
            base_price=0.50,
        )
        db_session.add(miner)
        miners.append(miner)
    db_session.commit()
    # Collect metrics for all miners.
    # NOTE(review): assumes collect_all_miner_metrics is a coroutine function
    # like the other collect_* methods -- confirm in SLACollector.
    results = await sla_collector.collect_all_miner_metrics()
    assert results["miners_processed"] >= 3
@pytest.mark.asyncio
async def test_billing_sync_with_coordinator_api(
    billing_integration: BillingIntegration,
    sample_miner: Miner,
):
    """Test billing sync with coordinator-api (mocked).

    Bug fix: ``sync_miner_usage`` is a coroutine function (the unit tests
    await it); wrapping it in ``run_sync`` returned an un-awaited coroutine,
    so the subscript assertions below could never pass.  The test is now
    async and awaits the method directly.
    """
    from unittest.mock import AsyncMock, patch

    end_date = datetime.utcnow()
    start_date = end_date - timedelta(hours=1)
    with patch("poolhub.services.billing_integration.httpx.AsyncClient") as mock_client:
        mock_response = AsyncMock()
        mock_response.json.return_value = {"status": "success", "id": "usage_123"}
        mock_response.raise_for_status = AsyncMock()
        mock_client.return_value.__aenter__.return_value.post = AsyncMock(
            return_value=mock_response
        )
        result = await billing_integration.sync_miner_usage(
            miner_id=sample_miner.miner_id, start_date=start_date, end_date=end_date
        )
        assert result["miner_id"] == sample_miner.miner_id
        assert result["usage_records"] >= 0
def test_sla_threshold_configuration(sla_collector: SLACollector):
    """Test SLA threshold configuration"""
    # Verify default thresholds
    assert sla_collector.sla_thresholds["uptime_pct"] == 95.0
    assert sla_collector.sla_thresholds["response_time_ms"] == 1000.0
    assert sla_collector.sla_thresholds["completion_rate_pct"] == 90.0
    assert sla_collector.sla_thresholds["capacity_availability_pct"] == 80.0


@pytest.mark.asyncio
async def test_capacity_utilization_calculation(sla_collector: SLACollector, sample_miner: Miner):
    """Test capacity utilization calculation.

    Bug fix: ``collect_capacity_availability`` is a coroutine function; the
    original wrapped it in an un-awaited ``run_sync``, so ``capacity`` was a
    coroutine and the range assertion raised ``TypeError``.
    """
    capacity = await sla_collector.collect_capacity_availability()
    # Verify utilization is between 0 and 100
    assert 0 <= capacity["capacity_availability_pct"] <= 100

View File

@@ -0,0 +1,186 @@
"""
Tests for SLA Collector Service
"""
import pytest
from datetime import datetime, timedelta
from decimal import Decimal
from sqlalchemy.orm import Session
from poolhub.models import Miner, MinerStatus, SLAMetric, SLAViolation, Feedback, MatchResult
from poolhub.services.sla_collector import SLACollector
@pytest.fixture
def sla_collector(db_session: Session) -> SLACollector:
    """Provide an SLACollector bound to the test database session."""
    return SLACollector(db_session)


@pytest.fixture
def sample_miner(db_session: Session) -> Miner:
    """Persist and return one representative single-GPU miner row."""
    miner_attrs = {
        "miner_id": "test_miner_001",
        "api_key_hash": "hash123",
        "addr": "127.0.0.1:8080",
        "proto": "http",
        "gpu_vram_gb": 24.0,
        "gpu_name": "RTX 4090",
        "cpu_cores": 16,
        "ram_gb": 64.0,
        "max_parallel": 4,
        "base_price": 0.50,
    }
    miner = Miner(**miner_attrs)
    db_session.add(miner)
    db_session.commit()
    return miner


@pytest.fixture
def sample_miner_status(db_session: Session, sample_miner: Miner) -> MinerStatus:
    """Persist a healthy, recently-heartbeating status row for the miner."""
    status = MinerStatus(
        miner_id=sample_miner.miner_id,
        queue_len=2,
        busy=False,
        avg_latency_ms=150,
        temp_c=65,
        mem_free_gb=32.0,
        last_heartbeat_at=datetime.utcnow(),
    )
    db_session.add(status)
    db_session.commit()
    return status
@pytest.mark.asyncio
async def test_record_sla_metric(sla_collector: SLACollector, sample_miner: Miner):
    """Test recording an SLA metric.

    Improvement: ``== False`` replaced with a truthiness assertion
    (PEP 8 / flake8 E712).
    """
    metric = await sla_collector.record_sla_metric(
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=98.5,
        metadata={"test": "true"},
    )
    assert metric.miner_id == sample_miner.miner_id
    assert metric.metric_type == "uptime_pct"
    assert metric.metric_value == 98.5
    # 98.5% uptime is above the default 95% threshold -> no violation.
    assert not metric.is_violation


@pytest.mark.asyncio
async def test_record_sla_metric_violation(sla_collector: SLACollector, sample_miner: Miner):
    """Test recording an SLA metric that violates threshold.

    Improvement: ``== True`` replaced with a truthiness assertion
    (PEP 8 / flake8 E712).
    """
    metric = await sla_collector.record_sla_metric(
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=80.0,  # Below threshold of 95%
        metadata={"test": "true"},
    )
    assert metric.is_violation
    # Check violation was recorded
    violations = await sla_collector.get_sla_violations(
        miner_id=sample_miner.miner_id, resolved=False
    )
    assert len(violations) > 0
    assert violations[0].violation_type == "uptime_pct"
@pytest.mark.asyncio
async def test_collect_miner_uptime(sla_collector: SLACollector, sample_miner_status: MinerStatus):
    """Test collecting miner uptime"""
    uptime_pct = await sla_collector.collect_miner_uptime(sample_miner_status.miner_id)
    assert uptime_pct is not None
    # Uptime is a percentage and must stay within [0, 100].
    assert 0 <= uptime_pct <= 100


@pytest.mark.asyncio
async def test_collect_response_time_no_results(sla_collector: SLACollector, sample_miner: Miner):
    """Test collecting response time when no match results exist"""
    # With no MatchResult rows, there is nothing to aggregate.
    assert await sla_collector.collect_response_time(sample_miner.miner_id) is None


@pytest.mark.asyncio
async def test_collect_completion_rate_no_feedback(sla_collector: SLACollector, sample_miner: Miner):
    """Test collecting completion rate when no feedback exists"""
    # With no Feedback rows, there is nothing to aggregate.
    assert await sla_collector.collect_completion_rate(sample_miner.miner_id) is None


@pytest.mark.asyncio
async def test_collect_capacity_availability(sla_collector: SLACollector):
    """Test collecting capacity availability"""
    capacity = await sla_collector.collect_capacity_availability()
    for key in ("total_miners", "active_miners", "capacity_availability_pct"):
        assert key in capacity
@pytest.mark.asyncio
async def test_get_sla_metrics(sla_collector: SLACollector, sample_miner: Miner):
    """Test getting SLA metrics"""
    # Seed one metric so the query has something to return.
    await sla_collector.record_sla_metric(
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=98.5,
    )
    recorded = await sla_collector.get_sla_metrics(
        miner_id=sample_miner.miner_id, hours=24
    )
    assert len(recorded) > 0
    assert recorded[0].miner_id == sample_miner.miner_id


@pytest.mark.asyncio
async def test_get_sla_violations(sla_collector: SLACollector, sample_miner: Miner):
    """Test getting SLA violations"""
    # Seed a below-threshold metric so a violation exists.
    await sla_collector.record_sla_metric(
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=80.0,  # Below threshold
    )
    open_violations = await sla_collector.get_sla_violations(
        miner_id=sample_miner.miner_id, resolved=False
    )
    assert len(open_violations) > 0
def test_check_violation_uptime_below_threshold(sla_collector: SLACollector):
    """Test violation check for uptime below threshold.

    Improvement: ``== True/False`` comparisons replaced with truthiness
    assertions (PEP 8 / flake8 E712).
    """
    assert sla_collector._check_violation("uptime_pct", 90.0, 95.0)


def test_check_violation_uptime_above_threshold(sla_collector: SLACollector):
    """Test violation check for uptime above threshold"""
    assert not sla_collector._check_violation("uptime_pct", 98.0, 95.0)


def test_check_violation_response_time_above_threshold(sla_collector: SLACollector):
    """Test violation check for response time above threshold.

    ``_check_violation`` is synchronous, so the async wrapper and
    ``pytest.mark.asyncio`` marker on the original tests were unnecessary;
    these are now plain sync tests, consistent with the uptime checks above.
    """
    assert sla_collector._check_violation("response_time_ms", 2000.0, 1000.0)


def test_check_violation_response_time_below_threshold(sla_collector: SLACollector):
    """Test violation check for response time below threshold"""
    assert not sla_collector._check_violation("response_time_ms", 500.0, 1000.0)

View File

@@ -0,0 +1,216 @@
"""
Tests for SLA API Endpoints
"""
import pytest
from datetime import datetime, timedelta
from decimal import Decimal
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from poolhub.models import Miner, MinerStatus, SLAMetric
from poolhub.app.routers.sla import router
from poolhub.database import get_db
@pytest.fixture
def test_client(db_session: Session):
    """Build a minimal FastAPI app hosting only the SLA router and return a
    TestClient for it, with the DB dependency overridden to the test session."""
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(router)

    def _use_test_session():
        # Hand out the shared test session; its lifecycle is owned by the
        # db_session fixture, so there is nothing to clean up here.
        try:
            yield db_session
        finally:
            pass

    app.dependency_overrides[get_db] = _use_test_session
    return TestClient(app)
@pytest.fixture
def sample_miner(db_session: Session) -> Miner:
    """Create sample miner fixture"""
    # Representative single-GPU miner; values mirror the other test modules.
    miner = Miner(
        miner_id="test_miner_001",
        api_key_hash="hash123",
        addr="127.0.0.1:8080",
        proto="http",
        gpu_vram_gb=24.0,
        gpu_name="RTX 4090",
        cpu_cores=16,
        ram_gb=64.0,
        max_parallel=4,
        base_price=0.50,
    )
    db_session.add(miner)
    db_session.commit()
    return miner


@pytest.fixture
def sample_sla_metric(db_session: Session, sample_miner: Miner) -> SLAMetric:
    """Create sample SLA metric fixture"""
    from uuid import uuid4

    # 98.5% uptime against a 95% threshold -> deliberately NOT a violation.
    metric = SLAMetric(
        id=uuid4(),
        miner_id=sample_miner.miner_id,
        metric_type="uptime_pct",
        metric_value=98.5,
        threshold=95.0,
        is_violation=False,
        timestamp=datetime.utcnow(),
        # NOTE(review): ``metadata`` is a reserved attribute on SQLAlchemy
        # declarative models -- confirm SLAMetric maps this column under a
        # different attribute name, otherwise this kwarg will raise.
        metadata={"test": "true"},
    )
    db_session.add(metric)
    db_session.commit()
    return metric
def test_get_miner_sla_metrics(test_client: TestClient, sample_sla_metric: SLAMetric):
    """Test getting SLA metrics for a specific miner"""
    resp = test_client.get(f"/sla/metrics/{sample_sla_metric.miner_id}?hours=24")
    assert resp.status_code == 200
    body = resp.json()
    assert len(body) > 0
    assert body[0]["miner_id"] == sample_sla_metric.miner_id


def test_get_all_sla_metrics(test_client: TestClient, sample_sla_metric: SLAMetric):
    """Test getting SLA metrics across all miners"""
    resp = test_client.get("/sla/metrics?hours=24")
    assert resp.status_code == 200
    # The seeded metric guarantees a non-empty listing.
    assert len(resp.json()) > 0


def test_get_sla_violations(test_client: TestClient, sample_miner: Miner):
    """Test getting SLA violations"""
    resp = test_client.get("/sla/violations?resolved=false")
    assert resp.status_code == 200
    assert isinstance(resp.json(), list)
def test_collect_sla_metrics(test_client: TestClient):
    """Test triggering SLA metrics collection"""
    resp = test_client.post("/sla/metrics/collect")
    assert resp.status_code == 200
    assert "miners_processed" in resp.json()


def test_get_capacity_snapshots(test_client: TestClient):
    """Test getting capacity planning snapshots"""
    resp = test_client.get("/sla/capacity/snapshots?hours=24")
    assert resp.status_code == 200
    assert isinstance(resp.json(), list)


def test_get_capacity_forecast(test_client: TestClient):
    """Test getting capacity forecast"""
    # 168 hours = one week ahead.
    resp = test_client.get("/sla/capacity/forecast?hours_ahead=168")
    assert resp.status_code == 200
    body = resp.json()
    assert "forecast_horizon_hours" in body
    assert "current_capacity" in body


def test_get_scaling_recommendations(test_client: TestClient):
    """Test getting scaling recommendations"""
    resp = test_client.get("/sla/capacity/recommendations")
    assert resp.status_code == 200
    body = resp.json()
    assert "current_state" in body
    assert "recommendations" in body
def test_configure_capacity_alerts(test_client: TestClient):
    """Test configuring capacity alerts"""
    payload = {
        "threshold_pct": 80.0,
        "notification_email": "admin@example.com",
    }
    resp = test_client.post("/sla/capacity/alerts/configure", json=payload)
    assert resp.status_code == 200
    assert resp.json()["status"] == "configured"
def test_get_billing_usage(test_client: TestClient):
    """Test getting billing usage data"""
    resp = test_client.get("/sla/billing/usage?hours=24")
    # This may fail if coordinator-api is not available
    # For now, we expect either 200 or 500
    assert resp.status_code in (200, 500)


def test_sync_billing_usage(test_client: TestClient):
    """Test triggering billing sync"""
    payload = {"hours_back": 24}
    resp = test_client.post("/sla/billing/sync", json=payload)
    # This may fail if coordinator-api is not available
    # For now, we expect either 200 or 500
    assert resp.status_code in (200, 500)


def test_record_usage(test_client: TestClient):
    """Test recording a single usage event"""
    payload = {
        "tenant_id": "tenant_001",
        "resource_type": "gpu_hours",
        "quantity": 10.5,
        "unit_price": 0.50,
        "job_id": "job_123",
    }
    resp = test_client.post("/sla/billing/usage/record", json=payload)
    # This may fail if coordinator-api is not available
    # For now, we expect either 200 or 500
    assert resp.status_code in (200, 500)


def test_generate_invoice(test_client: TestClient):
    """Test triggering invoice generation"""
    period_end = datetime.utcnow()
    period_start = period_end - timedelta(days=30)
    payload = {
        "tenant_id": "tenant_001",
        "period_start": period_start.isoformat(),
        "period_end": period_end.isoformat(),
    }
    resp = test_client.post("/sla/billing/invoice/generate", json=payload)
    # This may fail if coordinator-api is not available
    # For now, we expect either 200 or 500
    assert resp.status_code in (200, 500)
def test_get_sla_status(test_client: TestClient):
    """Test getting overall SLA status"""
    resp = test_client.get("/sla/status")
    assert resp.status_code == 200
    body = resp.json()
    for key in ("status", "active_violations", "timestamp"):
        assert key in body