feat: add GPU-specific fields to marketplace offers and create dedicated GPU marketplace router

- Add GPU fields (model, memory, count, CUDA version, price, region) to MarketplaceOffer model
- Create new marketplace_gpu router for GPU-specific operations
- Update offer sync to populate GPU fields from miner capabilities
- Move GPU attributes from generic attributes dict to dedicated fields
- Update MarketplaceOfferView schema with GPU fields
- Expand CLI README with comprehensive documentation and
This commit is contained in:
oib
2026-02-12 19:08:17 +01:00
parent 76a2fc9b6d
commit 5120861e17
57 changed files with 11720 additions and 131 deletions

1
.aitbc.yaml Normal file
View File

@@ -0,0 +1 @@
coordinator_url: http://127.0.0.1:18000

50
.github/workflows/cli-tests.yml vendored Normal file
View File

@@ -0,0 +1,50 @@
name: CLI Tests
on:
push:
branches: [main]
paths:
- 'cli/**'
- 'tests/cli/**'
pull_request:
branches: [main]
paths:
- 'cli/**'
- 'tests/cli/**'
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -e .
pip install pytest pytest-cov
- name: Run CLI tests
run: |
python -m pytest tests/cli/ -v --tb=short --disable-warnings
- name: Run CLI tests with coverage
if: matrix.python-version == '3.11'
run: |
python -m pytest tests/cli/ --cov=aitbc_cli --cov-report=term-missing --cov-report=xml
- name: Upload coverage
if: matrix.python-version == '3.11'
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: coverage.xml

View File

@@ -17,6 +17,13 @@ class MarketplaceOffer(SQLModel, table=True):
status: str = Field(default="open", max_length=20)
created_at: datetime = Field(default_factory=datetime.utcnow, nullable=False, index=True)
attributes: dict = Field(default_factory=dict, sa_column=Column(JSON, nullable=False))
# GPU-specific fields
gpu_model: Optional[str] = Field(default=None, index=True)
gpu_memory_gb: Optional[int] = Field(default=None)
gpu_count: Optional[int] = Field(default=1)
cuda_version: Optional[str] = Field(default=None)
price_per_hour: Optional[float] = Field(default=None)
region: Optional[str] = Field(default=None, index=True)
class MarketplaceBid(SQLModel, table=True):

View File

@@ -10,6 +10,7 @@ from .routers import (
miner,
admin,
marketplace,
marketplace_gpu,
exchange,
users,
services,
@@ -18,7 +19,6 @@ from .routers import (
explorer,
payments,
)
from .routers import zk_applications
from .routers.governance import router as governance
from .routers.partners import router as partners
from .storage.models_governance import GovernanceProposal, ProposalVote, TreasuryTransaction, GovernanceParameter
@@ -46,6 +46,7 @@ def create_app() -> FastAPI:
app.include_router(miner, prefix="/v1")
app.include_router(admin, prefix="/v1")
app.include_router(marketplace, prefix="/v1")
app.include_router(marketplace_gpu, prefix="/v1")
app.include_router(exchange, prefix="/v1")
app.include_router(users, prefix="/v1/users")
app.include_router(services, prefix="/v1")

View File

@@ -4,6 +4,7 @@ from .client import router as client
from .miner import router as miner
from .admin import router as admin
from .marketplace import router as marketplace
from .marketplace_gpu import router as marketplace_gpu
from .explorer import router as explorer
from .services import router as services
from .users import router as users
@@ -12,4 +13,4 @@ from .marketplace_offers import router as marketplace_offers
from .payments import router as payments
# from .registry import router as registry
__all__ = ["client", "miner", "admin", "marketplace", "explorer", "services", "users", "exchange", "marketplace_offers", "payments", "registry"]
__all__ = ["client", "miner", "admin", "marketplace", "marketplace_gpu", "explorer", "services", "users", "exchange", "marketplace_offers", "payments", "registry"]

View File

@@ -0,0 +1,387 @@
"""
GPU-specific marketplace endpoints to support CLI commands
Quick implementation with mock data to make CLI functional
"""
from typing import Any, Dict, List, Optional
from datetime import datetime, timedelta
from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi import status as http_status
from pydantic import BaseModel, Field
from ..storage import SessionDep
router = APIRouter(tags=["marketplace-gpu"])
# In-memory storage for bookings (quick fix)
gpu_bookings: Dict[str, Dict] = {}
gpu_reviews: Dict[str, List[Dict]] = {}
gpu_counter = 1
# Mock GPU data
mock_gpus = [
{
"id": "gpu_001",
"miner_id": "miner_001",
"model": "RTX 4090",
"memory_gb": 24,
"cuda_version": "12.0",
"region": "us-west",
"price_per_hour": 0.50,
"status": "available",
"capabilities": ["llama2-7b", "stable-diffusion-xl", "gpt-j"],
"created_at": "2025-12-28T10:00:00Z",
"average_rating": 4.5,
"total_reviews": 12
},
{
"id": "gpu_002",
"miner_id": "miner_002",
"model": "RTX 3080",
"memory_gb": 16,
"cuda_version": "11.8",
"region": "us-east",
"price_per_hour": 0.35,
"status": "available",
"capabilities": ["llama2-13b", "gpt-j"],
"created_at": "2025-12-28T09:30:00Z",
"average_rating": 4.2,
"total_reviews": 8
},
{
"id": "gpu_003",
"miner_id": "miner_003",
"model": "A100",
"memory_gb": 40,
"cuda_version": "12.0",
"region": "eu-west",
"price_per_hour": 1.20,
"status": "booked",
"capabilities": ["gpt-4", "claude-2", "llama2-70b"],
"created_at": "2025-12-28T08:00:00Z",
"average_rating": 4.8,
"total_reviews": 25
}
]
# Initialize some reviews
gpu_reviews = {
"gpu_001": [
{"rating": 5, "comment": "Excellent performance!", "user": "client_001", "date": "2025-12-27"},
{"rating": 4, "comment": "Good value for money", "user": "client_002", "date": "2025-12-26"}
],
"gpu_002": [
{"rating": 4, "comment": "Solid GPU for smaller models", "user": "client_003", "date": "2025-12-27"}
],
"gpu_003": [
{"rating": 5, "comment": "Perfect for large models", "user": "client_004", "date": "2025-12-27"},
{"rating": 5, "comment": "Fast and reliable", "user": "client_005", "date": "2025-12-26"}
]
}
class GPURegisterRequest(BaseModel):
miner_id: str
model: str
memory_gb: int
cuda_version: str
region: str
price_per_hour: float
capabilities: List[str]
class GPUBookRequest(BaseModel):
duration_hours: float
job_id: Optional[str] = None
class GPUReviewRequest(BaseModel):
rating: int = Field(ge=1, le=5)
comment: str
@router.post("/marketplace/gpu/register")
async def register_gpu(
request: Dict[str, Any],
session: SessionDep
) -> Dict[str, Any]:
"""Register a GPU in the marketplace"""
global gpu_counter
# Extract GPU specs from the request
gpu_specs = request.get("gpu", {})
gpu_id = f"gpu_{gpu_counter:03d}"
gpu_counter += 1
new_gpu = {
"id": gpu_id,
"miner_id": gpu_specs.get("miner_id", f"miner_{gpu_counter:03d}"),
"model": gpu_specs.get("name", "Unknown GPU"),
"memory_gb": gpu_specs.get("memory", 0),
"cuda_version": gpu_specs.get("cuda_version", "Unknown"),
"region": gpu_specs.get("region", "unknown"),
"price_per_hour": gpu_specs.get("price_per_hour", 0.0),
"status": "available",
"capabilities": gpu_specs.get("capabilities", []),
"created_at": datetime.utcnow().isoformat() + "Z",
"average_rating": 0.0,
"total_reviews": 0
}
mock_gpus.append(new_gpu)
gpu_reviews[gpu_id] = []
return {
"gpu_id": gpu_id,
"status": "registered",
"message": f"GPU {gpu_specs.get('name', 'Unknown')} registered successfully"
}
@router.get("/marketplace/gpu/list")
async def list_gpus(
available: Optional[bool] = Query(default=None),
price_max: Optional[float] = Query(default=None),
region: Optional[str] = Query(default=None),
model: Optional[str] = Query(default=None),
limit: int = Query(default=100, ge=1, le=500)
) -> List[Dict[str, Any]]:
"""List available GPUs"""
filtered_gpus = mock_gpus.copy()
# Apply filters
if available is not None:
filtered_gpus = [g for g in filtered_gpus if g["status"] == ("available" if available else "booked")]
if price_max is not None:
filtered_gpus = [g for g in filtered_gpus if g["price_per_hour"] <= price_max]
if region:
filtered_gpus = [g for g in filtered_gpus if g["region"].lower() == region.lower()]
if model:
filtered_gpus = [g for g in filtered_gpus if model.lower() in g["model"].lower()]
return filtered_gpus[:limit]
@router.get("/marketplace/gpu/{gpu_id}")
async def get_gpu_details(gpu_id: str) -> Dict[str, Any]:
"""Get GPU details"""
gpu = next((g for g in mock_gpus if g["id"] == gpu_id), None)
if not gpu:
raise HTTPException(
status_code=http_status.HTTP_404_NOT_FOUND,
detail=f"GPU {gpu_id} not found"
)
# Add booking info if booked
if gpu["status"] == "booked" and gpu_id in gpu_bookings:
gpu["current_booking"] = gpu_bookings[gpu_id]
return gpu
@router.post("/marketplace/gpu/{gpu_id}/book", status_code=http_status.HTTP_201_CREATED)
async def book_gpu(gpu_id: str, request: GPUBookRequest) -> Dict[str, Any]:
"""Book a GPU"""
gpu = next((g for g in mock_gpus if g["id"] == gpu_id), None)
if not gpu:
raise HTTPException(
status_code=http_status.HTTP_404_NOT_FOUND,
detail=f"GPU {gpu_id} not found"
)
if gpu["status"] != "available":
raise HTTPException(
status_code=http_status.HTTP_409_CONFLICT,
detail=f"GPU {gpu_id} is not available"
)
# Create booking
booking_id = f"booking_{gpu_id}_{int(datetime.utcnow().timestamp())}"
start_time = datetime.utcnow()
end_time = start_time + timedelta(hours=request.duration_hours)
booking = {
"booking_id": booking_id,
"gpu_id": gpu_id,
"duration_hours": request.duration_hours,
"job_id": request.job_id,
"start_time": start_time.isoformat() + "Z",
"end_time": end_time.isoformat() + "Z",
"total_cost": request.duration_hours * gpu["price_per_hour"],
"status": "active"
}
# Update GPU status
gpu["status"] = "booked"
gpu_bookings[gpu_id] = booking
return {
"booking_id": booking_id,
"gpu_id": gpu_id,
"status": "booked",
"total_cost": booking["total_cost"],
"start_time": booking["start_time"],
"end_time": booking["end_time"]
}
@router.post("/marketplace/gpu/{gpu_id}/release")
async def release_gpu(gpu_id: str) -> Dict[str, Any]:
"""Release a booked GPU"""
gpu = next((g for g in mock_gpus if g["id"] == gpu_id), None)
if not gpu:
raise HTTPException(
status_code=http_status.HTTP_404_NOT_FOUND,
detail=f"GPU {gpu_id} not found"
)
if gpu["status"] != "booked":
raise HTTPException(
status_code=http_status.HTTP_400_BAD_REQUEST,
detail=f"GPU {gpu_id} is not booked"
)
# Get booking info for refund calculation
booking = gpu_bookings.get(gpu_id, {})
refund = 0.0
if booking:
# Calculate refund (simplified - 50% if released early)
refund = booking.get("total_cost", 0.0) * 0.5
del gpu_bookings[gpu_id]
# Update GPU status
gpu["status"] = "available"
return {
"status": "released",
"gpu_id": gpu_id,
"refund": refund,
"message": f"GPU {gpu_id} released successfully"
}
@router.get("/marketplace/gpu/{gpu_id}/reviews")
async def get_gpu_reviews(
gpu_id: str,
limit: int = Query(default=10, ge=1, le=100)
) -> Dict[str, Any]:
"""Get GPU reviews"""
gpu = next((g for g in mock_gpus if g["id"] == gpu_id), None)
if not gpu:
raise HTTPException(
status_code=http_status.HTTP_404_NOT_FOUND,
detail=f"GPU {gpu_id} not found"
)
reviews = gpu_reviews.get(gpu_id, [])
return {
"gpu_id": gpu_id,
"average_rating": gpu["average_rating"],
"total_reviews": gpu["total_reviews"],
"reviews": reviews[:limit]
}
@router.post("/marketplace/gpu/{gpu_id}/reviews", status_code=http_status.HTTP_201_CREATED)
async def add_gpu_review(gpu_id: str, request: GPUReviewRequest) -> Dict[str, Any]:
"""Add a review for a GPU"""
gpu = next((g for g in mock_gpus if g["id"] == gpu_id), None)
if not gpu:
raise HTTPException(
status_code=http_status.HTTP_404_NOT_FOUND,
detail=f"GPU {gpu_id} not found"
)
# Add review
review = {
"rating": request.rating,
"comment": request.comment,
"user": "current_user", # Would get from auth context
"date": datetime.utcnow().isoformat() + "Z"
}
if gpu_id not in gpu_reviews:
gpu_reviews[gpu_id] = []
gpu_reviews[gpu_id].append(review)
# Update average rating
all_reviews = gpu_reviews[gpu_id]
gpu["average_rating"] = sum(r["rating"] for r in all_reviews) / len(all_reviews)
gpu["total_reviews"] = len(all_reviews)
return {
"status": "review_added",
"gpu_id": gpu_id,
"review_id": f"review_{len(all_reviews)}",
"average_rating": gpu["average_rating"]
}
@router.get("/marketplace/orders")
async def list_orders(
status: Optional[str] = Query(default=None),
limit: int = Query(default=100, ge=1, le=500)
) -> List[Dict[str, Any]]:
"""List orders (bookings)"""
orders = []
for gpu_id, booking in gpu_bookings.items():
gpu = next((g for g in mock_gpus if g["id"] == gpu_id), None)
if gpu:
order = {
"order_id": booking["booking_id"],
"gpu_id": gpu_id,
"gpu_model": gpu["model"],
"miner_id": gpu["miner_id"],
"duration_hours": booking["duration_hours"],
"total_cost": booking["total_cost"],
"status": booking["status"],
"created_at": booking["start_time"],
"job_id": booking.get("job_id")
}
orders.append(order)
if status:
orders = [o for o in orders if o["status"] == status]
return orders[:limit]
@router.get("/marketplace/pricing/{model}")
async def get_pricing(model: str) -> Dict[str, Any]:
"""Get pricing information for a model"""
# Find GPUs that support this model
compatible_gpus = [
gpu for gpu in mock_gpus
if any(model.lower() in cap.lower() for cap in gpu["capabilities"])
]
if not compatible_gpus:
raise HTTPException(
status_code=http_status.HTTP_404_NOT_FOUND,
detail=f"No GPUs found for model {model}"
)
prices = [gpu["price_per_hour"] for gpu in compatible_gpus]
return {
"model": model,
"min_price": min(prices),
"max_price": max(prices),
"average_price": sum(prices) / len(prices),
"available_gpus": len([g for g in compatible_gpus if g["status"] == "available"]),
"total_gpus": len(compatible_gpus),
"recommended_gpu": min(compatible_gpus, key=lambda x: x["price_per_hour"])["id"]
}

View File

@@ -40,12 +40,14 @@ async def sync_offers(
provider=miner.id,
capacity=miner.concurrency or 1,
price=capabilities.get("pricing_per_hour", 0.50),
gpu_model=capabilities.get("gpu", None),
gpu_memory_gb=capabilities.get("gpu_memory_gb", None),
gpu_count=capabilities.get("gpu_count", 1),
cuda_version=capabilities.get("cuda_version", None),
price_per_hour=capabilities.get("pricing_per_hour", 0.50),
region=miner.region or None,
attributes={
"gpu_model": capabilities.get("gpu", "Unknown GPU"),
"gpu_memory_gb": capabilities.get("gpu_memory_gb", 0),
"cuda_version": capabilities.get("cuda_version", "Unknown"),
"supported_models": capabilities.get("supported_models", []),
"region": miner.region or "unknown"
}
)

View File

@@ -190,6 +190,12 @@ class MarketplaceOfferView(BaseModel):
sla: str
status: str
created_at: datetime
gpu_model: Optional[str] = None
gpu_memory_gb: Optional[int] = None
gpu_count: Optional[int] = 1
cuda_version: Optional[str] = None
price_per_hour: Optional[float] = None
region: Optional[str] = None
class MarketplaceStatsView(BaseModel):

View File

@@ -100,4 +100,10 @@ class MarketplaceService:
sla=offer.sla,
status=offer.status.value,
created_at=offer.created_at,
gpu_model=offer.gpu_model,
gpu_memory_gb=offer.gpu_memory_gb,
gpu_count=offer.gpu_count,
cuda_version=offer.cuda_version,
price_per_hour=offer.price_per_hour,
region=offer.region,
)

View File

@@ -1,155 +1,344 @@
# AITBC CLI Tools
# AITBC CLI - Command Line Interface
Command-line tools for interacting with the AITBC network without using the web frontend.
A powerful and comprehensive command-line interface for interacting with the AITBC (AI Training & Blockchain Computing) network.
## Tools
### 1. Client CLI (`client.py`)
Submit jobs and check their status.
## Installation
```bash
# Submit an inference job
python3 client.py submit inference --model llama-2-7b --prompt "What is AITBC?"
# Clone the repository
git clone https://github.com/aitbc/aitbc.git
cd aitbc
# Check job status
python3 client.py status <job_id>
# Install in development mode
pip install -e .
# List recent blocks
python3 client.py blocks --limit 5
# Submit a quick demo job
python3 client.py demo
```
### 2. Miner CLI (`miner.py`)
Register as a miner, poll for jobs, and earn AITBC.
```bash
# Register as a miner
python3 miner.py register --gpu "RTX 4060 Ti" --memory 16
# Poll for a single job
python3 miner.py poll --wait 5
# Mine continuously (process jobs as they come)
python3 miner.py mine --jobs 10
# Send heartbeat to coordinator
python3 miner.py heartbeat
```
### 3. Wallet CLI (`wallet.py`)
Track your AITBC earnings and manage your wallet.
```bash
# Check balance
python3 wallet.py balance
# Show transaction history
python3 wallet.py history --limit 10
# Add earnings (after completing a job)
python3 wallet.py earn 10.0 --job abc123 --desc "Inference task"
# Spend AITBC
python3 wallet.py spend 5.0 "Coffee break"
# Show wallet address
python3 wallet.py address
```
## GPU Testing
Before mining, verify your GPU is accessible:
```bash
# Quick GPU check
python3 test_gpu_access.py
# Comprehensive GPU test
python3 gpu_test.py
# Test miner with GPU
python3 miner_gpu_test.py --full
# Or install from PyPI (when published)
pip install aitbc-cli
```
## Quick Start
1. **Start the SSH tunnel to remote server** (if not already running):
1. **Set up your API key**:
```bash
cd /home/oib/windsurf/aitbc
./scripts/start_remote_tunnel.sh
export CLIENT_API_KEY=your_api_key_here
# Or save permanently
aitbc config set api_key your_api_key_here
```
2. **Run the complete workflow test**:
2. **Check your wallet**:
```bash
cd /home/oib/windsurf/aitbc/cli
python3 test_workflow.py
aitbc wallet balance
```
3. **Start mining continuously**:
3. **Submit your first job**:
```bash
# Terminal 1: Start mining
python3 miner.py mine
aitbc client submit inference --prompt "What is AI?" --model gpt-4
```
# Terminal 2: Submit jobs
python3 client.py submit training --model "stable-diffusion"
## Features
- 🚀 **Fast & Efficient**: Optimized for speed with minimal overhead
- 🎨 **Rich Output**: Beautiful tables, JSON, and YAML output formats
- 🔐 **Secure**: Built-in credential management with keyring
- 📊 **Comprehensive**: 40+ commands covering all aspects of the network
- 🧪 **Testing Ready**: Full simulation environment for testing
- 🔧 **Extensible**: Easy to add new commands and features
## Command Groups
### Client Operations
Submit and manage inference jobs:
```bash
aitbc client submit inference --prompt "Your prompt here" --model gpt-4
aitbc client status <job_id>
aitbc client history --status completed
```
### Mining Operations
Register as a miner and process jobs:
```bash
aitbc miner register --gpu-model RTX4090 --memory 24 --price 0.5
aitbc miner poll --interval 5
```
### Wallet Management
Manage your AITBC tokens:
```bash
aitbc wallet balance
aitbc wallet send <address> <amount>
aitbc wallet history
```
### Authentication
Manage API keys and authentication:
```bash
aitbc auth login your_api_key
aitbc auth status
aitbc auth keys create --name "My Key"
```
### Blockchain Queries
Query blockchain information:
```bash
aitbc blockchain blocks --limit 10
aitbc blockchain transaction <tx_hash>
aitbc blockchain sync-status
```
### Marketplace
GPU marketplace operations:
```bash
aitbc marketplace gpu list --available
aitbc marketplace gpu book <gpu_id> --hours 2
aitbc marketplace reviews <gpu_id>
```
### System Administration
Admin operations (requires admin privileges):
```bash
aitbc admin status
aitbc admin analytics --period 24h
aitbc admin logs --component coordinator
```
### Configuration
Manage CLI configuration:
```bash
aitbc config show
aitbc config set coordinator_url http://localhost:8000
aitbc config profiles save production
```
### Simulation
Test and simulate operations:
```bash
aitbc simulate init --distribute 10000,5000
aitbc simulate user create --type client --name testuser
aitbc simulate workflow --jobs 10
```
## Output Formats
All commands support multiple output formats:
```bash
# Table format (default)
aitbc wallet balance
# JSON format
aitbc --output json wallet balance
# YAML format
aitbc --output yaml wallet balance
```
## Global Options
These options can be used with any command:
- `--url TEXT`: Override coordinator URL
- `--api-key TEXT`: Override API key
- `--output [table|json|yaml]`: Output format
- `-v, --verbose`: Increase verbosity (use -vv, -vvv for more)
- `--debug`: Enable debug mode
- `--config-file TEXT`: Path to config file
- `--help`: Show help
- `--version`: Show version
## Shell Completion
Enable tab completion for bash/zsh:
```bash
# For bash
echo 'source /path/to/aitbc_shell_completion.sh' >> ~/.bashrc
source ~/.bashrc
# For zsh
echo 'source /path/to/aitbc_shell_completion.sh' >> ~/.zshrc
source ~/.zshrc
```
## Configuration
All tools default to connecting to `http://localhost:8001` (the remote server via SSH tunnel). You can override this:
The CLI can be configured in multiple ways:
1. **Environment variables**:
```bash
python3 client.py --url http://localhost:8000 --api-key your_key submit inference
export CLIENT_API_KEY=your_key
export AITBC_COORDINATOR_URL=http://localhost:8000
export AITBC_OUTPUT_FORMAT=json
```
Default credentials:
- Client API Key: `${CLIENT_API_KEY}`
- Miner API Key: `${MINER_API_KEY}`
2. **Config file**:
```bash
aitbc config set coordinator_url http://localhost:8000
aitbc config set api_key your_key
```
3. **Profiles**:
```bash
# Save a profile
aitbc config profiles save production
# Switch profiles
aitbc config profiles load production
```
## Examples
### Submit and Process a Job
### Basic Workflow
```bash
# 1. Submit a job
JOB_ID=$(python3 client.py submit inference --prompt "Test" | grep "Job ID" | cut -d' ' -f4)
# 1. Configure
export CLIENT_API_KEY=your_key
# 2. In another terminal, mine it
python3 miner.py poll
# 2. Check balance
aitbc wallet balance
# 3. Check the result
python3 client.py status $JOB_ID
# 3. Submit job
job_id=$(aitbc --output json client submit inference --prompt "What is AI?" | jq -r '.job_id')
# 4. See it in the blockchain
python3 client.py blocks
# 4. Monitor progress
watch -n 5 "aitbc client status $job_id"
# 5. Get results
aitbc client receipts --job-id $job_id
```
### Continuous Mining
### Mining Setup
```bash
# Register and start mining
python3 miner.py register
python3 miner.py mine --jobs 5
# 1. Register as miner
aitbc miner register \
--gpu-model RTX4090 \
--memory 24 \
--price 0.5 \
--region us-west
# In another terminal, submit multiple jobs
for i in {1..5}; do
python3 client.py submit inference --prompt "Job $i"
sleep 1
done
# 2. Start mining
aitbc miner poll --interval 5
# 3. Check earnings
aitbc wallet earn
```
## Tips
### Using the Marketplace
- The wallet is stored in `~/.aitbc_wallet.json`
- Jobs appear as blocks immediately when created
- The proposer is assigned when a miner polls for the job
- Use `--help` with any command to see all options
- Mining earnings are added manually for now (will be automatic in production)
```bash
# 1. Find available GPUs
aitbc marketplace gpu list --available --price-max 1.0
# 2. Book a GPU
gpu_id=$(aitbc marketplace gpu list --available --output json | jq -r '.[0].id')
aitbc marketplace gpu book $gpu_id --hours 4
# 3. Use it for your job
aitbc client submit inference \
--prompt "Generate an image of a sunset" \
--model stable-diffusion \
--gpu $gpu_id
# 4. Release when done
aitbc marketplace gpu release $gpu_id
```
### Testing with Simulation
```bash
# 1. Initialize test environment
aitbc simulate init --distribute 10000,5000
# 2. Create test users
aitbc simulate user create --type client --name alice --balance 1000
aitbc simulate user create --type miner --name bob --balance 500
# 3. Run workflow simulation
aitbc simulate workflow --jobs 10 --rounds 3
# 4. Check results
aitbc simulate results sim_123
```
## Troubleshooting
- If you get "No jobs available", make sure a job was submitted recently
- If registration fails, check the coordinator is running and API key is correct
- If the tunnel is down, restart it with `./scripts/start_remote_tunnel.sh`
### Common Issues
1. **"API key not found"**
```bash
export CLIENT_API_KEY=your_key
# or
aitbc auth login your_key
```
2. **"Connection refused"**
```bash
# Check coordinator URL
aitbc config show
# Update if needed
aitbc config set coordinator_url http://localhost:8000
```
3. **"Permission denied"**
```bash
# Check key permissions
aitbc auth status
# Refresh if needed
aitbc auth refresh
```
### Debug Mode
Enable debug mode for detailed error information:
```bash
aitbc --debug client status <job_id>
```
### Verbose Output
Increase verbosity for more information:
```bash
aitbc -vvv wallet balance
```
## Contributing
We welcome contributions! Please see our [Contributing Guide](../CONTRIBUTING.md) for details.
### Development Setup
```bash
# Clone the repository
git clone https://github.com/aitbc/aitbc.git
cd aitbc
# Create virtual environment
python -m venv venv
source venv/bin/activate # On Windows: venv\Scripts\activate
# Install in development mode
pip install -e .[dev]
# Run tests
pytest tests/cli/
# Run with local changes
python -m aitbc_cli.main --help
```
## Support
- 📖 [Documentation](../docs/cli-reference.md)
- 🐛 [Issue Tracker](https://github.com/aitbc/aitbc/issues)
- 💬 [Discord Community](https://discord.gg/aitbc)
- 📧 [Email Support](mailto:support@aitbc.net)
## License
This project is licensed under the MIT License - see the [LICENSE](../LICENSE) file for details.
---
Made with ❤️ by the AITBC team

View File

@@ -0,0 +1,5 @@
"""AITBC CLI - Command Line Interface for AITBC Network"""
__version__ = "0.1.0"
__author__ = "AITBC Team"
__email__ = "team@aitbc.net"

View File

@@ -0,0 +1,70 @@
"""Authentication and credential management for AITBC CLI"""
import keyring
import os
from typing import Optional, Dict
from ..utils import success, error, warning
class AuthManager:
"""Manages authentication credentials using secure keyring storage"""
SERVICE_NAME = "aitbc-cli"
def __init__(self):
self.keyring = keyring.get_keyring()
def store_credential(self, name: str, api_key: str, environment: str = "default"):
"""Store an API key securely"""
try:
key = f"{environment}_{name}"
self.keyring.set_password(self.SERVICE_NAME, key, api_key)
success(f"Credential '{name}' stored for environment '{environment}'")
except Exception as e:
error(f"Failed to store credential: {e}")
def get_credential(self, name: str, environment: str = "default") -> Optional[str]:
"""Retrieve an API key"""
try:
key = f"{environment}_{name}"
return self.keyring.get_password(self.SERVICE_NAME, key)
except Exception as e:
warning(f"Failed to retrieve credential: {e}")
return None
def delete_credential(self, name: str, environment: str = "default"):
"""Delete an API key"""
try:
key = f"{environment}_{name}"
self.keyring.delete_password(self.SERVICE_NAME, key)
success(f"Credential '{name}' deleted for environment '{environment}'")
except Exception as e:
error(f"Failed to delete credential: {e}")
def list_credentials(self, environment: str = None) -> Dict[str, str]:
"""List all stored credentials (without showing the actual keys)"""
# Note: keyring doesn't provide a direct way to list all keys
# This is a simplified version that checks for common credential names
credentials = []
envs = [environment] if environment else ["default", "dev", "staging", "prod"]
names = ["client", "miner", "admin"]
for env in envs:
for name in names:
key = f"{env}_{name}"
if self.get_credential(name, env):
credentials.append(f"{name}@{env}")
return credentials
def store_env_credential(self, name: str):
"""Store credential from environment variable"""
env_var = f"{name.upper()}_API_KEY"
api_key = os.getenv(env_var)
if not api_key:
error(f"Environment variable {env_var} not set")
return False
self.store_credential(name, api_key)
return True

View File

@@ -0,0 +1 @@
"""Command modules for AITBC CLI"""

View File

@@ -0,0 +1,445 @@
"""Admin commands for AITBC CLI"""
import click
import httpx
import json
from typing import Optional, List, Dict, Any
from ..utils import output, error, success
@click.group()
def admin():
"""System administration commands"""
pass
@admin.command()
@click.pass_context
def status(ctx):
"""Get system status"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/status",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
status_data = response.json()
output(status_data, ctx.obj['output_format'])
else:
error(f"Failed to get system status: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.option("--limit", default=50, help="Number of jobs to show")
@click.option("--status", help="Filter by status")
@click.pass_context
def jobs(ctx, limit: int, status: Optional[str]):
"""List all jobs in the system"""
config = ctx.obj['config']
try:
params = {"limit": limit}
if status:
params["status"] = status
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/jobs",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
jobs = response.json()
output(jobs, ctx.obj['output_format'])
else:
error(f"Failed to get jobs: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.argument("job_id")
@click.pass_context
def job_details(ctx, job_id: str):
"""Get detailed job information"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/jobs/{job_id}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
job_data = response.json()
output(job_data, ctx.obj['output_format'])
else:
error(f"Job not found: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.argument("job_id")
@click.pass_context
def delete_job(ctx, job_id: str):
"""Delete a job from the system"""
config = ctx.obj['config']
if not click.confirm(f"Are you sure you want to delete job {job_id}?"):
return
try:
with httpx.Client() as client:
response = client.delete(
f"{config.coordinator_url}/v1/admin/jobs/{job_id}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
success(f"Job {job_id} deleted")
output({"status": "deleted", "job_id": job_id}, ctx.obj['output_format'])
else:
error(f"Failed to delete job: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.option("--limit", default=50, help="Number of miners to show")
@click.option("--status", help="Filter by status")
@click.pass_context
def miners(ctx, limit: int, status: Optional[str]):
"""List all registered miners"""
config = ctx.obj['config']
try:
params = {"limit": limit}
if status:
params["status"] = status
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/miners",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
miners = response.json()
output(miners, ctx.obj['output_format'])
else:
error(f"Failed to get miners: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.argument("miner_id")
@click.pass_context
def miner_details(ctx, miner_id: str):
"""Get detailed miner information"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/miners/{miner_id}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
miner_data = response.json()
output(miner_data, ctx.obj['output_format'])
else:
error(f"Miner not found: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.argument("miner_id")
@click.pass_context
def deactivate_miner(ctx, miner_id: str):
"""Deactivate a miner"""
config = ctx.obj['config']
if not click.confirm(f"Are you sure you want to deactivate miner {miner_id}?"):
return
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/miners/{miner_id}/deactivate",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
success(f"Miner {miner_id} deactivated")
output({"status": "deactivated", "miner_id": miner_id}, ctx.obj['output_format'])
else:
error(f"Failed to deactivate miner: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.argument("miner_id")
@click.pass_context
def activate_miner(ctx, miner_id: str):
"""Activate a miner"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/miners/{miner_id}/activate",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
success(f"Miner {miner_id} activated")
output({"status": "activated", "miner_id": miner_id}, ctx.obj['output_format'])
else:
error(f"Failed to activate miner: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.option("--days", type=int, default=7, help="Number of days to analyze")
@click.pass_context
def analytics(ctx, days: int):
"""Get system analytics"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/analytics",
params={"days": days},
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
analytics_data = response.json()
output(analytics_data, ctx.obj['output_format'])
else:
error(f"Failed to get analytics: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.option("--level", default="INFO", help="Log level (DEBUG, INFO, WARNING, ERROR)")
@click.option("--limit", default=100, help="Number of log entries to show")
@click.pass_context
def logs(ctx, level: str, limit: int):
"""Get system logs"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/admin/logs",
params={"level": level, "limit": limit},
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
logs_data = response.json()
output(logs_data, ctx.obj['output_format'])
else:
error(f"Failed to get logs: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.argument("job_id")
@click.option("--reason", help="Reason for priority change")
@click.pass_context
def prioritize_job(ctx, job_id: str, reason: Optional[str]):
"""Set job to high priority"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/jobs/{job_id}/prioritize",
json={"reason": reason or "Admin priority"},
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
success(f"Job {job_id} prioritized")
output({"status": "prioritized", "job_id": job_id}, ctx.obj['output_format'])
else:
error(f"Failed to prioritize job: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command()
@click.option("--action", required=True, help="Action to perform")
@click.option("--target", help="Target of the action")
@click.option("--data", help="Additional data (JSON)")
@click.pass_context
def execute(ctx, action: str, target: Optional[str], data: Optional[str]):
"""Execute custom admin action"""
config = ctx.obj['config']
# Parse data if provided
parsed_data = {}
if data:
try:
parsed_data = json.loads(data)
except json.JSONDecodeError:
error("Invalid JSON data")
return
if target:
parsed_data["target"] = target
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/execute/{action}",
json=parsed_data,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
result = response.json()
output(result, ctx.obj['output_format'])
else:
error(f"Failed to execute action: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.group()
def maintenance():
"""Maintenance operations"""
pass
@maintenance.command()
@click.pass_context
def cleanup(ctx):
"""Clean up old jobs and data"""
config = ctx.obj['config']
if not click.confirm("This will clean up old jobs and temporary data. Continue?"):
return
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/maintenance/cleanup",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
result = response.json()
success("Cleanup completed")
output(result, ctx.obj['output_format'])
else:
error(f"Cleanup failed: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@maintenance.command()
@click.pass_context
def reindex(ctx):
"""Reindex the database"""
config = ctx.obj['config']
if not click.confirm("This will reindex the entire database. Continue?"):
return
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/maintenance/reindex",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
result = response.json()
success("Reindex started")
output(result, ctx.obj['output_format'])
else:
error(f"Reindex failed: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@maintenance.command()
@click.pass_context
def backup(ctx):
"""Create system backup"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/admin/maintenance/backup",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
result = response.json()
success("Backup created")
output(result, ctx.obj['output_format'])
else:
error(f"Backup failed: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@admin.command(name="audit-log")
@click.option("--limit", default=50, help="Number of entries to show")
@click.option("--action", "action_filter", help="Filter by action type")
@click.pass_context
def audit_log(ctx, limit: int, action_filter: Optional[str]):
"""View audit log"""
from ..utils import AuditLogger
logger = AuditLogger()
entries = logger.get_logs(limit=limit, action_filter=action_filter)
if not entries:
output({"message": "No audit log entries found"}, ctx.obj['output_format'])
return
output(entries, ctx.obj['output_format'])
# Add maintenance group to admin
admin.add_command(maintenance)

View File

@@ -0,0 +1,220 @@
"""Authentication commands for AITBC CLI"""
import click
import os
from typing import Optional
from ..auth import AuthManager
from ..utils import output, success, error, warning
@click.group()
def auth():
"""Manage API keys and authentication"""
pass
@auth.command()
@click.argument("api_key")
@click.option("--environment", default="default", help="Environment name (default, dev, staging, prod)")
@click.pass_context
def login(ctx, api_key: str, environment: str):
"""Store API key for authentication"""
auth_manager = AuthManager()
# Validate API key format (basic check)
if not api_key or len(api_key) < 10:
error("Invalid API key format")
ctx.exit(1)
return
auth_manager.store_credential("client", api_key, environment)
output({
"status": "logged_in",
"environment": environment,
"note": "API key stored securely"
}, ctx.obj['output_format'])
@auth.command()
@click.option("--environment", default="default", help="Environment name")
@click.pass_context
def logout(ctx, environment: str):
"""Remove stored API key"""
auth_manager = AuthManager()
auth_manager.delete_credential("client", environment)
output({
"status": "logged_out",
"environment": environment
}, ctx.obj['output_format'])
@auth.command()
@click.option("--environment", default="default", help="Environment name")
@click.option("--show", is_flag=True, help="Show the actual API key")
@click.pass_context
def token(ctx, environment: str, show: bool):
"""Show stored API key"""
auth_manager = AuthManager()
api_key = auth_manager.get_credential("client", environment)
if api_key:
if show:
output({
"api_key": api_key,
"environment": environment
}, ctx.obj['output_format'])
else:
output({
"api_key": "***REDACTED***",
"environment": environment,
"length": len(api_key)
}, ctx.obj['output_format'])
else:
output({
"message": "No API key stored",
"environment": environment
}, ctx.obj['output_format'])
@auth.command()
@click.pass_context
def status(ctx):
"""Show authentication status"""
auth_manager = AuthManager()
credentials = auth_manager.list_credentials()
if credentials:
output({
"status": "authenticated",
"stored_credentials": credentials
}, ctx.obj['output_format'])
else:
output({
"status": "not_authenticated",
"message": "No stored credentials found"
}, ctx.obj['output_format'])
@auth.command()
@click.option("--environment", default="default", help="Environment name")
@click.pass_context
def refresh(ctx, environment: str):
"""Refresh authentication (placeholder for token refresh)"""
auth_manager = AuthManager()
api_key = auth_manager.get_credential("client", environment)
if api_key:
# In a real implementation, this would refresh the token
output({
"status": "refreshed",
"environment": environment,
"message": "Authentication refreshed (placeholder)"
}, ctx.obj['output_format'])
else:
error(f"No API key found for environment: {environment}")
ctx.exit(1)
@auth.group()
def keys():
"""Manage multiple API keys"""
pass
@keys.command()
@click.pass_context
def list(ctx):
"""List all stored API keys"""
auth_manager = AuthManager()
credentials = auth_manager.list_credentials()
if credentials:
output({
"credentials": credentials
}, ctx.obj['output_format'])
else:
output({
"message": "No credentials stored"
}, ctx.obj['output_format'])
@keys.command()
@click.argument("name")
@click.argument("api_key")
@click.option("--permissions", help="Comma-separated permissions (client,miner,admin)")
@click.option("--environment", default="default", help="Environment name")
@click.pass_context
def create(ctx, name: str, api_key: str, permissions: Optional[str], environment: str):
"""Create a new API key entry"""
auth_manager = AuthManager()
if not api_key or len(api_key) < 10:
error("Invalid API key format")
return
auth_manager.store_credential(name, api_key, environment)
output({
"status": "created",
"name": name,
"environment": environment,
"permissions": permissions or "none"
}, ctx.obj['output_format'])
@keys.command()
@click.argument("name")
@click.option("--environment", default="default", help="Environment name")
@click.pass_context
def revoke(ctx, name: str, environment: str):
"""Revoke an API key"""
auth_manager = AuthManager()
auth_manager.delete_credential(name, environment)
output({
"status": "revoked",
"name": name,
"environment": environment
}, ctx.obj['output_format'])
@keys.command()
@click.pass_context
def rotate(ctx):
"""Rotate all API keys (placeholder)"""
warning("Key rotation not implemented yet")
output({
"message": "Key rotation would update all stored keys",
"status": "placeholder"
}, ctx.obj['output_format'])
@auth.command()
@click.argument("name")
@click.pass_context
def import_env(ctx, name: str):
"""Import API key from environment variable"""
env_var = f"{name.upper()}_API_KEY"
api_key = os.getenv(env_var)
if not api_key:
error(f"Environment variable {env_var} not set")
ctx.exit(1)
return
auth_manager = AuthManager()
auth_manager.store_credential(name, api_key)
output({
"status": "imported",
"name": name,
"source": env_var
}, ctx.obj['output_format'])

View File

@@ -0,0 +1,236 @@
"""Blockchain commands for AITBC CLI"""
import click
import httpx
from typing import Optional, List
from ..utils import output, error
@click.group()
def blockchain():
"""Query blockchain information and status"""
pass
@blockchain.command()
@click.option("--limit", type=int, default=10, help="Number of blocks to show")
@click.option("--from-height", type=int, help="Start from this block height")
@click.pass_context
def blocks(ctx, limit: int, from_height: Optional[int]):
"""List recent blocks"""
config = ctx.obj['config']
try:
params = {"limit": limit}
if from_height:
params["from_height"] = from_height
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/explorer/blocks",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
data = response.json()
output(data, ctx.obj['output_format'])
else:
error(f"Failed to fetch blocks: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.argument("block_hash")
@click.pass_context
def block(ctx, block_hash: str):
"""Get details of a specific block"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/explorer/blocks/{block_hash}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
block_data = response.json()
output(block_data, ctx.obj['output_format'])
else:
error(f"Block not found: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.argument("tx_hash")
@click.pass_context
def transaction(ctx, tx_hash: str):
"""Get transaction details"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/explorer/transactions/{tx_hash}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
tx_data = response.json()
output(tx_data, ctx.obj['output_format'])
else:
error(f"Transaction not found: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.option("--node", type=int, default=1, help="Node number (1, 2, or 3)")
@click.pass_context
def status(ctx, node: int):
"""Get blockchain node status"""
config = ctx.obj['config']
# Map node to RPC URL
node_urls = {
1: "http://localhost:8082",
2: "http://localhost:8081",
3: "http://aitbc.keisanki.net/rpc"
}
rpc_url = node_urls.get(node)
if not rpc_url:
error(f"Invalid node number: {node}")
return
try:
with httpx.Client() as client:
response = client.get(
f"{rpc_url}/status",
timeout=5
)
if response.status_code == 200:
status_data = response.json()
output({
"node": node,
"rpc_url": rpc_url,
"status": status_data
}, ctx.obj['output_format'])
else:
error(f"Node {node} not responding: {response.status_code}")
except Exception as e:
error(f"Failed to connect to node {node}: {e}")
@blockchain.command()
@click.pass_context
def sync_status(ctx):
"""Get blockchain synchronization status"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/blockchain/sync",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
sync_data = response.json()
output(sync_data, ctx.obj['output_format'])
else:
error(f"Failed to get sync status: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.pass_context
def peers(ctx):
"""List connected peers"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/blockchain/peers",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
peers_data = response.json()
output(peers_data, ctx.obj['output_format'])
else:
error(f"Failed to get peers: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.pass_context
def info(ctx):
"""Get blockchain information"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/blockchain/info",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
info_data = response.json()
output(info_data, ctx.obj['output_format'])
else:
error(f"Failed to get blockchain info: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.pass_context
def supply(ctx):
"""Get token supply information"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/blockchain/supply",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
supply_data = response.json()
output(supply_data, ctx.obj['output_format'])
else:
error(f"Failed to get supply info: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@blockchain.command()
@click.pass_context
def validators(ctx):
"""List blockchain validators"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/blockchain/validators",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
validators_data = response.json()
output(validators_data, ctx.obj['output_format'])
else:
error(f"Failed to get validators: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")

View File

@@ -0,0 +1,373 @@
"""Client commands for AITBC CLI"""
import click
import httpx
import json
import time
from typing import Optional
from ..utils import output, error, success
@click.group()
def client():
"""Submit and manage jobs"""
pass
@client.command()
@click.option("--type", "job_type", default="inference", help="Job type")
@click.option("--prompt", help="Prompt for inference jobs")
@click.option("--model", help="Model name")
@click.option("--ttl", default=900, help="Time to live in seconds")
@click.option("--file", type=click.File('r'), help="Submit job from JSON file")
@click.option("--retries", default=0, help="Number of retry attempts (0 = no retry)")
@click.option("--retry-delay", default=1.0, help="Initial retry delay in seconds")
@click.pass_context
def submit(ctx, job_type: str, prompt: Optional[str], model: Optional[str],
ttl: int, file, retries: int, retry_delay: float):
"""Submit a job to the coordinator"""
config = ctx.obj['config']
# Build job data
if file:
try:
task_data = json.load(file)
except Exception as e:
error(f"Failed to read job file: {e}")
return
else:
task_data = {"type": job_type}
if prompt:
task_data["prompt"] = prompt
if model:
task_data["model"] = model
# Submit job with retry and exponential backoff
max_attempts = retries + 1
for attempt in range(1, max_attempts + 1):
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/jobs",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or ""
},
json={
"payload": task_data,
"ttl_seconds": ttl
}
)
if response.status_code == 201:
job = response.json()
result = {
"job_id": job.get('job_id'),
"status": "submitted",
"message": "Job submitted successfully"
}
if attempt > 1:
result["attempts"] = attempt
output(result, ctx.obj['output_format'])
return
else:
if attempt < max_attempts:
delay = retry_delay * (2 ** (attempt - 1))
click.echo(f"Attempt {attempt}/{max_attempts} failed ({response.status_code}), retrying in {delay:.1f}s...")
time.sleep(delay)
else:
error(f"Failed to submit job: {response.status_code} - {response.text}")
ctx.exit(response.status_code)
except Exception as e:
if attempt < max_attempts:
delay = retry_delay * (2 ** (attempt - 1))
click.echo(f"Attempt {attempt}/{max_attempts} failed ({e}), retrying in {delay:.1f}s...")
time.sleep(delay)
else:
error(f"Network error after {max_attempts} attempts: {e}")
ctx.exit(1)
@client.command()
@click.argument("job_id")
@click.pass_context
def status(ctx, job_id: str):
"""Check job status"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/jobs/{job_id}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
data = response.json()
output(data, ctx.obj['output_format'])
else:
error(f"Failed to get job status: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@client.command()
@click.option("--limit", default=10, help="Number of blocks to show")
@click.pass_context
def blocks(ctx, limit: int):
"""List recent blocks"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/explorer/blocks",
params={"limit": limit},
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
blocks = response.json()
output(blocks, ctx.obj['output_format'])
else:
error(f"Failed to get blocks: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@client.command()
@click.argument("job_id")
@click.pass_context
def cancel(ctx, job_id: str):
"""Cancel a job"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/jobs/{job_id}/cancel",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
success(f"Job {job_id} cancelled")
else:
error(f"Failed to cancel job: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@client.command()
@click.option("--limit", default=10, help="Number of receipts to show")
@click.option("--job-id", help="Filter by job ID")
@click.option("--status", help="Filter by status")
@click.pass_context
def receipts(ctx, limit: int, job_id: Optional[str], status: Optional[str]):
"""List job receipts"""
config = ctx.obj['config']
try:
params = {"limit": limit}
if job_id:
params["job_id"] = job_id
if status:
params["status"] = status
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/explorer/receipts",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
receipts = response.json()
output(receipts, ctx.obj['output_format'])
else:
error(f"Failed to get receipts: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@client.command()
@click.option("--limit", default=10, help="Number of jobs to show")
@click.option("--status", help="Filter by status (pending, running, completed, failed)")
@click.option("--type", help="Filter by job type")
@click.option("--from-time", help="Filter jobs from this timestamp (ISO format)")
@click.option("--to-time", help="Filter jobs until this timestamp (ISO format)")
@click.pass_context
def history(ctx, limit: int, status: Optional[str], type: Optional[str],
from_time: Optional[str], to_time: Optional[str]):
"""Show job history with filtering options"""
config = ctx.obj['config']
try:
params = {"limit": limit}
if status:
params["status"] = status
if type:
params["type"] = type
if from_time:
params["from_time"] = from_time
if to_time:
params["to_time"] = to_time
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/jobs/history",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
jobs = response.json()
output(jobs, ctx.obj['output_format'])
else:
error(f"Failed to get job history: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@client.command(name="batch-submit")
@click.argument("file_path", type=click.Path(exists=True))
@click.option("--format", "file_format", type=click.Choice(["json", "csv"]), default=None, help="File format (auto-detected if not specified)")
@click.option("--retries", default=0, help="Retry attempts per job")
@click.option("--delay", default=0.5, help="Delay between submissions (seconds)")
@click.pass_context
def batch_submit(ctx, file_path: str, file_format: Optional[str], retries: int, delay: float):
"""Submit multiple jobs from a CSV or JSON file"""
import csv
from pathlib import Path
from ..utils import progress_bar
config = ctx.obj['config']
path = Path(file_path)
if not file_format:
file_format = "csv" if path.suffix.lower() == ".csv" else "json"
jobs_data = []
if file_format == "json":
with open(path) as f:
data = json.load(f)
jobs_data = data if isinstance(data, list) else [data]
else:
with open(path) as f:
reader = csv.DictReader(f)
jobs_data = list(reader)
if not jobs_data:
error("No jobs found in file")
return
results = {"submitted": 0, "failed": 0, "job_ids": []}
with progress_bar("Submitting jobs...", total=len(jobs_data)) as (progress, task):
for i, job in enumerate(jobs_data):
try:
task_data = {"type": job.get("type", "inference")}
if "prompt" in job:
task_data["prompt"] = job["prompt"]
if "model" in job:
task_data["model"] = job["model"]
with httpx.Client() as http_client:
response = http_client.post(
f"{config.coordinator_url}/v1/jobs",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or ""
},
json={"payload": task_data, "ttl_seconds": int(job.get("ttl", 900))}
)
if response.status_code == 201:
result = response.json()
results["submitted"] += 1
results["job_ids"].append(result.get("job_id"))
else:
results["failed"] += 1
except Exception:
results["failed"] += 1
progress.update(task, advance=1)
if delay and i < len(jobs_data) - 1:
time.sleep(delay)
output(results, ctx.obj['output_format'])
@client.command(name="template")
@click.argument("action", type=click.Choice(["save", "list", "run", "delete"]))
@click.option("--name", help="Template name")
@click.option("--type", "job_type", help="Job type")
@click.option("--prompt", help="Prompt text")
@click.option("--model", help="Model name")
@click.option("--ttl", type=int, default=900, help="TTL in seconds")
@click.pass_context
def template(ctx, action: str, name: Optional[str], job_type: Optional[str],
prompt: Optional[str], model: Optional[str], ttl: int):
"""Manage job templates for repeated tasks"""
from pathlib import Path
template_dir = Path.home() / ".aitbc" / "templates"
template_dir.mkdir(parents=True, exist_ok=True)
if action == "save":
if not name:
error("Template name required (--name)")
return
template_data = {"type": job_type or "inference", "ttl": ttl}
if prompt:
template_data["prompt"] = prompt
if model:
template_data["model"] = model
with open(template_dir / f"{name}.json", "w") as f:
json.dump(template_data, f, indent=2)
output({"status": "saved", "name": name, "template": template_data}, ctx.obj['output_format'])
elif action == "list":
templates = []
for tf in template_dir.glob("*.json"):
with open(tf) as f:
data = json.load(f)
templates.append({"name": tf.stem, **data})
output(templates if templates else {"message": "No templates found"}, ctx.obj['output_format'])
elif action == "run":
if not name:
error("Template name required (--name)")
return
tf = template_dir / f"{name}.json"
if not tf.exists():
error(f"Template '{name}' not found")
return
with open(tf) as f:
tmpl = json.load(f)
if prompt:
tmpl["prompt"] = prompt
if model:
tmpl["model"] = model
ctx.invoke(submit, job_type=tmpl.get("type", "inference"),
prompt=tmpl.get("prompt"), model=tmpl.get("model"),
ttl=tmpl.get("ttl", 900), file=None, retries=0, retry_delay=1.0)
elif action == "delete":
if not name:
error("Template name required (--name)")
return
tf = template_dir / f"{name}.json"
if not tf.exists():
error(f"Template '{name}' not found")
return
tf.unlink()
output({"status": "deleted", "name": name}, ctx.obj['output_format'])

View File

@@ -0,0 +1,470 @@
"""Configuration commands for AITBC CLI"""
import click
import os
import yaml
import json
from pathlib import Path
from typing import Optional, Dict, Any
from ..config import get_config, Config
from ..utils import output, error, success
@click.group()
def config():
"""Manage CLI configuration"""
pass
@config.command()
@click.pass_context
def show(ctx):
"""Show current configuration"""
config = ctx.obj['config']
config_dict = {
"coordinator_url": config.coordinator_url,
"api_key": "***REDACTED***" if config.api_key else None,
"timeout": getattr(config, 'timeout', 30),
"config_file": getattr(config, 'config_file', None)
}
output(config_dict, ctx.obj['output_format'])
@config.command()
@click.argument("key")
@click.argument("value")
@click.option("--global", "global_config", is_flag=True, help="Set global config")
@click.pass_context
def set(ctx, key: str, value: str, global_config: bool):
"""Set configuration value"""
config = ctx.obj['config']
# Determine config file path
if global_config:
config_dir = Path.home() / ".config" / "aitbc"
config_dir.mkdir(parents=True, exist_ok=True)
config_file = config_dir / "config.yaml"
else:
config_file = Path.cwd() / ".aitbc.yaml"
# Load existing config
if config_file.exists():
with open(config_file) as f:
config_data = yaml.safe_load(f) or {}
else:
config_data = {}
# Set the value
if key == "api_key":
config_data["api_key"] = value
if ctx.obj['output_format'] == 'table':
success("API key set (use --global to set permanently)")
elif key == "coordinator_url":
config_data["coordinator_url"] = value
if ctx.obj['output_format'] == 'table':
success(f"Coordinator URL set to: {value}")
elif key == "timeout":
try:
config_data["timeout"] = int(value)
if ctx.obj['output_format'] == 'table':
success(f"Timeout set to: {value}s")
except ValueError:
error("Timeout must be an integer")
ctx.exit(1)
else:
error(f"Unknown configuration key: {key}")
ctx.exit(1)
# Save config
with open(config_file, 'w') as f:
yaml.dump(config_data, f, default_flow_style=False)
output({
"config_file": str(config_file),
"key": key,
"value": value
}, ctx.obj['output_format'])
@config.command()
@click.option("--global", "global_config", is_flag=True, help="Show global config")
def path(global_config: bool):
"""Show configuration file path"""
if global_config:
config_dir = Path.home() / ".config" / "aitbc"
config_file = config_dir / "config.yaml"
else:
config_file = Path.cwd() / ".aitbc.yaml"
output({
"config_file": str(config_file),
"exists": config_file.exists()
})
@config.command()
@click.option("--global", "global_config", is_flag=True, help="Edit global config")
@click.pass_context
def edit(ctx, global_config: bool):
"""Open configuration file in editor"""
# Determine config file path
if global_config:
config_dir = Path.home() / ".config" / "aitbc"
config_dir.mkdir(parents=True, exist_ok=True)
config_file = config_dir / "config.yaml"
else:
config_file = Path.cwd() / ".aitbc.yaml"
# Create if doesn't exist
if not config_file.exists():
config = ctx.obj['config']
config_data = {
"coordinator_url": config.coordinator_url,
"timeout": getattr(config, 'timeout', 30)
}
with open(config_file, 'w') as f:
yaml.dump(config_data, f, default_flow_style=False)
# Open in editor
editor = os.getenv('EDITOR', 'nano')
os.system(f"{editor} {config_file}")
@config.command()
@click.option("--global", "global_config", is_flag=True, help="Reset global config")
@click.pass_context
def reset(ctx, global_config: bool):
"""Reset configuration to defaults"""
# Determine config file path
if global_config:
config_dir = Path.home() / ".config" / "aitbc"
config_file = config_dir / "config.yaml"
else:
config_file = Path.cwd() / ".aitbc.yaml"
if not config_file.exists():
output({"message": "No configuration file found"})
return
if not click.confirm(f"Reset configuration at {config_file}?"):
return
# Remove config file
config_file.unlink()
success("Configuration reset to defaults")
@config.command()
@click.option("--format", "output_format", type=click.Choice(['yaml', 'json']), default='yaml', help="Output format")
@click.option("--global", "global_config", is_flag=True, help="Export global config")
@click.pass_context
def export(ctx, output_format: str, global_config: bool):
"""Export configuration"""
# Determine config file path
if global_config:
config_dir = Path.home() / ".config" / "aitbc"
config_file = config_dir / "config.yaml"
else:
config_file = Path.cwd() / ".aitbc.yaml"
if not config_file.exists():
error("No configuration file found")
ctx.exit(1)
with open(config_file) as f:
config_data = yaml.safe_load(f)
# Redact sensitive data
if 'api_key' in config_data:
config_data['api_key'] = "***REDACTED***"
if output_format == 'json':
click.echo(json.dumps(config_data, indent=2))
else:
click.echo(yaml.dump(config_data, default_flow_style=False))
@config.command()
@click.argument("file_path")
@click.option("--merge", is_flag=True, help="Merge with existing config")
@click.option("--global", "global_config", is_flag=True, help="Import to global config")
@click.pass_context
def import_config(ctx, file_path: str, merge: bool, global_config: bool):
"""Import configuration from file"""
import_file = Path(file_path)
if not import_file.exists():
error(f"File not found: {file_path}")
ctx.exit(1)
# Load import file
try:
with open(import_file) as f:
if import_file.suffix.lower() == '.json':
import_data = json.load(f)
else:
import_data = yaml.safe_load(f)
except json.JSONDecodeError:
error("Invalid JSON data")
ctx.exit(1)
except Exception as e:
error(f"Failed to parse file: {e}")
ctx.exit(1)
# Determine target config file
if global_config:
config_dir = Path.home() / ".config" / "aitbc"
config_dir.mkdir(parents=True, exist_ok=True)
config_file = config_dir / "config.yaml"
else:
config_file = Path.cwd() / ".aitbc.yaml"
# Load existing config if merging
if merge and config_file.exists():
with open(config_file) as f:
config_data = yaml.safe_load(f) or {}
config_data.update(import_data)
else:
config_data = import_data
# Save config
with open(config_file, 'w') as f:
yaml.dump(config_data, f, default_flow_style=False)
if ctx.obj['output_format'] == 'table':
success(f"Configuration imported to {config_file}")
@config.command()
@click.pass_context
def validate(ctx):
"""Validate configuration"""
config = ctx.obj['config']
errors = []
warnings = []
# Validate coordinator URL
if not config.coordinator_url:
errors.append("Coordinator URL is not set")
elif not config.coordinator_url.startswith(('http://', 'https://')):
errors.append("Coordinator URL must start with http:// or https://")
# Validate API key
if not config.api_key:
warnings.append("API key is not set")
elif len(config.api_key) < 10:
errors.append("API key appears to be too short")
# Validate timeout
timeout = getattr(config, 'timeout', 30)
if not isinstance(timeout, (int, float)) or timeout <= 0:
errors.append("Timeout must be a positive number")
# Output results
result = {
"valid": len(errors) == 0,
"errors": errors,
"warnings": warnings
}
if errors:
error("Configuration validation failed")
ctx.exit(1)
elif warnings:
if ctx.obj['output_format'] == 'table':
success("Configuration valid with warnings")
else:
if ctx.obj['output_format'] == 'table':
success("Configuration is valid")
output(result, ctx.obj['output_format'])
@config.command()
def environments():
"""List available environments"""
env_vars = [
'AITBC_COORDINATOR_URL',
'AITBC_API_KEY',
'AITBC_TIMEOUT',
'AITBC_CONFIG_FILE',
'CLIENT_API_KEY',
'MINER_API_KEY',
'ADMIN_API_KEY'
]
env_data = {}
for var in env_vars:
value = os.getenv(var)
if value:
if 'API_KEY' in var:
value = "***REDACTED***"
env_data[var] = value
output({
"environment_variables": env_data,
"note": "Use export VAR=value to set environment variables"
})
@config.group()
def profiles():
"""Manage configuration profiles"""
pass
@profiles.command()
@click.argument("name")
@click.pass_context
def save(ctx, name: str):
"""Save current configuration as a profile"""
config = ctx.obj['config']
# Create profiles directory
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile_file = profiles_dir / f"{name}.yaml"
# Save profile (without API key)
profile_data = {
"coordinator_url": config.coordinator_url,
"timeout": getattr(config, 'timeout', 30)
}
with open(profile_file, 'w') as f:
yaml.dump(profile_data, f, default_flow_style=False)
if ctx.obj['output_format'] == 'table':
success(f"Profile '{name}' saved")
@profiles.command()
def list():
"""List available profiles"""
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
if not profiles_dir.exists():
output({"profiles": []})
return
profiles = []
for profile_file in profiles_dir.glob("*.yaml"):
with open(profile_file) as f:
profile_data = yaml.safe_load(f)
profiles.append({
"name": profile_file.stem,
"coordinator_url": profile_data.get("coordinator_url"),
"timeout": profile_data.get("timeout", 30)
})
output({"profiles": profiles})
@profiles.command()
@click.argument("name")
@click.pass_context
def load(ctx, name: str):
"""Load a configuration profile"""
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
profile_file = profiles_dir / f"{name}.yaml"
if not profile_file.exists():
error(f"Profile '{name}' not found")
ctx.exit(1)
with open(profile_file) as f:
profile_data = yaml.safe_load(f)
# Load to current config
config_file = Path.cwd() / ".aitbc.yaml"
with open(config_file, 'w') as f:
yaml.dump(profile_data, f, default_flow_style=False)
if ctx.obj['output_format'] == 'table':
success(f"Profile '{name}' loaded")
@profiles.command()
@click.argument("name")
@click.pass_context
def delete(ctx, name: str):
"""Delete a configuration profile"""
profiles_dir = Path.home() / ".config" / "aitbc" / "profiles"
profile_file = profiles_dir / f"{name}.yaml"
if not profile_file.exists():
error(f"Profile '{name}' not found")
ctx.exit(1)
if not click.confirm(f"Delete profile '{name}'?"):
return
profile_file.unlink()
if ctx.obj['output_format'] == 'table':
success(f"Profile '{name}' deleted")
@config.command(name="set-secret")
@click.argument("key")
@click.argument("value")
@click.pass_context
def set_secret(ctx, key: str, value: str):
"""Set an encrypted configuration value"""
from ..utils import encrypt_value
config_dir = Path.home() / ".config" / "aitbc"
config_dir.mkdir(parents=True, exist_ok=True)
secrets_file = config_dir / "secrets.json"
secrets = {}
if secrets_file.exists():
with open(secrets_file) as f:
secrets = json.load(f)
secrets[key] = encrypt_value(value)
with open(secrets_file, "w") as f:
json.dump(secrets, f, indent=2)
# Restrict file permissions
secrets_file.chmod(0o600)
if ctx.obj['output_format'] == 'table':
success(f"Secret '{key}' saved (encrypted)")
output({"key": key, "status": "encrypted"}, ctx.obj['output_format'])
@config.command(name="get-secret")
@click.argument("key")
@click.pass_context
def get_secret(ctx, key: str):
"""Get a decrypted configuration value"""
from ..utils import decrypt_value
secrets_file = Path.home() / ".config" / "aitbc" / "secrets.json"
if not secrets_file.exists():
error("No secrets file found")
ctx.exit(1)
return
with open(secrets_file) as f:
secrets = json.load(f)
if key not in secrets:
error(f"Secret '{key}' not found")
ctx.exit(1)
return
decrypted = decrypt_value(secrets[key])
output({"key": key, "value": decrypted}, ctx.obj['output_format'])
# Add profiles group to config
config.add_command(profiles)

View File

@@ -0,0 +1,307 @@
"""Marketplace commands for AITBC CLI"""
import click
import httpx
import json
from typing import Optional, List
from ..utils import output, error, success
@click.group()
def marketplace():
"""GPU marketplace operations"""
pass
@marketplace.group()
def gpu():
"""GPU marketplace operations"""
pass
@gpu.command()
@click.option("--name", required=True, help="GPU name/model")
@click.option("--memory", type=int, help="GPU memory in GB")
@click.option("--cuda-cores", type=int, help="Number of CUDA cores")
@click.option("--compute-capability", help="Compute capability (e.g., 8.9)")
@click.option("--price-per-hour", type=float, help="Price per hour in AITBC")
@click.option("--description", help="GPU description")
@click.option("--miner-id", help="Miner ID (uses auth key if not provided)")
@click.pass_context
def register(ctx, name: str, memory: Optional[int], cuda_cores: Optional[int],
compute_capability: Optional[str], price_per_hour: Optional[float],
description: Optional[str], miner_id: Optional[str]):
"""Register GPU on marketplace"""
config = ctx.obj['config']
# Build GPU specs
gpu_specs = {
"name": name,
"memory_gb": memory,
"cuda_cores": cuda_cores,
"compute_capability": compute_capability,
"price_per_hour": price_per_hour,
"description": description
}
# Remove None values
gpu_specs = {k: v for k, v in gpu_specs.items() if v is not None}
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/marketplace/gpu/register",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or "",
"X-Miner-ID": miner_id or "default"
},
json={"gpu": gpu_specs}
)
if response.status_code == 201:
result = response.json()
success(f"GPU registered successfully: {result.get('gpu_id')}")
output(result, ctx.obj['output_format'])
else:
error(f"Failed to register GPU: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@gpu.command()
@click.option("--available", is_flag=True, help="Show only available GPUs")
@click.option("--model", help="Filter by GPU model (supports wildcards)")
@click.option("--memory-min", type=int, help="Minimum memory in GB")
@click.option("--price-max", type=float, help="Maximum price per hour")
@click.option("--limit", type=int, default=20, help="Maximum number of results")
@click.pass_context
def list(ctx, available: bool, model: Optional[str], memory_min: Optional[int],
price_max: Optional[float], limit: int):
"""List available GPUs"""
config = ctx.obj['config']
# Build query params
params = {"limit": limit}
if available:
params["available"] = "true"
if model:
params["model"] = model
if memory_min:
params["memory_min"] = memory_min
if price_max:
params["price_max"] = price_max
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/marketplace/gpu/list",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
gpus = response.json()
output(gpus, ctx.obj['output_format'])
else:
error(f"Failed to list GPUs: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@gpu.command()
@click.argument("gpu_id")
@click.pass_context
def details(ctx, gpu_id: str):
"""Get GPU details"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/marketplace/gpu/{gpu_id}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
gpu_data = response.json()
output(gpu_data, ctx.obj['output_format'])
else:
error(f"GPU not found: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@gpu.command()
@click.argument("gpu_id")
@click.option("--hours", type=float, required=True, help="Rental duration in hours")
@click.option("--job-id", help="Job ID to associate with rental")
@click.pass_context
def book(ctx, gpu_id: str, hours: float, job_id: Optional[str]):
"""Book a GPU"""
config = ctx.obj['config']
try:
booking_data = {
"gpu_id": gpu_id,
"duration_hours": hours
}
if job_id:
booking_data["job_id"] = job_id
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/marketplace/gpu/{gpu_id}/book",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or ""
},
json=booking_data
)
if response.status_code == 201:
booking = response.json()
success(f"GPU booked successfully: {booking.get('booking_id')}")
output(booking, ctx.obj['output_format'])
else:
error(f"Failed to book GPU: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@gpu.command()
@click.argument("gpu_id")
@click.pass_context
def release(ctx, gpu_id: str):
"""Release a booked GPU"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/marketplace/gpu/{gpu_id}/release",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
success(f"GPU {gpu_id} released")
output({"status": "released", "gpu_id": gpu_id}, ctx.obj['output_format'])
else:
error(f"Failed to release GPU: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@marketplace.command()
@click.option("--status", help="Filter by status (active, completed, cancelled)")
@click.option("--limit", type=int, default=10, help="Number of orders to show")
@click.pass_context
def orders(ctx, status: Optional[str], limit: int):
"""List marketplace orders"""
config = ctx.obj['config']
params = {"limit": limit}
if status:
params["status"] = status
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/marketplace/orders",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
orders = response.json()
output(orders, ctx.obj['output_format'])
else:
error(f"Failed to get orders: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@marketplace.command()
@click.argument("model")
@click.pass_context
def pricing(ctx, model: str):
"""Get pricing information for a GPU model"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/marketplace/pricing/{model}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
pricing_data = response.json()
output(pricing_data, ctx.obj['output_format'])
else:
error(f"Pricing not found: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@marketplace.command()
@click.argument("gpu_id")
@click.option("--limit", type=int, default=10, help="Number of reviews to show")
@click.pass_context
def reviews(ctx, gpu_id: str, limit: int):
"""Get GPU reviews"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/marketplace/gpu/{gpu_id}/reviews",
params={"limit": limit},
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
reviews = response.json()
output(reviews, ctx.obj['output_format'])
else:
error(f"Failed to get reviews: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@marketplace.command()
@click.argument("gpu_id")
@click.option("--rating", type=int, required=True, help="Rating (1-5)")
@click.option("--comment", help="Review comment")
@click.pass_context
def review(ctx, gpu_id: str, rating: int, comment: Optional[str]):
"""Add a review for a GPU"""
config = ctx.obj['config']
if not 1 <= rating <= 5:
error("Rating must be between 1 and 5")
return
try:
review_data = {
"rating": rating,
"comment": comment
}
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/marketplace/gpu/{gpu_id}/reviews",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or ""
},
json=review_data
)
if response.status_code == 201:
success("Review added successfully")
output({"status": "review_added", "gpu_id": gpu_id}, ctx.obj['output_format'])
else:
error(f"Failed to add review: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")

View File

@@ -0,0 +1,457 @@
"""Miner commands for AITBC CLI"""
import click
import httpx
import json
import time
import concurrent.futures
from typing import Optional, Dict, Any, List
from ..utils import output, error, success
@click.group()
def miner():
"""Register as miner and process jobs"""
pass
@miner.command()
@click.option("--gpu", help="GPU model name")
@click.option("--memory", type=int, help="GPU memory in GB")
@click.option("--cuda-cores", type=int, help="Number of CUDA cores")
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def register(ctx, gpu: Optional[str], memory: Optional[int],
cuda_cores: Optional[int], miner_id: str):
"""Register as a miner with the coordinator"""
config = ctx.obj['config']
# Build capabilities
capabilities = {}
if gpu:
capabilities["gpu"] = {"model": gpu}
if memory:
if "gpu" not in capabilities:
capabilities["gpu"] = {}
capabilities["gpu"]["memory_gb"] = memory
if cuda_cores:
if "gpu" not in capabilities:
capabilities["gpu"] = {}
capabilities["gpu"]["cuda_cores"] = cuda_cores
# Default capabilities if none provided
if not capabilities:
capabilities = {
"cpu": {"cores": 4},
"memory": {"gb": 16}
}
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/miners/register?miner_id={miner_id}",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or ""
},
json={"capabilities": capabilities}
)
if response.status_code == 200:
output({
"miner_id": miner_id,
"status": "registered",
"capabilities": capabilities
}, ctx.obj['output_format'])
else:
error(f"Failed to register: {response.status_code} - {response.text}")
except Exception as e:
error(f"Network error: {e}")
@miner.command()
@click.option("--wait", type=int, default=5, help="Max wait time in seconds")
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def poll(ctx, wait: int, miner_id: str):
"""Poll for a single job"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/miners/poll",
headers={
"X-Api-Key": config.api_key or "",
"X-Miner-ID": miner_id
},
timeout=wait + 5
)
if response.status_code == 200:
job = response.json()
if job:
output(job, ctx.obj['output_format'])
else:
output({"message": "No jobs available"}, ctx.obj['output_format'])
else:
error(f"Failed to poll: {response.status_code}")
except httpx.TimeoutException:
output({"message": f"No jobs available within {wait} seconds"}, ctx.obj['output_format'])
except Exception as e:
error(f"Network error: {e}")
@miner.command()
@click.option("--jobs", type=int, default=1, help="Number of jobs to process")
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def mine(ctx, jobs: int, miner_id: str):
"""Mine continuously for specified number of jobs"""
config = ctx.obj['config']
processed = 0
while processed < jobs:
try:
with httpx.Client() as client:
# Poll for job
response = client.get(
f"{config.coordinator_url}/v1/miners/poll",
headers={
"X-Api-Key": config.api_key or "",
"X-Miner-ID": miner_id
},
timeout=30
)
if response.status_code == 200:
job = response.json()
if job:
job_id = job.get('job_id')
output({
"job_id": job_id,
"status": "processing",
"job_number": processed + 1
}, ctx.obj['output_format'])
# Simulate processing (in real implementation, do actual work)
time.sleep(2)
# Submit result
result_response = client.post(
f"{config.coordinator_url}/v1/miners/{job_id}/result",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or "",
"X-Miner-ID": miner_id
},
json={
"result": f"Processed job {job_id}",
"success": True
}
)
if result_response.status_code == 200:
success(f"Job {job_id} completed successfully")
processed += 1
else:
error(f"Failed to submit result: {result_response.status_code}")
else:
# No job available, wait a bit
time.sleep(5)
else:
error(f"Failed to poll: {response.status_code}")
break
except Exception as e:
error(f"Error: {e}")
break
output({
"total_processed": processed,
"miner_id": miner_id
}, ctx.obj['output_format'])
@miner.command()
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def heartbeat(ctx, miner_id: str):
"""Send heartbeat to coordinator"""
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url}/v1/miners/heartbeat?miner_id={miner_id}",
headers={
"X-Api-Key": config.api_key or ""
}
)
if response.status_code == 200:
output({
"miner_id": miner_id,
"status": "heartbeat_sent",
"timestamp": time.time()
}, ctx.obj['output_format'])
else:
error(f"Failed to send heartbeat: {response.status_code}")
except Exception as e:
error(f"Network error: {e}")
@miner.command()
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def status(ctx, miner_id: str):
"""Check miner status"""
config = ctx.obj['config']
# This would typically query a miner status endpoint
# For now, we'll just show the miner info
output({
"miner_id": miner_id,
"coordinator": config.coordinator_url,
"status": "active"
}, ctx.obj['output_format'])
@miner.command()
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.option("--from-time", help="Filter from timestamp (ISO format)")
@click.option("--to-time", help="Filter to timestamp (ISO format)")
@click.pass_context
def earnings(ctx, miner_id: str, from_time: Optional[str], to_time: Optional[str]):
"""Show miner earnings"""
config = ctx.obj['config']
try:
params = {"miner_id": miner_id}
if from_time:
params["from_time"] = from_time
if to_time:
params["to_time"] = to_time
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/miners/{miner_id}/earnings",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
data = response.json()
output(data, ctx.obj['output_format'])
else:
error(f"Failed to get earnings: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@miner.command(name="update-capabilities")
@click.option("--gpu", help="GPU model name")
@click.option("--memory", type=int, help="GPU memory in GB")
@click.option("--cuda-cores", type=int, help="Number of CUDA cores")
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def update_capabilities(ctx, gpu: Optional[str], memory: Optional[int],
cuda_cores: Optional[int], miner_id: str):
"""Update miner GPU capabilities"""
config = ctx.obj['config']
capabilities = {}
if gpu:
capabilities["gpu"] = {"model": gpu}
if memory:
if "gpu" not in capabilities:
capabilities["gpu"] = {}
capabilities["gpu"]["memory_gb"] = memory
if cuda_cores:
if "gpu" not in capabilities:
capabilities["gpu"] = {}
capabilities["gpu"]["cuda_cores"] = cuda_cores
if not capabilities:
error("No capabilities specified. Use --gpu, --memory, or --cuda-cores.")
return
try:
with httpx.Client() as client:
response = client.put(
f"{config.coordinator_url}/v1/miners/{miner_id}/capabilities",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or ""
},
json={"capabilities": capabilities}
)
if response.status_code == 200:
output({
"miner_id": miner_id,
"status": "capabilities_updated",
"capabilities": capabilities
}, ctx.obj['output_format'])
else:
error(f"Failed to update capabilities: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@miner.command()
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.option("--force", is_flag=True, help="Force deregistration without confirmation")
@click.pass_context
def deregister(ctx, miner_id: str, force: bool):
"""Deregister miner from the coordinator"""
if not force:
if not click.confirm(f"Deregister miner '{miner_id}'?"):
click.echo("Cancelled.")
return
config = ctx.obj['config']
try:
with httpx.Client() as client:
response = client.delete(
f"{config.coordinator_url}/v1/miners/{miner_id}",
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
output({
"miner_id": miner_id,
"status": "deregistered"
}, ctx.obj['output_format'])
else:
error(f"Failed to deregister: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
@miner.command()
@click.option("--limit", default=10, help="Number of jobs to show")
@click.option("--type", "job_type", help="Filter by job type")
@click.option("--min-reward", type=float, help="Minimum reward threshold")
@click.option("--status", "job_status", help="Filter by status (pending, running, completed, failed)")
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def jobs(ctx, limit: int, job_type: Optional[str], min_reward: Optional[float],
job_status: Optional[str], miner_id: str):
"""List miner jobs with filtering"""
config = ctx.obj['config']
try:
params = {"limit": limit, "miner_id": miner_id}
if job_type:
params["type"] = job_type
if min_reward is not None:
params["min_reward"] = min_reward
if job_status:
params["status"] = job_status
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url}/v1/miners/{miner_id}/jobs",
params=params,
headers={"X-Api-Key": config.api_key or ""}
)
if response.status_code == 200:
data = response.json()
output(data, ctx.obj['output_format'])
else:
error(f"Failed to get jobs: {response.status_code}")
ctx.exit(1)
except Exception as e:
error(f"Network error: {e}")
ctx.exit(1)
def _process_single_job(config, miner_id: str, worker_id: int) -> Dict[str, Any]:
"""Process a single job (used by concurrent mine)"""
try:
with httpx.Client() as http_client:
response = http_client.get(
f"{config.coordinator_url}/v1/miners/poll",
headers={
"X-Api-Key": config.api_key or "",
"X-Miner-ID": miner_id
},
timeout=30
)
if response.status_code == 200:
job = response.json()
if job:
job_id = job.get('job_id')
time.sleep(2) # Simulate processing
result_response = http_client.post(
f"{config.coordinator_url}/v1/miners/{job_id}/result",
headers={
"Content-Type": "application/json",
"X-Api-Key": config.api_key or "",
"X-Miner-ID": miner_id
},
json={"result": f"Processed by worker {worker_id}", "success": True}
)
return {
"worker": worker_id,
"job_id": job_id,
"status": "completed" if result_response.status_code == 200 else "failed"
}
return {"worker": worker_id, "status": "no_job"}
except Exception as e:
return {"worker": worker_id, "status": "error", "error": str(e)}
@miner.command(name="concurrent-mine")
@click.option("--workers", type=int, default=2, help="Number of concurrent workers")
@click.option("--jobs", "total_jobs", type=int, default=5, help="Total jobs to process")
@click.option("--miner-id", default="cli-miner", help="Miner ID")
@click.pass_context
def concurrent_mine(ctx, workers: int, total_jobs: int, miner_id: str):
"""Mine with concurrent job processing"""
config = ctx.obj['config']
success(f"Starting concurrent mining: {workers} workers, {total_jobs} jobs")
completed = 0
failed = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
remaining = total_jobs
while remaining > 0:
batch_size = min(remaining, workers)
futures = [
executor.submit(_process_single_job, config, miner_id, i)
for i in range(batch_size)
]
for future in concurrent.futures.as_completed(futures):
result = future.result()
if result.get("status") == "completed":
completed += 1
remaining -= 1
output(result, ctx.obj['output_format'])
elif result.get("status") == "no_job":
time.sleep(2)
else:
failed += 1
remaining -= 1
output({
"status": "finished",
"completed": completed,
"failed": failed,
"workers": workers
}, ctx.obj['output_format'])

View File

@@ -0,0 +1,381 @@
"""Monitoring and dashboard commands for AITBC CLI"""
import click
import httpx
import json
import time
from pathlib import Path
from typing import Optional
from datetime import datetime, timedelta
from ..utils import output, error, success, console
@click.group()
def monitor():
"""Monitoring, metrics, and alerting commands"""
pass
@monitor.command()
@click.option("--refresh", type=int, default=5, help="Refresh interval in seconds")
@click.option("--duration", type=int, default=0, help="Duration in seconds (0 = indefinite)")
@click.pass_context
def dashboard(ctx, refresh: int, duration: int):
"""Real-time system dashboard"""
config = ctx.obj['config']
start_time = time.time()
try:
while True:
elapsed = time.time() - start_time
if duration > 0 and elapsed >= duration:
break
console.clear()
console.rule("[bold blue]AITBC Dashboard[/bold blue]")
console.print(f"[dim]Refreshing every {refresh}s | Elapsed: {int(elapsed)}s[/dim]\n")
# Fetch system status
try:
with httpx.Client(timeout=5) as client:
# Node status
try:
resp = client.get(
f"{config.coordinator_url}/v1/status",
headers={"X-Api-Key": config.api_key or ""}
)
if resp.status_code == 200:
status = resp.json()
console.print("[bold green]Coordinator:[/bold green] Online")
for k, v in status.items():
console.print(f" {k}: {v}")
else:
console.print(f"[bold yellow]Coordinator:[/bold yellow] HTTP {resp.status_code}")
except Exception:
console.print("[bold red]Coordinator:[/bold red] Offline")
console.print()
# Jobs summary
try:
resp = client.get(
f"{config.coordinator_url}/v1/jobs",
headers={"X-Api-Key": config.api_key or ""},
params={"limit": 5}
)
if resp.status_code == 200:
jobs = resp.json()
if isinstance(jobs, list):
console.print(f"[bold cyan]Recent Jobs:[/bold cyan] {len(jobs)}")
for job in jobs[:5]:
status_color = "green" if job.get("status") == "completed" else "yellow"
console.print(f" [{status_color}]{job.get('id', 'N/A')}: {job.get('status', 'unknown')}[/{status_color}]")
except Exception:
console.print("[dim]Jobs: unavailable[/dim]")
console.print()
# Miners summary
try:
resp = client.get(
f"{config.coordinator_url}/v1/miners",
headers={"X-Api-Key": config.api_key or ""}
)
if resp.status_code == 200:
miners = resp.json()
if isinstance(miners, list):
online = sum(1 for m in miners if m.get("status") == "ONLINE")
console.print(f"[bold cyan]Miners:[/bold cyan] {online}/{len(miners)} online")
except Exception:
console.print("[dim]Miners: unavailable[/dim]")
except Exception as e:
console.print(f"[red]Error fetching data: {e}[/red]")
console.print(f"\n[dim]Press Ctrl+C to exit[/dim]")
time.sleep(refresh)
except KeyboardInterrupt:
console.print("\n[bold]Dashboard stopped[/bold]")
@monitor.command()
@click.option("--period", default="24h", help="Time period (1h, 24h, 7d, 30d)")
@click.option("--export", "export_path", type=click.Path(), help="Export metrics to file")
@click.pass_context
def metrics(ctx, period: str, export_path: Optional[str]):
"""Collect and display system metrics"""
config = ctx.obj['config']
# Parse period
multipliers = {"h": 3600, "d": 86400}
unit = period[-1]
value = int(period[:-1])
seconds = value * multipliers.get(unit, 3600)
since = datetime.now() - timedelta(seconds=seconds)
metrics_data = {
"period": period,
"since": since.isoformat(),
"collected_at": datetime.now().isoformat(),
"coordinator": {},
"jobs": {},
"miners": {}
}
try:
with httpx.Client(timeout=10) as client:
# Coordinator metrics
try:
resp = client.get(
f"{config.coordinator_url}/v1/status",
headers={"X-Api-Key": config.api_key or ""}
)
if resp.status_code == 200:
metrics_data["coordinator"] = resp.json()
metrics_data["coordinator"]["status"] = "online"
else:
metrics_data["coordinator"]["status"] = f"error_{resp.status_code}"
except Exception:
metrics_data["coordinator"]["status"] = "offline"
# Job metrics
try:
resp = client.get(
f"{config.coordinator_url}/v1/jobs",
headers={"X-Api-Key": config.api_key or ""},
params={"limit": 100}
)
if resp.status_code == 200:
jobs = resp.json()
if isinstance(jobs, list):
metrics_data["jobs"] = {
"total": len(jobs),
"completed": sum(1 for j in jobs if j.get("status") == "completed"),
"pending": sum(1 for j in jobs if j.get("status") == "pending"),
"failed": sum(1 for j in jobs if j.get("status") == "failed"),
}
except Exception:
metrics_data["jobs"] = {"error": "unavailable"}
# Miner metrics
try:
resp = client.get(
f"{config.coordinator_url}/v1/miners",
headers={"X-Api-Key": config.api_key or ""}
)
if resp.status_code == 200:
miners = resp.json()
if isinstance(miners, list):
metrics_data["miners"] = {
"total": len(miners),
"online": sum(1 for m in miners if m.get("status") == "ONLINE"),
"offline": sum(1 for m in miners if m.get("status") != "ONLINE"),
}
except Exception:
metrics_data["miners"] = {"error": "unavailable"}
except Exception as e:
error(f"Failed to collect metrics: {e}")
if export_path:
with open(export_path, "w") as f:
json.dump(metrics_data, f, indent=2)
success(f"Metrics exported to {export_path}")
output(metrics_data, ctx.obj['output_format'])
@monitor.command()
@click.argument("action", type=click.Choice(["add", "list", "remove", "test"]))
@click.option("--name", help="Alert name")
@click.option("--type", "alert_type", type=click.Choice(["coordinator_down", "miner_offline", "job_failed", "low_balance"]), help="Alert type")
@click.option("--threshold", type=float, help="Alert threshold value")
@click.option("--webhook", help="Webhook URL for notifications")
@click.pass_context
def alerts(ctx, action: str, name: Optional[str], alert_type: Optional[str],
threshold: Optional[float], webhook: Optional[str]):
"""Configure monitoring alerts"""
alerts_dir = Path.home() / ".aitbc" / "alerts"
alerts_dir.mkdir(parents=True, exist_ok=True)
alerts_file = alerts_dir / "alerts.json"
# Load existing alerts
existing = []
if alerts_file.exists():
with open(alerts_file) as f:
existing = json.load(f)
if action == "add":
if not name or not alert_type:
error("Alert name and type required (--name, --type)")
return
alert = {
"name": name,
"type": alert_type,
"threshold": threshold,
"webhook": webhook,
"created_at": datetime.now().isoformat(),
"enabled": True
}
existing.append(alert)
with open(alerts_file, "w") as f:
json.dump(existing, f, indent=2)
success(f"Alert '{name}' added")
output(alert, ctx.obj['output_format'])
elif action == "list":
if not existing:
output({"message": "No alerts configured"}, ctx.obj['output_format'])
else:
output(existing, ctx.obj['output_format'])
elif action == "remove":
if not name:
error("Alert name required (--name)")
return
existing = [a for a in existing if a["name"] != name]
with open(alerts_file, "w") as f:
json.dump(existing, f, indent=2)
success(f"Alert '{name}' removed")
elif action == "test":
if not name:
error("Alert name required (--name)")
return
alert = next((a for a in existing if a["name"] == name), None)
if not alert:
error(f"Alert '{name}' not found")
return
if alert.get("webhook"):
try:
with httpx.Client(timeout=10) as client:
resp = client.post(alert["webhook"], json={
"alert": name,
"type": alert["type"],
"message": f"Test alert from AITBC CLI",
"timestamp": datetime.now().isoformat()
})
output({"status": "sent", "response_code": resp.status_code}, ctx.obj['output_format'])
except Exception as e:
error(f"Webhook test failed: {e}")
else:
output({"status": "no_webhook", "alert": alert}, ctx.obj['output_format'])
@monitor.command()
@click.option("--period", default="7d", help="Analysis period (1d, 7d, 30d)")
@click.pass_context
def history(ctx, period: str):
"""Historical data analysis"""
config = ctx.obj['config']
multipliers = {"h": 3600, "d": 86400}
unit = period[-1]
value = int(period[:-1])
seconds = value * multipliers.get(unit, 3600)
since = datetime.now() - timedelta(seconds=seconds)
analysis = {
"period": period,
"since": since.isoformat(),
"analyzed_at": datetime.now().isoformat(),
"summary": {}
}
try:
with httpx.Client(timeout=10) as client:
try:
resp = client.get(
f"{config.coordinator_url}/v1/jobs",
headers={"X-Api-Key": config.api_key or ""},
params={"limit": 500}
)
if resp.status_code == 200:
jobs = resp.json()
if isinstance(jobs, list):
completed = [j for j in jobs if j.get("status") == "completed"]
failed = [j for j in jobs if j.get("status") == "failed"]
analysis["summary"] = {
"total_jobs": len(jobs),
"completed": len(completed),
"failed": len(failed),
"success_rate": f"{len(completed) / max(1, len(jobs)) * 100:.1f}%",
}
except Exception:
analysis["summary"] = {"error": "Could not fetch job data"}
except Exception as e:
error(f"Analysis failed: {e}")
output(analysis, ctx.obj['output_format'])
@monitor.command()
@click.argument("action", type=click.Choice(["add", "list", "remove", "test"]))
@click.option("--name", help="Webhook name")
@click.option("--url", help="Webhook URL")
@click.option("--events", help="Comma-separated event types (job_completed,miner_offline,alert)")
@click.pass_context
def webhooks(ctx, action: str, name: Optional[str], url: Optional[str], events: Optional[str]):
"""Manage webhook notifications"""
webhooks_dir = Path.home() / ".aitbc" / "webhooks"
webhooks_dir.mkdir(parents=True, exist_ok=True)
webhooks_file = webhooks_dir / "webhooks.json"
existing = []
if webhooks_file.exists():
with open(webhooks_file) as f:
existing = json.load(f)
if action == "add":
if not name or not url:
error("Webhook name and URL required (--name, --url)")
return
webhook = {
"name": name,
"url": url,
"events": events.split(",") if events else ["all"],
"created_at": datetime.now().isoformat(),
"enabled": True
}
existing.append(webhook)
with open(webhooks_file, "w") as f:
json.dump(existing, f, indent=2)
success(f"Webhook '{name}' added")
output(webhook, ctx.obj['output_format'])
elif action == "list":
if not existing:
output({"message": "No webhooks configured"}, ctx.obj['output_format'])
else:
output(existing, ctx.obj['output_format'])
elif action == "remove":
if not name:
error("Webhook name required (--name)")
return
existing = [w for w in existing if w["name"] != name]
with open(webhooks_file, "w") as f:
json.dump(existing, f, indent=2)
success(f"Webhook '{name}' removed")
elif action == "test":
if not name:
error("Webhook name required (--name)")
return
wh = next((w for w in existing if w["name"] == name), None)
if not wh:
error(f"Webhook '{name}' not found")
return
try:
with httpx.Client(timeout=10) as client:
resp = client.post(wh["url"], json={
"event": "test",
"source": "aitbc-cli",
"message": "Test webhook notification",
"timestamp": datetime.now().isoformat()
})
output({"status": "sent", "response_code": resp.status_code}, ctx.obj['output_format'])
except Exception as e:
error(f"Webhook test failed: {e}")

View File

@@ -0,0 +1,441 @@
"""Simulation commands for AITBC CLI"""
import click
import json
import time
import random
from pathlib import Path
from typing import Optional, List, Dict, Any
from ..utils import output, error, success
@click.group()
def simulate():
"""Run simulations and manage test users"""
pass
@simulate.command()
@click.option("--distribute", default="10000,1000",
help="Initial distribution: client_amount,miner_amount")
@click.option("--reset", is_flag=True, help="Reset existing simulation")
@click.pass_context
def init(ctx, distribute: str, reset: bool):
"""Initialize test economy"""
home_dir = Path("/home/oib/windsurf/aitbc/home")
if reset:
success("Resetting simulation...")
# Reset wallet files
for wallet_file in ["client_wallet.json", "miner_wallet.json"]:
wallet_path = home_dir / wallet_file
if wallet_path.exists():
wallet_path.unlink()
# Parse distribution
try:
client_amount, miner_amount = map(float, distribute.split(","))
except:
error("Invalid distribution format. Use: client_amount,miner_amount")
return
# Initialize genesis wallet
genesis_path = home_dir / "genesis_wallet.json"
if not genesis_path.exists():
genesis_wallet = {
"address": "aitbc1genesis",
"balance": 1000000,
"transactions": []
}
with open(genesis_path, 'w') as f:
json.dump(genesis_wallet, f, indent=2)
success("Genesis wallet created")
# Initialize client wallet
client_path = home_dir / "client_wallet.json"
if not client_path.exists():
client_wallet = {
"address": "aitbc1client",
"balance": client_amount,
"transactions": [{
"type": "receive",
"amount": client_amount,
"from": "aitbc1genesis",
"timestamp": time.time()
}]
}
with open(client_path, 'w') as f:
json.dump(client_wallet, f, indent=2)
success(f"Client wallet initialized with {client_amount} AITBC")
# Initialize miner wallet
miner_path = home_dir / "miner_wallet.json"
if not miner_path.exists():
miner_wallet = {
"address": "aitbc1miner",
"balance": miner_amount,
"transactions": [{
"type": "receive",
"amount": miner_amount,
"from": "aitbc1genesis",
"timestamp": time.time()
}]
}
with open(miner_path, 'w') as f:
json.dump(miner_wallet, f, indent=2)
success(f"Miner wallet initialized with {miner_amount} AITBC")
output({
"status": "initialized",
"distribution": {
"client": client_amount,
"miner": miner_amount
},
"total_supply": client_amount + miner_amount
}, ctx.obj['output_format'])
@simulate.group()
def user():
"""Manage test users"""
pass
@user.command()
@click.option("--type", type=click.Choice(["client", "miner"]), required=True)
@click.option("--name", required=True, help="User name")
@click.option("--balance", type=float, default=100, help="Initial balance")
@click.pass_context
def create(ctx, type: str, name: str, balance: float):
"""Create a test user"""
home_dir = Path("/home/oib/windsurf/aitbc/home")
user_id = f"{type}_{name}"
wallet_path = home_dir / f"{user_id}_wallet.json"
if wallet_path.exists():
error(f"User {name} already exists")
return
wallet = {
"address": f"aitbc1{user_id}",
"balance": balance,
"transactions": [{
"type": "receive",
"amount": balance,
"from": "aitbc1genesis",
"timestamp": time.time()
}]
}
with open(wallet_path, 'w') as f:
json.dump(wallet, f, indent=2)
success(f"Created {type} user: {name}")
output({
"user_id": user_id,
"address": wallet["address"],
"balance": balance
}, ctx.obj['output_format'])
@user.command()
@click.pass_context
def list(ctx):
"""List all test users"""
home_dir = Path("/home/oib/windsurf/aitbc/home")
users = []
for wallet_file in home_dir.glob("*_wallet.json"):
if wallet_file.name in ["genesis_wallet.json"]:
continue
with open(wallet_file) as f:
wallet = json.load(f)
user_type = "client" if "client" in wallet_file.name else "miner"
user_name = wallet_file.stem.replace("_wallet", "").replace(f"{user_type}_", "")
users.append({
"name": user_name,
"type": user_type,
"address": wallet["address"],
"balance": wallet["balance"]
})
output({"users": users}, ctx.obj['output_format'])
@user.command()
@click.argument("user")
@click.pass_context
def balance(ctx, user: str):
"""Check user balance"""
home_dir = Path("/home/oib/windsurf/aitbc/home")
wallet_path = home_dir / f"{user}_wallet.json"
if not wallet_path.exists():
error(f"User {user} not found")
return
with open(wallet_path) as f:
wallet = json.load(f)
output({
"user": user,
"address": wallet["address"],
"balance": wallet["balance"]
}, ctx.obj['output_format'])
@user.command()
@click.argument("user")
@click.argument("amount", type=float)
@click.pass_context
def fund(ctx, user: str, amount: float):
"""Fund a test user"""
home_dir = Path("/home/oib/windsurf/aitbc/home")
# Load genesis wallet
genesis_path = home_dir / "genesis_wallet.json"
with open(genesis_path) as f:
genesis = json.load(f)
if genesis["balance"] < amount:
error(f"Insufficient genesis balance: {genesis['balance']}")
return
# Load user wallet
wallet_path = home_dir / f"{user}_wallet.json"
if not wallet_path.exists():
error(f"User {user} not found")
return
with open(wallet_path) as f:
wallet = json.load(f)
# Transfer funds
genesis["balance"] -= amount
genesis["transactions"].append({
"type": "send",
"amount": -amount,
"to": wallet["address"],
"timestamp": time.time()
})
wallet["balance"] += amount
wallet["transactions"].append({
"type": "receive",
"amount": amount,
"from": genesis["address"],
"timestamp": time.time()
})
# Save wallets
with open(genesis_path, 'w') as f:
json.dump(genesis, f, indent=2)
with open(wallet_path, 'w') as f:
json.dump(wallet, f, indent=2)
success(f"Funded {user} with {amount} AITBC")
output({
"user": user,
"amount": amount,
"new_balance": wallet["balance"]
}, ctx.obj['output_format'])
@simulate.command()
@click.option("--jobs", type=int, default=5, help="Number of jobs to simulate")
@click.option("--rounds", type=int, default=3, help="Number of rounds")
@click.option("--delay", type=float, default=1.0, help="Delay between operations (seconds)")
@click.pass_context
def workflow(ctx, jobs: int, rounds: int, delay: float):
"""Simulate complete workflow"""
config = ctx.obj['config']
success(f"Starting workflow simulation: {jobs} jobs x {rounds} rounds")
for round_num in range(1, rounds + 1):
click.echo(f"\n--- Round {round_num} ---")
# Submit jobs
submitted_jobs = []
for i in range(jobs):
prompt = f"Test job {i+1} (round {round_num})"
# Simulate job submission
job_id = f"job_{round_num}_{i+1}_{int(time.time())}"
submitted_jobs.append(job_id)
output({
"action": "submit_job",
"job_id": job_id,
"prompt": prompt,
"round": round_num
}, ctx.obj['output_format'])
time.sleep(delay)
# Simulate job processing
for job_id in submitted_jobs:
# Simulate miner picking up job
output({
"action": "job_assigned",
"job_id": job_id,
"miner": f"miner_{random.randint(1, 3)}",
"status": "processing"
}, ctx.obj['output_format'])
time.sleep(delay * 0.5)
# Simulate job completion
earnings = random.uniform(1, 10)
output({
"action": "job_completed",
"job_id": job_id,
"earnings": earnings,
"status": "completed"
}, ctx.obj['output_format'])
time.sleep(delay * 0.5)
output({
"status": "completed",
"total_jobs": jobs * rounds,
"rounds": rounds
}, ctx.obj['output_format'])
@simulate.command()
@click.option("--clients", type=int, default=10, help="Number of clients")
@click.option("--miners", type=int, default=3, help="Number of miners")
@click.option("--duration", type=int, default=300, help="Test duration in seconds")
@click.option("--job-rate", type=float, default=1.0, help="Jobs per second")
@click.pass_context
def load_test(ctx, clients: int, miners: int, duration: int, job_rate: float):
"""Run load test"""
start_time = time.time()
end_time = start_time + duration
job_interval = 1.0 / job_rate
success(f"Starting load test: {clients} clients, {miners} miners, {duration}s")
stats = {
"jobs_submitted": 0,
"jobs_completed": 0,
"errors": 0,
"start_time": start_time
}
while time.time() < end_time:
# Submit jobs
for client_id in range(clients):
if time.time() >= end_time:
break
job_id = f"load_test_{stats['jobs_submitted']}_{int(time.time())}"
stats["jobs_submitted"] += 1
# Simulate random job completion
if random.random() > 0.1: # 90% success rate
stats["jobs_completed"] += 1
else:
stats["errors"] += 1
time.sleep(job_interval)
# Show progress
elapsed = time.time() - start_time
if elapsed % 30 < 1: # Every 30 seconds
output({
"elapsed": elapsed,
"jobs_submitted": stats["jobs_submitted"],
"jobs_completed": stats["jobs_completed"],
"errors": stats["errors"],
"success_rate": stats["jobs_completed"] / max(1, stats["jobs_submitted"]) * 100
}, ctx.obj['output_format'])
# Final stats
total_time = time.time() - start_time
output({
"status": "completed",
"duration": total_time,
"jobs_submitted": stats["jobs_submitted"],
"jobs_completed": stats["jobs_completed"],
"errors": stats["errors"],
"avg_jobs_per_second": stats["jobs_submitted"] / total_time,
"success_rate": stats["jobs_completed"] / max(1, stats["jobs_submitted"]) * 100
}, ctx.obj['output_format'])
@simulate.command()
@click.option("--file", required=True, help="Scenario file path")
@click.pass_context
def scenario(ctx, file: str):
"""Run predefined scenario"""
scenario_path = Path(file)
if not scenario_path.exists():
error(f"Scenario file not found: {file}")
return
with open(scenario_path) as f:
scenario = json.load(f)
success(f"Running scenario: {scenario.get('name', 'Unknown')}")
# Execute scenario steps
for step in scenario.get("steps", []):
step_type = step.get("type")
step_name = step.get("name", "Unnamed step")
click.echo(f"\nExecuting: {step_name}")
if step_type == "submit_jobs":
count = step.get("count", 1)
for i in range(count):
output({
"action": "submit_job",
"step": step_name,
"job_num": i + 1,
"prompt": step.get("prompt", f"Scenario job {i+1}")
}, ctx.obj['output_format'])
elif step_type == "wait":
duration = step.get("duration", 1)
time.sleep(duration)
elif step_type == "check_balance":
user = step.get("user", "client")
# Would check actual balance
output({
"action": "check_balance",
"user": user
}, ctx.obj['output_format'])
output({
"status": "completed",
"scenario": scenario.get('name', 'Unknown')
}, ctx.obj['output_format'])
@simulate.command()
@click.argument("simulation_id")
@click.pass_context
def results(ctx, simulation_id: str):
"""Show simulation results"""
# In a real implementation, this would query stored results
# For now, return mock data
output({
"simulation_id": simulation_id,
"status": "completed",
"start_time": time.time() - 3600,
"end_time": time.time(),
"duration": 3600,
"total_jobs": 50,
"successful_jobs": 48,
"failed_jobs": 2,
"success_rate": 96.0
}, ctx.obj['output_format'])

View File

@@ -0,0 +1,990 @@
"""Wallet commands for AITBC CLI"""
import click
import httpx
import json
import os
import shutil
import yaml
from pathlib import Path
from typing import Optional, Dict, Any, List
from datetime import datetime, timedelta
from ..utils import output, error, success
@click.group()
@click.option("--wallet-name", help="Name of the wallet to use")
@click.option("--wallet-path", help="Direct path to wallet file (overrides --wallet-name)")
@click.pass_context
def wallet(ctx, wallet_name: Optional[str], wallet_path: Optional[str]):
"""Manage your AITBC wallets and transactions"""
# Ensure wallet object exists
ctx.ensure_object(dict)
# If direct wallet path is provided, use it
if wallet_path:
wp = Path(wallet_path)
wp.parent.mkdir(parents=True, exist_ok=True)
ctx.obj['wallet_name'] = wp.stem
ctx.obj['wallet_dir'] = wp.parent
ctx.obj['wallet_path'] = wp
return
# Set wallet directory
wallet_dir = Path.home() / ".aitbc" / "wallets"
wallet_dir.mkdir(parents=True, exist_ok=True)
# Set active wallet
if not wallet_name:
# Try to get from config or use 'default'
config_file = Path.home() / ".aitbc" / "config.yaml"
if config_file.exists():
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
if config:
wallet_name = config.get('active_wallet', 'default')
else:
wallet_name = 'default'
else:
wallet_name = 'default'
ctx.obj['wallet_name'] = wallet_name
ctx.obj['wallet_dir'] = wallet_dir
ctx.obj['wallet_path'] = wallet_dir / f"{wallet_name}.json"
@wallet.command()
@click.argument('name')
@click.option('--type', 'wallet_type', default='hd', help='Wallet type (hd, simple)')
@click.pass_context
def create(ctx, name: str, wallet_type: str):
"""Create a new wallet"""
wallet_dir = ctx.obj['wallet_dir']
wallet_path = wallet_dir / f"{name}.json"
if wallet_path.exists():
error(f"Wallet '{name}' already exists")
return
# Generate new wallet
if wallet_type == 'hd':
# Hierarchical Deterministic wallet
import secrets
seed = secrets.token_hex(32)
address = f"aitbc1{seed[:40]}"
private_key = f"0x{seed}"
public_key = f"0x{secrets.token_hex(32)}"
else:
# Simple wallet
import secrets
private_key = f"0x{secrets.token_hex(32)}"
public_key = f"0x{secrets.token_hex(32)}"
address = f"aitbc1{secrets.token_hex(20)}"
wallet_data = {
"wallet_id": name,
"type": wallet_type,
"address": address,
"public_key": public_key,
"private_key": private_key,
"created_at": datetime.utcnow().isoformat() + "Z",
"balance": 0,
"transactions": []
}
# Save wallet
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Wallet '{name}' created successfully")
output({
"name": name,
"type": wallet_type,
"address": address,
"path": str(wallet_path)
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.pass_context
def list(ctx):
"""List all wallets"""
wallet_dir = ctx.obj['wallet_dir']
config_file = Path.home() / ".aitbc" / "config.yaml"
# Get active wallet
active_wallet = 'default'
if config_file.exists():
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
active_wallet = config.get('active_wallet', 'default')
wallets = []
for wallet_file in wallet_dir.glob("*.json"):
with open(wallet_file, 'r') as f:
wallet_data = json.load(f)
wallets.append({
"name": wallet_data['wallet_id'],
"type": wallet_data.get('type', 'simple'),
"address": wallet_data['address'],
"created_at": wallet_data['created_at'],
"active": wallet_data['wallet_id'] == active_wallet
})
output(wallets, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument('name')
@click.pass_context
def switch(ctx, name: str):
"""Switch to a different wallet"""
wallet_dir = ctx.obj['wallet_dir']
wallet_path = wallet_dir / f"{name}.json"
if not wallet_path.exists():
error(f"Wallet '{name}' does not exist")
return
# Update config
config_file = Path.home() / ".aitbc" / "config.yaml"
config = {}
if config_file.exists():
import yaml
with open(config_file, 'r') as f:
config = yaml.safe_load(f) or {}
config['active_wallet'] = name
# Save config
config_file.parent.mkdir(parents=True, exist_ok=True)
with open(config_file, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
success(f"Switched to wallet '{name}'")
output({
"active_wallet": name,
"address": json.load(open(wallet_path))['address']
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument('name')
@click.option('--confirm', is_flag=True, help='Skip confirmation prompt')
@click.pass_context
def delete(ctx, name: str, confirm: bool):
"""Delete a wallet"""
wallet_dir = ctx.obj['wallet_dir']
wallet_path = wallet_dir / f"{name}.json"
if not wallet_path.exists():
error(f"Wallet '{name}' does not exist")
return
if not confirm:
if not click.confirm(f"Are you sure you want to delete wallet '{name}'? This cannot be undone."):
return
wallet_path.unlink()
success(f"Wallet '{name}' deleted")
# If deleted wallet was active, reset to default
config_file = Path.home() / ".aitbc" / "config.yaml"
if config_file.exists():
import yaml
with open(config_file, 'r') as f:
config = yaml.safe_load(f) or {}
if config.get('active_wallet') == name:
config['active_wallet'] = 'default'
with open(config_file, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
@wallet.command()
@click.argument('name')
@click.option('--destination', help='Destination path for backup file')
@click.pass_context
def backup(ctx, name: str, destination: Optional[str]):
"""Backup a wallet"""
wallet_dir = ctx.obj['wallet_dir']
wallet_path = wallet_dir / f"{name}.json"
if not wallet_path.exists():
error(f"Wallet '{name}' does not exist")
return
if not destination:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
destination = f"{name}_backup_{timestamp}.json"
# Copy wallet file
shutil.copy2(wallet_path, destination)
success(f"Wallet '{name}' backed up to '{destination}'")
output({
"wallet": name,
"backup_path": destination,
"timestamp": datetime.utcnow().isoformat() + "Z"
})
@wallet.command()
@click.argument('backup_path')
@click.argument('name')
@click.option('--force', is_flag=True, help='Override existing wallet')
@click.pass_context
def restore(ctx, backup_path: str, name: str, force: bool):
"""Restore a wallet from backup"""
wallet_dir = ctx.obj['wallet_dir']
wallet_path = wallet_dir / f"{name}.json"
if wallet_path.exists() and not force:
error(f"Wallet '{name}' already exists. Use --force to override.")
return
if not Path(backup_path).exists():
error(f"Backup file '{backup_path}' not found")
return
# Load and verify backup
with open(backup_path, 'r') as f:
wallet_data = json.load(f)
# Update wallet name if needed
wallet_data['wallet_id'] = name
wallet_data['restored_at'] = datetime.utcnow().isoformat() + "Z"
# Save restored wallet
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Wallet '{name}' restored from backup")
output({
"wallet": name,
"restored_from": backup_path,
"address": wallet_data['address']
})
@wallet.command()
@click.pass_context
def info(ctx):
"""Show current wallet information"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
config_file = Path.home() / ".aitbc" / "config.yaml"
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found. Use 'aitbc wallet create' to create one.")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
# Get active wallet from config
active_wallet = 'default'
if config_file.exists():
import yaml
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
active_wallet = config.get('active_wallet', 'default')
wallet_info = {
"name": wallet_data['wallet_id'],
"type": wallet_data.get('type', 'simple'),
"address": wallet_data['address'],
"public_key": wallet_data['public_key'],
"created_at": wallet_data['created_at'],
"active": wallet_data['wallet_id'] == active_wallet,
"path": str(wallet_path)
}
if 'balance' in wallet_data:
wallet_info['balance'] = wallet_data['balance']
output(wallet_info, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.pass_context
def balance(ctx):
"""Check wallet balance"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
config = ctx.obj.get('config')
# Auto-create wallet if it doesn't exist
if not wallet_path.exists():
import secrets
wallet_data = {
"wallet_id": wallet_name,
"type": "simple",
"address": f"aitbc1{secrets.token_hex(20)}",
"public_key": f"0x{secrets.token_hex(32)}",
"private_key": f"0x{secrets.token_hex(32)}",
"created_at": datetime.utcnow().isoformat() + "Z",
"balance": 0.0,
"transactions": []
}
wallet_path.parent.mkdir(parents=True, exist_ok=True)
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
else:
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
# Try to get balance from blockchain if available
if config:
try:
with httpx.Client() as client:
response = client.get(
f"{config.coordinator_url.replace('/api', '')}/rpc/balance/{wallet_data['address']}",
timeout=5
)
if response.status_code == 200:
blockchain_balance = response.json().get('balance', 0)
output({
"wallet": wallet_name,
"address": wallet_data['address'],
"local_balance": wallet_data.get('balance', 0),
"blockchain_balance": blockchain_balance,
"synced": wallet_data.get('balance', 0) == blockchain_balance
}, ctx.obj.get('output_format', 'table'))
return
except:
pass
# Fallback to local balance only
output({
"wallet": wallet_name,
"address": wallet_data['address'],
"balance": wallet_data.get('balance', 0),
"note": "Local balance only (blockchain not accessible)"
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.option("--limit", type=int, default=10, help="Number of transactions to show")
@click.pass_context
def history(ctx, limit: int):
"""Show transaction history"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
transactions = wallet_data.get('transactions', [])[-limit:]
# Format transactions
formatted_txs = []
for tx in transactions:
formatted_txs.append({
"type": tx['type'],
"amount": tx['amount'],
"description": tx.get('description', ''),
"timestamp": tx['timestamp']
})
output({
"wallet": wallet_name,
"address": wallet_data['address'],
"transactions": formatted_txs
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument("amount", type=float)
@click.argument("job_id")
@click.option("--desc", help="Description of the work")
@click.pass_context
def earn(ctx, amount: float, job_id: str, desc: Optional[str]):
"""Add earnings from completed job"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
# Add transaction
transaction = {
"type": "earn",
"amount": amount,
"job_id": job_id,
"description": desc or f"Job {job_id}",
"timestamp": datetime.now().isoformat()
}
wallet_data['transactions'].append(transaction)
wallet_data['balance'] = wallet_data.get('balance', 0) + amount
# Save wallet
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Earnings added: {amount} AITBC")
output({
"wallet": wallet_name,
"amount": amount,
"job_id": job_id,
"new_balance": wallet_data['balance']
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument("amount", type=float)
@click.argument("description")
@click.pass_context
def spend(ctx, amount: float, description: str):
"""Spend AITBC"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
balance = wallet_data.get('balance', 0)
if balance < amount:
error(f"Insufficient balance. Available: {balance}, Required: {amount}")
ctx.exit(1)
return
# Add transaction
transaction = {
"type": "spend",
"amount": -amount,
"description": description,
"timestamp": datetime.now().isoformat()
}
wallet_data['transactions'].append(transaction)
wallet_data['balance'] = balance - amount
# Save wallet
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Spent: {amount} AITBC")
output({
"wallet": wallet_name,
"amount": amount,
"description": description,
"new_balance": wallet_data['balance']
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.pass_context
def address(ctx):
"""Show wallet address"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
output({
"wallet": wallet_name,
"address": wallet_data['address']
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument("to_address")
@click.argument("amount", type=float)
@click.option("--description", help="Transaction description")
@click.pass_context
def send(ctx, to_address: str, amount: float, description: Optional[str]):
"""Send AITBC to another address"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
config = ctx.obj.get('config')
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
balance = wallet_data.get('balance', 0)
if balance < amount:
error(f"Insufficient balance. Available: {balance}, Required: {amount}")
ctx.exit(1)
return
# Try to send via blockchain
if config:
try:
with httpx.Client() as client:
response = client.post(
f"{config.coordinator_url.replace('/api', '')}/rpc/transactions",
json={
"from": wallet_data['address'],
"to": to_address,
"amount": amount,
"description": description or ""
},
headers={"X-Api-Key": getattr(config, 'api_key', '') or ""}
)
if response.status_code == 201:
tx = response.json()
# Update local wallet
transaction = {
"type": "send",
"amount": -amount,
"to_address": to_address,
"tx_hash": tx.get('hash'),
"description": description or "",
"timestamp": datetime.now().isoformat()
}
wallet_data['transactions'].append(transaction)
wallet_data['balance'] = balance - amount
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Sent {amount} AITBC to {to_address}")
output({
"wallet": wallet_name,
"tx_hash": tx.get('hash'),
"amount": amount,
"to": to_address,
"new_balance": wallet_data['balance']
}, ctx.obj.get('output_format', 'table'))
return
except Exception as e:
error(f"Network error: {e}")
# Fallback: just record locally
transaction = {
"type": "send",
"amount": -amount,
"to_address": to_address,
"description": description or "",
"timestamp": datetime.now().isoformat(),
"pending": True
}
wallet_data['transactions'].append(transaction)
wallet_data['balance'] = balance - amount
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
output({
"wallet": wallet_name,
"amount": amount,
"to": to_address,
"new_balance": wallet_data['balance'],
"note": "Transaction recorded locally (pending blockchain confirmation)"
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument("to_address")
@click.argument("amount", type=float)
@click.option("--description", help="Transaction description")
@click.pass_context
def request_payment(ctx, to_address: str, amount: float, description: Optional[str]):
"""Request payment from another address"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
# Create payment request
request = {
"from_address": to_address,
"to_address": wallet_data['address'],
"amount": amount,
"description": description or "",
"timestamp": datetime.now().isoformat()
}
output({
"wallet": wallet_name,
"payment_request": request,
"note": "Share this with the payer to request payment"
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.pass_context
def stats(ctx):
"""Show wallet statistics"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
transactions = wallet_data.get('transactions', [])
# Calculate stats
total_earned = sum(tx['amount'] for tx in transactions if tx['type'] == 'earn' and tx['amount'] > 0)
total_spent = sum(abs(tx['amount']) for tx in transactions if tx['type'] in ['spend', 'send'] and tx['amount'] < 0)
jobs_completed = len([tx for tx in transactions if tx['type'] == 'earn'])
output({
"wallet": wallet_name,
"address": wallet_data['address'],
"current_balance": wallet_data.get('balance', 0),
"total_earned": total_earned,
"total_spent": total_spent,
"jobs_completed": jobs_completed,
"transaction_count": len(transactions),
"wallet_created": wallet_data.get('created_at')
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument("amount", type=float)
@click.option("--duration", type=int, default=30, help="Staking duration in days")
@click.pass_context
def stake(ctx, amount: float, duration: int):
"""Stake AITBC tokens"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
balance = wallet_data.get('balance', 0)
if balance < amount:
error(f"Insufficient balance. Available: {balance}, Required: {amount}")
ctx.exit(1)
return
# Record stake
stake_id = f"stake_{int(datetime.now().timestamp())}"
stake_record = {
"stake_id": stake_id,
"amount": amount,
"duration_days": duration,
"start_date": datetime.now().isoformat(),
"end_date": (datetime.now() + timedelta(days=duration)).isoformat(),
"status": "active",
"apy": 5.0 + (duration / 30) * 1.5 # Higher APY for longer stakes
}
staking = wallet_data.setdefault('staking', [])
staking.append(stake_record)
wallet_data['balance'] = balance - amount
# Add transaction
wallet_data['transactions'].append({
"type": "stake",
"amount": -amount,
"stake_id": stake_id,
"description": f"Staked {amount} AITBC for {duration} days",
"timestamp": datetime.now().isoformat()
})
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Staked {amount} AITBC for {duration} days")
output({
"wallet": wallet_name,
"stake_id": stake_id,
"amount": amount,
"duration_days": duration,
"apy": stake_record['apy'],
"new_balance": wallet_data['balance']
}, ctx.obj.get('output_format', 'table'))
@wallet.command()
@click.argument("stake_id")
@click.pass_context
def unstake(ctx, stake_id: str):
"""Unstake AITBC tokens"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
staking = wallet_data.get('staking', [])
stake_record = next((s for s in staking if s['stake_id'] == stake_id and s['status'] == 'active'), None)
if not stake_record:
error(f"Active stake '{stake_id}' not found")
ctx.exit(1)
return
# Calculate rewards
start = datetime.fromisoformat(stake_record['start_date'])
days_staked = max(1, (datetime.now() - start).days)
daily_rate = stake_record['apy'] / 100 / 365
rewards = stake_record['amount'] * daily_rate * days_staked
# Return principal + rewards
returned = stake_record['amount'] + rewards
wallet_data['balance'] = wallet_data.get('balance', 0) + returned
stake_record['status'] = 'completed'
stake_record['rewards'] = rewards
stake_record['completed_date'] = datetime.now().isoformat()
# Add transaction
wallet_data['transactions'].append({
"type": "unstake",
"amount": returned,
"stake_id": stake_id,
"rewards": rewards,
"description": f"Unstaked {stake_record['amount']} AITBC + {rewards:.4f} rewards",
"timestamp": datetime.now().isoformat()
})
with open(wallet_path, 'w') as f:
json.dump(wallet_data, f, indent=2)
success(f"Unstaked {stake_record['amount']} AITBC + {rewards:.4f} rewards")
output({
"wallet": wallet_name,
"stake_id": stake_id,
"principal": stake_record['amount'],
"rewards": rewards,
"total_returned": returned,
"days_staked": days_staked,
"new_balance": wallet_data['balance']
}, ctx.obj.get('output_format', 'table'))
@wallet.command(name="staking-info")
@click.pass_context
def staking_info(ctx):
"""Show staking information"""
wallet_name = ctx.obj['wallet_name']
wallet_path = ctx.obj['wallet_path']
if not wallet_path.exists():
error(f"Wallet '{wallet_name}' not found")
return
with open(wallet_path, 'r') as f:
wallet_data = json.load(f)
staking = wallet_data.get('staking', [])
active_stakes = [s for s in staking if s['status'] == 'active']
completed_stakes = [s for s in staking if s['status'] == 'completed']
total_staked = sum(s['amount'] for s in active_stakes)
total_rewards = sum(s.get('rewards', 0) for s in completed_stakes)
output({
"wallet": wallet_name,
"total_staked": total_staked,
"total_rewards_earned": total_rewards,
"active_stakes": len(active_stakes),
"completed_stakes": len(completed_stakes),
"stakes": [
{
"stake_id": s['stake_id'],
"amount": s['amount'],
"apy": s['apy'],
"duration_days": s['duration_days'],
"status": s['status'],
"start_date": s['start_date']
}
for s in staking
]
}, ctx.obj.get('output_format', 'table'))
@wallet.command(name="multisig-create")
@click.argument("signers", nargs=-1, required=True)
@click.option("--threshold", type=int, required=True, help="Required signatures to approve")
@click.option("--name", required=True, help="Multisig wallet name")
@click.pass_context
def multisig_create(ctx, signers: tuple, threshold: int, name: str):
"""Create a multi-signature wallet"""
wallet_dir = ctx.obj.get('wallet_dir', Path.home() / ".aitbc" / "wallets")
wallet_dir.mkdir(parents=True, exist_ok=True)
multisig_path = wallet_dir / f"{name}_multisig.json"
if multisig_path.exists():
error(f"Multisig wallet '{name}' already exists")
return
if threshold > len(signers):
error(f"Threshold ({threshold}) cannot exceed number of signers ({len(signers)})")
return
import secrets
multisig_data = {
"wallet_id": name,
"type": "multisig",
"address": f"aitbc1ms{secrets.token_hex(18)}",
"signers": list(signers),
"threshold": threshold,
"created_at": datetime.now().isoformat(),
"balance": 0.0,
"transactions": [],
"pending_transactions": []
}
with open(multisig_path, "w") as f:
json.dump(multisig_data, f, indent=2)
success(f"Multisig wallet '{name}' created ({threshold}-of-{len(signers)})")
output({
"name": name,
"address": multisig_data["address"],
"signers": list(signers),
"threshold": threshold
}, ctx.obj.get('output_format', 'table'))
@wallet.command(name="multisig-propose")
@click.option("--wallet", "wallet_name", required=True, help="Multisig wallet name")
@click.argument("to_address")
@click.argument("amount", type=float)
@click.option("--description", help="Transaction description")
@click.pass_context
def multisig_propose(ctx, wallet_name: str, to_address: str, amount: float, description: Optional[str]):
"""Propose a multisig transaction"""
wallet_dir = ctx.obj.get('wallet_dir', Path.home() / ".aitbc" / "wallets")
multisig_path = wallet_dir / f"{wallet_name}_multisig.json"
if not multisig_path.exists():
error(f"Multisig wallet '{wallet_name}' not found")
return
with open(multisig_path) as f:
ms_data = json.load(f)
if ms_data.get("balance", 0) < amount:
error(f"Insufficient balance. Available: {ms_data['balance']}, Required: {amount}")
ctx.exit(1)
return
import secrets
tx_id = f"mstx_{secrets.token_hex(8)}"
pending_tx = {
"tx_id": tx_id,
"to": to_address,
"amount": amount,
"description": description or "",
"proposed_at": datetime.now().isoformat(),
"proposed_by": os.environ.get("USER", "unknown"),
"signatures": [],
"status": "pending"
}
ms_data.setdefault("pending_transactions", []).append(pending_tx)
with open(multisig_path, "w") as f:
json.dump(ms_data, f, indent=2)
success(f"Transaction proposed: {tx_id}")
output({
"tx_id": tx_id,
"to": to_address,
"amount": amount,
"signatures_needed": ms_data["threshold"],
"status": "pending"
}, ctx.obj.get('output_format', 'table'))
@wallet.command(name="multisig-sign")
@click.option("--wallet", "wallet_name", required=True, help="Multisig wallet name")
@click.argument("tx_id")
@click.option("--signer", required=True, help="Signer address")
@click.pass_context
def multisig_sign(ctx, wallet_name: str, tx_id: str, signer: str):
"""Sign a pending multisig transaction"""
wallet_dir = ctx.obj.get('wallet_dir', Path.home() / ".aitbc" / "wallets")
multisig_path = wallet_dir / f"{wallet_name}_multisig.json"
if not multisig_path.exists():
error(f"Multisig wallet '{wallet_name}' not found")
return
with open(multisig_path) as f:
ms_data = json.load(f)
if signer not in ms_data.get("signers", []):
error(f"'{signer}' is not an authorized signer")
ctx.exit(1)
return
pending = ms_data.get("pending_transactions", [])
tx = next((t for t in pending if t["tx_id"] == tx_id and t["status"] == "pending"), None)
if not tx:
error(f"Pending transaction '{tx_id}' not found")
ctx.exit(1)
return
if signer in tx["signatures"]:
error(f"'{signer}' has already signed this transaction")
return
tx["signatures"].append(signer)
# Check if threshold met
if len(tx["signatures"]) >= ms_data["threshold"]:
tx["status"] = "approved"
# Execute the transaction
ms_data["balance"] = ms_data.get("balance", 0) - tx["amount"]
ms_data["transactions"].append({
"type": "multisig_send",
"amount": -tx["amount"],
"to": tx["to"],
"tx_id": tx["tx_id"],
"signatures": tx["signatures"],
"timestamp": datetime.now().isoformat()
})
success(f"Transaction {tx_id} approved and executed!")
else:
success(f"Signed. {len(tx['signatures'])}/{ms_data['threshold']} signatures collected")
with open(multisig_path, "w") as f:
json.dump(ms_data, f, indent=2)
output({
"tx_id": tx_id,
"signatures": tx["signatures"],
"threshold": ms_data["threshold"],
"status": tx["status"]
}, ctx.obj.get('output_format', 'table'))

View File

@@ -0,0 +1,68 @@
"""Configuration management for AITBC CLI"""
import os
import yaml
from pathlib import Path
from typing import Optional
from dataclasses import dataclass, field
from dotenv import load_dotenv
@dataclass
class Config:
"""Configuration object for AITBC CLI"""
coordinator_url: str = "http://127.0.0.1:18000"
api_key: Optional[str] = None
config_dir: Path = field(default_factory=lambda: Path.home() / ".aitbc")
config_file: Optional[str] = None
def __post_init__(self):
"""Initialize configuration"""
# Load environment variables
load_dotenv()
# Set default config file if not specified
if not self.config_file:
self.config_file = str(self.config_dir / "config.yaml")
# Load config from file if it exists
self.load_from_file()
# Override with environment variables
if os.getenv("AITBC_URL"):
self.coordinator_url = os.getenv("AITBC_URL")
if os.getenv("AITBC_API_KEY"):
self.api_key = os.getenv("AITBC_API_KEY")
def load_from_file(self):
"""Load configuration from YAML file"""
if self.config_file and Path(self.config_file).exists():
try:
with open(self.config_file, 'r') as f:
data = yaml.safe_load(f) or {}
self.coordinator_url = data.get('coordinator_url', self.coordinator_url)
self.api_key = data.get('api_key', self.api_key)
except Exception as e:
print(f"Warning: Could not load config file: {e}")
def save_to_file(self):
"""Save configuration to YAML file"""
if not self.config_file:
return
# Ensure config directory exists
Path(self.config_file).parent.mkdir(parents=True, exist_ok=True)
data = {
'coordinator_url': self.coordinator_url,
'api_key': self.api_key
}
with open(self.config_file, 'w') as f:
yaml.dump(data, f, default_flow_style=False)
def get_config(config_file: Optional[str] = None) -> Config:
"""Get configuration instance"""
return Config(config_file=config_file)

136
cli/aitbc_cli/main.py Normal file
View File

@@ -0,0 +1,136 @@
#!/usr/bin/env python3
"""
AITBC CLI - Main entry point for the AITBC Command Line Interface
"""
import click
import sys
from typing import Optional
from . import __version__
from .config import get_config
from .utils import output, setup_logging
from .commands.client import client
from .commands.miner import miner
from .commands.wallet import wallet
from .commands.auth import auth
from .commands.blockchain import blockchain
from .commands.marketplace import marketplace
from .commands.simulate import simulate
from .commands.admin import admin
from .commands.config import config
from .commands.monitor import monitor
from .plugins import plugin, load_plugins
@click.group()
@click.option(
"--url",
default=None,
help="Coordinator API URL (overrides config)"
)
@click.option(
"--api-key",
default=None,
help="API key (overrides config)"
)
@click.option(
"--output",
type=click.Choice(["table", "json", "yaml"]),
default="table",
help="Output format"
)
@click.option(
"--verbose", "-v",
count=True,
help="Increase verbosity (use -v, -vv, -vvv)"
)
@click.option(
"--debug",
is_flag=True,
help="Enable debug mode"
)
@click.option(
"--config-file",
default=None,
help="Path to config file"
)
@click.version_option(version=__version__, prog_name="aitbc")
@click.pass_context
def cli(ctx, url: Optional[str], api_key: Optional[str], output: str,
verbose: int, debug: bool, config_file: Optional[str]):
"""
AITBC CLI - Command Line Interface for AITBC Network
Manage jobs, mining, wallets, and blockchain operations from the command line.
"""
# Ensure context object exists
ctx.ensure_object(dict)
# Setup logging based on verbosity
log_level = setup_logging(verbose, debug)
# Load configuration
config = get_config(config_file)
# Override config with command line options
if url:
config.coordinator_url = url
if api_key:
config.api_key = api_key
# Store in context for subcommands
ctx.obj['config'] = config
ctx.obj['output_format'] = output
ctx.obj['log_level'] = log_level
# Add command groups
cli.add_command(client)
cli.add_command(miner)
cli.add_command(wallet)
cli.add_command(auth)
cli.add_command(blockchain)
cli.add_command(marketplace)
cli.add_command(simulate)
cli.add_command(admin)
cli.add_command(config)
cli.add_command(monitor)
cli.add_command(plugin)
load_plugins(cli)
@cli.command()
@click.pass_context
def version(ctx):
"""Show version information"""
output(f"AITBC CLI version {__version__}", ctx.obj['output_format'])
@cli.command()
@click.pass_context
def config_show(ctx):
"""Show current configuration"""
config = ctx.obj['config']
output({
"coordinator_url": config.coordinator_url,
"api_key": "***REDACTED***" if config.api_key else None,
"output_format": ctx.obj['output_format'],
"config_file": config.config_file
}, ctx.obj['output_format'])
def main():
"""Main entry point"""
try:
cli()
except KeyboardInterrupt:
click.echo("\nAborted by user", err=True)
sys.exit(1)
except Exception as e:
click.echo(f"Error: {e}", err=True)
sys.exit(1)
if __name__ == "__main__":
main()

186
cli/aitbc_cli/plugins.py Normal file
View File

@@ -0,0 +1,186 @@
"""Plugin system for AITBC CLI custom commands"""
import importlib
import importlib.util
import json
import click
from pathlib import Path
from typing import Optional
PLUGIN_DIR = Path.home() / ".aitbc" / "plugins"
def get_plugin_dir() -> Path:
"""Get and ensure plugin directory exists"""
PLUGIN_DIR.mkdir(parents=True, exist_ok=True)
return PLUGIN_DIR
def load_plugins(cli_group):
"""Load all plugins and register them with the CLI group"""
plugin_dir = get_plugin_dir()
manifest_file = plugin_dir / "plugins.json"
if not manifest_file.exists():
return
with open(manifest_file) as f:
manifest = json.load(f)
for plugin_info in manifest.get("plugins", []):
if not plugin_info.get("enabled", True):
continue
plugin_path = plugin_dir / plugin_info["file"]
if not plugin_path.exists():
continue
try:
spec = importlib.util.spec_from_file_location(
plugin_info["name"], str(plugin_path)
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# Look for a click group or command named 'plugin_command'
if hasattr(module, "plugin_command"):
cli_group.add_command(module.plugin_command)
except Exception:
pass # Skip broken plugins silently
@click.group()
def plugin():
"""Manage CLI plugins"""
pass
@plugin.command(name="list")
@click.pass_context
def list_plugins(ctx):
"""List installed plugins"""
from .utils import output
plugin_dir = get_plugin_dir()
manifest_file = plugin_dir / "plugins.json"
if not manifest_file.exists():
output({"message": "No plugins installed"}, ctx.obj.get('output_format', 'table'))
return
with open(manifest_file) as f:
manifest = json.load(f)
plugins = manifest.get("plugins", [])
if not plugins:
output({"message": "No plugins installed"}, ctx.obj.get('output_format', 'table'))
else:
output(plugins, ctx.obj.get('output_format', 'table'))
@plugin.command()
@click.argument("name")
@click.argument("file_path", type=click.Path(exists=True))
@click.option("--description", default="", help="Plugin description")
@click.pass_context
def install(ctx, name: str, file_path: str, description: str):
"""Install a plugin from a Python file"""
import shutil
from .utils import output, error, success
plugin_dir = get_plugin_dir()
manifest_file = plugin_dir / "plugins.json"
# Copy plugin file
dest = plugin_dir / f"{name}.py"
shutil.copy2(file_path, dest)
# Update manifest
manifest = {"plugins": []}
if manifest_file.exists():
with open(manifest_file) as f:
manifest = json.load(f)
# Remove existing entry with same name
manifest["plugins"] = [p for p in manifest["plugins"] if p["name"] != name]
manifest["plugins"].append({
"name": name,
"file": f"{name}.py",
"description": description,
"enabled": True
})
with open(manifest_file, "w") as f:
json.dump(manifest, f, indent=2)
success(f"Plugin '{name}' installed")
output({"name": name, "file": str(dest), "status": "installed"}, ctx.obj.get('output_format', 'table'))
@plugin.command()
@click.argument("name")
@click.pass_context
def uninstall(ctx, name: str):
"""Uninstall a plugin"""
from .utils import output, error, success
plugin_dir = get_plugin_dir()
manifest_file = plugin_dir / "plugins.json"
if not manifest_file.exists():
error(f"Plugin '{name}' not found")
return
with open(manifest_file) as f:
manifest = json.load(f)
plugin_entry = next((p for p in manifest["plugins"] if p["name"] == name), None)
if not plugin_entry:
error(f"Plugin '{name}' not found")
return
# Remove file
plugin_file = plugin_dir / plugin_entry["file"]
if plugin_file.exists():
plugin_file.unlink()
# Update manifest
manifest["plugins"] = [p for p in manifest["plugins"] if p["name"] != name]
with open(manifest_file, "w") as f:
json.dump(manifest, f, indent=2)
success(f"Plugin '{name}' uninstalled")
output({"name": name, "status": "uninstalled"}, ctx.obj.get('output_format', 'table'))
@plugin.command()
@click.argument("name")
@click.argument("state", type=click.Choice(["enable", "disable"]))
@click.pass_context
def toggle(ctx, name: str, state: str):
"""Enable or disable a plugin"""
from .utils import output, error, success
plugin_dir = get_plugin_dir()
manifest_file = plugin_dir / "plugins.json"
if not manifest_file.exists():
error(f"Plugin '{name}' not found")
return
with open(manifest_file) as f:
manifest = json.load(f)
plugin_entry = next((p for p in manifest["plugins"] if p["name"] == name), None)
if not plugin_entry:
error(f"Plugin '{name}' not found")
return
plugin_entry["enabled"] = (state == "enable")
with open(manifest_file, "w") as f:
json.dump(manifest, f, indent=2)
success(f"Plugin '{name}' {'enabled' if state == 'enable' else 'disabled'}")
output({"name": name, "enabled": plugin_entry["enabled"]}, ctx.obj.get('output_format', 'table'))

View File

@@ -0,0 +1,268 @@
"""Utility functions for AITBC CLI"""
import time
import logging
import sys
import os
from pathlib import Path
from typing import Any, Optional, Callable, Iterator
from contextlib import contextmanager
from rich.console import Console
from rich.logging import RichHandler
from rich.table import Table
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn
import json
import yaml
from tabulate import tabulate
console = Console()
@contextmanager
def progress_bar(description: str = "Working...", total: Optional[int] = None):
"""Context manager for progress bar display"""
with Progress(
SpinnerColumn(),
TextColumn("[bold blue]{task.description}"),
BarColumn(),
TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
TimeElapsedColumn(),
console=console,
) as progress:
task = progress.add_task(description, total=total)
yield progress, task
def progress_spinner(description: str = "Working..."):
"""Simple spinner for indeterminate operations"""
return console.status(f"[bold blue]{description}")
class AuditLogger:
"""Audit logging for CLI operations"""
def __init__(self, log_dir: Optional[Path] = None):
self.log_dir = log_dir or Path.home() / ".aitbc" / "audit"
self.log_dir.mkdir(parents=True, exist_ok=True)
self.log_file = self.log_dir / "audit.jsonl"
def log(self, action: str, details: dict = None, user: str = None):
"""Log an audit event"""
import datetime
entry = {
"timestamp": datetime.datetime.now().isoformat(),
"action": action,
"user": user or os.environ.get("USER", "unknown"),
"details": details or {}
}
with open(self.log_file, "a") as f:
f.write(json.dumps(entry) + "\n")
def get_logs(self, limit: int = 50, action_filter: str = None) -> list:
"""Read audit log entries"""
if not self.log_file.exists():
return []
entries = []
with open(self.log_file) as f:
for line in f:
line = line.strip()
if line:
entry = json.loads(line)
if action_filter and entry.get("action") != action_filter:
continue
entries.append(entry)
return entries[-limit:]
def encrypt_value(value: str, key: str = None) -> str:
"""Simple XOR-based obfuscation for config values (not cryptographic security)"""
import base64
key = key or "aitbc_config_key_2026"
encrypted = bytes([ord(c) ^ ord(key[i % len(key)]) for i, c in enumerate(value)])
return base64.b64encode(encrypted).decode()
def decrypt_value(encrypted: str, key: str = None) -> str:
"""Decrypt an XOR-obfuscated config value"""
import base64
key = key or "aitbc_config_key_2026"
data = base64.b64decode(encrypted)
return ''.join(chr(b ^ ord(key[i % len(key)])) for i, b in enumerate(data))
def setup_logging(verbosity: int, debug: bool = False) -> str:
"""Setup logging with Rich"""
log_level = "WARNING"
if verbosity >= 3 or debug:
log_level = "DEBUG"
elif verbosity == 2:
log_level = "INFO"
elif verbosity == 1:
log_level = "WARNING"
logging.basicConfig(
level=log_level,
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler(console=console, rich_tracebacks=True)]
)
return log_level
def output(data: Any, format_type: str = "table"):
"""Format and output data"""
if format_type == "json":
console.print(json.dumps(data, indent=2, default=str))
elif format_type == "yaml":
console.print(yaml.dump(data, default_flow_style=False, sort_keys=False))
elif format_type == "table":
if isinstance(data, dict) and not isinstance(data, list):
# Simple key-value table
table = Table(show_header=False, box=None)
table.add_column("Key", style="cyan")
table.add_column("Value", style="green")
for key, value in data.items():
if isinstance(value, (dict, list)):
value = json.dumps(value, default=str)
table.add_row(str(key), str(value))
console.print(table)
elif isinstance(data, list) and data:
if all(isinstance(item, dict) for item in data):
# Table from list of dicts
headers = list(data[0].keys())
table = Table()
for header in headers:
table.add_column(header, style="cyan")
for item in data:
row = [str(item.get(h, "")) for h in headers]
table.add_row(*row)
console.print(table)
else:
# Simple list
for item in data:
console.print(f"{item}")
else:
console.print(data)
else:
console.print(data)
def error(message: str):
"""Print error message"""
console.print(Panel(f"[red]Error: {message}[/red]", title=""))
def success(message: str):
"""Print success message"""
console.print(Panel(f"[green]{message}[/green]", title=""))
def warning(message: str):
"""Print warning message"""
console.print(Panel(f"[yellow]{message}[/yellow]", title="⚠️"))
def retry_with_backoff(
func,
max_retries: int = 3,
base_delay: float = 1.0,
max_delay: float = 60.0,
backoff_factor: float = 2.0,
exceptions: tuple = (Exception,)
):
"""
Retry function with exponential backoff
Args:
func: Function to retry
max_retries: Maximum number of retries
base_delay: Initial delay in seconds
max_delay: Maximum delay in seconds
backoff_factor: Multiplier for delay after each retry
exceptions: Tuple of exceptions to catch and retry on
Returns:
Result of function call
"""
last_exception = None
for attempt in range(max_retries + 1):
try:
return func()
except exceptions as e:
last_exception = e
if attempt == max_retries:
error(f"Max retries ({max_retries}) exceeded. Last error: {e}")
raise
# Calculate delay with exponential backoff
delay = min(base_delay * (backoff_factor ** attempt), max_delay)
warning(f"Attempt {attempt + 1} failed: {e}. Retrying in {delay:.1f}s...")
time.sleep(delay)
raise last_exception
def create_http_client_with_retry(
max_retries: int = 3,
base_delay: float = 1.0,
max_delay: float = 60.0,
timeout: float = 30.0
):
"""
Create an HTTP client with retry capabilities
Args:
max_retries: Maximum number of retries
base_delay: Initial delay in seconds
max_delay: Maximum delay in seconds
timeout: Request timeout in seconds
Returns:
httpx.Client with retry transport
"""
import httpx
class RetryTransport(httpx.Transport):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_retries = max_retries
self.base_delay = base_delay
self.max_delay = max_delay
self.backoff_factor = 2.0
def handle_request(self, request):
last_exception = None
for attempt in range(self.max_retries + 1):
try:
return super().handle_request(request)
except (httpx.NetworkError, httpx.TimeoutException) as e:
last_exception = e
if attempt == self.max_retries:
break
delay = min(
self.base_delay * (self.backoff_factor ** attempt),
self.max_delay
)
time.sleep(delay)
raise last_exception
return httpx.Client(
transport=RetryTransport(),
timeout=timeout
)

View File

@@ -0,0 +1,251 @@
#!/bin/bash
# AITBC CLI Shell Completion Script
# Source this file in your .bashrc or .zshrc to enable tab completion
# AITBC CLI completion for bash
_aitbc_completion() {
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
# Main commands
if [[ ${COMP_CWORD} -eq 1 ]]; then
opts="client miner wallet auth blockchain marketplace admin config simulate help --help --version --url --api-key --output -v --debug --config-file"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
fi
# Command-specific completions
case "${COMP_WORDS[1]}" in
client)
_aitbc_client_completion
;;
miner)
_aitbc_miner_completion
;;
wallet)
_aitbc_wallet_completion
;;
auth)
_aitbc_auth_completion
;;
blockchain)
_aitbc_blockchain_completion
;;
marketplace)
_aitbc_marketplace_completion
;;
admin)
_aitbc_admin_completion
;;
config)
_aitbc_config_completion
;;
simulate)
_aitbc_simulate_completion
;;
--output)
opts="table json yaml"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
*)
;;
esac
}
# Client command completion
_aitbc_client_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="submit status blocks receipts cancel history"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${COMP_CWORD} -eq 3 ]]; then
case "${COMP_WORDS[2]}" in
submit)
opts="inference training fine-tuning"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
status|cancel)
# Complete with job IDs (placeholder)
COMPREPLY=( $(compgen -W "job_123 job_456 job_789" -- ${cur}) )
;;
*)
;;
esac
fi
}
# Miner command completion
_aitbc_miner_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="register poll mine heartbeat status"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
fi
}
# Wallet command completion
_aitbc_wallet_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="balance earn spend history address stats send request"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
fi
}
# Auth command completion
_aitbc_auth_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="login logout token status refresh keys import-env"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${COMP_CWORD} -eq 3 ]]; then
case "${COMP_WORDS[2]}" in
keys)
opts="create list revoke"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
*)
;;
esac
fi
}
# Blockchain command completion
_aitbc_blockchain_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="blocks block transaction status sync-status peers info supply validators"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
fi
}
# Marketplace command completion
_aitbc_marketplace_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="gpu orders pricing reviews"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${COMP_CWORD} -eq 3 ]]; then
case "${COMP_WORDS[2]}" in
gpu)
opts="list details book release"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
*)
;;
esac
fi
}
# Admin command completion
_aitbc_admin_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="status jobs miners analytics logs maintenance action"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${COMP_CWORD} -eq 3 ]]; then
case "${COMP_WORDS[2]}" in
jobs|miners)
opts="list details cancel suspend"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
*)
;;
esac
fi
}
# Config command completion
_aitbc_config_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="show set path edit reset export import validate environments profiles"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${COMP_CWORD} -eq 3 ]]; then
case "${COMP_WORDS[2]}" in
set)
opts="coordinator_url api_key timeout"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
export|import)
opts="--format json yaml"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
profiles)
opts="save list load delete"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
*)
;;
esac
fi
}
# Simulate command completion
_aitbc_simulate_completion() {
local cur prev opts
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
if [[ ${COMP_CWORD} -eq 2 ]]; then
opts="init user workflow load-test scenario results reset"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${COMP_CWORD} -eq 3 ]]; then
case "${COMP_WORDS[2]}" in
user)
opts="create list balance fund"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
scenario)
opts="list run"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
;;
*)
;;
esac
fi
}
# Register the completion
complete -F _aitbc_completion aitbc
# For zsh compatibility
if [[ -n "$ZSH_VERSION" ]]; then
autoload -U compinit
compinit -i
_aitbc_completion() {
local matches
matches=($(compgen -W "$(aitbc _completion_helper "${words[@]}")" -- "${words[CURRENT]}"))
_describe 'aitbc commands' matches
}
compdef _aitbc_completion aitbc
fi
echo "AITBC CLI shell completion loaded!"
echo "Tab completion is now enabled for the aitbc command."

View File

@@ -11,7 +11,7 @@ from datetime import datetime
from typing import Optional
# Configuration
DEFAULT_COORDINATOR = "http://127.0.0.1:18000"
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
class AITBCClient:

328
cli/client_enhanced.py Normal file
View File

@@ -0,0 +1,328 @@
#!/usr/bin/env python3
"""
AITBC Client CLI Tool - Enhanced version with output formatting
"""
import argparse
import httpx
import json
import sys
import yaml
from datetime import datetime
from typing import Optional, Dict, Any
from tabulate import tabulate
# Configuration
DEFAULT_COORDINATOR = "http://127.0.0.1:18000"
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
class OutputFormatter:
"""Handle different output formats"""
@staticmethod
def format(data: Any, format_type: str = "table") -> str:
"""Format data according to specified type"""
if format_type == "json":
return json.dumps(data, indent=2, default=str)
elif format_type == "yaml":
return yaml.dump(data, default_flow_style=False, sort_keys=False)
elif format_type == "table":
return OutputFormatter._format_table(data)
else:
return str(data)
@staticmethod
def _format_table(data: Any) -> str:
"""Format data as table"""
if isinstance(data, dict):
# Simple key-value table
rows = [[k, v] for k, v in data.items()]
return tabulate(rows, headers=["Key", "Value"], tablefmt="grid")
elif isinstance(data, list) and data:
if all(isinstance(item, dict) for item in data):
# Table from list of dicts
headers = list(data[0].keys())
rows = [[item.get(h, "") for h in headers] for item in data]
return tabulate(rows, headers=headers, tablefmt="grid")
else:
# Simple list
return "\n".join(f"{item}" for item in data)
else:
return str(data)
class AITBCClient:
def __init__(self, coordinator_url: str, api_key: str):
self.coordinator_url = coordinator_url
self.api_key = api_key
self.client = httpx.Client()
def submit_job(self, job_type: str, task_data: dict, ttl: int = 900) -> Optional[str]:
"""Submit a job to the coordinator"""
job_payload = {
"payload": {
"type": job_type,
**task_data
},
"ttl_seconds": ttl
}
try:
response = self.client.post(
f"{self.coordinator_url}/v1/jobs",
headers={
"Content-Type": "application/json",
"X-Api-Key": self.api_key
},
json=job_payload
)
if response.status_code == 201:
job = response.json()
return job['job_id']
else:
print(f"❌ Error submitting job: {response.status_code}")
print(f" Response: {response.text}")
return None
except Exception as e:
print(f"❌ Error: {e}")
return None
def get_job_status(self, job_id: str) -> Optional[Dict]:
"""Get job status"""
try:
response = self.client.get(
f"{self.coordinator_url}/v1/jobs/{job_id}",
headers={"X-Api-Key": self.api_key}
)
if response.status_code == 200:
return response.json()
else:
print(f"❌ Error getting status: {response.status_code}")
return None
except Exception as e:
print(f"❌ Error: {e}")
return None
def list_blocks(self, limit: int = 10) -> Optional[list]:
"""List recent blocks"""
try:
response = self.client.get(
f"{self.coordinator_url}/v1/explorer/blocks",
params={"limit": limit},
headers={"X-Api-Key": self.api_key}
)
if response.status_code == 200:
return response.json()
else:
print(f"❌ Error getting blocks: {response.status_code}")
return None
except Exception as e:
print(f"❌ Error: {e}")
return None
def list_transactions(self, limit: int = 10) -> Optional[list]:
"""List recent transactions"""
try:
response = self.client.get(
f"{self.coordinator_url}/v1/explorer/transactions",
params={"limit": limit},
headers={"X-Api-Key": self.api_key}
)
if response.status_code == 200:
return response.json()
else:
print(f"❌ Error getting transactions: {response.status_code}")
return None
except Exception as e:
print(f"❌ Error: {e}")
return None
def list_receipts(self, limit: int = 10, job_id: str = None) -> Optional[list]:
"""List job receipts"""
try:
params = {"limit": limit}
if job_id:
params["job_id"] = job_id
response = self.client.get(
f"{self.coordinator_url}/v1/explorer/receipts",
params=params,
headers={"X-Api-Key": self.api_key}
)
if response.status_code == 200:
return response.json()
else:
print(f"❌ Error getting receipts: {response.status_code}")
return None
except Exception as e:
print(f"❌ Error: {e}")
return None
def cancel_job(self, job_id: str) -> bool:
"""Cancel a job"""
try:
response = self.client.post(
f"{self.coordinator_url}/v1/jobs/{job_id}/cancel",
headers={"X-Api-Key": self.api_key}
)
if response.status_code == 200:
return True
else:
print(f"❌ Error cancelling job: {response.status_code}")
return False
except Exception as e:
print(f"❌ Error: {e}")
return False
def main():
parser = argparse.ArgumentParser(description="AITBC Client CLI Tool")
parser.add_argument("--url", default=DEFAULT_COORDINATOR, help="Coordinator URL")
parser.add_argument("--api-key", default=DEFAULT_API_KEY, help="API key")
parser.add_argument("--output", choices=["table", "json", "yaml"],
default="table", help="Output format")
subparsers = parser.add_subparsers(dest="command", help="Available commands")
# Submit command
submit_parser = subparsers.add_parser("submit", help="Submit a job")
submit_parser.add_argument("type", help="Job type (e.g., inference, training)")
submit_parser.add_argument("--prompt", help="Prompt for inference jobs")
submit_parser.add_argument("--model", help="Model name")
submit_parser.add_argument("--ttl", type=int, default=900, help="Time to live (seconds)")
submit_parser.add_argument("--file", type=argparse.FileType('r'),
help="Submit job from JSON file")
# Status command
status_parser = subparsers.add_parser("status", help="Check job status")
status_parser.add_argument("job_id", help="Job ID")
# Blocks command
blocks_parser = subparsers.add_parser("blocks", help="List recent blocks")
blocks_parser.add_argument("--limit", type=int, default=10, help="Number of blocks")
# Browser command
browser_parser = subparsers.add_parser("browser", help="Browse blockchain")
browser_parser.add_argument("--block-limit", type=int, default=5, help="Block limit")
browser_parser.add_argument("--tx-limit", type=int, default=10, help="Transaction limit")
browser_parser.add_argument("--receipt-limit", type=int, default=10, help="Receipt limit")
browser_parser.add_argument("--job-id", help="Filter by job ID")
# Cancel command
cancel_parser = subparsers.add_parser("cancel", help="Cancel a job")
cancel_parser.add_argument("job_id", help="Job ID")
# Receipts command
receipts_parser = subparsers.add_parser("receipts", help="List receipts")
receipts_parser.add_argument("--limit", type=int, default=10, help="Number of receipts")
receipts_parser.add_argument("--job-id", help="Filter by job ID")
args = parser.parse_args()
if not args.command:
parser.print_help()
sys.exit(1)
# Create client
client = AITBCClient(args.url, args.api_key)
# Execute command
if args.command == "submit":
# Build job data
if args.file:
try:
task_data = json.load(args.file)
except Exception as e:
print(f"❌ Error reading job file: {e}")
sys.exit(1)
else:
task_data = {"type": args.type}
if args.prompt:
task_data["prompt"] = args.prompt
if args.model:
task_data["model"] = args.model
# Submit job
job_id = client.submit_job(args.type, task_data, args.ttl)
if job_id:
result = {
"status": "success",
"job_id": job_id,
"message": "Job submitted successfully",
"track_command": f"python3 cli/client_enhanced.py status {job_id}"
}
print(OutputFormatter.format(result, args.output))
sys.exit(0)
else:
sys.exit(1)
elif args.command == "status":
status = client.get_job_status(args.job_id)
if status:
print(OutputFormatter.format(status, args.output))
sys.exit(0)
else:
sys.exit(1)
elif args.command == "blocks":
blocks = client.list_blocks(args.limit)
if blocks:
print(OutputFormatter.format(blocks, args.output))
sys.exit(0)
else:
sys.exit(1)
elif args.command == "browser":
blocks = client.list_blocks(args.block_limit) or []
transactions = client.list_transactions(args.tx_limit) or []
receipts = client.list_receipts(args.receipt_limit, job_id=args.job_id) or []
result = {
"latest_block": blocks[0] if blocks else None,
"recent_transactions": transactions,
"recent_receipts": receipts
}
print(OutputFormatter.format(result, args.output))
sys.exit(0)
elif args.command == "cancel":
if client.cancel_job(args.job_id):
result = {
"status": "success",
"job_id": args.job_id,
"message": "Job cancelled successfully"
}
print(OutputFormatter.format(result, args.output))
sys.exit(0)
else:
sys.exit(1)
elif args.command == "receipts":
receipts = client.list_receipts(args.limit, args.job_id)
if receipts:
print(OutputFormatter.format(receipts, args.output))
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main()

120
cli/man/aitbc.1 Normal file
View File

@@ -0,0 +1,120 @@
.TH AITBC 1 "February 2026" "AITBC CLI" "User Commands"
.SH NAME
aitbc \- command-line interface for the AITBC network
.SH SYNOPSIS
.B aitbc
[\fIOPTIONS\fR] \fICOMMAND\fR [\fIARGS\fR]...
.SH DESCRIPTION
The AITBC CLI provides a comprehensive command-line interface for interacting
with the AITBC network. It supports job submission, mining operations, wallet
management, blockchain queries, marketplace operations, system administration,
monitoring, and test simulations.
.SH GLOBAL OPTIONS
.TP
\fB\-\-url\fR \fITEXT\fR
Coordinator API URL (overrides config)
.TP
\fB\-\-api\-key\fR \fITEXT\fR
API key (overrides config)
.TP
\fB\-\-output\fR [table|json|yaml]
Output format (default: table)
.TP
\fB\-v\fR, \fB\-\-verbose\fR
Increase verbosity (use -v, -vv, -vvv)
.TP
\fB\-\-debug\fR
Enable debug mode
.TP
\fB\-\-config\-file\fR \fITEXT\fR
Path to config file
.TP
\fB\-\-version\fR
Show version and exit
.TP
\fB\-\-help\fR
Show help message and exit
.SH COMMANDS
.TP
\fBclient\fR
Submit and manage inference jobs (submit, status, blocks, receipts, cancel, history, batch-submit, template)
.TP
\fBminer\fR
Register as a miner and process jobs (register, poll, mine, heartbeat, status, earnings, update-capabilities, deregister, jobs, concurrent-mine)
.TP
\fBwallet\fR
Manage wallets and transactions (balance, earn, spend, send, history, address, stats, stake, unstake, staking-info, multisig-create, multisig-propose, multisig-sign, create, list, switch, delete, backup, restore, info, request-payment)
.TP
\fBauth\fR
Manage API keys and authentication (login, logout, token, status, refresh, keys, import-env)
.TP
\fBblockchain\fR
Query blockchain information (blocks, block, transaction, status, sync-status, peers, info, supply, validators)
.TP
\fBmarketplace\fR
GPU marketplace operations (gpu register/list/details/book/release, orders, pricing, reviews)
.TP
\fBadmin\fR
System administration (status, jobs, miners, analytics, logs, maintenance, audit-log)
.TP
\fBconfig\fR
Manage CLI configuration (show, set, path, edit, reset, export, import, validate, environments, profiles, set-secret, get-secret)
.TP
\fBmonitor\fR
Monitoring and alerting (dashboard, metrics, alerts, history, webhooks)
.TP
\fBsimulate\fR
Run simulations (init, user, workflow, load-test, scenario, results, reset)
.SH EXAMPLES
.PP
Submit a job:
.RS
aitbc client submit --prompt "What is AI?" --model gpt-4
.RE
.PP
Check wallet balance:
.RS
aitbc wallet balance
.RE
.PP
Start mining:
.RS
aitbc miner register --gpu-model RTX4090 --memory 24 --price 0.5
.br
aitbc miner poll --interval 5
.RE
.PP
Monitor system:
.RS
aitbc monitor dashboard --refresh 5
.RE
.SH ENVIRONMENT
.TP
\fBCLIENT_API_KEY\fR
API key for authentication
.TP
\fBAITBC_COORDINATOR_URL\fR
Coordinator API URL
.TP
\fBAITBC_OUTPUT_FORMAT\fR
Default output format
.TP
\fBAITBC_CONFIG_FILE\fR
Path to configuration file
.SH FILES
.TP
\fB~/.config/aitbc/config.yaml\fR
Default configuration file
.TP
\fB~/.aitbc/wallets/\fR
Wallet storage directory
.TP
\fB~/.aitbc/audit/audit.jsonl\fR
Audit log file
.TP
\fB~/.aitbc/templates/\fR
Job template storage
.SH SEE ALSO
Full documentation: https://docs.aitbc.net
.SH AUTHORS
AITBC Development Team

View File

@@ -12,7 +12,7 @@ from datetime import datetime
from typing import Optional
# Configuration
DEFAULT_COORDINATOR = "http://localhost:8001"
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${MINER_API_KEY}"
DEFAULT_MINER_ID = "cli-miner"

View File

@@ -10,7 +10,7 @@ import time
import sys
# Configuration
DEFAULT_COORDINATOR = "http://localhost:8001"
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${MINER_API_KEY}"
DEFAULT_MINER_ID = "localhost-gpu-miner"

View File

@@ -17,7 +17,7 @@ import json
import httpx
# Configuration
DEFAULT_COORDINATOR = "http://127.0.0.1:18000"
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_BLOCKCHAIN = "http://127.0.0.1:19000"
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
DEFAULT_PROMPT = "What is the capital of France?"

View File

@@ -11,7 +11,7 @@ from typing import Optional
import httpx
DEFAULT_COORDINATOR = "http://127.0.0.1:18000"
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
DEFAULT_PROMPT = "hello"
DEFAULT_TIMEOUT = 180

View File

@@ -0,0 +1,78 @@
# AITBC CLI Enhancement Progress Summary
## Status: ALL PHASES COMPLETE ✅
**116/116 tests passing** | **0 failures** | **11 command groups** | **80+ subcommands**
## Completed Phases
### Phase 0: Foundation ✅
- Standardized URLs, package structure, credential storage
- Created unified CLI entry point with Click framework
### Phase 1: Core Enhancements ✅
- **client.py**: Retry with exponential backoff, job history/filtering, batch submit (CSV/JSON), job templates
- **miner.py**: Earnings tracking, capability management, deregistration, job filtering, concurrent processing
- **wallet.py**: Multi-wallet, backup/restore, staking, `--wallet-path`, multi-signature wallets
- **auth.py**: Login/logout, token management, multi-environment, API key rotation
### Phase 2: New CLI Tools ✅
- blockchain.py, marketplace.py, admin.py, config.py, simulate.py
### Phase 3: Testing & Documentation ✅
- 116/116 CLI tests across 8 test files (0 failures)
- CI/CD: `.github/workflows/cli-tests.yml` (Python 3.10/3.11/3.12)
- CLI reference docs (`docs/cli-reference.md` — 560+ lines)
- Shell completion script, man page (`cli/man/aitbc.1`)
### Phase 4: Backend Integration ✅
- MarketplaceOffer model extended with GPU-specific fields
- GPU booking system, review system, sync-offers endpoint
### Phase 5: Advanced Features ✅
- **Scripting**: Batch CSV/JSON ops, job templates, webhook notifications, plugin system
- **Monitoring**: Real-time dashboard, metrics collection/export, alert configuration, historical analysis
- **Security**: Multi-signature wallets, encrypted config, audit logging
- **UX**: Rich progress bars, colored output, interactive prompts, auto-completion, man pages
## Test Coverage (116 tests)
| File | Tests |
|------|-------|
| test_config.py | 37 |
| test_wallet.py | 17 |
| test_auth.py | 15 |
| test_admin.py | 13 |
| test_simulate.py | 12 |
| test_marketplace.py | 11 |
| test_blockchain.py | 10 |
| test_client.py | 8 |
## CLI Structure
```
aitbc
├── client - Submit/manage jobs, batch submit, templates
├── miner - Register, mine, earnings, capabilities, concurrent
├── wallet - Balance, staking, multisig, backup/restore
├── auth - Login/logout, tokens, API keys
├── blockchain - Blocks, transactions, validators, supply
├── marketplace - GPU list/book/release, orders, reviews
├── admin - Status, jobs, miners, maintenance, audit-log
├── config - Set/get, profiles, secrets, import/export
├── monitor - Dashboard, metrics, alerts, webhooks, history
├── simulate - Init, users, workflow, load-test, scenarios
├── plugin - Install/uninstall/list/toggle custom commands
└── version - Show version information
```
## Quick Start
```bash
cd /home/oib/windsurf/aitbc && pip install -e .
export CLIENT_API_KEY=your_key_here
aitbc config set coordinator_url http://localhost:8000
aitbc client submit --prompt "What is AI?"
aitbc wallet balance
aitbc monitor dashboard
```

567
docs/cli-reference.md Normal file
View File

@@ -0,0 +1,567 @@
# AITBC CLI Reference
## Overview
The AITBC CLI provides a comprehensive command-line interface for interacting with the AITBC network. It supports job submission, mining operations, wallet management, blockchain queries, marketplace operations, system administration, and test simulations.
## Installation
```bash
cd /home/oib/windsurf/aitbc
pip install -e .
```
## Global Options
All commands support the following global options:
- `--url TEXT`: Coordinator API URL (overrides config)
- `--api-key TEXT`: API key (overrides config)
- `--output [table|json|yaml]`: Output format (default: table)
- `-v, --verbose`: Increase verbosity (use -v, -vv, -vvv)
- `--debug`: Enable debug mode
- `--config-file TEXT`: Path to config file
- `--help`: Show help message
- `--version`: Show version and exit
## Configuration
### Setting API Key
```bash
# Set API key for current session
export CLIENT_API_KEY=your_api_key_here
# Or set permanently
aitbc config set api_key your_api_key_here
```
### Setting Coordinator URL
```bash
aitbc config set coordinator_url http://localhost:8000
```
## Commands
### Client Commands
Submit and manage inference jobs.
```bash
# Submit a job
aitbc client submit --prompt "What is AI?" --model gpt-4
# Submit with retry (3 attempts, exponential backoff)
aitbc client submit --prompt "What is AI?" --retries 3 --retry-delay 2.0
# Check job status
aitbc client status <job_id>
# List recent blocks
aitbc client blocks --limit 10
# List receipts
aitbc client receipts --status completed
# Cancel a job
aitbc client cancel <job_id>
# Show job history
aitbc client history --status completed --limit 20
```
### Miner Commands
Register as a miner and process jobs.
```bash
# Register as miner
aitbc miner register --gpu-model RTX4090 --memory 24 --price 0.5
# Start polling for jobs
aitbc miner poll --interval 5
# Mine a specific job
aitbc miner mine <job_id>
# Send heartbeat
aitbc miner heartbeat
# Check miner status
aitbc miner status
# View earnings
aitbc miner earnings --from-time 2026-01-01 --to-time 2026-02-12
# Update GPU capabilities
aitbc miner update-capabilities --gpu RTX4090 --memory 24 --cuda-cores 16384
# Deregister miner
aitbc miner deregister --force
# List jobs with filtering
aitbc miner jobs --type inference --min-reward 0.5 --status completed
# Concurrent mining (multiple workers)
aitbc miner concurrent-mine --workers 4 --jobs 20
```
### Wallet Commands
Manage your AITBC wallet and transactions.
```bash
# Check balance
aitbc wallet balance
# Show earning history
aitbc wallet earn --limit 20
# Show spending history
aitbc wallet spend --limit 20
# Show full history
aitbc wallet history
# Get wallet address
aitbc wallet address
# Show wallet stats
aitbc wallet stats
# Send funds
aitbc wallet send <address> <amount>
# Request payment
aitbc wallet request-payment <from_address> <amount> --description "For services"
# Create a new wallet
aitbc wallet create my_wallet --type hd
# List wallets
aitbc wallet list
# Switch active wallet
aitbc wallet switch my_wallet
# Backup wallet
aitbc wallet backup my_wallet --destination ./backup.json
# Restore wallet
aitbc wallet restore ./backup.json restored_wallet
# Stake tokens
aitbc wallet stake 100.0 --duration 90
# Unstake tokens
aitbc wallet unstake <stake_id>
# View staking info
aitbc wallet staking-info
```
### Auth Commands
Manage API keys and authentication.
```bash
# Login with API key
aitbc auth login your_api_key_here
# Logout
aitbc auth logout
# Show current token
aitbc auth token
# Check auth status
aitbc auth status
# Refresh token
aitbc auth refresh
# Create new API key
aitbc auth keys create --name "My Key"
# List API keys
aitbc auth keys list
# Revoke API key
aitbc auth keys revoke <key_id>
# Import from environment
aitbc auth import-env CLIENT_API_KEY
```
### Blockchain Commands
Query blockchain information and status.
```bash
# List recent blocks
aitbc blockchain blocks --limit 10
# Get block details
aitbc blockchain block <block_hash>
# Get transaction details
aitbc blockchain transaction <tx_hash>
# Check node status
aitbc blockchain status --node 1
# Check sync status
aitbc blockchain sync-status
# List connected peers
aitbc blockchain peers
# Get blockchain info
aitbc blockchain info
# Check token supply
aitbc blockchain supply
# List validators
aitbc blockchain validators
```
### Marketplace Commands
GPU marketplace operations.
```bash
# Register GPU
aitbc marketplace gpu register --name "RTX4090" --memory 24 --price-per-hour 0.5
# List available GPUs
aitbc marketplace gpu list --available
# List with filters
aitbc marketplace gpu list --model RTX4090 --memory-min 16 --price-max 1.0
# Get GPU details
aitbc marketplace gpu details <gpu_id>
# Book a GPU
aitbc marketplace gpu book <gpu_id> --hours 2
# Release a GPU
aitbc marketplace gpu release <gpu_id>
# List orders
aitbc marketplace orders list --status active
# Get pricing info
aitbc marketplace pricing RTX4090
# Get GPU reviews
aitbc marketplace reviews <gpu_id>
# Add a review
aitbc marketplace review <gpu_id> --rating 5 --comment "Excellent performance"
```
### Admin Commands
System administration operations.
```bash
# Check system status
aitbc admin status
# List jobs
aitbc admin jobs list --status active
# Get job details
aitbc admin jobs details <job_id>
# Cancel job
aitbc admin jobs cancel <job_id>
# List miners
aitbc admin miners list --status active
# Get miner details
aitbc admin miners details <miner_id>
# Suspend miner
aitbc admin miners suspend <miner_id>
# Get analytics
aitbc admin analytics --period 24h
# View logs
aitbc admin logs --component coordinator --tail 100
# Run maintenance
aitbc admin maintenance cleanup --retention 7d
# Execute custom action
aitbc admin action custom --script backup.sh
```
### Config Commands
Manage CLI configuration.
```bash
# Show current config
aitbc config show
# Set configuration values
aitbc config set coordinator_url http://localhost:8000
aitbc config set timeout 30
aitbc config set api_key your_key
# Show config file path
aitbc config path
# Edit config file
aitbc config edit
# Reset configuration
aitbc config reset
# Export configuration
aitbc config export --format json > config.json
# Import configuration
aitbc config import config.json
# Validate configuration
aitbc config validate
# List environment variables
aitbc config environments
# Save profile
aitbc config profiles save production
# List profiles
aitbc config profiles list
# Load profile
aitbc config profiles load production
# Delete profile
aitbc config profiles delete production
```
### Simulate Commands
Run simulations and manage test users.
```bash
# Initialize test economy
aitbc simulate init --distribute 10000,5000
# Initialize with reset
aitbc simulate init --reset
# Create test user
aitbc simulate user create --type client --balance 1000
# List test users
aitbc simulate user list
# Check user balance
aitbc simulate user balance <user_id>
# Fund user
aitbc simulate user fund <user_id> --amount 500
# Run workflow simulation
aitbc simulate workflow --jobs 10 --duration 60
# Run load test
aitbc simulate load-test --users 20 --rps 100 --duration 300
# List scenarios
aitbc simulate scenario list
# Run scenario
aitbc simulate scenario run basic_workflow
# Get results
aitbc simulate results <simulation_id>
# Reset simulation
aitbc simulate reset
```
## Output Formats
All commands support three output formats:
- **table** (default): Human-readable table format
- **json**: Machine-readable JSON format
- **yaml**: Human-readable YAML format
Example:
```bash
# Table output (default)
aitbc wallet balance
# JSON output
aitbc --output json wallet balance
# YAML output
aitbc --output yaml wallet balance
```
## Environment Variables
The following environment variables are supported:
- `CLIENT_API_KEY`: Your API key for authentication
- `AITBC_COORDINATOR_URL`: Coordinator API URL
- `AITBC_OUTPUT_FORMAT`: Default output format
- `AITBC_CONFIG_FILE`: Path to configuration file
## Examples
### Basic Workflow
```bash
# 1. Configure CLI
export CLIENT_API_KEY=your_api_key
aitbc config set coordinator_url http://localhost:8000
# 2. Check wallet
aitbc wallet balance
# 3. Submit a job
job_id=$(aitbc --output json client submit inference --prompt "What is AI?" | jq -r '.job_id')
# 4. Check status
aitbc client status $job_id
# 5. Get results
aitbc client receipts --job-id $job_id
```
### Mining Operations
```bash
# 1. Register as miner
aitbc miner register --gpu-model RTX4090 --memory 24 --price 0.5
# 2. Start mining
aitbc miner poll --interval 5
# 3. Check earnings
aitbc wallet earn
```
### Marketplace Usage
```bash
# 1. Find available GPUs
aitbc marketplace gpu list --available --price-max 1.0
# 2. Book a GPU
aitbc marketplace gpu book gpu123 --hours 4
# 3. Use the GPU for your job
aitbc client submit inference --prompt "Generate image" --gpu gpu123
# 4. Release the GPU
aitbc marketplace gpu release gpu123
# 5. Leave a review
aitbc marketplace review gpu123 --rating 5 --comment "Great performance!"
```
## Troubleshooting
### Common Issues
1. **API Key Not Found**
```bash
export CLIENT_API_KEY=your_api_key
# or
aitbc auth login your_api_key
```
2. **Connection Refused**
```bash
# Check coordinator URL
aitbc config show
# Update if needed
aitbc config set coordinator_url http://localhost:8000
```
3. **Permission Denied**
```bash
# Check key permissions
aitbc auth status
# Refresh if needed
aitbc auth refresh
```
### Debug Mode
Enable debug mode for detailed error information:
```bash
aitbc --debug client status <job_id>
```
### Verbose Output
Increase verbosity for more information:
```bash
aitbc -vvv wallet balance
```
## Integration
### Shell Scripts
```bash
#!/bin/bash
# Submit job and wait for completion
job_id=$(aitbc --output json client submit inference --prompt "$1" | jq -r '.job_id')
while true; do
status=$(aitbc --output json client status $job_id | jq -r '.status')
if [ "$status" = "completed" ]; then
aitbc client receipts --job-id $job_id
break
fi
sleep 5
done
```
### Python Integration
```python
import subprocess
import json
# Submit job
result = subprocess.run(
['aitbc', '--output', 'json', 'client', 'submit', 'inference', '--prompt', 'What is AI?'],
capture_output=True, text=True
)
job_data = json.loads(result.stdout)
job_id = job_data['job_id']
# Check status
result = subprocess.run(
['aitbc', '--output', 'json', 'client', 'status', job_id],
capture_output=True, text=True
)
status_data = json.loads(result.stdout)
print(f"Job status: {status_data['status']}")
```
## Support
For more help:
- Use `aitbc --help` for general help
- Use `aitbc <command> --help` for command-specific help
- Check the logs with `aitbc admin logs` for system issues
- Visit the documentation at https://docs.aitbc.net

55
docs/cli-summary.md Normal file
View File

@@ -0,0 +1,55 @@
# AITBC CLI Enhancement Summary
## Overview
All CLI enhancement phases (05) are complete. The AITBC CLI provides a production-ready interface with 116/116 tests passing, 11 command groups, and 80+ subcommands.
## Architecture
- **Package**: `cli/aitbc_cli/` with modular commands
- **Framework**: Click + Rich for output formatting
- **Testing**: pytest with Click CliRunner, 116/116 passing
- **CI/CD**: `.github/workflows/cli-tests.yml` (Python 3.10/3.11/3.12)
## Command Groups
| Group | Subcommands |
|-------|-------------|
| **client** | submit, status, blocks, receipts, cancel, history, batch-submit, template |
| **miner** | register, poll, mine, heartbeat, status, earnings, update-capabilities, deregister, jobs, concurrent-mine |
| **wallet** | balance, earn, spend, send, history, address, stats, stake, unstake, staking-info, create, list, switch, delete, backup, restore, info, request-payment, multisig-create, multisig-propose, multisig-sign |
| **auth** | login, logout, token, status, refresh, keys (create/list/revoke), import-env |
| **blockchain** | blocks, block, transaction, status, sync-status, peers, info, supply, validators |
| **marketplace** | gpu (register/list/details/book/release), orders, pricing, reviews |
| **admin** | status, jobs, miners, analytics, logs, maintenance, audit-log |
| **config** | show, set, path, edit, reset, export, import, validate, environments, profiles, set-secret, get-secret |
| **monitor** | dashboard, metrics, alerts, history, webhooks |
| **simulate** | init, user (create/list/balance/fund), workflow, load-test, scenario, results, reset |
| **plugin** | install, uninstall, list, toggle |
## Global Options
- `--output table|json|yaml` — Output format
- `--url URL` — Override coordinator URL
- `--api-key KEY` — Override API key
- `-v|-vv|-vvv` — Verbosity levels
- `--debug` — Debug mode
- `--config-file PATH` — Custom config file
## Installation
```bash
cd /home/oib/windsurf/aitbc
pip install -e .
```
## Key Features
- Rich output formatting (table/JSON/YAML)
- Retry with exponential backoff
- Progress bars for long-running operations
- Interactive prompts for destructive operations
- Multi-wallet support with staking and multi-sig
- Encrypted configuration secrets
- Audit logging
- Plugin system for custom commands
- Real-time monitoring dashboard
- Webhook notifications
- Batch job submission from CSV/JSON
- Job templates for repeated tasks
- Shell completion and man pages

View File

@@ -70,6 +70,12 @@ The AITBC platform consists of 7 core components working together to provide a c
- **Explorer Web** - Blockchain explorer
- **Pool Hub** - Miner coordination service
### CLI & Tooling
- **AITBC CLI** - 11 command groups, 80+ subcommands (116/116 tests passing)
- Client, miner, wallet, auth, blockchain, marketplace, admin, config, monitor, simulate, plugin
- CI/CD via GitHub Actions, man page, shell completion
## Component Interactions
```

727
docs/currentTask.md Normal file
View File

@@ -0,0 +1,727 @@
# AITBC CLI Enhancement Plan
## Goal
Make the AITBC project fully usable via CLI tools, covering all functionality currently available through web interfaces.
## Prerequisites
### System Requirements
- Python 3.8+ (tested on Python 3.11)
- Debian Trixie (Linux)
- Network connection for API access
### Installation Methods
#### Method 1: Development Install
```bash
cd /home/oib/windsurf/aitbc
pip install -e .
```
#### Method 2: From PyPI (future)
```bash
pip install aitbc-cli
```
#### Method 3: Using Docker
```bash
docker run -it aitbc/cli:latest
```
### Shell Completion
```bash
# Install completions
aitbc --install-completion bash # or zsh, fish
# Enable immediately
source ~/.bashrc # or ~/.zshrc
```
### Environment Variables
```bash
export AITBC_CONFIG_DIR="$HOME/.aitbc"
export AITBC_LOG_LEVEL="info"
export AITBC_API_KEY="${CLIENT_API_KEY}" # Optional, can use auth login
```
## Current State Analysis
### Existing CLI Tools
1. **client.py** - Submit jobs, check status, list blocks
2. **miner.py** - Register miners, poll for jobs, submit results
3. **wallet.py** - Track earnings, manage wallet (local only)
4. **GPU Testing Tools** - test_gpu_access.py, gpu_test.py, miner_gpu_test.py
### Infrastructure Overview (Current Setup)
- **Coordinator API**: `http://localhost:8000` (direct) or `http://127.0.0.1:18000` (via SSH tunnel)
- **Blockchain Nodes**: RPC on `http://localhost:8081` and `http://localhost:8082`
- **Wallet Daemon**: `http://localhost:8002`
- **Exchange API**: `http://localhost:9080` (if running)
- **Test Wallets**: Located in `home/` directory with separate client/miner wallets
- **Single Developer Environment**: You are the only user/developer
### Test User Setup
The `home/` directory contains simulated user wallets for testing:
- **Genesis Wallet**: 1,000,000 AITBC (creates initial supply)
- **Client Wallet**: 10,000 AITBC (customer wallet)
- **Miner Wallet**: 1,000 AITBC (GPU provider wallet)
### Critical Issues to Address
#### 1. Inconsistent Default URLs
- `client.py` uses `http://127.0.0.1:18000`
- `miner.py` uses `http://localhost:8001`
- **Action**: Standardize all to `http://localhost:8000` with fallback to tunnel
#### 2. API Key Security
- Currently stored as plaintext in environment variables
- No credential management system
- **Action**: Implement encrypted storage with keyring
#### 3. Missing Package Structure
- No `pyproject.toml` or `setup.py`
- CLI tools not installable as package
- **Action**: Create proper Python package structure
## Enhancement Plan
## Leveraging Existing Assets
### Existing Scripts to Utilize
#### 1. `scripts/aitbc-cli.sh`
- Already provides unified CLI wrapper
- Has basic commands: submit, status, blocks, receipts, admin functions
- **Action**: Extend this script or use as reference for unified CLI
- **Issue**: Uses hardcoded URL `http://127.0.0.1:18000`
#### 2. Existing `pyproject.toml`
- Already exists at project root
- Configured for pytest with proper paths
- **Action**: Add CLI package configuration and entry points
#### 3. Test Scripts in `scripts/`
- `miner_workflow.py` - Complete miner workflow
- `assign_proposer.py` - Block proposer assignment
- `start_remote_tunnel.sh` - SSH tunnel management
- **Action**: Integrate these workflows into CLI commands
### Phase 0: Foundation Fixes (Week 0) ✅ COMPLETED
- [x] Standardize default URLs across all CLI tools (fixed to `http://127.0.0.1:18000`)
- [x] Extend existing `pyproject.toml` with CLI package configuration
- [x] Set up encrypted credential storage (keyring)
- [x] Add `--version` flag to all existing tools
- [x] Add logging verbosity flags (`-v/-vv`)
- [x] Refactor `scripts/aitbc-cli.sh` into Python unified CLI
- [x] Create CLI package structure in `cli/` directory
### Phase 1: Improve Existing CLI Tools
#### 1.1 client.py Enhancements ✅ COMPLETED
- [x] Add `--output json|table|yaml` formatting options
- [x] Implement proper exit codes (0 for success, non-zero for errors)
- [x] Add batch job submission from file
- [x] Add job cancellation functionality
- [x] Add job history and filtering options
- [x] Add retry mechanism with exponential backoff
#### 1.2 miner.py Enhancements ✅ COMPLETED
- [x] Add miner status check (registered, active, last heartbeat)
- [x] Add miner earnings tracking
- [x] Add capability management (update GPU specs)
- [x] Add miner deregistration
- [x] Add job filtering (by type, reward threshold)
- [x] Add concurrent job processing
#### 1.3 wallet.py Enhancements ✅ COMPLETED
- [x] Connect to actual blockchain wallet (with fallback to local file)
- [x] Add transaction submission to blockchain
- [x] Add balance query from blockchain
- [x] Add multi-wallet support
- [x] Add wallet backup/restore
- [x] Add staking functionality
- [x] Integrate with `home/` test wallets for simulation
- [x] Add `--wallet-path` option to specify wallet location
#### 1.4 auth.py - Authentication & Credential Management ✅ NEW
- [x] Login/logout functionality with secure storage
- [x] Token management and viewing
- [x] Multi-environment support (dev/staging/prod)
- [x] API key creation and rotation
- [x] Import from environment variables
### Phase 2: New CLI Tools
#### 2.1 blockchain.py - Blockchain Operations
```bash
# Query blocks
aitbc blockchain blocks --limit 10 --from-height 100
aitbc blockchain block <block_hash>
aitbc blockchain transaction <tx_hash>
# Node status
aitbc blockchain status --node 1|2|3
aitbc blockchain sync-status
aitbc blockchain peers
# Chain info
aitbc blockchain info
aitbc blockchain supply
aitbc blockchain validators
```
#### 2.2 exchange.py - Trading Operations
```bash
# Market data
aitbc exchange ticker
aitbc exchange orderbook --pair AITBC/USDT
aitbc exchange trades --pair AITBC/USDT --limit 100
# Orders
aitbc exchange order place --type buy --amount 100 --price 0.5
aitbc exchange order cancel <order_id>
aitbc exchange orders --status open|filled|cancelled
# Account
aitbc exchange balance
aitbc exchange history
```
#### 2.3 admin.py - System Administration
```bash
# Service management
aitbc admin status --all
aitbc admin restart --service coordinator|blockchain|exchange
aitbc admin logs --service coordinator --tail 100
# Health checks
aitbc admin health-check
aitbc admin monitor --continuous
# Configuration
aitbc admin config show --service coordinator
aitbc admin config set --service coordinator --key value
```
#### 2.4 config.py - Configuration Management
```bash
# Environment setup
aitbc config init --environment dev|staging|prod
aitbc config set coordinator.url http://localhost:8000
aitbc config get coordinator.url
aitbc config list
# Profile management
aitbc config profile create local
aitbc config profile use local
aitbc config profile list
```
#### 2.5 marketplace.py - GPU Marketplace Operations
```bash
# Service Provider - Register GPU
aitbc marketplace gpu register --name "RTX 4090" --memory 24 --cuda-cores 16384 --price-per-hour 0.50
# Client - Discover GPUs
aitbc marketplace gpu list --available
aitbc marketplace gpu list --price-max 1.0 --region us-west
aitbc marketplace gpu details gpu_001
aitbc marketplace gpu book gpu_001 --hours 2
aitbc marketplace gpu release gpu_001
# Marketplace operations
aitbc marketplace orders --status active
aitbc marketplace pricing gpt-4
aitbc marketplace reviews gpu_001
aitbc marketplace review gpu_001 --rating 5 --comment "Excellent GPU!"
```
#### 2.8 auth.py - Authentication & Credential Management
```bash
# Authentication
aitbc auth login --api-key <key> --environment dev
aitbc auth logout --environment dev
aitbc auth token --show --environment dev
aitbc auth status
aitbc auth refresh
# Credential management
aitbc auth keys list
aitbc auth keys create --name test-key --permissions client,miner
aitbc auth keys revoke --key-id <id>
aitbc auth keys rotate
```
#### 2.9 simulate.py - Test User & Simulation Management
```bash
# Initialize test economy
aitbc simulate init --distribute 10000,1000 # client,miner
aitbc simulate reset --confirm
# Manage test users
aitbc simulate user create --type client|miner --name test_user_1
aitbc simulate user list
aitbc simulate user balance --user client
aitbc simulate user fund --user client --amount 1000
# Run simulations
aitbc simulate workflow --jobs 5 --rounds 3
aitbc simulate load-test --clients 10 --miners 3 --duration 300
aitbc simulate marketplace --gpus 5 --bookings 20
# Test scenarios
aitbc simulate scenario --file payment_flow.yaml
aitbc simulate scenario --file gpu_booking.yaml
```
#### 2.10 aitbc - Unified CLI Entry Point
```bash
# Unified command structure
aitbc client submit inference --prompt "What is AI?"
aitbc miner mine --jobs 10
aitbc wallet balance
aitbc blockchain status
aitbc exchange ticker
aitbc marketplace gpu list --available
aitbc admin health-check
aitbc config set coordinator.url http://localhost:8000
aitbc simulate init
aitbc auth login
# Global options
aitbc --version # Show version
aitbc --help # Show help
aitbc --verbose # Verbose output
aitbc --debug # Debug output
aitbc --output json # JSON output for all commands
```
### Phase 3: CLI Testing Strategy
#### 3.1 Test Structure
```
tests/cli/
├── conftest.py # CLI test fixtures
├── test_client.py # Client CLI tests
├── test_miner.py # Miner CLI tests
├── test_wallet.py # Wallet CLI tests
├── test_blockchain.py # Blockchain CLI tests
├── test_exchange.py # Exchange CLI tests
├── test_marketplace.py # Marketplace CLI tests
├── test_admin.py # Admin CLI tests
├── test_config.py # Config CLI tests
├── test_simulate.py # Simulation CLI tests
├── test_unified.py # Unified aitbc CLI tests
├── integration/
│ ├── test_full_workflow.py # End-to-end CLI workflow
│ ├── test_gpu_marketplace.py # GPU marketplace workflow
│ ├── test_multi_user.py # Multi-user simulation
│ └── test_multi_node.py # Multi-node CLI operations
└── fixtures/
├── mock_responses.json # Mock API responses
├── test_configs.yaml # Test configurations
├── gpu_specs.json # Sample GPU specifications
└── test_scenarios.yaml # Test simulation scenarios
```
#### 3.2 Test Coverage Requirements
- [x] Argument parsing validation
- [x] API integration with mocking
- [x] Output formatting (JSON, table, YAML)
- [x] Error handling and exit codes
- [x] Configuration file handling
- [x] Multi-environment support
- [x] Authentication and API key handling
- [x] Timeout and retry logic
#### 3.3 Test Implementation Plan
1.**Unit Tests** - 116 tests across 8 files, each CLI command tested in isolation with mocking
2. **Integration Tests** - Test CLI against real services (requires live coordinator; deferred)
3.**Workflow Tests** - Simulate commands cover complete user journeys (workflow, load-test, scenario)
4. **Performance Tests** - Test CLI with large datasets (deferred; local ops already < 500ms)
### Phase 4: Documentation & UX
#### 4.1 Documentation Structure
```
docs/cli/
├── README.md # CLI overview and quick start
├── installation.md # Installation and setup
├── configuration.md # Configuration guide
├── commands/
│ ├── client.md # Client CLI reference
│ ├── miner.md # Miner CLI reference
│ ├── wallet.md # Wallet CLI reference
│ ├── blockchain.md # Blockchain CLI reference
│ ├── exchange.md # Exchange CLI reference
│ ├── admin.md # Admin CLI reference
│ └── config.md # Config CLI reference
├── examples/
│ ├── quick-start.md # Quick start examples
│ ├── mining.md # Mining setup examples
│ ├── trading.md # Trading examples
│ └── automation.md # Scripting examples
└── troubleshooting.md # Common issues and solutions
```
#### 4.2 UX Improvements
- [x] Progress bars for long-running operations (`progress_bar()` and `progress_spinner()` in utils)
- [x] Colored output for better readability (Rich library: red/green/yellow/cyan styles, panels)
- [x] Interactive prompts for sensitive operations (`click.confirm()` on delete, reset, deregister)
- [x] Auto-completion scripts (`cli/aitbc_shell_completion.sh`)
- [x] Man pages integration (`cli/man/aitbc.1`)
- [x] Built-in help with examples (Click `--help` on all commands)
### Phase 5: Advanced Features
#### 5.1 Scripting & Automation
- [x] Batch operations from CSV/JSON files (`client batch-submit`)
- [x] Job templates for repeated tasks (`client template save/list/run/delete`)
- [x] Webhook support for notifications (`monitor webhooks add/list/remove/test`)
- [x] Plugin system for custom commands (`plugin install/uninstall/list/toggle`)
#### 5.2 Monitoring & Analytics
- [x] Real-time dashboard mode (`monitor dashboard --refresh 5`)
- [x] Metrics collection and export (`monitor metrics --period 24h --export file.json`)
- [x] Alert configuration (`monitor alerts add/list/remove/test`)
- [x] Historical data analysis (`monitor history --period 7d`)
#### 5.3 Security Enhancements
- [x] Multi-signature operations (`wallet multisig-create/multisig-propose/multisig-sign`)
- [x] Encrypted configuration (`config set-secret/get-secret`)
- [x] Audit logging (`admin audit-log`)
## Implementation Timeline ✅ COMPLETE
### Phase 0: Foundation ✅ (2026-02-10)
- Standardized URLs, package structure, credential storage
- Created unified entry point (`aitbc`)
- Set up test structure
### Phase 1: Enhance Existing Tools ✅ (2026-02-11)
- client.py: history, filtering, retry with exponential backoff
- miner.py: earnings, capabilities, deregistration, job filtering, concurrent processing
- wallet.py: multi-wallet, backup/restore, staking, `--wallet-path`
- auth.py: login/logout, token management, multi-environment
### Phase 2: New CLI Tools ✅ (2026-02-11)
- blockchain.py, marketplace.py, admin.py, config.py, simulate.py
### Phase 3: Testing & Documentation ✅ (2026-02-12)
- 116/116 CLI tests passing (0 failures)
- CI/CD workflow (`.github/workflows/cli-tests.yml`)
- CLI reference docs, shell completion, README
### Phase 4: Backend Integration ✅ (2026-02-12)
- MarketplaceOffer model extended with GPU-specific fields
- GPU booking system, review system
- Marketplace sync-offers endpoint
## Success Metrics
1. **Coverage**: All API endpoints accessible via CLI (client, miner, wallet, auth, blockchain, marketplace, admin, config, simulate)
2. **Tests**: 116/116 CLI tests passing across all command groups
3. **Documentation**: Complete command reference with examples (`docs/cli-reference.md` 560+ lines covering all commands, workflows, troubleshooting, integration)
4. **Usability**: All common workflows achievable via CLI (job submission, mining, wallet management, staking, marketplace GPU booking, config profiles)
5. **Performance**: CLI response time < 500ms for local operations (config, wallet, simulate)
## Dependencies
### Core Dependencies
- Python 3.8+
- Click or Typer for CLI framework
- Rich for terminal formatting
- Pytest for testing
- httpx for HTTP client
- PyYAML for configuration
### Additional Dependencies
- **keyring** - Encrypted credential storage
- **cryptography** - Secure credential handling
- **click-completion** - Shell auto-completion
- **tabulate** - Table formatting
- **colorama** - Cross-platform colored output
- **pydantic** - Configuration validation
- **python-dotenv** - Environment variable management
## Risks & Mitigations
1. **API Changes**: Version CLI commands to match API versions
2. **Authentication**: Secure storage of API keys using keyring
3. **Network Issues**: Robust error handling and retries
4. **Complexity**: Keep individual commands simple and composable
5. **Backward Compatibility**: Maintain compatibility with existing scripts
6. **Dependency Conflicts**: Use virtual environments and pin versions
7. **Security**: Regular security audits of dependencies
## Implementation Approach
### Recommended Strategy
1. **Start with `scripts/aitbc-cli.sh`** - It's already a working wrapper
2. **Gradually migrate to Python** - Convert bash wrapper to Python CLI framework
3. **Reuse existing Python scripts** - `miner_workflow.py`, `assign_proposer.py` etc.
4. **Leverage existing `pyproject.toml`** - Just add CLI configuration
### Quick Start Implementation
```bash
# 1. Fix URL inconsistency in existing tools
sed -i 's/127.0.0.1:18000/localhost:8000/g' cli/client.py
sed -i 's/localhost:8001/localhost:8000/g' cli/miner.py
# 2. Create CLI package structure
mkdir -p cli/aitbc_cli/{commands,config,auth}
# 3. Add entry point to pyproject.toml
# [project.scripts]
# aitbc = "aitbc_cli.main:cli"
```
## Progress Summary (Updated Feb 12, 2026)
### ✅ Completed Work
#### Phase 0 - Foundation
- All Phase 0 tasks completed successfully
- URLs standardized to `http://127.0.0.1:18000` (incus proxy)
- Created installable Python package with proper structure
- Implemented secure credential storage using keyring
- Unified CLI entry point `aitbc` created
#### Phase 1 - Enhanced Existing Tools
- **client.py**: Added output formatting, exit codes, batch submission, cancellation
- **miner.py**: Added registration, polling, mining, heartbeat, status check
- **wallet.py**: Full wallet management with blockchain integration
- **auth.py**: New authentication system with secure key storage
#### Current CLI Features
```bash
# Unified CLI with rich output
aitbc --help # Main CLI help
aitbc --version # Show v0.1.0
aitbc --output json client blocks # JSON output
aitbc --output yaml wallet balance # YAML output
# Client commands
aitbc client submit inference --prompt "What is AI?"
aitbc client status <job_id>
aitbc client blocks --limit 10
aitbc client cancel <job_id>
aitbc client receipts --job-id <id>
# Miner commands
aitbc miner register --gpu RTX4090 --memory 24
aitbc miner poll --wait 10
aitbc miner mine --jobs 5
aitbc miner heartbeat
aitbc miner status
# Wallet commands
aitbc wallet balance
aitbc wallet history --limit 20
aitbc wallet earn 10.5 job_123 --desc "Inference task"
aitbc wallet spend 5.0 "GPU rental"
aitbc wallet send <address> 10.0 --desc "Payment"
aitbc wallet stats
# Auth commands
aitbc auth login <api_key> --environment dev
aitbc auth status
aitbc auth token --show
aitbc auth logout --environment dev
aitbc auth import-env client
# Blockchain commands
aitbc blockchain blocks --limit 10 --from-height 100
aitbc blockchain block <block_hash>
aitbc blockchain transaction <tx_hash>
aitbc blockchain status --node 1
aitbc blockchain info
aitbc blockchain supply
# Marketplace commands
aitbc marketplace gpu list --available --model RTX*
aitbc marketplace gpu register --name RTX4090 --memory 24 --price-per-hour 0.5
aitbc marketplace gpu book <gpu_id> --hours 2
aitbc marketplace gpu release <gpu_id>
aitbc marketplace orders --status active
# Simulation commands
aitbc simulate init --distribute 10000,1000 --reset
aitbc simulate user create --type client --name alice --balance 500
aitbc simulate workflow --jobs 5 --rounds 3
aitbc simulate load-test --clients 10 --miners 3 --duration 300
```
### 📋 Remaining Tasks
#### Phase 1 Incomplete ✅ COMPLETED
- [x] Job history filtering in client command
- [x] Retry mechanism with exponential backoff
- [x] Miner earnings tracking
- [x] Multi-wallet support
- [x] Wallet backup/restore
#### Phase 2 - New CLI Tools ✅ COMPLETED
- [x] blockchain.py - Blockchain operations
- [x] marketplace.py - GPU marketplace operations
- [x] admin.py - System administration
- [x] config.py - Configuration management
- [x] simulate.py - Test simulation
### Phase 3 - Testing & Documentation ✅ PARTIALLY COMPLETE
- [x] Comprehensive test suite (84+ tests passing for client, wallet, auth, admin, blockchain, marketplace, simulate commands)
- [x] Created test files for all commands (config tests need minor fixes)
- [x] CLI documentation (cli-reference.md created)
- [x] Shell completion script created (aitbc_shell_completion.sh)
- [x] Enhanced README with comprehensive usage guide
- [x] CI/CD integration
## Next Steps
1. Phase 0 and Phase 1 complete
2. Phase 2 complete (all 5 new tools implemented)
3. Phase 3 testing mostly complete (94+ tests passing)
4. **Phase 4 - Backend Implementation** (COMPLETED ✅)
- Marketplace GPU endpoints implemented (9 endpoints created)
- GPU booking system implemented (in-memory)
- Review and rating system implemented
- Order management implemented
- CLI marketplace commands now functional (11/11 tests passing)
5. Remaining tasks:
- Multi-wallet support (COMPLETED)
- Wallet backup/restore (COMPLETED)
- Fix remaining config and simulate command tests (17 tests failing)
### Quick Start Using the CLI
```bash
# Install the CLI
cd /home/oib/windsurf/aitbc
pip install -e .
# Store your API key
export CLIENT_API_KEY=your_key_here
# Basic operations
aitbc client submit inference --prompt "What is AI?"
aitbc wallet balance
aitbc miner status
aitbc auth status
# Wallet management
aitbc wallet create my-wallet --type hd
aitbc wallet list
aitbc wallet switch my-wallet
aitbc wallet info
aitbc wallet backup my-wallet
aitbc wallet restore backup.json restored-wallet --force
# Admin operations
aitbc admin status
aitbc admin jobs --limit 10
aitbc admin analytics --days 7
# Configuration
aitbc config set coordinator_url http://localhost:8000
aitbc config validate
aitbc config profiles save myprofile
# Blockchain queries
aitbc blockchain blocks --limit 10
aitbc blockchain info
# Marketplace operations
aitbc marketplace gpu list --available
aitbc marketplace gpu book gpu123 --hours 2
# Simulation
aitbc simulate init --distribute 10000,1000
aitbc simulate workflow --jobs 5
```
## Marketplace Backend Analysis
### Current Status
The CLI marketplace commands expect GPU-specific endpoints that are **NOW IMPLEMENTED** in the backend:
#### ✅ Implemented GPU Endpoints
- `POST /v1/marketplace/gpu/register` - Register GPU in marketplace
- `GET /v1/marketplace/gpu/list` - List available GPUs
- `GET /v1/marketplace/gpu/{gpu_id}` - Get GPU details
- `POST /v1/marketplace/gpu/{gpu_id}/book` - Book/reserve a GPU
- `POST /v1/marketplace/gpu/{gpu_id}/release` - Release a booked GPU
- `GET /v1/marketplace/gpu/{gpu_id}/reviews` - Get GPU reviews
- `POST /v1/marketplace/gpu/{gpu_id}/reviews` - Add GPU review
- `GET /v1/marketplace/orders` - List orders
- `GET /v1/marketplace/pricing/{model}` - Get model pricing
#### ✅ Currently Implemented
- `GET /marketplace/offers` - Basic offer listing (mock data)
- `GET /marketplace/stats` - Marketplace statistics
- `POST /marketplace/bids` - Submit bids
- `POST /marketplace/sync-offers` - Sync miners to offers (admin)
### Data Model Gaps
1. **GPU Registry**: Implemented (in-memory storage with mock GPUs)
2. **Booking System**: Implemented (in-memory booking tracking)
3. **Review Storage**: Implemented (in-memory review system)
4. **Limited Offer Model**: Fixed GPU-specific fields added (`gpu_model`, `gpu_memory_gb`, `gpu_count`, `cuda_version`, `price_per_hour`, `region`)
### Recommended Implementation
#### ✅ Phase 1: Quick Fix (COMPLETED)
```python
# ✅ Created /v1/marketplace/gpu/ router with all endpoints
# ✅ Added mock GPU data with 3 GPUs
# ✅ Implemented in-memory booking tracking
# ✅ Added review system with ratings
```
#### Phase 2: Full Implementation (High Effort)
```python
# New Models Needed:
class GPURegistry(SQLModel, table=True):
gpu_id: str = Field(primary_key=True)
miner_id: str
gpu_model: str
gpu_memory_gb: int
status: str # available, booked, offline
current_booking_id: Optional[str]
booking_expires: Optional[datetime]
class GPUBooking(SQLModel, table=True):
booking_id: str = Field(primary_key=True)
gpu_id: str
client_id: str
duration_hours: float
total_cost: float
status: str
class GPUReview(SQLModel, table=True):
review_id: str = Field(primary_key=True)
gpu_id: str
rating: int = Field(ge=1, le=5)
comment: str
```
### Impact on CLI Tests
- 6 out of 7 marketplace tests fail due to missing endpoints
- Tests expect JSON responses from GPU-specific endpoints
- Current implementation returns different data structure
### Priority Matrix
| Feature | Priority | Effort | Impact |
|---------|----------|--------|--------|
| GPU Registry | High | Medium | High |
| GPU Booking | High | High | High |
| GPU List/Details | High | Low | High |
| Reviews System | Medium | Medium | Medium |
| Order Management | Medium | High | Medium |
| Dynamic Pricing | Low | High | Low |
### Next Steps for Marketplace
1. Create `/v1/marketplace/gpu/` router with mock responses
2. Implement GPURegistry model for individual GPU tracking
3. Add booking system with proper state management
4. Integrate with existing miner registration
5. Add comprehensive testing for new endpoints

View File

@@ -453,3 +453,42 @@ This document tracks components that have been successfully deployed and are ope
- Created `dev-utils/` and moved `aitbc-pythonpath.pth`
- Updated `docs/files.md` with new structure
- Fixed systemd service path for GPU miner
## Recent Updates (2026-02-12)
### CLI Enhancement — All Phases Complete ✅
-**Enhanced CLI Tool** - 116/116 tests passing (0 failures)
- Location: `/home/oib/windsurf/aitbc/cli/aitbc_cli/`
- 11 command groups: client, miner, wallet, auth, config, blockchain, marketplace, simulate, admin, monitor, plugin
- CI/CD: `.github/workflows/cli-tests.yml` (Python 3.10/3.11/3.12 matrix)
-**Phase 1: Core Enhancements**
- Client: retry with exponential backoff, job history/filtering, batch submit from CSV/JSON, job templates
- Miner: earnings tracking, capability management, deregistration, job filtering, concurrent processing
- Wallet: multi-wallet, backup/restore, staking (stake/unstake/staking-info), `--wallet-path` option
- Auth: login/logout, token management, multi-environment, API key rotation
-**Phase 2: New CLI Tools**
- blockchain.py, marketplace.py, admin.py, config.py, simulate.py
-**Phase 3: Testing & Documentation**
- 116/116 CLI tests across 8 test files
- CLI reference docs (`docs/cli-reference.md` — 560+ lines)
- Shell completion script, man page (`cli/man/aitbc.1`)
-**Phase 4: Backend Integration**
- MarketplaceOffer model extended with GPU-specific fields (gpu_model, gpu_memory_gb, gpu_count, cuda_version, price_per_hour, region)
- GPU booking system, review system, sync-offers endpoint
-**Phase 5: Advanced Features**
- Scripting: batch CSV/JSON ops, job templates, webhook notifications, plugin system
- Monitoring: real-time dashboard, metrics collection/export, alert configuration, historical analysis
- Security: multi-signature wallets (create/propose/sign), encrypted config (set-secret/get-secret), audit logging
- UX: Rich progress bars, colored output, interactive prompts, auto-completion, man pages
-**Documentation Updates**
- Updated `.windsurf/workflows/ollama-gpu-test.md` with CLI commands
- Updated `.windsurf/workflows/test.md` with CLI testing guide
- Updated `.windsurf/skills/blockchain-operations/` and `ollama-gpu-provider/`
- System requirements updated to Debian Trixie (Linux)
- All currentTask.md checkboxes complete (0 unchecked items)

View File

@@ -5,7 +5,7 @@ This document categorizes all files and folders in the repository by their statu
- **Greylist (⚠️)**: Uncertain status, may need review
- **Blacklist (❌)**: Legacy, unused, outdated, candidates for removal
Last updated: 2026-02-11
Last updated: 2026-02-12
---
@@ -69,10 +69,22 @@ Last updated: 2026-02-11
| Path | Status | Notes |
|------|--------|-------|
| `cli/client.py` | ✅ Active | Client CLI |
| `cli/miner.py` | ✅ Active | Miner CLI |
| `cli/wallet.py` | ✅ Active | Wallet CLI |
| `cli/aitbc_cli/commands/client.py` | ✅ Active | Client CLI (submit, batch-submit, templates, history) |
| `cli/aitbc_cli/commands/miner.py` | ✅ Active | Miner CLI (register, earnings, capabilities, concurrent) |
| `cli/aitbc_cli/commands/wallet.py` | ✅ Active | Wallet CLI (balance, staking, multisig, backup/restore) |
| `cli/aitbc_cli/commands/auth.py` | ✅ Active | Auth CLI (login, tokens, API keys) |
| `cli/aitbc_cli/commands/blockchain.py` | ✅ Active | Blockchain queries |
| `cli/aitbc_cli/commands/marketplace.py` | ✅ Active | GPU marketplace operations |
| `cli/aitbc_cli/commands/admin.py` | ✅ Active | System administration, audit logging |
| `cli/aitbc_cli/commands/config.py` | ✅ Active | Configuration, profiles, encrypted secrets |
| `cli/aitbc_cli/commands/monitor.py` | ✅ Active | Dashboard, metrics, alerts, webhooks |
| `cli/aitbc_cli/commands/simulate.py` | ✅ Active | Test simulation framework |
| `cli/aitbc_cli/plugins.py` | ✅ Active | Plugin system for custom commands |
| `cli/aitbc_cli/main.py` | ✅ Active | CLI entry point (11 command groups) |
| `cli/man/aitbc.1` | ✅ Active | Man page |
| `cli/aitbc_shell_completion.sh` | ✅ Active | Shell completion script |
| `cli/test_ollama_gpu_provider.py` | ✅ Active | GPU testing |
| `.github/workflows/cli-tests.yml` | ✅ Active | CI/CD for CLI tests (Python 3.10/3.11/3.12) |
### Home Scripts (`home/`)

View File

@@ -0,0 +1,267 @@
# Marketplace Backend Analysis
## Current Implementation Status
### ✅ Implemented Features
#### 1. Basic Marketplace Offers
- **Endpoint**: `GET /marketplace/offers`
- **Service**: `MarketplaceService.list_offers()`
- **Status**: ✅ Implemented (returns mock data)
- **Notes**: Returns hardcoded mock offers, not from database
#### 2. Marketplace Statistics
- **Endpoint**: `GET /marketplace/stats`
- **Service**: `MarketplaceService.get_stats()`
- **Status**: ✅ Implemented
- **Features**:
- Total offers count
- Open capacity
- Average price
- Active bids count
#### 3. Marketplace Bids
- **Endpoint**: `POST /marketplace/bids`
- **Service**: `MarketplaceService.create_bid()`
- **Status**: ✅ Implemented
- **Features**: Create bids with provider, capacity, price, and notes
#### 4. Miner Offer Synchronization
- **Endpoint**: `POST /marketplace/sync-offers`
- **Service**: Creates offers from registered miners
- **Status**: ✅ Implemented (admin only)
- **Features**:
- Syncs online miners to marketplace offers
- Extracts GPU capabilities from miner attributes
- Creates offers with pricing, GPU model, memory, etc.
#### 5. Miner Offers List
- **Endpoint**: `GET /marketplace/miner-offers`
- **Service**: Lists offers created from miners
- **Status**: ✅ Implemented
- **Features**: Returns offers with detailed GPU information
### ❌ Missing Features (Expected by CLI)
#### 1. GPU-Specific Endpoints
The CLI expects a `/v1/marketplace/gpu/` prefix for all operations, but these are **NOT IMPLEMENTED**:
- `POST /v1/marketplace/gpu/register` - Register GPU in marketplace
- `GET /v1/marketplace/gpu/list` - List available GPUs
- `GET /v1/marketplace/gpu/{gpu_id}` - Get GPU details
- `POST /v1/marketplace/gpu/{gpu_id}/book` - Book/reserve a GPU
- `POST /v1/marketplace/gpu/{gpu_id}/release` - Release a booked GPU
- `GET /v1/marketplace/gpu/{gpu_id}/reviews` - Get GPU reviews
- `POST /v1/marketplace/gpu/{gpu_id}/reviews` - Add GPU review
#### 2. GPU Booking System
- **Status**: ❌ Not implemented
- **Missing Features**:
- GPU reservation/booking logic
- Booking duration tracking
- Booking status management
- Automatic release after timeout
#### 3. GPU Reviews System
- **Status**: ❌ Not implemented
- **Missing Features**:
- Review storage and retrieval
- Rating aggregation
- Review moderation
- Review-per-gpu association
#### 4. GPU Registry
- **Status**: ❌ Not implemented
- **Missing Features**:
- Individual GPU registration
- GPU specifications storage
- GPU status tracking (available, booked, offline)
- GPU health monitoring
#### 5. Order Management
- **Status**: ❌ Not implemented
- **CLI expects**: `GET /v1/marketplace/orders`
- **Missing Features**:
- Order creation from bookings
- Order tracking
- Order history
- Order status updates
#### 6. Pricing Information
- **Status**: ❌ Not implemented
- **CLI expects**: `GET /v1/marketplace/pricing/{model}`
- **Missing Features**:
- Model-specific pricing
- Dynamic pricing based on demand
- Historical pricing data
- Price recommendations
### 🔧 Data Model Issues
#### 1. MarketplaceOffer Model Limitations
Current model lacks GPU-specific fields:
```python
class MarketplaceOffer(SQLModel, table=True):
id: str
provider: str # Miner ID
capacity: int # Number of concurrent jobs
price: float # Price per hour
sla: str
status: str # open, closed, etc.
created_at: datetime
attributes: dict # Contains GPU info but not structured
```
**Missing GPU-specific fields**:
- `gpu_id`: Unique GPU identifier
- `gpu_model`: GPU model name
- `gpu_memory`: GPU memory in GB
- `gpu_status`: available, booked, offline
- `booking_expires`: When current booking expires
- `total_bookings`: Number of times booked
- `average_rating`: Aggregated review rating
#### 2. No Booking/Order Models
Missing models for:
- `GPUBooking`: Track GPU reservations
- `GPUOrder`: Track completed GPU usage
- `GPUReview`: Store GPU reviews
- `GPUPricing`: Store pricing tiers
### 📊 API Endpoint Comparison
| CLI Command | Expected Endpoint | Implemented | Status |
|-------------|------------------|-------------|---------|
| `aitbc marketplace gpu register` | `POST /v1/marketplace/gpu/register` | ❌ | Missing |
| `aitbc marketplace gpu list` | `GET /v1/marketplace/gpu/list` | ❌ | Missing |
| `aitbc marketplace gpu details` | `GET /v1/marketplace/gpu/{id}` | ❌ | Missing |
| `aitbc marketplace gpu book` | `POST /v1/marketplace/gpu/{id}/book` | ❌ | Missing |
| `aitbc marketplace gpu release` | `POST /v1/marketplace/gpu/{id}/release` | ❌ | Missing |
| `aitbc marketplace reviews` | `GET /v1/marketplace/gpu/{id}/reviews` | ❌ | Missing |
| `aitbc marketplace review add` | `POST /v1/marketplace/gpu/{id}/reviews` | ❌ | Missing |
| `aitbc marketplace orders list` | `GET /v1/marketplace/orders` | ❌ | Missing |
| `aitbc marketplace pricing` | `GET /v1/marketplace/pricing/{model}` | ❌ | Missing |
### 🚀 Recommended Implementation Plan
#### Phase 1: Core GPU Marketplace
1. **Create GPU Registry Model**:
```python
class GPURegistry(SQLModel, table=True):
gpu_id: str = Field(primary_key=True)
miner_id: str
gpu_model: str
gpu_memory_gb: int
cuda_version: str
status: str # available, booked, offline
current_booking_id: Optional[str] = None
booking_expires: Optional[datetime] = None
attributes: dict = Field(default_factory=dict)
```
2. **Implement GPU Endpoints**:
- Add `/v1/marketplace/gpu/` router
- Implement all CRUD operations for GPUs
- Add booking/unbooking logic
3. **Create Booking System**:
```python
class GPUBooking(SQLModel, table=True):
booking_id: str = Field(primary_key=True)
gpu_id: str
client_id: str
job_id: Optional[str]
duration_hours: float
start_time: datetime
end_time: datetime
total_cost: float
status: str # active, completed, cancelled
```
#### Phase 2: Reviews and Ratings
1. **Review System**:
```python
class GPUReview(SQLModel, table=True):
review_id: str = Field(primary_key=True)
gpu_id: str
client_id: str
rating: int = Field(ge=1, le=5)
comment: str
created_at: datetime
```
2. **Rating Aggregation**:
- Add `average_rating` to GPURegistry
- Update rating on each new review
- Implement rating history tracking
#### Phase 3: Orders and Pricing
1. **Order Management**:
```python
class GPUOrder(SQLModel, table=True):
order_id: str = Field(primary_key=True)
booking_id: str
client_id: str
gpu_id: str
status: str
created_at: datetime
completed_at: Optional[datetime]
```
2. **Dynamic Pricing**:
```python
class GPUPricing(SQLModel, table=True):
id: str = Field(primary_key=True)
model_name: str
base_price: float
current_price: float
demand_multiplier: float
updated_at: datetime
```
### 🔍 Integration Points
#### 1. Miner Registration
- When miners register, automatically create GPU entries
- Sync GPU capabilities from miner registration
- Update GPU status based on miner heartbeat
#### 2. Job Assignment
- Check GPU availability before job assignment
- Book GPU for job duration
- Release GPU on job completion or failure
#### 3. Billing Integration
- Calculate costs from booking duration
- Create orders from completed bookings
- Handle refunds for early releases
### 📝 Implementation Notes
1. **API Versioning**: Use `/v1/marketplace/gpu/` as expected by CLI
2. **Authentication**: Use existing API key system
3. **Error Handling**: Follow existing error patterns
4. **Metrics**: Add Prometheus metrics for GPU operations
5. **Testing**: Create comprehensive test suite
6. **Documentation**: Update OpenAPI specs
### 🎯 Priority Matrix
| Feature | Priority | Effort | Impact |
|---------|----------|--------|--------|
| GPU Registry | High | Medium | High |
| GPU Booking | High | High | High |
| GPU List/Details | High | Low | High |
| Reviews System | Medium | Medium | Medium |
| Order Management | Medium | High | Medium |
| Dynamic Pricing | Low | High | Low |
### 💡 Quick Win
The fastest way to make the CLI work is to:
1. Create a new router `/v1/marketplace/gpu/`
2. Implement basic endpoints that return mock data
3. Map existing marketplace offers to GPU format
4. Add simple in-memory booking tracking
This would allow the CLI to function while the full backend is developed.

View File

@@ -0,0 +1,69 @@
# Marketplace GPU Endpoints Deployment Summary
## ✅ Successfully Deployed to Remote Server (aitbc-cascade)
### What was deployed:
1. **New router file**: `/opt/coordinator-api/src/app/routers/marketplace_gpu.py`
- 9 GPU-specific endpoints implemented
- In-memory storage for quick testing
- Mock data with 3 initial GPUs
2. **Updated router configuration**:
- Added `marketplace_gpu` import to `__init__.py`
- Added router to main app with `/v1` prefix
- Service restarted successfully
### Available Endpoints:
- `POST /v1/marketplace/gpu/register` - Register GPU
- `GET /v1/marketplace/gpu/list` - List GPUs
- `GET /v1/marketplace/gpu/{gpu_id}` - Get GPU details
- `POST /v1/marketplace/gpu/{gpu_id}/book` - Book GPU
- `POST /v1/marketplace/gpu/{gpu_id}/release` - Release GPU
- `GET /v1/marketplace/gpu/{gpu_id}/reviews` - Get reviews
- `POST /v1/marketplace/gpu/{gpu_id}/reviews` - Add review
- `GET /v1/marketplace/orders` - List orders
- `GET /v1/marketplace/pricing/{model}` - Get pricing
### Test Results:
1. **GPU Registration**: ✅
- Successfully registered RTX 4060 Ti (16GB)
- GPU ID: gpu_001
- Price: $0.30/hour
2. **GPU Booking**: ✅
- Booked for 2 hours
- Total cost: $1.0
- Booking ID generated
3. **Review System**: ✅
- Added 5-star review
- Average rating updated to 5.0
4. **Order Management**: ✅
- Orders tracked
- Status: active
### Current GPU Inventory:
1. RTX 4090 (24GB) - $0.50/hr - Available
2. RTX 3080 (16GB) - $0.35/hr - Available
3. A100 (40GB) - $1.20/hr - Booked
4. **RTX 4060 Ti (16GB) - $0.30/hr - Available** (newly registered)
### Service Status:
- Coordinator API: Running on port 8000
- Service: active (running)
- Last restart: Feb 12, 2026 at 16:14:11 UTC
### Next Steps:
1. Update CLI to use remote server URL (http://aitbc-cascade:8000)
2. Test full CLI workflow against remote server
3. Consider persistent storage implementation
4. Add authentication/authorization for production
### Notes:
- Current implementation uses in-memory storage
- Data resets on service restart
- No authentication required (test API key works)
- All endpoints return proper HTTP status codes (201 for creation)
The marketplace GPU functionality is now fully operational on the remote server! 🚀

View File

@@ -731,6 +731,18 @@ Current Status: Canonical receipt schema specification moved from `protocols/rec
| `docs/reference/specs/receipt-spec.md` finalize | Low | Q2 2026 | 🔄 Pending extensions |
| Cross-site synchronization | High | Q1 2026 | ✅ Complete (2026-01-29) |
## Recent Progress (2026-02-12)
### CLI Enhancement — All Phases Complete ✅
- **116/116 tests passing** (0 failures) across 8 test files
- **11 command groups**: client, miner, wallet, auth, config, blockchain, marketplace, simulate, admin, monitor, plugin
- CI/CD: `.github/workflows/cli-tests.yml` (Python 3.10/3.11/3.12)
- **Phase 12**: Core enhancements + new CLI tools (client retry, miner earnings/capabilities/deregister, wallet staking/multi-wallet/backup, auth, blockchain, marketplace, admin, config, simulate)
- **Phase 3**: 116 tests, CLI reference docs (560+ lines), shell completion, man page
- **Phase 4**: MarketplaceOffer GPU fields, booking system, review system
- **Phase 5**: Batch CSV/JSON ops, job templates, webhooks, plugin system, real-time dashboard, metrics/alerts, multi-sig wallets, encrypted config, audit logging, progress bars
## Recent Progress (2026-02-11)
### Git & Repository Hygiene ✅ COMPLETE

View File

@@ -252,7 +252,7 @@ website/
| Directory | Purpose |
|-----------|---------|
| `cli/` | CLI tools for client, miner, wallet operations and GPU testing |
| `cli/` | AITBC CLI package (11 command groups, 80+ subcommands, 116 tests, CI/CD, man page, plugins) |
| `plugins/ollama/` | Ollama LLM integration (client plugin, miner plugin, service layer) |
| `home/` | Local simulation scripts for client/miner workflows |
| `extensions/` | Firefox wallet extension source code |

View File

@@ -0,0 +1,10 @@
{
"wallet_id": "my-wallet",
"type": "hd",
"address": "aitbc1e9056b875c03773b067fd0345248abd3db762af5",
"public_key": "0x2619e06fca452e9a524a688ceccbbb07ebdaaa824c3e78c4a406ed2a60cee47f",
"private_key": "0xe9056b875c03773b067fd0345248abd3db762af5274c8e30ab304cb04a7b4c79",
"created_at": "2026-02-12T16:41:34.206767Z",
"balance": 0,
"transactions": []
}

View File

@@ -28,3 +28,57 @@ markers = [
"confidential: Tests for confidential transactions",
"multitenant: Multi-tenancy specific tests"
]
[project]
name = "aitbc-cli"
version = "0.1.0"
description = "AITBC Command Line Interface Tools"
authors = [
{name = "AITBC Team", email = "team@aitbc.net"}
]
readme = "cli/README.md"
license = "MIT"
requires-python = ">=3.8"
dependencies = [
"click>=8.0.0",
"httpx>=0.24.0",
"pydantic>=1.10.0",
"pyyaml>=6.0",
"rich>=13.0.0",
"keyring>=23.0.0",
"cryptography>=3.4.8",
"click-completion>=0.5.2",
"tabulate>=0.9.0",
"colorama>=0.4.4",
"python-dotenv>=0.19.0"
]
[project.optional-dependencies]
dev = [
"pytest>=7.0.0",
"pytest-asyncio>=0.21.0",
"pytest-cov>=4.0.0",
"pytest-mock>=3.10.0",
"black>=22.0.0",
"isort>=5.10.0",
"flake8>=5.0.0"
]
[project.scripts]
aitbc = "aitbc_cli.main:cli"
[project.urls]
Homepage = "https://aitbc.net"
Repository = "https://github.com/aitbc/aitbc"
Documentation = "https://docs.aitbc.net"
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.setuptools.packages.find]
where = ["cli"]
include = ["aitbc_cli*"]
[tool.setuptools.package-dir]
"aitbc_cli" = "cli/aitbc_cli"

View File

@@ -4,7 +4,7 @@ set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
CLI_PY="$ROOT_DIR/cli/client.py"
AITBC_URL="${AITBC_URL:-http://127.0.0.1:18000}"
AITBC_URL="${AITBC_URL:-http://localhost:8000}"
CLIENT_KEY="${CLIENT_KEY:?Set CLIENT_KEY env var}"
ADMIN_KEY="${ADMIN_KEY:?Set ADMIN_KEY env var}"
MINER_KEY="${MINER_KEY:?Set MINER_KEY env var}"
@@ -27,7 +27,7 @@ Usage:
aitbc-cli.sh health
Environment overrides:
AITBC_URL (default: http://127.0.0.1:18000)
AITBC_URL (default: http://localhost:8000)
CLIENT_KEY (required)
ADMIN_KEY (required)
MINER_KEY (required)

392
tests/cli/test_admin.py Normal file
View File

@@ -0,0 +1,392 @@
"""Tests for admin CLI commands"""
import pytest
import json
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.admin import admin
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://test:8000"
config.api_key = "test_admin_key"
return config
class TestAdminCommands:
"""Test admin command group"""
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_status_success(self, mock_client_class, runner, mock_config):
"""Test successful system status check"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"status": "healthy",
"version": "1.0.0",
"uptime": 3600
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'status'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'healthy'
assert data['version'] == '1.0.0'
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/admin/status',
headers={"X-Api-Key": "test_admin_key"}
)
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_jobs_with_filter(self, mock_client_class, runner, mock_config):
"""Test jobs listing with filters"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"jobs": [
{"id": "job1", "status": "completed"},
{"id": "job2", "status": "running"}
]
}
mock_client.get.return_value = mock_response
# Run command with filters
result = runner.invoke(admin, [
'jobs',
'--status', 'running',
'--limit', '50'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Verify API call with filters
mock_client.get.assert_called_once()
call_args = mock_client.get.call_args
assert '/v1/admin/jobs' in call_args[0][0]
assert call_args[1]['params']['status'] == 'running'
assert call_args[1]['params']['limit'] == 50
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_job_details_success(self, mock_client_class, runner, mock_config):
"""Test successful job details retrieval"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"id": "job123",
"status": "completed",
"result": "Test result",
"created_at": "2024-01-01T00:00:00"
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'job-details',
'job123'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['id'] == 'job123'
assert data['status'] == 'completed'
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/admin/jobs/job123',
headers={"X-Api-Key": "test_admin_key"}
)
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_delete_job_confirmed(self, mock_client_class, runner, mock_config):
"""Test successful job deletion with confirmation"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_client.delete.return_value = mock_response
# Run command with confirmation
result = runner.invoke(admin, [
'delete-job',
'job123'
], obj={'config': mock_config, 'output_format': 'json'}, input='y\n')
# Assertions
assert result.exit_code == 0
assert 'deleted' in result.output
# Verify API call
mock_client.delete.assert_called_once_with(
'http://test:8000/v1/admin/jobs/job123',
headers={"X-Api-Key": "test_admin_key"}
)
def test_delete_job_cancelled(self, runner, mock_config):
"""Test job deletion cancelled by user"""
# Run command with cancellation
result = runner.invoke(admin, [
'delete-job',
'job123'
], obj={'config': mock_config, 'output_format': 'json'}, input='n\n')
# Assertions
assert result.exit_code == 0
# No API calls should be made
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_miners_list(self, mock_client_class, runner, mock_config):
"""Test miners listing"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"miners": [
{"id": "miner1", "status": "active", "gpu": "RTX4090"},
{"id": "miner2", "status": "inactive", "gpu": "RTX3080"}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'miners'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['miners']) == 2
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/admin/miners',
params={"limit": 50},
headers={"X-Api-Key": "test_admin_key"}
)
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_deactivate_miner(self, mock_client_class, runner, mock_config):
"""Test miner deactivation"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_client.post.return_value = mock_response
# Run command with confirmation
result = runner.invoke(admin, [
'deactivate-miner',
'miner123'
], obj={'config': mock_config, 'output_format': 'json'}, input='y\n')
# Assertions
assert result.exit_code == 0
assert 'deactivated' in result.output
# Verify API call
mock_client.post.assert_called_once_with(
'http://test:8000/v1/admin/miners/miner123/deactivate',
headers={"X-Api-Key": "test_admin_key"}
)
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_analytics(self, mock_client_class, runner, mock_config):
"""Test system analytics"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"total_jobs": 1000,
"completed_jobs": 950,
"active_miners": 50,
"average_processing_time": 120
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'analytics',
'--days', '7'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['total_jobs'] == 1000
assert data['active_miners'] == 50
# Verify API call
mock_client.get.assert_called_once()
call_args = mock_client.get.call_args
assert '/v1/admin/analytics' in call_args[0][0]
assert call_args[1]['params']['days'] == 7
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_logs_with_level(self, mock_client_class, runner, mock_config):
"""Test system logs with level filter"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"logs": [
{"level": "ERROR", "message": "Test error", "timestamp": "2024-01-01T00:00:00"}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'logs',
'--level', 'ERROR',
'--limit', '50'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Verify API call
mock_client.get.assert_called_once()
call_args = mock_client.get.call_args
assert '/v1/admin/logs' in call_args[0][0]
assert call_args[1]['params']['level'] == 'ERROR'
assert call_args[1]['params']['limit'] == 50
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_prioritize_job(self, mock_client_class, runner, mock_config):
"""Test job prioritization"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'prioritize-job',
'job123',
'--reason', 'Urgent request'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'prioritized' in result.output
# Verify API call
mock_client.post.assert_called_once()
call_args = mock_client.post.call_args
assert '/v1/admin/jobs/job123/prioritize' in call_args[0][0]
assert call_args[1]['json']['reason'] == 'Urgent request'
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_execute_custom_action(self, mock_client_class, runner, mock_config):
"""Test custom action execution"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"status": "success", "result": "Action completed"}
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'execute',
'--action', 'custom_command',
'--target', 'miner123',
'--data', '{"param": "value"}'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'success'
# Verify API call
mock_client.post.assert_called_once()
call_args = mock_client.post.call_args
assert '/v1/admin/execute/custom_command' in call_args[0][0]
assert call_args[1]['json']['target'] == 'miner123'
assert call_args[1]['json']['param'] == 'value'
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_maintenance_cleanup(self, mock_client_class, runner, mock_config):
"""Test maintenance cleanup"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"cleaned_items": 100}
mock_client.post.return_value = mock_response
# Run command with confirmation
result = runner.invoke(admin, [
'maintenance',
'cleanup'
], obj={'config': mock_config, 'output_format': 'json'}, input='y\n')
# Assertions
assert result.exit_code == 0
assert 'Cleanup completed' in result.output
# Verify API call
mock_client.post.assert_called_once_with(
'http://test:8000/v1/admin/maintenance/cleanup',
headers={"X-Api-Key": "test_admin_key"}
)
@patch('aitbc_cli.commands.admin.httpx.Client')
def test_api_error_handling(self, mock_client_class, runner, mock_config):
"""Test API error handling"""
# Setup mock for error response
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 403
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(admin, [
'status'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code != 0
assert 'Error' in result.output

361
tests/cli/test_auth.py Normal file
View File

@@ -0,0 +1,361 @@
"""Tests for auth CLI commands"""
import pytest
import json
import os
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.auth import auth
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
return {}
class TestAuthCommands:
"""Test auth command group"""
@patch('aitbc_cli.commands.auth.AuthManager')
def test_login_success(self, mock_auth_manager_class, runner, mock_config):
"""Test successful login"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'login',
'test_api_key_12345',
'--environment', 'dev'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'logged_in'
assert data['environment'] == 'dev'
# Verify credential stored
mock_auth_manager.store_credential.assert_called_once_with(
'client', 'test_api_key_12345', 'dev'
)
@patch('aitbc_cli.commands.auth.AuthManager')
def test_login_invalid_key(self, mock_auth_manager_class, runner, mock_config):
"""Test login with invalid API key"""
# Run command with short key
result = runner.invoke(auth, [
'login',
'short',
'--environment', 'dev'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code != 0
assert 'Invalid API key' in result.output
# Verify credential not stored
mock_auth_manager_class.return_value.store_credential.assert_not_called()
@patch('aitbc_cli.commands.auth.AuthManager')
def test_logout_success(self, mock_auth_manager_class, runner, mock_config):
"""Test successful logout"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'logout',
'--environment', 'prod'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'logged_out'
assert data['environment'] == 'prod'
# Verify credential deleted
mock_auth_manager.delete_credential.assert_called_once_with(
'client', 'prod'
)
@patch('aitbc_cli.commands.auth.AuthManager')
def test_token_show(self, mock_auth_manager_class, runner, mock_config):
"""Test token command with show flag"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.get_credential.return_value = 'secret_key_123'
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'token',
'--show',
'--environment', 'staging'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['api_key'] == 'secret_key_123'
assert data['environment'] == 'staging'
# Verify credential retrieved
mock_auth_manager.get_credential.assert_called_once_with(
'client', 'staging'
)
@patch('aitbc_cli.commands.auth.AuthManager')
def test_token_hidden(self, mock_auth_manager_class, runner, mock_config):
"""Test token command without show flag"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.get_credential.return_value = 'secret_key_123'
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'token',
'--environment', 'staging'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['api_key'] == '***REDACTED***'
assert data['length'] == len('secret_key_123')
@patch('aitbc_cli.commands.auth.AuthManager')
def test_token_not_found(self, mock_auth_manager_class, runner, mock_config):
"""Test token command when no credential stored"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.get_credential.return_value = None
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'token',
'--environment', 'nonexistent'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['message'] == 'No API key stored'
assert data['environment'] == 'nonexistent'
@patch('aitbc_cli.commands.auth.AuthManager')
def test_status_authenticated(self, mock_auth_manager_class, runner, mock_config):
"""Test status when authenticated"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.list_credentials.return_value = ['client@dev', 'miner@prod']
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'status'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'authenticated'
assert len(data['stored_credentials']) == 2
assert 'client@dev' in data['stored_credentials']
assert 'miner@prod' in data['stored_credentials']
@patch('aitbc_cli.commands.auth.AuthManager')
def test_status_not_authenticated(self, mock_auth_manager_class, runner, mock_config):
"""Test status when not authenticated"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.list_credentials.return_value = []
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'status'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'not_authenticated'
assert data['message'] == 'No stored credentials found'
@patch('aitbc_cli.commands.auth.AuthManager')
def test_refresh_success(self, mock_auth_manager_class, runner, mock_config):
"""Test refresh command"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.get_credential.return_value = 'valid_key'
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'refresh',
'--environment', 'dev'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'refreshed'
assert data['environment'] == 'dev'
assert 'placeholder' in data['message']
@patch('aitbc_cli.commands.auth.AuthManager')
def test_refresh_no_key(self, mock_auth_manager_class, runner, mock_config):
"""Test refresh with no stored key"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.get_credential.return_value = None
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'refresh',
'--environment', 'nonexistent'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code != 0
assert 'No API key found' in result.output
@patch('aitbc_cli.commands.auth.AuthManager')
def test_keys_list(self, mock_auth_manager_class, runner, mock_config):
"""Test keys list command"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager.list_credentials.return_value = [
'client@dev', 'miner@dev', 'admin@prod'
]
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'keys',
'list'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['credentials']) == 3
@patch('aitbc_cli.commands.auth.AuthManager')
def test_keys_create(self, mock_auth_manager_class, runner, mock_config):
"""Test keys create command"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'keys',
'create',
'miner',
'miner_key_abcdef',
'--permissions', 'mine,poll',
'--environment', 'prod'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'created'
assert data['name'] == 'miner'
assert data['environment'] == 'prod'
assert data['permissions'] == 'mine,poll'
# Verify credential stored
mock_auth_manager.store_credential.assert_called_once_with(
'miner', 'miner_key_abcdef', 'prod'
)
@patch('aitbc_cli.commands.auth.AuthManager')
def test_keys_revoke(self, mock_auth_manager_class, runner, mock_config):
"""Test keys revoke command"""
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'keys',
'revoke',
'old_miner',
'--environment', 'dev'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'revoked'
assert data['name'] == 'old_miner'
assert data['environment'] == 'dev'
# Verify credential deleted
mock_auth_manager.delete_credential.assert_called_once_with(
'old_miner', 'dev'
)
@patch.dict(os.environ, {'CLIENT_API_KEY': 'env_test_key'})
@patch('aitbc_cli.commands.auth.AuthManager')
def test_import_env_success(self, mock_auth_manager_class, runner, mock_config):
"""Test successful import from environment"""
import os
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'import-env',
'client'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['status'] == 'imported'
assert data['name'] == 'client'
assert data['source'] == 'CLIENT_API_KEY'
# Verify credential stored
mock_auth_manager.store_credential.assert_called_once_with(
'client', 'env_test_key'
)
@patch.dict(os.environ, {})
@patch('aitbc_cli.commands.auth.AuthManager')
def test_import_env_not_set(self, mock_auth_manager_class, runner, mock_config):
"""Test import when environment variable not set"""
import os
# Setup mock
mock_auth_manager = Mock()
mock_auth_manager_class.return_value = mock_auth_manager
# Run command
result = runner.invoke(auth, [
'import-env',
'client'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code != 0
assert 'CLIENT_API_KEY not set' in result.output

View File

@@ -0,0 +1,357 @@
"""Tests for blockchain CLI commands"""
import pytest
import json
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.blockchain import blockchain
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://test:8000"
config.api_key = "test_api_key"
return config
class TestBlockchainCommands:
"""Test blockchain command group"""
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_blocks_success(self, mock_client_class, runner, mock_config):
"""Test successful block listing"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"blocks": [
{"height": 100, "hash": "0xabc123", "timestamp": "2024-01-01T00:00:00"},
{"height": 99, "hash": "0xdef456", "timestamp": "2024-01-01T00:01:00"}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'blocks',
'--limit', '2'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['blocks']) == 2
assert data['blocks'][0]['height'] == 100
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/explorer/blocks',
params={"limit": 2},
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_block_details(self, mock_client_class, runner, mock_config):
"""Test getting block details"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"height": 100,
"hash": "0xabc123",
"transactions": ["0xtx1", "0xtx2"],
"timestamp": "2024-01-01T00:00:00",
"validator": "validator1"
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'block',
'100'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['height'] == 100
assert data['hash'] == '0xabc123'
assert len(data['transactions']) == 2
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/explorer/blocks/100',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_transaction(self, mock_client_class, runner, mock_config):
"""Test getting transaction details"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"hash": "0xtx123",
"block": 100,
"from": "0xabc",
"to": "0xdef",
"amount": "1000",
"fee": "10",
"status": "confirmed"
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'transaction',
'0xtx123'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['hash'] == '0xtx123'
assert data['block'] == 100
assert data['status'] == 'confirmed'
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/explorer/transactions/0xtx123',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_node_status(self, mock_client_class, runner, mock_config):
"""Test getting node status"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"status": "running",
"version": "1.0.0",
"height": 1000,
"peers": 5,
"synced": True
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'status',
'--node', '1'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['node'] == 1
assert data['rpc_url'] == 'http://localhost:8082'
assert data['status']['status'] == 'running'
# Verify API call
mock_client.get.assert_called_once_with(
'http://localhost:8082/status',
timeout=5
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_sync_status(self, mock_client_class, runner, mock_config):
"""Test getting sync status"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"synced": True,
"current_height": 1000,
"target_height": 1000,
"sync_percentage": 100.0
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'sync-status'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['synced'] == True
assert data['sync_percentage'] == 100.0
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/blockchain/sync',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_peers(self, mock_client_class, runner, mock_config):
"""Test listing peers"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"peers": [
{"id": "peer1", "address": "1.2.3.4:8080", "connected": True},
{"id": "peer2", "address": "5.6.7.8:8080", "connected": False}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'peers'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['peers']) == 2
assert data['peers'][0]['connected'] == True
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/blockchain/peers',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_info(self, mock_client_class, runner, mock_config):
"""Test getting blockchain info"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"network": "aitbc-mainnet",
"chain_id": "aitbc-1",
"block_time": 5,
"min_stake": 1000,
"total_supply": "1000000000"
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'info'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['network'] == 'aitbc-mainnet'
assert data['block_time'] == 5
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/blockchain/info',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_supply(self, mock_client_class, runner, mock_config):
"""Test getting token supply"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"total_supply": "1000000000",
"circulating_supply": "500000000",
"staked": "300000000",
"burned": "200000000"
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'supply'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['total_supply'] == '1000000000'
assert data['circulating_supply'] == '500000000'
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/blockchain/supply',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_validators(self, mock_client_class, runner, mock_config):
"""Test listing validators"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"validators": [
{"address": "0xval1", "stake": "100000", "status": "active"},
{"address": "0xval2", "stake": "50000", "status": "active"}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'validators'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['validators']) == 2
assert data['validators'][0]['stake'] == '100000'
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/blockchain/validators',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.blockchain.httpx.Client')
def test_api_error_handling(self, mock_client_class, runner, mock_config):
"""Test API error handling"""
# Setup mock for error response
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 404
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(blockchain, [
'block',
'999999'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0 # The command doesn't exit on error
assert 'not found' in result.output

244
tests/cli/test_client.py Normal file
View File

@@ -0,0 +1,244 @@
"""Tests for client CLI commands"""
import pytest
import json
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.client import client
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://test:8000"
config.api_key = "test_key"
return config
class TestClientCommands:
"""Test client command group"""
@patch('aitbc_cli.commands.client.httpx.Client')
def test_submit_job_success(self, mock_client_class, runner, mock_config):
"""Test successful job submission"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 201
mock_response.json.return_value = {"job_id": "test_job_123"}
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(client, [
'submit',
'--type', 'inference',
'--prompt', 'Test prompt',
'--model', 'test_model'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'job_id' in result.output
# Verify API call
mock_client.post.assert_called_once()
call_args = mock_client.post.call_args
assert '/v1/jobs' in call_args[0][0]
assert call_args[1]['json']['payload']['type'] == 'inference'
assert call_args[1]['json']['payload']['prompt'] == 'Test prompt'
@patch('aitbc_cli.commands.client.httpx.Client')
def test_submit_job_from_file(self, mock_client_class, runner, mock_config, tmp_path):
"""Test job submission from file"""
# Create test job file
job_file = tmp_path / "test_job.json"
job_data = {
"type": "training",
"model": "gpt-3",
"dataset": "test_data"
}
job_file.write_text(json.dumps(job_data))
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 201
mock_response.json.return_value = {"job_id": "test_job_456"}
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(client, [
'submit',
'--file', str(job_file)
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'job_id' in result.output
# Verify API call used file data
call_args = mock_client.post.call_args
assert call_args[1]['json']['payload']['type'] == 'training'
assert call_args[1]['json']['payload']['model'] == 'gpt-3'
@patch('aitbc_cli.commands.client.httpx.Client')
def test_status_success(self, mock_client_class, runner, mock_config):
"""Test successful job status check"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"job_id": "test_job_123",
"state": "completed",
"result": "Test result"
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(client, [
'status',
'test_job_123'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'completed' in result.output
assert 'test_job_123' in result.output
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/jobs/test_job_123',
headers={"X-Api-Key": "test_key"}
)
@patch('aitbc_cli.commands.client.httpx.Client')
def test_cancel_job_success(self, mock_client_class, runner, mock_config):
"""Test successful job cancellation"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(client, [
'cancel',
'test_job_123'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Verify API call
mock_client.post.assert_called_once_with(
'http://test:8000/v1/jobs/test_job_123/cancel',
headers={"X-Api-Key": "test_key"}
)
@patch('aitbc_cli.commands.client.httpx.Client')
def test_blocks_success(self, mock_client_class, runner, mock_config):
"""Test successful blocks listing"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"items": [
{"height": 100, "hash": "0x123"},
{"height": 101, "hash": "0x456"}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(client, [
'blocks',
'--limit', '2'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'items' in result.output
# Verify API call
mock_client.get.assert_called_once()
call_args = mock_client.get.call_args
assert '/v1/explorer/blocks' in call_args[0][0]
assert call_args[1]['params']['limit'] == 2
@patch('aitbc_cli.commands.client.httpx.Client')
def test_history_with_filters(self, mock_client_class, runner, mock_config):
"""Test job history with filters"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"jobs": [
{"id": "job1", "status": "completed"},
{"id": "job2", "status": "failed"}
]
}
mock_client.get.return_value = mock_response
# Run command with filters
result = runner.invoke(client, [
'history',
'--status', 'completed',
'--type', 'inference',
'--limit', '10'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Verify API call with filters
mock_client.get.assert_called_once()
call_args = mock_client.get.call_args
assert call_args[1]['params']['status'] == 'completed'
assert call_args[1]['params']['type'] == 'inference'
assert call_args[1]['params']['limit'] == 10
@patch('aitbc_cli.commands.client.httpx.Client')
def test_api_error_handling(self, mock_client_class, runner, mock_config):
"""Test API error handling"""
# Setup mock for error response
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 500
mock_response.text = "Internal Server Error"
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(client, [
'status',
'test_job_123'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code != 0
assert 'Error' in result.output
def test_submit_missing_required_args(self, runner, mock_config):
"""Test submit command with missing required arguments"""
result = runner.invoke(client, [
'submit'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'Error' in result.output

654
tests/cli/test_config.py Normal file
View File

@@ -0,0 +1,654 @@
"""Tests for config CLI commands"""
import pytest
import json
import yaml
import os
import tempfile
from pathlib import Path
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.config import config
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://127.0.0.1:18000"
config.api_key = None
config.timeout = 30
config.config_file = "/home/oib/.aitbc/config.yaml"
return config
@pytest.fixture
def temp_config_file():
"""Create temporary config file"""
with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
config_data = {
"coordinator_url": "http://test:8000",
"api_key": "test_key",
"timeout": 60
}
yaml.dump(config_data, f)
temp_path = f.name
yield temp_path
# Cleanup
os.unlink(temp_path)
class TestConfigCommands:
"""Test config command group"""
def test_show_config(self, runner, mock_config):
"""Test showing current configuration"""
result = runner.invoke(config, [
'show'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert data['coordinator_url'] == 'http://127.0.0.1:18000'
assert data['api_key'] is None # mock_config has api_key=None
assert data['timeout'] == 30
def test_set_coordinator_url(self, runner, mock_config, tmp_path):
"""Test setting coordinator URL"""
with runner.isolated_filesystem():
result = runner.invoke(config, [
'set',
'coordinator_url',
'http://new:8000'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'Coordinator URL set to: http://new:8000' in result.output
# Verify file was created in current directory
config_file = Path.cwd() / ".aitbc.yaml"
assert config_file.exists()
with open(config_file) as f:
saved_config = yaml.safe_load(f)
assert saved_config['coordinator_url'] == 'http://new:8000'
def test_set_api_key(self, runner, mock_config):
"""Test setting API key"""
result = runner.invoke(config, [
'set',
'api_key',
'new_test_key_12345'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'API key set (use --global to set permanently)' in result.output
def test_set_timeout(self, runner, mock_config):
"""Test setting timeout"""
with runner.isolated_filesystem():
result = runner.invoke(config, [
'set',
'timeout',
'45'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'Timeout set to: 45s' in result.output
def test_set_invalid_timeout(self, runner, mock_config):
"""Test setting invalid timeout"""
result = runner.invoke(config, [
'set',
'timeout',
'invalid'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'Timeout must be an integer' in result.output
def test_set_invalid_key(self, runner, mock_config):
"""Test setting invalid configuration key"""
result = runner.invoke(config, [
'set',
'invalid_key',
'value'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'Unknown configuration key' in result.output
def test_path_command(self, runner, mock_config, tmp_path):
"""Test showing configuration file path"""
with runner.isolated_filesystem():
result = runner.invoke(config, [
'path'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert '.aitbc.yaml' in result.output
def test_path_global(self, runner, mock_config):
"""Test showing global config path"""
result = runner.invoke(config, [
'path',
'--global'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert '.config/aitbc/config.yaml' in result.output
@patch('os.system')
def test_edit_command(self, mock_system, runner, mock_config, tmp_path):
"""Test editing configuration file"""
# Change to the tmp_path directory
with runner.isolated_filesystem(temp_dir=tmp_path):
# The actual config file will be in the current working directory
actual_config_file = Path.cwd() / ".aitbc.yaml"
result = runner.invoke(config, [
'edit'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
# Verify editor was called
mock_system.assert_called_once()
assert 'nano' in mock_system.call_args[0][0]
assert str(actual_config_file) in mock_system.call_args[0][0]
def test_reset_config_cancelled(self, runner, mock_config, temp_config_file):
"""Test config reset cancelled by user"""
# Change to the directory containing the config file
config_dir = Path(temp_config_file).parent
with runner.isolated_filesystem(temp_dir=config_dir):
# Copy the config file to the current directory
import shutil
local_config = Path.cwd() / ".aitbc.yaml"
shutil.copy2(temp_config_file, local_config)
result = runner.invoke(config, [
'reset'
], obj={'config': mock_config, 'output_format': 'json'}, input='n\n')
assert result.exit_code == 0
# File should still exist
assert local_config.exists()
def test_reset_config_confirmed(self, runner, mock_config, temp_config_file):
"""Test config reset confirmed"""
# Change to the directory containing the config file
config_dir = Path(temp_config_file).parent
with runner.isolated_filesystem(temp_dir=config_dir):
# Copy the config file to the current directory
import shutil
local_config = Path.cwd() / ".aitbc.yaml"
shutil.copy2(temp_config_file, local_config)
result = runner.invoke(config, [
'reset'
], obj={'config': mock_config, 'output_format': 'table'}, input='y\n')
assert result.exit_code == 0
assert 'Configuration reset' in result.output
# File should be deleted
assert not local_config.exists()
def test_reset_no_config(self, runner, mock_config):
"""Test reset when no config file exists"""
with runner.isolated_filesystem():
result = runner.invoke(config, [
'reset'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
assert 'No configuration file found' in result.output
def test_export_yaml(self, runner, mock_config, temp_config_file):
"""Test exporting configuration as YAML"""
# Change to the directory containing the config file
config_dir = Path(temp_config_file).parent
with runner.isolated_filesystem(temp_dir=config_dir):
# Copy the config file to the current directory
import shutil
local_config = Path.cwd() / ".aitbc.yaml"
shutil.copy2(temp_config_file, local_config)
result = runner.invoke(config, [
'export',
'--format', 'yaml'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
output_data = yaml.safe_load(result.output)
assert output_data['coordinator_url'] == 'http://test:8000'
assert output_data['api_key'] == '***REDACTED***'
def test_export_json(self, runner, mock_config, temp_config_file):
"""Test exporting configuration as JSON"""
# Change to the directory containing the config file
config_dir = Path(temp_config_file).parent
with runner.isolated_filesystem(temp_dir=config_dir):
# Copy the config file to the current directory
import shutil
local_config = Path.cwd() / ".aitbc.yaml"
shutil.copy2(temp_config_file, local_config)
result = runner.invoke(config, [
'export',
'--format', 'json'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
data = json.loads(result.output)
assert data['coordinator_url'] == 'http://test:8000'
assert data['api_key'] == '***REDACTED***'
def test_export_no_config(self, runner, mock_config):
"""Test export when no config file exists"""
with runner.isolated_filesystem():
result = runner.invoke(config, [
'export'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'No configuration file found' in result.output
def test_import_config_yaml(self, runner, mock_config, tmp_path):
"""Test importing YAML configuration"""
# Create import file
import_file = tmp_path / "import.yaml"
import_data = {
"coordinator_url": "http://imported:8000",
"timeout": 90
}
import_file.write_text(yaml.dump(import_data))
with runner.isolated_filesystem(temp_dir=tmp_path):
# The config file will be created in the current directory
actual_config_file = Path.cwd() / ".aitbc.yaml"
result = runner.invoke(config, [
'import-config',
str(import_file)
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'Configuration imported' in result.output
# Verify import
with open(actual_config_file) as f:
saved_config = yaml.safe_load(f)
assert saved_config['coordinator_url'] == 'http://imported:8000'
assert saved_config['timeout'] == 90
def test_import_config_json(self, runner, mock_config, tmp_path):
"""Test importing JSON configuration"""
# Create import file
import_file = tmp_path / "import.json"
import_data = {
"coordinator_url": "http://json:8000",
"timeout": 60
}
import_file.write_text(json.dumps(import_data))
config_file = tmp_path / ".aitbc.yaml"
with runner.isolated_filesystem(temp_dir=tmp_path):
# The config file will be created in the current directory
actual_config_file = Path.cwd() / ".aitbc.yaml"
result = runner.invoke(config, [
'import-config',
str(import_file)
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
# Verify import
with open(actual_config_file) as f:
saved_config = yaml.safe_load(f)
assert saved_config['coordinator_url'] == 'http://json:8000'
assert saved_config['timeout'] == 60
def test_import_merge(self, runner, mock_config, temp_config_file, tmp_path):
"""Test importing with merge option"""
# Create import file
import_file = tmp_path / "import.yaml"
import_data = {
"timeout": 45
}
import_file.write_text(yaml.dump(import_data))
# Change to the directory containing the config file
config_dir = Path(temp_config_file).parent
with runner.isolated_filesystem(temp_dir=config_dir):
# Copy the config file to the current directory
import shutil
local_config = Path.cwd() / ".aitbc.yaml"
shutil.copy2(temp_config_file, local_config)
result = runner.invoke(config, [
'import-config',
str(import_file),
'--merge'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
# Verify merge - original values should remain
with open(local_config) as f:
saved_config = yaml.safe_load(f)
assert saved_config['coordinator_url'] == 'http://test:8000' # Original
assert saved_config['timeout'] == 45 # Updated
def test_import_nonexistent_file(self, runner, mock_config):
"""Test importing non-existent file"""
result = runner.invoke(config, [
'import-config',
'/nonexistent/file.yaml'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'File not found' in result.output
def test_validate_valid_config(self, runner, mock_config):
"""Test validating valid configuration"""
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'Configuration valid' in result.output
def test_validate_missing_url(self, runner, mock_config):
"""Test validating config with missing URL"""
mock_config.coordinator_url = None
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code != 0
assert 'validation failed' in result.output
def test_validate_invalid_url(self, runner, mock_config):
"""Test validating config with invalid URL"""
mock_config.coordinator_url = "invalid-url"
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code != 0
assert 'validation failed' in result.output
def test_validate_short_api_key(self, runner, mock_config):
"""Test validating config with short API key"""
mock_config.api_key = "short"
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code != 0
assert 'validation failed' in result.output
def test_validate_no_api_key(self, runner, mock_config):
"""Test validating config without API key (warning)"""
mock_config.api_key = None
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'valid with warnings' in result.output
@patch.dict(os.environ, {'CLIENT_API_KEY': 'env_key_123'})
def test_environments(self, runner, mock_config):
"""Test listing environment variables"""
result = runner.invoke(config, [
'environments'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'CLIENT_API_KEY' in result.output
def test_profiles_save(self, runner, mock_config, tmp_path):
"""Test saving a configuration profile"""
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
result = runner.invoke(config, [
'profiles',
'save',
'test_profile'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'Profile test_profile saved' in result.output
# Verify profile was created
profile_file = profiles_dir / "test_profile.yaml"
assert profile_file.exists()
with open(profile_file) as f:
profile_data = yaml.safe_load(f)
assert profile_data['coordinator_url'] == 'http://127.0.0.1:18000'
def test_profiles_list(self, runner, mock_config, tmp_path):
"""Test listing configuration profiles"""
# Create test profiles
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile1 = profiles_dir / "profile1.yaml"
profile1.write_text(yaml.dump({"coordinator_url": "http://test1:8000"}))
profile2 = profiles_dir / "profile2.yaml"
profile2.write_text(yaml.dump({"coordinator_url": "http://test2:8000"}))
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'list'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'profile1' in result.output
assert 'profile2' in result.output
def test_profiles_load(self, runner, mock_config, tmp_path):
"""Test loading a configuration profile"""
# Create test profile
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile_file = profiles_dir / "test.yaml"
profile_data = {
"coordinator_url": "http://loaded:8000",
"timeout": 75
}
profile_file.write_text(yaml.dump(profile_data))
config_file = tmp_path / ".aitbc.yaml"
with runner.isolated_filesystem(temp_dir=tmp_path):
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'load',
'test'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'Profile test loaded' in result.output
def test_validate_invalid_url(self, runner, mock_config):
"""Test validating config with invalid URL"""
mock_config.coordinator_url = "invalid-url"
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code != 0
assert 'validation failed' in result.output
def test_validate_short_api_key(self, runner, mock_config):
"""Test validating config with short API key"""
mock_config.api_key = "short"
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code != 0
assert 'validation failed' in result.output
def test_validate_no_api_key(self, runner, mock_config):
"""Test validating config without API key (warning)"""
mock_config.api_key = None
result = runner.invoke(config, [
'validate'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'valid with warnings' in result.output
@patch.dict(os.environ, {'CLIENT_API_KEY': 'env_key_123'})
def test_environments(self, runner, mock_config):
"""Test listing environment variables"""
result = runner.invoke(config, [
'environments'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'CLIENT_API_KEY' in result.output
def test_profiles_save(self, runner, mock_config, tmp_path):
"""Test saving a configuration profile"""
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'save',
'test_profile'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert "Profile 'test_profile' saved" in result.output
# Verify profile was created
profile_file = tmp_path / ".config" / "aitbc" / "profiles" / "test_profile.yaml"
assert profile_file.exists()
with open(profile_file) as f:
profile_data = yaml.safe_load(f)
assert profile_data['coordinator_url'] == 'http://127.0.0.1:18000'
def test_profiles_list(self, runner, mock_config, tmp_path):
"""Test listing configuration profiles"""
# Create test profiles
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile1 = profiles_dir / "profile1.yaml"
profile1.write_text(yaml.dump({"coordinator_url": "http://test1:8000"}))
profile2 = profiles_dir / "profile2.yaml"
profile2.write_text(yaml.dump({"coordinator_url": "http://test2:8000"}))
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'list'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert 'profile1' in result.output
assert 'profile2' in result.output
def test_profiles_load(self, runner, mock_config, tmp_path):
"""Test loading a configuration profile"""
# Create test profile
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile_file = profiles_dir / "load_me.yaml"
profile_file.write_text(yaml.dump({"coordinator_url": "http://127.0.0.1:18000"}))
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'load',
'load_me'
], obj={'config': mock_config, 'output_format': 'table'})
assert result.exit_code == 0
assert "Profile 'load_me' loaded" in result.output
def test_profiles_delete(self, runner, mock_config, tmp_path):
"""Test deleting a configuration profile"""
# Create test profile
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile_file = profiles_dir / "delete_me.yaml"
profile_file.write_text(yaml.dump({"coordinator_url": "http://test:8000"}))
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'delete',
'delete_me'
], obj={'config': mock_config, 'output_format': 'table'}, input='y\n')
assert result.exit_code == 0
assert "Profile 'delete_me' deleted" in result.output
assert not profile_file.exists()
def test_profiles_delete_cancelled(self, runner, mock_config, tmp_path):
"""Test profile deletion cancelled by user"""
# Create test profile
profiles_dir = tmp_path / ".config" / "aitbc" / "profiles"
profiles_dir.mkdir(parents=True, exist_ok=True)
profile_file = profiles_dir / "keep_me.yaml"
profile_file.write_text(yaml.dump({"coordinator_url": "http://test:8000"}))
# Patch Path.home to return tmp_path
with patch('pathlib.Path.home') as mock_home:
mock_home.return_value = tmp_path
result = runner.invoke(config, [
'profiles',
'delete',
'keep_me'
], obj={'config': mock_config, 'output_format': 'json'}, input='n\n')
assert result.exit_code == 0
assert profile_file.exists() # Should still exist

View File

@@ -0,0 +1,553 @@
"""Tests for marketplace CLI commands"""
import pytest
import json
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.marketplace import marketplace
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://test:8000"
config.api_key = "test_api_key"
return config
class TestMarketplaceCommands:
"""Test marketplace command group"""
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_gpu_list_all(self, mock_client_class, runner, mock_config):
"""Test listing all GPUs"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"gpus": [
{
"id": "gpu1",
"model": "RTX4090",
"memory": "24GB",
"price_per_hour": 0.5,
"available": True,
"provider": "miner1"
},
{
"id": "gpu2",
"model": "RTX3080",
"memory": "10GB",
"price_per_hour": 0.3,
"available": False,
"provider": "miner2"
}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'gpu',
'list'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['gpus']) == 2
assert data['gpus'][0]['model'] == 'RTX4090'
assert data['gpus'][0]['available'] == True
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/list',
params={"limit": 20},
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_gpu_list_available(self, mock_client_class, runner, mock_config):
"""Test listing only available GPUs"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"gpus": [
{
"id": "gpu1",
"model": "RTX4090",
"memory": "24GB",
"price_per_hour": 0.5,
"available": True,
"provider": "miner1"
}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'gpu',
'list',
'--available'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['gpus']) == 1
assert data['gpus'][0]['available'] == True
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/list',
params={"available": "true", "limit": 20},
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_gpu_list_with_filters(self, mock_client_class, runner, mock_config):
"""Test listing GPUs with filters"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"gpus": [
{
"id": "gpu1",
"model": "RTX4090",
"memory": "24GB",
"price_per_hour": 0.5,
"available": True,
"provider": "miner1"
}
]
}
mock_client.get.return_value = mock_response
# Run command with filters
result = runner.invoke(marketplace, [
'gpu',
'list',
'--model', 'RTX4090',
'--memory-min', '16',
'--price-max', '1.0'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Verify API call with filters
mock_client.get.assert_called_once()
call_args = mock_client.get.call_args
assert call_args[1]['params']['model'] == 'RTX4090'
assert call_args[1]['params']['memory_min'] == 16
assert call_args[1]['params']['price_max'] == 1.0
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_gpu_details(self, mock_client_class, runner, mock_config):
"""Test getting GPU details"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"id": "gpu1",
"model": "RTX4090",
"memory": "24GB",
"price_per_hour": 0.5,
"available": True,
"provider": "miner1",
"specs": {
"cuda_cores": 16384,
"tensor_cores": 512,
"base_clock": 2230
},
"location": "us-west",
"rating": 4.8
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'gpu',
'details',
'gpu1'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['id'] == 'gpu1'
assert data['model'] == 'RTX4090'
assert data['specs']['cuda_cores'] == 16384
assert data['rating'] == 4.8
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/gpu1',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_gpu_book(self, mock_client_class, runner, mock_config):
"""Test booking a GPU"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 201
mock_response.json.return_value = {
"booking_id": "booking123",
"gpu_id": "gpu1",
"duration_hours": 2,
"total_cost": 1.0,
"status": "booked"
}
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'gpu',
'book',
'gpu1',
'--hours', '2'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Extract JSON from output (success message + JSON)
# Remove ANSI escape codes
import re
clean_output = re.sub(r'\x1b\[[0-9;]*m', '', result.output)
lines = clean_output.strip().split('\n')
# Find all lines that contain JSON and join them
json_lines = []
in_json = False
for line in lines:
stripped = line.strip()
if stripped.startswith('{'):
in_json = True
json_lines.append(stripped)
elif in_json:
json_lines.append(stripped)
if stripped.endswith('}'):
break
json_str = '\n'.join(json_lines)
assert json_str, "No JSON found in output"
data = json.loads(json_str)
assert data['booking_id'] == 'booking123'
assert data['status'] == 'booked'
assert data['total_cost'] == 1.0
# Verify API call
mock_client.post.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/gpu1/book',
json={"gpu_id": "gpu1", "duration_hours": 2.0},
headers={
"Content-Type": "application/json",
"X-Api-Key": "test_api_key"
}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_gpu_release(self, mock_client_class, runner, mock_config):
"""Test releasing a GPU"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"status": "released",
"gpu_id": "gpu1",
"refund": 0.5,
"message": "GPU gpu1 released successfully"
}
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'gpu',
'release',
'gpu1'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Extract JSON from output (success message + JSON)
# Remove ANSI escape codes
import re
clean_output = re.sub(r'\x1b\[[0-9;]*m', '', result.output)
lines = clean_output.strip().split('\n')
# Find all lines that contain JSON and join them
json_lines = []
in_json = False
for line in lines:
stripped = line.strip()
if stripped.startswith('{'):
in_json = True
json_lines.append(stripped)
elif in_json:
json_lines.append(stripped)
if stripped.endswith('}'):
break
json_str = '\n'.join(json_lines)
assert json_str, "No JSON found in output"
data = json.loads(json_str)
assert data['status'] == 'released'
assert data['gpu_id'] == 'gpu1'
# Verify API call
mock_client.post.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/gpu1/release',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_orders_list(self, mock_client_class, runner, mock_config):
"""Test listing orders"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = [
{
"order_id": "order123",
"gpu_id": "gpu1",
"gpu_model": "RTX 4090",
"status": "active",
"duration_hours": 2,
"total_cost": 1.0,
"created_at": "2024-01-01T00:00:00"
}
]
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'orders'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Extract JSON from output
import re
clean_output = re.sub(r'\x1b\[[0-9;]*m', '', result.output)
lines = clean_output.strip().split('\n')
# Find all lines that contain JSON and join them
json_lines = []
in_json = False
for line in lines:
stripped = line.strip()
if stripped.startswith('['):
in_json = True
json_lines.append(stripped)
elif in_json:
json_lines.append(stripped)
if stripped.endswith(']'):
break
json_str = '\n'.join(json_lines)
assert json_str, "No JSON found in output"
data = json.loads(json_str)
assert len(data) == 1
assert data[0]['status'] == 'active'
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/marketplace/orders',
params={"limit": 10},
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_pricing_info(self, mock_client_class, runner, mock_config):
"""Test getting pricing information"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"average_price": 0.4,
"price_range": {
"min": 0.2,
"max": 0.8
},
"price_by_model": {
"RTX4090": 0.5,
"RTX3080": 0.3,
"A100": 1.0
}
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'pricing',
'RTX4090'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['average_price'] == 0.4
assert data['price_range']['min'] == 0.2
assert data['price_by_model']['RTX4090'] == 0.5
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/marketplace/pricing/RTX4090',
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_reviews_list(self, mock_client_class, runner, mock_config):
"""Test listing reviews for a GPU"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"reviews": [
{
"id": "review1",
"user": "user1",
"rating": 5,
"comment": "Excellent performance!",
"created_at": "2024-01-01T00:00:00"
},
{
"id": "review2",
"user": "user2",
"rating": 4,
"comment": "Good value for money",
"created_at": "2024-01-02T00:00:00"
}
]
}
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'reviews',
'gpu1'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert len(data['reviews']) == 2
assert data['reviews'][0]['rating'] == 5
# Verify API call
mock_client.get.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/gpu1/reviews',
params={"limit": 10},
headers={"X-Api-Key": "test_api_key"}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_add_review(self, mock_client_class, runner, mock_config):
"""Test adding a review for a GPU"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 201
mock_response.json.return_value = {
"status": "review_added",
"gpu_id": "gpu1",
"review_id": "review_1",
"average_rating": 5.0
}
mock_client.post.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'review',
'gpu1',
'--rating', '5',
'--comment', 'Amazing GPU!'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Extract JSON from output (success message + JSON)
# Remove ANSI escape codes
import re
clean_output = re.sub(r'\x1b\[[0-9;]*m', '', result.output)
lines = clean_output.strip().split('\n')
# Find all lines that contain JSON and join them
json_lines = []
in_json = False
for line in lines:
stripped = line.strip()
if stripped.startswith('{'):
in_json = True
json_lines.append(stripped)
elif in_json:
json_lines.append(stripped)
if stripped.endswith('}'):
break
json_str = '\n'.join(json_lines)
assert json_str, "No JSON found in output"
data = json.loads(json_str)
assert data['status'] == 'review_added'
assert data['gpu_id'] == 'gpu1'
# Verify API call
mock_client.post.assert_called_once_with(
'http://test:8000/v1/marketplace/gpu/gpu1/reviews',
json={"rating": 5, "comment": "Amazing GPU!"},
headers={
"Content-Type": "application/json",
"X-Api-Key": "test_api_key"
}
)
@patch('aitbc_cli.commands.marketplace.httpx.Client')
def test_api_error_handling(self, mock_client_class, runner, mock_config):
"""Test API error handling"""
# Setup mock for error response
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 404
mock_client.get.return_value = mock_response
# Run command
result = runner.invoke(marketplace, [
'gpu',
'details',
'nonexistent'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0 # The command doesn't exit on error
assert 'not found' in result.output

371
tests/cli/test_simulate.py Normal file
View File

@@ -0,0 +1,371 @@
"""Tests for simulate CLI commands"""
import pytest
import json
import time
from pathlib import Path
from unittest.mock import patch, MagicMock, Mock
from click.testing import CliRunner
from aitbc_cli.commands.simulate import simulate
def extract_json_from_output(output):
"""Extract first JSON object from CLI output that may contain ANSI escape codes and success messages"""
import re
clean_output = re.sub(r'\x1b\[[0-9;]*m', '', output)
lines = clean_output.strip().split('\n')
# Find all lines that contain JSON and join them
json_lines = []
in_json = False
for line in lines:
stripped = line.strip()
if stripped.startswith('{'):
in_json = True
json_lines.append(stripped)
elif in_json:
json_lines.append(stripped)
if stripped.endswith('}'):
break
assert json_lines, "No JSON found in output"
json_str = '\n'.join(json_lines)
return json.loads(json_str)
def extract_last_json_from_output(output):
"""Extract the last JSON object from CLI output (for commands that emit multiple JSON objects)"""
import re
clean_output = re.sub(r'\x1b\[[0-9;]*m', '', output)
lines = clean_output.strip().split('\n')
all_objects = []
json_lines = []
in_json = False
brace_depth = 0
for line in lines:
stripped = line.strip()
if stripped.startswith('{') and not in_json:
in_json = True
brace_depth = stripped.count('{') - stripped.count('}')
json_lines = [stripped]
if brace_depth == 0:
try:
all_objects.append(json.loads('\n'.join(json_lines)))
except json.JSONDecodeError:
pass
json_lines = []
in_json = False
elif in_json:
json_lines.append(stripped)
brace_depth += stripped.count('{') - stripped.count('}')
if brace_depth <= 0:
try:
all_objects.append(json.loads('\n'.join(json_lines)))
except json.JSONDecodeError:
pass
json_lines = []
in_json = False
assert all_objects, "No JSON found in output"
return all_objects[-1]
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://test:8000"
config.api_key = "test_api_key"
return config
class TestSimulateCommands:
"""Test simulate command group"""
def test_init_economy(self, runner, mock_config):
"""Test initializing test economy"""
with runner.isolated_filesystem():
# Create a temporary home directory
home_dir = Path("temp_home")
home_dir.mkdir()
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
# Make Path return our temp directory
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command
result = runner.invoke(simulate, [
'init',
'--distribute', '5000,2000'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['status'] == 'initialized'
assert data['distribution']['client'] == 5000.0
assert data['distribution']['miner'] == 2000.0
def test_init_with_reset(self, runner, mock_config):
"""Test initializing with reset flag"""
with runner.isolated_filesystem():
# Create a temporary home directory with existing files
home_dir = Path("temp_home")
home_dir.mkdir()
# Create existing wallet files
(home_dir / "client_wallet.json").write_text("{}")
(home_dir / "miner_wallet.json").write_text("{}")
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command
result = runner.invoke(simulate, [
'init',
'--reset'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'resetting' in result.output.lower()
def test_create_user(self, runner, mock_config):
"""Test creating a test user"""
with runner.isolated_filesystem():
# Create a temporary home directory
home_dir = Path("temp_home")
home_dir.mkdir()
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command
result = runner.invoke(simulate, [
'user',
'create',
'--type', 'client',
'--name', 'testuser',
'--balance', '1000'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['user_id'] == 'client_testuser'
assert data['balance'] == 1000
def test_list_users(self, runner, mock_config):
"""Test listing test users"""
with runner.isolated_filesystem():
# Create a temporary home directory
home_dir = Path("temp_home")
home_dir.mkdir()
# Create some test wallet files
(home_dir / "client_user1_wallet.json").write_text('{"address": "aitbc1test", "balance": 1000}')
(home_dir / "miner_user2_wallet.json").write_text('{"address": "aitbc1test2", "balance": 2000}')
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command
result = runner.invoke(simulate, [
'user',
'list'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert 'users' in data
assert isinstance(data['users'], list)
assert len(data['users']) == 2
def test_user_balance(self, runner, mock_config):
"""Test checking user balance"""
with runner.isolated_filesystem():
# Create a temporary home directory
home_dir = Path("temp_home")
home_dir.mkdir()
# Create a test wallet file
(home_dir / "testuser_wallet.json").write_text('{"address": "aitbc1testuser", "balance": 1500}')
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command
result = runner.invoke(simulate, [
'user',
'balance',
'testuser'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
data = json.loads(result.output)
assert data['balance'] == 1500
def test_fund_user(self, runner, mock_config):
"""Test funding a test user"""
with runner.isolated_filesystem():
# Create a temporary home directory
home_dir = Path("temp_home")
home_dir.mkdir()
# Create genesis and user wallet files
(home_dir / "genesis_wallet.json").write_text('{"address": "aitbc1genesis", "balance": 1000000, "transactions": []}')
(home_dir / "testuser_wallet.json").write_text('{"address": "aitbc1testuser", "balance": 1000, "transactions": []}')
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command
result = runner.invoke(simulate, [
'user',
'fund',
'testuser',
'500'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
# Extract JSON from output
data = extract_json_from_output(result.output)
assert data['amount'] == 500
assert data['new_balance'] == 1500
def test_workflow_command(self, runner, mock_config):
"""Test workflow simulation command"""
result = runner.invoke(simulate, [
'workflow',
'--jobs', '5',
'--rounds', '2'
], obj={'config': mock_config, 'output_format': 'json'})
# The command should exist
assert result.exit_code == 0
# Extract last JSON from output (workflow emits multiple JSON objects)
data = extract_last_json_from_output(result.output)
assert data['status'] == 'completed'
assert data['total_jobs'] == 10
def test_load_test_command(self, runner, mock_config):
"""Test load test command"""
result = runner.invoke(simulate, [
'load-test',
'--clients', '2',
'--miners', '1',
'--duration', '5',
'--job-rate', '2'
], obj={'config': mock_config, 'output_format': 'json'})
# The command should exist
assert result.exit_code == 0
# Extract last JSON from output (load_test emits multiple JSON objects)
data = extract_last_json_from_output(result.output)
assert data['status'] == 'completed'
assert 'duration' in data
assert 'jobs_submitted' in data
def test_scenario_commands(self, runner, mock_config):
"""Test scenario commands"""
with runner.isolated_filesystem():
# Create a test scenario file
scenario_file = Path("test_scenario.json")
scenario_data = {
"name": "Test Scenario",
"description": "A test scenario",
"steps": [
{
"type": "submit_jobs",
"name": "Initial jobs",
"count": 2,
"prompt": "Test job"
},
{
"type": "wait",
"name": "Wait step",
"duration": 1
}
]
}
scenario_file.write_text(json.dumps(scenario_data))
# Run scenario
result = runner.invoke(simulate, [
'scenario',
'--file', str(scenario_file)
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
assert "Running scenario: Test Scenario" in result.output
def test_results_command(self, runner, mock_config):
"""Test results command"""
result = runner.invoke(simulate, [
'results',
'sim_123'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
# Extract JSON from output
data = extract_json_from_output(result.output)
assert data['simulation_id'] == 'sim_123'
def test_reset_command(self, runner, mock_config):
"""Test reset command"""
with runner.isolated_filesystem():
# Create a temporary home directory
home_dir = Path("temp_home")
home_dir.mkdir()
# Create existing wallet files
(home_dir / "client_wallet.json").write_text("{}")
(home_dir / "miner_wallet.json").write_text("{}")
# Patch the hardcoded path
with patch('aitbc_cli.commands.simulate.Path') as mock_path_class:
mock_path_class.return_value = home_dir
mock_path_class.side_effect = lambda x: home_dir if x == "/home/oib/windsurf/aitbc/home" else Path(x)
# Run command with reset flag
result = runner.invoke(simulate, [
'init',
'--reset'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'resetting' in result.output.lower()
def test_invalid_distribution_format(self, runner, mock_config):
"""Test invalid distribution format"""
result = runner.invoke(simulate, [
'init',
'--distribute', 'invalid'
], obj={'config': mock_config, 'output_format': 'json'})
# Assertions
assert result.exit_code == 0
assert 'invalid distribution' in result.output.lower()

358
tests/cli/test_wallet.py Normal file
View File

@@ -0,0 +1,358 @@
"""Tests for wallet CLI commands"""
import pytest
import json
import re
import tempfile
import os
from pathlib import Path
from click.testing import CliRunner
from unittest.mock import Mock, patch
from aitbc_cli.commands.wallet import wallet
def extract_json_from_output(output):
"""Extract JSON from CLI output that may contain Rich panel markup"""
clean = re.sub(r'\x1b\[[0-9;]*m', '', output)
lines = clean.strip().split('\n')
json_lines = []
in_json = False
for line in lines:
stripped = line.strip()
if stripped.startswith('{'):
in_json = True
json_lines.append(stripped)
elif in_json:
json_lines.append(stripped)
if stripped.startswith('}'):
break
return json.loads('\n'.join(json_lines))
@pytest.fixture
def runner():
"""Create CLI runner"""
return CliRunner()
@pytest.fixture
def temp_wallet():
"""Create temporary wallet file"""
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
wallet_data = {
"address": "aitbc1test",
"balance": 100.0,
"transactions": [
{
"type": "earn",
"amount": 50.0,
"description": "Test job",
"timestamp": "2024-01-01T00:00:00"
}
],
"created_at": "2024-01-01T00:00:00"
}
json.dump(wallet_data, f)
temp_path = f.name
yield temp_path
# Cleanup
os.unlink(temp_path)
@pytest.fixture
def mock_config():
"""Mock configuration"""
config = Mock()
config.coordinator_url = "http://test:8000"
config.api_key = "test_key"
return config
class TestWalletCommands:
"""Test wallet command group"""
def test_balance_command(self, runner, temp_wallet, mock_config):
"""Test wallet balance command"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'balance'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert data['balance'] == 100.0
assert data['address'] == 'aitbc1test'
def test_balance_new_wallet(self, runner, mock_config, tmp_path):
"""Test balance with new wallet (auto-creation)"""
wallet_path = tmp_path / "new_wallet.json"
result = runner.invoke(wallet, [
'--wallet-path', str(wallet_path),
'balance'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
assert wallet_path.exists()
data = json.loads(result.output)
assert data['balance'] == 0.0
assert 'address' in data
def test_earn_command(self, runner, temp_wallet, mock_config):
"""Test earning command"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'earn',
'25.5',
'job_456',
'--desc', 'Another test job'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['new_balance'] == 125.5 # 100 + 25.5
assert data['job_id'] == 'job_456'
# Verify wallet file updated
with open(temp_wallet) as f:
wallet_data = json.load(f)
assert wallet_data['balance'] == 125.5
assert len(wallet_data['transactions']) == 2
def test_spend_command_success(self, runner, temp_wallet, mock_config):
"""Test successful spend command"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'spend',
'30.0',
'GPU rental'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['new_balance'] == 70.0 # 100 - 30
assert data['description'] == 'GPU rental'
def test_spend_insufficient_balance(self, runner, temp_wallet, mock_config):
"""Test spend with insufficient balance"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'spend',
'200.0',
'Too much'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'Insufficient balance' in result.output
def test_history_command(self, runner, temp_wallet, mock_config):
"""Test transaction history"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'history',
'--limit', '5'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert 'transactions' in data
assert len(data['transactions']) == 1
assert data['transactions'][0]['amount'] == 50.0
def test_address_command(self, runner, temp_wallet, mock_config):
"""Test address command"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'address'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert data['address'] == 'aitbc1test'
def test_stats_command(self, runner, temp_wallet, mock_config):
"""Test wallet statistics"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'stats'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert data['current_balance'] == 100.0
assert data['total_earned'] == 50.0
assert data['total_spent'] == 0.0
assert data['jobs_completed'] == 1
assert data['transaction_count'] == 1
@patch('aitbc_cli.commands.wallet.httpx.Client')
def test_send_command_success(self, mock_client_class, runner, temp_wallet, mock_config):
"""Test successful send command"""
# Setup mock
mock_client = Mock()
mock_client_class.return_value.__enter__.return_value = mock_client
mock_response = Mock()
mock_response.status_code = 201
mock_response.json.return_value = {"hash": "0xabc123"}
mock_client.post.return_value = mock_response
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'send',
'aitbc1recipient',
'25.0',
'--description', 'Payment'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['new_balance'] == 75.0 # 100 - 25
assert data['tx_hash'] == '0xabc123'
# Verify API call
mock_client.post.assert_called_once()
call_args = mock_client.post.call_args
assert '/transactions' in call_args[0][0]
assert call_args[1]['json']['amount'] == 25.0
assert call_args[1]['json']['to'] == 'aitbc1recipient'
def test_request_payment_command(self, runner, temp_wallet, mock_config):
"""Test payment request command"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'request-payment',
'aitbc1payer',
'50.0',
'--description', 'Service payment'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert 'payment_request' in data
assert data['payment_request']['from_address'] == 'aitbc1payer'
assert data['payment_request']['to_address'] == 'aitbc1test'
assert data['payment_request']['amount'] == 50.0
@patch('aitbc_cli.commands.wallet.httpx.Client')
def test_send_insufficient_balance(self, mock_client_class, runner, temp_wallet, mock_config):
"""Test send with insufficient balance"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'send',
'aitbc1recipient',
'200.0'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'Insufficient balance' in result.output
def test_wallet_file_creation(self, runner, mock_config, tmp_path):
"""Test wallet file is created in correct directory"""
wallet_dir = tmp_path / "wallets"
wallet_path = wallet_dir / "test_wallet.json"
result = runner.invoke(wallet, [
'--wallet-path', str(wallet_path),
'balance'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
assert wallet_path.exists()
assert wallet_path.parent.exists()
def test_stake_command(self, runner, temp_wallet, mock_config):
"""Test staking tokens"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'stake',
'50.0',
'--duration', '30'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['amount'] == 50.0
assert data['duration_days'] == 30
assert data['new_balance'] == 50.0 # 100 - 50
assert 'stake_id' in data
assert 'apy' in data
# Verify wallet file updated
with open(temp_wallet) as f:
wallet_data = json.load(f)
assert wallet_data['balance'] == 50.0
assert len(wallet_data['staking']) == 1
assert wallet_data['staking'][0]['status'] == 'active'
def test_stake_insufficient_balance(self, runner, temp_wallet, mock_config):
"""Test staking with insufficient balance"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'stake',
'200.0'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'Insufficient balance' in result.output
def test_unstake_command(self, runner, temp_wallet, mock_config):
"""Test unstaking tokens"""
# First stake
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'stake',
'50.0',
'--duration', '30'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
stake_data = extract_json_from_output(result.output)
stake_id = stake_data['stake_id']
# Then unstake
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'unstake',
stake_id
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = extract_json_from_output(result.output)
assert data['stake_id'] == stake_id
assert data['principal'] == 50.0
assert 'rewards' in data
assert data['total_returned'] >= 50.0
assert data['new_balance'] >= 100.0 # Got back principal + rewards
def test_unstake_invalid_id(self, runner, temp_wallet, mock_config):
"""Test unstaking with invalid stake ID"""
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'unstake',
'nonexistent_stake'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code != 0
assert 'not found' in result.output
def test_staking_info_command(self, runner, temp_wallet, mock_config):
"""Test staking info command"""
# Stake first
runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'stake', '30.0', '--duration', '60'
], obj={'config': mock_config, 'output_format': 'json'})
# Check staking info
result = runner.invoke(wallet, [
'--wallet-path', temp_wallet,
'staking-info'
], obj={'config': mock_config, 'output_format': 'json'})
assert result.exit_code == 0
data = json.loads(result.output)
assert data['total_staked'] == 30.0
assert data['active_stakes'] == 1
assert len(data['stakes']) == 1