chore: remove configuration files and enhance blockchain explorer with advanced search, analytics, and export features

- Delete .aitbc.yaml.example CLI configuration template
- Delete .lycheeignore link checker exclusion rules
- Delete .nvmrc Node.js version specification
- Add advanced search panel with filters for address, amount range, transaction type, time range, and validator
- Add analytics dashboard with transaction volume, active addresses, and block time metrics
- Add Chart.js integration
This commit is contained in:
oib
2026-03-02 15:38:25 +01:00
parent af185cdd8b
commit ccedbace53
271 changed files with 35942 additions and 2359 deletions

217
cli/tests/gpu/gpu_test.py Executable file
View File

@@ -0,0 +1,217 @@
#!/usr/bin/env python3
"""
GPU Access Test - Check if miner can access local GPU resources
"""
import argparse
import subprocess
import json
import time
import psutil
def check_nvidia_gpu():
    """Report NVIDIA GPUs visible to nvidia-smi; True if the query succeeded."""
    print("🔍 Checking NVIDIA GPU...")
    try:
        # One CSV row per GPU: name, total memory, free memory, utilization.
        probe = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,memory.total,memory.free,utilization.gpu",
             "--format=csv,noheader,nounits"],
            capture_output=True,
            text=True
        )
    except FileNotFoundError:
        # Binary missing entirely — drivers are not installed.
        print("❌ nvidia-smi not found - NVIDIA drivers not installed")
        return False
    if probe.returncode != 0:
        print("❌ nvidia-smi command failed")
        return False
    rows = probe.stdout.strip().split('\n')
    print(f"✅ NVIDIA GPU(s) Found: {len(rows)}")
    for idx, row in enumerate(rows, 1):
        fields = row.split(', ')
        if len(fields) < 4:
            # Malformed row — skip rather than crash on unpacking.
            continue
        name, total_mem, free_mem, util = fields[0], fields[1], fields[2], fields[3]
        print(f"\n GPU {idx}:")
        print(f" 📦 Model: {name}")
        print(f" 💾 Memory: {free_mem}/{total_mem} MB free")
        print(f" ⚡ Utilization: {util}%")
    return True
def check_cuda():
    """Probe CUDA devices through pynvml; True when at least the query works."""
    print("\n🔍 Checking CUDA...")
    try:
        # pynvml is optional — treat a missing package as a soft failure.
        import pynvml
        pynvml.nvmlInit()
        n_devices = pynvml.nvmlDeviceGetCount()
        print(f"✅ CUDA Available - {n_devices} device(s)")
        for idx in range(n_devices):
            dev = pynvml.nvmlDeviceGetHandleByIndex(idx)
            # NVML returns the name as bytes — decode for display.
            dev_name = pynvml.nvmlDeviceGetName(dev).decode('utf-8')
            mem = pynvml.nvmlDeviceGetMemoryInfo(dev)
            print(f"\n CUDA Device {idx}:")
            print(f" 📦 Name: {dev_name}")
            print(f" 💾 Memory: {mem.free // 1024**2}/{mem.total // 1024**2} MB free")
        return True
    except ImportError:
        print("⚠️ pynvml not installed - install with: pip install pynvml")
        return False
    except Exception as e:
        # NVML init/query failures (no driver, no device, ...) land here.
        print(f"❌ CUDA error: {e}")
        return False
def check_pytorch():
    """Check whether PyTorch is installed and whether it can see CUDA GPUs."""
    print("\n🔍 Checking PyTorch CUDA...")
    try:
        import torch
    except ImportError:
        print("❌ PyTorch not installed - install with: pip install torch")
        return False
    print(f"✅ PyTorch Installed: {torch.__version__}")
    cuda_ok = torch.cuda.is_available()
    print(f" CUDA Available: {cuda_ok}")
    if cuda_ok:
        print(f" CUDA Version: {torch.version.cuda}")
        print(f" GPU Count: {torch.cuda.device_count()}")
        # Dump per-device properties for diagnostics.
        for idx in range(torch.cuda.device_count()):
            info = torch.cuda.get_device_properties(idx)
            print(f"\n PyTorch GPU {idx}:")
            print(f" 📦 Name: {info.name}")
            print(f" 💾 Memory: {info.total_memory // 1024**2} MB")
            print(f" Compute: {info.major}.{info.minor}")
    return cuda_ok
def run_gpu_stress_test(duration=10):
    """Hammer the GPU with matrix multiplications for *duration* seconds."""
    print(f"\n🔥 Running GPU Stress Test ({duration}s)...")
    try:
        import torch
        if not torch.cuda.is_available():
            print("❌ CUDA not available for stress test")
            return False
        gpu = torch.device('cuda')
        print(" ⚡ Performing matrix multiplications...")
        deadline = time.time() + duration
        # Repeatedly allocate and multiply 1000x1000 matrices until time is up.
        while time.time() < deadline:
            lhs = torch.randn(1000, 1000, device=gpu)
            rhs = torch.randn(1000, 1000, device=gpu)
            _ = torch.mm(lhs, rhs)
            # CUDA launches are async — force completion before the next round.
            torch.cuda.synchronize()
        print("✅ Stress test completed successfully")
        return True
    except Exception as e:
        # Covers missing torch, OOM, and driver errors alike.
        print(f"❌ Stress test failed: {e}")
        return False
def check_system_resources():
    """Print a one-shot snapshot of CPU, RAM, and root-disk usage via psutil."""
    print("\n💻 System Resources:")
    # CPU load sampled over a 1-second window.
    cpu_load = psutil.cpu_percent(interval=1)
    print(f" 🖥️ CPU Usage: {cpu_load}%")
    print(f" 🧠 CPU Cores: {psutil.cpu_count()} logical, {psutil.cpu_count(logical=False)} physical")
    # Memory figures reported in MB.
    vm = psutil.virtual_memory()
    print(f" 💾 RAM: {vm.used // 1024**2}/{vm.total // 1024**2} MB used ({vm.percent}%)")
    # Disk usage of the root filesystem, reported in GB.
    du = psutil.disk_usage('/')
    print(f" 💿 Disk: {du.used // 1024**3}/{du.total // 1024**3} GB used")
def main():
    """CLI entry point: run all GPU diagnostics, then optional stress test.

    Exercises every check in this module, prints a human-readable summary,
    and finally reports whether the systemd miner service is active.
    """
    parser = argparse.ArgumentParser(description="GPU Access Test for AITBC Miner")
    parser.add_argument("--stress", type=int, default=0, help="Run stress test for N seconds")
    parser.add_argument("--all", action="store_true", help="Run all tests including stress")
    args = parser.parse_args()
    print("🚀 AITBC GPU Access Test")
    print("=" * 60)
    # Check system resources
    check_system_resources()
    # Check GPU availability through three independent paths.
    has_nvidia = check_nvidia_gpu()
    has_cuda = check_cuda()
    has_pytorch = check_pytorch()
    # Summary
    print("\n📊 SUMMARY")
    print("=" * 60)
    if has_nvidia or has_cuda or has_pytorch:
        print("✅ GPU is available for mining!")
        if args.stress > 0 or args.all:
            # --all without --stress falls back to a 10-second run.
            run_gpu_stress_test(args.stress if args.stress > 0 else 10)
        print("\n💡 Miner can run GPU-intensive tasks:")
        print(" • Model inference (LLaMA, Stable Diffusion)")
        print(" • Training jobs")
        print(" • Batch processing")
    else:
        print("❌ No GPU available - miner will run in CPU-only mode")
        print("\n💡 To enable GPU mining:")
        print(" 1. Install NVIDIA drivers")
        print(" 2. Install CUDA toolkit")
        print(" 3. Install PyTorch with CUDA: pip install torch")
    # Check if miner service is running
    print("\n🔍 Checking miner service...")
    try:
        result = subprocess.run(
            ["systemctl", "is-active", "aitbc-gpu-miner"],
            capture_output=True,
            text=True
        )
        if result.stdout.strip() == "active":
            print("✅ Miner service is running")
        else:
            print("⚠️ Miner service is not running")
            print(" Start with: sudo systemctl start aitbc-gpu-miner")
    except Exception:
        # Was a bare `except:` — narrowed so Ctrl-C/SystemExit still propagate.
        # Typically FileNotFoundError on hosts without systemctl.
        print("⚠️ Could not check miner service status")
if __name__ == "__main__":
    main()

286
cli/tests/gpu/miner_gpu_test.py Executable file
View File

@@ -0,0 +1,286 @@
#!/usr/bin/env python3
"""
Miner GPU Test - Test if the miner service can access and utilize GPU
"""
import argparse
import httpx
import json
import time
import sys
# Configuration
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${MINER_API_KEY}"
DEFAULT_MINER_ID = "localhost-gpu-miner"
def test_miner_registration(coordinator_url):
    """Register the miner with a static GPU capability profile; True on HTTP 200."""
    print("📝 Testing Miner Registration...")
    # Hard-coded capability advertisement sent to the coordinator.
    gpu_capabilities = {
        "gpu": {
            "model": "NVIDIA GeForce RTX 4060 Ti",
            "memory_gb": 16,
            "cuda_version": "12.1",
            "compute_capability": "8.9"
        },
        "compute": {
            "type": "GPU",
            "platform": "CUDA",
            "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
            "max_concurrent_jobs": 1
        }
    }
    try:
        with httpx.Client() as client:
            reply = client.post(
                f"{coordinator_url}/v1/miners/register?miner_id={DEFAULT_MINER_ID}",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": DEFAULT_API_KEY
                },
                json={"capabilities": gpu_capabilities}
            )
            if reply.status_code != 200:
                print(f"❌ Registration failed: {reply.status_code}")
                print(f" Response: {reply.text}")
                return False
            print("✅ Miner registered with GPU capabilities")
            print(f" GPU Model: {gpu_capabilities['gpu']['model']}")
            print(f" Memory: {gpu_capabilities['gpu']['memory_gb']} GB")
            print(f" CUDA: {gpu_capabilities['gpu']['cuda_version']}")
            return True
    except Exception as e:
        # Network/connection errors land here — treated as a test failure.
        print(f"❌ Error: {e}")
        return False
def test_job_processing(coordinator_url):
    """Test if miner can process a GPU job

    Drives the full job lifecycle against the coordinator: submit a job as a
    client, poll for it as the miner, simulate the GPU work, then upload a
    result. Returns True only when every step succeeds.
    """
    print("\n⚙️ Testing GPU Job Processing...")
    # First submit a test job
    print(" 1. Submitting test job...")
    try:
        with httpx.Client() as client:
            # Submit job as client
            job_response = client.post(
                f"{coordinator_url}/v1/jobs",
                headers={
                    "Content-Type": "application/json",
                    # NOTE(review): placeholder literal — presumably substituted
                    # at deploy time; verify it is not sent verbatim.
                    "X-Api-Key": "${CLIENT_API_KEY}"
                },
                json={
                    "payload": {
                        "type": "inference",
                        "task": "gpu-test",
                        "model": "test-gpu-model",
                        "parameters": {
                            "require_gpu": True,
                            "memory_gb": 8
                        }
                    },
                    "ttl_seconds": 300
                }
            )
            # Coordinator answers 201 Created for an accepted job.
            if job_response.status_code != 201:
                print(f"❌ Failed to submit job: {job_response.status_code}")
                return False
            job_id = job_response.json()['job_id']
            print(f" ✅ Job submitted: {job_id}")
            # Poll for the job as miner
            print(" 2. Polling for job...")
            poll_response = client.post(
                f"{coordinator_url}/v1/miners/poll",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": DEFAULT_API_KEY
                },
                json={"max_wait_seconds": 5}
            )
            if poll_response.status_code == 200:
                job = poll_response.json()
                print(f" ✅ Job received: {job['job_id']}")
                # Simulate GPU processing
                print(" 3. Simulating GPU processing...")
                time.sleep(2)
                # Submit result
                print(" 4. Submitting result...")
                result_response = client.post(
                    f"{coordinator_url}/v1/miners/{job['job_id']}/result",
                    headers={
                        "Content-Type": "application/json",
                        "X-Api-Key": DEFAULT_API_KEY
                    },
                    json={
                        # Fabricated result payload matching the simulated run above.
                        "result": {
                            "status": "completed",
                            "output": "GPU task completed successfully",
                            "execution_time_ms": 2000,
                            "gpu_utilization": 85,
                            "memory_used_mb": 4096
                        },
                        "metrics": {
                            "compute_time": 2.0,
                            "energy_used": 0.05,
                            "aitbc_earned": 25.0
                        }
                    }
                )
                if result_response.status_code == 200:
                    print(" ✅ Result submitted successfully")
                    print(f" 💰 Earned: 25.0 AITBC")
                    return True
                else:
                    print(f"❌ Failed to submit result: {result_response.status_code}")
                    return False
            elif poll_response.status_code == 204:
                # 204 No Content: the queue had no job for this miner.
                print(" ⚠️ No jobs available")
                return False
            else:
                print(f"❌ Poll failed: {poll_response.status_code}")
                return False
    except Exception as e:
        # Any transport/parsing error fails the whole scenario.
        print(f"❌ Error: {e}")
        return False
def test_gpu_heartbeat(coordinator_url):
    """Send one heartbeat carrying synthetic GPU telemetry; True on HTTP 200."""
    print("\n💓 Testing GPU Heartbeat...")
    # Synthetic telemetry payload the coordinator should accept.
    heartbeat_data = {
        "status": "ONLINE",
        "inflight": 0,
        "metadata": {
            "last_seen": time.time(),
            "gpu_utilization": 45,
            "gpu_memory_used": 8192,
            "gpu_temperature": 68,
            "gpu_power_usage": 220,
            "cuda_version": "12.1",
            "driver_version": "535.104.05"
        }
    }
    try:
        with httpx.Client() as client:
            reply = client.post(
                f"{coordinator_url}/v1/miners/heartbeat?miner_id={DEFAULT_MINER_ID}",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": DEFAULT_API_KEY
                },
                json=heartbeat_data
            )
            if reply.status_code != 200:
                print(f"❌ Heartbeat failed: {reply.status_code}")
                return False
            # Echo back the values we just reported for operator visibility.
            print("✅ GPU heartbeat sent successfully")
            print(f" GPU Utilization: {heartbeat_data['metadata']['gpu_utilization']}%")
            print(f" Memory Used: {heartbeat_data['metadata']['gpu_memory_used']} MB")
            print(f" Temperature: {heartbeat_data['metadata']['gpu_temperature']}°C")
            return True
    except Exception as e:
        print(f"❌ Error: {e}")
        return False
def check_blockchain_status(coordinator_url):
    """Fetch explorer blocks and print the latest few; True on HTTP 200."""
    print("\n📦 Checking Blockchain Status...")
    try:
        with httpx.Client() as client:
            reply = client.get(f"{coordinator_url}/v1/explorer/blocks")
            if reply.status_code != 200:
                print(f"❌ Failed to get blocks: {reply.status_code}")
                return False
            payload = reply.json()
            print(f"✅ Found {len(payload['items'])} blocks")
            # Show latest blocks (at most three).
            for block in payload['items'][:3]:
                print(f"\n Block {block['height']}:")
                print(f" Hash: {block['hash']}")
                print(f" Proposer: {block['proposer']}")
                print(f" Time: {block['timestamp']}")
            return True
    except Exception as e:
        print(f"❌ Error: {e}")
        return False
def main():
    """Run the miner GPU test suite against a coordinator and print a scorecard."""
    parser = argparse.ArgumentParser(description="Test Miner GPU Access")
    parser.add_argument("--url", help="Coordinator URL")
    parser.add_argument("--full", action="store_true", help="Run full test suite")
    args = parser.parse_args()
    coordinator_url = args.url or DEFAULT_COORDINATOR
    print("🚀 AITBC Miner GPU Test")
    print("=" * 60)
    print(f"Coordinator: {coordinator_url}")
    print(f"Miner ID: {DEFAULT_MINER_ID}")
    print()
    # Base suite; --full appends the slower end-to-end scenarios.
    suite = [
        ("Miner Registration", lambda: test_miner_registration(coordinator_url)),
        ("GPU Heartbeat", lambda: test_gpu_heartbeat(coordinator_url)),
    ]
    if args.full:
        suite.append(("Job Processing", lambda: test_job_processing(coordinator_url)))
        suite.append(("Blockchain Status", lambda: check_blockchain_status(coordinator_url)))
    outcomes = []
    for label, runner in suite:
        print(f"🧪 Running: {label}")
        outcomes.append((label, runner()))
        print()
    # Summary
    print("📊 TEST RESULTS")
    print("=" * 60)
    passed = sum(1 for _, ok in outcomes if ok)
    for label, ok in outcomes:
        print(f"{'✅ PASS' if ok else '❌ FAIL'} {label}")
    print(f"\nScore: {passed}/{len(outcomes)} tests passed")
    if passed == len(outcomes):
        print("\n🎉 All tests passed! Miner is ready for GPU mining.")
        print("\n💡 Next steps:")
        print(" 1. Start continuous mining: python3 cli/miner.py mine")
        print(" 2. Monitor earnings: cd home/miner && python3 wallet.py balance")
    else:
        print("\n⚠️ Some tests failed. Check the errors above.")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,84 @@
#!/usr/bin/env python3
"""
Simple GPU Access Test - Verify miner can access GPU
"""
import subprocess
import sys
def main():
    """Minimal GPU readiness check: nvidia-smi, PyTorch CUDA, a compute smoke
    test, and the systemd miner service.

    Exits with status 1 as soon as a mandatory check fails; service-status
    problems are reported as warnings only.
    """
    print("🔍 GPU Access Test for AITBC Miner")
    print("=" * 50)
    # Check if nvidia-smi is available
    print("\n1. Checking NVIDIA GPU...")
    try:
        result = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
            capture_output=True,
            text=True
        )
        if result.returncode == 0:
            gpu_info = result.stdout.strip()
            print(f"✅ GPU Found: {gpu_info}")
        else:
            print("❌ No NVIDIA GPU detected")
            sys.exit(1)
    except FileNotFoundError:
        print("❌ nvidia-smi not found")
        sys.exit(1)
    # Check CUDA with PyTorch
    print("\n2. Checking CUDA with PyTorch...")
    try:
        import torch
        if torch.cuda.is_available():
            print(f"✅ CUDA Available: {torch.version.cuda}")
            print(f" GPU Count: {torch.cuda.device_count()}")
            device = torch.device('cuda')
            # Test computation
            print("\n3. Testing GPU computation...")
            a = torch.randn(1000, 1000, device=device)
            b = torch.randn(1000, 1000, device=device)
            c = torch.mm(a, b)
            print("✅ GPU computation successful")
            # Check memory
            memory_allocated = torch.cuda.memory_allocated() / 1024**2
            print(f" Memory used: {memory_allocated:.2f} MB")
        else:
            print("❌ CUDA not available in PyTorch")
            sys.exit(1)
    except ImportError:
        print("❌ PyTorch not installed")
        sys.exit(1)
    # Check miner service
    print("\n4. Checking miner service...")
    try:
        result = subprocess.run(
            ["systemctl", "is-active", "aitbc-gpu-miner"],
            capture_output=True,
            text=True
        )
        if result.stdout.strip() == "active":
            print("✅ Miner service is running")
        else:
            print("⚠️ Miner service is not running")
    except Exception:
        # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # propagate; typically FileNotFoundError when systemctl is absent.
        print("⚠️ Could not check miner service")
    print("\n✅ GPU access test completed!")
    print("\n💡 Your GPU is ready for mining AITBC!")
    print(" Start mining with: python3 cli/miner.py mine")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,294 @@
#!/usr/bin/env python3
"""
GPU Marketplace Bids Test
Tests complete marketplace bid workflow: offers listing → bid submission → bid tracking.
"""
import argparse
import sys
import time
from typing import Optional
import httpx
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
DEFAULT_PROVIDER = "test_miner_123"
DEFAULT_CAPACITY = 100
DEFAULT_PRICE = 0.05
DEFAULT_TIMEOUT = 300
POLL_INTERVAL = 5
def list_offers(client: httpx.Client, base_url: str, api_key: str,
                status: Optional[str] = None, gpu_model: Optional[str] = None) -> Optional[dict]:
    """Fetch up to 20 marketplace offers, optionally filtered; None on failure."""
    query = {"limit": 20}
    # Only include filters the caller actually supplied.
    if status:
        query["status"] = status
    if gpu_model:
        query["gpu_model"] = gpu_model
    reply = client.get(
        f"{base_url}/v1/marketplace/offers",
        headers={"X-Api-Key": api_key},
        params=query,
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to list offers: {reply.status_code} {reply.text}")
    return None
def submit_bid(client: httpx.Client, base_url: str, api_key: str,
               provider: str, capacity: int, price: float,
               notes: Optional[str] = None) -> Optional[str]:
    """POST a marketplace bid; return its id, or None if not accepted (202)."""
    body = {
        "provider": provider,
        "capacity": capacity,
        "price": price
    }
    if notes:
        body["notes"] = notes
    reply = client.post(
        f"{base_url}/v1/marketplace/bids",
        headers={"X-Api-Key": api_key, "Content-Type": "application/json"},
        json=body,
        timeout=10,
    )
    # Bids are processed asynchronously — success is 202 Accepted.
    if reply.status_code == 202:
        return reply.json().get("id")
    print(f"❌ Bid submission failed: {reply.status_code} {reply.text}")
    return None
def list_bids(client: httpx.Client, base_url: str, api_key: str,
              status: Optional[str] = None, provider: Optional[str] = None) -> Optional[dict]:
    """Fetch up to 20 marketplace bids, optionally filtered; None on failure."""
    query = {"limit": 20}
    # Attach optional filters only when provided.
    if status:
        query["status"] = status
    if provider:
        query["provider"] = provider
    reply = client.get(
        f"{base_url}/v1/marketplace/bids",
        headers={"X-Api-Key": api_key},
        params=query,
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to list bids: {reply.status_code} {reply.text}")
    return None
def get_bid_details(client: httpx.Client, base_url: str, api_key: str, bid_id: str) -> Optional[dict]:
    """Fetch one bid by id; None on any non-200 response."""
    reply = client.get(
        f"{base_url}/v1/marketplace/bids/{bid_id}",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to get bid details: {reply.status_code} {reply.text}")
    return None
def get_marketplace_stats(client: httpx.Client, base_url: str, api_key: str) -> Optional[dict]:
    """Fetch aggregate marketplace statistics; None on any non-200 response."""
    reply = client.get(
        f"{base_url}/v1/marketplace/stats",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to get marketplace stats: {reply.status_code} {reply.text}")
    return None
def monitor_bid_status(client: httpx.Client, base_url: str, api_key: str,
                       bid_id: str, timeout: int) -> Optional[str]:
    """Poll a bid every POLL_INTERVAL seconds until it is accepted/rejected.

    Returns the terminal status string, or None on fetch failure or timeout.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        details = get_bid_details(client, base_url, api_key, bid_id)
        if not details:
            # Could not fetch the bid — abort monitoring.
            return None
        current = details.get("status")
        print(f"⏳ Bid status: {current}")
        if current in {"accepted", "rejected"}:
            return current
        time.sleep(POLL_INTERVAL)
    print("❌ Bid status monitoring timed out")
    return None
def test_basic_workflow(client: httpx.Client, base_url: str, api_key: str,
                        provider: str, capacity: int, price: float) -> bool:
    """Test basic marketplace bid workflow

    Sequence: list open offers → submit one bid → read it back by id →
    confirm it appears in the provider-filtered bid listing. Returns True
    only if every step succeeds.
    """
    print("🧪 Testing basic marketplace bid workflow...")
    # Step 1: List available offers
    print("📋 Listing marketplace offers...")
    offers = list_offers(client, base_url, api_key, status="open")
    if not offers:
        print("❌ Failed to list offers")
        return False
    offers_list = offers.get("offers", [])
    print(f"✅ Found {len(offers_list)} open offers")
    if offers_list:
        print("📊 Sample offers:")
        for i, offer in enumerate(offers_list[:3]):  # Show first 3 offers
            print(f" {i+1}. {offer.get('gpu_model', 'Unknown')} - ${offer.get('price', 0):.4f}/hr - {offer.get('provider', 'Unknown')}")
    # Step 2: Submit bid
    print(f"💰 Submitting bid: {capacity} units at ${price:.4f}/unit from {provider}")
    bid_id = submit_bid(client, base_url, api_key, provider, capacity, price,
                        notes="Test bid for GPU marketplace")
    if not bid_id:
        print("❌ Failed to submit bid")
        return False
    print(f"✅ Bid submitted: {bid_id}")
    # Step 3: Get bid details
    print("📄 Getting bid details...")
    bid_details = get_bid_details(client, base_url, api_key, bid_id)
    if not bid_details:
        print("❌ Failed to get bid details")
        return False
    print(f"✅ Bid details: {bid_details['provider']} - {bid_details['capacity']} units - ${bid_details['price']:.4f}/unit - {bid_details['status']}")
    # Step 4: List bids to verify it appears
    print("📋 Listing bids to verify...")
    bids = list_bids(client, base_url, api_key, provider=provider)
    if not bids:
        print("❌ Failed to list bids")
        return False
    bids_list = bids.get("bids", [])
    # The listing is capped at 20 entries — the fresh bid is expected there.
    our_bid = next((b for b in bids_list if b.get("id") == bid_id), None)
    if not our_bid:
        print("❌ Submitted bid not found in bid list")
        return False
    print(f"✅ Bid found in list: {our_bid['status']}")
    return True
def test_competitive_bidding(client: httpx.Client, base_url: str, api_key: str) -> bool:
    """Test competitive bidding scenario with multiple providers

    Submits three bids at descending prices from distinct provider ids, then
    lists all bids and prints the competing entries sorted by price.
    """
    print("🧪 Testing competitive bidding scenario...")
    # Submit multiple bids from different providers
    providers = ["provider_alpha", "provider_beta", "provider_gamma"]
    bid_ids = []
    for i, provider in enumerate(providers):
        price = 0.05 - (i * 0.01)  # Decreasing prices
        print(f"💰 {provider} submitting bid at ${price:.4f}/unit")
        bid_id = submit_bid(client, base_url, api_key, provider, 50, price,
                            notes=f"Competitive bid from {provider}")
        if not bid_id:
            print(f"{provider} failed to submit bid")
            return False
        bid_ids.append((provider, bid_id))
        time.sleep(1)  # Small delay between submissions
    print(f"✅ All {len(bid_ids)} competitive bids submitted")
    # List all bids to see the competition
    all_bids = list_bids(client, base_url, api_key)
    if not all_bids:
        print("❌ Failed to list all bids")
        return False
    bids_list = all_bids.get("bids", [])
    # Keep only the bids this scenario created.
    competitive_bids = [b for b in bids_list if b.get("provider") in providers]
    print(f"📊 Found {len(competitive_bids)} competitive bids:")
    # Cheapest first — the expected winner leads the printout.
    for bid in sorted(competitive_bids, key=lambda x: x.get("price", 0)):
        print(f" {bid['provider']}: ${bid['price']:.4f}/unit - {bid['status']}")
    return True
def test_marketplace_stats(client: httpx.Client, base_url: str, api_key: str) -> bool:
    """Fetch marketplace statistics and print the headline numbers."""
    print("🧪 Testing marketplace statistics...")
    snapshot = get_marketplace_stats(client, base_url, api_key)
    if not snapshot:
        print("❌ Failed to get marketplace stats")
        return False
    # Missing fields default to 0 so a sparse payload still prints cleanly.
    print("📊 Marketplace Statistics:")
    print(f" Total offers: {snapshot.get('totalOffers', 0)}")
    print(f" Open capacity: {snapshot.get('openCapacity', 0)}")
    print(f" Average price: ${snapshot.get('averagePrice', 0):.4f}")
    print(f" Active bids: {snapshot.get('activeBids', 0)}")
    return True
def main() -> int:
    """CLI entry point for the marketplace bid tests; returns a process exit code."""
    parser = argparse.ArgumentParser(description="GPU marketplace bids end-to-end test")
    parser.add_argument("--url", default=DEFAULT_COORDINATOR, help="Coordinator base URL")
    parser.add_argument("--api-key", default=DEFAULT_API_KEY, help="Client API key")
    parser.add_argument("--provider", default=DEFAULT_PROVIDER, help="Provider ID for bids")
    parser.add_argument("--capacity", type=int, default=DEFAULT_CAPACITY, help="Bid capacity")
    parser.add_argument("--price", type=float, default=DEFAULT_PRICE, help="Price per unit")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Timeout in seconds")
    parser.add_argument("--test", choices=["basic", "competitive", "stats", "all"],
                        default="all", help="Test scenario to run")
    args = parser.parse_args()
    with httpx.Client() as client:
        print("🚀 Starting GPU marketplace bids test...")
        print(f"📍 Coordinator: {args.url}")
        print(f"🆔 Provider: {args.provider}")
        print(f"💰 Bid: {args.capacity} units at ${args.price:.4f}/unit")
        print()
        success = True
        # Each scenario runs when selected explicitly or via "all";
        # a single failure flips the overall result.
        if args.test in ["basic", "all"]:
            success &= test_basic_workflow(client, args.url, args.api_key,
                                           args.provider, args.capacity, args.price)
            print()
        if args.test in ["competitive", "all"]:
            success &= test_competitive_bidding(client, args.url, args.api_key)
            print()
        if args.test in ["stats", "all"]:
            success &= test_marketplace_stats(client, args.url, args.api_key)
            print()
        if not success:
            print("❌ Some marketplace bid tests failed!")
            return 1
        print("✅ All marketplace bid tests completed successfully!")
        return 0
if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,361 @@
#!/usr/bin/env python3
"""
Exchange End-to-End Test
Tests complete Bitcoin exchange workflow: rates → payment creation → monitoring → confirmation.
"""
import argparse
import sys
import time
from typing import Optional
import httpx
DEFAULT_COORDINATOR = "http://localhost:8000"
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
DEFAULT_USER_ID = "e2e_test_user"
DEFAULT_AITBC_AMOUNT = 1000
DEFAULT_TIMEOUT = 300
POLL_INTERVAL = 10
def get_exchange_rates(client: httpx.Client, base_url: str) -> Optional[dict]:
    """Fetch the current BTC↔AITBC exchange rates; None on a non-200 response."""
    reply = client.get(
        f"{base_url}/v1/exchange/rates",
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to get exchange rates: {reply.status_code} {reply.text}")
    return None
def create_payment(client: httpx.Client, base_url: str, user_id: str,
                   aitbc_amount: float, btc_amount: Optional[float] = None,
                   notes: Optional[str] = None) -> Optional[dict]:
    """Create a Bitcoin payment request for *aitbc_amount* AITBC.

    When *btc_amount* is omitted, it is derived from the live exchange rate.
    Returns the coordinator's payment record, or None on any failure.
    """
    # Fix: use an explicit `is None` sentinel check. The previous `if not
    # btc_amount:` silently recomputed an explicitly passed 0.0 (or any
    # falsy value) from the rates instead of sending it through.
    if btc_amount is None:
        # Get rates to calculate BTC amount
        rates = get_exchange_rates(client, base_url)
        if not rates:
            return None
        btc_amount = aitbc_amount / rates['btc_to_aitbc']
    payload = {
        "user_id": user_id,
        "aitbc_amount": aitbc_amount,
        "btc_amount": btc_amount
    }
    if notes:
        payload["notes"] = notes
    response = client.post(
        f"{base_url}/v1/exchange/create-payment",
        json=payload,
        timeout=10,
    )
    if response.status_code != 200:
        print(f"❌ Failed to create payment: {response.status_code} {response.text}")
        return None
    return response.json()
def get_payment_status(client: httpx.Client, base_url: str, payment_id: str) -> Optional[dict]:
    """Look up one payment's status record; None on a non-200 response."""
    reply = client.get(
        f"{base_url}/v1/exchange/payment-status/{payment_id}",
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to get payment status: {reply.status_code} {reply.text}")
    return None
def confirm_payment(client: httpx.Client, base_url: str, payment_id: str,
                    tx_hash: str) -> Optional[dict]:
    """Mark a payment confirmed with *tx_hash* (simulated on-chain confirmation)."""
    reply = client.post(
        f"{base_url}/v1/exchange/confirm-payment/{payment_id}",
        json={"tx_hash": tx_hash},
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to confirm payment: {reply.status_code} {reply.text}")
    return None
def get_market_stats(client: httpx.Client, base_url: str) -> Optional[dict]:
    """Fetch aggregate exchange market statistics; None on a non-200 response."""
    reply = client.get(
        f"{base_url}/v1/exchange/market-stats",
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to get market stats: {reply.status_code} {reply.text}")
    return None
def get_wallet_balance(client: httpx.Client, base_url: str) -> Optional[dict]:
    """Fetch the exchange's Bitcoin wallet balance; None on a non-200 response."""
    reply = client.get(
        f"{base_url}/v1/exchange/wallet/balance",
        timeout=10,
    )
    if reply.status_code == 200:
        return reply.json()
    print(f"❌ Failed to get wallet balance: {reply.status_code} {reply.text}")
    return None
def monitor_payment_confirmation(client: httpx.Client, base_url: str,
                                 payment_id: str, timeout: int) -> Optional[str]:
    """Poll a payment every POLL_INTERVAL seconds until confirmed/expired.

    Returns the terminal status string, or None on fetch failure or timeout.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        record = get_payment_status(client, base_url, payment_id)
        if not record:
            # Status lookup itself failed — stop monitoring.
            return None
        current = record.get("status")
        print(f"⏳ Payment status: {current}")
        if current == "confirmed":
            return current
        if current == "expired":
            print("❌ Payment expired")
            return current
        time.sleep(POLL_INTERVAL)
    print("❌ Payment monitoring timed out")
    return None
def test_basic_exchange_workflow(client: httpx.Client, base_url: str, user_id: str,
                                 aitbc_amount: float) -> bool:
    """Test basic exchange workflow

    Sequence: fetch rates → create a payment → check initial status →
    simulate on-chain confirmation → verify the status flips to
    "confirmed". Returns True only if every step succeeds.
    """
    print("🧪 Testing basic exchange workflow...")
    # Step 1: Get exchange rates
    print("💱 Getting exchange rates...")
    rates = get_exchange_rates(client, base_url)
    if not rates:
        print("❌ Failed to get exchange rates")
        return False
    print(f"✅ Exchange rates: 1 BTC = {rates['btc_to_aitbc']:,} AITBC")
    print(f" Fee: {rates['fee_percent']}%")
    # Step 2: Create payment
    print(f"💰 Creating payment for {aitbc_amount} AITBC...")
    payment = create_payment(client, base_url, user_id, aitbc_amount,
                             notes="E2E test payment")
    if not payment:
        print("❌ Failed to create payment")
        return False
    print(f"✅ Payment created: {payment['payment_id']}")
    print(f" Send {payment['btc_amount']:.8f} BTC to: {payment['payment_address']}")
    print(f" Expires at: {payment['expires_at']}")
    # Step 3: Check initial payment status
    print("📋 Checking initial payment status...")
    status = get_payment_status(client, base_url, payment['payment_id'])
    if not status:
        print("❌ Failed to get payment status")
        return False
    print(f"✅ Initial status: {status['status']}")
    # Step 4: Simulate payment confirmation
    print("🔗 Simulating blockchain confirmation...")
    # Synthetic tx hash — unique per run thanks to the timestamp suffix.
    tx_hash = f"test_tx_{int(time.time())}"
    confirmation = confirm_payment(client, base_url, payment['payment_id'], tx_hash)
    if not confirmation:
        print("❌ Failed to confirm payment")
        return False
    print(f"✅ Payment confirmed with transaction: {tx_hash}")
    # Step 5: Verify final status
    print("📄 Verifying final payment status...")
    final_status = get_payment_status(client, base_url, payment['payment_id'])
    if not final_status:
        print("❌ Failed to get final payment status")
        return False
    if final_status['status'] != 'confirmed':
        print(f"❌ Expected confirmed status, got: {final_status['status']}")
        return False
    print(f"✅ Payment confirmed! AITBC amount: {final_status['aitbc_amount']}")
    return True
def test_market_statistics(client: httpx.Client, base_url: str) -> bool:
    """Fetch exchange market statistics and print the headline numbers."""
    print("🧪 Testing market statistics...")
    snapshot = get_market_stats(client, base_url)
    if not snapshot:
        print("❌ Failed to get market stats")
        return False
    # The stats payload is assumed to contain all of these keys —
    # a KeyError here would surface as an unhandled exception.
    print("📊 Market Statistics:")
    print(f" Current price: ${snapshot['price']:.8f} per AITBC")
    print(f" 24h change: {snapshot['price_change_24h']:+.2f}%")
    print(f" Daily volume: {snapshot['daily_volume']:,} AITBC")
    print(f" Daily volume (BTC): {snapshot['daily_volume_btc']:.8f} BTC")
    print(f" Total payments: {snapshot['total_payments']}")
    print(f" Pending payments: {snapshot['pending_payments']}")
    return True
def test_wallet_operations(client: httpx.Client, base_url: str) -> bool:
    """Print the BTC wallet balance; tolerates an unavailable wallet service."""
    print("🧪 Testing wallet operations...")
    snapshot = get_wallet_balance(client, base_url)
    if not snapshot:
        print("❌ Failed to get wallet balance (service may be unavailable)")
        # Deliberately non-fatal: the wallet backend is optional here.
        return True
    print("💰 Wallet Balance:")
    print(f" Address: {snapshot['address']}")
    print(f" Balance: {snapshot['balance']:.8f} BTC")
    print(f" Unconfirmed: {snapshot['unconfirmed_balance']:.8f} BTC")
    print(f" Total received: {snapshot['total_received']:.8f} BTC")
    print(f" Total sent: {snapshot['total_sent']:.8f} BTC")
    return True
def test_multiple_payments_scenario(client: httpx.Client, base_url: str,
                                    user_id: str) -> bool:
    """Test multiple payments scenario

    Creates three payments of different amounts (each under a suffixed
    user id), confirms all of them, then re-reads the market stats.
    """
    print("🧪 Testing multiple payments scenario...")
    # Create multiple payments
    payment_amounts = [500, 1000, 1500]
    payment_ids = []
    for i, amount in enumerate(payment_amounts):
        print(f"💰 Creating payment {i+1}: {amount} AITBC...")
        # Each payment gets its own user id: "<user_id>_0", "<user_id>_1", ...
        payment = create_payment(client, base_url, f"{user_id}_{i}", amount,
                                 notes=f"Multi-payment test {i+1}")
        if not payment:
            print(f"❌ Failed to create payment {i+1}")
            return False
        payment_ids.append(payment['payment_id'])
        print(f"✅ Payment {i+1} created: {payment['payment_id']}")
        time.sleep(1)  # Small delay between payments
    # Confirm all payments
    for i, payment_id in enumerate(payment_ids):
        print(f"🔗 Confirming payment {i+1}...")
        tx_hash = f"multi_tx_{i}_{int(time.time())}"
        confirmation = confirm_payment(client, base_url, payment_id, tx_hash)
        if not confirmation:
            print(f"❌ Failed to confirm payment {i+1}")
            return False
        print(f"✅ Payment {i+1} confirmed")
        time.sleep(0.5)
    # Check updated market stats
    print("📊 Checking updated market statistics...")
    # Stats failure here is non-fatal — the scenario already succeeded.
    final_stats = get_market_stats(client, base_url)
    if final_stats:
        print(f"✅ Final stats: {final_stats['total_payments']} total payments")
    return True
def test_error_scenarios(client: httpx.Client, base_url: str) -> bool:
    """Verify the API rejects malformed or unknown payment requests."""
    print("🧪 Testing error scenarios...")
    # A negative amount must be refused.
    print("❌ Testing invalid payment creation...")
    if create_payment(client, base_url, "test_user", -100):
        print("❌ Expected error for negative amount, but got success")
        return False
    print("✅ Correctly rejected negative amount")
    # Status lookups for unknown IDs must fail.
    print("❌ Testing non-existent payment status...")
    if get_payment_status(client, base_url, "fake_payment_id"):
        print("❌ Expected error for fake payment ID, but got success")
        return False
    print("✅ Correctly rejected fake payment ID")
    # Confirming an unknown payment must fail.
    print("❌ Testing invalid payment confirmation...")
    if confirm_payment(client, base_url, "fake_payment_id", "fake_tx"):
        print("❌ Expected error for fake payment confirmation, but got success")
        return False
    print("✅ Correctly rejected fake payment confirmation")
    return True
def main() -> int:
    """Parse CLI arguments and run the selected exchange test scenarios.

    Returns:
        0 when every selected scenario passes, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description="Exchange end-to-end test")
    parser.add_argument("--url", default=DEFAULT_COORDINATOR, help="Coordinator base URL")
    parser.add_argument("--api-key", default=DEFAULT_API_KEY, help="Client API key")
    parser.add_argument("--user-id", default=DEFAULT_USER_ID, help="User ID for payments")
    parser.add_argument("--aitbc-amount", type=float, default=DEFAULT_AITBC_AMOUNT, help="AITBC amount for test payment")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Timeout in seconds")
    parser.add_argument("--test", choices=["basic", "stats", "wallet", "multi", "errors", "all"],
                        default="all", help="Test scenario to run")
    args = parser.parse_args()
    with httpx.Client() as client:
        print("🚀 Starting Exchange end-to-end test...")
        print(f"📍 Coordinator: {args.url}")
        print(f"🆔 User ID: {args.user_id}")
        print(f"💰 Test amount: {args.aitbc_amount} AITBC")
        print()
        # Scenario table keeps selection logic in one place; order matters.
        scenarios = [
            ("basic", lambda: test_basic_exchange_workflow(
                client, args.url, args.user_id, args.aitbc_amount)),
            ("stats", lambda: test_market_statistics(client, args.url)),
            ("wallet", lambda: test_wallet_operations(client, args.url)),
            ("multi", lambda: test_multiple_payments_scenario(
                client, args.url, args.user_id)),
            ("errors", lambda: test_error_scenarios(client, args.url)),
        ]
        success = True
        for name, runner in scenarios:
            if args.test in (name, "all"):
                success &= runner()
                print()
        if success:
            print("✅ All exchange tests completed successfully!")
            return 0
        print("❌ Some exchange tests failed!")
        return 1
if __name__ == "__main__":
    # SystemExit carries main()'s return code, same as sys.exit(main()).
    raise SystemExit(main())

View File

@@ -0,0 +1,109 @@
#!/usr/bin/env python3
"""
Complete AITBC workflow test - Client submits job, miner processes it, earns AITBC
"""
import subprocess
import time
import sys
import os
def run_command(cmd, description):
    """Execute *cmd* in a shell, echo its output, and report success.

    Prints a banner containing *description*, then the command's stdout
    and (if any) stderr.

    Returns:
        True when the command exited with status 0, False otherwise.
    """
    banner = '=' * 60
    print(f"\n{banner}")
    print(f"🔧 {description}")
    print(f"{banner}")
    completed = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    print(completed.stdout)
    if completed.stderr:
        print(f"Errors: {completed.stderr}")
    return completed.returncode == 0
def main():
    """Drive the complete AITBC workflow end to end.

    Registers a miner, submits an inference job, polls until a miner
    picks it up, then inspects blocks and the wallet. Each step shells
    out to the sibling CLI scripts via run_command.
    """
    print("🚀 AITBC Complete Workflow Test")
    print("=" * 60)
    # Resolve the sibling CLI scripts relative to this file.
    cli_dir = os.path.dirname(os.path.abspath(__file__))
    # 1. Check current blocks
    run_command(
        f"python3 {cli_dir}/client.py blocks --limit 3",
        "Checking current blocks"
    )
    # 2. Register miner. The GPU model is quoted so that "RTX 4090"
    # reaches the script as a single argument (shell=True would otherwise
    # split it and pass "4090" as a stray positional arg).
    run_command(
        f"python3 {cli_dir}/miner.py register --gpu 'RTX 4090' --memory 24",
        "Registering miner"
    )
    # 3. Submit a job from client
    run_command(
        f"python3 {cli_dir}/client.py submit inference --model llama-2-7b --prompt 'What is blockchain?'",
        "Client submitting inference job"
    )
    # 4. Miner polls for and processes the job
    print(f"\n{'='*60}")
    print("⛏️ Miner polling for job (will wait up to 10 seconds)...")
    print(f"{'='*60}")
    # Poll up to 5 times; stop as soon as a job id appears in the output.
    for i in range(5):
        result = subprocess.run(
            f"python3 {cli_dir}/miner.py poll --wait 2",
            shell=True,
            capture_output=True,
            text=True
        )
        print(result.stdout)
        if "job_id" in result.stdout:
            print("✅ Job found! Processing...")
            time.sleep(2)
            break
        if i < 4:
            print("💤 No job yet, trying again...")
            time.sleep(2)
    # 5. Check updated blocks
    run_command(
        f"python3 {cli_dir}/client.py blocks --limit 3",
        "Checking updated blocks (should show proposer)"
    )
    # 6. Check wallet
    run_command(
        f"python3 {cli_dir}/wallet.py balance",
        "Checking wallet balance"
    )
    # Add earnings manually (in real system, this would be automatic)
    run_command(
        f"python3 {cli_dir}/wallet.py earn 10.0 --job demo-job-123 --desc 'Inference task completed'",
        "Adding earnings to wallet"
    )
    # 7. Final wallet status
    run_command(
        f"python3 {cli_dir}/wallet.py history",
        "Showing transaction history"
    )
    print(f"\n{'='*60}")
    print("✅ Workflow test complete!")
    print("💡 Tips:")
    print("   - Use 'python3 cli/client.py --help' for client commands")
    print("   - Use 'python3 cli/miner.py --help' for miner commands")
    print("   - Use 'python3 cli/wallet.py --help' for wallet commands")
    print("   - Run 'python3 cli/miner.py mine' for continuous mining")
    print(f"{'='*60}")
# Entry point: run the whole workflow when executed as a script.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,3 @@
"""
Multi-chain tests
"""

View File

@@ -0,0 +1,442 @@
"""
Test for cross-chain agent communication system
"""
import asyncio
import pytest
from datetime import datetime, timedelta
from aitbc_cli.core.config import MultiChainConfig, NodeConfig
from aitbc_cli.core.agent_communication import (
CrossChainAgentCommunication, AgentInfo, AgentMessage,
MessageType, AgentStatus, AgentCollaboration, AgentReputation
)
def test_agent_communication_creation():
    """A fresh communication system keeps its config and starts empty."""
    cfg = MultiChainConfig()
    hub = CrossChainAgentCommunication(cfg)
    assert hub.config == cfg
    # Every internal registry must begin empty.
    for registry in (hub.agents, hub.messages, hub.collaborations,
                     hub.reputations, hub.routing_table):
        assert registry == {}
async def test_agent_registration():
    """Registering an agent records it and seeds its reputation entry."""
    hub = CrossChainAgentCommunication(MultiChainConfig())
    info = AgentInfo(
        agent_id="test-agent-1",
        name="Test Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading", "analytics"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0",
    )
    assert await hub.register_agent(info)
    # The agent is retrievable and its reputation was initialized
    # from the supplied score.
    assert "test-agent-1" in hub.agents
    assert hub.agents["test-agent-1"].name == "Test Agent"
    assert "test-agent-1" in hub.reputations
    assert hub.reputations["test-agent-1"].reputation_score == 0.8
async def test_agent_discovery():
    """Discovery filters agents by chain, capability, and activity."""
    hub = CrossChainAgentCommunication(MultiChainConfig())
    # (id, name, chain, node, status, capabilities, score, endpoint)
    specs = [
        ("agent-1", "Agent 1", "chain-1", "node-1", AgentStatus.ACTIVE,
         ["trading", "analytics"], 0.8, "http://localhost:8080"),
        ("agent-2", "Agent 2", "chain-1", "node-1", AgentStatus.ACTIVE,
         ["mining"], 0.7, "http://localhost:8081"),
        ("agent-3", "Agent 3", "chain-2", "node-2", AgentStatus.INACTIVE,
         ["trading"], 0.6, "http://localhost:8082"),
    ]
    for agent_id, name, chain, node, status, caps, score, endpoint in specs:
        await hub.register_agent(AgentInfo(
            agent_id=agent_id,
            name=name,
            chain_id=chain,
            node_id=node,
            status=status,
            capabilities=caps,
            reputation_score=score,
            last_seen=datetime.now(),
            endpoint=endpoint,
            version="1.0.0",
        ))
    # Chain filter: both chain-1 agents come back.
    chain1_agents = await hub.discover_agents("chain-1")
    assert len(chain1_agents) == 2
    assert all(agent.chain_id == "chain-1" for agent in chain1_agents)
    # Capability filter narrows to the single trading-capable agent.
    trading_agents = await hub.discover_agents("chain-1", ["trading"])
    assert len(trading_agents) == 1
    assert trading_agents[0].agent_id == "agent-1"
    # Discovery only ever returns active agents.
    active_agents = await hub.discover_agents("chain-1")
    assert all(agent.status == AgentStatus.ACTIVE for agent in active_agents)
async def test_message_sending():
    """A message between two same-chain agents is delivered immediately."""
    hub = CrossChainAgentCommunication(MultiChainConfig())

    def make_agent(agent_id, name, caps, score, endpoint):
        # Boilerplate shared by both on-chain agents.
        return AgentInfo(
            agent_id=agent_id,
            name=name,
            chain_id="chain-1",
            node_id="node-1",
            status=AgentStatus.ACTIVE,
            capabilities=caps,
            reputation_score=score,
            last_seen=datetime.now(),
            endpoint=endpoint,
            version="1.0.0",
        )

    await hub.register_agent(make_agent(
        "sender-agent", "Sender", ["trading"], 0.8, "http://localhost:8080"))
    await hub.register_agent(make_agent(
        "receiver-agent", "Receiver", ["analytics"], 0.7, "http://localhost:8081"))
    message = AgentMessage(
        message_id="test-message-1",
        sender_id="sender-agent",
        receiver_id="receiver-agent",
        message_type=MessageType.COMMUNICATION,
        chain_id="chain-1",
        target_chain_id=None,
        payload={"action": "test", "data": "hello"},
        timestamp=datetime.now(),
        signature="test-signature",
        priority=5,
        ttl_seconds=3600,
    )
    assert await hub.send_message(message)
    assert "test-message-1" in hub.messages
    # Same-chain delivery is synchronous, so nothing remains queued.
    assert len(hub.message_queue["receiver-agent"]) == 0
async def test_cross_chain_messaging():
    """A message targeting another chain is accepted and recorded."""
    hub = CrossChainAgentCommunication(MultiChainConfig())

    def make_agent(agent_id, name, chain, node, caps, score, endpoint):
        # Boilerplate shared by the two endpoints of the message.
        return AgentInfo(
            agent_id=agent_id,
            name=name,
            chain_id=chain,
            node_id=node,
            status=AgentStatus.ACTIVE,
            capabilities=caps,
            reputation_score=score,
            last_seen=datetime.now(),
            endpoint=endpoint,
            version="1.0.0",
        )

    await hub.register_agent(make_agent(
        "cross-chain-sender", "Cross Chain Sender", "chain-1", "node-1",
        ["trading"], 0.8, "http://localhost:8080"))
    await hub.register_agent(make_agent(
        "cross-chain-receiver", "Cross Chain Receiver", "chain-2", "node-2",
        ["analytics"], 0.7, "http://localhost:8081"))
    message = AgentMessage(
        message_id="cross-chain-message-1",
        sender_id="cross-chain-sender",
        receiver_id="cross-chain-receiver",
        message_type=MessageType.COMMUNICATION,
        chain_id="chain-1",
        target_chain_id="chain-2",  # Non-None target marks it cross-chain.
        payload={"action": "cross_chain_test", "data": "hello across chains"},
        timestamp=datetime.now(),
        signature="test-signature",
        priority=5,
        ttl_seconds=3600,
    )
    assert await hub.send_message(message)
    assert "cross-chain-message-1" in hub.messages
async def test_collaboration_creation():
    """Three agents across two chains can form an active collaboration."""
    hub = CrossChainAgentCommunication(MultiChainConfig())
    member_ids = []
    for n in range(3):
        info = AgentInfo(
            agent_id=f"collab-agent-{n+1}",
            name=f"Collab Agent {n+1}",
            chain_id=f"chain-{(n % 2) + 1}",  # Alternate across two chains.
            node_id=f"node-{(n % 2) + 1}",
            status=AgentStatus.ACTIVE,
            capabilities=["trading", "analytics"],
            reputation_score=0.8,
            last_seen=datetime.now(),
            endpoint=f"http://localhost:808{n}",
            version="1.0.0",
        )
        await hub.register_agent(info)
        member_ids.append(info.agent_id)
    collaboration_id = await hub.create_collaboration(
        member_ids,
        "research_project",
        {"voting_threshold": 0.6, "resource_sharing": True},
    )
    assert collaboration_id is not None
    assert collaboration_id in hub.collaborations
    collaboration = hub.collaborations[collaboration_id]
    assert collaboration.collaboration_type == "research_project"
    assert len(collaboration.agent_ids) == 3
    assert collaboration.status == "active"
    assert collaboration.governance_rules["voting_threshold"] == 0.6
async def test_reputation_system():
    """Reputation tallies interactions and rises with mostly-good outcomes."""
    hub = CrossChainAgentCommunication(MultiChainConfig())
    await hub.register_agent(AgentInfo(
        agent_id="reputation-agent",
        name="Reputation Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading"],
        reputation_score=0.5,  # Neutral starting point.
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0",
    ))
    # Five successful interactions followed by two failures.
    for _ in range(5):
        await hub.update_reputation("reputation-agent", True, 0.8)
    for _ in range(2):
        await hub.update_reputation("reputation-agent", False, 0.3)
    rep = hub.reputations["reputation-agent"]
    assert rep.total_interactions == 7
    assert rep.successful_interactions == 5
    assert rep.failed_interactions == 2
    # Net-positive history should lift the score above neutral.
    assert rep.reputation_score > 0.5
async def test_agent_status():
    """get_agent_status reports info, reputation, and activity counters."""
    hub = CrossChainAgentCommunication(MultiChainConfig())
    await hub.register_agent(AgentInfo(
        agent_id="status-agent",
        name="Status Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading", "analytics"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0",
    ))
    status = await hub.get_agent_status("status-agent")
    assert status is not None
    assert status["agent_info"]["agent_id"] == "status-agent"
    assert status["status"] == "active"
    assert status["reputation"] is not None
    # Freshly registered agent: nothing queued, no collaborations.
    assert status["message_queue_size"] == 0
    assert status["active_collaborations"] == 0
async def test_network_overview():
    """Overview aggregates counts, chains, collaborations, and reputation."""
    hub = CrossChainAgentCommunication(MultiChainConfig())
    for n in range(5):
        await hub.register_agent(AgentInfo(
            agent_id=f"network-agent-{n+1}",
            name=f"Network Agent {n+1}",
            chain_id=f"chain-{(n % 3) + 1}",  # Spread across three chains.
            node_id=f"node-{(n % 2) + 1}",
            # Last agent is BUSY so active count differs from the total.
            status=AgentStatus.ACTIVE if n < 4 else AgentStatus.BUSY,
            capabilities=["trading", "analytics"],
            reputation_score=0.7 + (n * 0.05),
            last_seen=datetime.now(),
            endpoint=f"http://localhost:808{n}",
            version="1.0.0",
        ))
    # One collaboration between the first two agents.
    await hub.create_collaboration(
        ["network-agent-1", "network-agent-2"],
        "test_collaboration",
        {},
    )
    overview = await hub.get_network_overview()
    assert overview["total_agents"] == 5
    assert overview["active_agents"] == 4
    assert overview["total_collaborations"] == 1
    assert overview["active_collaborations"] == 1
    assert len(overview["agents_by_chain"]) == 3
    assert overview["average_reputation"] > 0.7
def test_validation_functions():
    """Validation accepts well-formed agents/messages and rejects bad ones.

    Fix: the original compared boolean returns with `== True` / `== False`;
    plain truthiness assertions are the idiomatic equivalent.
    """
    config = MultiChainConfig()
    comm = CrossChainAgentCommunication(config)
    # A fully-populated agent must validate.
    valid_agent = AgentInfo(
        agent_id="valid-agent",
        name="Valid Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=["trading"],
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    assert comm._validate_agent_info(valid_agent)
    # An agent with no capabilities must be rejected.
    invalid_agent = AgentInfo(
        agent_id="invalid-agent",
        name="Invalid Agent",
        chain_id="chain-1",
        node_id="node-1",
        status=AgentStatus.ACTIVE,
        capabilities=[],  # Empty capabilities
        reputation_score=0.8,
        last_seen=datetime.now(),
        endpoint="http://localhost:8080",
        version="1.0.0"
    )
    assert not comm._validate_agent_info(invalid_agent)
    # A fully-populated message must validate.
    valid_message = AgentMessage(
        message_id="valid-message",
        sender_id="sender",
        receiver_id="receiver",
        message_type=MessageType.COMMUNICATION,
        chain_id="chain-1",
        target_chain_id=None,
        payload={"test": "data"},
        timestamp=datetime.now(),
        signature="signature",
        priority=5,
        ttl_seconds=3600
    )
    assert comm._validate_message(valid_message)
if __name__ == "__main__":
    # Synchronous tests first.
    test_agent_communication_creation()
    test_validation_functions()
    # Then drive each coroutine test on its own event loop, in order.
    for coro_fn in (
        test_agent_registration,
        test_agent_discovery,
        test_message_sending,
        test_cross_chain_messaging,
        test_collaboration_creation,
        test_reputation_system,
        test_agent_status,
        test_network_overview,
    ):
        asyncio.run(coro_fn())
    print("✅ All agent communication tests passed!")

View File

@@ -0,0 +1,334 @@
"""
Test for analytics and monitoring system
"""
import asyncio
import pytest
from datetime import datetime, timedelta
from aitbc_cli.core.config import MultiChainConfig, NodeConfig
from aitbc_cli.core.analytics import ChainAnalytics, ChainMetrics, ChainAlert
def test_analytics_creation():
    """A new analytics instance keeps its config and starts with empty state."""
    cfg = MultiChainConfig()
    analytics = ChainAnalytics(cfg)
    assert analytics.config == cfg
    # All collectors/registries start empty.
    assert analytics.metrics_history == {}
    assert analytics.alerts == []
    assert analytics.predictions == {}
    assert analytics.health_scores == {}
async def test_metrics_collection():
    """Collect metrics from a stubbed node; tolerate environment failures.

    Fix: the original placed its assertions inside the try/except Exception
    block, so a failing assertion was silently swallowed (AssertionError is
    an Exception subclass). The asserts now run in the `else` clause and
    only collection errors are tolerated.
    """
    config = MultiChainConfig()
    # Register a throwaway node for the collector to target.
    config.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10
    )
    analytics = ChainAnalytics(config)
    try:
        metrics = await analytics.collect_metrics("test-chain", "test-node")
    except Exception as e:  # No live node in CI; collection may fail here.
        print(f"Expected error in test environment: {e}")
    else:
        assert metrics.chain_id == "test-chain"
        assert metrics.node_id == "test-node"
        assert isinstance(metrics.tps, float)
        assert isinstance(metrics.block_height, int)
def test_performance_summary():
    """Summary over 24h reports chain id, data-point count, and TPS stats.

    Fix: the original also built a `mock_metrics` ChainMetrics that was
    never appended or used anywhere — dead code, now removed.
    """
    config = MultiChainConfig()
    analytics = ChainAnalytics(config)
    now = datetime.now()
    # Seed ten hourly samples with slightly varying load figures.
    for i in range(10):
        metrics = ChainMetrics(
            chain_id="test-chain",
            node_id="test-node",
            timestamp=now - timedelta(hours=i),
            block_height=1000 - i,
            tps=15.5 + (i * 0.1),
            avg_block_time=3.2 + (i * 0.01),
            gas_price=20000000000,
            memory_usage_mb=256.0 + (i * 10),
            disk_usage_mb=512.0 + (i * 5),
            active_nodes=3,
            client_count=25,
            miner_count=8,
            agent_count=12,
            network_in_mb=10.5,
            network_out_mb=8.2
        )
        analytics.metrics_history["test-chain"].append(metrics)
    # The 24-hour window covers all ten samples.
    summary = analytics.get_chain_performance_summary("test-chain", 24)
    assert summary["chain_id"] == "test-chain"
    assert summary["data_points"] == 10
    assert "statistics" in summary
    assert "tps" in summary["statistics"]
    assert "avg" in summary["statistics"]["tps"]
def test_cross_chain_analysis():
    """Cross-chain analysis reports totals and per-section comparisons."""
    analytics = ChainAnalytics(MultiChainConfig())
    # Seed one identical sample per chain.
    for chain_id in ("chain-1", "chain-2", "chain-3"):
        analytics.metrics_history[chain_id].append(ChainMetrics(
            chain_id=chain_id,
            node_id="test-node",
            timestamp=datetime.now(),
            block_height=1000,
            tps=15.5,
            avg_block_time=3.2,
            gas_price=20000000000,
            memory_usage_mb=256.0,
            disk_usage_mb=512.0,
            active_nodes=3,
            client_count=25,
            miner_count=8,
            agent_count=12,
            network_in_mb=10.5,
            network_out_mb=8.2,
        ))
    analysis = analytics.get_cross_chain_analysis()
    assert analysis["total_chains"] == 3
    for section in ("resource_usage", "alerts_summary",
                    "performance_comparison"):
        assert section in analysis
def test_health_score_calculation():
    """Healthy-looking metrics yield a score comfortably above 50."""
    analytics = ChainAnalytics(MultiChainConfig())
    healthy = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=20.0,               # Healthy throughput.
        avg_block_time=3.0,     # Fast blocks.
        gas_price=20000000000,
        memory_usage_mb=500.0,  # Moderate memory pressure.
        disk_usage_mb=512.0,
        active_nodes=5,         # Plenty of nodes.
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2,
    )
    analytics.metrics_history["test-chain"].append(healthy)
    analytics._calculate_health_score("test-chain")
    score = analytics.health_scores["test-chain"]
    assert 0 <= score <= 100
    assert score > 50  # Good inputs should read as good health.
def test_alert_generation():
    """Degenerate metrics should raise one alert per breached threshold."""
    analytics = ChainAnalytics(MultiChainConfig())
    bad_metrics = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=0.5,                 # Below minimum throughput.
        avg_block_time=15.0,     # Blocks far too slow.
        gas_price=20000000000,
        memory_usage_mb=3000.0,  # Memory well over limit.
        disk_usage_mb=512.0,
        active_nodes=0,          # No nodes at all.
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2,
    )
    asyncio.run(analytics._check_alerts(bad_metrics))
    assert len(analytics.alerts) > 0
    # Each breached threshold must have produced its alert type.
    raised = {alert.alert_type for alert in analytics.alerts}
    for expected in ("tps_low", "block_time_high",
                     "memory_high", "node_count_low"):
        assert expected in raised
def test_optimization_recommendations():
    """Struggling metrics should yield performance, resource, and availability advice."""
    analytics = ChainAnalytics(MultiChainConfig())
    struggling = ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=0.5,                 # Throughput far too low.
        avg_block_time=15.0,     # Blocks far too slow.
        gas_price=20000000000,
        memory_usage_mb=1500.0,  # Heavy memory use.
        disk_usage_mb=512.0,
        active_nodes=1,          # Barely any nodes.
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2,
    )
    analytics.metrics_history["test-chain"].append(struggling)
    recommendations = analytics.get_optimization_recommendations("test-chain")
    assert len(recommendations) > 0
    # Every advice category must be represented.
    categories = {rec["type"] for rec in recommendations}
    for expected in ("performance", "resource", "availability"):
        assert expected in categories
def test_prediction_system():
    """Predictions cover trending metrics with bounded confidence scores."""
    analytics = ChainAnalytics(MultiChainConfig())
    now = datetime.now()
    # Twenty hourly samples with rising TPS and memory trends;
    # the predictor needs at least ten data points.
    for i in range(20):
        analytics.metrics_history["test-chain"].append(ChainMetrics(
            chain_id="test-chain",
            node_id="test-node",
            timestamp=now - timedelta(hours=i),
            block_height=1000 - i,
            tps=15.0 + (i * 0.5),
            avg_block_time=3.0,
            gas_price=20000000000,
            memory_usage_mb=256.0 + (i * 10),
            disk_usage_mb=512.0,
            active_nodes=3,
            client_count=25,
            miner_count=8,
            agent_count=12,
            network_in_mb=10.5,
            network_out_mb=8.2,
        ))
    predictions = asyncio.run(analytics.predict_chain_performance("test-chain", 24))
    assert len(predictions) > 0
    # Both trending metrics must be predicted.
    predicted = [p.metric for p in predictions]
    assert "tps" in predicted
    assert "memory_usage_mb" in predicted
    # Confidence is a probability; predicted values are non-negative.
    for p in predictions:
        assert 0 <= p.confidence <= 1
        assert p.predicted_value >= 0
def test_dashboard_data():
    """The dashboard payload contains every expected top-level section."""
    analytics = ChainAnalytics(MultiChainConfig())
    analytics.metrics_history["test-chain"].append(ChainMetrics(
        chain_id="test-chain",
        node_id="test-node",
        timestamp=datetime.now(),
        block_height=1000,
        tps=15.5,
        avg_block_time=3.2,
        gas_price=20000000000,
        memory_usage_mb=256.0,
        disk_usage_mb=512.0,
        active_nodes=3,
        client_count=25,
        miner_count=8,
        agent_count=12,
        network_in_mb=10.5,
        network_out_mb=8.2,
    ))
    dashboard_data = analytics.get_dashboard_data()
    for section in ("overview", "chain_summaries", "alerts",
                    "predictions", "recommendations"):
        assert section in dashboard_data
if __name__ == "__main__":
    # Synchronous tests in their original order.
    for sync_test in (
        test_analytics_creation,
        test_performance_summary,
        test_cross_chain_analysis,
        test_health_score_calculation,
        test_alert_generation,
        test_optimization_recommendations,
        test_prediction_system,
        test_dashboard_data,
    ):
        sync_test()
    # The lone coroutine test runs on its own event loop.
    asyncio.run(test_metrics_collection())
    print("✅ All analytics tests passed!")

View File

@@ -0,0 +1,132 @@
"""
Basic test for multi-chain CLI functionality
"""
import pytest
import asyncio
import tempfile
import yaml
from pathlib import Path
from aitbc_cli.core.config import MultiChainConfig, load_multichain_config
from aitbc_cli.core.chain_manager import ChainManager
from aitbc_cli.core.genesis_generator import GenesisGenerator
from aitbc_cli.models.chain import ChainConfig, ChainType, ConsensusAlgorithm, ConsensusConfig, PrivacyConfig
def test_multichain_config():
    """Default gas, logging, and caching settings match documented values."""
    cfg = MultiChainConfig()
    assert cfg.chains.default_gas_limit == 10000000
    assert cfg.chains.default_gas_price == 20000000000
    assert cfg.logging_level == "INFO"
    assert cfg.enable_caching is True
def test_chain_config():
    """ChainConfig composes consensus and privacy sub-configurations."""
    chain_config = ChainConfig(
        type=ChainType.PRIVATE,
        purpose="test",
        name="Test Chain",
        consensus=ConsensusConfig(
            algorithm=ConsensusAlgorithm.POS,
            block_time=5,
            max_validators=21,
        ),
        privacy=PrivacyConfig(
            visibility="private",
            access_control="invite_only",
        ),
    )
    assert chain_config.type == ChainType.PRIVATE
    assert chain_config.purpose == "test"
    # Sub-configs are reachable through the composed object.
    assert chain_config.consensus.algorithm == ConsensusAlgorithm.POS
    assert chain_config.privacy.visibility == "private"
def test_genesis_generator():
    """The generator ships the private, topic, and research templates."""
    generator = GenesisGenerator(MultiChainConfig())
    templates = generator.list_templates()
    assert isinstance(templates, dict)
    for expected in ("private", "topic", "research"):
        assert expected in templates
async def test_chain_manager():
    """A fresh chain manager lists chains as a (possibly empty) list."""
    manager = ChainManager(MultiChainConfig())
    listed = await manager.list_chains()
    assert isinstance(listed, list)
def test_config_file_operations():
    """A saved config round-trips through YAML with edits intact."""
    from aitbc_cli.core.config import save_multichain_config

    with tempfile.TemporaryDirectory() as temp_dir:
        config_path = Path(temp_dir) / "test_config.yaml"
        cfg = MultiChainConfig()
        cfg.chains.default_gas_limit = 20000000  # Non-default marker value.
        save_multichain_config(cfg, str(config_path))
        reloaded = load_multichain_config(str(config_path))
        # The edited value must survive the save/load cycle.
        assert reloaded.chains.default_gas_limit == 20000000
def test_chain_config_file():
    """A chain definition loaded from a YAML file maps onto ChainConfig."""
    chain_config_data = {
        "chain": {
            "type": "topic",
            "purpose": "healthcare",
            "name": "Healthcare Chain",
            "consensus": {"algorithm": "pos", "block_time": 5},
            "privacy": {"visibility": "public", "access_control": "open"},
        }
    }
    # Write the definition to a temp file that outlives the with-block.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
        yaml.dump(chain_config_data, f)
        config_file = f.name
    try:
        with open(config_file, 'r') as f:
            data = yaml.safe_load(f)
        chain_config = ChainConfig(**data['chain'])
        # String values in YAML resolve to the matching enum members.
        assert chain_config.type == ChainType.TOPIC
        assert chain_config.purpose == "healthcare"
        assert chain_config.consensus.algorithm == ConsensusAlgorithm.POS
    finally:
        Path(config_file).unlink()
# Entry point: run the scenarios in order, interleaving the lone async
# test at its original position. Any assertion failure aborts the run.
if __name__ == "__main__":
    # Run basic tests
    test_multichain_config()
    test_chain_config()
    test_genesis_generator()
    asyncio.run(test_chain_manager())
    test_config_file_operations()
    test_chain_config_file()
    print("✅ All basic tests passed!")

View File

@@ -0,0 +1,403 @@
"""
Test for production deployment and scaling system
"""
import asyncio
import pytest
from datetime import datetime, timedelta
from pathlib import Path
from aitbc_cli.core.deployment import (
ProductionDeployment, DeploymentConfig, DeploymentMetrics,
ScalingEvent, ScalingPolicy, DeploymentStatus
)
def test_deployment_creation():
    """A new deployment manager starts empty and creates its directory tree."""
    deployment = ProductionDeployment("/tmp/test_aitbc")
    assert deployment.config_path == Path("/tmp/test_aitbc")
    # All registries start empty.
    for registry in (deployment.deployments, deployment.metrics,
                     deployment.health_checks):
        assert registry == {}
    assert deployment.scaling_events == []
    # The constructor must have materialized the working directories.
    for directory in (deployment.deployment_dir, deployment.config_dir,
                      deployment.logs_dir, deployment.backups_dir):
        assert directory.exists()
async def test_create_deployment_config():
    """create_deployment stores a config reflecting the requested settings."""
    manager = ProductionDeployment("/tmp/test_aitbc")
    new_id = await manager.create_deployment(
        name="test-deployment",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=2,
        port=8080,
        domain="test.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"}
    )
    assert new_id is not None
    assert new_id in manager.deployments
    stored = manager.deployments[new_id]
    assert stored.name == "test-deployment"
    assert stored.environment == "production"
    assert stored.min_instances == 1
    assert stored.max_instances == 10
    assert stored.desired_instances == 2
    # AUTO scaling is the default policy when none is supplied.
    assert stored.scaling_policy == ScalingPolicy.AUTO
    assert stored.port == 8080
    assert stored.domain == "test.aitbc.dev"
async def test_deployment_application():
    """deploy_application marks the deployment healthy and records metrics.

    Fixes: the mocked _deploy_infrastructure is now restored in a
    try/finally so a failing assertion cannot leave the stub in place,
    and the `== True` comparison is replaced with the idiomatic form.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="test-app",
        environment="staging",
        region="us-east-1",
        instance_type="t3.small",
        min_instances=1,
        max_instances=5,
        desired_instances=2,
        port=3000,
        domain="staging.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc_staging"}
    )
    # Stub out infrastructure provisioning so no real system calls happen.
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        print(f"Mock infrastructure deployment for {dep_config.name}")
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        success = await deployment.deploy_application(deployment_id)
        assert success
        assert deployment_id in deployment.health_checks
        assert deployment.health_checks[deployment_id] is True
        assert deployment_id in deployment.metrics
    finally:
        # Restore the real method even if an assertion above fails.
        deployment._deploy_infrastructure = original_deploy_infra
async def test_manual_scaling():
    """scale_deployment updates the instance count and logs a scaling event.

    Fixes: mocked _deploy_infrastructure is restored in a try/finally so
    a failing assertion cannot leave the stub installed, and `== True`
    comparisons are replaced with idiomatic forms.
    """
    deployment = ProductionDeployment("/tmp/test_aitbc")
    deployment_id = await deployment.create_deployment(
        name="scale-test",
        environment="production",
        region="us-west-2",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=2,
        port=8080,
        domain="scale.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"}
    )
    # Stub infrastructure provisioning to avoid real system calls.
    original_deploy_infra = deployment._deploy_infrastructure

    async def mock_deploy_infra(dep_config):
        return True

    deployment._deploy_infrastructure = mock_deploy_infra
    try:
        await deployment.deploy_application(deployment_id)
        # Scale from the initial 2 instances up to 5.
        assert await deployment.scale_deployment(deployment_id, 5,
                                                 "manual scaling test")
        assert deployment.deployments[deployment_id].desired_instances == 5
        # A matching scaling event must have been recorded.
        events = [e for e in deployment.scaling_events
                  if e.deployment_id == deployment_id]
        assert len(events) > 0
        latest = events[-1]
        assert latest.old_instances == 2
        assert latest.new_instances == 5
        assert latest.success is True
        assert latest.trigger_reason == "manual scaling test"
    finally:
        # Restore the real method even if an assertion above fails.
        deployment._deploy_infrastructure = original_deploy_infra
async def test_auto_scaling():
    """Exercise metric-driven auto-scaling in both directions."""
    mgr = ProductionDeployment("/tmp/test_aitbc")
    dep_id = await mgr.create_deployment(
        name="auto-scale-test",
        environment="production",
        region="us-east-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=2,
        port=8080,
        domain="autoscale.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"},
    )
    # Replace the real infrastructure hook with a no-op stub.
    saved_infra = mgr._deploy_infrastructure
    async def fake_infra(cfg):
        return True
    mgr._deploy_infrastructure = fake_infra
    await mgr.deploy_application(dep_id)
    # Push CPU above the scale-up threshold while keeping other metrics calm.
    gauges = mgr.metrics[dep_id]
    gauges.cpu_usage = 85.0
    gauges.memory_usage = 40.0
    gauges.error_rate = 1.0
    gauges.response_time = 500.0
    assert await mgr.auto_scale_deployment(dep_id)
    # One step up: 2 -> 3.
    assert mgr.deployments[dep_id].desired_instances == 3
    # Now drop utilization below the scale-down threshold.
    gauges.cpu_usage = 15.0
    gauges.memory_usage = 25.0
    assert await mgr.auto_scale_deployment(dep_id)
    # One step back down: 3 -> 2.
    assert mgr.deployments[dep_id].desired_instances == 2
    mgr._deploy_infrastructure = saved_infra
async def test_deployment_status():
    """Check the shape and contents of get_deployment_status()."""
    mgr = ProductionDeployment("/tmp/test_aitbc")
    dep_id = await mgr.create_deployment(
        name="status-test",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=5,
        desired_instances=2,
        port=8080,
        domain="status.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"},
    )
    # No-op infrastructure hook so deploy_application runs offline.
    saved_infra = mgr._deploy_infrastructure
    async def fake_infra(cfg):
        return True
    mgr._deploy_infrastructure = fake_infra
    await mgr.deploy_application(dep_id)
    status = await mgr.get_deployment_status(dep_id)
    assert status is not None
    # Top-level sections every status payload must carry.
    for section in ("deployment", "metrics", "health_status",
                    "recent_scaling_events", "uptime_percentage"):
        assert section in status
    # Deployment summary mirrors the creation parameters.
    info = status["deployment"]
    assert info["name"] == "status-test"
    assert info["environment"] == "production"
    assert info["desired_instances"] == 2
    assert status["health_status"] == True
    # Metrics belong to this deployment and are non-negative.
    reported = status["metrics"]
    assert reported["deployment_id"] == dep_id
    assert reported["cpu_usage"] >= 0
    assert reported["memory_usage"] >= 0
    mgr._deploy_infrastructure = saved_infra
async def test_cluster_overview():
    """Aggregate view across several deployments."""
    mgr = ProductionDeployment("/tmp/test_aitbc")
    # No-op infrastructure hook, installed before anything is deployed.
    saved_infra = mgr._deploy_infrastructure
    async def fake_infra(cfg):
        return True
    mgr._deploy_infrastructure = fake_infra
    # Spin up three deployments, alternating environments.
    created = []
    for idx in range(3):
        dep_id = await mgr.create_deployment(
            name=f"cluster-test-{idx+1}",
            environment="production" if idx % 2 == 0 else "staging",
            region="us-west-1",
            instance_type="t3.medium",
            min_instances=1,
            max_instances=5,
            desired_instances=2,
            port=8080 + idx,
            domain=f"test{idx+1}.aitbc.dev",
            database_config={"host": "localhost", "port": 5432, "name": f"aitbc_{idx+1}"},
        )
        await mgr.deploy_application(dep_id)
        created.append(dep_id)
    overview = await mgr.get_cluster_overview()
    assert overview is not None
    # Required sections of the overview payload.
    for key in ("total_deployments", "running_deployments", "total_instances",
                "aggregate_metrics", "recent_scaling_events",
                "successful_scaling_rate", "health_check_coverage"):
        assert key in overview
    assert overview["total_deployments"] == 3
    assert overview["running_deployments"] == 3
    assert overview["total_instances"] == 6  # 2 instances per deployment
    assert overview["health_check_coverage"] == 1.0  # every deployment health-checked
    mgr._deploy_infrastructure = saved_infra
def test_scaling_thresholds():
    """Default scaling thresholds match the documented values."""
    mgr = ProductionDeployment("/tmp/test_aitbc")
    # Expected defaults, checked table-driven so a mismatch names the key.
    expected = {
        'cpu_high': 80.0,
        'cpu_low': 20.0,
        'memory_high': 85.0,
        'memory_low': 30.0,
        'error_rate_high': 5.0,
        'response_time_high': 2000.0,
        'min_uptime': 99.0,
    }
    for key, value in expected.items():
        assert mgr.scaling_thresholds[key] == value
async def test_deployment_config_validation():
    """A well-formed config is accepted and keeps min <= desired <= max."""
    mgr = ProductionDeployment("/tmp/test_aitbc")
    dep_id = await mgr.create_deployment(
        name="valid-config",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=10,
        desired_instances=5,
        port=8080,
        domain="valid.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"},
    )
    assert dep_id is not None
    # The stored config must respect the instance-count ordering invariant.
    stored = mgr.deployments[dep_id]
    assert stored.min_instances <= stored.desired_instances <= stored.max_instances
async def test_metrics_initialization():
    """Deploying an application seeds a sane metrics record."""
    mgr = ProductionDeployment("/tmp/test_aitbc")
    dep_id = await mgr.create_deployment(
        name="metrics-test",
        environment="production",
        region="us-west-1",
        instance_type="t3.medium",
        min_instances=1,
        max_instances=5,
        desired_instances=2,
        port=8080,
        domain="metrics.aitbc.dev",
        database_config={"host": "localhost", "port": 5432, "name": "aitbc"},
    )
    # No-op infrastructure hook so deploy_application runs offline.
    saved_infra = mgr._deploy_infrastructure
    async def fake_infra(cfg):
        return True
    mgr._deploy_infrastructure = fake_infra
    # Deployment is what initializes the metrics record.
    await mgr.deploy_application(dep_id)
    record = mgr.metrics[dep_id]
    assert record.deployment_id == dep_id
    # Every gauge starts at a non-negative value.
    for gauge in (record.cpu_usage, record.memory_usage, record.disk_usage,
                  record.request_count, record.error_rate, record.response_time,
                  record.uptime_percentage):
        assert gauge >= 0
    assert record.active_instances >= 1
    mgr._deploy_infrastructure = saved_infra
if __name__ == "__main__":
    # Synchronous checks first.
    test_deployment_creation()
    test_scaling_thresholds()
    # Then the coroutine-based tests, executed one at a time.
    for async_test in (
        test_create_deployment_config,
        test_deployment_application,
        test_manual_scaling,
        test_auto_scaling,
        test_deployment_status,
        test_cluster_overview,
        test_deployment_config_validation,
        test_metrics_initialization,
    ):
        asyncio.run(async_test())
    print("✅ All deployment tests passed!")

View File

@@ -0,0 +1,372 @@
"""
Test for global chain marketplace system
"""
import asyncio
import pytest
from decimal import Decimal
from datetime import datetime, timedelta
from aitbc_cli.core.config import MultiChainConfig
from aitbc_cli.core.marketplace import (
GlobalChainMarketplace, ChainListing, ChainType, MarketplaceStatus,
MarketplaceTransaction, TransactionStatus, ChainEconomy, MarketplaceMetrics
)
def test_marketplace_creation():
    """A fresh marketplace carries its config and starts with empty state."""
    cfg = MultiChainConfig()
    mp = GlobalChainMarketplace(cfg)
    assert mp.config == cfg
    # All bookkeeping containers begin empty.
    for container in (mp.listings, mp.transactions, mp.chain_economies,
                      mp.user_reputations):
        assert container == {}
    # Metrics are computed lazily, so none exist yet.
    assert mp.market_metrics is None
async def test_create_listing():
    """Creating a listing stores it and marks it active."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    # The seller needs an existing reputation entry.
    mp.user_reputations["seller-1"] = 0.8
    lid = await mp.create_listing(
        chain_id="healthcare-chain-001",
        chain_name="Healthcare Analytics Chain",
        chain_type=ChainType.TOPIC,
        description="Advanced healthcare data analytics chain",
        seller_id="seller-1",
        price=Decimal("1.5"),
        currency="ETH",
        chain_specifications={"consensus": "pos", "block_time": 5},
        metadata={"category": "healthcare", "compliance": "hipaa"},
    )
    assert lid is not None
    assert lid in mp.listings
    # The stored listing mirrors the creation arguments and is ACTIVE.
    stored = mp.listings[lid]
    assert stored.chain_id == "healthcare-chain-001"
    assert stored.chain_name == "Healthcare Analytics Chain"
    assert stored.chain_type == ChainType.TOPIC
    assert stored.price == Decimal("1.5")
    assert stored.status == MarketplaceStatus.ACTIVE
async def test_purchase_chain():
    """Buying a listed chain opens a pending transaction and marks the listing sold."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    # Both parties need reputation entries.
    mp.user_reputations.update({"seller-1": 0.8, "buyer-1": 0.7})
    lid = await mp.create_listing(
        chain_id="trading-chain-001",
        chain_name="Trading Analytics Chain",
        chain_type=ChainType.PRIVATE,
        description="Private trading analytics chain",
        seller_id="seller-1",
        price=Decimal("2.0"),
        currency="ETH",
        chain_specifications={"consensus": "pos"},
        metadata={"category": "trading"},
    )
    tid = await mp.purchase_chain(lid, "buyer-1", "crypto")
    assert tid is not None
    assert tid in mp.transactions
    # The transaction links both parties at the listed price, pending settlement.
    txn = mp.transactions[tid]
    assert txn.buyer_id == "buyer-1"
    assert txn.seller_id == "seller-1"
    assert txn.price == Decimal("2.0")
    assert txn.status == TransactionStatus.PENDING
    # The listing itself flips to SOLD.
    assert mp.listings[lid].status == MarketplaceStatus.SOLD
async def test_complete_transaction():
    """Completing a purchase finalizes the transaction and releases escrow."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    mp.user_reputations.update({"seller-1": 0.8, "buyer-1": 0.7})
    lid = await mp.create_listing(
        chain_id="research-chain-001",
        chain_name="Research Collaboration Chain",
        chain_type=ChainType.RESEARCH,
        description="Research collaboration chain",
        seller_id="seller-1",
        price=Decimal("0.5"),
        currency="ETH",
        chain_specifications={"consensus": "pos"},
        metadata={"category": "research"},
    )
    tid = await mp.purchase_chain(lid, "buyer-1", "crypto")
    # Completion records the on-chain hash and timestamps the settlement.
    assert await mp.complete_transaction(tid, "0x1234567890abcdef")
    txn = mp.transactions[tid]
    assert txn.status == TransactionStatus.COMPLETED
    assert txn.transaction_hash == "0x1234567890abcdef"
    assert txn.completed_at is not None
    # Escrow tied to this transaction must have been released.
    escrow = mp.escrow_contracts.get(txn.escrow_address)
    assert escrow is not None
    assert escrow["status"] == "released"
async def test_chain_economy():
    """Requesting an unknown chain economy lazily creates one with sane fields."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    eco = await mp.get_chain_economy("test-chain-001")
    assert eco is not None
    assert eco.chain_id == "test-chain-001"
    # Monetary figures are Decimals; counters and timestamps are populated.
    assert isinstance(eco.total_value_locked, Decimal)
    assert isinstance(eco.daily_volume, Decimal)
    assert eco.transaction_count >= 0
    assert eco.last_updated is not None
async def test_search_listings():
    """Filter listings by type, price band, and seller."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    mp.user_reputations["seller-1"] = 0.8
    # One listing per chain flavor, each at a distinct price point.
    catalog = [
        ("healthcare-chain-001", "Healthcare Chain", ChainType.TOPIC, Decimal("1.0")),
        ("trading-chain-001", "Trading Chain", ChainType.PRIVATE, Decimal("2.0")),
        ("research-chain-001", "Research Chain", ChainType.RESEARCH, Decimal("0.5")),
        ("enterprise-chain-001", "Enterprise Chain", ChainType.ENTERPRISE, Decimal("5.0")),
    ]
    created = []
    for cid, label, flavor, cost in catalog:
        created.append(await mp.create_listing(
            chain_id=cid,
            chain_name=label,
            chain_type=flavor,
            description=f"Description for {label}",
            seller_id="seller-1",
            price=cost,
            currency="ETH",
            chain_specifications={},
            metadata={},
        ))
    # By chain type: exactly the one TOPIC listing.
    by_type = await mp.search_listings(chain_type=ChainType.TOPIC)
    assert len(by_type) == 1
    assert by_type[0].chain_type == ChainType.TOPIC
    # By price band: the 1.0 and 2.0 listings fall inside [1.0, 2.0].
    by_price = await mp.search_listings(min_price=Decimal("1.0"), max_price=Decimal("2.0"))
    assert len(by_price) == 2
    # By seller: everything belongs to seller-1.
    by_seller = await mp.search_listings(seller_id="seller-1")
    assert len(by_seller) == 4
async def test_user_transactions():
    """Query transactions from the seller, buyer, and combined perspectives."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    mp.user_reputations.update({"seller-1": 0.8, "buyer-1": 0.7, "buyer-2": 0.6})
    # Two listings from the same seller, bought by different buyers.
    lid_a = await mp.create_listing(
        chain_id="chain-001",
        chain_name="Chain 1",
        chain_type=ChainType.TOPIC,
        description="Description",
        seller_id="seller-1",
        price=Decimal("1.0"),
        currency="ETH",
        chain_specifications={},
        metadata={},
    )
    lid_b = await mp.create_listing(
        chain_id="chain-002",
        chain_name="Chain 2",
        chain_type=ChainType.PRIVATE,
        description="Description",
        seller_id="seller-1",
        price=Decimal("2.0"),
        currency="ETH",
        chain_specifications={},
        metadata={},
    )
    await mp.purchase_chain(lid_a, "buyer-1", "crypto")
    await mp.purchase_chain(lid_b, "buyer-2", "crypto")
    # The seller sees both sales.
    sold = await mp.get_user_transactions("seller-1", "seller")
    assert len(sold) == 2
    # A buyer sees only its own purchase.
    bought = await mp.get_user_transactions("buyer-1", "buyer")
    assert len(bought) == 1
    assert bought[0].buyer_id == "buyer-1"
    # "both" for the seller still yields the same two records.
    combined = await mp.get_user_transactions("seller-1", "both")
    assert len(combined) == 2
async def test_marketplace_overview():
    """Overview aggregates listings, transactions, and settled volume."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    mp.user_reputations.update({"seller-1": 0.8, "buyer-1": 0.7})
    # One complete sale: list, buy, settle.
    lid = await mp.create_listing(
        chain_id="overview-chain-001",
        chain_name="Overview Test Chain",
        chain_type=ChainType.TOPIC,
        description="Test chain for overview",
        seller_id="seller-1",
        price=Decimal("1.5"),
        currency="ETH",
        chain_specifications={},
        metadata={},
    )
    tid = await mp.purchase_chain(lid, "buyer-1", "crypto")
    await mp.complete_transaction(tid, "0x1234567890abcdef")
    overview = await mp.get_marketplace_overview()
    assert overview is not None
    # Required sections of the overview payload.
    for section in ("marketplace_metrics", "volume_24h", "top_performing_chains",
                    "chain_types_distribution", "user_activity", "escrow_summary"):
        assert section in overview
    # Totals reflect the single completed sale.
    stats = overview["marketplace_metrics"]
    assert stats["total_listings"] == 1
    assert stats["total_transactions"] == 1
    assert stats["total_volume"] == Decimal("1.5")
def test_validation_functions():
    """Reputation updates accumulate and clamp to the [0.0, 1.0] range.

    A new user starts at an implicit 0.5 reputation; deltas are applied
    additively and the result is clamped at both bounds.

    Fixes over the previous version: leftover debug ``print`` calls removed,
    and float comparisons use the same tolerance throughout (the old exact
    ``== 0.6`` check relied on a lucky rounding of ``0.5 + 0.1``).
    """
    config = MultiChainConfig()
    marketplace = GlobalChainMarketplace(config)
    # New user defaults to 0.5, so +0.1 should land on 0.6.
    marketplace._update_user_reputation("user-1", 0.1)
    assert abs(marketplace.user_reputations["user-1"] - 0.6) < 0.0001
    # A negative delta is subtracted; compare with tolerance since the
    # accumulator is a float.
    marketplace._update_user_reputation("user-1", -0.2)
    assert abs(marketplace.user_reputations["user-1"] - 0.4) < 0.0001
    # Upper bound: 0.4 + 0.6 reaches 1.0 and must not exceed it.
    marketplace._update_user_reputation("user-1", 0.6)
    assert marketplace.user_reputations["user-1"] == 1.0
    # Lower bound: a large negative delta clamps to 0.0.
    marketplace._update_user_reputation("user-1", -1.5)
    assert marketplace.user_reputations["user-1"] == 0.0
async def test_escrow_system():
    """Escrow is funded at purchase time and released with a fee split on completion."""
    mp = GlobalChainMarketplace(MultiChainConfig())
    mp.user_reputations.update({"seller-1": 0.8, "buyer-1": 0.7})
    lid = await mp.create_listing(
        chain_id="escrow-test-chain",
        chain_name="Escrow Test Chain",
        chain_type=ChainType.TOPIC,
        description="Test escrow functionality",
        seller_id="seller-1",
        price=Decimal("3.0"),
        currency="ETH",
        chain_specifications={},
        metadata={},
    )
    tid = await mp.purchase_chain(lid, "buyer-1", "crypto")
    txn = mp.transactions[tid]
    addr = txn.escrow_address
    # Purchase must have opened an active escrow holding the full price.
    assert addr in mp.escrow_contracts
    escrow = mp.escrow_contracts[addr]
    assert escrow["status"] == "active"
    assert escrow["amount"] == Decimal("3.0")
    assert escrow["buyer_id"] == "buyer-1"
    assert escrow["seller_id"] == "seller-1"
    # Completion releases the funds and records the fee breakdown.
    await mp.complete_transaction(tid, "0xabcdef1234567890")
    escrow = mp.escrow_contracts[addr]
    assert escrow["status"] == "released"
    assert "fee_breakdown" in escrow
    fees = escrow["fee_breakdown"]
    assert fees["escrow_fee"] == Decimal("0.06")  # 2% of 3.0
    assert fees["marketplace_fee"] == Decimal("0.03")  # 1% of 3.0
    assert fees["seller_amount"] == Decimal("2.91")  # price minus both fees
if __name__ == "__main__":
    # Plain (synchronous) checks first.
    test_marketplace_creation()
    test_validation_functions()
    # Coroutine-based checks, executed sequentially.
    for async_test in (
        test_create_listing,
        test_purchase_chain,
        test_complete_transaction,
        test_chain_economy,
        test_search_listings,
        test_user_transactions,
        test_marketplace_overview,
        test_escrow_system,
    ):
        asyncio.run(async_test())
    print("✅ All marketplace tests passed!")

View File

@@ -0,0 +1,132 @@
"""
Test for multi-chain node integration
"""
import asyncio
import pytest
from aitbc_cli.core.config import MultiChainConfig, NodeConfig
from aitbc_cli.core.node_client import NodeClient
from aitbc_cli.core.chain_manager import ChainManager
def test_node_client_creation():
    """A NodeClient keeps the configuration it was constructed with."""
    cfg = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    client = NodeClient(cfg)
    # The config object is exposed unchanged on the client.
    assert client.config.id == "test-node"
    assert client.config.endpoint == "http://localhost:8545"
async def test_node_client_mock_operations():
    """Drive the client's read-only endpoints against its mock backend."""
    cfg = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    async with NodeClient(cfg) as client:
        # Basic node identity/health payload.
        info = await client.get_node_info()
        assert info["node_id"] == "test-node"
        assert "status" in info
        assert "uptime_days" in info
        # Hosted-chain inventory (may be empty without mock data).
        hosted = await client.get_hosted_chains()
        assert isinstance(hosted, list)
        if hosted:
            assert hasattr(hosted[0], 'id')
            assert hasattr(hosted[0], 'type')
        # Per-chain statistics.
        stats = await client.get_chain_stats("test-chain")
        assert "chain_id" in stats
        assert "block_height" in stats
def test_chain_manager_with_node_client():
    """ChainManager picks up nodes registered in the shared config."""
    cfg = MultiChainConfig()
    # Register one node before constructing the manager.
    cfg.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    mgr = ChainManager(cfg)
    # The manager sees the node through the config it was handed.
    assert "test-node" in mgr.config.nodes
    assert mgr.config.nodes["test-node"].endpoint == "http://localhost:8545"
async def test_chain_operations_with_node():
    """Chain listing works through the node client (mock data)."""
    cfg = MultiChainConfig()
    cfg.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    mgr = ChainManager(cfg)
    # Global chain listing returns a list even with only mock data.
    all_chains = await mgr.list_chains()
    assert isinstance(all_chains, list)
    # Per-node chain listing likewise.
    per_node = await mgr._get_node_chains("test-node")
    assert isinstance(per_node, list)
def test_backup_restore_operations():
    """ChainManager exposes the backup/restore entry points (structure only)."""
    cfg = MultiChainConfig()
    cfg.nodes["test-node"] = NodeConfig(
        id="test-node",
        endpoint="http://localhost:8545",
        timeout=30,
        retry_count=3,
        max_connections=10,
    )
    mgr = ChainManager(cfg)
    # These are async in the real implementation; here we only confirm they exist.
    for method in ("_execute_backup", "_execute_restore", "_get_chain_hosting_nodes"):
        assert hasattr(mgr, method)
if __name__ == "__main__":
    # Sync construction test first.
    test_node_client_creation()
    # Async flows, run one at a time.
    for async_test in (test_node_client_mock_operations, test_chain_operations_with_node):
        asyncio.run(async_test())
    # Remaining sync tests.
    test_chain_manager_with_node_client()
    test_backup_restore_operations()
    print("✅ All node integration tests passed!")

View File

@@ -0,0 +1,258 @@
#!/usr/bin/env python3
"""
Ollama GPU Provider Test with Blockchain Verification
Submits an inference job and verifies the complete flow:
- Job submission to coordinator
- Processing by GPU miner
- Receipt generation
- Blockchain transaction recording
"""
import argparse
import sys
import time
from typing import Optional
import json
import httpx
# Configuration — all overridable via command-line flags in main().
DEFAULT_COORDINATOR = "http://localhost:8000"  # coordinator API base URL
DEFAULT_BLOCKCHAIN = "http://127.0.0.1:19000"  # blockchain node RPC base URL
DEFAULT_API_KEY = "${CLIENT_API_KEY}"  # placeholder; presumably substituted by the environment — confirm
DEFAULT_PROMPT = "What is the capital of France?"  # default inference prompt
DEFAULT_MODEL = "llama3.2:latest"  # default Ollama model tag
DEFAULT_TIMEOUT = 180  # seconds to wait for job completion
POLL_INTERVAL = 3  # seconds between job-status polls
def submit_job(client: httpx.Client, base_url: str, api_key: str, prompt: str, model: str) -> Optional[str]:
    """POST an inference job to the coordinator; return its job_id, or None on failure."""
    body = {
        "payload": {
            "type": "inference",
            "prompt": prompt,
            "parameters": {
                "prompt": prompt,
                "model": model,
                "stream": False
            },
        },
        "ttl_seconds": 900,
    }
    resp = client.post(
        f"{base_url}/v1/jobs",
        headers={"X-Api-Key": api_key, "Content-Type": "application/json"},
        json=body,
        timeout=10,
    )
    # 201 Created is the only success status for job submission.
    if resp.status_code == 201:
        return resp.json().get("job_id")
    print(f"❌ Job submission failed: {resp.status_code} {resp.text}")
    return None
def fetch_status(client: httpx.Client, base_url: str, api_key: str, job_id: str) -> Optional[dict]:
    """GET the current job record from the coordinator; None if the request fails."""
    resp = client.get(
        f"{base_url}/v1/jobs/{job_id}",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Status check failed: {resp.status_code} {resp.text}")
    return None
def fetch_result(client: httpx.Client, base_url: str, api_key: str, job_id: str) -> Optional[dict]:
    """GET the completed job's result payload; None if the request fails."""
    resp = client.get(
        f"{base_url}/v1/jobs/{job_id}/result",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Result fetch failed: {resp.status_code} {resp.text}")
    return None
def fetch_receipt(client: httpx.Client, base_url: str, api_key: str, job_id: str) -> Optional[dict]:
    """GET the payment receipt for a job; None if the request fails."""
    resp = client.get(
        f"{base_url}/v1/jobs/{job_id}/receipt",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Receipt fetch failed: {resp.status_code} {resp.text}")
    return None
def check_blockchain_transaction(client: httpx.Client, blockchain_url: str, receipt_id: str) -> Optional[dict]:
    """Look up a blockchain transaction by receipt ID; None if absent or on error."""
    resp = client.get(
        f"{blockchain_url}/rpc/transactions/search",
        params={"receipt_id": receipt_id},
        timeout=10,
    )
    if resp.status_code != 200:
        print(f"⚠️ Blockchain search failed: {resp.status_code}")
        return None
    matches = resp.json().get("transactions", [])
    # First hit wins; the search is keyed on the receipt ID.
    return matches[0] if matches else None
def get_miner_info(client: httpx.Client, base_url: str, api_key: str) -> Optional[dict]:
    """Fetch the first registered miner record, tolerating several response shapes."""
    resp = client.get(
        f"{base_url}/v1/admin/miners",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code != 200:
        print(f"⚠️ Could not fetch miner info: {resp.status_code}")
        return None
    body = resp.json()
    # The admin endpoint has returned either a bare list or a dict wrapping
    # the list under "miners" or "items" (checked in that order).
    if isinstance(body, list):
        return body[0] if body else None
    if isinstance(body, dict):
        for wrapper in ('miners', 'items'):
            if wrapper in body:
                entries = body[wrapper]
                return entries[0] if entries else None
    return None
def main() -> int:
    """Run the end-to-end GPU-provider test with blockchain verification.

    Flow: check miner registration, submit an inference job, poll until it
    finishes, fetch the result and payment receipt, then look the receipt up
    on the blockchain node.

    Returns:
        Process exit code: 0 on success, 1 on any failure or timeout.
    """
    parser = argparse.ArgumentParser(description="Ollama GPU provider with blockchain verification")
    parser.add_argument("--coordinator-url", default=DEFAULT_COORDINATOR, help="Coordinator base URL")
    parser.add_argument("--blockchain-url", default=DEFAULT_BLOCKCHAIN, help="Blockchain node URL")
    parser.add_argument("--api-key", default=DEFAULT_API_KEY, help="Client API key")
    parser.add_argument("--prompt", default=DEFAULT_PROMPT, help="Prompt to send")
    parser.add_argument("--model", default=DEFAULT_MODEL, help="Model to use")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Timeout in seconds")
    args = parser.parse_args()
    print("🚀 Starting Ollama GPU Provider Test with Blockchain Verification")
    print("=" * 60)
    # Check miner registration
    print("\n📋 Checking miner registration...")
    # NOTE(review): the admin key placeholder "${ADMIN_API_KEY}" is presumably
    # substituted before this script runs — confirm the deployment tooling does so.
    with httpx.Client() as client:
        miner_info = get_miner_info(client, args.coordinator_url, "${ADMIN_API_KEY}")
        if miner_info:
            print(f"✅ Found registered miner: {miner_info.get('miner_id')}")
            print(f" Status: {miner_info.get('status')}")
            print(f" Last seen: {miner_info.get('last_seen')}")
        else:
            # Non-fatal: the job is still submitted, it just may sit unassigned.
            print("⚠️ No miners registered. Job may not be processed.")
    # Submit job
    print(f"\n📤 Submitting inference job...")
    print(f" Prompt: {args.prompt}")
    print(f" Model: {args.model}")
    with httpx.Client() as client:
        job_id = submit_job(client, args.coordinator_url, args.api_key, args.prompt, args.model)
        if not job_id:
            return 1
        print(f"✅ Job submitted successfully: {job_id}")
        # Monitor job progress: poll until completion, terminal failure, or deadline.
        print(f"\n⏳ Monitoring job progress...")
        deadline = time.time() + args.timeout
        status = None
        while time.time() < deadline:
            status = fetch_status(client, args.coordinator_url, args.api_key, job_id)
            if not status:
                return 1
            state = status.get("state")
            assigned_miner = status.get("assigned_miner_id", "None")
            print(f" State: {state} | Miner: {assigned_miner}")
            if state == "COMPLETED":
                break
            if state in {"FAILED", "CANCELED", "EXPIRED"}:
                print(f"❌ Job ended in state: {state}")
                if status.get("error"):
                    print(f" Error: {status['error']}")
                return 1
            time.sleep(POLL_INTERVAL)
        if not status or status.get("state") != "COMPLETED":
            print("❌ Job did not complete within timeout")
            return 1
        # Fetch result and receipt
        print(f"\n📊 Fetching job results...")
        result = fetch_result(client, args.coordinator_url, args.api_key, job_id)
        if result is None:
            return 1
        receipt = fetch_receipt(client, args.coordinator_url, args.api_key, job_id)
        if receipt is None:
            # A missing receipt is reported but does not abort the run.
            print("⚠️ No receipt found (payment may not be processed)")
            receipt = {}
        # Display results (output is truncated to 200 chars for readability)
        payload = result.get("result") or {}
        output = payload.get("output", "No output")
        print(f"\n✅ Job completed successfully!")
        print(f"📝 Output: {output[:200]}{'...' if len(output) > 200 else ''}")
        if receipt:
            print(f"\n🧾 Receipt Information:")
            print(f" Receipt ID: {receipt.get('receipt_id')}")
            print(f" Provider: {receipt.get('provider')}")
            print(f" Units: {receipt.get('units')} {receipt.get('unit_type', 'seconds')}")
            print(f" Unit Price: {receipt.get('unit_price')} AITBC")
            print(f" Total Price: {receipt.get('price')} AITBC")
            print(f" Status: {receipt.get('status')}")
            # Check blockchain for a transaction referencing this receipt.
            print(f"\n⛓️ Checking blockchain recording...")
            receipt_id = receipt.get('receipt_id')
            with httpx.Client() as bc_client:
                tx = check_blockchain_transaction(bc_client, args.blockchain_url, receipt_id)
                if tx:
                    print(f"✅ Transaction found on blockchain!")
                    print(f" TX Hash: {tx.get('tx_hash')}")
                    print(f" Block: {tx.get('block_height')}")
                    print(f" From: {tx.get('sender')}")
                    print(f" To: {tx.get('recipient')}")
                    print(f" Amount: {tx.get('amount')} AITBC")
                    # Show transaction payload
                    payload = tx.get('payload', {})
                    if 'receipt_id' in payload:
                        print(f" Payload Receipt: {payload['receipt_id']}")
                else:
                    # Not found is treated as "not yet mined", still exit 0.
                    print(f"⚠️ Transaction not yet found on blockchain")
                    print(f" This may take a few moments to be mined...")
                    print(f" Receipt ID: {receipt_id}")
        else:
            print(f"\n❌ No receipt generated - payment not processed")
    print(f"\n🎉 Test completed!")
    return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    raise SystemExit(main())

View File

@@ -0,0 +1,128 @@
#!/usr/bin/env python3
"""
Ollama GPU Provider Test
Submits an inference job with prompt "hello" and verifies completion.
"""
import argparse
import sys
import time
from typing import Optional
import httpx
# Defaults — all overridable via command-line flags in main().
DEFAULT_COORDINATOR = "http://localhost:8000"  # coordinator API base URL
DEFAULT_API_KEY = "${CLIENT_API_KEY}"  # placeholder; presumably substituted by the environment — confirm
DEFAULT_PROMPT = "hello"  # minimal smoke-test prompt
DEFAULT_TIMEOUT = 180  # seconds to wait for job completion
POLL_INTERVAL = 3  # seconds between job-status polls
def submit_job(client: httpx.Client, base_url: str, api_key: str, prompt: str) -> Optional[str]:
    """Submit a minimal inference job; return its job_id, or None on failure."""
    body = {
        "payload": {
            "type": "inference",
            "prompt": prompt,
            "parameters": {"prompt": prompt},
        },
        "ttl_seconds": 900,
    }
    resp = client.post(
        f"{base_url}/v1/jobs",
        headers={"X-Api-Key": api_key, "Content-Type": "application/json"},
        json=body,
        timeout=10,
    )
    # 201 Created is the only success status; anything else is logged and dropped.
    if resp.status_code == 201:
        return response_seen_id(resp)
    print(f"❌ Job submission failed: {resp.status_code} {resp.text}")
    return None
def response_seen_id(response: httpx.Response) -> Optional[str]:
    """Extract the "job_id" field from a JSON response; None when parsing fails."""
    try:
        body = response.json()
        return body.get("job_id")
    except Exception:
        # Non-JSON body or unexpected shape — treat as "no id seen".
        return None
def fetch_status(client: httpx.Client, base_url: str, api_key: str, job_id: str) -> Optional[dict]:
    """GET the current job record; None if the request fails."""
    resp = client.get(
        f"{base_url}/v1/jobs/{job_id}",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Status check failed: {resp.status_code} {resp.text}")
    return None
def fetch_result(client: httpx.Client, base_url: str, api_key: str, job_id: str) -> Optional[dict]:
    """GET the final result document for *job_id*.

    Returns the parsed JSON document on HTTP 200; otherwise prints a
    diagnostic and returns None.
    """
    response = client.get(
        f"{base_url}/v1/jobs/{job_id}/result",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if response.status_code == 200:
        return response.json()
    print(f"❌ Result fetch failed: {response.status_code} {response.text}")
    return None
def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the CLI parser for the end-to-end test."""
    parser = argparse.ArgumentParser(description="Ollama GPU provider end-to-end test")
    parser.add_argument("--url", default=DEFAULT_COORDINATOR, help="Coordinator base URL")
    parser.add_argument("--api-key", default=DEFAULT_API_KEY, help="Client API key")
    parser.add_argument("--prompt", default=DEFAULT_PROMPT, help="Prompt to send")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Timeout in seconds")
    return parser


def main() -> int:
    """Run the submit → poll → fetch-result workflow end to end.

    Returns 0 on success; 1 on any failure (submission error, terminal
    failure state, timeout, missing output, or missing receipt).
    """
    args = _build_arg_parser().parse_args()
    with httpx.Client() as client:
        print("🧪 Submitting GPU provider job...")
        job_id = submit_job(client, args.url, args.api_key, args.prompt)
        if not job_id:
            return 1
        print(f"✅ Job submitted: {job_id}")

        # Poll until the job reaches a terminal state or the deadline passes.
        deadline = time.time() + args.timeout
        latest = None
        while time.time() < deadline:
            latest = fetch_status(client, args.url, args.api_key, job_id)
            if not latest:
                return 1
            state = latest.get("state")
            print(f"⏳ Job state: {state}")
            if state == "COMPLETED":
                break
            if state in {"FAILED", "CANCELED", "EXPIRED"}:
                print(f"❌ Job ended in state: {state}")
                return 1
            time.sleep(POLL_INTERVAL)

        finished = latest is not None and latest.get("state") == "COMPLETED"
        if not finished:
            print("❌ Job did not complete within timeout")
            return 1

        result = fetch_result(client, args.url, args.api_key, job_id)
        if result is None:
            return 1
        output = (result.get("result") or {}).get("output")
        receipt = result.get("receipt")
        if not output:
            print("❌ Missing output in job result")
            return 1
        if not receipt:
            print("❌ Missing receipt in job result (payment/settlement not recorded)")
            return 1
        print("✅ GPU provider job completed")
        print(f"📝 Output: {output}")
        print(f"🧾 Receipt ID: {receipt.get('receipt_id')}")
        return 0
# Script entry point: exit with main()'s status code so CI can detect failures.
if __name__ == "__main__":
    sys.exit(main())