chore: remove configuration files and enhance blockchain explorer with advanced search, analytics, and export features
- Delete .aitbc.yaml.example CLI configuration template - Delete .lycheeignore link checker exclusion rules - Delete .nvmrc Node.js version specification - Add advanced search panel with filters for address, amount range, transaction type, time range, and validator - Add analytics dashboard with transaction volume, active addresses, and block time metrics - Add Chart.js integration
This commit is contained in:
217
cli/tests/gpu/gpu_test.py
Executable file
217
cli/tests/gpu/gpu_test.py
Executable file
@@ -0,0 +1,217 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
GPU Access Test - Check if miner can access local GPU resources
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import json
|
||||
import time
|
||||
import psutil
|
||||
|
||||
def check_nvidia_gpu():
    """Query nvidia-smi for installed GPUs and print a per-device report.

    Returns True when nvidia-smi ran successfully (driver present),
    False when the binary is missing or the query failed.
    """
    print("🔍 Checking NVIDIA GPU...")

    try:
        query = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,memory.total,memory.free,utilization.gpu",
             "--format=csv,noheader,nounits"],
            capture_output=True,
            text=True
        )
    except FileNotFoundError:
        # nvidia-smi ships with the driver; a missing binary means no driver.
        print("❌ nvidia-smi not found - NVIDIA drivers not installed")
        return False

    if query.returncode != 0:
        print("❌ nvidia-smi command failed")
        return False

    rows = query.stdout.strip().split('\n')
    print(f"✅ NVIDIA GPU(s) Found: {len(rows)}")

    for idx, row in enumerate(rows, 1):
        fields = row.split(', ')
        # Skip malformed rows rather than crash on an unexpected format.
        if len(fields) < 4:
            continue
        name, total_mem, free_mem, util = fields[:4]
        print(f"\n  GPU {idx}:")
        print(f"    📦 Model: {name}")
        print(f"    💾 Memory: {free_mem}/{total_mem} MB free")
        print(f"    ⚡ Utilization: {util}%")

    return True
|
||||
|
||||
def check_cuda():
    """Probe CUDA through NVML (pynvml) and print each device's memory.

    Returns True when NVML initialises and devices can be enumerated;
    False when pynvml is missing or any NVML call fails.
    """
    print("\n🔍 Checking CUDA...")

    try:
        # Imported lazily so the script still runs without pynvml installed.
        import pynvml
        pynvml.nvmlInit()

        count = pynvml.nvmlDeviceGetCount()
        print(f"✅ CUDA Available - {count} device(s)")

        for idx in range(count):
            dev = pynvml.nvmlDeviceGetHandleByIndex(idx)
            # NVML returns the name as bytes; decode for display.
            dev_name = pynvml.nvmlDeviceGetName(dev).decode('utf-8')
            mem = pynvml.nvmlDeviceGetMemoryInfo(dev)

            print(f"\n  CUDA Device {idx}:")
            print(f"    📦 Name: {dev_name}")
            print(f"    💾 Memory: {mem.free // 1024**2}/{mem.total // 1024**2} MB free")

        return True

    except ImportError:
        print("⚠️ pynvml not installed - install with: pip install pynvml")
        return False
    except Exception as e:
        # NVML raises its own error types; report and degrade gracefully.
        print(f"❌ CUDA error: {e}")
        return False
|
||||
|
||||
def check_pytorch():
    """Report PyTorch's CUDA support and per-GPU properties.

    Returns torch.cuda.is_available(), or False if torch is not installed.
    """
    print("\n🔍 Checking PyTorch CUDA...")

    try:
        import torch
    except ImportError:
        print("❌ PyTorch not installed - install with: pip install torch")
        return False

    print(f"✅ PyTorch Installed: {torch.__version__}")
    print(f"   CUDA Available: {torch.cuda.is_available()}")

    if torch.cuda.is_available():
        print(f"   CUDA Version: {torch.version.cuda}")
        print(f"   GPU Count: {torch.cuda.device_count()}")

        for idx in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(idx)
            print(f"\n  PyTorch GPU {idx}:")
            print(f"    📦 Name: {props.name}")
            print(f"    💾 Memory: {props.total_memory // 1024**2} MB")
            # major.minor is the CUDA compute capability of the device.
            print(f"    Compute: {props.major}.{props.minor}")

    return torch.cuda.is_available()
|
||||
|
||||
def run_gpu_stress_test(duration=10):
    """Stress the GPU with repeated 1000x1000 matrix multiplications.

    Args:
        duration: how many seconds to keep the GPU busy.

    Returns True if the loop completed without error, False when CUDA is
    unavailable or any GPU operation raised.
    """
    print(f"\n🔥 Running GPU Stress Test ({duration}s)...")

    try:
        import torch

        if not torch.cuda.is_available():
            print("❌ CUDA not available for stress test")
            return False

        gpu = torch.device('cuda')

        print("   ⚡ Performing matrix multiplications...")
        deadline = time.time() + duration

        while time.time() < deadline:
            lhs = torch.randn(1000, 1000, device=gpu)
            rhs = torch.randn(1000, 1000, device=gpu)
            torch.mm(lhs, rhs)
            # Kernels launch asynchronously; block so each round really runs
            # on the device before the wall-clock check.
            torch.cuda.synchronize()

        print("✅ Stress test completed successfully")
        return True

    except Exception as e:
        print(f"❌ Stress test failed: {e}")
        return False
|
||||
|
||||
def check_system_resources():
    """Print a one-shot snapshot of CPU, RAM and root-disk usage via psutil."""
    print("\n💻 System Resources:")

    # CPU — interval=1 samples utilisation over one second (blocks briefly).
    cpu_load = psutil.cpu_percent(interval=1)
    print(f"  🖥️ CPU Usage: {cpu_load}%")
    print(f"  🧠 CPU Cores: {psutil.cpu_count()} logical, {psutil.cpu_count(logical=False)} physical")

    # Memory
    mem = psutil.virtual_memory()
    print(f"  💾 RAM: {mem.used // 1024**2}/{mem.total // 1024**2} MB used ({mem.percent}%)")

    # Disk usage of the root filesystem.
    root = psutil.disk_usage('/')
    print(f"  💿 Disk: {root.used // 1024**3}/{root.total // 1024**3} GB used")
|
||||
|
||||
def main():
    """CLI entry point: survey system resources and GPU capability, print a
    summary, optionally run a stress test, then check the miner service.

    Fix: the service check previously used a bare ``except:``, which also
    swallowed KeyboardInterrupt/SystemExit; it now catches only OSError.
    """
    parser = argparse.ArgumentParser(description="GPU Access Test for AITBC Miner")
    parser.add_argument("--stress", type=int, default=0, help="Run stress test for N seconds")
    parser.add_argument("--all", action="store_true", help="Run all tests including stress")

    args = parser.parse_args()

    print("🚀 AITBC GPU Access Test")
    print("=" * 60)

    # Check system resources
    check_system_resources()

    # Check GPU availability through three independent probes; any one
    # succeeding is treated as "GPU usable".
    has_nvidia = check_nvidia_gpu()
    has_cuda = check_cuda()
    has_pytorch = check_pytorch()

    # Summary
    print("\n📊 SUMMARY")
    print("=" * 60)

    if has_nvidia or has_cuda or has_pytorch:
        print("✅ GPU is available for mining!")

        if args.stress > 0 or args.all:
            # --all without --stress falls back to a 10 second stress run.
            run_gpu_stress_test(args.stress if args.stress > 0 else 10)

        print("\n💡 Miner can run GPU-intensive tasks:")
        print("  • Model inference (LLaMA, Stable Diffusion)")
        print("  • Training jobs")
        print("  • Batch processing")

    else:
        print("❌ No GPU available - miner will run in CPU-only mode")
        print("\n💡 To enable GPU mining:")
        print("  1. Install NVIDIA drivers")
        print("  2. Install CUDA toolkit")
        print("  3. Install PyTorch with CUDA: pip install torch")

    # Check if miner service is running
    print("\n🔍 Checking miner service...")
    try:
        result = subprocess.run(
            ["systemctl", "is-active", "aitbc-gpu-miner"],
            capture_output=True,
            text=True
        )

        if result.stdout.strip() == "active":
            print("✅ Miner service is running")
        else:
            print("⚠️ Miner service is not running")
            print("  Start with: sudo systemctl start aitbc-gpu-miner")
    except OSError:
        # Only OS-level failures (systemctl missing, exec error) are expected;
        # the previous bare `except:` hid Ctrl-C and real bugs too.
        print("⚠️ Could not check miner service status")
|
||||
|
||||
# Script entry point: run the GPU access test when executed directly.
if __name__ == "__main__":
    main()
|
||||
286
cli/tests/gpu/miner_gpu_test.py
Executable file
286
cli/tests/gpu/miner_gpu_test.py
Executable file
@@ -0,0 +1,286 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Miner GPU Test - Test if the miner service can access and utilize GPU
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import httpx
|
||||
import json
|
||||
import time
|
||||
import sys
|
||||
|
||||
# Configuration
# Coordinator endpoint and credentials used when no CLI overrides are given.
DEFAULT_COORDINATOR = "http://localhost:8000"
# NOTE(review): literal "${MINER_API_KEY}" looks like a template placeholder —
# presumably substituted at deploy time; confirm it is not sent verbatim.
DEFAULT_API_KEY = "${MINER_API_KEY}"
# Identifier this test registers/heartbeats under.
DEFAULT_MINER_ID = "localhost-gpu-miner"
|
||||
|
||||
def test_miner_registration(coordinator_url):
    """Register this miner with the coordinator, advertising GPU capabilities.

    Returns True on HTTP 200, False on any error or non-200 response.
    """
    print("📝 Testing Miner Registration...")

    # Static capability sheet describing the GPU this test pretends to own.
    gpu_capabilities = {
        "gpu": {
            "model": "NVIDIA GeForce RTX 4060 Ti",
            "memory_gb": 16,
            "cuda_version": "12.1",
            "compute_capability": "8.9"
        },
        "compute": {
            "type": "GPU",
            "platform": "CUDA",
            "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
            "max_concurrent_jobs": 1
        }
    }

    try:
        with httpx.Client() as client:
            response = client.post(
                f"{coordinator_url}/v1/miners/register?miner_id={DEFAULT_MINER_ID}",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": DEFAULT_API_KEY
                },
                json={"capabilities": gpu_capabilities}
            )
    except Exception as e:
        print(f"❌ Error: {e}")
        return False

    if response.status_code != 200:
        print(f"❌ Registration failed: {response.status_code}")
        print(f"   Response: {response.text}")
        return False

    print("✅ Miner registered with GPU capabilities")
    print(f"   GPU Model: {gpu_capabilities['gpu']['model']}")
    print(f"   Memory: {gpu_capabilities['gpu']['memory_gb']} GB")
    print(f"   CUDA: {gpu_capabilities['gpu']['cuda_version']}")
    return True
|
||||
|
||||
def test_job_processing(coordinator_url):
    """Test if miner can process a GPU job.

    End-to-end round trip against the coordinator: submit a job as a client,
    poll for it as the miner, simulate GPU work, then post the result.
    Returns True only if every step succeeds; the ordering of the HTTP calls
    is significant (the polled job must be the one just submitted).
    """
    print("\n⚙️ Testing GPU Job Processing...")

    # First submit a test job
    print("  1. Submitting test job...")
    try:
        with httpx.Client() as client:
            # Submit job as client.
            # NOTE(review): "${CLIENT_API_KEY}" looks like a template
            # placeholder sent verbatim — confirm it is substituted upstream.
            job_response = client.post(
                f"{coordinator_url}/v1/jobs",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": "${CLIENT_API_KEY}"
                },
                json={
                    "payload": {
                        "type": "inference",
                        "task": "gpu-test",
                        "model": "test-gpu-model",
                        "parameters": {
                            "require_gpu": True,
                            "memory_gb": 8
                        }
                    },
                    # Job expires if not picked up within 5 minutes.
                    "ttl_seconds": 300
                }
            )

            # 201 Created is the only accepted outcome for submission.
            if job_response.status_code != 201:
                print(f"❌ Failed to submit job: {job_response.status_code}")
                return False

            job_id = job_response.json()['job_id']
            print(f"  ✅ Job submitted: {job_id}")

            # Poll for the job as miner (long-poll up to 5 seconds).
            print("  2. Polling for job...")
            poll_response = client.post(
                f"{coordinator_url}/v1/miners/poll",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": DEFAULT_API_KEY
                },
                json={"max_wait_seconds": 5}
            )

            if poll_response.status_code == 200:
                job = poll_response.json()
                print(f"  ✅ Job received: {job['job_id']}")

                # Simulate GPU processing — no real GPU work is done here.
                print("  3. Simulating GPU processing...")
                time.sleep(2)

                # Submit result with fabricated execution metrics.
                print("  4. Submitting result...")
                result_response = client.post(
                    f"{coordinator_url}/v1/miners/{job['job_id']}/result",
                    headers={
                        "Content-Type": "application/json",
                        "X-Api-Key": DEFAULT_API_KEY
                    },
                    json={
                        "result": {
                            "status": "completed",
                            "output": "GPU task completed successfully",
                            "execution_time_ms": 2000,
                            "gpu_utilization": 85,
                            "memory_used_mb": 4096
                        },
                        "metrics": {
                            "compute_time": 2.0,
                            "energy_used": 0.05,
                            # Must match the amount printed below.
                            "aitbc_earned": 25.0
                        }
                    }
                )

                if result_response.status_code == 200:
                    print("  ✅ Result submitted successfully")
                    print(f"  💰 Earned: 25.0 AITBC")
                    return True
                else:
                    print(f"❌ Failed to submit result: {result_response.status_code}")
                    return False

            # 204 No Content: coordinator had no job for us within the wait.
            elif poll_response.status_code == 204:
                print("  ⚠️ No jobs available")
                return False
            else:
                print(f"❌ Poll failed: {poll_response.status_code}")
                return False

    except Exception as e:
        # Broad by design: any network/JSON failure marks the test failed.
        print(f"❌ Error: {e}")
        return False
|
||||
|
||||
def test_gpu_heartbeat(coordinator_url):
    """Send one heartbeat carrying synthetic GPU telemetry.

    Returns True on HTTP 200, False on any error or other status.
    """
    print("\n💓 Testing GPU Heartbeat...")

    # Telemetry payload mirroring what a live miner would report.
    heartbeat_data = {
        "status": "ONLINE",
        "inflight": 0,
        "metadata": {
            "last_seen": time.time(),
            "gpu_utilization": 45,
            "gpu_memory_used": 8192,
            "gpu_temperature": 68,
            "gpu_power_usage": 220,
            "cuda_version": "12.1",
            "driver_version": "535.104.05"
        }
    }

    try:
        with httpx.Client() as client:
            response = client.post(
                f"{coordinator_url}/v1/miners/heartbeat?miner_id={DEFAULT_MINER_ID}",
                headers={
                    "Content-Type": "application/json",
                    "X-Api-Key": DEFAULT_API_KEY
                },
                json=heartbeat_data
            )
    except Exception as e:
        print(f"❌ Error: {e}")
        return False

    if response.status_code != 200:
        print(f"❌ Heartbeat failed: {response.status_code}")
        return False

    meta = heartbeat_data['metadata']
    print("✅ GPU heartbeat sent successfully")
    print(f"   GPU Utilization: {meta['gpu_utilization']}%")
    print(f"   Memory Used: {meta['gpu_memory_used']} MB")
    print(f"   Temperature: {meta['gpu_temperature']}°C")
    return True
|
||||
|
||||
def check_blockchain_status(coordinator_url):
    """Fetch blocks from the explorer API and print details of the first three.

    Returns True on HTTP 200, False on any error or other status.
    """
    print("\n📦 Checking Blockchain Status...")

    try:
        with httpx.Client() as client:
            response = client.get(f"{coordinator_url}/v1/explorer/blocks")
    except Exception as e:
        print(f"❌ Error: {e}")
        return False

    if response.status_code != 200:
        print(f"❌ Failed to get blocks: {response.status_code}")
        return False

    blocks = response.json()
    print(f"✅ Found {len(blocks['items'])} blocks")

    # Show the first three items (presumably newest-first — TODO confirm
    # the explorer's ordering).
    for block in blocks['items'][:3]:
        print(f"\n  Block {block['height']}:")
        print(f"    Hash: {block['hash']}")
        print(f"    Proposer: {block['proposer']}")
        print(f"    Time: {block['timestamp']}")

    return True
|
||||
|
||||
def main():
    """Run the miner GPU test suite against a coordinator and print a scorecard."""
    parser = argparse.ArgumentParser(description="Test Miner GPU Access")
    parser.add_argument("--url", help="Coordinator URL")
    parser.add_argument("--full", action="store_true", help="Run full test suite")

    args = parser.parse_args()

    coordinator_url = args.url if args.url else DEFAULT_COORDINATOR

    print("🚀 AITBC Miner GPU Test")
    print("=" * 60)
    print(f"Coordinator: {coordinator_url}")
    print(f"Miner ID: {DEFAULT_MINER_ID}")
    print()

    # Base suite; --full adds the slower end-to-end checks.
    tests = [
        ("Miner Registration", lambda: test_miner_registration(coordinator_url)),
        ("GPU Heartbeat", lambda: test_gpu_heartbeat(coordinator_url)),
    ]
    if args.full:
        tests += [
            ("Job Processing", lambda: test_job_processing(coordinator_url)),
            ("Blockchain Status", lambda: check_blockchain_status(coordinator_url)),
        ]

    results = []
    for test_name, test_func in tests:
        print(f"🧪 Running: {test_name}")
        results.append((test_name, test_func()))
        print()

    # Summary
    print("📊 TEST RESULTS")
    print("=" * 60)

    passed = sum(1 for _, ok in results if ok)
    for test_name, ok in results:
        print(f"{'✅ PASS' if ok else '❌ FAIL'} {test_name}")

    print(f"\nScore: {passed}/{len(results)} tests passed")

    if passed == len(results):
        print("\n🎉 All tests passed! Miner is ready for GPU mining.")
        print("\n💡 Next steps:")
        print("  1. Start continuous mining: python3 cli/miner.py mine")
        print("  2. Monitor earnings: cd home/miner && python3 wallet.py balance")
    else:
        print("\n⚠️ Some tests failed. Check the errors above.")
|
||||
|
||||
# Script entry point: run the miner GPU test suite when executed directly.
if __name__ == "__main__":
    main()
|
||||
84
cli/tests/gpu/test_gpu_access.py
Executable file
84
cli/tests/gpu/test_gpu_access.py
Executable file
@@ -0,0 +1,84 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Simple GPU Access Test - Verify miner can access GPU
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
def main():
    """Run a quick end-to-end GPU availability check.

    Steps: driver probe via nvidia-smi, CUDA visibility via PyTorch, a real
    matmul on the device, then a systemd check for the miner service.
    Exits with status 1 on the first hard failure.

    Fix: the service check previously used a bare ``except:``, which also
    swallowed SystemExit/KeyboardInterrupt; it now catches only OSError.
    """
    print("🔍 GPU Access Test for AITBC Miner")
    print("=" * 50)

    # Check if nvidia-smi is available
    print("\n1. Checking NVIDIA GPU...")
    try:
        result = subprocess.run(
            ["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
            capture_output=True,
            text=True
        )

        if result.returncode == 0:
            gpu_info = result.stdout.strip()
            print(f"✅ GPU Found: {gpu_info}")
        else:
            print("❌ No NVIDIA GPU detected")
            sys.exit(1)
    except FileNotFoundError:
        print("❌ nvidia-smi not found")
        sys.exit(1)

    # Check CUDA with PyTorch
    print("\n2. Checking CUDA with PyTorch...")
    try:
        import torch

        if torch.cuda.is_available():
            print(f"✅ CUDA Available: {torch.version.cuda}")
            print(f"   GPU Count: {torch.cuda.device_count()}")

            device = torch.device('cuda')

            # Prove we can run a kernel, not just enumerate devices.
            print("\n3. Testing GPU computation...")
            a = torch.randn(1000, 1000, device=device)
            b = torch.randn(1000, 1000, device=device)
            torch.mm(a, b)

            print("✅ GPU computation successful")

            # Check memory actually allocated on the device by the test.
            memory_allocated = torch.cuda.memory_allocated() / 1024**2
            print(f"   Memory used: {memory_allocated:.2f} MB")

        else:
            print("❌ CUDA not available in PyTorch")
            sys.exit(1)

    except ImportError:
        print("❌ PyTorch not installed")
        sys.exit(1)

    # Check miner service
    print("\n4. Checking miner service...")
    try:
        result = subprocess.run(
            ["systemctl", "is-active", "aitbc-gpu-miner"],
            capture_output=True,
            text=True
        )

        if result.stdout.strip() == "active":
            print("✅ Miner service is running")
        else:
            print("⚠️ Miner service is not running")
    except OSError:
        # Only OS-level failures (systemctl missing, exec error) are expected;
        # the previous bare `except:` also hid Ctrl-C and sys.exit().
        print("⚠️ Could not check miner service")

    print("\n✅ GPU access test completed!")
    print("\n💡 Your GPU is ready for mining AITBC!")
    print("   Start mining with: python3 cli/miner.py mine")
|
||||
|
||||
# Script entry point: run the simple GPU access test when executed directly.
if __name__ == "__main__":
    main()
|
||||
294
cli/tests/gpu/test_gpu_marketplace_bids.py
Normal file
294
cli/tests/gpu/test_gpu_marketplace_bids.py
Normal file
@@ -0,0 +1,294 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
GPU Marketplace Bids Test
|
||||
Tests complete marketplace bid workflow: offers listing → bid submission → bid tracking.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
|
||||
# Default coordinator endpoint and request parameters; all overridable via CLI flags.
DEFAULT_COORDINATOR = "http://localhost:8000"
# NOTE(review): literal "${CLIENT_API_KEY}" looks like a template placeholder —
# presumably substituted at deploy time; confirm it is not sent verbatim.
DEFAULT_API_KEY = "${CLIENT_API_KEY}"
# Provider identity used for submitted bids.
DEFAULT_PROVIDER = "test_miner_123"
# Default bid size (units) and price per unit.
DEFAULT_CAPACITY = 100
DEFAULT_PRICE = 0.05
# Overall timeout budget (seconds) for status monitoring.
DEFAULT_TIMEOUT = 300
# Seconds between bid-status polls in monitor_bid_status().
POLL_INTERVAL = 5
|
||||
|
||||
|
||||
def list_offers(client: httpx.Client, base_url: str, api_key: str,
                status: Optional[str] = None, gpu_model: Optional[str] = None) -> Optional[dict]:
    """List marketplace offers, optionally filtered by status and GPU model.

    Returns the parsed JSON listing, or None on any non-200 response.
    """
    query = {"limit": 20}
    if status:
        query["status"] = status
    if gpu_model:
        query["gpu_model"] = gpu_model

    resp = client.get(
        f"{base_url}/v1/marketplace/offers",
        headers={"X-Api-Key": api_key},
        params=query,
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Failed to list offers: {resp.status_code} {resp.text}")
    return None
|
||||
|
||||
|
||||
def submit_bid(client: httpx.Client, base_url: str, api_key: str,
               provider: str, capacity: int, price: float,
               notes: Optional[str] = None) -> Optional[str]:
    """Submit a marketplace bid.

    Returns the new bid's id on success, or None on failure.
    """
    body = {"provider": provider, "capacity": capacity, "price": price}
    if notes:
        body["notes"] = notes

    resp = client.post(
        f"{base_url}/v1/marketplace/bids",
        headers={"X-Api-Key": api_key, "Content-Type": "application/json"},
        json=body,
        timeout=10,
    )
    # The API acknowledges bids asynchronously with 202 Accepted.
    if resp.status_code == 202:
        return resp.json().get("id")
    print(f"❌ Bid submission failed: {resp.status_code} {resp.text}")
    return None
|
||||
|
||||
|
||||
def list_bids(client: httpx.Client, base_url: str, api_key: str,
              status: Optional[str] = None, provider: Optional[str] = None) -> Optional[dict]:
    """List marketplace bids, optionally filtered by status and provider.

    Returns the parsed JSON listing, or None on any non-200 response.
    """
    query = {"limit": 20}
    if status:
        query["status"] = status
    if provider:
        query["provider"] = provider

    resp = client.get(
        f"{base_url}/v1/marketplace/bids",
        headers={"X-Api-Key": api_key},
        params=query,
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Failed to list bids: {resp.status_code} {resp.text}")
    return None
|
||||
|
||||
|
||||
def get_bid_details(client: httpx.Client, base_url: str, api_key: str, bid_id: str) -> Optional[dict]:
    """Fetch one bid by id; return its JSON document, or None on failure."""
    resp = client.get(
        f"{base_url}/v1/marketplace/bids/{bid_id}",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Failed to get bid details: {resp.status_code} {resp.text}")
    return None
|
||||
|
||||
|
||||
def get_marketplace_stats(client: httpx.Client, base_url: str, api_key: str) -> Optional[dict]:
    """Fetch aggregate marketplace statistics; None on any non-200 response."""
    resp = client.get(
        f"{base_url}/v1/marketplace/stats",
        headers={"X-Api-Key": api_key},
        timeout=10,
    )
    if resp.status_code == 200:
        return resp.json()
    print(f"❌ Failed to get marketplace stats: {resp.status_code} {resp.text}")
    return None
|
||||
|
||||
|
||||
def monitor_bid_status(client: httpx.Client, base_url: str, api_key: str,
                       bid_id: str, timeout: int) -> Optional[str]:
    """Poll a bid until it reaches a terminal status or `timeout` seconds elapse.

    Returns "accepted" or "rejected" on a terminal status, None on fetch
    failure or timeout.
    """
    deadline = time.time() + timeout

    while time.time() < deadline:
        details = get_bid_details(client, base_url, api_key, bid_id)
        if not details:
            return None

        current = details.get("status")
        print(f"⏳ Bid status: {current}")

        # "accepted"/"rejected" are terminal; anything else keeps polling.
        if current in {"accepted", "rejected"}:
            return current

        time.sleep(POLL_INTERVAL)

    print("❌ Bid status monitoring timed out")
    return None
|
||||
|
||||
|
||||
def test_basic_workflow(client: httpx.Client, base_url: str, api_key: str,
                        provider: str, capacity: int, price: float) -> bool:
    """Test basic marketplace bid workflow.

    Walks the happy path end to end: list open offers, submit one bid,
    read it back by id, then confirm it appears in the provider's bid list.
    Returns True only if every step succeeds.
    """
    print("🧪 Testing basic marketplace bid workflow...")

    # Step 1: List available offers
    print("📋 Listing marketplace offers...")
    offers = list_offers(client, base_url, api_key, status="open")
    if not offers:
        print("❌ Failed to list offers")
        return False

    offers_list = offers.get("offers", [])
    print(f"✅ Found {len(offers_list)} open offers")

    if offers_list:
        print("📊 Sample offers:")
        for i, offer in enumerate(offers_list[:3]):  # Show first 3 offers
            print(f"  {i+1}. {offer.get('gpu_model', 'Unknown')} - ${offer.get('price', 0):.4f}/hr - {offer.get('provider', 'Unknown')}")

    # Step 2: Submit bid
    print(f"💰 Submitting bid: {capacity} units at ${price:.4f}/unit from {provider}")
    bid_id = submit_bid(client, base_url, api_key, provider, capacity, price,
                        notes="Test bid for GPU marketplace")
    if not bid_id:
        print("❌ Failed to submit bid")
        return False

    print(f"✅ Bid submitted: {bid_id}")

    # Step 3: Get bid details to confirm the server persisted it.
    print("📄 Getting bid details...")
    bid_details = get_bid_details(client, base_url, api_key, bid_id)
    if not bid_details:
        print("❌ Failed to get bid details")
        return False

    print(f"✅ Bid details: {bid_details['provider']} - {bid_details['capacity']} units - ${bid_details['price']:.4f}/unit - {bid_details['status']}")

    # Step 4: List bids to verify it appears
    print("📋 Listing bids to verify...")
    bids = list_bids(client, base_url, api_key, provider=provider)
    if not bids:
        print("❌ Failed to list bids")
        return False

    bids_list = bids.get("bids", [])
    # Linear scan is fine: the listing is capped at 20 entries by list_bids.
    our_bid = next((b for b in bids_list if b.get("id") == bid_id), None)
    if not our_bid:
        print("❌ Submitted bid not found in bid list")
        return False

    print(f"✅ Bid found in list: {our_bid['status']}")

    return True
|
||||
|
||||
|
||||
def test_competitive_bidding(client: httpx.Client, base_url: str, api_key: str) -> bool:
    """Submit bids from three providers at decreasing prices and list them.

    Returns True if every bid was accepted by the API and the listing call
    succeeded afterwards.
    """
    print("🧪 Testing competitive bidding scenario...")

    # Submit one bid per provider, each $0.01/unit cheaper than the last.
    providers = ["provider_alpha", "provider_beta", "provider_gamma"]
    bid_ids = []

    for rank, provider in enumerate(providers):
        price = 0.05 - (rank * 0.01)
        print(f"💰 {provider} submitting bid at ${price:.4f}/unit")

        bid_id = submit_bid(client, base_url, api_key, provider, 50, price,
                            notes=f"Competitive bid from {provider}")
        if not bid_id:
            print(f"❌ {provider} failed to submit bid")
            return False

        bid_ids.append((provider, bid_id))
        # Small delay between submissions.
        time.sleep(1)

    print(f"✅ All {len(bid_ids)} competitive bids submitted")

    # List all bids to see the competition
    all_bids = list_bids(client, base_url, api_key)
    if not all_bids:
        print("❌ Failed to list all bids")
        return False

    competitive = [b for b in all_bids.get("bids", []) if b.get("provider") in providers]

    print(f"📊 Found {len(competitive)} competitive bids:")
    for bid in sorted(competitive, key=lambda b: b.get("price", 0)):
        print(f"  {bid['provider']}: ${bid['price']:.4f}/unit - {bid['status']}")

    return True
|
||||
|
||||
|
||||
def test_marketplace_stats(client: httpx.Client, base_url: str, api_key: str) -> bool:
    """Fetch marketplace statistics and print the headline numbers.

    Returns True when the stats endpoint answered, False otherwise.
    """
    print("🧪 Testing marketplace statistics...")

    stats = get_marketplace_stats(client, base_url, api_key)
    if not stats:
        print("❌ Failed to get marketplace stats")
        return False

    # Missing fields default to 0 so a partial response still prints.
    total_offers = stats.get('totalOffers', 0)
    open_capacity = stats.get('openCapacity', 0)
    average_price = stats.get('averagePrice', 0)
    active_bids = stats.get('activeBids', 0)

    print(f"📊 Marketplace Statistics:")
    print(f"  Total offers: {total_offers}")
    print(f"  Open capacity: {open_capacity}")
    print(f"  Average price: ${average_price:.4f}")
    print(f"  Active bids: {active_bids}")

    return True
|
||||
|
||||
|
||||
def main() -> int:
    """Parse CLI options, run the selected marketplace scenarios, return exit code."""
    parser = argparse.ArgumentParser(description="GPU marketplace bids end-to-end test")
    parser.add_argument("--url", default=DEFAULT_COORDINATOR, help="Coordinator base URL")
    parser.add_argument("--api-key", default=DEFAULT_API_KEY, help="Client API key")
    parser.add_argument("--provider", default=DEFAULT_PROVIDER, help="Provider ID for bids")
    parser.add_argument("--capacity", type=int, default=DEFAULT_CAPACITY, help="Bid capacity")
    parser.add_argument("--price", type=float, default=DEFAULT_PRICE, help="Price per unit")
    parser.add_argument("--timeout", type=int, default=DEFAULT_TIMEOUT, help="Timeout in seconds")
    parser.add_argument("--test", choices=["basic", "competitive", "stats", "all"],
                        default="all", help="Test scenario to run")
    args = parser.parse_args()

    with httpx.Client() as client:
        print("🚀 Starting GPU marketplace bids test...")
        print(f"📍 Coordinator: {args.url}")
        print(f"🆔 Provider: {args.provider}")
        print(f"💰 Bid: {args.capacity} units at ${args.price:.4f}/unit")
        print()

        success = True

        # Every selected scenario runs unconditionally; one failure does not
        # short-circuit the rest, it only flips the final exit code.
        if args.test in ("basic", "all"):
            ok = test_basic_workflow(client, args.url, args.api_key,
                                     args.provider, args.capacity, args.price)
            success = success and ok
            print()

        if args.test in ("competitive", "all"):
            ok = test_competitive_bidding(client, args.url, args.api_key)
            success = success and ok
            print()

        if args.test in ("stats", "all"):
            ok = test_marketplace_stats(client, args.url, args.api_key)
            success = success and ok
            print()

        if success:
            print("✅ All marketplace bid tests completed successfully!")
            return 0
        print("❌ Some marketplace bid tests failed!")
        return 1
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())
|
||||
Reference in New Issue
Block a user