chore: enhance .gitignore and remove obsolete documentation files

- Reorganize .gitignore with categorized sections for better maintainability
- Add comprehensive ignore patterns for Python, Node.js, databases, logs, and build artifacts
- Add project-specific ignore rules for coordinator, explorer, and deployment files
- Remove outdated documentation: BITCOIN-WALLET-SETUP.md, LOCAL_ASSETS_SUMMARY.md, README-CONTAINER-DEPLOYMENT.md, README-DOMAIN-DEPLOYMENT.md
This commit is contained in:
oib
2026-01-24 14:44:51 +01:00
parent 99bf335970
commit 9b9c5beb23
214 changed files with 25558 additions and 171 deletions

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Deploy GPU Miner to AITBC Container - All in One
#
# Copies the GPU registry/miner scripts to the "aitbc" SSH host, provisions
# a dedicated virtualenv, installs both components as systemd units, and
# verifies the GPU registered itself. Requires the "aitbc" SSH alias and
# passwordless sudo on the remote side.
#
# -e: abort on any command failure; -u: unset variables are errors;
# pipefail: a pipeline fails if ANY stage fails (protects the final
# curl | python3 verification step, which previously could mask a curl error).
set -euo pipefail

echo "🚀 Deploying GPU Miner to AITBC Container..."

# Step 1: Copy files
echo "1. Copying GPU scripts..."
scp -o StrictHostKeyChecking=no /home/oib/windsurf/aitbc/gpu_registry_demo.py aitbc:/home/oib/
scp -o StrictHostKeyChecking=no /home/oib/windsurf/aitbc/gpu_miner_with_wait.py aitbc:/home/oib/

# Step 2: Install Python and deps
echo "2. Installing Python and dependencies..."
ssh aitbc 'sudo apt-get update -qq'
ssh aitbc 'sudo apt-get install -y -qq python3 python3-venv python3-pip'
ssh aitbc 'python3 -m venv /home/oib/.venv-gpu'
ssh aitbc '/home/oib/.venv-gpu/bin/pip install -q fastapi uvicorn httpx psutil'

# Step 3: Create GPU registry service.
# The quoted 'EOF' delimiter makes the heredoc literal: nothing is expanded
# locally, so the unit file arrives on the remote host byte-for-byte.
echo "3. Creating GPU registry service..."
ssh aitbc "sudo tee /etc/systemd/system/aitbc-gpu-registry.service >/dev/null <<'EOF'
[Unit]
Description=AITBC GPU Registry
After=network.target
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/home/oib/.venv-gpu/bin/python /home/oib/gpu_registry_demo.py
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF"

# Step 4: Start GPU registry
echo "4. Starting GPU registry..."
ssh aitbc 'sudo systemctl daemon-reload'
ssh aitbc 'sudo systemctl enable --now aitbc-gpu-registry.service'

# Step 5: Create GPU miner service (ordered after the registry via After=/Wants=)
echo "5. Creating GPU miner service..."
ssh aitbc "sudo tee /etc/systemd/system/aitbc-gpu-miner.service >/dev/null <<'EOF'
[Unit]
Description=AITBC GPU Miner Client
After=network.target aitbc-gpu-registry.service
Wants=aitbc-gpu-registry.service
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/home/oib/.venv-gpu/bin/python /home/oib/gpu_miner_with_wait.py
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF"

# Step 6: Start GPU miner
echo "6. Starting GPU miner..."
ssh aitbc 'sudo systemctl daemon-reload'
ssh aitbc 'sudo systemctl enable --now aitbc-gpu-miner.service'

# Step 7: Check services
echo "7. Checking services..."
echo -e "\n=== GPU Registry Service ==="
ssh aitbc 'sudo systemctl status aitbc-gpu-registry.service --no-pager'
echo -e "\n=== GPU Miner Service ==="
ssh aitbc 'sudo systemctl status aitbc-gpu-miner.service --no-pager'

# Step 8: Verify GPU registration (short grace period for the first heartbeat)
echo -e "\n8. Verifying GPU registration..."
sleep 3
echo "   curl http://10.1.223.93:8091/miners/list"
curl -s http://10.1.223.93:8091/miners/list | python3 -c "import sys,json; data=json.load(sys.stdin); print(f'✅ Found {len(data.get(\"gpus\", []))} GPU(s)'); [print(f' - {gpu[\"capabilities\"][\"gpu\"][\"model\"]} ({gpu[\"capabilities\"][\"gpu\"][\"memory_gb\"]}GB)') for gpu in data.get('gpus', [])]"
echo -e "\n✅ Deployment complete!"
echo "GPU Registry: http://10.1.223.93:8091"
echo "GPU Miner: Running and sending heartbeats"

View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Deploy GPU Miner to AITBC Container (incus variant)
#
# Pushes the miner/registry scripts into the "aitbc" incus container,
# installs dependencies, and sets both up as systemd services inside it.
#
# Fix: the script previously had no error handling at all, so a failed
# file push or pip install would cascade into confusing later failures.
set -euo pipefail

echo "🚀 Deploying GPU Miner to AITBC Container..."

# Check if container is accessible (fails fast under set -e if not)
echo "1. Checking container access..."
sudo incus exec aitbc -- whoami

# Copy GPU miner files
echo "2. Copying GPU miner files..."
sudo incus file push /home/oib/windsurf/aitbc/gpu_miner_with_wait.py aitbc/home/oib/
sudo incus file push /home/oib/windsurf/aitbc/gpu_registry_demo.py aitbc/home/oib/

# Install dependencies (system-wide pip inside the container)
echo "3. Installing dependencies..."
sudo incus exec aitbc -- pip install httpx fastapi uvicorn psutil

# Create GPU miner service: write the unit locally, push it in, then install.
# The quoted 'EOF' keeps the heredoc literal (no local expansion).
echo "4. Creating GPU miner service..."
cat << 'EOF' | sudo tee /tmp/gpu-miner.service
[Unit]
Description=AITBC GPU Miner Client
After=network.target
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/usr/bin/python3 gpu_miner_with_wait.py
Restart=always
RestartSec=30
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
sudo incus file push /tmp/gpu-miner.service aitbc/tmp/
sudo incus exec aitbc -- sudo mv /tmp/gpu-miner.service /etc/systemd/system/
sudo incus exec aitbc -- sudo systemctl daemon-reload
sudo incus exec aitbc -- sudo systemctl enable gpu-miner.service
sudo incus exec aitbc -- sudo systemctl start gpu-miner.service

# Create GPU registry service (same push-and-install pattern)
echo "5. Creating GPU registry service..."
cat << 'EOF' | sudo tee /tmp/gpu-registry.service
[Unit]
Description=AITBC GPU Registry
After=network.target
[Service]
Type=simple
User=oib
WorkingDirectory=/home/oib
ExecStart=/usr/bin/python3 gpu_registry_demo.py
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
sudo incus file push /tmp/gpu-registry.service aitbc/tmp/
sudo incus exec aitbc -- sudo mv /tmp/gpu-registry.service /etc/systemd/system/
sudo incus exec aitbc -- sudo systemctl daemon-reload
sudo incus exec aitbc -- sudo systemctl enable gpu-registry.service
sudo incus exec aitbc -- sudo systemctl start gpu-registry.service

# Check services
echo "6. Checking services..."
echo "GPU Miner Service:"
sudo incus exec aitbc -- sudo systemctl status gpu-miner.service --no-pager
echo -e "\nGPU Registry Service:"
sudo incus exec aitbc -- sudo systemctl status gpu-registry.service --no-pager

# Show access URLs
echo -e "\n✅ Deployment complete!"
echo "Access URLs:"
echo " - Container IP: 10.1.223.93"
echo " - GPU Registry: http://10.1.223.93:8091/miners/list"
echo " - Coordinator API: http://10.1.223.93:8000"
echo -e "\nTo check GPU status:"
echo " curl http://10.1.223.93:8091/miners/list"

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python3
"""
GPU Exchange Integration Demo
Shows how the GPU miner is integrated with the exchange

Probes the GPU registry, trade exchange, and blockchain node over HTTP and
prints a human-readable status report plus example API calls.

Fixes: bare `except:` clauses (which also swallowed KeyboardInterrupt /
SystemExit) are narrowed to `except Exception`, and each probe now carries
an explicit 5-second timeout so the demo cannot hang on a dead service.
"""
import json
import httpx
import subprocess
import time
from datetime import datetime
print("🔗 AITBC GPU Exchange Integration")
print("=" * 50)
# Check GPU Registry
print("\n1. 📊 Checking GPU Registry...")
try:
    response = httpx.get("http://localhost:8091/miners/list", timeout=5)
    if response.status_code == 200:
        data = response.json()
        gpus = data.get("gpus", [])
        print(f" Found {len(gpus)} registered GPU(s)")
        for gpu in gpus:
            print(f"\n 🎮 GPU Details:")
            print(f" Model: {gpu['capabilities']['gpu']['model']}")
            print(f" Memory: {gpu['capabilities']['gpu']['memory_gb']} GB")
            print(f" CUDA: {gpu['capabilities']['gpu']['cuda_version']}")
            print(f" Status: {gpu.get('status', 'Unknown')}")
            print(f" Region: {gpu.get('region', 'Unknown')}")
    else:
        print(" ❌ GPU Registry not accessible")
except Exception as e:
    print(f" ❌ Error: {e}")
# Check Exchange
print("\n2. 💰 Checking Trade Exchange...")
try:
    response = httpx.get("http://localhost:3002", timeout=5)
    if response.status_code == 200:
        print(" ✅ Trade Exchange is running")
        print(" 🌐 URL: http://localhost:3002")
    else:
        print(" ❌ Trade Exchange not responding")
except Exception:
    print(" ❌ Trade Exchange not accessible")
# Check Blockchain
print("\n3. ⛓️ Checking Blockchain Node...")
try:
    response = httpx.get("http://localhost:9080/rpc/head", timeout=5)
    if response.status_code == 200:
        data = response.json()
        print(f" ✅ Blockchain Node active")
        print(f" Block Height: {data.get('height', 'Unknown')}")
        print(f" Block Hash: {data.get('hash', 'Unknown')[:16]}...")
    else:
        print(" ❌ Blockchain Node not responding")
except Exception:
    print(" ❌ Blockchain Node not accessible")
# Show Integration Points
print("\n4. 🔌 Integration Points:")
print(" • GPU Registry: http://localhost:8091/miners/list")
print(" • Trade Exchange: http://localhost:3002")
print(" • Blockchain RPC: http://localhost:9080")
print(" • GPU Marketplace: Exchange > Browse GPU Marketplace")
# Show API Usage
print("\n5. 📡 API Usage Examples:")
print("\n Get registered GPUs:")
print(" curl http://localhost:8091/miners/list")
print("\n Get GPU details:")
print(" curl http://localhost:8091/miners/localhost-gpu-miner")
print("\n Get blockchain info:")
print(" curl http://localhost:9080/rpc/head")
# Show Current Status
print("\n6. 📈 Current System Status:")
print(" ✅ GPU Miner: Running (systemd)")
print(" ✅ GPU Registry: Running on port 8091")
print(" ✅ Trade Exchange: Running on port 3002")
print(" ✅ Blockchain Node: Running on port 9080")
print("\n" + "=" * 50)
print("🎯 GPU is successfully integrated with the exchange!")
print("\nNext steps:")
print("1. Open http://localhost:3002 in your browser")
print("2. Click 'Browse GPU Marketplace'")
print("3. View the registered RTX 4060 Ti GPU")
print("4. Purchase GPU compute time with AITBC tokens")

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
"""
GPU Miner Registration Demo
Shows what data would be sent to register the GPU
"""
import json
from datetime import datetime

# GPU Information from nvidia-smi
GPU_INFO = {
    "miner_id": "localhost-gpu-miner",
    "capabilities": {
        "gpu": {
            "model": "NVIDIA GeForce RTX 4060 Ti",
            "memory_gb": 16,
            "cuda_version": "12.4",
            "compute_capability": "8.9",
            "driver_version": "550.163.01"
        },
        "compute": {
            "type": "GPU",
            "platform": "CUDA",
            "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
            "max_concurrent_jobs": 1
        }
    },
    "concurrency": 1,
    "region": "localhost"
}


def _banner(title, leading_newline=True):
    """Print a '=== title ===' section header, optionally newline-prefixed."""
    prefix = "\n" if leading_newline else ""
    print(f"{prefix}=== {title} ===")


_banner("GPU Miner Registration Data", leading_newline=False)
print(json.dumps(GPU_INFO, indent=2))

_banner("Registration Endpoint")
print("POST http://localhost:8000/miners/register")

_banner("Headers")
print("Authorization: Bearer REDACTED_MINER_KEY")
print("Content-Type: application/json")

_banner("Response Expected")
print("""
{
  "status": "ok",
  "session_token": "abc123..."
}
""")

_banner("Current GPU Status")
print("Model: NVIDIA GeForce RTX 4060 Ti")
print("Memory: 16GB (2682MB/16380MB used)")
print("Utilization: 9%")
print("Temperature: 43°C")
print("Status: Available for mining")

_banner("To Start the GPU Miner")
print("1. Ensure coordinator API is running on port 8000")
print("2. Run: python simple_gpu_miner.py")
print("3. The miner will:")
print("   - Register GPU capabilities")
print("   - Send heartbeats every 15 seconds")
print("   - Poll for jobs every 3 seconds")

View File

@@ -0,0 +1,396 @@
#!/usr/bin/env python3
"""
Real GPU Miner Client for AITBC - runs on host with actual GPU

Registers the host GPU with the coordinator, sends periodic heartbeats
with live nvidia-smi stats, and executes inference jobs via a local
Ollama server.
"""
import json
import time
import httpx
import logging
import sys
import subprocess
import os
from datetime import datetime
# Configuration
# NOTE(review): this host variant talks to port 18000, while the sibling
# miners use 8000 — presumably a host-mapped port; confirm against the
# coordinator deployment.
COORDINATOR_URL = "http://127.0.0.1:18000"
MINER_ID = "REDACTED_MINER_KEY"
AUTH_TOKEN = "REDACTED_MINER_KEY"
HEARTBEAT_INTERVAL = 15  # seconds between heartbeats
MAX_RETRIES = 10         # coordinator availability probes before giving up
RETRY_DELAY = 30         # seconds between availability probes
# Setup logging with explicit configuration
LOG_PATH = "/home/oib/windsurf/aitbc/logs/host_gpu_miner.log"
# Ensure the log directory exists before FileHandler tries to open the file.
os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)
class FlushHandler(logging.StreamHandler):
    """StreamHandler that flushes after every record so log lines appear
    immediately when stdout is captured (e.g. under systemd/journald)."""
    def emit(self, record):
        super().emit(record)
        self.flush()
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        FlushHandler(sys.stdout),
        logging.FileHandler(LOG_PATH)
    ]
)
logger = logging.getLogger(__name__)
# Force stdout to be unbuffered
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)
# GPU capabilities (RTX 4060 Ti) advertised to the coordinator at registration
GPU_CAPABILITIES = {
    "gpu": {
        "model": "NVIDIA GeForce RTX 4060 Ti",
        "memory_gb": 16,
        "cuda_version": "12.4",
        "platform": "CUDA",
        "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
        "max_concurrent_jobs": 1
    }
}
def get_gpu_info():
    """Query nvidia-smi for the current GPU name, memory and utilization.

    Returns a dict with keys name/memory_total/memory_used/utilization
    (memory in MB, utilization in percent), or None when nvidia-smi is
    missing, fails, or produces unparseable output.
    """
    query = ['nvidia-smi',
             '--query-gpu=name,memory.total,memory.used,utilization.gpu',
             '--format=csv,noheader,nounits']
    try:
        proc = subprocess.run(query, capture_output=True, text=True, timeout=5)
        if proc.returncode == 0:
            fields = proc.stdout.strip().split(', ')
            return {
                "name": fields[0],
                "memory_total": int(fields[1]),
                "memory_used": int(fields[2]),
                "utilization": int(fields[3])
            }
    except Exception as e:
        logger.error(f"Failed to get GPU info: {e}")
    # Reached when nvidia-smi exited non-zero or an exception was logged.
    return None
def check_ollama():
    """Check if Ollama is running and has models.

    Returns:
        (bool, list[str]): availability flag and the installed model names;
        (False, []) when Ollama is down, unreachable, or returns non-200.
    """
    try:
        response = httpx.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            models = response.json().get('models', [])
            model_names = [m['name'] for m in models]
            logger.info(f"Ollama running with models: {model_names}")
            return True, model_names
        else:
            logger.error("Ollama not responding")
            return False, []
    except Exception as e:
        logger.error(f"Ollama check failed: {e}")
        return False, []
def wait_for_coordinator():
    """Poll the coordinator health endpoint until it responds.

    Makes up to MAX_RETRIES probes, sleeping RETRY_DELAY seconds between
    them. Returns True as soon as /v1/health answers 200, False otherwise.
    """
    for attempt in range(MAX_RETRIES):
        try:
            response = httpx.get(f"{COORDINATOR_URL}/v1/health", timeout=5)
            if response.status_code == 200:
                logger.info("Coordinator is available!")
                return True
        # Fix: a bare `except:` here also swallowed KeyboardInterrupt and
        # SystemExit, making the startup wait un-interruptible with Ctrl-C.
        except Exception:
            pass
        logger.info(f"Waiting for coordinator... ({attempt+1}/{MAX_RETRIES})")
        time.sleep(RETRY_DELAY)
    logger.error("Coordinator not available after max retries")
    return False
def register_miner():
    """Register the miner with the coordinator.

    Sends GPU_CAPABILITIES with the miner id as a query parameter and the
    API key in the X-Api-Key header. Returns the session token on success
    ("demo-token" when the response omits one), or None on any failure.
    """
    register_data = {
        "capabilities": GPU_CAPABILITIES,
        "concurrency": 1,
        "region": "localhost"
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
            json=register_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            data = response.json()
            logger.info(f"Successfully registered miner: {data}")
            return data.get("session_token", "demo-token")
        else:
            logger.error(f"Registration failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Registration error: {e}")
        return None
def send_heartbeat():
    """Send heartbeat to coordinator with real GPU stats.

    Uses live nvidia-smi numbers when available; otherwise sends zeroed
    stats so the payload schema is identical either way. Failures are
    logged but never raised — a missed heartbeat must not kill the miner.
    """
    gpu_info = get_gpu_info()
    if gpu_info:
        heartbeat_data = {
            "status": "active",
            "current_jobs": 0,
            "last_seen": datetime.utcnow().isoformat(),
            "gpu_utilization": gpu_info["utilization"],
            "memory_used": gpu_info["memory_used"],
            "memory_total": gpu_info["memory_total"]
        }
    else:
        heartbeat_data = {
            "status": "active",
            "current_jobs": 0,
            "last_seen": datetime.utcnow().isoformat(),
            "gpu_utilization": 0,
            "memory_used": 0,
            # Fix: memory_total was missing from this fallback branch,
            # giving the coordinator an inconsistent payload schema.
            "memory_total": 0,
        }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/heartbeat?miner_id={MINER_ID}",
            json=heartbeat_data,
            headers=headers,
            timeout=5
        )
        if response.status_code == 200:
            logger.info(f"Heartbeat sent (GPU: {gpu_info['utilization'] if gpu_info else 'N/A'}%)")
        else:
            logger.error(f"Heartbeat failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Heartbeat error: {e}")
def execute_job(job, available_models):
    """Execute a job using real GPU resources.

    Only 'inference' payloads are supported: the prompt is sent to the
    local Ollama server and the response (plus post-run GPU metrics) is
    reported back via submit_result. Any other job type, Ollama error, or
    exception is reported back as a failed result.

    Args:
        job: job dict from poll_for_jobs; reads 'job_id' and 'payload'.
        available_models: model names from check_ollama, used to fall back
            when the requested model is not installed.
    Returns:
        True when the job completed and its result was submitted, else False.
    """
    job_id = job.get('job_id')
    payload = job.get('payload', {})
    logger.info(f"Executing job {job_id}: {payload}")
    try:
        if payload.get('type') == 'inference':
            # Get the prompt and model
            prompt = payload.get('prompt', '')
            model = payload.get('model', 'llama3.2:latest')
            # Check if model is available
            if model not in available_models:
                # Use first available model
                if available_models:
                    model = available_models[0]
                    logger.info(f"Using available model: {model}")
                else:
                    raise Exception("No models available in Ollama")
            # Call Ollama API for real GPU inference
            logger.info(f"Running inference on GPU with model: {model}")
            start_time = time.time()
            ollama_response = httpx.post(
                "http://localhost:11434/api/generate",
                json={
                    "model": model,
                    "prompt": prompt,
                    "stream": False
                },
                timeout=60
            )
            if ollama_response.status_code == 200:
                result = ollama_response.json()
                output = result.get('response', '')
                execution_time = time.time() - start_time
                # Get GPU stats after execution
                gpu_after = get_gpu_info()
                # Submit result back to coordinator
                submit_result(job_id, {
                    "result": {
                        "status": "completed",
                        "output": output,
                        "model": model,
                        "tokens_processed": result.get('eval_count', 0),
                        "execution_time": execution_time,
                        "gpu_used": True
                    },
                    "metrics": {
                        "gpu_utilization": gpu_after["utilization"] if gpu_after else 0,
                        "memory_used": gpu_after["memory_used"] if gpu_after else 0,
                        # Floor of 2048 — presumably a minimum plausible peak; confirm intent.
                        "memory_peak": max(gpu_after["memory_used"] if gpu_after else 0, 2048)
                    }
                })
                logger.info(f"Job {job_id} completed in {execution_time:.2f}s")
                return True
            else:
                logger.error(f"Ollama error: {ollama_response.status_code}")
                submit_result(job_id, {
                    "result": {
                        "status": "failed",
                        "error": f"Ollama error: {ollama_response.text}"
                    }
                })
                return False
        else:
            # Unsupported job type
            logger.error(f"Unsupported job type: {payload.get('type')}")
            submit_result(job_id, {
                "result": {
                    "status": "failed",
                    "error": f"Unsupported job type: {payload.get('type')}"
                }
            })
            return False
    except Exception as e:
        logger.error(f"Job execution error: {e}")
        submit_result(job_id, {
            "result": {
                "status": "failed",
                "error": str(e)
            }
        })
        return False
def submit_result(job_id, result):
    """Submit job result to coordinator.

    Errors are logged, never raised, so a failed submission does not
    abort the miner loop.
    NOTE(review): this posts to /v1/miners/{job_id}/result while the
    sibling miner posts to /v1/jobs/{job_id}/result — confirm which path
    the coordinator actually serves.
    """
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/{job_id}/result",
            json=result,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            logger.info(f"Result submitted for job {job_id}")
        else:
            logger.error(f"Result submission failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Result submission error: {e}")
def poll_for_jobs():
    """Poll the coordinator for an available job.

    Returns:
        The job dict on 200; None on 204 (no job available), on any other
        status, or on a transport error.
    """
    poll_data = {
        "max_wait_seconds": 5  # server-side long-poll window
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/poll",
            json=poll_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            job = response.json()
            logger.info(f"Received job: {job}")
            return job
        elif response.status_code == 204:
            # 204 No Content: nothing queued for this miner right now.
            return None
        else:
            logger.error(f"Poll failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Error polling for jobs: {e}")
        return None
def main():
    """Main miner loop.

    Startup: require a working GPU (nvidia-smi) and a running Ollama,
    wait for the coordinator, then register. Afterwards loop forever:
    heartbeat every HEARTBEAT_INTERVAL seconds, poll for jobs every 3
    seconds, sleeping 1s per iteration. Exits non-zero on any startup
    failure or unexpected loop error; Ctrl-C shuts down cleanly.
    """
    logger.info("Starting Real GPU Miner Client on Host...")
    # Check GPU availability
    gpu_info = get_gpu_info()
    if not gpu_info:
        logger.error("GPU not available, exiting")
        sys.exit(1)
    logger.info(f"GPU detected: {gpu_info['name']} ({gpu_info['memory_total']}MB)")
    # Check Ollama
    ollama_available, models = check_ollama()
    if not ollama_available:
        logger.error("Ollama not available - please install and start Ollama")
        sys.exit(1)
    logger.info(f"Ollama models available: {', '.join(models)}")
    # Wait for coordinator
    if not wait_for_coordinator():
        sys.exit(1)
    # Register with coordinator
    session_token = register_miner()
    if not session_token:
        logger.error("Failed to register, exiting")
        sys.exit(1)
    logger.info("Miner registered successfully, starting main loop...")
    # Main loop
    last_heartbeat = 0
    last_poll = 0
    try:
        while True:
            current_time = time.time()
            # Send heartbeat
            if current_time - last_heartbeat >= HEARTBEAT_INTERVAL:
                send_heartbeat()
                last_heartbeat = current_time
            # Poll for jobs (every 3 seconds)
            if current_time - last_poll >= 3:
                job = poll_for_jobs()
                if job:
                    # Execute the job with real GPU
                    execute_job(job, models)
                last_poll = current_time
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Shutting down miner...")
    except Exception as e:
        logger.error(f"Error in main loop: {e}")
        sys.exit(1)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Wrapper script for GPU miner to ensure proper logging
# - exec replaces this shell, so the service supervisor tracks the Python
#   process directly (signals reach it, no orphaned shell).
# - python -u disables output buffering; 2>&1 merges stderr into stdout so
#   the log collector sees a single ordered stream.
exec /home/oib/windsurf/aitbc/.venv/bin/python -u /home/oib/windsurf/aitbc/scripts/gpu/gpu_miner_host.py 2>&1

View File

@@ -0,0 +1,329 @@
#!/usr/bin/env python3
"""
Real GPU Miner Client for AITBC with Ollama integration
"""
import json
import time
import httpx
import logging
import sys
import subprocess
import os
from datetime import datetime
# Configuration
COORDINATOR_URL = "http://127.0.0.1:8000"
MINER_ID = "localhost-gpu-miner"
AUTH_TOKEN = "REDACTED_MINER_KEY"
HEARTBEAT_INTERVAL = 15
MAX_RETRIES = 10
RETRY_DELAY = 30
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# GPU capabilities (RTX 4060 Ti)
GPU_CAPABILITIES = {
"gpu": {
"model": "NVIDIA GeForce RTX 4060 Ti",
"memory_gb": 16,
"cuda_version": "12.4",
"platform": "CUDA",
"supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
"max_concurrent_jobs": 1
}
}
def check_gpu_available():
    """Probe nvidia-smi to see whether a usable GPU is present.

    Logs the detected model and memory and returns True on success;
    returns False when nvidia-smi is missing, fails, or times out.
    """
    probe_cmd = ['nvidia-smi',
                 '--query-gpu=name,memory.total',
                 '--format=csv,noheader,nounits']
    try:
        probe = subprocess.run(probe_cmd, capture_output=True, text=True, timeout=5)
        if probe.returncode != 0:
            logger.error("nvidia-smi failed")
            return False
        fields = probe.stdout.strip().split(', ')
        logger.info(f"GPU detected: {fields[0]}, Memory: {fields[1]}MB")
        return True
    except Exception as exc:
        logger.error(f"GPU check failed: {exc}")
        return False
def check_ollama():
    """Check if Ollama is running.

    Returns True when the local Ollama /api/tags endpoint answers 200
    (model count is only logged, not returned), False otherwise.
    """
    try:
        response = httpx.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            models = response.json().get('models', [])
            logger.info(f"Ollama running with {len(models)} models")
            return True
        else:
            logger.error("Ollama not responding")
            return False
    except Exception as e:
        logger.error(f"Ollama check failed: {e}")
        return False
def wait_for_coordinator():
    """Poll the coordinator health endpoint until it responds.

    Makes up to MAX_RETRIES probes, sleeping RETRY_DELAY seconds between
    them. Returns True as soon as /v1/health answers 200, False otherwise.
    """
    for attempt in range(MAX_RETRIES):
        try:
            response = httpx.get(f"{COORDINATOR_URL}/v1/health", timeout=5)
            if response.status_code == 200:
                logger.info("Coordinator is available!")
                return True
        # Fix: a bare `except:` here also swallowed KeyboardInterrupt and
        # SystemExit, making the startup wait un-interruptible with Ctrl-C.
        except Exception:
            pass
        logger.info(f"Waiting for coordinator... ({attempt+1}/{MAX_RETRIES})")
        time.sleep(RETRY_DELAY)
    logger.error("Coordinator not available after max retries")
    return False
def register_miner():
    """Register the miner with the coordinator.

    Sends GPU_CAPABILITIES with the miner id as a query parameter and the
    API key in the X-Api-Key header. Returns the session token on success
    ("demo-token" when the response omits one), or None on any failure.
    """
    register_data = {
        "capabilities": GPU_CAPABILITIES,
        "concurrency": 1,
        "region": "localhost"
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
            json=register_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            data = response.json()
            logger.info(f"Successfully registered miner: {data}")
            return data.get("session_token", "demo-token")
        else:
            logger.error(f"Registration failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Registration error: {e}")
        return None
def send_heartbeat():
    """Send heartbeat to coordinator.

    GPU utilization and memory figures are hard-coded placeholders, not
    live nvidia-smi readings. Failures are logged, never raised.
    """
    heartbeat_data = {
        "status": "active",
        "current_jobs": 0,
        "last_seen": datetime.utcnow().isoformat(),
        "gpu_utilization": 45,  # Simulated
        "memory_used": 8192,  # Simulated
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/heartbeat?miner_id={MINER_ID}",
            json=heartbeat_data,
            headers=headers,
            timeout=5
        )
        if response.status_code == 200:
            logger.info("Heartbeat sent successfully")
        else:
            logger.error(f"Heartbeat failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Heartbeat error: {e}")
def execute_job(job):
    """Execute a job using GPU resources.

    Only 'inference' payloads are handled, by delegating to the local
    Ollama server; any other type, Ollama error, or exception is reported
    back as a failed result. Returns True on success, False otherwise.
    NOTE(review): this file submits a flat result dict, while the sibling
    miners wrap it in {"result": ..., "metrics": ...} — confirm which
    schema the coordinator expects.
    """
    job_id = job.get('job_id')
    payload = job.get('payload', {})
    logger.info(f"Executing job {job_id}: {payload}")
    try:
        if payload.get('type') == 'inference':
            # Use Ollama for inference
            prompt = payload.get('prompt', '')
            model = payload.get('model', 'llama3.2:latest')
            # Call Ollama API
            ollama_response = httpx.post(
                "http://localhost:11434/api/generate",
                json={
                    "model": model,
                    "prompt": prompt,
                    "stream": False
                },
                timeout=60
            )
            if ollama_response.status_code == 200:
                result = ollama_response.json()
                output = result.get('response', '')
                # Submit result back to coordinator
                submit_result(job_id, {
                    "status": "completed",
                    "output": output,
                    "model": model,
                    "tokens_processed": result.get('eval_count', 0),
                    "execution_time": result.get('total_duration', 0) / 1000000000,  # Convert to seconds (Ollama reports nanoseconds)
                    "gpu_used": True
                })
                logger.info(f"Job {job_id} completed successfully")
                return True
            else:
                logger.error(f"Ollama error: {ollama_response.status_code}")
                submit_result(job_id, {
                    "status": "failed",
                    "error": f"Ollama error: {ollama_response.text}"
                })
                return False
        else:
            # Unsupported job type
            logger.error(f"Unsupported job type: {payload.get('type')}")
            submit_result(job_id, {
                "status": "failed",
                "error": f"Unsupported job type: {payload.get('type')}"
            })
            return False
    except Exception as e:
        logger.error(f"Job execution error: {e}")
        submit_result(job_id, {
            "status": "failed",
            "error": str(e)
        })
        return False
def submit_result(job_id, result):
    """Submit job result to coordinator.

    Errors are logged, never raised, so a failed submission does not
    abort the miner loop.
    NOTE(review): posts to /v1/jobs/{job_id}/result while sibling miners
    use /v1/miners/{job_id}/result — confirm the correct path.
    """
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/jobs/{job_id}/result",
            json=result,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            logger.info(f"Result submitted for job {job_id}")
        else:
            logger.error(f"Result submission failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Result submission error: {e}")
def poll_for_jobs():
    """Poll the coordinator for an available job.

    Unlike the host-side variant, this includes the miner id and
    capabilities in the poll body. Returns the job dict on 200; None on
    204 (no job), any other status, or a transport error.
    """
    poll_data = {
        "miner_id": MINER_ID,
        "capabilities": GPU_CAPABILITIES,
        "max_wait_seconds": 5  # server-side long-poll window
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/poll",
            json=poll_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            job = response.json()
            logger.info(f"Received job: {job}")
            return job
        elif response.status_code == 204:
            logger.info("No jobs available")
            return None
        else:
            logger.error(f"Poll failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Error polling for jobs: {e}")
        return None
def main():
    """Main miner loop.

    GPU and Ollama checks are advisory here (warnings only) — the miner
    still starts without them. It then waits for the coordinator,
    registers, and loops: heartbeat every HEARTBEAT_INTERVAL seconds,
    poll every 3 seconds, 1s sleep per iteration. Exits non-zero on
    registration failure or an unexpected loop error.
    """
    logger.info("Starting Real GPU Miner Client...")
    # Check GPU availability (optional)
    gpu_available = check_gpu_available()
    if not gpu_available:
        logger.warning("GPU not available - will run in CPU mode")
    # Check Ollama
    if not check_ollama():
        logger.warning("Ollama not available - inference jobs will fail")
    # Wait for coordinator
    if not wait_for_coordinator():
        sys.exit(1)
    # Register with coordinator
    session_token = register_miner()
    if not session_token:
        logger.error("Failed to register, exiting")
        sys.exit(1)
    logger.info("Miner registered successfully, starting main loop...")
    # Main loop
    last_heartbeat = 0
    last_poll = 0
    try:
        while True:
            current_time = time.time()
            # Send heartbeat
            if current_time - last_heartbeat >= HEARTBEAT_INTERVAL:
                send_heartbeat()
                last_heartbeat = current_time
            # Poll for jobs (every 3 seconds)
            if current_time - last_poll >= 3:
                job = poll_for_jobs()
                if job:
                    # Execute the job
                    execute_job(job)
                last_poll = current_time
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Shutting down miner...")
    except Exception as e:
        logger.error(f"Error in main loop: {e}")
        sys.exit(1)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,299 @@
#!/usr/bin/env python3
"""
Simple GPU Miner Client for AITBC - simulates GPU work

Demo miner: registers with the coordinator and answers inference jobs
with canned responses instead of running a real model.
"""
import json
import time
import httpx
import logging
import sys
import subprocess
from datetime import datetime
# Configuration
COORDINATOR_URL = "http://127.0.0.1:8000"  # coordinator API base URL
MINER_ID = "localhost-gpu-miner"
AUTH_TOKEN = "REDACTED_MINER_KEY"
HEARTBEAT_INTERVAL = 15  # seconds between heartbeats
MAX_RETRIES = 10         # coordinator availability probes before giving up
RETRY_DELAY = 30         # seconds between availability probes
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# GPU capabilities (simulated) advertised to the coordinator at registration
GPU_CAPABILITIES = {
    "gpu": {
        "model": "NVIDIA GeForce RTX 4060 Ti",
        "memory_gb": 16,
        "cuda_version": "12.4",
        "platform": "CUDA",
        "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
        "max_concurrent_jobs": 1
    }
}
def simulate_gpu_work(prompt, duration=3):
    """Pretend to run the prompt on a GPU.

    Sleeps `duration` seconds to fake processing latency, then returns a
    canned answer chosen by case-insensitive keyword match (first match
    wins), falling back to a generic echo of the prompt.
    """
    logger.info(f"Simulating GPU work for: '{prompt}'")
    time.sleep(duration)  # fake processing time
    lowered = prompt.lower()
    canned_replies = (
        ("hello", "Hello! I'm an AI assistant running on the AITBC network. Your request was processed by a GPU miner."),
        ("ai", "AI (Artificial Intelligence) is the simulation of human intelligence in machines that are programmed to think and learn."),
        ("blockchain", "Blockchain is a distributed ledger technology that maintains a secure and decentralized record of transactions."),
    )
    for keyword, reply in canned_replies:
        if keyword in lowered:
            return reply
    return f"Processed request: {prompt}. This is a simulated GPU response from the AITBC network."
def wait_for_coordinator():
    """Poll the coordinator health endpoint until it responds.

    Makes up to MAX_RETRIES probes, sleeping RETRY_DELAY seconds between
    them. Returns True as soon as /v1/health answers 200, False otherwise.
    """
    for attempt in range(MAX_RETRIES):
        try:
            response = httpx.get(f"{COORDINATOR_URL}/v1/health", timeout=5)
            if response.status_code == 200:
                logger.info("Coordinator is available!")
                return True
        # Fix: a bare `except:` here also swallowed KeyboardInterrupt and
        # SystemExit, making the startup wait un-interruptible with Ctrl-C.
        except Exception:
            pass
        logger.info(f"Waiting for coordinator... ({attempt+1}/{MAX_RETRIES})")
        time.sleep(RETRY_DELAY)
    logger.error("Coordinator not available after max retries")
    return False
def register_miner():
    """Register the miner with the coordinator.

    Sends GPU_CAPABILITIES with the miner id as a query parameter and the
    API key in the X-Api-Key header. Returns the session token on success
    ("demo-token" when the response omits one), or None on any failure.
    """
    register_data = {
        "capabilities": GPU_CAPABILITIES,
        "concurrency": 1,
        "region": "localhost"
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
            json=register_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            data = response.json()
            logger.info(f"Successfully registered miner: {data}")
            return data.get("session_token", "demo-token")
        else:
            logger.error(f"Registration failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Registration error: {e}")
        return None
def send_heartbeat():
    """Send heartbeat to coordinator.

    GPU utilization and memory figures are hard-coded placeholders (this
    miner only simulates GPU work). Failures are logged, never raised.
    """
    heartbeat_data = {
        "status": "active",
        "current_jobs": 0,
        "last_seen": datetime.utcnow().isoformat(),
        "gpu_utilization": 45,  # Simulated
        "memory_used": 8192,  # Simulated
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/heartbeat?miner_id={MINER_ID}",
            json=heartbeat_data,
            headers=headers,
            timeout=5
        )
        if response.status_code == 200:
            logger.info("Heartbeat sent successfully")
        else:
            logger.error(f"Heartbeat failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Heartbeat error: {e}")
def execute_job(job):
    """Execute a job using simulated GPU processing.

    Only 'inference' payloads are handled: the prompt goes through
    simulate_gpu_work and a canned result with fixed metrics is submitted.
    Any other type or exception is reported back as a failed result.
    Returns True on success, False otherwise.
    """
    job_id = job.get('job_id')
    payload = job.get('payload', {})
    logger.info(f"Executing job {job_id}: {payload}")
    try:
        if payload.get('type') == 'inference':
            # Get the prompt
            prompt = payload.get('prompt', '')
            # Simulate GPU processing
            logger.info(f"Processing with GPU...")
            result_text = simulate_gpu_work(prompt, duration=3)
            # Submit result back to coordinator
            submit_result(job_id, {
                "result": {
                    "status": "completed",
                    "output": result_text,
                    "model": "simulated-gpu",
                    "tokens_processed": len(result_text.split()),
                    "execution_time": 3.0,  # matches the simulated 3s duration
                    "gpu_used": True
                },
                "metrics": {
                    # Fixed placeholder metrics — no real GPU is measured here.
                    "gpu_utilization": 85,
                    "memory_used": 2048,
                    "power_consumption": 250
                }
            })
            logger.info(f"Job {job_id} completed successfully")
            return True
        else:
            # Unsupported job type
            logger.error(f"Unsupported job type: {payload.get('type')}")
            submit_result(job_id, {
                "result": {
                    "status": "failed",
                    "error": f"Unsupported job type: {payload.get('type')}"
                }
            })
            return False
    except Exception as e:
        logger.error(f"Job execution error: {e}")
        submit_result(job_id, {
            "result": {
                "status": "failed",
                "error": str(e)
            }
        })
        return False
def submit_result(job_id, result):
    """Submit job result to coordinator.

    Errors are logged, never raised, so a failed submission does not
    abort the miner loop.
    NOTE(review): posts to /v1/miners/{job_id}/result while a sibling
    miner uses /v1/jobs/{job_id}/result — confirm the correct path.
    """
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/{job_id}/result",
            json=result,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            logger.info(f"Result submitted for job {job_id}")
        else:
            logger.error(f"Result submission failed: {response.status_code} - {response.text}")
    except Exception as e:
        logger.error(f"Result submission error: {e}")
def poll_for_jobs():
    """Poll the coordinator for an available job.

    Returns:
        The decoded job dict, or None when no job is available, the
        endpoint is unimplemented, or any error occurs.
    """
    poll_data = {
        "miner_id": MINER_ID,
        "capabilities": GPU_CAPABILITIES,
        "max_wait_seconds": 5
    }
    headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json"
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/poll",
            json=poll_data,
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            job = response.json()
            # Some coordinators answer 200 with an empty/null body when
            # no work is queued; treat that the same as 204.
            if not job:
                logger.info("No jobs available")
                return None
            logger.info(f"Received job: {job}")
            return job
        elif response.status_code == 204:
            logger.info("No jobs available")
            return None
        elif response.status_code in (404, 405):
            # Coordinator/registry may not implement job polling (e.g. the
            # demo registry).  Keep running quietly -- heartbeats still work.
            return None
        else:
            logger.error(f"Poll failed: {response.status_code} - {response.text}")
            return None
    except Exception as e:
        logger.error(f"Error polling for jobs: {e}")
        return None
def main():
    """Entry point: wait for the coordinator, register, then loop forever
    sending heartbeats and executing polled jobs."""
    logger.info("Starting Simple GPU Miner Client...")
    # Block until the coordinator answers its health check.
    if not wait_for_coordinator():
        sys.exit(1)
    # Registration must succeed before we start working.
    if not register_miner():
        logger.error("Failed to register, exiting")
        sys.exit(1)
    logger.info("Miner registered successfully, starting main loop...")
    last_heartbeat = 0.0
    last_poll = 0.0
    try:
        while True:
            now = time.time()
            # Heartbeat on its own cadence, independent of polling.
            if now - last_heartbeat >= HEARTBEAT_INTERVAL:
                send_heartbeat()
                last_heartbeat = now
            # Poll (and run) at most one job every 3 seconds.
            if now - last_poll >= 3:
                job = poll_for_jobs()
                if job:
                    execute_job(job)
                last_poll = now
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Shutting down miner...")
    except Exception as exc:
        logger.error(f"Error in main loop: {exc}")
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,210 @@
#!/usr/bin/env python3
"""
GPU Miner Client with retry logic for AITBC.

Registers a (hard-coded, simulated) local GPU with the coordinator, then
sends periodic heartbeats and polls for jobs.  Unlike the plain client,
this variant waits for the coordinator to come up before registering.
"""
import json
import time
import httpx
import logging
import sys
from datetime import datetime
# Configuration
# NOTE(review): `json` is imported but unused in the visible code -- confirm before removing.
COORDINATOR_URL = "http://127.0.0.1:8000"  # Base URL of the coordinator API
MINER_ID = "localhost-gpu-miner"  # Identity this miner registers/heartbeats under
AUTH_TOKEN = "REDACTED_MINER_KEY"  # Sent as the X-Api-Key header (value redacted)
HEARTBEAT_INTERVAL = 15  # Seconds between heartbeats
MAX_RETRIES = 10  # Coordinator availability checks before giving up
RETRY_DELAY = 30  # Seconds between availability checks
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# GPU capabilities (RTX 4060 Ti) advertised at registration time.
# Values are hard-coded for the demo rather than probed from hardware.
GPU_CAPABILITIES = {
    "gpu": {
        "model": "NVIDIA GeForce RTX 4060 Ti",
        "memory_gb": 16,
        "cuda_version": "12.4",
        "compute_capability": "8.9",
        "driver_version": "550.163.01"
    },
    "compute": {
        "type": "GPU",
        "platform": "CUDA",
        "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
        "max_concurrent_jobs": 1
    }
}
def wait_for_coordinator():
    """Block until the coordinator's health endpoint answers 200.

    Tries up to MAX_RETRIES times, sleeping RETRY_DELAY seconds between
    attempts (but not after the final one).

    Returns:
        True once the coordinator responds, False after exhausting retries.
    """
    for i in range(MAX_RETRIES):
        try:
            response = httpx.get(f"{COORDINATOR_URL}/v1/health", timeout=5)
            if response.status_code == 200:
                logger.info("Coordinator is available!")
                return True
        except Exception:
            # Bare `except:` would also swallow KeyboardInterrupt/SystemExit;
            # only network/HTTP errors should be retried.
            pass
        logger.info(f"Waiting for coordinator... ({i+1}/{MAX_RETRIES})")
        # Don't waste a full RETRY_DELAY sleep after the last attempt.
        if i < MAX_RETRIES - 1:
            time.sleep(RETRY_DELAY)
    logger.error("Coordinator not available after max retries")
    return False
def register_miner():
    """Register this miner with the coordinator.

    Returns the session token on success ("demo-token" when the registry
    does not issue one), or None on any failure.
    """
    body = {
        "capabilities": GPU_CAPABILITIES,
        "concurrency": 1,
        "region": "localhost",
    }
    auth_headers = {
        "X-Api-Key": AUTH_TOKEN,
        "Content-Type": "application/json",
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/register?miner_id={MINER_ID}",
            json=body,
            headers=auth_headers,
            timeout=10,
        )
        if response.status_code != 200:
            logger.error(f"Registration failed: {response.status_code} - {response.text}")
            return None
        data = response.json()
        logger.info(f"Successfully registered miner: {data}")
        # Don't require session_token for demo registry
        return data.get("session_token", "demo-token")
    except Exception as exc:
        logger.error(f"Error registering miner: {exc}")
        return None
def send_heartbeat():
    """Report liveness plus (hard-coded demo) GPU telemetry to the coordinator."""
    payload = {
        "inflight": 0,
        "status": "ONLINE",
        "metadata": {
            # Static snapshot values, not live nvidia-smi readings.
            "last_seen": datetime.utcnow().isoformat(),
            "gpu_utilization": 9,
            "gpu_memory_used": 2682,
            "gpu_temperature": 43,
        },
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/heartbeat?miner_id={MINER_ID}",
            json=payload,
            headers={"X-Api-Key": AUTH_TOKEN, "Content-Type": "application/json"},
            timeout=5,
        )
        if response.status_code == 200:
            logger.info("Heartbeat sent successfully")
        else:
            logger.error(f"Heartbeat failed: {response.status_code} - {response.text}")
    except Exception as exc:
        logger.error(f"Error sending heartbeat: {exc}")
def poll_for_jobs():
    """Long-poll the coordinator for a job; return the job dict or None."""
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/v1/miners/poll",
            json={"max_wait_seconds": 5},
            headers={"X-Api-Key": AUTH_TOKEN, "Content-Type": "application/json"},
            timeout=10,
        )
        status = response.status_code
        if status == 204:
            logger.info("No jobs available")
            return None
        if status in (404, 405):
            # Coordinator/registry may not implement job polling (e.g. demo registry).
            # Keep running (heartbeats still work) but don't spam error logs.
            return None
        if status != 200:
            logger.error(f"Poll failed: {status} - {response.text}")
            return None
        job = response.json()
        logger.info(f"Received job: {job}")
        return job
    except Exception as exc:
        logger.error(f"Error polling for jobs: {exc}")
        return None
def main():
    """Entry point: wait for the coordinator, register, then heartbeat and
    poll forever.  Polled jobs are only logged, not executed, in this variant."""
    logger.info("Starting GPU Miner Client...")
    # Block until the coordinator answers its health check.
    if not wait_for_coordinator():
        sys.exit(1)
    # Registration must succeed before we start the loop.
    if not register_miner():
        logger.error("Failed to register, exiting")
        sys.exit(1)
    logger.info("Miner registered successfully, starting main loop...")
    last_heartbeat = 0.0
    last_poll = 0.0
    try:
        while True:
            now = time.time()
            # Heartbeat on its own cadence, independent of polling.
            if now - last_heartbeat >= HEARTBEAT_INTERVAL:
                send_heartbeat()
                last_heartbeat = now
            # Poll at most once every 3 seconds.
            if now - last_poll >= 3:
                job = poll_for_jobs()
                if job:
                    logger.info(f"Would execute job: {job}")
                last_poll = now
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Shutting down miner...")
    except Exception as exc:
        logger.error(f"Error in main loop: {exc}")
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env python3
"""
Simple GPU Registry Server for demonstration.

A minimal FastAPI app that tracks registered GPU miners in an in-process
dict; all state is lost on restart.  Served on port 8091 (see __main__).
"""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, Any, Optional
import uvicorn
from datetime import datetime
app = FastAPI(title="GPU Registry Demo")
# In-memory storage: miner_id -> registration record (no persistence).
registered_gpus: Dict[str, Dict] = {}
class GPURegistration(BaseModel):
    """Request body for POST /miners/register."""
    # Free-form capability document (GPU model, memory, supported tasks, ...)
    capabilities: Dict[str, Any]
    # How many jobs the miner claims it can run concurrently
    concurrency: int = 1
    # Optional deployment region label
    region: Optional[str] = None
class Heartbeat(BaseModel):
    """Request body for POST /miners/heartbeat."""
    # Number of jobs currently in flight on the miner
    inflight: int = 0
    # Miner-reported status string, e.g. "ONLINE"
    status: str = "ONLINE"
    # Arbitrary telemetry (GPU utilization, temperature, ...)
    metadata: Dict[str, Any] = {}
@app.get("/")
async def root():
    """Landing endpoint: service name plus current registration count."""
    return {"message": "GPU Registry Demo", "registered_gpus": len(registered_gpus)}
@app.get("/health")
async def health():
    """Liveness probe; always reports ok."""
    return {"status": "ok"}
@app.post("/miners/register")
async def register_gpu(miner_id: str, gpu_data: GPURegistration):
    """Register a GPU miner.

    `miner_id` arrives as a query parameter (FastAPI scalar param).
    Re-registering an existing id silently overwrites the stored record.
    """
    registered_gpus[miner_id] = {
        "id": miner_id,
        "registered_at": datetime.utcnow().isoformat(),
        "last_heartbeat": datetime.utcnow().isoformat(),
        # NOTE(review): .dict() is the pydantic v1 API (model_dump() in v2)
        # -- confirm the pinned pydantic version.
        **gpu_data.dict()
    }
    return {"status": "ok", "message": f"GPU {miner_id} registered successfully"}
@app.post("/miners/heartbeat")
async def heartbeat(miner_id: str, heartbeat_data: Heartbeat):
    """Receive a heartbeat from a registered GPU miner.

    Updates the stored last-heartbeat timestamp, status and telemetry;
    responds 404 if the miner never registered.
    """
    if miner_id not in registered_gpus:
        raise HTTPException(status_code=404, detail="GPU not registered")
    registered_gpus[miner_id]["last_heartbeat"] = datetime.utcnow().isoformat()
    registered_gpus[miner_id]["status"] = heartbeat_data.status
    registered_gpus[miner_id]["metadata"] = heartbeat_data.metadata
    return {"status": "ok"}
@app.get("/miners/list")
async def list_gpus():
    """List all registered GPUs (full stored records, including metadata)."""
    return {"gpus": list(registered_gpus.values())}
@app.get("/miners/{miner_id}")
async def get_gpu(miner_id: str):
    """Get details of a specific GPU; responds 404 if not registered."""
    if miner_id not in registered_gpus:
        raise HTTPException(status_code=404, detail="GPU not registered")
    return registered_gpus[miner_id]
if __name__ == "__main__":
    # Bind on all interfaces so containers/other hosts can reach the demo.
    print("Starting GPU Registry Demo on http://localhost:8091")
    uvicorn.run(app, host="0.0.0.0", port=8091)

View File

@@ -0,0 +1,146 @@
#!/usr/bin/env python3
"""
Integrate GPU Miner with existing Trade Exchange.

Patches the trade-exchange frontend HTML in place so it pulls live GPU
offers from the local GPU registry, and prints the server-side API
snippet to be added manually.
"""
import httpx
import json
import subprocess
import time
from datetime import datetime
# Configuration
# NOTE(review): httpx/json/subprocess/time/datetime and the two URL
# constants below appear unused by the visible code -- confirm before removing.
EXCHANGE_URL = "http://localhost:3002"
GPU_REGISTRY_URL = "http://localhost:8091"
def update_exchange_with_gpu():
    """Patch the exchange frontend HTML to load real GPU offers.

    Reads the trade-exchange index.html, appends a <script> block that
    fetches registered GPUs from the local registry (falling back to the
    page's demo offers), and writes the file back in place.  Prints an
    error and leaves the file untouched when no </body> tag is found.

    NOTE(review): running this twice injects the script twice -- confirm
    whether the caller guards against repeated runs.
    """
    # Read the exchange HTML
    with open('/home/oib/windsurf/aitbc/apps/trade-exchange/index.html', 'r') as f:
        html_content = f.read()
    # Add GPU marketplace integration.  The string below is injected into
    # the page verbatim; it overrides loadGPUOffers at the bottom.
    # NOTE(review): reassigning loadGPUOffers only works if the page
    # declared it with var/let (not const/function in module scope) -- verify.
    gpu_integration = """
<script>
// GPU Integration
async function loadRealGPUOffers() {
    try {
        const response = await fetch('http://localhost:8091/miners/list');
        const data = await response.json();
        if (data.gpus && data.gpus.length > 0) {
            displayRealGPUOffers(data.gpus);
        } else {
            displayDemoOffers();
        }
    } catch (error) {
        console.log('Using demo GPU offers');
        displayDemoOffers();
    }
}
function displayRealGPUOffers(gpus) {
    const container = document.getElementById('gpuList');
    container.innerHTML = '';
    gpus.forEach(gpu => {
        const gpuCard = `
            <div class="bg-white rounded-lg shadow-lg p-6 card-hover">
                <div class="flex justify-between items-start mb-4">
                    <h3 class="text-lg font-semibold">${gpu.capabilities.gpu.model}</h3>
                    <span class="bg-green-100 text-green-800 px-2 py-1 rounded text-sm">Available</span>
                </div>
                <div class="space-y-2 text-sm text-gray-600 mb-4">
                    <p><i data-lucide="monitor" class="w-4 h-4 inline mr-1"></i>Memory: ${gpu.capabilities.gpu.memory_gb} GB</p>
                    <p><i data-lucide="zap" class="w-4 h-4 inline mr-1"></i>CUDA: ${gpu.capabilities.gpu.cuda_version}</p>
                    <p><i data-lucide="cpu" class="w-4 h-4 inline mr-1"></i>Concurrency: ${gpu.concurrency}</p>
                    <p><i data-lucide="map-pin" class="w-4 h-4 inline mr-1"></i>Region: ${gpu.region}</p>
                </div>
                <div class="flex justify-between items-center">
                    <span class="text-2xl font-bold text-purple-600">50 AITBC/hr</span>
                    <button onclick="purchaseGPU('${gpu.id}')" class="bg-purple-600 text-white px-4 py-2 rounded hover:bg-purple-700 transition">
                        Purchase
                    </button>
                </div>
            </div>
        `;
        container.innerHTML += gpuCard;
    });
    lucide.createIcons();
}
// Override the loadGPUOffers function
const originalLoadGPUOffers = loadGPUOffers;
loadGPUOffers = loadRealGPUOffers;
</script>
"""
    # Insert before closing body tag
    if '</body>' in html_content:
        html_content = html_content.replace('</body>', gpu_integration + '</body>')
        # Write back to file
        with open('/home/oib/windsurf/aitbc/apps/trade-exchange/index.html', 'w') as f:
            f.write(html_content)
        print("✅ Updated exchange with GPU integration!")
    else:
        print("❌ Could not find </body> tag in exchange HTML")
def create_gpu_api_endpoint():
    """Print the code snippet for a /api/gpu/offers exchange endpoint.

    Nothing is modified here -- the snippet must be pasted manually into
    simple_exchange_api.py, as instructed by main().
    """
    # The snippet is program output, printed verbatim for the operator.
    api_code = """
@app.get("/api/gpu/offers")
async def get_gpu_offers():
    \"\"\"Get available GPU offers\"\"\"
    try:
        # Fetch from GPU registry
        response = httpx.get("http://localhost:8091/miners/list")
        if response.status_code == 200:
            data = response.json()
            return {"offers": data.get("gpus", [])}
    except:
        pass
    # Return demo data if registry not available
    return {
        "offers": [{
            "id": "demo-gpu-1",
            "model": "NVIDIA RTX 4060 Ti",
            "memory_gb": 16,
            "price_per_hour": 50,
            "available": True
        }]
    }
"""
    print("\n📝 To add GPU API endpoint to exchange, add this code to simple_exchange_api.py:")
    print(api_code)
def main():
    """Run the integration: patch the frontend, then print operator next steps."""
    print("🔗 Integrating GPU Miner with Trade Exchange...")
    # Update exchange frontend
    update_exchange_with_gpu()
    # Show API integration code
    create_gpu_api_endpoint()
    print("\n📊 Integration Summary:")
    print("1. ✅ Exchange frontend updated to show real GPUs")
    print("2. 📝 See above for API endpoint code")
    print("3. 🌐 Access the exchange at: http://localhost:3002")
    print("4. 🎯 GPU Registry available at: http://localhost:8091/miners/list")
    print("\n🔄 To see the integrated GPU marketplace:")
    print("1. Restart the trade exchange if needed:")
    print("   cd /home/oib/windsurf/aitbc/apps/trade-exchange")
    print("   python simple_exchange_api.py")
    print("2. Open http://localhost:3002 in browser")
    print("3. Click 'Browse GPU Marketplace'")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env python3
"""
Simple GPU Miner Client for AITBC.

Registers GPU with coordinator and sends heartbeats.  This variant uses
Bearer authorization and un-versioned endpoint paths (no /v1 prefix).
"""
import json
import time
import httpx
import logging
from datetime import datetime
# Configuration
# NOTE(review): `json` is imported but unused in the visible code.
COORDINATOR_URL = "http://localhost:8000"  # Coordinator API base URL
# NOTE(review): MINER_ID is defined but never sent -- this variant's
# endpoints carry no miner_id parameter; confirm that is intended.
MINER_ID = "localhost-gpu-miner"
AUTH_TOKEN = "REDACTED_MINER_KEY"  # Sent as Authorization: Bearer (redacted)
HEARTBEAT_INTERVAL = 15  # Seconds between heartbeats
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# GPU capabilities (RTX 4060 Ti) advertised at registration time.
# Values are hard-coded for the demo rather than probed from hardware.
GPU_CAPABILITIES = {
    "gpu": {
        "model": "NVIDIA GeForce RTX 4060 Ti",
        "memory_gb": 16,
        "cuda_version": "12.4",
        "compute_capability": "8.9",
        "driver_version": "550.163.01"
    },
    "compute": {
        "type": "GPU",
        "platform": "CUDA",
        "supported_tasks": ["inference", "training", "stable-diffusion", "llama"],
        "max_concurrent_jobs": 1
    }
}
def register_miner():
    """Register this miner's capabilities with the coordinator.

    Returns the coordinator-issued session token, or None on failure.
    """
    bearer_headers = {
        "Authorization": f"Bearer {AUTH_TOKEN}",
        "Content-Type": "application/json",
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/miners/register",
            json={
                "capabilities": GPU_CAPABILITIES,
                "concurrency": 1,
                "region": "localhost",
            },
            headers=bearer_headers,
            timeout=10,
        )
        if response.status_code != 200:
            logger.error(f"Registration failed: {response.status_code} - {response.text}")
            return None
        data = response.json()
        logger.info(f"Successfully registered miner: {data}")
        return data.get("session_token")
    except Exception as exc:
        logger.error(f"Error registering miner: {exc}")
        return None
def send_heartbeat():
    """Send a liveness heartbeat with (hard-coded) GPU telemetry."""
    telemetry = {
        # Snapshot values taken once from nvidia-smi, not live readings.
        "last_seen": datetime.utcnow().isoformat(),
        "gpu_utilization": 9,
        "gpu_memory_used": 2682,  # MB
        "gpu_temperature": 43,
    }
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/miners/heartbeat",
            json={"inflight": 0, "status": "ONLINE", "metadata": telemetry},
            headers={"Authorization": f"Bearer {AUTH_TOKEN}", "Content-Type": "application/json"},
            timeout=5,
        )
        if response.status_code == 200:
            logger.info("Heartbeat sent successfully")
        else:
            logger.error(f"Heartbeat failed: {response.status_code} - {response.text}")
    except Exception as exc:
        logger.error(f"Error sending heartbeat: {exc}")
def poll_for_jobs():
    """Long-poll the coordinator for a job; return the job dict or None."""
    try:
        response = httpx.post(
            f"{COORDINATOR_URL}/miners/poll",
            json={"max_wait_seconds": 5},
            headers={"Authorization": f"Bearer {AUTH_TOKEN}", "Content-Type": "application/json"},
            timeout=10,
        )
        status = response.status_code
        if status == 204:
            logger.info("No jobs available")
            return None
        if status != 200:
            logger.error(f"Poll failed: {status} - {response.text}")
            return None
        job = response.json()
        logger.info(f"Received job: {job}")
        return job
    except Exception as exc:
        logger.error(f"Error polling for jobs: {exc}")
        return None
def main():
    """Main miner loop: register, then heartbeat and poll until interrupted.

    Exits with status 1 (via SystemExit; this module does not import sys)
    when registration fails or the main loop dies unexpectedly, so
    supervisors such as systemd can detect the failure and restart.
    """
    logger.info("Starting GPU Miner Client...")
    # Register with coordinator
    session_token = register_miner()
    if not session_token:
        logger.error("Failed to register, exiting")
        # Previously a bare `return` exited 0 and hid the failure.
        raise SystemExit(1)
    logger.info("Miner registered successfully, starting main loop...")
    # Main loop
    last_heartbeat = 0
    last_poll = 0
    try:
        while True:
            current_time = time.time()
            # Send heartbeat
            if current_time - last_heartbeat >= HEARTBEAT_INTERVAL:
                send_heartbeat()
                last_heartbeat = current_time
            # Poll for jobs
            if current_time - last_poll >= 3:  # Poll every 3 seconds
                job = poll_for_jobs()
                if job:
                    # TODO: Execute job
                    logger.info(f"Would execute job: {job}")
                last_poll = current_time
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Shutting down miner...")
    except Exception as e:
        logger.error(f"Error in main loop: {e}")
        # Propagate a non-zero exit so the failure is visible to callers.
        raise SystemExit(1)
if __name__ == "__main__":
    main()

32
scripts/gpu/start_gpu_miner.sh Executable file
View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Start GPU Miner Client
#
# Prints GPU and coordinator status, then launches the Python miner.
# The miner itself retries until the coordinator is reachable, so a
# missing coordinator here is only a warning, not a failure.

set -u

readonly AITBC_DIR="/home/oib/windsurf/aitbc"

echo "=== AITBC GPU Miner Client Startup ==="
echo "Starting GPU miner client..."
echo ""

# Check if GPU is available; show its status when present
if ! command -v nvidia-smi > /dev/null 2>&1; then
    echo "WARNING: nvidia-smi not found, GPU may not be available"
else
    echo "=== GPU Status ==="
    nvidia-smi --query-gpu=name,memory.used,memory.total,utilization.gpu,temperature.gpu --format=csv,noheader,nounits
    echo ""
fi

# Check if coordinator is running (informational only)
# NOTE(review): the Python miner polls /v1/health while this probes
# /health — confirm which path the coordinator actually serves.
echo "=== Checking Coordinator API ==="
if curl -s http://localhost:8000/health > /dev/null 2>&1; then
    echo "✓ Coordinator API is running on port 8000"
else
    echo "✗ Coordinator API is not accessible on port 8000"
    echo "  The miner will wait for the coordinator to start..."
fi
echo ""

echo "=== Starting GPU Miner ==="
# Bail out if the checkout is missing instead of launching the miner
# from the wrong working directory (SC2164).
cd "$AITBC_DIR" || { echo "ERROR: cannot cd to $AITBC_DIR" >&2; exit 1; }
python3 gpu_miner_with_wait.py