chore: remove outdated documentation and reference files
Some checks failed
AITBC CI/CD Pipeline / lint-and-test (3.11) (push) Has been cancelled
AITBC CI/CD Pipeline / lint-and-test (3.12) (push) Has been cancelled
AITBC CI/CD Pipeline / lint-and-test (3.13) (push) Has been cancelled
AITBC CI/CD Pipeline / test-cli (push) Has been cancelled
AITBC CI/CD Pipeline / test-services (push) Has been cancelled
AITBC CI/CD Pipeline / test-production-services (push) Has been cancelled
AITBC CI/CD Pipeline / security-scan (push) Has been cancelled
AITBC CI/CD Pipeline / build (push) Has been cancelled
AITBC CI/CD Pipeline / deploy-staging (push) Has been cancelled
AITBC CI/CD Pipeline / deploy-production (push) Has been cancelled
AITBC CI/CD Pipeline / performance-test (push) Has been cancelled
AITBC CI/CD Pipeline / docs (push) Has been cancelled
AITBC CI/CD Pipeline / release (push) Has been cancelled
AITBC CI/CD Pipeline / notify (push) Has been cancelled
Security Scanning / Bandit Security Scan (apps/coordinator-api/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (cli/aitbc_cli) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-core/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-crypto/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (packages/py/aitbc-sdk/src) (push) Has been cancelled
Security Scanning / Bandit Security Scan (tests) (push) Has been cancelled
Security Scanning / CodeQL Security Analysis (javascript) (push) Has been cancelled
Security Scanning / CodeQL Security Analysis (python) (push) Has been cancelled
Security Scanning / Dependency Security Scan (push) Has been cancelled
Security Scanning / Container Security Scan (push) Has been cancelled
Security Scanning / OSSF Scorecard (push) Has been cancelled
Security Scanning / Security Summary Report (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.11) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.12) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-cli-level1 (3.13) (push) Has been cancelled
AITBC CLI Level 1 Commands Test / test-summary (push) Has been cancelled

- Remove debugging service documentation (DEBUgging_SERVICES.md)
- Remove development logs policy and quick reference guides
- Remove E2E test creation summary
- Remove gift certificate example file
- Remove GitHub pull summary documentation
This commit is contained in:
2026-03-25 12:56:07 +01:00
parent 26f7dd5ad0
commit bfe6f94b75
229 changed files with 537 additions and 381 deletions

115
scripts/testing/debug-services.sh Executable file
View File

@@ -0,0 +1,115 @@
#!/bin/bash
# Debug script to identify malformed AITBC systemd service names.
# Dumps raw `systemctl` output with control characters made visible
# (`cat -A`, `od -c`) so hidden bytes in unit names can be spotted.
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

print_status() {
    echo -e "${BLUE}[DEBUG]${NC} $1"
}
print_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

print_status "Debugging AITBC service names..."

# Show raw systemctl output (cat -A exposes tabs and control characters)
print_status "Raw systemctl output for AITBC services:"
systemctl list-units --all | grep "aitbc-" | cat -A
echo ""

# Show each field separately
print_status "Analyzing service names field by field:"
systemctl list-units --all | grep "aitbc-" | while read -r line; do
    echo "Raw line: '$line'"
    # Extract each field; the unit name is the first whitespace field
    unit=$(echo "$line" | awk '{print $1}')
    load=$(echo "$line" | awk '{print $2}')
    active=$(echo "$line" | awk '{print $3}')
    sub=$(echo "$line" | awk '{print $4}')
    description=$(echo "$line" | cut -d' ' -f5-)
    echo " Unit: '$unit'"
    echo " Load: '$load'"
    echo " Active: '$active'"
    echo " Sub: '$sub'"
    echo " Description: '$description'"
    # Check if unit name is valid (systemd unit charset)
    if [[ "$unit" =~ [^a-zA-Z0-9\-\._] ]]; then
        print_error " ❌ Invalid characters in unit name!"
        echo " ❌ Hex representation: $(echo -n "$unit" | od -c)"
    else
        print_success " ✅ Valid unit name"
    fi
    echo ""
done

# Check for any hidden characters.
# Bug fix: this previously extracted field $2 (the LOAD column), which never
# ends in ".service", so this loop body never executed.  Field $1 is the unit
# name, consistent with the field-by-field analysis above.
print_status "Checking for hidden characters in service names:"
systemctl list-units --all | grep "aitbc-" | awk '{print $1}' | grep "\.service$" | while read -r service; do
    echo "Service: '$service'"
    echo "Length: ${#service}"
    echo "Hex dump:"
    echo -n "$service" | od -c
    echo ""
done

# Show systemctl list-unit-files output
print_status "Checking systemctl list-unit-files:"
systemctl list-unit-files | grep "aitbc-" | cat -A

# Check service files on disk
print_status "Checking service files in /etc/systemd/system/:"
if [ -d "/etc/systemd/system" ]; then
    find /etc/systemd/system/ -name "*aitbc*" -type f | while read -r file; do
        echo "Found: $file"
        basename "$file"
        echo "Hex: $(basename "$file" | od -c)"
        echo ""
    done
fi

# Check service files in user directory
print_status "Checking service files in user directory:"
if [ -d "$HOME/.config/systemd/user" ]; then
    find "$HOME/.config/systemd/user" -name "*aitbc*" -type f 2>/dev/null | while read -r file; do
        echo "Found: $file"
        basename "$file"
        echo "Hex: $(basename "$file" | od -c)"
        echo ""
    done
fi

# Check for any encoding issues
print_status "Checking locale and encoding:"
echo "Current locale: $LANG"
echo "System encoding: $(locale charmap)"
echo ""

# Try to reload systemd daemon
print_status "Reloading systemd daemon to clear any cached issues:"
sudo systemctl daemon-reload
echo "Daemon reload completed"
echo ""
print_status "Debug complete. Review the output above to identify the source of the malformed service name."

View File

@@ -0,0 +1,160 @@
#!/usr/bin/env python3
"""
Simple Performance Test with Debugging and Timeout
"""
import time
import requests
import signal
import sys
from typing import Dict, List
class TimeoutError(Exception):
    """Raised when a watched operation exceeds its time budget."""


def timeout_handler(signum, frame):
    """SIGALRM handler: convert the pending alarm into a TimeoutError."""
    raise TimeoutError("Operation timed out")
def test_endpoint_with_timeout(url: str, method: str = "GET", data: Dict = None, timeout: int = 5) -> Dict:
    """Test a single endpoint with a hard timeout and debug output.

    Args:
        url: Full URL to request.
        method: "GET" or "POST"; any other verb is reported as a failure.
        data: JSON body for POST requests (ignored for GET).
        timeout: Budget in seconds, enforced by requests and by SIGALRM.

    Returns:
        Dict with url, method, status_code, response_time_ms, success, error.
        Never raises: all failures are reported in the returned dict.
        NOTE(review): any completed HTTP exchange counts as success here,
        even 4xx/5xx responses — confirm that is intended.
    """
    print(f"🔍 Testing {method} {url}")
    # Belt-and-braces watchdog on top of requests' own timeout.
    # NOTE(review): signal.alarm works only on the main thread of a Unix
    # process — confirm this is never called from a worker thread/Windows.
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)
    try:
        start_time = time.time()
        if method == "GET":
            response = requests.get(url, timeout=timeout)
        elif method == "POST":
            response = requests.post(url, json=data, timeout=timeout)
        else:
            # Bug fix: an unsupported verb previously left `response` unbound
            # and crashed below with NameError; raise instead so the generic
            # handler reports it as a normal failure result.
            raise ValueError(f"Unsupported HTTP method: {method}")
        end_time = time.time()
        signal.alarm(0)  # Cancel timeout
        response_time_ms = (end_time - start_time) * 1000
        result = {
            "url": url,
            "method": method,
            "status_code": response.status_code,
            "response_time_ms": response_time_ms,
            "success": True,
            "error": None
        }
        print(f"✅ Status: {response.status_code}")
        print(f"⏱️ Response Time: {response_time_ms:.2f}ms")
        print(f"📄 Response Size: {len(response.content)} bytes")
        return result
    except TimeoutError as e:
        signal.alarm(0)
        print(f"❌ Timeout: {e}")
        return {
            "url": url,
            "method": method,
            "status_code": None,
            "response_time_ms": timeout * 1000,
            "success": False,
            "error": str(e)
        }
    except Exception as e:
        signal.alarm(0)
        print(f"❌ Error: {e}")
        return {
            "url": url,
            "method": method,
            "status_code": None,
            "response_time_ms": 0,
            "success": False,
            "error": str(e)
        }
def run_performance_tests():
    """Run performance tests with debugging"""
    # Exercises the local marketplace API end-to-end: health check, GPU
    # listing, then booking and releasing a hard-coded GPU.  Results are
    # collected into a list and summarised at the end; nothing is returned.
    print("🎯 AITBC GPU Marketplace Performance Test")
    print("=" * 50)
    base_url = "http://localhost:8000"
    results = []
    # Test 1: Health endpoint
    print("\n1⃣ Health Endpoint Test")
    result = test_endpoint_with_timeout(f"{base_url}/health", timeout=3)
    results.append(result)
    # Test 2: GPU List endpoint
    print("\n2⃣ GPU List Endpoint Test")
    result = test_endpoint_with_timeout(f"{base_url}/v1/marketplace/gpu/list", timeout=5)
    results.append(result)
    # Test 3: GPU Booking endpoint
    # NOTE(review): gpu_c5be877c is a fixture-specific GPU id — confirm it
    # exists in the target environment, otherwise these two tests 404.
    print("\n3⃣ GPU Booking Endpoint Test")
    booking_data = {"duration_hours": 1}
    result = test_endpoint_with_timeout(
        f"{base_url}/v1/marketplace/gpu/gpu_c5be877c/book",
        "POST",
        booking_data,
        timeout=10
    )
    results.append(result)
    # Test 4: GPU Release endpoint (POST with no body)
    print("\n4⃣ GPU Release Endpoint Test")
    result = test_endpoint_with_timeout(
        f"{base_url}/v1/marketplace/gpu/gpu_c5be877c/release",
        "POST",
        timeout=10
    )
    results.append(result)
    # Summary
    print("\n📊 PERFORMANCE SUMMARY")
    print("=" * 50)
    successful_tests = sum(1 for r in results if r["success"])
    total_tests = len(results)
    print(f"✅ Successful Tests: {successful_tests}/{total_tests} ({successful_tests/total_tests*100:.1f}%)")
    print(f"\n📈 Response Times:")
    for result in results:
        if result["success"]:
            # Traffic-light thresholds: <100ms green, <200ms yellow, else red.
            status = "🟢" if result["response_time_ms"] < 100 else "🟡" if result["response_time_ms"] < 200 else "🔴"
            endpoint = result['url'].split('/')[-1] if '/' in result['url'] else result['url']
            print(f" {status} {result['method']} {endpoint}: {result['response_time_ms']:.2f}ms")
        else:
            endpoint = result['url'].split('/')[-1] if '/' in result['url'] else result['url']
            print(f"{result['method']} {endpoint}: {result['error']}")
    # Performance grade, based on the mean latency of successful tests only.
    successful_times = [r["response_time_ms"] for r in results if r["success"]]
    if successful_times:
        avg_response_time = sum(successful_times) / len(successful_times)
        if avg_response_time < 50:
            grade = "🟢 EXCELLENT"
        elif avg_response_time < 100:
            grade = "🟡 GOOD"
        elif avg_response_time < 200:
            grade = "🟠 FAIR"
        else:
            grade = "🔴 POOR"
        print(f"\n🎯 Overall Performance: {grade}")
        print(f"📊 Average Response Time: {avg_response_time:.2f}ms")
    print(f"\n✅ Performance testing complete!")
def _cli() -> None:
    """Entry point: run the suite, mapping interrupts/errors to exit code 1."""
    try:
        run_performance_tests()
    except KeyboardInterrupt:
        print("\n⚠️ Test interrupted by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n❌ Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    _cli()

View File

@@ -0,0 +1,32 @@
# Ad-hoc inspection script: opens the node's on-disk mempool exactly the way
# the main node does, dumps its raw SQLite rows, then shows what drain()
# would hand to the block producer.
import sys
from pathlib import Path
import json  # NOTE(review): imported but unused in this script

# Setup sys.path so aitbc_chain resolves from the deployed node checkout.
sys.path.insert(0, str(Path('/opt/aitbc/apps/blockchain-node/src')))

from aitbc_chain.config import settings
from aitbc_chain.mempool import init_mempool, get_mempool

# Use development mempool backend configuration exactly like main node
init_mempool(
    backend=settings.mempool_backend,
    db_path=str(settings.db_path.parent / "mempool.db"),
    max_size=settings.mempool_max_size,
    min_fee=settings.min_fee,
)
mempool = get_mempool()
print(f"Mempool class: {mempool.__class__.__name__}")
# NOTE(review): reaches into private attributes (_db_path, _conn) — only
# valid for a SQLite-backed mempool; other backends will AttributeError.
print(f"Mempool DB path: {mempool._db_path}")

chain_id = 'ait-mainnet'
rows = mempool._conn.execute("SELECT * FROM mempool WHERE chain_id = ?", (chain_id,)).fetchall()
print(f"Found {len(rows)} raw rows in DB")
for r in rows:
    print(r)

# drain(100, 1000000, chain_id): argument meanings (max txs, max bytes?)
# assumed from this call site — confirm against aitbc_chain.mempool.
txs = mempool.drain(100, 1000000, chain_id)
print(f"Drained {len(txs)} txs")
for tx in txs:
    print(tx)

View File

@@ -0,0 +1,209 @@
#!/usr/bin/env python3
"""
Performance Testing Suite for AITBC Platform
Tests API endpoints, load handling, and system performance
"""
import asyncio
import aiohttp
import time
import json
import statistics
from typing import List, Dict, Any
from concurrent.futures import ThreadPoolExecutor
import subprocess
import sys
class PerformanceTester:
    """Async load tester for the AITBC HTTP API.

    Fires batches of concurrent requests at selected endpoints, aggregates
    latency/success statistics, and prints a human-readable report.
    """

    def __init__(self, base_url: str = "https://aitbc.bubuit.net/api/v1"):
        # Target API root; endpoint paths below are joined onto this.
        self.base_url = base_url
        # Fixed test credential sent as X-Api-Key on every request.
        self.api_key = "test_key_16_characters"
        self.results = []

    async def single_request(self, session: aiohttp.ClientSession,
                             method: str, endpoint: str, **kwargs) -> Dict[str, Any]:
        """Execute a single API request and measure performance.

        Never raises: transport errors are reported as status_code 0 with an
        'error' message in the returned dict.
        """
        start_time = time.time()
        headers = kwargs.pop('headers', {})
        headers['X-Api-Key'] = self.api_key
        try:
            async with session.request(method, f"{self.base_url}{endpoint}",
                                       headers=headers, **kwargs) as response:
                content = await response.text()
                end_time = time.time()
                return {
                    'endpoint': endpoint,
                    'method': method,
                    'status_code': response.status,
                    'response_time': end_time - start_time,
                    'content_length': len(content),
                    'success': response.status < 400
                }
        except Exception as e:
            end_time = time.time()
            return {
                'endpoint': endpoint,
                'method': method,
                'status_code': 0,
                'response_time': end_time - start_time,
                'content_length': 0,
                'success': False,
                'error': str(e)
            }

    async def load_test_endpoint(self, endpoint: str, method: str = "GET",
                                 concurrent_users: int = 10, requests_per_user: int = 5,
                                 **kwargs) -> Dict[str, Any]:
        """Fire concurrent_users × requests_per_user requests at one endpoint
        and return aggregated latency / success metrics."""
        print(f"🧪 Load testing {method} {endpoint} - {concurrent_users} users × {requests_per_user} requests")
        connector = aiohttp.TCPConnector(limit=100, limit_per_host=100)
        timeout = aiohttp.ClientTimeout(total=30)
        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            tasks = [
                self.single_request(session, method, endpoint, **kwargs)
                for _user in range(concurrent_users)
                for _req in range(requests_per_user)
            ]
            # Measure wall-clock time around the whole batch so a meaningful
            # throughput figure can be computed below.
            batch_start = time.time()
            results = await asyncio.gather(*tasks, return_exceptions=True)
            batch_elapsed = time.time() - batch_start
        # Filter out exceptions and calculate metrics
        valid_results = [r for r in results if isinstance(r, dict)]
        successful_results = [r for r in valid_results if r['success']]
        response_times = [r['response_time'] for r in successful_results]
        # Bug fix: throughput was previously successful/(max-min response
        # time), which is not a rate and divides by zero when all samples
        # are equally fast.  Use requests completed per wall-clock second.
        rps = len(successful_results) / batch_elapsed if batch_elapsed > 0 else 0
        return {
            'endpoint': endpoint,
            'total_requests': len(valid_results),
            'successful_requests': len(successful_results),
            'failed_requests': len(valid_results) - len(successful_results),
            'success_rate': len(successful_results) / len(valid_results) * 100 if valid_results else 0,
            'avg_response_time': statistics.mean(response_times) if response_times else 0,
            'min_response_time': min(response_times) if response_times else 0,
            'max_response_time': max(response_times) if response_times else 0,
            'median_response_time': statistics.median(response_times) if response_times else 0,
            # P95 needs a reasonably large sample; report 0 otherwise.
            'p95_response_time': statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else 0,
            'requests_per_second': rps
        }

    async def run_performance_tests(self):
        """Run the standard endpoint battery and return per-endpoint metrics."""
        print("🚀 Starting AITBC Platform Performance Tests")
        print("=" * 60)
        test_endpoints = [
            # Health check (baseline)
            {'endpoint': '/health', 'method': 'GET', 'users': 20, 'requests': 10},
            # Client endpoints
            {'endpoint': '/client/jobs', 'method': 'GET', 'users': 5, 'requests': 5},
            # Miner endpoints
            {'endpoint': '/miners/register', 'method': 'POST', 'users': 3, 'requests': 3,
             'json': {'capabilities': {'gpu': {'model': 'RTX 4090'}}},
             'headers': {'Content-Type': 'application/json', 'X-Miner-ID': 'perf-test-miner'}},
            # Blockchain endpoints
            {'endpoint': '/blockchain/info', 'method': 'GET', 'users': 5, 'requests': 5},
        ]
        results = []
        for test_config in test_endpoints:
            endpoint = test_config.pop('endpoint')
            method = test_config.pop('method')
            # Bug fix: the config keys 'users'/'requests' do not match
            # load_test_endpoint's parameter names; previously they fell
            # through **kwargs into session.request() and made every request
            # fail with a TypeError.  Map them explicitly.
            users = test_config.pop('users', 10)
            per_user = test_config.pop('requests', 5)
            result = await self.load_test_endpoint(
                endpoint, method,
                concurrent_users=users, requests_per_user=per_user,
                **test_config)
            results.append(result)
            # Print immediate results
            print(f"📊 {method} {endpoint}:")
            print(f" ✅ Success Rate: {result['success_rate']:.1f}%")
            print(f" ⏱️ Avg Response: {result['avg_response_time']:.3f}s")
            print(f" 📈 RPS: {result['requests_per_second']:.1f}")
            print(f" 📏 P95: {result['p95_response_time']:.3f}s")
            print()
        return results

    def generate_report(self, results: List[Dict[str, Any]]):
        """Print a summary report with per-endpoint stats and recommendations."""
        print("📋 PERFORMANCE TEST REPORT")
        print("=" * 60)
        total_requests = sum(r['total_requests'] for r in results)
        total_successful = sum(r['successful_requests'] for r in results)
        overall_success_rate = (total_successful / total_requests * 100) if total_requests > 0 else 0
        print(f"📊 Overall Statistics:")
        print(f" Total Requests: {total_requests}")
        print(f" Successful Requests: {total_successful}")
        print(f" Overall Success Rate: {overall_success_rate:.1f}%")
        print()
        print(f"🎯 Endpoint Performance:")
        for result in results:
            # Bug fix: the pass/fail markers were empty strings (the emoji
            # were lost); restore them to match the legend printed below.
            status = "✅" if result['success_rate'] >= 95 else "⚠️" if result['success_rate'] >= 80 else "❌"
            print(f" {status} {result['method']} {result['endpoint']}")
            print(f" Success: {result['success_rate']:.1f}% | "
                  f"Avg: {result['avg_response_time']:.3f}s | "
                  f"P95: {result['p95_response_time']:.3f}s | "
                  f"RPS: {result['requests_per_second']:.1f}")
        print()
        print("🏆 Performance Benchmarks:")
        print(" ✅ Excellent: <100ms response time, >95% success rate")
        print(" ⚠️ Good: <500ms response time, >80% success rate")
        print(" ❌ Needs Improvement: >500ms or <80% success rate")
        # Recommendations
        print()
        print("💡 Recommendations:")
        slow_endpoints = [r for r in results if r['avg_response_time'] > 0.5]
        if slow_endpoints:
            print(" 🐌 Slow endpoints detected - consider optimization:")
            for r in slow_endpoints:
                print(f" - {r['endpoint']} ({r['avg_response_time']:.3f}s avg)")
        unreliable_endpoints = [r for r in results if r['success_rate'] < 95]
        if unreliable_endpoints:
            print(" 🔧 Unreliable endpoints detected - check for errors:")
            for r in unreliable_endpoints:
                print(f" - {r['endpoint']} ({r['success_rate']:.1f}% success)")
        if not slow_endpoints and not unreliable_endpoints:
            print(" 🎉 All endpoints performing well - ready for production!")
async def main():
    """Run the full suite, print the report, and map results to an exit code."""
    tester = PerformanceTester()
    try:
        results = await tester.run_performance_tests()
        tester.generate_report(results)
        # Gate on aggregate success rate and aggregate latency.
        avg_success_rate = statistics.mean([r['success_rate'] for r in results])
        avg_response_time = statistics.mean([r['avg_response_time'] for r in results])
        if avg_success_rate >= 95 and avg_response_time < 0.5:
            print("\n🎉 PERFORMANCE TESTS PASSED - Ready for production!")
            return 0
        print("\n⚠️ PERFORMANCE TESTS COMPLETED - Review recommendations")
        return 1
    except Exception as e:
        print(f"❌ Performance test failed: {e}")
        return 1
if __name__ == "__main__":
    # Propagate the suite's verdict (0 pass / 1 fail) as the process exit code.
    sys.exit(asyncio.run(main()))

160
scripts/testing/qa-cycle.py Executable file
View File

@@ -0,0 +1,160 @@
#!/usr/bin/env python3
"""
QA Cycle: Run tests, exercise scenarios, find bugs, perform code reviews.
Runs periodically to ensure repository health and discover regressions.
"""
import os
import subprocess
import json
import sys
import shutil
import time
import random
from datetime import datetime
from pathlib import Path
# Jitter: random delay up to 15 minutes (900 seconds)
# NOTE: this sleep runs at import time, so merely importing this module
# blocks for up to 15 minutes — intentional for staggering cron-launched
# QA cycles across agents.
time.sleep(random.randint(0, 900))

# Repository checkout the QA cycle operates on.
REPO_DIR = '/opt/aitbc'
# Append-only log for each cycle's findings (see log()).
LOG_FILE = '/opt/aitbc/qa-cycle.log'
# Shell snippet containing a GITEA_TOKEN=<token> line; parsed by get_token().
TOKEN_FILE = '/opt/aitbc/.gitea_token.sh'
def get_token():
    """Return the Gitea API token from TOKEN_FILE, else the env var, else ''."""
    if os.path.exists(TOKEN_FILE):
        with open(TOKEN_FILE) as fh:
            for raw in fh:
                stripped = raw.strip()
                if stripped.startswith('GITEA_TOKEN='):
                    return stripped.split('=', 1)[1].strip()
    return os.getenv('GITEA_TOKEN', '')


# Resolved once at import time.
GITEA_TOKEN = get_token()
API_BASE = os.getenv('GITEA_API_BASE', 'http://gitea.bubuit.net:3000/api/v1')
REPO = 'oib/aitbc'
def log(msg):
    """Append *msg* to LOG_FILE with a UTC timestamp and echo it to stdout."""
    stamp = datetime.utcnow().isoformat() + 'Z'
    with open(LOG_FILE, 'a') as sink:
        sink.write(f"[{stamp}] {msg}\n")
    print(msg)
def run_cmd(cmd, cwd=REPO_DIR, timeout=300):
    """Run *cmd* through the shell; return (returncode, stdout, stderr).

    Timeouts map to rc -1, any other launch failure to rc -2.
    """
    try:
        proc = subprocess.run(
            cmd, shell=True, cwd=cwd,
            capture_output=True, text=True, timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        return -1, "", "timeout"
    except Exception as exc:
        return -2, "", str(exc)
    return proc.returncode, proc.stdout, proc.stderr
def fetch_latest_main():
    """Hard-sync the working tree to origin/main; return True on success."""
    log("Fetching latest main...")
    steps = (
        ("git fetch origin main", "Fetch failed"),
        ("git checkout main", "Checkout main failed"),
        ("git reset --hard origin/main", "Reset to origin/main failed"),
    )
    for cmd, failure in steps:
        rc, _out, err = run_cmd(cmd)
        if rc != 0:
            log(f"{failure}: {err}")
            return False
    log("Main updated to latest.")
    return True
def run_tests():
    """Run pytest for each Python package that ships a tests/ directory.

    Returns a list of (package, passed) tuples.
    """
    log("Running test suites...")
    results = []
    for pkg in ['aitbc-core', 'aitbc-sdk', 'aitbc-crypto']:
        testdir = f"packages/py/{pkg}/tests"
        if not os.path.exists(os.path.join(REPO_DIR, testdir)):
            continue  # package has no test suite checked out
        log(f"Testing {pkg}...")
        rc, out, err = run_cmd(f"python3 -m pytest {testdir} -q", timeout=120)
        ok = rc == 0
        if ok:
            log(f"{pkg} tests passed.")
        else:
            log(f"{pkg} tests failed (rc={rc}). Output: {out}\nError: {err}")
        results.append((pkg, ok))
    return results
def run_lint():
    """Run flake8's critical-error subset (syntax errors, undefined names)."""
    log("Running linters (flake8 if available)...")
    if not shutil.which('flake8'):
        log("flake8 not installed; skipping lint.")
        return
    rc, out, _err = run_cmd(
        "flake8 packages/py/ --count --select=E9,F63,F7,F82 --show-source --statistics",
        timeout=60,
    )
    if rc == 0:
        log("✅ No critical lint errors.")
    else:
        log(f"❌ Lint errors: {out}")
def query_api(path, method='GET', data=None):
    """Call the Gitea REST API; return parsed JSON or None on any error."""
    import urllib.request
    import urllib.error
    url = f"{API_BASE}/{path}"
    headers = {'Authorization': f'token {GITEA_TOKEN}'}
    body = None
    if data:
        headers['Content-Type'] = 'application/json'
        body = json.dumps(data).encode()
    request = urllib.request.Request(url, method=method, headers=headers, data=body)
    try:
        with urllib.request.urlopen(request, timeout=30) as resp:
            return json.load(resp)
    except Exception as exc:
        log(f"API error {path}: {exc}")
        return None
def review_my_open_prs():
    """Ensure each of my open PRs has the sibling agent requested as reviewer.

    NOTE(review): depends on the module globals MY_AGENT / SIBLING_AGENT
    that main() sets — calling this before main() raises NameError.
    """
    log("Checking my open PRs for missing reviews...")
    my_prs = query_api(f'repos/{REPO}/pulls?state=open&author={MY_AGENT}') or []
    for pr in my_prs:
        num, title = pr['number'], pr['title']
        requested = pr.get('requested_reviewers', [])
        if any(r.get('login') == SIBLING_AGENT for r in requested):
            log(f"PR #{num} already has sibling review requested.")
            continue
        log(f"PR #{num} '{title}' missing sibling review. Requesting...")
        query_api(f'repos/{REPO}/pulls/{num}/requested_reviewers',
                  method='POST', data={'reviewers': [SIBLING_AGENT]})
def synthesize_status():
    """Log a snapshot of open issues/PRs and flag PRs with failing checks."""
    log("Collecting repository status...")
    issues = query_api(f'repos/{REPO}/issues?state=open') or []
    prs = query_api(f'repos/{REPO}/pulls?state=open') or []
    log(f"Open issues: {len(issues)}, open PRs: {len(prs)}")
    unassigned_issues = [
        i for i in issues if not i.get('assignees') and 'pull_request' not in i
    ]
    log(f"Unassigned issues: {len(unassigned_issues)}")
    # Sample at most three unassigned issues in the log.
    for issue in unassigned_issues[:3]:
        log(f" - #{issue['number']} {issue['title'][:50]}")
    # Check CI for open PRs
    for pr in prs:
        statuses = query_api(f'repos/{REPO}/commits/{pr["head"]["sha"]}/statuses') or []
        # NOTE(review): assumes Gitea status objects carry a 'status' field
        # valued success/pending/failure — verify against the API version.
        failing = [s for s in statuses if s.get('status') not in ('success', 'pending')]
        if failing:
            log(f"PR #{pr['number']} has failing checks: {', '.join(s.get('context','?') for s in failing)}")
def main():
    """One QA cycle: sync main, test, lint, tend PR reviews, log status."""
    started = datetime.utcnow().isoformat() + 'Z'
    log(f"\n=== QA Cycle start: {started} ===")
    if not GITEA_TOKEN:
        log("GITEA_TOKEN not set; aborting.")
        sys.exit(1)
    # Agent identity: this process is one of the aitbc/aitbc1 pair; the
    # other member of the pair is its review sibling.
    global MY_AGENT, SIBLING_AGENT
    MY_AGENT = os.getenv('AGENT_NAME', 'aitbc1')
    SIBLING_AGENT = 'aitbc' if MY_AGENT == 'aitbc1' else 'aitbc1'
    if not fetch_latest_main():
        log("Aborting due to fetch failure.")
        return
    run_tests()
    run_lint()
    review_my_open_prs()
    synthesize_status()
    log(f"=== QA Cycle complete ===")


if __name__ == '__main__':
    main()

31
scripts/testing/quick_test.py Executable file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env python3
"""
Quick Performance Test
"""
import requests
import time
def test_endpoint(url, headers=None):
    """GET *url*, print status and latency; return True iff the request completed."""
    started = time.time()
    try:
        resp = requests.get(url, headers=headers, timeout=5)
    except Exception as e:
        elapsed = time.time() - started
        print(f"{url}: Error in {elapsed:.3f}s - {e}")
        return False
    elapsed = time.time() - started
    print(f"{url}: {resp.status_code} in {elapsed:.3f}s")
    return True
# --- script body: these network calls run on import/execution ---
print("🧪 Quick Performance Test")
print("=" * 30)
# Test health endpoint (unauthenticated)
test_endpoint("https://aitbc.bubuit.net/api/v1/health")
# Test with API key
# NOTE(review): hard-coded placeholder key — confirm it is valid in the
# target environment, otherwise this request only proves connectivity.
headers = {"X-Api-Key": "test_key_16_characters"}
test_endpoint("https://aitbc.bubuit.net/api/v1/client/jobs", headers)
print("\n✅ Basic connectivity test complete")

261
scripts/testing/run_all_tests.sh Executable file
View File

@@ -0,0 +1,261 @@
#!/bin/bash
# Master Test Runner for Multi-Site AITBC Testing
echo "🚀 Multi-Site AITBC Test Suite Master Runner"
echo "=========================================="
echo "Testing localhost, aitbc, and aitbc1 with all CLI features"
echo ""
# Resolve project root (directory containing this script)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): PROJECT_ROOT is identical to SCRIPT_DIR (the second cd is a
# no-op), so every $PROJECT_ROOT/... path below resolves next to this script.
# Confirm test_scenario_*.sh, test_multi_site.py and home/ live alongside it.
PROJECT_ROOT="$(cd "$SCRIPT_DIR" && pwd)"
# Function to run a test scenario
# Run one scenario script and report its outcome.
#   $1 - human-readable scenario name
#   $2 - path to the scenario's bash script
# Returns the scenario's exit code, or 1 when the script is missing.
run_scenario() {
    local scenario_name=$1
    local script_path=$2

    echo ""
    echo "🔧 Running $scenario_name"
    echo "================================"

    if [ ! -f "$script_path" ]; then
        echo "❌ Script not found: $script_path"
        return 1
    fi

    bash "$script_path"
    local exit_code=$?
    if [ $exit_code -eq 0 ]; then
        echo "$scenario_name completed successfully"
    else
        echo "$scenario_name failed with exit code $exit_code"
    fi
    return $exit_code
}
# Function to check prerequisites
# Verify CLI availability, marketplace/Ollama/SSH connectivity and test-user
# wallets.  Only a missing CLI aborts (return 1); other failures are
# reported but do not stop the run.
check_prerequisites() {
    echo "🔍 Checking prerequisites..."
    echo "=========================="
    # Check if aitbc CLI is available
    if command -v aitbc &> /dev/null; then
        echo "✅ AITBC CLI found"
        aitbc --version | head -1
    else
        echo "❌ AITBC CLI not found in PATH"
        echo "Please ensure CLI is installed and in PATH"
        return 1
    fi
    # Check if required services are running
    echo ""
    echo "🌐 Checking service connectivity..."
    # Check aitbc connectivity (marketplace health endpoint)
    if curl -s http://127.0.0.1:18000/v1/health &> /dev/null; then
        echo "✅ aitbc marketplace accessible (port 18000)"
    else
        echo "❌ aitbc marketplace not accessible (port 18000)"
    fi
    # Check aitbc1 connectivity
    if curl -s http://127.0.0.1:18001/v1/health &> /dev/null; then
        echo "✅ aitbc1 marketplace accessible (port 18001)"
    else
        echo "❌ aitbc1 marketplace not accessible (port 18001)"
    fi
    # Check Ollama (local GPU inference service)
    if ollama list &> /dev/null; then
        echo "✅ Ollama GPU service available"
        ollama list | head -3
    else
        echo "❌ Ollama GPU service not available"
    fi
    # Check SSH access to containers (host aliases from ~/.ssh/config)
    echo ""
    echo "🏢 Checking container access..."
    if ssh aitbc-cascade "echo 'SSH OK'" &> /dev/null; then
        echo "✅ SSH access to aitbc container"
    else
        echo "❌ SSH access to aitbc container failed"
    fi
    if ssh aitbc1-cascade "echo 'SSH OK'" &> /dev/null; then
        echo "✅ SSH access to aitbc1 container"
    else
        echo "❌ SSH access to aitbc1 container failed"
    fi
    echo ""
    echo "📋 Checking user configurations..."
    # Check miner1 and client1 configurations (relative to project root)
    local home_dir="$PROJECT_ROOT/home"
    if [ -f "$home_dir/miner1/miner_wallet.json" ]; then
        echo "✅ miner1 configuration found"
    else
        echo "❌ miner1 configuration missing"
    fi
    if [ -f "$home_dir/client1/client_wallet.json" ]; then
        echo "✅ client1 configuration found"
    else
        echo "❌ client1 configuration missing"
    fi
    echo ""
    echo "🔧 Prerequisite check complete"
    echo "=============================="
}
# Function to run comprehensive CLI tests
# Run a battery of read-only CLI commands against both marketplaces.
# Returns the number of failed commands (0 = all passed).
run_cli_tests() {
    echo ""
    echo "🔧 Running Comprehensive CLI Tests"
    echo "================================="

    # Each entry is "<test name>|<command>".
    # Bug fix: the list previously used ':' as the separator, but both the
    # test names ("chain:list") and the commands ("http://...") contain
    # colons, so `IFS=':' read name command` split at the wrong place and
    # every parsed command was garbage.  '|' appears in neither field.
    local cli_commands=(
        "chain:list|aitbc chain list --node-endpoint http://127.0.0.1:18000"
        "chain:list:aitbc1|aitbc chain list --node-endpoint http://127.0.0.1:18001"
        "analytics:summary:aitbc|aitbc analytics summary --node-endpoint http://127.0.0.1:18000"
        "analytics:summary:aitbc1|aitbc analytics summary --node-endpoint http://127.0.0.1:18001"
        "marketplace:list:aitbc|aitbc marketplace list --marketplace-url http://127.0.0.1:18000"
        "marketplace:list:aitbc1|aitbc marketplace list --marketplace-url http://127.0.0.1:18001"
        "agent_comm:list:aitbc|aitbc agent_comm list --node-endpoint http://127.0.0.1:18000"
        "agent_comm:list:aitbc1|aitbc agent_comm list --node-endpoint http://127.0.0.1:18001"
        "deploy:overview|aitbc deploy overview --format table"
    )

    local passed=0
    local total=0
    for cmd_info in "${cli_commands[@]}"; do
        IFS='|' read -r test_name command <<< "$cmd_info"
        total=$((total + 1))
        echo "Testing: $test_name"
        if eval "$command" &> /dev/null; then
            echo "$test_name - PASSED"
            passed=$((passed + 1))
        else
            echo "$test_name - FAILED"
        fi
    done

    echo ""
    echo "CLI Test Results: $passed/$total passed"
    return $((total - passed))
}
# Function to generate final report
# Print the final summary.
#   $1 - total scenario count, $2 - passed scenario count
# Returns 0 when everything passed, 1 otherwise.
generate_report() {
    local total_scenarios=$1
    local passed_scenarios=$2
    local failed_scenarios=$((total_scenarios - passed_scenarios))

    echo ""
    echo "📊 FINAL TEST REPORT"
    echo "==================="
    echo "Total Scenarios: $total_scenarios"
    echo "Passed: $passed_scenarios"
    echo "Failed: $failed_scenarios"

    if [ $failed_scenarios -ne 0 ]; then
        echo ""
        echo "⚠️ SOME TESTS FAILED"
        echo "Please check the failed scenarios and fix issues"
        return 1
    fi

    echo ""
    echo "🎉 ALL TESTS PASSED!"
    echo "Multi-site AITBC ecosystem is fully functional"
    return 0
}
# Main execution
main() {
    # Full run: prerequisites, CLI battery, the four scenario scripts and
    # the comprehensive python suite; pass/fail totals are reported at the end.
    local scenario_count=0
    local passed_count=0

    # Check prerequisites
    if ! check_prerequisites; then
        echo "❌ Prerequisites not met. Exiting."
        exit 1
    fi

    # Run CLI tests first
    echo ""
    if run_cli_tests; then
        echo "✅ All CLI tests passed"
        passed_count=$((passed_count + 1))
    else
        echo "❌ Some CLI tests failed"
    fi
    scenario_count=$((scenario_count + 1))

    # Scenario tests.  Entries are "<display name>|<script path>".
    # Bug fix: this list was previously split on ':', but the display names
    # themselves contain a colon ("Scenario A: ..."), so the extracted
    # script path was garbage and no scenario script could ever be found.
    # '|' occurs in neither field.
    local scenarios=(
        "Scenario A: Localhost GPU Miner → aitbc Marketplace|$PROJECT_ROOT/test_scenario_a.sh"
        "Scenario B: Localhost GPU Client → aitbc1 Marketplace|$PROJECT_ROOT/test_scenario_b.sh"
        "Scenario C: aitbc Container User Operations|$PROJECT_ROOT/test_scenario_c.sh"
        "Scenario D: aitbc1 Container User Operations|$PROJECT_ROOT/test_scenario_d.sh"
    )

    for scenario_info in "${scenarios[@]}"; do
        IFS='|' read -r scenario_name script_path <<< "$scenario_info"
        scenario_count=$((scenario_count + 1))
        if run_scenario "$scenario_name" "$script_path"; then
            passed_count=$((passed_count + 1))
        fi
    done

    # Run comprehensive test suite
    echo ""
    echo "🔧 Running Comprehensive Test Suite"
    echo "=================================="
    if python3 "$PROJECT_ROOT/test_multi_site.py"; then
        echo "✅ Comprehensive test suite passed"
        passed_count=$((passed_count + 1))
    else
        echo "❌ Comprehensive test suite failed"
    fi
    scenario_count=$((scenario_count + 1))

    # Generate final report
    generate_report $scenario_count $passed_count
}
# Parse command line arguments
# Dispatch on the first CLI argument: run an individual stage
# (prereq / cli / scenario-a..d / comprehensive); anything else runs all.
case "${1:-all}" in
    "prereq")
        check_prerequisites
        ;;
    "cli")
        run_cli_tests
        ;;
    "scenario-a")
        run_scenario "Scenario A" "$PROJECT_ROOT/test_scenario_a.sh"
        ;;
    "scenario-b")
        run_scenario "Scenario B" "$PROJECT_ROOT/test_scenario_b.sh"
        ;;
    "scenario-c")
        run_scenario "Scenario C" "$PROJECT_ROOT/test_scenario_c.sh"
        ;;
    "scenario-d")
        run_scenario "Scenario D" "$PROJECT_ROOT/test_scenario_d.sh"
        ;;
    "comprehensive")
        python3 "$PROJECT_ROOT/test_multi_site.py"
        ;;
    "all"|*)
        main
        ;;
esac

28
scripts/testing/run_test.py Executable file
View File

@@ -0,0 +1,28 @@
# Scratch/debug harness for aitbc_cli `node` command testing.
# NOTE(review): this script is effectively a no-op — both patch stacks end
# in `pass` and `runner` is never invoked.  It records an investigation into
# where save_multichain_config must be patched; consider deleting it or
# turning the findings into a real test.
import sys
from click.testing import CliRunner
from aitbc_cli.commands.node import node
from aitbc_cli.core.config import MultiChainConfig
from unittest.mock import patch, MagicMock
import sys  # NOTE(review): duplicate import of sys (already imported above)

runner = CliRunner()
with patch('aitbc_cli.commands.node.load_multichain_config') as mock_load:
    with patch('aitbc_cli.commands.node.get_default_node_config') as mock_default:
        with patch('aitbc_cli.commands.node.add_node_config') as mock_add:
            # The function does `from ..core.config import save_multichain_config`
            # This evaluates to `aitbc_cli.core.config` because node.py is in `aitbc_cli.commands`
            with patch('aitbc_cli.core.config.save_multichain_config') as mock_save:
                # The issue with the previous run was not that save_multichain_config wasn't patched correctly.
                # The issue is that click catches exceptions and prints the generic "Error adding node: ...".
                # Wait, "Failed to save configuration" actually implies the unpatched save_multichain_config was CALLED!
                # Let's mock at sys.modules level for Python relative imports
                pass

with patch('aitbc_cli.commands.node.load_multichain_config') as mock_load:
    with patch('aitbc_cli.commands.node.get_default_node_config') as mock_default:
        with patch('aitbc_cli.commands.node.add_node_config') as mock_add:
            # the easiest way is to patch it in the exact module it is executed
            # OR we can just avoid testing the mock_save and let it save to a temp config!
            # Let's check how config is loaded in node.py
            pass

View File

@@ -0,0 +1,315 @@
#!/usr/bin/env python3
"""
Scalability Validation for AITBC Platform
Tests system performance under load and validates scalability
"""
import asyncio
import aiohttp
import time
import statistics
import json
from concurrent.futures import ThreadPoolExecutor
import subprocess
import sys
from typing import List, Dict, Any
class ScalabilityValidator:
    def __init__(self, base_url="https://aitbc.bubuit.net/api/v1"):
        # Root of the API under test; endpoint paths are appended verbatim.
        self.base_url = base_url
        # Fixed test credential sent as X-Api-Key on every request.
        self.api_key = "test_key_16_characters"
        # Accumulator for per-run measurements.
        self.results = []
async def measure_endpoint_performance(self, session, endpoint, method="GET", **kwargs):
"""Measure performance of a single endpoint"""
start_time = time.time()
headers = kwargs.pop('headers', {})
headers['X-Api-Key'] = self.api_key
try:
async with session.request(method, f"{self.base_url}{endpoint}",
headers=headers, timeout=30, **kwargs) as response:
content = await response.text()
end_time = time.time()
return {
'endpoint': endpoint,
'method': method,
'status_code': response.status,
'response_time': end_time - start_time,
'content_length': len(content),
'success': response.status < 400
}
except Exception as e:
end_time = time.time()
return {
'endpoint': endpoint,
'method': method,
'status_code': 0,
'response_time': end_time - start_time,
'content_length': 0,
'success': False,
'error': str(e)
}
async def load_test_endpoint(self, endpoint, method="GET", concurrent_users=10,
requests_per_user=5, ramp_up_time=5, **kwargs):
"""Perform load testing with gradual ramp-up"""
print(f"🧪 Load Testing {method} {endpoint}")
print(f" Users: {concurrent_users}, Requests/User: {requests_per_user}")
print(f" Total Requests: {concurrent_users * requests_per_user}")
connector = aiohttp.TCPConnector(limit=100, limit_per_host=100)
timeout = aiohttp.ClientTimeout(total=30)
async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
tasks = []
# Gradual ramp-up
for user in range(concurrent_users):
# Add delay for ramp-up
if user > 0:
await asyncio.sleep(ramp_up_time / concurrent_users)
# Create requests for this user
for req in range(requests_per_user):
task = self.measure_endpoint_performance(session, method, endpoint, **kwargs)
tasks.append(task)
# Wait for all tasks to complete
results = await asyncio.gather(*tasks, return_exceptions=True)
# Filter valid results
valid_results = [r for r in results if isinstance(r, dict)]
successful_results = [r for r in valid_results if r['success']]
# Calculate metrics
response_times = [r['response_time'] for r in successful_results]
return {
'endpoint': endpoint,
'total_requests': len(valid_results),
'successful_requests': len(successful_results),
'failed_requests': len(valid_results) - len(successful_results),
'success_rate': len(successful_results) / len(valid_results) * 100 if valid_results else 0,
'avg_response_time': statistics.mean(response_times) if response_times else 0,
'min_response_time': min(response_times) if response_times else 0,
'max_response_time': max(response_times) if response_times else 0,
'median_response_time': statistics.median(response_times) if response_times else 0,
'p95_response_time': statistics.quantiles(response_times, n=20)[18] if len(response_times) > 20 else 0,
'p99_response_time': statistics.quantiles(response_times, n=100)[98] if len(response_times) > 100 else 0,
'requests_per_second': len(successful_results) / (max(response_times) - min(response_time)) if len(response_times) > 1 else 0
}
def get_system_metrics(self):
"""Get current system metrics"""
try:
# CPU usage
cpu_result = subprocess.run(['top', '-bn1', '|', 'grep', 'Cpu(s)', '|', "awk", "'{print $2}'"],
capture_output=True, text=True, shell=True)
cpu_usage = cpu_result.stdout.strip().replace('%us,', '')
# Memory usage
mem_result = subprocess.run(['free', '|', 'grep', 'Mem', '|', "awk", "'{printf \"%.1f\", $3/$2 * 100.0}'"],
capture_output=True, text=True, shell=True)
memory_usage = mem_result.stdout.strip()
# Disk usage
disk_result = subprocess.run(['df', '/', '|', 'awk', 'NR==2{print $5}'],
capture_output=True, text=True, shell=True)
disk_usage = disk_result.stdout.strip().replace('%', '')
return {
'cpu_usage': float(cpu_usage) if cpu_usage else 0,
'memory_usage': float(memory_usage) if memory_usage else 0,
'disk_usage': float(disk_usage) if disk_usage else 0
}
except Exception as e:
print(f"⚠️ Could not get system metrics: {e}")
return {'cpu_usage': 0, 'memory_usage': 0, 'disk_usage': 0}
async def run_scalability_tests(self):
"""Run comprehensive scalability tests"""
print("🚀 AITBC Platform Scalability Validation")
print("=" * 60)
# Record initial system metrics
initial_metrics = self.get_system_metrics()
print(f"📊 Initial System Metrics:")
print(f" CPU: {initial_metrics['cpu_usage']:.1f}%")
print(f" Memory: {initial_metrics['memory_usage']:.1f}%")
print(f" Disk: {initial_metrics['disk_usage']:.1f}%")
print()
# Test scenarios with increasing load
test_scenarios = [
# Light load
{'endpoint': '/health', 'method': 'GET', 'users': 5, 'requests': 5, 'name': 'Light Load'},
# Medium load
{'endpoint': '/health', 'method': 'GET', 'users': 20, 'requests': 10, 'name': 'Medium Load'},
# Heavy load
{'endpoint': '/health', 'method': 'GET', 'users': 50, 'requests': 10, 'name': 'Heavy Load'},
# Stress test
{'endpoint': '/health', 'method': 'GET', 'users': 100, 'requests': 5, 'name': 'Stress Test'},
]
results = []
for scenario in test_scenarios:
print(f"🎯 Scenario: {scenario['name']}")
endpoint = scenario['endpoint']
method = scenario['method']
users = scenario['users']
requests = scenario['requests']
# Get metrics before test
before_metrics = self.get_system_metrics()
# Run load test
result = await self.load_test_endpoint(endpoint, method, users, requests)
result['scenario'] = scenario['name']
result['concurrent_users'] = users
result['requests_per_user'] = requests
# Get metrics after test
after_metrics = self.get_system_metrics()
# Calculate resource impact
result['cpu_impact'] = after_metrics['cpu_usage'] - before_metrics['cpu_usage']
result['memory_impact'] = after_metrics['memory_usage'] - before_metrics['memory_usage']
results.append(result)
# Print scenario results
self.print_scenario_results(result)
# Wait between tests
await asyncio.sleep(2)
return results
def print_scenario_results(self, result):
"""Print results for a single scenario"""
status = "" if result['success_rate'] >= 95 else "⚠️" if result['success_rate'] >= 80 else ""
print(f" {status} {result['scenario']}:")
print(f" Success Rate: {result['success_rate']:.1f}%")
print(f" Avg Response: {result['avg_response_time']:.3f}s")
print(f" P95 Response: {result['p95_response_time']:.3f}s")
print(f" P99 Response: {result['p99_response_time']:.3f}s")
print(f" Requests/Second: {result['requests_per_second']:.1f}")
print(f" CPU Impact: +{result['cpu_impact']:.1f}%")
print(f" Memory Impact: +{result['memory_impact']:.1f}%")
print()
def generate_scalability_report(self, results):
"""Generate comprehensive scalability report"""
print("📋 SCALABILITY VALIDATION REPORT")
print("=" * 60)
# Overall statistics
total_requests = sum(r['total_requests'] for r in results)
total_successful = sum(r['successful_requests'] for r in results)
overall_success_rate = (total_successful / total_requests * 100) if total_requests > 0 else 0
print(f"📊 Overall Performance:")
print(f" Total Requests: {total_requests}")
print(f" Successful Requests: {total_successful}")
print(f" Overall Success Rate: {overall_success_rate:.1f}%")
print()
# Performance by scenario
print(f"🎯 Performance by Scenario:")
for result in results:
status = "" if result['success_rate'] >= 95 else "⚠️" if result['success_rate'] >= 80 else ""
print(f" {status} {result['scenario']} ({result['concurrent_users']} users)")
print(f" Success: {result['success_rate']:.1f}% | "
f"Avg: {result['avg_response_time']:.3f}s | "
f"P95: {result['p95_response_time']:.3f}s | "
f"RPS: {result['requests_per_second']:.1f}")
print()
# Scalability analysis
print(f"📈 Scalability Analysis:")
# Response time scalability
response_times = [(r['concurrent_users'], r['avg_response_time']) for r in results]
print(f" Response Time Scalability:")
for users, avg_time in response_times:
print(f" {users} users: {avg_time:.3f}s avg")
# Success rate scalability
success_rates = [(r['concurrent_users'], r['success_rate']) for r in results]
print(f" Success Rate Scalability:")
for users, success_rate in success_rates:
print(f" {users} users: {success_rate:.1f}% success")
# Resource impact analysis
cpu_impacts = [r['cpu_impact'] for r in results]
memory_impacts = [r['memory_impact'] for r in results]
print(f" Resource Impact:")
print(f" Max CPU Impact: +{max(cpu_impacts):.1f}%")
print(f" Max Memory Impact: +{max(memory_impacts):.1f}%")
print()
# Recommendations
print(f"💡 Scalability Recommendations:")
# Check if performance degrades significantly
max_response_time = max(r['avg_response_time'] for r in results)
min_success_rate = min(r['success_rate'] for r in results)
if max_response_time < 0.5 and min_success_rate >= 95:
print(" 🎉 Excellent scalability - system handles load well!")
print(" ✅ Ready for production deployment")
elif max_response_time < 1.0 and min_success_rate >= 90:
print(" ✅ Good scalability - suitable for production")
print(" 💡 Consider optimization for higher loads")
else:
print(" ⚠️ Scalability concerns detected:")
if max_response_time >= 1.0:
print(" - Response times exceed 1s under load")
if min_success_rate < 90:
print(" - Success rate drops below 90% under load")
print(" 🔧 Performance optimization recommended before production")
print()
print("🏆 Scalability Benchmarks:")
print(" ✅ Excellent: <500ms response, >95% success at 100+ users")
print(" ⚠️ Good: <1s response, >90% success at 50+ users")
print(" ❌ Needs Work: >1s response or <90% success rate")
async def main():
    """Run the full scalability suite and map the outcome to an exit code."""
    validator = ScalabilityValidator()
    try:
        scenario_results = await validator.run_scalability_tests()
        validator.generate_scalability_report(scenario_results)
        # Production-readiness gate: worst success rate and slowest average
        # across all scenarios must both be acceptable.
        worst_success = min(r['success_rate'] for r in scenario_results)
        slowest_avg = max(r['avg_response_time'] for r in scenario_results)
        if worst_success >= 90 and slowest_avg < 1.0:
            print("\n✅ SCALABILITY VALIDATION PASSED")
            print("🚀 System is ready for production deployment!")
            return 0
        print("\n⚠️ SCALABILITY VALIDATION NEEDS REVIEW")
        print("🔧 Performance optimization recommended")
        return 1
    except Exception as e:
        # Any unexpected failure is reported and treated as non-passing.
        print(f"❌ Scalability validation failed: {e}")
        return 1
if __name__ == "__main__":
    # Propagate main()'s status code straight to the shell.
    sys.exit(asyncio.run(main()))

28
scripts/testing/simple-test.sh Executable file
View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Simple AITBC Services Test

# Probe an endpoint and extract one field from its JSON response.
#   $1 = URL, $2 = jq filter
# Prints FAIL when the curl|jq pipeline errors out.
probe() {
    curl -s "$1" | jq -r "$2" 2>/dev/null || echo 'FAIL'
}

echo "=== 🧪 AITBC Services Test ==="
echo "Testing new port logic implementation"
echo ""

echo "🔍 Core Services:"
echo "Coordinator API (8000): $(probe http://localhost:8000/v1/health .status)"
echo "Exchange API (8001): $(probe http://localhost:8001/ .detail)"
echo "Blockchain RPC (8003): $(probe http://localhost:8003/rpc/head .height)"
echo ""

echo "🚀 Enhanced Services:"
echo "Multimodal GPU (8010): $(probe http://localhost:8010/health .status)"
echo "GPU Multimodal (8011): $(probe http://localhost:8011/health .status)"
echo "Modality Optimization (8012): $(probe http://localhost:8012/health .status)"
echo "Adaptive Learning (8013): $(probe http://localhost:8013/health .status)"
echo "Web UI (8016): $(probe http://localhost:8016/health .status)"
echo "Geographic Load Balancer (8017): $(probe http://localhost:8017/health .status)"
echo ""

echo "📊 Port Usage:"
sudo netstat -tlnp | grep -E ":(8000|8001|8003|8010|8011|8012|8013|8016|8017)" | sort
echo ""
echo "✅ All services tested!"

View File

@@ -0,0 +1,168 @@
#!/usr/bin/env python3
"""
Simple Performance Testing for AITBC Platform
"""
import time
import requests
import statistics
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
class SimplePerformanceTester:
    """Thread-based load tester for the AITBC HTTP API."""

    def __init__(self, base_url="https://aitbc.bubuit.net/api/v1"):
        self.base_url = base_url
        # Fixed test credential, sent as X-Api-Key on every request.
        self.api_key = "test_key_16_characters"

    def test_endpoint(self, method, endpoint, **kwargs):
        """Issue a single request and return a result dict (never raises)."""
        start_time = time.time()
        headers = kwargs.pop('headers', {})
        headers['X-Api-Key'] = self.api_key
        try:
            response = requests.request(method, f"{self.base_url}{endpoint}",
                                        headers=headers, timeout=10, **kwargs)
            end_time = time.time()
            return {
                'endpoint': endpoint,
                'method': method,
                'status_code': response.status_code,
                'response_time': end_time - start_time,
                'success': response.status_code < 400,
                'content_length': len(response.text)
            }
        except Exception as e:
            end_time = time.time()
            return {
                'endpoint': endpoint,
                'method': method,
                'status_code': 0,
                'response_time': end_time - start_time,
                'success': False,
                'error': str(e)
            }

    def load_test_endpoint(self, method, endpoint, concurrent_users=5, requests_per_user=3, **kwargs):
        """Run concurrent_users * requests_per_user requests concurrently and
        return an aggregate summary dict."""
        print(f"🧪 Testing {method} {endpoint} - {concurrent_users} users × {requests_per_user} requests")

        def make_request():
            return self.test_endpoint(method, endpoint, **kwargs)

        with ThreadPoolExecutor(max_workers=concurrent_users) as executor:
            futures = [executor.submit(make_request)
                       for _ in range(concurrent_users * requests_per_user)]
            results = [future.result() for future in as_completed(futures)]
        successful_results = [r for r in results if r['success']]
        response_times = [r['response_time'] for r in successful_results]
        return {
            'endpoint': endpoint,
            # BUG FIX: 'method' was missing from this summary, so
            # generate_report crashed with KeyError('method') when printing
            # the per-endpoint lines.
            'method': method,
            'total_requests': len(results),
            'successful_requests': len(successful_results),
            'failed_requests': len(results) - len(successful_results),
            'success_rate': len(successful_results) / len(results) * 100 if results else 0,
            'avg_response_time': statistics.mean(response_times) if response_times else 0,
            'min_response_time': min(response_times) if response_times else 0,
            'max_response_time': max(response_times) if response_times else 0,
            'median_response_time': statistics.median(response_times) if response_times else 0,
        }

    def run_tests(self):
        """Run the predefined test matrix, print per-endpoint results and the
        final report, and return the list of summary dicts."""
        print("🚀 AITBC Platform Performance Tests")
        print("=" * 50)
        test_cases = [
            # Health check
            {'method': 'GET', 'endpoint': '/health', 'users': 10, 'requests': 5},
            # Client endpoints
            {'method': 'GET', 'endpoint': '/client/jobs', 'users': 5, 'requests': 3},
            # Miner endpoints
            {'method': 'POST', 'endpoint': '/miners/register', 'users': 3, 'requests': 2,
             'json': {'capabilities': {'gpu': {'model': 'RTX 4090'}}},
             'headers': {'Content-Type': 'application/json', 'X-Miner-ID': 'perf-test-miner'}},
        ]
        results = []
        for test_case in test_cases:
            method = test_case.pop('method')
            endpoint = test_case.pop('endpoint')
            # BUG FIX: 'users'/'requests' used to fall through **kwargs into
            # requests.request() (a TypeError caught per request), so the load
            # settings were silently ignored and every request failed. Map them
            # onto the real parameter names instead.
            result = self.load_test_endpoint(
                method, endpoint,
                concurrent_users=test_case.pop('users'),
                requests_per_user=test_case.pop('requests'),
                **test_case)
            results.append(result)
            # NOTE(review): glyphs for the >=80% and <50% branches look lost to
            # an encoding pass (empty strings); preserved as found.
            status = "" if result['success_rate'] >= 80 else "⚠️" if result['success_rate'] >= 50 else ""
            print(f"{status} {method} {endpoint}:")
            print(f" Success Rate: {result['success_rate']:.1f}%")
            print(f" Avg Response: {result['avg_response_time']:.3f}s")
            print(f" Requests: {result['successful_requests']}/{result['total_requests']}")
            print()
        self.generate_report(results)
        return results

    def generate_report(self, results):
        """Print an aggregate report over the per-endpoint summaries."""
        print("📋 PERFORMANCE REPORT")
        print("=" * 50)
        total_requests = sum(r['total_requests'] for r in results)
        total_successful = sum(r['successful_requests'] for r in results)
        overall_success_rate = (total_successful / total_requests * 100) if total_requests > 0 else 0
        print(f"📊 Overall:")
        print(f" Total Requests: {total_requests}")
        print(f" Successful: {total_successful}")
        print(f" Success Rate: {overall_success_rate:.1f}%")
        print()
        print(f"🎯 Endpoint Performance:")
        for result in results:
            status = "" if result['success_rate'] >= 80 else "⚠️" if result['success_rate'] >= 50 else ""
            print(f" {status} {result['method']} {result['endpoint']}")
            print(f" Success: {result['success_rate']:.1f}% | "
                  f"Avg: {result['avg_response_time']:.3f}s | "
                  f"Requests: {result['successful_requests']}/{result['total_requests']}")
        print()
        print("💡 Recommendations:")
        if overall_success_rate >= 80:
            print(" 🎉 Good performance - ready for production!")
        else:
            print(" ⚠️ Performance issues detected - review endpoints")
        # Anything averaging over a second is flagged individually.
        slow_endpoints = [r for r in results if r['avg_response_time'] > 1.0]
        if slow_endpoints:
            print(" 🐌 Slow endpoints:")
            for r in slow_endpoints:
                print(f" - {r['endpoint']} ({r['avg_response_time']:.3f}s)")
if __name__ == "__main__":
    tester = SimplePerformanceTester()
    results = tester.run_tests()
    # Mean success rate across endpoints decides the process exit code.
    success_rates = [r['success_rate'] for r in results]
    if statistics.mean(success_rates) >= 80:
        print("\n✅ PERFORMANCE TESTS PASSED")
        exit(0)
    else:
        print("\n⚠️ PERFORMANCE TESTS NEED REVIEW")
        exit(1)

View File

@@ -0,0 +1,128 @@
#!/bin/bash
# AITBC Comprehensive Services Test Script
# Tests all services with new port logic implementation
# Abort on errors, unset variables, and failed pipeline stages.
# NOTE(review): combined with `set -e`, any helper call that returns non-zero
# (test_service/test_port on a failed check) aborts the run before the summary
# prints — confirm this is the intended behavior.
set -euo pipefail
echo "=== 🧪 AITBC Comprehensive Services Test ==="
echo "Date: $(date)"
echo "Testing all services with new port logic (8000-8003, 8010-8016)"
echo ""
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test results (incremented by test_service/test_port)
PASSED=0
FAILED=0
# Function to test a service
# Probe a service URL and match the response body against a pattern.
#   $1 = display name, $2 = URL, $3 = expected response (extended regex)
# Increments PASSED/FAILED; returns 0 on match, 1 otherwise.
test_service() {
    local name="$1"
    local url="$2"
    local expected_pattern="$3"
    echo -n "Testing $name... "
    if response=$(curl -s "$url" 2>/dev/null); then
        if [[ $response =~ $expected_pattern ]]; then
            echo -e "${GREEN}✅ PASS${NC}"
            # BUG FIX: ((PASSED++)) evaluates to the pre-increment value, so
            # the very first increment (from 0) has exit status 1 and kills
            # the whole script under `set -e`. Plain assignment is safe.
            PASSED=$((PASSED + 1))
            return 0
        else
            echo -e "${RED}❌ FAIL${NC} - Unexpected response"
            echo " Expected: $expected_pattern"
            echo " Got: $response"
            FAILED=$((FAILED + 1))
            return 1
        fi
    else
        echo -e "${RED}❌ FAIL${NC} - No response"
        FAILED=$((FAILED + 1))
        return 1
    fi
}
# Function to test port availability
# Check that something is listening on a TCP port.
#   $1 = port number, $2 = display name
# Increments PASSED/FAILED; returns 0 when listening, 1 otherwise.
test_port() {
    local port="$1"
    local name="$2"
    echo -n "Testing port $port ($name)... "
    if sudo netstat -tlnp 2>/dev/null | grep -q ":$port "; then
        echo -e "${GREEN}✅ PASS${NC}"
        # BUG FIX: ((PASSED++)) has exit status 1 when the old value is 0,
        # which aborts the script under `set -e`. Use plain assignment.
        PASSED=$((PASSED + 1))
        return 0
    else
        echo -e "${RED}❌ FAIL${NC} - Port not listening"
        FAILED=$((FAILED + 1))
        return 1
    fi
}
# NOTE(review): each test_service/test_port call returns 1 on failure, which
# under `set -euo pipefail` aborts the script here — the summary below is only
# reached when every check passes. Confirm whether failures should instead be
# tolerated (e.g. `test_service ... || true`) so the summary always prints.
echo "🔍 Core Services Testing"
echo "====================="
# Test Core Services
test_service "Coordinator API (8000)" "http://localhost:8000/v1/health" '"status":"ok"'
test_service "Exchange API (8001)" "http://localhost:8001/" '"detail"'
test_service "Blockchain RPC (8003)" "http://localhost:8003/rpc/head" '"height"'
echo ""
echo "🚀 Enhanced Services Testing"
echo "=========================="
# Test Enhanced Services
test_service "Multimodal GPU (8010)" "http://localhost:8010/health" '"service":"gpu-multimodal"'
test_service "GPU Multimodal (8011)" "http://localhost:8011/health" '"service":"gpu-multimodal"'
test_service "Modality Optimization (8012)" "http://localhost:8012/health" '"service":"modality-optimization"'
test_service "Adaptive Learning (8013)" "http://localhost:8013/health" '"service":"adaptive-learning"'
test_service "Web UI (8016)" "http://localhost:8016/health" '"service":"web-ui"'
echo ""
echo "🔧 Service Features Testing"
echo "========================="
# Test Service Features (deeper, feature-specific endpoints)
test_service "GPU Status (8010)" "http://localhost:8010/gpu/status" '"gpu_available"'
test_service "GPU Multimodal Features (8011)" "http://localhost:8011/gpu/multimodal" '"multimodal_capabilities"'
test_service "Modality Optimization (8012)" "http://localhost:8012/optimization/modality" '"optimization_active"'
test_service "Learning Status (8013)" "http://localhost:8013/learning/status" '"learning_active"'
echo ""
echo "🌐 Port Availability Testing"
echo "=========================="
# Test Port Availability
test_port "8000" "Coordinator API"
test_port "8001" "Exchange API"
test_port "8003" "Blockchain RPC"
test_port "8010" "Multimodal GPU"
test_port "8011" "GPU Multimodal"
test_port "8012" "Modality Optimization"
test_port "8013" "Adaptive Learning"
test_port "8016" "Web UI"
echo ""
echo "📊 Test Results Summary"
echo "===================="
TOTAL=$((PASSED + FAILED))
echo "Total Tests: $TOTAL"
echo -e "Passed: ${GREEN}$PASSED${NC}"
echo -e "Failed: ${RED}$FAILED${NC}"
# Exit status mirrors whether any check failed.
if [ $FAILED -eq 0 ]; then
    echo -e "${GREEN}🎉 All tests passed!${NC}"
    echo "✅ AITBC services are fully operational with new port logic"
    exit 0
else
    echo -e "${RED}❌ Some tests failed!${NC}"
    echo "⚠️ Please check the failed services above"
    exit 1
fi

View File

@@ -0,0 +1,93 @@
#!/bin/bash
#
# AITBC Permission Test Suite
# Run this to verify your permission setup is working correctly
#
# NOTE(review): there is no `set -e` here, so every "✅ ... completed" line
# prints even when the preceding command fails — confirm that the suite is
# meant to be informational rather than fail-fast.
echo "=== 🧪 AITBC Permission Setup Test Suite ==="
echo ""
# Test 1: Service Management — relies on passwordless sudo for systemctl.
echo "📋 Test 1: Service Management (should NOT ask for password)"
echo "Command: sudo systemctl status aitbc-coordinator-api.service --no-pager"
echo "Expected: Service status without password prompt"
echo ""
sudo systemctl status aitbc-coordinator-api.service --no-pager | head -3
echo "✅ Service management test completed"
echo ""
# Test 2: File Operations — /opt/aitbc must be writable by the current user.
echo "📋 Test 2: File Operations"
echo "Command: touch /opt/aitbc/test-permissions.txt"
echo "Expected: File creation without sudo"
echo ""
touch /opt/aitbc/test-permissions.txt
echo "✅ File created: /opt/aitbc/test-permissions.txt"
echo ""
echo "Command: rm /opt/aitbc/test-permissions.txt"
echo "Expected: File deletion without sudo"
echo ""
rm /opt/aitbc/test-permissions.txt
echo "✅ File deleted successfully"
echo ""
# Test 3: Development Tools — git must work in the current directory.
echo "📋 Test 3: Development Tools"
echo "Command: git status"
echo "Expected: Git status without password"
echo ""
git status --porcelain | head -3 || echo "✅ Git working (clean working directory)"
echo ""
# Test 4: Log Access — passwordless sudo for journalctl.
echo "📋 Test 4: Log Access (should NOT ask for password)"
echo "Command: sudo journalctl -u aitbc-coordinator-api.service --no-pager -n 2"
echo "Expected: Recent logs without password prompt"
echo ""
sudo journalctl -u aitbc-coordinator-api.service --no-pager -n 2
echo "✅ Log access test completed"
echo ""
# Test 5: Network Tools — passwordless sudo for lsof.
echo "📋 Test 5: Network Tools (should NOT ask for password)"
echo "Command: sudo lsof -i :8000"
echo "Expected: Network info without password prompt"
echo ""
sudo lsof -i :8000 | head -2 || echo "✅ lsof command working"
echo ""
# Test 6: Helper Scripts — fix-permissions.sh must exist and be executable.
echo "📋 Test 6: Helper Scripts"
echo "Command: /opt/aitbc/scripts/fix-permissions.sh"
echo "Expected: Permission fix script runs"
echo ""
/opt/aitbc/scripts/fix-permissions.sh
echo "✅ Helper script test completed"
echo ""
# Test 7: Development Environment — sources env vars into *this* shell.
echo "📋 Test 7: Development Environment"
echo "Command: source /opt/aitbc/.env.dev"
echo "Expected: Environment loads without errors"
echo ""
source /opt/aitbc/.env.dev
echo "✅ Development environment loaded"
echo ""
echo "=== 🎉 All Tests Completed! ==="
echo ""
echo "✅ Service Management: Working"
echo "✅ File Operations: Working"
echo "✅ Development Tools: Working"
echo "✅ Log Access: Working"
echo "✅ Network Tools: Working"
echo "✅ Helper Scripts: Working"
echo "✅ Development Environment: Working"
echo ""
echo "🚀 Your AITBC development environment is fully configured!"
echo ""
echo "💡 Available aliases (now active):"
echo " aitbc-services - Service management"
echo " aitbc-fix - Quick permission fix"
echo " aitbc-logs - View logs"

View File

@@ -0,0 +1,16 @@
import asyncio
from broadcaster import Broadcast


async def main():
    """Smoke-test Redis pub/sub via broadcaster: publish once, read it back."""
    bus = Broadcast("redis://localhost:6379")
    await bus.connect()
    print("connected")
    async with bus.subscribe("test") as subscriber:
        print("subscribed")
        await bus.publish("test", "hello")
        # Consume exactly one message, then stop listening.
        async for event in subscriber:
            print("msg:", event.message)
            break
    await bus.disconnect()


asyncio.run(main())

View File

@@ -0,0 +1,10 @@
import requests

# Quick reachability probe for the marketplace endpoints; any transport
# failure is reported once and aborts the remaining checks.
try:
    for label, path in (("Offers", "offers"), ("Stats", "stats")):
        status = requests.get(f'http://127.0.0.1:8000/v1/marketplace/{path}').status_code
        print(f"{label}:", status)
except Exception as e:
    print("Error:", e)

View File

@@ -0,0 +1,23 @@
import sys
import asyncio
from sqlmodel import Session, create_engine
from app.services.marketplace_enhanced_simple import EnhancedMarketplaceService
from app.database import engine
from app.domain.marketplace import MarketplaceBid


async def run():
    """Seed one bid, then exercise the analytics query against it."""
    with Session(engine) as session:
        # One bid so the analytics query has data to aggregate (also probes
        # the amount-vs-price field naming).
        session.add(MarketplaceBid(provider="prov", capacity=10, price=1.0))
        session.commit()
        service = EnhancedMarketplaceService(session)
        try:
            analytics = await service.get_marketplace_analytics(
                period_days=30, metrics=["volume", "revenue"]
            )
            print(analytics)
        except Exception:
            import traceback
            traceback.print_exc()


asyncio.run(run())

View File

@@ -0,0 +1,98 @@
#!/usr/bin/env python3
"""
Direct test of GPU release functionality
"""
import sys
import os
sys.path.insert(0, '/home/oib/windsurf/aitbc/apps/coordinator-api/src')
from sqlmodel import Session, select
from sqlalchemy import create_engine
from app.domain.gpu_marketplace import GPURegistry, GPUBooking
def test_gpu_release() -> bool:
    """Exercise the GPU release flow directly against the coordinator DB.

    Walks through registry lookup -> booking inspection -> refund calculation
    -> release, printing progress at each step. Returns True when the GPU
    ends up "available", False on any failure.
    """
    print("=== DIRECT GPU RELEASE TEST ===")
    # Use the same database as coordinator
    db_path = "/home/oib/windsurf/aitbc/apps/coordinator-api/data/coordinator.db"
    engine = create_engine(f"sqlite:///{db_path}")
    # Hard-coded GPU under test — assumes this row exists in the registry.
    gpu_id = "gpu_c5be877c"
    with Session(engine) as session:
        print(f"1. Checking GPU {gpu_id}...")
        gpu = session.exec(select(GPURegistry).where(GPURegistry.id == gpu_id)).first()
        if not gpu:
            print(f"❌ GPU {gpu_id} not found")
            return False
        print(f"✅ GPU found: {gpu.model} - Status: {gpu.status}")
        print(f"2. Checking bookings for GPU {gpu_id}...")
        bookings = session.exec(
            select(GPUBooking).where(GPUBooking.gpu_id == gpu_id)
        ).all()
        print(f"Found {len(bookings)} bookings:")
        for booking in bookings:
            # getattr guards against schema versions that lack total_cost.
            print(f" - ID: {booking.id}, Status: {booking.status}, Total Cost: {getattr(booking, 'total_cost', 'MISSING')}")
        print(f"3. Checking active bookings...")
        active_booking = session.exec(
            select(GPUBooking).where(
                GPUBooking.gpu_id == gpu_id,
                GPUBooking.status == "active"
            )
        ).first()
        if active_booking:
            print(f"✅ Active booking found: {active_booking.id}")
            print(f" Total Cost: {getattr(active_booking, 'total_cost', 'MISSING')}")
            # Test refund calculation
            # 0.5 factor — presumably a 50% early-release refund policy;
            # TODO confirm against the marketplace rules.
            try:
                refund = active_booking.total_cost * 0.5
                print(f"✅ Refund calculation successful: {refund}")
            except AttributeError as e:
                print(f"❌ Refund calculation failed: {e}")
                return False
        else:
            print("❌ No active booking found")
        print(f"4. Testing release logic...")
        if active_booking:
            try:
                # Release = cancel the booking and free the GPU atomically.
                refund = active_booking.total_cost * 0.5
                active_booking.status = "cancelled"
                gpu.status = "available"
                session.commit()
                print(f"✅ Release successful")
                print(f" GPU Status: {gpu.status}")
                print(f" Booking Status: {active_booking.status}")
                print(f" Refund: {refund}")
                return True
            except Exception as e:
                print(f"❌ Release failed: {e}")
                session.rollback()
                return False
        else:
            print("⚠️ No active booking to release")
            # Still try to make GPU available
            gpu.status = "available"
            session.commit()
            print(f"✅ GPU marked as available")
            return True
if __name__ == "__main__":
    # Non-zero exit only when the release flow failed.
    if test_gpu_release():
        print("\n🎉 GPU release test PASSED!")
    else:
        print("\n❌ GPU release test FAILED!")
        sys.exit(1)

View File

@@ -0,0 +1,21 @@
import asyncio
from apps.agent_services.agent_bridge.src.integration_layer import AgentServiceBridge


async def main():
    """Register a throwaway trading agent via the bridge and print the outcome."""
    bridge = AgentServiceBridge()
    # Registration payload mirroring what a real agent would send.
    payload = dict(
        name="test-agent-123",
        type="trading",
        capabilities=["trade"],
        chain_id="ait-mainnet",
        endpoint="http://localhost:8005",
        version="1.0.0",
        description="Test trading agent",
    )
    async with bridge.integration as integration:
        outcome = await integration.register_agent_with_coordinator(payload)
        print(f"Result: {outcome}")


if __name__ == "__main__":
    asyncio.run(main())

2
scripts/testing/test_send.sh Executable file
View File

@@ -0,0 +1,2 @@
# Point the CLI at the test wallet, then send 50 AITBC to the HD test address.
export AITBC_WALLET="test_wallet"
aitbc wallet send aitbc1my-test-wallet_hd 50

View File

@@ -0,0 +1,22 @@
import json
import os
import pathlib

# Fixture wallet with a large starting balance; credentials are dummy values.
wallet_data = {
    "name": "test_wallet",
    "type": "hd",
    "address": "aitbc1genesis",
    "private_key": "dummy",
    "public_key": "dummy",
    "encrypted": False,
    "transactions": [],
    "balance": 1000000
}

# Write the fixture into the CLI's wallet directory, creating it if needed.
wallet_dir = pathlib.Path("/root/.aitbc/wallets")
wallet_dir.mkdir(parents=True, exist_ok=True)
(wallet_dir / "test_wallet.json").write_text(json.dumps(wallet_data))