Add sys import to test files and remove obsolete integration tests
Some checks failed
API Endpoint Tests / test-api-endpoints (push) Successful in 9s
Blockchain Synchronization Verification / sync-verification (push) Failing after 1s
CLI Tests / test-cli (push) Failing after 3s
Documentation Validation / validate-docs (push) Successful in 6s
Documentation Validation / validate-policies-strict (push) Successful in 2s
Integration Tests / test-service-integration (push) Successful in 40s
Multi-Node Blockchain Health Monitoring / health-check (push) Successful in 1s
P2P Network Verification / p2p-verification (push) Successful in 2s
Production Tests / Production Integration Tests (push) Successful in 21s
Python Tests / test-python (push) Successful in 13s
Security Scanning / security-scan (push) Failing after 46s
Smart Contract Tests / test-solidity (aitbc-token — packages/solidity/aitbc-token) (push) Successful in 17s
Smart Contract Tests / lint-solidity (push) Successful in 10s
Some checks failed
API Endpoint Tests / test-api-endpoints (push) Successful in 9s
Blockchain Synchronization Verification / sync-verification (push) Failing after 1s
CLI Tests / test-cli (push) Failing after 3s
Documentation Validation / validate-docs (push) Successful in 6s
Documentation Validation / validate-policies-strict (push) Successful in 2s
Integration Tests / test-service-integration (push) Successful in 40s
Multi-Node Blockchain Health Monitoring / health-check (push) Successful in 1s
P2P Network Verification / p2p-verification (push) Successful in 2s
Production Tests / Production Integration Tests (push) Successful in 21s
Python Tests / test-python (push) Successful in 13s
Security Scanning / security-scan (push) Failing after 46s
Smart Contract Tests / test-solidity (aitbc-token — packages/solidity/aitbc-token) (push) Successful in 17s
Smart Contract Tests / lint-solidity (push) Successful in 10s
- Add sys import to 29 test files across agent-coordinator, blockchain-event-bridge, blockchain-node, and coordinator-api
- Remove apps/blockchain-event-bridge/tests/test_integration.py (obsolete bridge integration tests)
- Remove apps/coordinator-api/tests/test_integration.py (obsolete API integration tests)
- Implement GPU registration in marketplace_gpu.py with GPURegistry model persistence
This commit is contained in:
1
apps/miner/tests/__init__.py
Normal file
1
apps/miner/tests/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Miner service tests"""
|
||||
162
apps/miner/tests/test_edge_cases_miner.py
Normal file
162
apps/miner/tests/test_edge_cases_miner.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Edge case and error handling tests for miner service"""
|
||||
|
||||
import pytest
|
||||
import sys
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
|
||||
import production_miner
|
||||
|
||||
|
||||
@pytest.mark.unit
def test_classify_architecture_empty_string():
    """An empty model string must classify as 'unknown'."""
    assert production_miner.classify_architecture("") == "unknown"


@pytest.mark.unit
def test_classify_architecture_special_characters():
    """A garbage name full of special characters must classify as 'unknown'."""
    assert production_miner.classify_architecture("NVIDIA@#$%GPU") == "unknown"
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_detect_cuda_version_timeout(mock_run):
    """CUDA detection returns None when nvidia-smi times out."""
    # Reach subprocess through production_miner: this test module does not
    # import subprocess itself, so a bare `subprocess.TimeoutExpired`
    # reference would raise NameError when the test runs.  The patch target
    # above already proves production_miner.subprocess exists.
    mock_run.side_effect = production_miner.subprocess.TimeoutExpired("nvidia-smi", 5)
    result = production_miner.detect_cuda_version()
    assert result is None
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_get_gpu_info_malformed_output(run_mock):
    """Malformed nvidia-smi output yields no GPU info."""
    run_mock.return_value = Mock(returncode=0, stdout="malformed,data")
    assert production_miner.get_gpu_info() is None


@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_get_gpu_info_empty_output(run_mock):
    """Empty nvidia-smi output yields no GPU info."""
    run_mock.return_value = Mock(returncode=0, stdout="")
    assert production_miner.get_gpu_info() is None
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.get_gpu_info')
def test_build_gpu_capabilities_negative_memory(gpu_mock):
    """A negative reported memory total is passed through unchanged."""
    gpu_mock.return_value = {"name": "RTX 4090", "memory_total": -24576}
    with patch('production_miner.detect_cuda_version') as cuda_mock, \
         patch('production_miner.classify_architecture') as arch_mock:
        cuda_mock.return_value = "12.0"
        arch_mock.return_value = "ada_lovelace"

        caps = production_miner.build_gpu_capabilities()
        assert caps["gpu"]["memory_gb"] == -24576


@pytest.mark.unit
@patch('production_miner.get_gpu_info')
def test_build_gpu_capabilities_zero_memory(gpu_mock):
    """A zero reported memory total is passed through unchanged."""
    gpu_mock.return_value = {"name": "RTX 4090", "memory_total": 0}
    with patch('production_miner.detect_cuda_version') as cuda_mock, \
         patch('production_miner.classify_architecture') as arch_mock:
        cuda_mock.return_value = "12.0"
        arch_mock.return_value = "ada_lovelace"

        caps = production_miner.build_gpu_capabilities()
        assert caps["gpu"]["memory_gb"] == 0
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.get')
def test_check_ollama_empty_models(get_mock):
    """Ollama reachable but with no models: available, empty model list."""
    get_mock.return_value = Mock(status_code=200, json=lambda: {"models": []})
    available, models = production_miner.check_ollama()
    assert available is True
    assert not models


@pytest.mark.integration
@patch('production_miner.httpx.get')
def test_check_ollama_malformed_response(get_mock):
    """A response missing the 'models' key is treated as an empty list."""
    get_mock.return_value = Mock(status_code=200, json=lambda: {})
    available, models = production_miner.check_ollama()
    assert available is True
    assert not models
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.submit_result')
@patch('production_miner.httpx.post')
def test_execute_job_empty_payload(post_mock, submit_mock):
    """A job with an empty payload fails."""
    post_mock.return_value = Mock(status_code=200, json=lambda: {"response": "test"})

    job = {"job_id": "job_123", "payload": {}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is False


@pytest.mark.integration
@patch('production_miner.submit_result')
def test_execute_job_missing_job_id(submit_mock):
    """A job with no job_id fails."""
    job = {"payload": {"type": "inference"}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is False


@pytest.mark.integration
@patch('production_miner.submit_result')
@patch('production_miner.httpx.post')
def test_execute_job_model_fallback(post_mock, submit_mock):
    """A job naming an unavailable model falls back to the first available one."""
    post_mock.return_value = Mock(status_code=200, json=lambda: {"response": "test"})

    job = {"job_id": "job_123", "payload": {"type": "inference", "prompt": "test", "model": "nonexistent"}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is True


@pytest.mark.integration
@patch('production_miner.submit_result')
def test_execute_job_timeout(submit_mock):
    """A transport error during inference makes the job fail."""
    job = {"job_id": "job_123", "payload": {"type": "inference", "prompt": "test", "model": "llama3.2:latest"}}

    with patch('production_miner.httpx.post') as post_mock:
        post_mock.side_effect = Exception("Timeout")
        assert production_miner.execute_job(job, ["llama3.2:latest"]) is False


@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_poll_for_jobs_malformed_response(post_mock):
    """A 200 response without job fields still yields a non-None value."""
    post_mock.return_value = Mock(status_code=200, json=lambda: {})
    assert production_miner.poll_for_jobs() is not None


@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_submit_result_malformed_response(post_mock):
    """A server error on submit is tolerated; the POST is still attempted."""
    post_mock.return_value = Mock(status_code=500, text="Error")
    production_miner.submit_result("job_123", {"result": {"status": "completed"}})
    assert post_mock.called
|
||||
241
apps/miner/tests/test_integration_miner.py
Normal file
241
apps/miner/tests/test_integration_miner.py
Normal file
@@ -0,0 +1,241 @@
|
||||
"""Integration tests for miner service"""
|
||||
|
||||
import pytest
|
||||
import sys
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
import production_miner
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.get')
def test_check_ollama_success(get_mock):
    """Two models reported by Ollama are both returned."""
    get_mock.return_value = Mock(
        status_code=200,
        json=lambda: {"models": [{"name": "llama3.2:latest"}, {"name": "mistral:latest"}]}
    )
    available, models = production_miner.check_ollama()
    assert available is True
    assert len(models) == 2
    assert "llama3.2:latest" in models


@pytest.mark.integration
@patch('production_miner.httpx.get')
def test_check_ollama_failure(get_mock):
    """An HTTP 500 from Ollama means unavailable with no models."""
    get_mock.return_value = Mock(status_code=500)
    available, models = production_miner.check_ollama()
    assert available is False
    assert not models


@pytest.mark.integration
@patch('production_miner.httpx.get')
def test_check_ollama_exception(get_mock):
    """A connection error means unavailable with no models."""
    get_mock.side_effect = Exception("Connection refused")
    available, models = production_miner.check_ollama()
    assert available is False
    assert not models
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.get')
def test_wait_for_coordinator_success(get_mock):
    """A coordinator answering 200 is reported as up."""
    get_mock.return_value = Mock(status_code=200)
    assert production_miner.wait_for_coordinator() is True


@pytest.mark.integration
@patch('production_miner.httpx.get')
@patch('production_miner.time.sleep')
def test_wait_for_coordinator_failure(sleep_mock, get_mock):
    """Persistent connection errors exhaust the retries (sleep is stubbed)."""
    get_mock.side_effect = Exception("Connection refused")
    assert production_miner.wait_for_coordinator() is False
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.post')
@patch('production_miner.build_gpu_capabilities')
def test_register_miner_success(build_mock, post_mock):
    """Successful registration returns the session token from the response."""
    build_mock.return_value = {"gpu": {"model": "RTX 4090"}}
    post_mock.return_value = Mock(
        status_code=200,
        json=lambda: {"session_token": "test-token-123"}
    )
    assert production_miner.register_miner() == "test-token-123"


@pytest.mark.integration
@patch('production_miner.httpx.post')
@patch('production_miner.build_gpu_capabilities')
def test_register_miner_failure(build_mock, post_mock):
    """An HTTP 400 yields no session token."""
    build_mock.return_value = {"gpu": {"model": "RTX 4090"}}
    post_mock.return_value = Mock(status_code=400, text="Bad request")
    assert production_miner.register_miner() is None


@pytest.mark.integration
@patch('production_miner.httpx.post')
@patch('production_miner.build_gpu_capabilities')
def test_register_miner_exception(build_mock, post_mock):
    """A transport error yields no session token."""
    build_mock.return_value = {"gpu": {"model": "RTX 4090"}}
    post_mock.side_effect = Exception("Connection error")
    assert production_miner.register_miner() is None
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.post')
@patch('production_miner.get_gpu_info')
@patch('production_miner.classify_architecture')
@patch('production_miner.measure_coordinator_latency')
def test_send_heartbeat_with_gpu(latency_mock, arch_mock, gpu_mock, post_mock):
    """Heartbeat is posted when GPU telemetry is present."""
    gpu_mock.return_value = {"name": "RTX 4090", "memory_total": 24576, "memory_used": 1024, "utilization": 45}
    arch_mock.return_value = "ada_lovelace"
    latency_mock.return_value = 50.0
    post_mock.return_value = Mock(status_code=200)

    production_miner.send_heartbeat()
    assert post_mock.called


@pytest.mark.integration
@patch('production_miner.httpx.post')
@patch('production_miner.get_gpu_info')
@patch('production_miner.classify_architecture')
@patch('production_miner.measure_coordinator_latency')
def test_send_heartbeat_without_gpu(latency_mock, arch_mock, gpu_mock, post_mock):
    """Heartbeat is still posted when no GPU info is available."""
    gpu_mock.return_value = None
    post_mock.return_value = Mock(status_code=200)

    production_miner.send_heartbeat()
    assert post_mock.called
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_submit_result_success(post_mock):
    """A 200 on submit: the POST is issued."""
    post_mock.return_value = Mock(status_code=200)
    production_miner.submit_result("job_123", {"result": {"status": "completed"}})
    assert post_mock.called


@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_submit_result_failure(post_mock):
    """A 500 on submit is tolerated; the POST is still issued."""
    post_mock.return_value = Mock(status_code=500, text="Server error")
    production_miner.submit_result("job_123", {"result": {"status": "completed"}})
    assert post_mock.called
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_poll_for_jobs_success(post_mock):
    """A 200 with a job body returns that job."""
    post_mock.return_value = Mock(
        status_code=200,
        json=lambda: {"job_id": "job_123", "payload": {"type": "inference"}}
    )
    job = production_miner.poll_for_jobs()
    assert job is not None
    assert job["job_id"] == "job_123"


@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_poll_for_jobs_no_job(post_mock):
    """A 204 means no work is available."""
    post_mock.return_value = Mock(status_code=204)
    assert production_miner.poll_for_jobs() is None


@pytest.mark.integration
@patch('production_miner.httpx.post')
def test_poll_for_jobs_failure(post_mock):
    """A server error means no job."""
    post_mock.return_value = Mock(status_code=500, text="Server error")
    assert production_miner.poll_for_jobs() is None
|
||||
|
||||
|
||||
@pytest.mark.integration
@patch('production_miner.submit_result')
@patch('production_miner.httpx.post')
@patch('production_miner.get_gpu_info')
def test_execute_job_inference_success(gpu_mock, post_mock, submit_mock):
    """A successful inference run reports success and submits a result."""
    gpu_mock.return_value = {"utilization": 80, "memory_used": 4096}
    post_mock.return_value = Mock(
        status_code=200,
        json=lambda: {"response": "Test output", "eval_count": 100}
    )

    job = {"job_id": "job_123", "payload": {"type": "inference", "prompt": "test", "model": "llama3.2:latest"}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is True
    assert submit_mock.called


@pytest.mark.integration
@patch('production_miner.submit_result')
@patch('production_miner.httpx.post')
def test_execute_job_inference_no_models(post_mock, submit_mock):
    """With no models available the job fails but a result is still submitted."""
    job = {"job_id": "job_123", "payload": {"type": "inference", "prompt": "test"}}
    assert production_miner.execute_job(job, []) is False
    assert submit_mock.called


@pytest.mark.integration
@patch('production_miner.submit_result')
def test_execute_job_unsupported_type(submit_mock):
    """An unsupported job type fails but a result is still submitted."""
    job = {"job_id": "job_123", "payload": {"type": "unsupported"}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is False
    assert submit_mock.called


@pytest.mark.integration
@patch('production_miner.submit_result')
@patch('production_miner.httpx.post')
def test_execute_job_ollama_error(post_mock, submit_mock):
    """An Ollama 500 fails the job but a result is still submitted."""
    post_mock.return_value = Mock(status_code=500, text="Ollama error")

    job = {"job_id": "job_123", "payload": {"type": "inference", "prompt": "test", "model": "llama3.2:latest"}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is False
    assert submit_mock.called


@pytest.mark.integration
@patch('production_miner.submit_result')
def test_execute_job_exception(submit_mock):
    """Any exception during execution fails the job; a result is submitted."""
    job = {"job_id": "job_123", "payload": {"type": "inference", "prompt": "test"}}
    assert production_miner.execute_job(job, ["llama3.2:latest"]) is False
    assert submit_mock.called
|
||||
181
apps/miner/tests/test_unit_miner.py
Normal file
181
apps/miner/tests/test_unit_miner.py
Normal file
@@ -0,0 +1,181 @@
|
||||
"""Unit tests for miner service"""
|
||||
|
||||
import pytest
|
||||
import sys
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
import subprocess
|
||||
|
||||
|
||||
import production_miner
|
||||
|
||||
|
||||
@pytest.mark.unit
def test_classify_architecture_4090():
    """An RTX 4090 name maps to Ada Lovelace."""
    assert production_miner.classify_architecture("NVIDIA GeForce RTX 4090") == "ada_lovelace"


@pytest.mark.unit
def test_classify_architecture_3080():
    """An RTX 3080 name maps to Ampere."""
    assert production_miner.classify_architecture("NVIDIA GeForce RTX 3080") == "ampere"


@pytest.mark.unit
def test_classify_architecture_2080():
    """An RTX 2080 name maps to Turing."""
    assert production_miner.classify_architecture("NVIDIA GeForce RTX 2080") == "turing"


@pytest.mark.unit
def test_classify_architecture_1080():
    """A GTX 1080 name maps to Pascal."""
    assert production_miner.classify_architecture("NVIDIA GeForce GTX 1080") == "pascal"


@pytest.mark.unit
def test_classify_architecture_a100():
    """An A100 name maps to the datacenter class."""
    assert production_miner.classify_architecture("NVIDIA A100") == "datacenter"


@pytest.mark.unit
def test_classify_architecture_unknown():
    """Unrecognized names map to 'unknown'."""
    assert production_miner.classify_architecture("Unknown GPU") == "unknown"


@pytest.mark.unit
def test_classify_architecture_case_insensitive():
    """Classification ignores the case of the model name."""
    assert production_miner.classify_architecture("nvidia rtx 4090") == "ada_lovelace"
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_detect_cuda_version_success(run_mock):
    """nvidia-smi stdout is returned as the CUDA version."""
    run_mock.return_value = Mock(returncode=0, stdout="12.0")
    assert production_miner.detect_cuda_version() == "12.0"


@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_detect_cuda_version_failure(run_mock):
    """A missing nvidia-smi yields no CUDA version."""
    run_mock.side_effect = Exception("nvidia-smi not found")
    assert production_miner.detect_cuda_version() is None
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_get_gpu_info_success(run_mock):
    """Well-formed nvidia-smi CSV output is parsed into a GPU-info dict."""
    run_mock.return_value = Mock(
        returncode=0,
        stdout="NVIDIA GeForce RTX 4090, 24576, 1024, 45"
    )
    info = production_miner.get_gpu_info()
    assert info is not None
    assert info["name"] == "NVIDIA GeForce RTX 4090"
    assert info["memory_total"] == 24576
    assert info["memory_used"] == 1024
    assert info["utilization"] == 45


@pytest.mark.unit
@patch('production_miner.subprocess.run')
def test_get_gpu_info_failure(run_mock):
    """A missing nvidia-smi yields no GPU info."""
    run_mock.side_effect = Exception("nvidia-smi not found")
    assert production_miner.get_gpu_info() is None
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.get_gpu_info')
@patch('production_miner.detect_cuda_version')
@patch('production_miner.classify_architecture')
def test_build_gpu_capabilities(arch_mock, cuda_mock, gpu_mock):
    """Capabilities combine GPU info, CUDA version and architecture."""
    gpu_mock.return_value = {"name": "RTX 4090", "memory_total": 24576}
    cuda_mock.return_value = "12.0"
    arch_mock.return_value = "ada_lovelace"

    caps = production_miner.build_gpu_capabilities()
    assert caps is not None
    assert "gpu" in caps
    assert caps["gpu"]["model"] == "RTX 4090"
    assert caps["gpu"]["architecture"] == "ada_lovelace"
    assert caps["gpu"]["edge_optimized"] is True


@pytest.mark.unit
@patch('production_miner.get_gpu_info')
def test_build_gpu_capabilities_no_gpu(gpu_mock):
    """Without a GPU, placeholder model/architecture values are used."""
    gpu_mock.return_value = None

    caps = production_miner.build_gpu_capabilities()
    assert caps is not None
    assert caps["gpu"]["model"] == "Unknown GPU"
    assert caps["gpu"]["architecture"] == "unknown"
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.classify_architecture')
def test_build_gpu_capabilities_edge_optimized(arch_mock):
    """Ada Lovelace GPUs are flagged edge-optimized."""
    arch_mock.return_value = "ada_lovelace"

    with patch('production_miner.get_gpu_info') as gpu_mock, \
         patch('production_miner.detect_cuda_version') as cuda_mock:
        gpu_mock.return_value = {"name": "RTX 4090", "memory_total": 24576}
        cuda_mock.return_value = "12.0"

        caps = production_miner.build_gpu_capabilities()
        assert caps["gpu"]["edge_optimized"] is True


@pytest.mark.unit
@patch('production_miner.classify_architecture')
def test_build_gpu_capabilities_not_edge_optimized(arch_mock):
    """Pascal GPUs are not flagged edge-optimized."""
    arch_mock.return_value = "pascal"

    with patch('production_miner.get_gpu_info') as gpu_mock, \
         patch('production_miner.detect_cuda_version') as cuda_mock:
        gpu_mock.return_value = {"name": "GTX 1080", "memory_total": 8192}
        cuda_mock.return_value = "11.0"

        caps = production_miner.build_gpu_capabilities()
        assert caps["gpu"]["edge_optimized"] is False
|
||||
|
||||
|
||||
@pytest.mark.unit
@patch('production_miner.httpx.get')
def test_measure_coordinator_latency_success(get_mock):
    """A reachable coordinator yields a non-negative latency."""
    get_mock.return_value = Mock(status_code=200)
    assert production_miner.measure_coordinator_latency() >= 0


@pytest.mark.unit
@patch('production_miner.httpx.get')
def test_measure_coordinator_latency_failure(get_mock):
    """A connection error yields the -1.0 sentinel."""
    get_mock.side_effect = Exception("Connection error")
    assert production_miner.measure_coordinator_latency() == -1.0
|
||||
Reference in New Issue
Block a user