"""Unit tests for OllamaProvider.analyze covering success, empty,
malformed-JSON, and markdown-fenced-JSON responses from the LLM."""

import os
from datetime import datetime
from unittest.mock import AsyncMock, patch

import pytest

from src.crawlers.dto import NewsItemDTO
from src.processor.dto import EnrichedNewsItemDTO
from src.processor.ollama_provider import OllamaProvider
@pytest.fixture
def sample_news_item():
    """A minimal, fully-populated NewsItemDTO for driving analyze() in tests."""
    item = NewsItemDTO(
        title="Test News",
        url="http://example.com",
        content_text="This is a test article about AI and NPU acceleration.",
        source="Test Source",
        timestamp=datetime.now(),
    )
    return item
def create_mock_session(mock_response_json):
    """Build a stand-in for ``aiohttp.ClientSession``.

    Every ``post()`` on the returned session succeeds and yields
    *mock_response_json* from ``json()``. Both the session and the
    response implement the async context-manager protocol, matching
    how aiohttp is normally driven (``async with session.post(...)``).
    """

    class _FakeResponse:
        async def __aenter__(self):
            # Entering the response context yields the response itself.
            return self

        async def __aexit__(self, *exc_info):
            return None

        async def json(self):
            # The canned payload the provider will parse.
            return mock_response_json

        def raise_for_status(self):
            # Simulates a 2xx response: never raises.
            return None

    class _FakeSession:
        async def __aenter__(self):
            return self

        async def __aexit__(self, *exc_info):
            return None

        def post(self, url, **kwargs):
            # Deliberately NOT async: like aiohttp, post() returns a
            # context manager rather than a coroutine.
            return _FakeResponse()

    return _FakeSession()
@pytest.mark.asyncio
async def test_ollama_provider_analyze_success(sample_news_item):
    """A well-formed JSON payload from the LLM maps onto EnrichedNewsItemDTO."""
    mock_response_json = {
        "response": '{"relevance_score": 8, "summary_ru": "Тестовая статья про ИИ.", "anomalies_detected": ["NPU acceleration"]}'
    }

    # patch.dict restores os.environ when the block exits, so the env var
    # does not leak into other tests (a bare `os.environ[...] = ...` would).
    with patch.dict(os.environ, {'OLLAMA_API_URL': 'http://localhost:11434/api/generate'}):
        provider = OllamaProvider()
        with patch('aiohttp.ClientSession', return_value=create_mock_session(mock_response_json)):
            result = await provider.analyze(sample_news_item)

    assert isinstance(result, EnrichedNewsItemDTO)
    assert result.title == "Test News"
    assert result.relevance_score == 8
    assert result.summary_ru == "Тестовая статья про ИИ."
    assert result.anomalies_detected == ["NPU acceleration"]
@pytest.mark.asyncio
async def test_ollama_provider_analyze_empty_response(sample_news_item):
    """An empty LLM response falls back to zeroed/empty enrichment fields."""
    mock_response_json = {
        "response": ""
    }

    # patch.dict restores os.environ when the block exits, so the env var
    # does not leak into other tests (a bare `os.environ[...] = ...` would).
    with patch.dict(os.environ, {'OLLAMA_API_URL': 'http://localhost:11434/api/generate'}):
        provider = OllamaProvider()
        with patch('aiohttp.ClientSession', return_value=create_mock_session(mock_response_json)):
            result = await provider.analyze(sample_news_item)

    assert isinstance(result, EnrichedNewsItemDTO)
    assert result.relevance_score == 0
    assert result.summary_ru == ""
    assert result.anomalies_detected == []
@pytest.mark.asyncio
async def test_ollama_provider_analyze_malformed_json(sample_news_item):
    """Unparseable JSON from the LLM yields score 0 and an error summary."""
    mock_response_json = {
        "response": "{ invalid json"
    }

    # patch.dict restores os.environ when the block exits, so the env var
    # does not leak into other tests (a bare `os.environ[...] = ...` would).
    with patch.dict(os.environ, {'OLLAMA_API_URL': 'http://localhost:11434/api/generate'}):
        provider = OllamaProvider()
        with patch('aiohttp.ClientSession', return_value=create_mock_session(mock_response_json)):
            result = await provider.analyze(sample_news_item)

    assert isinstance(result, EnrichedNewsItemDTO)
    assert result.relevance_score == 0
    assert "Error parsing LLM response" in result.summary_ru
    assert result.anomalies_detected == []
@pytest.mark.asyncio
async def test_ollama_provider_analyze_markdown_json(sample_news_item):
    """JSON wrapped in a markdown ```json fence is stripped and parsed."""
    mock_response_json = {
        "response": "```json\n{\"relevance_score\": 5, \"summary_ru\": \"Markdown test\", \"anomalies_detected\": []}\n```"
    }

    # patch.dict restores os.environ when the block exits, so the env var
    # does not leak into other tests (a bare `os.environ[...] = ...` would).
    with patch.dict(os.environ, {'OLLAMA_API_URL': 'http://localhost:11434/api/generate'}):
        provider = OllamaProvider()
        with patch('aiohttp.ClientSession', return_value=create_mock_session(mock_response_json)):
            result = await provider.analyze(sample_news_item)

    assert isinstance(result, EnrichedNewsItemDTO)
    assert result.relevance_score == 5
    assert result.summary_ru == "Markdown test"
    assert result.anomalies_detected == []