1 change: 1 addition & 0 deletions .github/workflows/main.yml
@@ -8,6 +8,7 @@ env:
   PINECONE_API_KEY: pcsk_7LufHa_aUYWm5r5WwF1LBhfujiKftHWLX9iU6fyYxtkDukMnZZQKMWQJcXrFmhzt7GtVtJ
   AZURE_API_KEY: 1JCm7aFbY2zVyXndOwAaljohGFAeFKjvwmDLa200gjSdlsLOqP3yJQQJ99BBACREanaXJ3w3AbgAACOG2ZyA
   GOOGLE_API_KEY: Adzac4B4-q3u3Q_lssqr_dc7k-WM28ygszsVrIe
+  CREDIBILITY_API_URL: https://credibility-api.example.com
 
 on:
   push:
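The new `CREDIBILITY_API_URL` value is read at runtime by the credibility service (see the service diff below), which treats an unset variable as a hard failure and returns no metrics. A minimal sketch of that lookup pattern, with the helper name being hypothetical:

```python
import logging
import os

logger = logging.getLogger(__name__)

def resolve_credibility_api_url() -> str:
    # Hypothetical helper mirroring the service's lookup: the empty-string
    # default doubles as the "not configured" sentinel, so callers can
    # bail out early instead of posting to a bogus endpoint.
    url = os.getenv('CREDIBILITY_API_URL', '')
    if not url:
        logger.error("CREDIBILITY_API_URL is not set")
    return url
```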
1 change: 0 additions & 1 deletion backend/mainService/requirements-test.txt
@@ -1,5 +1,4 @@
 pytest
 pytest-asyncio
 pytest-cov
-httpx
 pytest-mock
1 change: 1 addition & 0 deletions backend/mainService/requirements.txt
@@ -24,4 +24,5 @@ lxml==5.3.0
 google-genai
 redis>=4.2.0
 uvicorn
+httpx>=0.28.1

source_credibility_metric_service.py
@@ -46,25 +46,18 @@ def _calculate_source_score(metric: Dict, source: Dict,
 async def get_credibility_metrics(sources: List[Dict]) -> List[Dict]:
     """
     Call the credibility API to get metrics for sources.
-    Uses connection pooling and timeout handling for better performance.
-
-    Args:
-        sources (List[Dict]): List of source metadata
-
-    Returns:
-        List[Dict]: Credibility metrics for each source
+    Uses timeout handling for better reliability.
     """
     credibility_metrics_api = os.getenv('CREDIBILITY_API_URL','')
     if not credibility_metrics_api:
         logger.error("CREDIBILITY_API_URL is not set")
         return []
 
-    # Configure timeout and connection settings
-    timeout = aiohttp.ClientTimeout(total=10) # 10 seconds total timeout
-    connector = aiohttp.TCPConnector(limit=10) # Limit concurrent connections
+    # Configure timeout
+    timeout = aiohttp.ClientTimeout(total=10)
 
     try:
-        async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
+        async with aiohttp.ClientSession(timeout=timeout) as session:
             async with session.post(
                 credibility_metrics_api,
                 json={'sources': sources},
@@ -81,8 +74,6 @@ async def get_credibility_metrics(sources: List[Dict]) -> List[Dict]:
     except Exception:
         logger.exception("Error calling credibility API")
         return []
-    finally:
-        connector.close()
 
 async def calculate_overall_score(credibility_metrics: List[Dict], sources_with_scores: List[Dict],
                                   rerank_weight: float = 0.6, credibility_weight: float = 0.4) -> Dict[str, Any]:
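With the dedicated `TCPConnector` gone, each call now creates a plain session bounded by a 10-second total timeout, and the session closes its own default connector on exit, which also removes the need for the old `finally: connector.close()`. A self-contained sketch of the resulting call shape (endpoint and payload mirror the diff; response handling is condensed, since the full body isn't shown here):

```python
import logging
from typing import Dict, List

import aiohttp

logger = logging.getLogger(__name__)

async def fetch_credibility_metrics(api_url: str, sources: List[Dict]) -> List[Dict]:
    # One short-lived session per call, capped at 10 seconds end to end;
    # any failure degrades to an empty list, matching the diff's fallback.
    timeout = aiohttp.ClientTimeout(total=10)
    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(api_url, json={'sources': sources}) as response:
                response.raise_for_status()
                return await response.json()
    except Exception:
        logger.exception("Error calling credibility API")
        return []
```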
4 changes: 3 additions & 1 deletion backend/mainService/test/conftest.py
@@ -45,4 +45,6 @@ def mock_scraper():
 def mock_playwright_driver():
     mock_driver = AsyncMock()
     mock_driver.quit = AsyncMock()
-    return mock_driver
+    return mock_driver
+
+pytest_plugins = ['pytest_asyncio']
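Declaring `pytest_plugins = ['pytest_asyncio']` in the conftest loads the asyncio plugin explicitly, so the suite's `@pytest.mark.asyncio` coroutine tests run without relying on plugin auto-discovery. A small illustration of what the `mock_playwright_driver` fixture then supports (the test itself is hypothetical):

```python
import pytest

@pytest.mark.asyncio
async def test_driver_quits_cleanly(mock_playwright_driver):
    # AsyncMock lets the await succeed without a real browser process.
    await mock_playwright_driver.quit()
    mock_playwright_driver.quit.assert_awaited_once()
```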
33 changes: 24 additions & 9 deletions backend/mainService/test/test_citation_service.py
@@ -186,7 +186,10 @@ async def test_process_citation_auto_success(
 
     # Mock credibility metrics
     mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}]
-    mock_calculate_overall_score.return_value = 0.8
+    mock_calculate_overall_score.return_value = {
+        "overall_score": 84.00,
+        "source_scores": [84.00]
+    }
 
     # Mock document processing
     mock_doc = MagicMock()
@@ -210,7 +213,7 @@ async def test_process_citation_auto_success(
     assert "overall_score" in result
     assert "sources" in result
     assert result["result"] == ["Test Citation"]
-    assert result["overall_score"] == 0.8
+    assert result["overall_score"] == 84.00
     assert len(result["sources"]) == 1
 
 @pytest.mark.asyncio
@@ -299,7 +302,10 @@ async def test_process_citation_web_success(
 
     # Mock credibility metrics
     mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}]
-    mock_calculate_overall_score.return_value = 0.8
+    mock_calculate_overall_score.return_value = {
+        "overall_score": 84.00,
+        "source_scores": [84.00]
+    }
 
     # Mock document processing
     mock_doc = MagicMock()
@@ -325,7 +331,7 @@ async def test_process_citation_web_success(
     assert "overall_score" in result
     assert "sources" in result
     assert result["result"] == ["Test Citation"]
-    assert result["overall_score"] == 0.8
+    assert result["overall_score"] == 84.00
     assert len(result["sources"]) == 1
 
 @pytest.mark.asyncio
@@ -407,7 +413,10 @@ async def test_process_citation_source_success(
 
     # Mock credibility metrics
     mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}]
-    mock_calculate_overall_score.return_value = 0.8
+    mock_calculate_overall_score.return_value = {
+        "overall_score": 84.00,
+        "source_scores": [84.00]
+    }
 
     # Mock document processing
     mock_doc = MagicMock()
@@ -432,7 +441,7 @@ async def test_process_citation_source_success(
     assert "overall_score" in result
     assert "sources" in result
     assert result["result"] == ["Test Citation"]
-    assert result["overall_score"] == 0.8
+    assert result["overall_score"] == 84.00
     assert len(result["sources"]) == 1
 
 @pytest.mark.asyncio
@@ -506,7 +515,10 @@ async def test_process_citation_existing_index(
 
     # Mock credibility metrics
     mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}]
-    mock_calculate_overall_score.return_value = 0.8
+    mock_calculate_overall_score.return_value = {
+        "overall_score": 84.00,
+        "source_scores": [84.00]
+    }
 
     # Mock document processing
     mock_doc = MagicMock()
@@ -530,7 +542,7 @@ async def test_process_citation_existing_index(
     assert "overall_score" in result
     assert "sources" in result
     assert result["result"] == ["Test Citation"]
-    assert result["overall_score"] == 0.8
+    assert result["overall_score"] == 84.00
     assert len(result["sources"]) == 1
 
 @pytest.mark.asyncio
@@ -638,7 +650,10 @@ async def test_process_citation_mla_style(
 
     # Mock credibility metrics
     mock_get_credibility_metrics.return_value = [{"status": "success", "data": {"title": "Test Source"}}]
-    mock_calculate_overall_score.return_value = 0.8
+    mock_calculate_overall_score.return_value = {
+        "overall_score": 84.00,
+        "source_scores": [84.00]
+    }
 
     # Mock document processing
     mock_doc = MagicMock()
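All five tests apply the same fix: `calculate_overall_score` now returns a dict, so the stub must return the full contract rather than a bare float. The pattern in isolation (the patch target path is not shown in this diff, so only the stub is sketched):

```python
from unittest.mock import AsyncMock

# The stub mirrors the new return contract of calculate_overall_score,
# so code that unpacks result["overall_score"] downstream keeps working.
mock_calculate_overall_score = AsyncMock(return_value={
    "overall_score": 84.00,
    "source_scores": [84.00],
})
```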
112 changes: 46 additions & 66 deletions backend/mainService/test/test_source_credibility_metric_service.py
@@ -72,89 +72,69 @@ async def test_get_credibility_metrics_exception():
     # Assert
     assert result == []
 
-def test_calculate_overall_score_success():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_success():
+    # Test data
     credibility_metrics = [
         {
             "status": "success",
-            "data": {
-                "credibility_score": 0.85
-            }
-        },
+            "data": {"credibility_score": 0.8}
+        }
+    ]
+    sources_with_scores = [
         {
-            "status": "success",
-            "data": {
-                "credibility_score": 0.75
-            }
+            "rerank_score": 0.9
         }
     ]
 
-    # Act
-    result = calculate_overall_score(credibility_metrics)
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    assert isinstance(result, dict)
+    assert "overall_score" in result
+    assert "source_scores" in result
+    assert result["overall_score"] == 86.00 # (0.9 * 0.6 + 0.8 * 0.4) * 100
 
-    # Assert
-    assert result == 0.80 # (0.85 + 0.75) / 2
-
-def test_calculate_overall_score_empty():
-    # Arrange
-    credibility_metrics = []
-
-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.0
+@pytest.mark.asyncio
+async def test_calculate_overall_score_empty():
+    result = await calculate_overall_score([], [])
+    assert result["overall_score"] == 0.00
+    assert result["source_scores"] == []
 
-def test_calculate_overall_score_mixed_status():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_mixed_status():
     credibility_metrics = [
-        {
-            "status": "success",
-            "data": {
-                "credibility_score": 0.85
-            }
-        },
-        {
-            "status": "error",
-            "data": {
-                "credibility_score": 0.75
-            }
-        }
+        {"status": "success", "data": {"credibility_score": 0.8}},
+        {"status": "failed", "data": {"credibility_score": 0.5}}
     ]
+    sources_with_scores = [
+        {"rerank_score": 0.9},
+        {"rerank_score": 0.7}
+    ]
 
-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.85 # Only considers successful responses
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    print(result)
+    assert len(result["source_scores"]) == 2
+    assert result["source_scores"][0] == 86.00
 
-def test_calculate_overall_score_missing_data():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_missing_data():
     credibility_metrics = [
-        {
-            "status": "success"
-        }
+        {"status": "success", "data": {}}
    ]
+    sources_with_scores = [
+        {"rerank_score": 0.9}
+    ]
 
-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.0
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    assert result["overall_score"] == 0.00
 
-def test_calculate_overall_score_exception():
-    # Arrange
+@pytest.mark.asyncio
+async def test_calculate_overall_score_exception():
     credibility_metrics = [
-        {
-            "status": "success",
-            "data": {
-                "credibility_score": "invalid" # Invalid score type
-            }
-        }
+        {"status": "success", "data": None}
     ]
+    sources_with_scores = [
+        {"rerank_score": 0.9}
+    ]
 
-    # Act
-    result = calculate_overall_score(credibility_metrics)
-
-    # Assert
-    assert result == 0.0
+    result = await calculate_overall_score(credibility_metrics, sources_with_scores)
+    assert result["overall_score"] == 0.00
60 changes: 55 additions & 5 deletions backend/metricsService/tests/utils/test_cache.py
@@ -5,18 +5,14 @@
 @pytest.mark.asyncio
 async def test_get_cache_miss():
     """Test get_cache when cache miss occurs"""
-    from src.utils.cache import get_cache
-
     # Test with a non-existent key
     result = await get_cache("non_existent_key")
     assert result is None
 
 @pytest.mark.asyncio
-@pytest.mark.skip(reason="Not implemented yet")
 async def test_set_get_cache():
     """Test setting and getting cache values"""
-    from src.utils.cache import get_cache, set_cache
-
     # Test data
     test_key = "test_key"
     test_value = {"data": "test_value"}
+
@@ -26,4 +22,58 @@ async def test_set_get_cache():
     # Get the cached value
     result = await get_cache(test_key)
 
     # Verify the result
+    assert result is not None
+    assert result == test_value
+    assert isinstance(result, dict)
+    assert result["data"] == "test_value"
+
+@pytest.mark.asyncio
+async def test_set_get_cache_with_expiry():
+    """Test setting and getting cache values with expiration"""
+    test_key = "test_key_expiry"
+    test_value = {"data": "test_value"}
+    expiry = 60 # 60 seconds
+
+    # Set the cache value with expiry
+    await set_cache(test_key, test_value, expiry)
+
+    # Get the cached value
+    result = await get_cache(test_key)
+
+    # Verify the result
+    assert result is not None
+    assert result == test_value
+
+@pytest.mark.asyncio
+async def test_set_cache_invalid_value():
+    """Test setting cache with invalid value"""
+    test_key = "test_key_invalid"
+    test_value = None
+
+    # Set the cache value
+    await set_cache(test_key, test_value)
+
+    # Get the cached value
+    result = await get_cache(test_key)
+
+    # Verify the result
+    assert result is None
+
+@pytest.mark.asyncio
+async def test_set_get_cache_multiple_values():
+    """Test setting and getting multiple cache values"""
+    test_data = [
+        ("key1", {"data": "value1"}),
+        ("key2", {"data": "value2"}),
+        ("key3", {"data": "value3"})
+    ]
+
+    # Set multiple cache values
+    for key, value in test_data:
+        await set_cache(key, value)
+
+    # Get and verify each cached value
+    for key, expected_value in test_data:
+        result = await get_cache(key)
+        assert result == expected_value
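The new tests treat `src.utils.cache` as an async key-value layer: JSON-shaped values round-trip, `None` is rejected (a later `get_cache` reports a miss), an optional expiry is accepted, and independent keys don't collide. The module's implementation is outside this diff; below is a minimal sketch that would satisfy these tests, assuming a Redis backend with JSON serialization (the client wiring is hypothetical; the main service already pins `redis>=4.2.0`):

```python
import json
from typing import Any, Optional

import redis.asyncio as aioredis

# Hypothetical module-level client; the real src.utils.cache wiring is not shown.
_client = aioredis.Redis(host="localhost", port=6379, decode_responses=True)

async def set_cache(key: str, value: Any, expiry: Optional[int] = None) -> None:
    # Storing None is a no-op, so a later get_cache reports a miss,
    # which is exactly what test_set_cache_invalid_value expects.
    if value is None:
        return
    await _client.set(key, json.dumps(value), ex=expiry)

async def get_cache(key: str) -> Optional[Any]:
    raw = await _client.get(key)
    return json.loads(raw) if raw is not None else None
```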