import os
import psutil
import pytest
from src.data.stores.metadata_store import MetadataStore
from src.recsys.recall.itemcf import ItemCF
from src.core.recommendation_orchestrator import RecommendationOrchestrator
from src.services.recommend_service import RecommendationService
def get_process_memory():
    """Return the resident set size (RSS) of the current process, in MB."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / (1024 * 1024)
def test_memory_usage_loading():
    """Load each pipeline component in sequence and check total RAM growth.

    Measures RSS before and after every load step, printing the per-step
    delta, then asserts the cumulative overhead stays under 1 GB.
    """
    baseline = get_process_memory()
    print(f"\nInitial RAM: {baseline:.2f} MB")

    # Step 1: metadata store.
    store = MetadataStore()  # kept in scope so its memory stays allocated
    after_store = get_process_memory()
    print(f"RAM after MetadataStore: {after_store:.2f} MB (Delta: {after_store - baseline:.2f} MB)")

    # Step 2: ItemCF recall model (the old artifact was 1.4GB+ on disk, 7GB+ in RAM).
    itemcf = ItemCF()
    itemcf.load()
    after_itemcf = get_process_memory()
    print(f"RAM after ItemCF Load: {after_itemcf:.2f} MB (Delta: {after_itemcf - after_store:.2f} MB)")

    # Step 3: recommendation orchestrator.
    recommender = RecommendationOrchestrator()
    after_recommender = get_process_memory()
    print(f"RAM after Recommender: {after_recommender:.2f} MB (Delta: {after_recommender - after_itemcf:.2f} MB)")

    # Step 4: top-level service plus its resources.
    service = RecommendationService()
    service.load_resources()
    after_service = get_process_memory()
    print(f"RAM after Service Load: {after_service:.2f} MB (Delta: {after_service - after_recommender:.2f} MB)")

    # Each step should add very little RAM (certainly not GBs); the bulk is
    # the embedding model at roughly 80-100 MB, so total overhead < 1 GB.
    assert after_service - baseline < 1000
def test_itemcf_functionality():
    """Smoke-test ItemCF.recommend against the on-disk model data.

    Queries a real user id from the dataset; an empty result list is
    tolerated, but any returned entries must be (isbn, score) pairs.
    """
    # NOTE(review): unlike test_memory_usage_loading, no explicit .load()
    # is called here — presumably recommend() lazy-loads; confirm.
    model = ItemCF()
    recs = model.recommend("A1ZQ1LUQ9R6JHZ", top_k=5)
    print(f"\nItemCF Recs: {recs}")

    assert isinstance(recs, list)
    if not recs:
        return  # empty recommendations are acceptable

    assert len(recs) <= 5
    first = recs[0]
    assert isinstance(first, (list, tuple))
    assert len(first) == 2  # (isbn, score)
|