File size: 2,171 Bytes
b6b0c93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
"""
Simple test to demonstrate the translation cache functionality.
Run this to verify cache hits provide instant responses.
"""

import asyncio
import time
from quickmt.manager import BatchTranslator
from quickmt.settings import settings


async def test_translation_cache():
    """Demonstrate LRU translation-cache behaviour: miss, hit, and key sensitivity.

    Walks through three scenarios against a plain ``cachetools.LRUCache``:

    1. a first lookup (expected miss, result gets cached),
    2. an identical repeat lookup (expected hit),
    3. the same text with different decoding parameters (expected miss,
       because the kwargs are part of the cache key).
    """
    print("=== Translation Cache Test ===\n")

    # A real BatchTranslator is normally built by ModelManager; here we only
    # exercise the cache mechanism itself, so a bare LRUCache suffices.
    print(f"Cache size configured: {settings.translation_cache_size}")

    from cachetools import LRUCache

    lru = LRUCache(maxsize=settings.translation_cache_size)

    # Fixture inputs for the three scenarios below.
    sample_text = "Hello, world!"
    source, target = "en", "fr"
    # kwargs are sorted into a tuple so the key is deterministic and hashable.
    decode_params = tuple(sorted({"beam_size": 5, "patience": 1}.items()))
    cache_key = (sample_text, source, target, decode_params)

    # --- Scenario 1: cold cache -> miss, then populate ---
    print("\n1. First translation (cache miss):")
    print(f"   Key: {cache_key}")
    if cache_key not in lru:
        print("   βœ— Cache MISS (expected)")
        # Pretend a translation happened and store its result.
        lru[cache_key] = "Bonjour, monde!"
        print("   β†’ Cached result")
    else:
        print("   βœ“ Cache HIT")

    # --- Scenario 2: identical request -> hit ---
    print("\n2. Repeated translation (cache hit):")
    print(f"   Key: {cache_key}")
    if cache_key not in lru:
        print("   βœ— Cache MISS (unexpected)")
    else:
        print("   βœ“ Cache HIT (instant!)")
        print(f"   β†’ Result: {lru[cache_key]}")

    # --- Scenario 3: same text, different params -> distinct key, miss ---
    variant_params = tuple(sorted({"beam_size": 10, "patience": 2}.items()))
    different_key = (sample_text, source, target, variant_params)

    print("\n3. Same text, different parameters (cache miss):")
    print(f"   Key: {different_key}")
    if different_key in lru:
        print("   βœ“ Cache HIT")
    else:
        print("   βœ— Cache MISS (expected - different params)")

    print("\nβœ… Cache test complete!")
    print(f"Cache size: {len(lru)}/{settings.translation_cache_size}")


# Script entry point: drive the async demo on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(test_translation_cache())