navid72m commited on
Commit
0591f7d
·
verified ·
1 Parent(s): abb6b1a

Update document_processor_hf.py

Browse files
Files changed (1) hide show
  1. document_processor_hf.py +18 -4
document_processor_hf.py CHANGED
@@ -5,9 +5,9 @@ import tempfile
5
  from typing import List, Dict, Any, Tuple, Optional
6
  import numpy as np
7
  from sentence_transformers import SentenceTransformer
8
- import streamlit as st
9
  from dataclasses import dataclass
10
  from collections import defaultdict
 
11
 
12
  logger = logging.getLogger(__name__)
13
 
@@ -31,6 +31,20 @@ class Entity:
31
  start_pos: int
32
  end_pos: int
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  class DocumentProcessor:
35
  """
36
  Streamlined document processor for Hugging Face Spaces deployment.
@@ -78,9 +92,9 @@ class DocumentProcessor:
78
  'academic': ['abstract', 'introduction', 'literature review', 'methodology', 'results']
79
  }
80
 
81
- @st.cache_resource
82
- def _load_embedding_model(_self):
83
- """Load embedding model with Streamlit caching"""
84
  try:
85
  logger.info("🔄 Loading embedding model...")
86
  model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
 
5
  from typing import List, Dict, Any, Tuple, Optional
6
  import numpy as np
7
  from sentence_transformers import SentenceTransformer
 
8
  from dataclasses import dataclass
9
  from collections import defaultdict
10
+ import functools
11
 
12
  logger = logging.getLogger(__name__)
13
 
 
31
  start_pos: int
32
  end_pos: int
33
 
34
# Simple caching decorator to replace Streamlit's cache
def simple_cache(func):
    """Memoize *func*, ignoring its first positional argument.

    Drop-in replacement for ``st.cache_resource`` on instance methods:
    the first positional argument (``self``) is deliberately excluded
    from the cache key, so every instance shares one cached result per
    combination of the remaining arguments.

    NOTE(review): the cache is unbounded, and keys are built from
    ``str()`` of the arguments — arguments must therefore have stable,
    distinguishing string representations. Decorating a plain function
    will also drop its first argument from the key; only use this on
    methods (or functions whose first argument should be ignored).

    The returned wrapper exposes ``cache_clear()`` to reset the cache.
    """
    cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Key on everything except args[0] (self); sort kwargs so the
        # key does not depend on keyword-argument order.
        key = str(args[1:]) + str(sorted(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    # Backward-compatible addition: let callers invalidate the cache.
    wrapper.cache_clear = cache.clear
    return wrapper
47
+
48
  class DocumentProcessor:
49
  """
50
  Streamlined document processor for Hugging Face Spaces deployment.
 
92
  'academic': ['abstract', 'introduction', 'literature review', 'methodology', 'results']
93
  }
94
 
95
+ @simple_cache
96
+ def _load_embedding_model(self):
97
+ """Load embedding model with simple caching"""
98
  try:
99
  logger.info("🔄 Loading embedding model...")
100
  model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')