Spaces:
Sleeping
Sleeping
Update core/extractor.py
Browse files — core/extractor.py (+2 −2)
core/extractor.py
CHANGED
|
@@ -26,10 +26,10 @@ class ExtractorEngine:
|
|
| 26 |
def __init__(self):
|
| 27 |
self.model_name = "Qwen/Qwen2.5-1.5B-Instruct"
|
| 28 |
if 'llm_model' not in st.session_state:
|
| 29 |
-
with st.spinner("Chargement initial du cerveau IA..."):
|
| 30 |
st.session_state.llm_tokenizer = AutoTokenizer.from_pretrained(self.model_name)
|
| 31 |
st.session_state.llm_model = AutoModelForCausalLM.from_pretrained(
|
| 32 |
-
self.model_name, torch_dtype=torch.float32, device_map=
|
| 33 |
)
|
| 34 |
self.tokenizer = st.session_state.llm_tokenizer
|
| 35 |
self.model = st.session_state.llm_model
|
|
|
|
def __init__(self):
    """Bind the shared Qwen 2.5 instruct model, loading it on first use.

    The tokenizer and model are stored once in ``st.session_state`` so
    that subsequent Streamlit reruns reuse the cached objects instead of
    reloading the weights from disk every time the script re-executes.
    """
    self.model_name = "Qwen/Qwen2.5-1.5B-Instruct"
    state = st.session_state
    if 'llm_model' not in state:
        # First run in this session: load on CPU (plain float32 weights,
        # no device_map dispatch) behind a spinner so the UI shows progress.
        with st.spinner("Chargement initial du cerveau IA (CPU)..."):
            state.llm_tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            state.llm_model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                torch_dtype=torch.float32,
                device_map=None,
                low_cpu_mem_usage=False,
            )
    self.tokenizer = state.llm_tokenizer
    self.model = state.llm_model