Spaces:
Sleeping
Sleeping
Update core/extractor.py
Browse files- core/extractor.py +3 -3
core/extractor.py
CHANGED
|
@@ -9,13 +9,13 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
| 9 |
class Entity(BaseModel):
|
| 10 |
id: str = Field(description="ID unique (ex: E1).")
|
| 11 |
name: str = Field(description="Nom de l'entité.")
|
| 12 |
-
type: str = Field(description="Type d'
|
| 13 |
description: str = Field(description="Contexte ou rôle.")
|
| 14 |
|
| 15 |
class Relationship(BaseModel):
|
| 16 |
source: str = Field(alias="from", description="ID de l'entité source.")
|
| 17 |
target: str = Field(alias="to", description="ID de l'entité cible.")
|
| 18 |
-
type: str = Field(description="Action ou lien sémantique
|
| 19 |
description: str = Field(description="Explication du lien ou Détails.")
|
| 20 |
|
| 21 |
class KnowledgeGraph(BaseModel):
|
|
@@ -26,7 +26,7 @@ class ExtractorEngine:
|
|
| 26 |
def __init__(self):
|
| 27 |
self.model_name = "Qwen/Qwen2.5-1.5B-Instruct"
|
| 28 |
if 'llm_model' not in st.session_state:
|
| 29 |
-
with st.spinner("
|
| 30 |
st.session_state.llm_tokenizer = AutoTokenizer.from_pretrained(self.model_name)
|
| 31 |
st.session_state.llm_model = AutoModelForCausalLM.from_pretrained(
|
| 32 |
self.model_name, torch_dtype=torch.float32, device_map="cpu", low_cpu_mem_usage=True
|
|
|
|
| 9 |
class Entity(BaseModel):
    """Pydantic schema for a single entity extracted into the knowledge graph.

    NOTE(review): the `description=` values are runtime Field metadata
    (presumably surfaced to the LLM as schema/prompt text — confirm against
    the extractor's prompt construction), so the French strings are kept
    byte-for-byte and only these comments are in English.
    """

    # Unique identifier for the entity, e.g. "E1".
    id: str = Field(description="ID unique (ex: E1).")
    # Human-readable name of the entity.
    name: str = Field(description="Nom de l'entité.")
    # Category/type label of the entity.
    type: str = Field(description="Type d'entité.")
    # Context or role of the entity in the source text.
    description: str = Field(description="Contexte ou rôle.")
|
| 14 |
|
| 15 |
class Relationship(BaseModel):
    """Pydantic schema for a directed link between two extracted entities.

    `alias="from"` / `alias="to"` map the JSON keys "from" and "to" onto
    Pythonic attribute names ("from" is a reserved word in Python, so it
    cannot be used as a field name directly).

    NOTE(review): French `description=` strings are runtime Field metadata
    and are kept byte-for-byte; only these comments are in English.
    """

    # ID of the source entity; populated from the JSON key "from".
    source: str = Field(alias="from", description="ID de l'entité source.")
    # ID of the target entity; populated from the JSON key "to".
    target: str = Field(alias="to", description="ID de l'entité cible.")
    # Semantic label of the link (action or relation type).
    type: str = Field(description="Action ou lien sémantique.")
    # Free-text explanation or details of the link.
    description: str = Field(description="Explication du lien ou Détails.")
|
| 20 |
|
| 21 |
class KnowledgeGraph(BaseModel):
|
|
|
|
| 26 |
def __init__(self):
|
| 27 |
self.model_name = "Qwen/Qwen2.5-1.5B-Instruct"
|
| 28 |
if 'llm_model' not in st.session_state:
|
| 29 |
+
with st.spinner("Chargement initial du cerveau IA..."):
|
| 30 |
st.session_state.llm_tokenizer = AutoTokenizer.from_pretrained(self.model_name)
|
| 31 |
st.session_state.llm_model = AutoModelForCausalLM.from_pretrained(
|
| 32 |
self.model_name, torch_dtype=torch.float32, device_map="cpu", low_cpu_mem_usage=True
|