Update langgraph_nodes.py
langgraph_nodes.py  (+52 -41)  CHANGED
@@ -1,6 +1,6 @@
 """
-LangGraph Nodes -
-
+LangGraph Nodes - LAZY LOADING VERSION
+Initializes HF client when needed, not at module import
 """
 
 import os
@@ -18,17 +18,29 @@ warnings.filterwarnings('ignore')
 from langgraph_state import ReviewState, BatchState
 from database_enhanced import EnhancedDatabase
 
-# FIXED:
-HF_TOKEN = os.getenv("HUGGINGFACE_API_KEY")
-
-# Check if token exists
-if not HF_TOKEN or HF_TOKEN.strip() == "":
-    print("❌ WARNING: HUGGINGFACE_API_KEY not set!")
-    print("   API calls will fail. Please set your API key.")
-    hf_client = None
-else:
-    print(f"✅ HF Token found: {HF_TOKEN[:8]}...")
-    hf_client = InferenceClient(token=HF_TOKEN)
+# FIXED: Don't initialize client at module import
+# Initialize LAZILY when first needed
+_hf_client = None
+
+def get_hf_client():
+    """Get or initialize HuggingFace client (lazy loading)"""
+    global _hf_client
+
+    if _hf_client is not None:
+        return _hf_client
+
+    # Try to get token from environment
+    HF_TOKEN = os.getenv("HUGGINGFACE_API_KEY")
+
+    if not HF_TOKEN or HF_TOKEN.strip() == "":
+        # No token available
+        return None
+
+    # Initialize client with token
+    print(f"✅ Initializing HF client with token: {HF_TOKEN[:10]}...")
+    _hf_client = InferenceClient(token=HF_TOKEN)
+    return _hf_client
+
 
 # Initialize sentiment models (singleton) - load once
 _sentiment_models_loaded = False
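Reviewer note: the hunk above swaps import-time initialization for a lazy singleton, so importing langgraph_nodes no longer fails when HUGGINGFACE_API_KEY is absent. A minimal standalone sketch of the same pattern (env-variable name taken from the diff; everything else is illustrative):

    import os
    from huggingface_hub import InferenceClient

    _client = None

    def get_client():
        """Create the client on first use; return None when no token is configured."""
        global _client
        if _client is None:
            token = os.getenv("HUGGINGFACE_API_KEY")
            if not token or not token.strip():
                return None  # callers must handle the no-credentials case
            _client = InferenceClient(token=token)
        return _client

Callers then guard on get_client() returning None instead of crashing at import.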
@@ -51,9 +63,12 @@ def load_sentiment_models():
     _best_model = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment-latest")
     _best_model.eval()
 
-    # Alternate Model
+    # Alternate Model - FIXED: Proper loading
     _alt_tokenizer = AutoTokenizer.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
-    _alt_model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/bertweet-base-sentiment-analysis")
+    _alt_model = AutoModelForSequenceClassification.from_pretrained(
+        "finiteautomata/bertweet-base-sentiment-analysis",
+        torch_dtype=torch.float32  # FIXED: Explicit dtype to avoid meta tensors
+    )
     _alt_model.eval()
 
     _sentiment_models_loaded = True
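Reviewer note: the explicit torch_dtype=torch.float32 matters because low-memory loading paths can leave weights as uninitialized "meta" tensors that break inference later. A quick post-load check, assuming the model name from the diff (has_meta_params is an illustrative helper, not part of the file):

    import torch
    from transformers import AutoModelForSequenceClassification

    def has_meta_params(model: torch.nn.Module) -> bool:
        # True if any parameter still lives on the meta device
        return any(p.device.type == "meta" for p in model.parameters())

    model = AutoModelForSequenceClassification.from_pretrained(
        "finiteautomata/bertweet-base-sentiment-analysis",
        torch_dtype=torch.float32,  # explicit dtype, as in the diff
    )
    assert not has_meta_params(model), "model loaded with meta tensors"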
@@ -67,10 +82,10 @@ def load_sentiment_models():
 def llm1_classify(review: Dict[str, Any]) -> Dict[str, Any]:
     """LLM1: Type, Department, Priority classification"""
 
-    # FIXED:
+    # FIXED: Get client lazily
+    hf_client = get_hf_client()
+
     if hf_client is None:
-        print("❌ ERROR: HuggingFace client not initialized!")
-        print("   Make sure HUGGINGFACE_API_KEY environment variable is set")
         return {
             'type': 'unknown',
             'department': 'unknown',
@@ -124,7 +139,6 @@ Respond ONLY in valid JSON format:
 }}"""
 
     try:
-        # FIXED: Better error logging
         print(f"   📡 Calling Qwen API...")
 
         response = hf_client.text_generation(
@@ -151,10 +165,7 @@ Respond ONLY in valid JSON format:
         return result
 
     except Exception as e:
-        # FIXED: Show the actual error
         print(f"❌ LLM1 ERROR: {type(e).__name__}: {str(e)}")
-        import traceback
-        traceback.print_exc()
 
         return {
             'type': 'unknown',
@@ -169,9 +180,10 @@ Respond ONLY in valid JSON format:
 def llm2_analyze(review: Dict[str, Any]) -> Dict[str, Any]:
     """LLM2: User type, Emotion, Context analysis"""
 
-    # FIXED:
+    # FIXED: Get client lazily
+    hf_client = get_hf_client()
+
     if hf_client is None:
-        print("❌ ERROR: HuggingFace client not initialized!")
         return {
             'user_type': 'unknown',
             'emotion': 'unknown',
@@ -222,7 +234,6 @@ Respond ONLY in valid JSON format:
 }}"""
 
     try:
-        # FIXED: Better error logging
         print(f"   📡 Calling Mistral API...")
 
         response = hf_client.text_generation(
@@ -249,10 +260,7 @@ Respond ONLY in valid JSON format:
         return result
 
     except Exception as e:
-        # FIXED: Show the actual error
         print(f"❌ LLM2 ERROR: {type(e).__name__}: {str(e)}")
-        import traceback
-        traceback.print_exc()
 
         return {
             'user_type': 'unknown',
@@ -267,9 +275,10 @@ Respond ONLY in valid JSON format:
 def manager_synthesize(llm1_result: Dict, llm2_result: Dict, review: Dict) -> Dict[str, Any]:
     """Manager: Synthesize LLM1 and LLM2 results"""
 
-    # FIXED:
+    # FIXED: Get client lazily
+    hf_client = get_hf_client()
+
     if hf_client is None:
-        print("❌ ERROR: HuggingFace client not initialized!")
         return {
             'final_type': llm1_result.get('type', 'unknown'),
             'final_department': llm1_result.get('department', 'unknown'),
@@ -308,7 +317,6 @@ Respond ONLY in valid JSON format:
 }}"""
 
     try:
-        # FIXED: Better error logging
         print(f"   📡 Calling Llama Manager API...")
 
         response = hf_client.text_generation(
@@ -334,7 +342,6 @@ Respond ONLY in valid JSON format:
         return result
 
     except Exception as e:
-        # FIXED: Show the actual error
         print(f"❌ MANAGER ERROR: {type(e).__name__}: {str(e)}")
 
         return {
@@ -432,7 +439,7 @@ def analyze_best_sentiment(text: str) -> Dict[str, Any]:
 
 
 def analyze_alt_sentiment(text: str) -> Dict[str, Any]:
-    """Alternate Model: BERTweet"""
+    """Alternate Model: BERTweet - FIXED version"""
     load_sentiment_models()
 
     try:
@@ -440,7 +447,16 @@ def analyze_alt_sentiment(text: str) -> Dict[str, Any]:
 
         with torch.no_grad():
             outputs = _alt_model(**inputs)
-        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+        logits = outputs.logits
+
+        # FIXED: Check if logits are on meta device
+        if logits.device.type == 'meta':
+            print("⚠️ Warning: Model on meta device, moving to CPU")
+            _alt_model.to('cpu')
+            outputs = _alt_model(**inputs)
+            logits = outputs.logits
+
+        probs = torch.nn.functional.softmax(logits, dim=-1)
         prediction = torch.argmax(probs, dim=-1).item()
         confidence = probs[0][prediction].item()
 
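Reviewer note: the guard above re-runs the forward pass on CPU if the logits come back on the meta device. A hedged end-to-end sketch of the same flow (model name and guard logic from the diff; the sample text is illustrative):

    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    name = "finiteautomata/bertweet-base-sentiment-analysis"
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSequenceClassification.from_pretrained(name, torch_dtype=torch.float32)
    model.eval()

    inputs = tokenizer("great app, love it", return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits

    if logits.device.type == "meta":  # defensive fallback mirroring the diff
        model.to("cpu")
        with torch.no_grad():
            logits = model(**inputs).logits

    probs = torch.nn.functional.softmax(logits, dim=-1)
    prediction = probs.argmax(dim=-1).item()
    print(prediction, probs[0][prediction].item())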
@@ -553,11 +569,10 @@ def stage3_finalization_node(state: ReviewState) -> Dict[str, Any]:
 
     start_time = time.time()
 
-    # FIXED:
+    # FIXED: Get client lazily
+    hf_client = get_hf_client()
+
     if hf_client is None:
-        print("❌ ERROR: HuggingFace client not initialized!")
-        print("   Skipping Stage 3 (requires API key)")
-
         result = {
             'final_sentiment': state.get('sentiment', 'NEUTRAL'),
             'confidence': state.get('sentiment_confidence', 0.0),
@@ -630,7 +645,6 @@ Respond ONLY in valid JSON format:
 }}"""
 
     try:
-        # FIXED: Better error logging
        print(f"   📡 Calling Llama 70B API...")
 
        response = hf_client.text_generation(
@@ -653,10 +667,7 @@ Respond ONLY in valid JSON format:
         result['model'] = 'meta-llama/Llama-3.1-70B-Instruct'
 
     except Exception as e:
-        # FIXED: Show the actual error
         print(f"❌ STAGE 3 ERROR: {type(e).__name__}: {str(e)}")
-        import traceback
-        traceback.print_exc()
 
         result = {
             'final_sentiment': state.get('sentiment', 'NEUTRAL'),