Subha95 committed on
Commit
3bcc980
·
verified ·
1 Parent(s): 436e90f

Update chatbot_rag.py

Browse files
Files changed (1) hide show
  1. chatbot_rag.py +7 -18
chatbot_rag.py CHANGED
@@ -31,15 +31,13 @@ def build_qa():
31
  print("🔹 Loading LLM...")
32
  model_id = "google/flan-t5-small"
33
  tokenizer = AutoTokenizer.from_pretrained(model_id)
34
- model = AutoModelForSeq2SeqLM.from_pretrained(model_id, device_map="auto")
35
-
36
  pipe = pipeline(
37
- "text2text-generation",
38
  model=model,
39
  tokenizer=tokenizer,
40
- max_new_tokens=300,
41
- do_sample=True,
42
- temperature=0.2,
43
  )
44
  llm = HuggingFacePipeline(pipeline=pipe)
45
 
@@ -91,18 +89,9 @@ def build_qa():
91
  # Build once
92
  try:
93
  qa_pipeline = build_qa()
 
94
  except Exception as e:
95
  qa_pipeline = None
96
- print("❌ Failed to build QA pipeline:", e)
 
97
  traceback.print_exc()
98
-
99
-
100
- def get_answer(query: str) -> str:
101
- """Takes user query and returns chatbot response."""
102
- if qa_pipeline is None:
103
- return "⚠️ QA pipeline not initialized."
104
-
105
- try:
106
- return qa_pipeline.invoke(query)
107
- except Exception as e:
108
- return f"❌ QA run failed: {e}"
 
31
  print("🔹 Loading LLM...")
32
  model_id = "google/flan-t5-small"
33
  tokenizer = AutoTokenizer.from_pretrained(model_id)
34
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
35
+
36
  pipe = pipeline(
37
+ "text2text-generation", # <-- must be text2text for T5
38
  model=model,
39
  tokenizer=tokenizer,
40
+ max_new_tokens=200,
 
 
41
  )
42
  llm = HuggingFacePipeline(pipeline=pipe)
43
 
 
89
  # Build once
90
  try:
91
  qa_pipeline = build_qa()
92
+ print("✅ qa_pipeline built successfully:", type(qa_pipeline))
93
  except Exception as e:
94
  qa_pipeline = None
95
+ print("❌ Failed to build QA pipeline")
96
+ print("Error message:", str(e))
97
  traceback.print_exc()