manabb committed on
Commit
f8f85bd
·
verified Β·
1 Parent(s): fc8e85d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -32
app.py CHANGED
@@ -34,17 +34,10 @@ os.environ['HUGGINGFACEHUB_API_TOKEN'] = os.getenv("HF_TOKEN")
34
  # Initialize embedding model
35
  embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
36
 
37
- #Create pipeline
38
- pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
39
-
40
- #Build LLM
41
- llm = HuggingFacePipeline(pipeline=pipe)
42
- # Wrap in pipeline
43
- #pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=512)
44
- #llm = HuggingFacePipeline(pipeline=pipe)
45
 
46
  # Store the QA chain globally (across UI events)
47
  qa_chain = None
 
48
 
49
  repo_id="manabb/nrl"
50
 
@@ -53,17 +46,17 @@ repo_id="manabb/nrl"
53
 
54
  # Initialize embedding model
55
  #embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
56
-
57
  # Load HF model (lightweight for CPU)
58
- #model_name = "google/flan-t5-small"
59
- #tokenizer = AutoTokenizer.from_pretrained(model_name)
60
- #model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
61
 
62
  # Wrap in pipeline
63
- #pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, max_length=512)
64
- #llm = HuggingFacePipeline(pipeline=pipe)
65
 
66
- #======
67
  # Create optimized pipeline for TinyLlama
68
  pipe = pipeline(
69
  "text-generation",
@@ -82,7 +75,7 @@ pipe = pipeline(
82
 
83
  # Build LangChain LLM wrapper
84
  llm = HuggingFacePipeline(pipeline=pipe)
85
- #=====
86
 
87
  def create_faiss_index(repo_id, file, embedding_model="sentence-transformers/all-MiniLM-L6-v2"):
88
  """Create FAISS index from PDF and upload to HF dataset repo"""
@@ -324,38 +317,61 @@ def generate_qa_chain(repo_id, embedding_model="sentence-transformers/all-MiniLM
324
  def bePrepare():
325
  global qa_chain
326
  qa_chain = generate_qa_chain("manabb/nrl",llm=llm)
 
 
 
 
 
 
327
 
328
  def ask_question(query):
329
  if not qa_chain:
330
- return "❌ Please clik the button to get the udated resources first."
331
  response = qa_chain.invoke({"query": query})
332
  return response["result"]
333
 
 
 
 
 
 
334
  #====================
335
  # Gradio UI
336
  with gr.Blocks() as demo:
337
  gr.Markdown("## 🧠 For use of NRL procurement department Only")
338
-
339
  with gr.Row():
340
- Index_processing_output=gr.Textbox(label="πŸ“ Status", interactive=False)
341
- Index_processing_btn = gr.Button("πŸ”„ Clik to get the udated resources")
342
-
343
- with gr.Row():
344
- query_input = gr.Textbox(label="❓ This is for NRL commercial procurement deptd. Your Question pls")
345
- query_btn = gr.Button("🧠 Get Answer")
346
-
347
- answer_output = gr.Textbox(label="βœ… Answer", lines=10)
348
-
349
- output_msg = gr.Textbox(label="πŸ“ Authorization Message", interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
  with gr.Row():
 
 
351
  file_input = gr.File(label="πŸ“„ Upload .pdf File by only authorized user", type="filepath")
352
  upload_btn = gr.Button("πŸ”„ Process Doc")
353
- manab1="Write the password to upload new Circular Doc."
354
- authorized_user=gr.Textbox(label=manab1)
355
  upload_btn.click(upload_and_prepare, inputs=[file_input,authorized_user], outputs=output_msg)
356
 
357
- query_btn.click(ask_question, inputs=query_input, outputs=answer_output)
358
- Index_processing_btn.click(bePrepare, inputs=None, outputs=Index_processing_output)
359
 
360
 
361
  # For local dev use: demo.launch()
 
34
  # Initialize embedding model
35
  embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
36
 
 
 
 
 
 
 
 
 
37
 
38
  # Store the QA chain globally (across UI events)
39
  qa_chain = None
40
+ qa_chain1 = None
41
 
42
  repo_id="manabb/nrl"
43
 
 
46
 
47
  # Initialize embedding model
48
  #embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
49
+ #=============================================google/flan-t5-small
50
  # Load HF model (lightweight for CPU)
51
+ model_name = "google/flan-t5-small"
52
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
53
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
54
 
55
  # Wrap in pipeline
56
+ pipe1 = pipeline("text2text-generation", model=model, tokenizer=tokenizer, max_length=512)
57
+ llm1 = HuggingFacePipeline(pipeline=pipe1)
58
 
59
+ #=============================================TinyLlama/TinyLlama-1.1B-Chat-v1.0
60
  # Create optimized pipeline for TinyLlama
61
  pipe = pipeline(
62
  "text-generation",
 
75
 
76
  # Build LangChain LLM wrapper
77
  llm = HuggingFacePipeline(pipeline=pipe)
78
+ #=============================================
79
 
80
  def create_faiss_index(repo_id, file, embedding_model="sentence-transformers/all-MiniLM-L6-v2"):
81
  """Create FAISS index from PDF and upload to HF dataset repo"""
 
317
  def bePrepare():
318
  global qa_chain
319
  qa_chain = generate_qa_chain("manabb/nrl",llm=llm)
320
+ return "I am ready, ask me questions with model tiny Lama."
321
+
322
+ def bePrepare1():
323
+ global qa_chain1
324
+ qa_chain1 = generate_qa_chain("manabb/nrl",llm=llm1)
325
+ return "I am ready, ask me questions with model google flan-t5."
326
 
327
  def ask_question(query):
328
  if not qa_chain:
329
+ return "❌ Please clik the button to get the udated resources with tiny Lama."
330
  response = qa_chain.invoke({"query": query})
331
  return response["result"]
332
 
333
+ def ask_question1(query):
334
+ if not qa_chain1:
335
+ return "❌ Please clik the button to get the udated resources google flan-t5."
336
+ response1 = qa_chain1.invoke({"query": query})
337
+ return response1["result"]
338
  #====================
339
  # Gradio UI
340
  with gr.Blocks() as demo:
341
  gr.Markdown("## 🧠 For use of NRL procurement department Only")
 
342
  with gr.Row():
343
+ # LEFT COLUMN: Document Management
344
+ with gr.Column(scale=1):
345
+ gr.Markdown("## 🧠 Using TinyLama Model")
346
+ with gr.Row():
347
+ Index_processing_output=gr.Textbox(label="πŸ“ Status for tiny lama", interactive=False)
348
+ with gr.Row():
349
+ Index_processing_btn = gr.Button("πŸ”„ Clik to get the udated resources with tiny Lama")
350
+ Index_processing_btn.click(bePrepare, inputs=None, outputs=Index_processing_output)
351
+ with gr.Row():
352
+ query_input = gr.Textbox(label="❓ Your Question pls")
353
+ query_btn = gr.Button("🧠 Get Answer")
354
+ answer_output = gr.Textbox(label="βœ… Answer", lines=4)
355
+ query_btn.click(ask_question, inputs=query_input, outputs=answer_output)
356
+ # RIGHT COLUMN: Document Management
357
+ with gr.Column(scale=2):
358
+ gr.Markdown("## 🧠 Using google flan-t5")
359
+ Index_processing_outpu1t=gr.Textbox(label="πŸ“ Status for google flan-t5", interactive=False)
360
+ Index_processing_btn1 = gr.Button("πŸ”„ Clik to get the udated resources with google flan-t5")
361
+ Index_processing_btn1.click(bePrepare1, inputs=None, outputs=Index_processing_output1)
362
+ query_input1 = gr.Textbox(label="❓ Your Question pls")
363
+ query_btn1 = gr.Button("🧠 Get Answer")
364
+ answer_output1 = gr.Textbox(label="βœ… Answer", lines=4)
365
+ query_btn1.click(ask_question1, inputs=query_input1, outputs=answer_output1)
366
+
367
  with gr.Row():
368
+ gr.Markdown("## 🧠 Using google flan-t5")
369
+ output_msg = gr.Textbox(label="πŸ“ Authorization Message", interactive=False)
370
  file_input = gr.File(label="πŸ“„ Upload .pdf File by only authorized user", type="filepath")
371
  upload_btn = gr.Button("πŸ”„ Process Doc")
372
+ authorized_user=gr.Textbox(label="Write the password to upload new Circular Doc.")
 
373
  upload_btn.click(upload_and_prepare, inputs=[file_input,authorized_user], outputs=output_msg)
374
 
 
 
375
 
376
 
377
  # For local dev use: demo.launch()