Parth211 committed on
Commit
af8347e
·
verified ·
1 Parent(s): 379a52d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -39
app.py CHANGED
@@ -31,10 +31,6 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline
31
  import nltk
32
  from nltk.util import ngrams
33
 
34
-
35
-
36
-
37
-
38
  api_key = os.getenv('API_KEY')
39
 
40
 
@@ -91,25 +87,6 @@ def load_db():
91
  # Initialize langchain LLM chain
92
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
93
  progress(0.1, desc="Initializing HF tokenizer...")
94
- # HuggingFacePipeline uses local model
95
- # Note: it will download model locally...
96
- # tokenizer=AutoTokenizer.from_pretrained(llm_model)
97
- # progress(0.5, desc="Initializing HF pipeline...")
98
- # pipeline=transformers.pipeline(
99
- # "text-generation",
100
- # model=llm_model,
101
- # tokenizer=tokenizer,
102
- # torch_dtype=torch.bfloat16,
103
- # trust_remote_code=True,
104
- # device_map="auto",
105
- # # max_length=1024,
106
- # max_new_tokens=max_tokens,
107
- # do_sample=True,
108
- # top_k=top_k,
109
- # num_return_sequences=1,
110
- # eos_token_id=tokenizer.eos_token_id
111
- # )
112
- # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
113
 
114
  # HuggingFaceHub uses HF inference endpoints
115
  progress(0.5, desc="Initializing HF Hub...")
@@ -484,17 +461,6 @@ def demo():
484
  clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
485
  with gr.Row("Metrics"):
486
  metrics_output = gr.Textbox(lines=10, label="Evaluation Metrics")
487
-
488
-
489
-
490
-
491
-
492
-
493
-
494
-
495
-
496
-
497
-
498
 
499
 
500
  # Preprocessing events
@@ -510,11 +476,7 @@ def demo():
510
  queue=False)
511
 
512
  # Chatbot events
513
-
514
-
515
-
516
-
517
- msg.submit(interact, inputs=[qa_chain, msg, chatbot], outputs=[
518
  gr.State(), chatbot, history, response_source1, response_source1_page,
519
  response_source2, response_source2_page, response_source3, response_source3_page,
520
  None, None, None, metrics_output
 
31
  import nltk
32
  from nltk.util import ngrams
33
 
 
 
 
 
34
  api_key = os.getenv('API_KEY')
35
 
36
 
 
87
  # Initialize langchain LLM chain
88
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
89
  progress(0.1, desc="Initializing HF tokenizer...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
  # HuggingFaceHub uses HF inference endpoints
92
  progress(0.5, desc="Initializing HF Hub...")
 
461
  clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
462
  with gr.Row("Metrics"):
463
  metrics_output = gr.Textbox(lines=10, label="Evaluation Metrics")
 
 
 
 
 
 
 
 
 
 
 
464
 
465
 
466
  # Preprocessing events
 
476
  queue=False)
477
 
478
  # Chatbot events
479
+ msg.submit(interact, inputs=[gr.State(),qa_chain, msg, history], outputs=[
 
 
 
 
480
  gr.State(), chatbot, history, response_source1, response_source1_page,
481
  response_source2, response_source2_page, response_source3, response_source3_page,
482
  None, None, None, metrics_output