srijaydeshpande committed
Commit b355172 · verified · Parent: a5c9a00

Update app.py

Files changed (1):
  1. app.py +5 -1
app.py CHANGED
@@ -14,6 +14,8 @@ from langchain.chains import ConversationChain
 from langchain.memory import ConversationBufferMemory
 from langchain_community.llms import HuggingFaceEndpoint
 import torch
+import spaces
+
 
 list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
@@ -42,6 +44,7 @@ def create_db(splits):
 
 
 # Initialize langchain LLM chain
+@spaces.GPU(duration=60)
 def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
     if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
         llm = HuggingFaceEndpoint(
@@ -88,6 +91,7 @@ def initialize_database(list_file_obj, progress=gr.Progress()):
     return vector_db, "Database created!"
 
 # Initialize LLM
+@spaces.GPU(duration=60)
 def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
     # print("llm_option",llm_option)
     llm_name = list_llm[llm_option]
@@ -103,7 +107,7 @@ def format_chat_history(message, chat_history):
         formatted_chat_history.append(f"Assistant: {bot_message}")
     return formatted_chat_history
 
-
+@spaces.GPU(duration=60)
 def conversation(qa_chain, message, history):
     formatted_chat_history = format_chat_history(message, history)
     # Generate response using QA chain
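
For context, `spaces` is the Hugging Face ZeroGPU helper package: decorating a function with `@spaces.GPU(duration=60)` asks the Space to allocate a GPU for up to 60 seconds while that call runs. Below is a minimal sketch of the pattern this commit applies, assuming a ZeroGPU Space; `build_llm` and the sample argument values are illustrative, not taken from app.py.

import spaces
from langchain_community.llms import HuggingFaceEndpoint

@spaces.GPU(duration=60)  # reserve a ZeroGPU slot for up to 60 s per call
def build_llm(repo_id: str, temperature: float, max_tokens: int, top_k: int):
    # Hypothetical helper mirroring the endpoint setup in initialize_llmchain.
    # HuggingFaceEndpoint calls a hosted inference endpoint (it needs a
    # HUGGINGFACEHUB_API_TOKEN in the environment), so no model weights are
    # loaded locally; the decorator declares this entry point to ZeroGPU.
    return HuggingFaceEndpoint(
        repo_id=repo_id,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )

llm = build_llm("meta-llama/Meta-Llama-3-8B-Instruct", 0.7, 1024, 3)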