srijaydeshpande committed on
Commit
d12a540
·
verified ·
1 Parent(s): 4c9d420

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -14,7 +14,7 @@ from langchain.chains import ConversationChain
14
  from langchain.memory import ConversationBufferMemory
15
  from langchain_community.llms import HuggingFaceEndpoint
16
  import torch
17
- import spaces
18
 
19
 
20
  list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
@@ -44,7 +44,7 @@ def create_db(splits):
44
 
45
 
46
  # Initialize langchain LLM chain
47
- @spaces.GPU(duration=60)
48
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
49
  if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
50
  llm = HuggingFaceEndpoint(
@@ -81,7 +81,7 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
81
  return qa_chain
82
 
83
  # Initialize database
84
- @spaces.GPU(duration=60)
85
  def initialize_database(list_file_obj, progress=gr.Progress()):
86
  # Create a list of documents (when valid)
87
  list_file_path = [x.name for x in list_file_obj if x is not None]
@@ -92,7 +92,7 @@ def initialize_database(list_file_obj, progress=gr.Progress()):
92
  return vector_db, "Database created!"
93
 
94
  # Initialize LLM
95
- @spaces.GPU(duration=60)
96
  def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
97
  # print("llm_option",llm_option)
98
  llm_name = list_llm[llm_option]
@@ -108,7 +108,7 @@ def format_chat_history(message, chat_history):
108
  formatted_chat_history.append(f"Assistant: {bot_message}")
109
  return formatted_chat_history
110
 
111
- @spaces.GPU(duration=60)
112
  def conversation(qa_chain, message, history):
113
  formatted_chat_history = format_chat_history(message, history)
114
  # Generate response using QA chain
 
14
  from langchain.memory import ConversationBufferMemory
15
  from langchain_community.llms import HuggingFaceEndpoint
16
  import torch
17
+ # import spaces
18
 
19
 
20
  list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
 
44
 
45
 
46
  # Initialize langchain LLM chain
47
+ # @spaces.GPU(duration=60)
48
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
49
  if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
50
  llm = HuggingFaceEndpoint(
 
81
  return qa_chain
82
 
83
  # Initialize database
84
+ # @spaces.GPU(duration=60)
85
  def initialize_database(list_file_obj, progress=gr.Progress()):
86
  # Create a list of documents (when valid)
87
  list_file_path = [x.name for x in list_file_obj if x is not None]
 
92
  return vector_db, "Database created!"
93
 
94
  # Initialize LLM
95
+ # @spaces.GPU(duration=60)
96
  def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
97
  # print("llm_option",llm_option)
98
  llm_name = list_llm[llm_option]
 
108
  formatted_chat_history.append(f"Assistant: {bot_message}")
109
  return formatted_chat_history
110
 
111
+ # @spaces.GPU(duration=60)
112
  def conversation(qa_chain, message, history):
113
  formatted_chat_history = format_chat_history(message, history)
114
  # Generate response using QA chain