Cachoups committed on
Commit
83ebb0e
·
verified ·
1 Parent(s): 00d1251

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -89,12 +89,17 @@ from langchain_core.runnables import RunnableLambda
89
  #prompt_template = ChatPromptTemplate.from_template(prompt)
90
  llm = HuggingFaceHub(
91
  repo_id="HuggingFaceH4/zephyr-7b-beta",
92
- model_kwargs={"temperature": 0.1, "max_length": 10, "return_full_text" : False}
 
 
 
 
 
93
  )
94
 
95
  def ra(user_question):
96
  #prompt = f"You know things about League of Legends. Please correct the following question for grammar and clarity.Do not give explaination:\n{user_question}\nCorrected question:"
97
- prompt = f"Correct the following question for grammar and clarity. Output only the corrected question. Do not explain.\n\n{user_question}\n\nCorrected question:"
98
 
99
  # Pass the prompt to the LLM and get the response
100
  rephrased_query = llm(prompt) # Replace `llm` with the appropriate LLM function or API call
 
89
  #prompt_template = ChatPromptTemplate.from_template(prompt)
90
  llm = HuggingFaceHub(
91
  repo_id="HuggingFaceH4/zephyr-7b-beta",
92
+ model_kwargs={
93
+ "temperature": 0.1,
94
+ "max_length": 10,
95
+ "return_full_text": False,
96
+ "stop_sequences": ["Explanation:", "Original question:", "\n"]
97
+ }
98
  )
99
 
100
  def ra(user_question):
101
  #prompt = f"You know things about League of Legends. Please correct the following question for grammar and clarity.Do not give explaination:\n{user_question}\nCorrected question:"
102
+ prompt = f"Rewrite the following question with correct grammar. ONLY return the corrected question. Do NOT include any explanation.\n\n{user_question}"
103
 
104
  # Pass the prompt to the LLM and get the response
105
  rephrased_query = llm(prompt) # Replace `llm` with the appropriate LLM function or API call