app.py ADDED
@@ -0,0 +1,21 @@
import os
from getpass import getpass

from langchain import HuggingFaceHub, PromptTemplate, LLMChain

# HUGGINGFACE_API_TOKEN = getpass()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "Token"  # placeholder; set a real Hugging Face API token

model_id = "mistralai/Mistral-7B-Instruct-v0.2"
conv_model = HuggingFaceHub(
    huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    repo_id=model_id,
    model_kwargs={"temperature": 0.5, "max_new_tokens": 5000},
)


def answer_question(question):
    # Wrap the incoming question in a prompt that frames the model as a logical/numerical solver.
    prompt = PromptTemplate(
        input_variables=["question"],
        template="""You are a logical and numerical question solver. Could you please answer this question? {question}
""",
    )
    conv_chain = LLMChain(llm=conv_model, prompt=prompt, verbose=True, output_key="Answer")
    response = conv_chain({"question": question})
    return response


if __name__ == "__main__":
    print("nothing given")
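For reference, a minimal sketch of how the __main__ block could exercise answer_question instead of printing the "nothing given" placeholder. The sample question is hypothetical; with this legacy LangChain LLMChain setup, calling the chain returns a dict whose generated text sits under the "Answer" output key configured above.

if __name__ == "__main__":
    # Hypothetical test question; replace with any prompt you want to try.
    result = answer_question("What is 15% of 240?")
    # LLMChain returns a dict of the inputs plus the configured output_key.
    print(result["Answer"])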