Update app.py
Browse files
app.py
CHANGED
|
@@ -11,4 +11,22 @@ llm_hf = HuggingFaceHub(
|
|
| 11 |
|
| 12 |
text = "Why did the chicken cross the road?"
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
# Ask the model a seed question, then extract numbered facts from the
# question + answer using a second, prompt-templated chain.

text = "Why did the chicken cross the road?"

# First pass: send the raw question straight to the HuggingFace Hub model
# and show what it says.
output_question_1 = llm_hf(text)
print(output_question_1)


###
## FACT EXTRACTION
###

# Prompt that instructs the model to return short, numbered, opinion-free
# factual statements pulled out of whatever text it is given.
fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
)

# NOTE(review): this chain is wired to `llm`, which is not defined in the
# visible portion of the file (only `llm_hf` appears in the hunk header).
# Confirm `llm` is created earlier in app.py; otherwise this should be
# `llm=llm_hf`.
fact_extraction_chain = LLMChain(llm=llm, prompt=fact_extraction_prompt)

# Second pass: run fact extraction over the original question concatenated
# with the model's first answer, then print the numbered facts.
facts = fact_extraction_chain.run(text + " " + output_question_1)
print(facts)