Update app.py
Browse files
app.py
CHANGED
|
@@ -64,26 +64,18 @@ llm_hf_sentiment = HuggingFaceHub(
|
|
| 64 |
model_kwargs={"temperature":0.9 }
|
| 65 |
)
|
| 66 |
|
| 67 |
-
|
| 68 |
input_variables=["text_input"],
|
| 69 |
template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
|
| 70 |
)
|
| 71 |
|
| 72 |
-
def sentiment (
|
| 73 |
-
sentiment_chain = LLMChain(llm=
|
| 74 |
facts = sentiment_chain.run(message)
|
| 75 |
print(facts)
|
| 76 |
return facts
|
| 77 |
|
| 78 |
|
| 79 |
-
####
|
| 80 |
-
## models
|
| 81 |
-
# 1 seem best for testing
|
| 82 |
-
####
|
| 83 |
-
#download and setup the model and tokenizer
|
| 84 |
-
model_name = 'facebook/blenderbot-400M-distill'
|
| 85 |
-
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
|
| 86 |
-
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
|
| 87 |
|
| 88 |
|
| 89 |
|
|
@@ -121,12 +113,21 @@ fact_extraction_prompt = PromptTemplate(
|
|
| 121 |
)
|
| 122 |
|
| 123 |
def factextraction (message):
|
| 124 |
-
fact_extraction_chain = LLMChain(llm=
|
| 125 |
facts = fact_extraction_chain.run(message)
|
| 126 |
print(facts)
|
| 127 |
return facts
|
| 128 |
|
| 129 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 130 |
def func (message):
|
| 131 |
inputs = tokenizer(message, return_tensors="pt")
|
| 132 |
result = model.generate(**inputs)
|
|
|
|
| 64 |
model_kwargs={"temperature":0.9 }
|
| 65 |
)
|
| 66 |
|
| 67 |
+
# Prompt driving the sentiment pipeline (used by sentiment() below).
# FIX: the original template was copy-pasted from the fact-extraction prompt
# ("Extract the key facts out of this text...") and never asked for sentiment,
# so the sentiment chain actually performed fact extraction. Reworded to
# request a sentiment analysis while keeping the same input variable.
sentiment_prompt = PromptTemplate(
    input_variables=["text_input"],
    template="Analyze the sentiment of this text. Say whether it is positive, negative or neutral, and briefly explain why. :\n\n {text_input}"
)
|
| 71 |
|
| 72 |
+
def sentiment(message):
    """Run the sentiment LLM chain over *message* and return the raw model text.

    The chain output is also echoed to stdout for quick debugging.
    NOTE(review): the chain is rebuilt on every call; presumably cheap, but
    could be hoisted to module level — confirm before changing.
    """
    chain = LLMChain(llm=llm_hf_sentiment, prompt=sentiment_prompt)
    result = chain.run(message)
    print(result)
    return result
|
| 77 |
|
| 78 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
|
| 81 |
|
|
|
|
| 113 |
)
|
| 114 |
|
| 115 |
def factextraction(message):
    """Extract numbered key facts from *message* via the fact-extraction chain.

    Returns the chain's raw text output; also prints it for debugging.
    """
    chain = LLMChain(llm=llm_factextract, prompt=fact_extraction_prompt)
    extracted = chain.run(message)
    print(extracted)
    return extracted
|
| 120 |
|
| 121 |
|
| 122 |
+
####
|
| 123 |
+
## models
|
| 124 |
+
# 1 seem best for testing
|
| 125 |
+
####
|
| 126 |
+
#download and setup the model and tokenizer
|
| 127 |
+
model_name = 'facebook/blenderbot-400M-distill'
|
| 128 |
+
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
|
| 129 |
+
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
|
| 130 |
+
|
| 131 |
def func (message):
|
| 132 |
inputs = tokenizer(message, return_tensors="pt")
|
| 133 |
result = model.generate(**inputs)
|