update sentence parsing, use model twice
app.py CHANGED
@@ -17,15 +17,31 @@ model = PeftModel.from_pretrained(model, peft_model_id)
 
 
 def make_inference(original_text):
-
-
+    str_strategy_prompt = f"### Negative sentence:\n{original_text}\n\n### Reframing strategy:\n"
+    batch_1 = tokenizer(
+        str_strategy_prompt,
         return_tensors="pt",
     )
 
     with torch.cuda.amp.autocast():
-
+        output_tokens_1 = model.generate(**batch_1, max_new_tokens=50)
 
-
+    output_1 = tokenizer.decode(output_tokens_1[0], skip_special_tokens=True)
+    reframing_strategy = output_1[len(str_strategy_prompt):].partition('\n')[0]
+
+    str_reframing_prompt = f"### Negative sentence:\n{original_text}\n\n### Reframing strategy:\n{reframing_strategy}\n\n### Reframing sentence:\n"
+    batch_2 = tokenizer(
+        str_reframing_prompt,
+        return_tensors="pt",
+    )
+
+    with torch.cuda.amp.autocast():
+        output_tokens_2 = model.generate(**batch_2, max_new_tokens=100)
+
+    output_2 = tokenizer.decode(output_tokens_2[0], skip_special_tokens=True)
+    reframing_sentence = output_2[len(str_reframing_prompt):]
+
+    return reframing_sentence
 
 
 if __name__ == "__main__":
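For reference, a consolidated sketch of make_inference as it reads after this commit. The setup lines are assumptions added for self-containedness: the hunk header shows the preceding part of app.py ends in model = PeftModel.from_pretrained(model, peft_model_id), so the base model and adapter ids below are placeholders only, not the app's real values.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "bigscience/bloom-560m"        # placeholder, not the app's real base model
peft_model_id = "your-user/your-peft-adapter"  # placeholder, not the app's real adapter

tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(base_model_id)
model = PeftModel.from_pretrained(model, peft_model_id)


def make_inference(original_text):
    # Pass 1: ask the model to name a reframing strategy for the negative sentence.
    str_strategy_prompt = f"### Negative sentence:\n{original_text}\n\n### Reframing strategy:\n"
    batch_1 = tokenizer(str_strategy_prompt, return_tensors="pt")
    with torch.cuda.amp.autocast():
        output_tokens_1 = model.generate(**batch_1, max_new_tokens=50)
    output_1 = tokenizer.decode(output_tokens_1[0], skip_special_tokens=True)
    # Drop the echoed prompt and keep only the first generated line: the strategy.
    reframing_strategy = output_1[len(str_strategy_prompt):].partition('\n')[0]

    # Pass 2: splice the predicted strategy into a longer prompt and ask for
    # the reframed sentence itself.
    str_reframing_prompt = f"### Negative sentence:\n{original_text}\n\n### Reframing strategy:\n{reframing_strategy}\n\n### Reframing sentence:\n"
    batch_2 = tokenizer(str_reframing_prompt, return_tensors="pt")
    with torch.cuda.amp.autocast():
        output_tokens_2 = model.generate(**batch_2, max_new_tokens=100)
    output_2 = tokenizer.decode(output_tokens_2[0], skip_special_tokens=True)
    # Everything after the echoed prompt is the reframed sentence.
    return output_2[len(str_reframing_prompt):]

The design point of the commit (per its title, "use model twice") is chained prompting: the first generate() call predicts only a reframing strategy, taken as the first generated line after the prompt, and that strategy is spliced into a second, longer prompt so the second generate() call produces the reframed sentence conditioned on it.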