jaimin committed on
Commit
05c3737
·
1 Parent(s): fa3e0a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -1
app.py CHANGED
@@ -64,6 +64,50 @@ def get_paraphrases(text, n_predictions=3, top_k=50, max_length=256, device="cpu
64
 
65
  return l
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  iface = gr.Interface(fn=get_paraphrases, inputs=[gr.inputs.Textbox(lines=5)],outputs=["text"])
68
- iface.launch()
 
69
 
 
64
 
65
  return l
66
 
67
def get_paraphrases_pytorchlight(text, n_predictions=3, top_k=50, max_length=256, device="cpu"):
    """Paraphrase *text* sentence-by-sentence with the local T5 model.

    The input is first sent to the remote CWI Space, whose returned string is
    split into sentences; each sentence is paraphrased with diverse beam
    search, and every combination of per-sentence paraphrases is joined into
    a candidate output.

    Args:
        text: Input text to paraphrase.
        n_predictions: Paraphrases generated per sentence (beam sequences returned).
        top_k: Unused here; kept for signature compatibility with get_paraphrases.
        max_length: Maximum generation length for T5_model.generate.
        device: Torch device the encoded tensors are moved to.

    Returns:
        A shuffled list of strings, each a space-joined combination of one
        paraphrase per sentence.

    Raises:
        requests.HTTPError: if the CWI Space endpoint returns an error status.
    """
    para = []
    # Fetch the CWI-processed version of the input from the hosted Space.
    r = requests.post(
        url="https://hf.space/embed/jaimin/CWI/+/api/predict",
        json={"data": [text]},
        timeout=60,  # fix: previously no timeout — a dead endpoint hung forever
    )
    r.raise_for_status()  # fix: surface HTTP errors instead of a cryptic KeyError below
    sentence = r.json()["data"][0]
    for sent in sent_tokenize(sentence):
        # fix: use a local name instead of shadowing the `text` parameter.
        prompt = "paraphrase: " + sent + " </s>"
        encoding = T5_tokenizer.encode_plus(prompt, padding=True, return_tensors="pt", truncation=True)
        input_ids, attention_masks = encoding["input_ids"].to(device), encoding["attention_mask"].to(device)
        model_output = T5_model.generate(
            input_ids=input_ids,
            attention_mask=attention_masks,
            max_length=max_length,  # fix: was hard-coded to 512, silently ignoring the parameter
            early_stopping=True,
            num_beams=15,
            num_beam_groups=5,
            num_return_sequences=n_predictions,
            diversity_penalty=0.70,
            temperature=0.7,
        )
        outputs = []
        for output in model_output:
            generated_sent = T5_tokenizer.decode(
                output, skip_special_tokens=True, clean_up_tokenization_spaces=True
            )
            # Keep only paraphrases that differ from the source and are not duplicates.
            if (
                generated_sent.lower() != sentence.lower()
                and generated_sent not in outputs
            ):
                outputs.append(generated_sent.replace('paraphrasedoutput:', ""))
        para.append(outputs)
    # Every combination of one paraphrase per sentence, in random order.
    # (fix: removed leftover debug print of `para`.)
    combos = list(itertools.product(*para))
    random.shuffle(combos)
    return [" ".join(combo) for combo in combos]
109
+
# Web UI wiring: one tab per paraphraser, served side by side.
# Remote-data paraphraser defined earlier in this file.
iface = gr.Interface(fn=get_paraphrases, inputs=[gr.inputs.Textbox(lines=5)],outputs=["text"])
# Local T5-based paraphraser added by this commit.
iface1 = gr.Interface(fn=get_paraphrases_pytorchlight, inputs=[gr.inputs.Textbox(lines=5)],outputs=["text"])
# NOTE(review): bare `Parallel` is presumably gradio's mix.Parallel — confirm it is
# imported at the top of the file (e.g. `from gradio.mix import Parallel`),
# otherwise this line raises NameError at startup.
Parallel(iface,iface1).launch()
113