Spaces · Build error

Commit 796ae95 · Parent(s): 594c919

Update app.py
app.py CHANGED

@@ -6,12 +6,12 @@ from transformers import (
 AutoTokenizer
 )
 
-M0 = "
-M1 = "
-M2 = "
+M0 = "consciousAI/question-generation-auto-t5-v1-base-s"
+M1 = "consciousAI/question-generation-auto-t5-v1-base-s-q"
+M2 = "consciousAI/question-generation-auto-t5-v1-base-s-q-c"
 
-M4 = "
-M5 = "
+M4 = "consciousAI/question-generation-auto-hints-t5-v1-base-s-q"
+M5 = "consciousAI/question-generation-auto-hints-t5-v1-base-s-q-c"
 
 device = ['cuda' if torch.cuda.is_available() else 'cpu'][0]
 

@@ -50,7 +50,7 @@ def _formatQs(questions):
 _finalQs = None
 return _finalQs
 
-def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenalty=2.0, earlyStopping=True, numReturnSequences=1, numBeams=2, noRepeatNGramSize=0, doSample=False, topK=0, topP=0, temperature=0):
+def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenalty=2.0, earlyStopping=True, numReturnSequences=1, numBeams=2, noRepeatNGramSize=0, doSample=False, topK=0, penaltyAlpha=0, topP=0, temperature=0, model="All"):
 
 predictionM0 = None
 predictionM1 = None

@@ -60,7 +60,8 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 
 if mode == 'Auto':
 _inputText = "question_context: " + context
-
+if model == "All":
+
 _encoding = _tk0.encode(_inputText, return_tensors='pt', truncation=True, padding='max_length').to(device) # max_length=1024
 _outputEncoded = _m0.generate(_encoding,
 min_length=minLength,

@@ -72,6 +73,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -88,6 +90,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -104,6 +107,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -120,6 +124,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -136,6 +141,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -154,6 +160,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -170,6 +177,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 no_repeat_ngram_size=noRepeatNGramSize,
 do_sample=doSample,
 top_k=topK,
+penalty_alpha=penaltyAlpha,
 top_p=topP,
 temperature=temperature
 )

@@ -186,7 +194,7 @@ def _generate(mode, context, hint=None, minLength=50, maxLength=500, lengthPenal
 with gr.Blocks() as demo:
 gr.Markdown(value="# Question Generation Demo \n [question-generation-auto-t5-v1-base-s](https://huggingface.co/anshoomehra/question-generation-auto-t5-v1-base-s) ✫ [question-generation-auto-t5-v1-base-s-q](https://huggingface.co/anshoomehra/question-generation-auto-t5-v1-base-s-q) ✫ [question-generation-auto-t5-v1-base-s-q-c](https://huggingface.co/anshoomehra/question-generation-auto-t5-v1-base-s-q-c) ✫ [question-generation-auto-hints-t5-v1-base-s-q](https://huggingface.co/anshoomehra/question-generation-auto-hints-t5-v1-base-s-q) ✫ [question-generation-auto-hints-t5-v1-base-s-q-c](https://huggingface.co/anshoomehra/question-generation-auto-hints-t5-v1-base-s-q-c)\n\n Please be patient, 5 models may take up to 80 sec to run on CPU")
 
-with gr.Accordion(variant='compact', label='
+with gr.Accordion(variant='compact', label='Search Methods: Deteriminstic / Stochastic / Contrastive', open=True):
 with gr.Row():
 mode = gr.Radio(["Auto", "Hints"], value="Auto", label="Mode")
 with gr.Row():

@@ -201,8 +209,12 @@ with gr.Blocks() as demo:
 with gr.Row():
 doSample = gr.Checkbox(label="Do Random Sample")
 topK = gr.Slider(0, 50, 0, step=1, label="Top K")
+penaltyAlpha = gr.Slider(0.0, 1, 0, label="Penalty Alpha")
 topP = gr.Slider(0, 1, 0, label="Top P/Nucleus Sampling")
 temperature = gr.Slider(0.01, 1, 1, label="Temperature")
+with gr.Row():
+model = gr.Dropdown(["question-generation-auto-hints-t5-v1-base-s-q-c", "question-generation-auto-hints-t5-v1-base-s-q", "question-generation-auto-t5-v1-base-s-q-c", "question-generation-auto-t5-v1-base-s-q", "question-generation-auto-t5-v1-base-s", "All"], label="Model")
+
 
 with gr.Accordion(variant='compact', label='Input Values'):
 with gr.Row(variant='compact'):

@@ -225,7 +237,7 @@ with gr.Blocks() as demo:
 with gr.Row():
 gen_btn = gr.Button("Generate Questions")
 gen_btn.click(fn=_generate,
-inputs=[mode, context, hint, minLength, maxLength, lengthPenalty, earlyStopping, numReturnSequences, numBeams, noRepeatNGramSize, doSample, topK, topP, temperature],
+inputs=[mode, context, hint, minLength, maxLength, lengthPenalty, earlyStopping, numReturnSequences, numBeams, noRepeatNGramSize, doSample, topK, penaltyAlpha, topP, temperature, model],
 outputs=[_predictionM5, _predictionM4, _predictionM2, _predictionM1, _predictionM0]
 )
 
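What the commit changes, in brief: _generate gains two trailing parameters (penaltyAlpha and model), each of the seven per-model generate() calls gains a penalty_alpha=penaltyAlpha argument, and the UI adds a matching "Penalty Alpha" slider plus a "Model" dropdown. In Hugging Face transformers (v4.24 and later), a positive penalty_alpha together with top_k > 1 switches generate() to contrastive search, while the slider default of 0 leaves the existing beam-search behaviour untouched, which is why the value can be passed through unconditionally. A minimal sketch of the call shape, using t5-small as a stand-in checkpoint rather than one of the Space's models:

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Stand-in checkpoint: any seq2seq model illustrates the generate() call shape.
_tk = AutoTokenizer.from_pretrained("t5-small")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
_m = AutoModelForSeq2SeqLM.from_pretrained("t5-small").to(device)

_encoding = _tk.encode("question_context: The Eiffel Tower is in Paris.",
                       return_tensors='pt', truncation=True).to(device)

# penalty_alpha > 0 together with top_k > 1 activates contrastive search
# (transformers >= 4.24); penalty_alpha=0, the new slider's default,
# keeps the existing beam-search decoding.
_outputEncoded = _m.generate(_encoding, max_length=50, penalty_alpha=0.6, top_k=4)
print(_tk.decode(_outputEncoded[0], skip_special_tokens=True))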
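Because the seven call sites repeat the same decoding arguments, each new knob costs seven identical hunks, which is exactly what this commit shows. A hypothetical refactor, with values mirroring _generate's defaults in the diff above, would collect the knobs once:

# Hypothetical refactor: gather the shared decoding knobs in one dict,
# mirroring _generate's defaults, so adding penalty_alpha becomes a
# one-line change instead of seven.
genKwargs = dict(
    min_length=50,
    max_length=500,
    length_penalty=2.0,
    early_stopping=True,
    num_return_sequences=1,
    num_beams=2,
    no_repeat_ngram_size=0,
    do_sample=False,
    top_k=0,
    penalty_alpha=0,
    top_p=0,
    temperature=0,
)
# Each per-model call site then shrinks to, e.g.:
# _outputEncoded = _m0.generate(_encoding, **genKwargs)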
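On the UI side, the dropdown value is appended to the gen_btn.click inputs, so _generate receives the selection as its trailing model argument. One caveat visible in the diff: the if model == "All": guard added at new line 63 is not followed by re-indented code, which would raise an IndentationError when app.py is imported and would explain the "Build error" status shown above. A runnable sketch of the click wiring, with a hypothetical stub standing in for _generate and an abridged model list:

import gradio as gr

# Abridged choices; the Space lists all five checkpoints plus "All".
MODELS = ["question-generation-auto-t5-v1-base-s", "All"]

def _generateStub(context, model):
    # Hypothetical stand-in for _generate: a real handler would run only
    # the checkpoint(s) selected by `model` and return their questions.
    return f"[{model}] questions for: {context[:40]}"

with gr.Blocks() as demo:
    context = gr.Textbox(label="Context")
    model = gr.Dropdown(MODELS, value="All", label="Model")
    questions = gr.Textbox(label="Questions")
    gen_btn = gr.Button("Generate Questions")
    gen_btn.click(fn=_generateStub, inputs=[context, model], outputs=[questions])

demo.launch()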