Spaces:
Sleeping
Sleeping
File size: 1,206 Bytes
4bf6497 02ca452 06a6fc5 4bf6497 06a6fc5 4bf6497 02ca452 026ead7 51ea30b 06a6fc5 4bf6497 02ca452 bb0f64a 02ca452 4bf6497 bb0f64a 4bf6497 02ca452 700abd4 02ca452 5a2ec86 02ca452 4bf6497 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import random
#model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
#tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
def generate(text, ver):
    """Answer a yes/no question about the currently selected hidden identity.

    Parameters:
        text: The player's yes/no question. Unused while the model call is
            stubbed out (see the commented flan-t5 lines below).
        ver: Which identity is selected — "Hidden Identity 1" or
            "Hidden Identity 2".

    Returns:
        "Yes" or "No". Currently chosen uniformly at random; the flan-t5
        model that would actually answer about `guess` is commented out.
    """
    # Kept so re-enabling the model only needs the commented lines restored.
    guess = "Michael Jackson" if ver == "Hidden Identity 1" else "Brad Pitt"
    # Stubbed model path — restore together with the commented imports at the
    # top of the file to get real answers:
    # inputs = tokenizer(f"Answer with yes or no the following question about {guess}: {text}?", return_tensors="pt")
    # return tokenizer.batch_decode(model.generate(**inputs), skip_special_tokens=True)[0]
    return random.choice(["Yes", "No"])
# Seed questions shown beneath the input box, plus the page copy for the UI.
examples = [["Is he/she dead?"], ["Is he/she a female?"]]
title = "Who is who chatgpt"
description = "Guess who is the person that chatgpt is thinking of today with the minimum number of questions!!"
# Build and serve the guessing-game UI.
# NOTE(review): the original used the gradio 2.x namespaces (gr.inputs.*,
# gr.outputs.*, type="value", default=), which were removed in gradio 3.0 and
# crash on any current gradio install. The top-level components below are the
# supported equivalents with identical behavior. If this Space pins
# gradio<3.0, keep the old spellings instead.
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(lines=5, label="Input Text"),
        gr.Radio(
            ["Hidden Identity 1", "Hidden Identity 2"],
            value="Hidden Identity 1",
            label="Hidden identity",
        ),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title=title,
    description=description,
    examples=examples,
)
demo.launch()
|