"""Gradio "guess who" game: ask yes/no questions about a hidden identity.

NOTE(review): the model-backed answering is commented out below, so answers
are currently random ("Yes"/"No") regardless of the question.
"""
import random

import gradio as gr
# Only needed once the model-backed path below is re-enabled.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Model-backed answering, currently disabled (the stub returns a random answer).
# model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
# tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")


def generate(text, ver):
    """Answer a yes/no question about the selected hidden identity.

    Parameters
    ----------
    text : str
        The player's yes/no question.
    ver : str
        Which game is being played: "Hidden Identity 1" or "Hidden Identity 2".

    Returns
    -------
    str
        "Yes" or "No". Currently chosen at random — the model call is
        commented out, so ``guess`` is computed but unused until then.
    """
    if ver == "Hidden Identity 1":
        guess = "Michael Jackson"
    else:
        guess = "Brad Pitt"
    return random.choice(["Yes", "No"])
    # inputs = tokenizer(
    #     f"Answer with yes or no the following question about {guess}: {text}?",
    #     return_tensors="pt",
    # )
    # return tokenizer.batch_decode(model.generate(**inputs), skip_special_tokens=True)[0]


examples = [
    ["Is he/she dead?"],
    ["Is he/she a female?"],
]

title = "Who is who chatgpt"
description = (
    "Guess who is the person that chatgpt is thinking of today "
    "with the minimum number of questions!!"
)

demo = gr.Interface(
    fn=generate,
    inputs=[
        # gr.inputs.* / gr.outputs.* were removed in Gradio 3.x; use the
        # top-level components, and `value=` instead of the old `default=`.
        gr.Textbox(lines=5, label="Input Text"),
        gr.Radio(
            ["Hidden Identity 1", "Hidden Identity 2"],
            value="Hidden Identity 1",
            label="Hidden identity",
        ),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title=title,
    description=description,
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()