# Hugging Face Space page metadata (scrape residue): Space status was "Sleeping".
import gradio as gr

# Number of alternative continuations generated for each prompt.
num_sequences = 4

# When True, skip downloading/loading the language model and return canned
# text instead — handy for working on the UI without the model.
demo_mode = False

if not demo_mode:
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

    # Fine-tuned German GPT-2; the tokenizer comes from the base model the
    # fine-tune was derived from.
    model = AutoModelForCausalLM.from_pretrained(
        "d2weber/german-gpt2-finetuned-coldmirror-hpodcast1"
    )
    tokenizer = AutoTokenizer.from_pretrained("dbmdz/german-gpt2", use_fast=True)
    lm = pipeline("text-generation", model=model, tokenizer=tokenizer)


def generate(*args, **kwargs):
    """Run the text-generation pipeline and return the generated strings.

    Thin wrapper around ``lm`` that passes ``pad_token_id`` explicitly
    (GPT-2 has no pad token, so transformers would otherwise warn on every
    call) and unwraps each result dict to its ``generated_text`` field.

    Only callable when ``demo_mode`` is False — ``lm`` and ``tokenizer``
    exist only in that case.
    """
    outputs = lm(*args, **kwargs, pad_token_id=tokenizer.eos_token_id)
    return [o["generated_text"] for o in outputs]
with gr.Blocks() as app:
    prompt = gr.TextArea(value="Hallo und herzlich willkommen", label="Input")

    # Per generated sequence: a hidden Textbox holding the raw text, plus a
    # visible CheckboxGroup showing it word-by-word so the user can click the
    # word up to which the continuation should be appended to the prompt.
    sequences = []
    for _ in range(num_sequences):
        seq = gr.Textbox("", visible=False)
        box = gr.CheckboxGroup(choices=[], label="", interactive=True)
        sequences.append(seq)

        def split(seq):
            """Expose a generated sequence as word-level checkbox choices."""
            return gr.CheckboxGroup(seq.split(), value=[])

        def append_selection(prompt, sequence, selected: gr.SelectData):
            """Append the sequence up to (and including) the clicked word."""
            to_append = " ".join(sequence.split()[: selected.index + 1])
            # Insert a separating space only when the appended text starts
            # with a word character; leading punctuation attaches directly.
            delimiter = " " if to_append[:1].isalnum() else ""
            return prompt.rstrip() + delimiter + to_append

        # NOTE(review): the scraped source contained no event wiring and
        # defined both handlers under the same name `handle` (the second
        # definition shadowed this one). The handlers were renamed and the
        # wiring below is the most plausible reconstruction — confirm
        # against the original Space.
        seq.change(split, seq, box)
        box.select(append_selection, [prompt, seq], prompt)

    max_new_tokens = gr.Slider(
        1, 100, value=18, step=1,
        label="How long should the generated sequences be:",
    )
    gr.Examples([
        ["Hallo und herzlich willkommen"],
    ], prompt)

    def regenerate(prompt, max_new_tokens):
        """Produce ``num_sequences`` continuations of the (rstripped) prompt.

        In demo mode returns canned placeholder text; otherwise asks the
        pipeline for continuation-only text (``return_full_text=False``).
        """
        prompt = prompt.rstrip()
        if demo_mode:
            return ["some new words"] * num_sequences
        return generate(
            prompt,
            return_full_text=False,
            num_return_sequences=num_sequences,
            max_new_tokens=int(max_new_tokens),  # Slider may hand over a float
        )

    # NOTE(review): trigger wiring was also missing from the scraped source;
    # regenerating when the prompt is submitted is assumed — confirm.
    prompt.submit(regenerate, [prompt, max_new_tokens], sequences)

app.launch()