import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoModelWithLMHead,
    AutoTokenizer,
    GPT2Model,
    GPT2Tokenizer,
    pipeline,
)

# Load the 'youa/gpt2' causal-LM checkpoint once at import time and wrap it
# in a text-generation pipeline that the Gradio handler below reuses on
# every request (avoids reloading the model per call).
general_model = AutoModelForCausalLM.from_pretrained('youa/gpt2')
general_generator = pipeline("text-generation", model=general_model, tokenizer="youa/gpt2")

# NOTE(review): the original script ran a discarded smoke-test generation
# here with max_length=1500, which exceeds GPT-2's 1024-token context
# window and needlessly delayed startup; it has been removed.
def generator(start_your_text=''):
    """Continue *start_your_text* with the shared GPT-2 pipeline.

    Returns the pipeline's first candidate as plain text, including the
    prompt itself (the pipeline echoes the input at the start of
    ``generated_text``).
    """
    outputs = general_generator(start_your_text)
    first_candidate = outputs[0]
    return first_candidate["generated_text"]
# Wire the generation function into a minimal text-in/text-out Gradio UI
# and start the local web server (blocks until the app is stopped).
iface = gr.Interface(
    fn=generator,
    inputs="text",
    outputs="text",
)
iface.launch()