# --- Legacy app (commented out): DialoGPT-based "Talk To Me Morty" chatbot ---
# import os
# import gradio as gr

# HF_TOKEN = os.getenv('HF_TOKEN')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "Rick-bot-flags")

# title = "Talk To Me Morty"
# description = """
# <p>
# <center>
# The bot was trained on the Rick and Morty dialogues Kaggle dataset using DialoGPT.
# <img src="https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot/resolve/main/img/rick.png" alt="rick" width="200"/>
# </center>
# </p>
# """
# article = "<p style='text-align: center'><a href='https://medium.com/geekculture/discord-bot-using-dailogpt-and-huggingface-api-c71983422701' target='_blank'>Complete Tutorial</a></p><p style='text-align: center'><a href='https://dagshub.com/kingabzpro/DailoGPT-RickBot' target='_blank'>Project is Available at DAGsHub</a></p></center><center><img src='https://visitor-badge.glitch.me/badge?page_id=kingabzpro/Rick_and_Morty_Bot' alt='visitor badge'></center></p>"

# examples = [["How are you Rick?"]]

# from transformers import AutoModelForCausalLM, AutoTokenizer
# import torch

# tokenizer = AutoTokenizer.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
# model = AutoModelForCausalLM.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")

# def predict(input, history=[]):
#     # tokenize the new user message, appending the end-of-sequence token
#     new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
#     # append the new user input tokens to the chat history
#     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
#     # generate a response, keeping the full token history for the next turn
#     history = model.generate(bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id).tolist()
#     # decode the tokens and split the conversation on the end-of-text marker
#     response = tokenizer.decode(history[0]).split("<|endoftext|>")
#     # pair up consecutive messages as (user, bot) tuples for the chatbot component
#     response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
#     return response, history

# gr.Interface(fn=predict,
#              title=title,
#              description=description,
#              article=article,
#              examples=examples,
#              flagging_callback=hf_writer,
#              allow_flagging="manual",
#              inputs=["text", "state"],
#              outputs=["chatbot", "state"],
#              theme='gradio/seafoam').launch()
import gradio as gr

# Current app: mount two existing translation Spaces inside a tabbed interface.
with gr.Blocks() as demo:
    with gr.Tab("Translate to Spanish"):
        # Load the English-to-Spanish demo from the "gradio/en2es" Space
        gr.load("gradio/en2es", src="spaces")
    with gr.Tab("Translate to French"):
        # Load the English-to-French demo from the "abidlabs/en2fr" Space
        gr.load("abidlabs/en2fr", src="spaces")

demo.launch()