Spaces:
Runtime error
Runtime error
import gradio as gr
# NOTE(review): transformers import is unused in this file's active code path
# (the AutoModel/AutoTokenizer path is superseded by GPT4All below); retained
# in case another part of the deployment relies on it being importable.
from transformers import AutoModelForCausalLM, AutoTokenizer

from gpt4all import GPT4All

# Load the local GGML-quantized WizardLM model once at import time so every
# generate_text() call reuses the same in-memory instance. The .bin file must
# be present in the working directory (or the gpt4all model cache).
model = GPT4All("wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin")
def generate_text(input_text):
    """Generate a text completion for the given prompt.

    Args:
        input_text: The prompt string entered by the user in the UI.

    Returns:
        The completion produced by the module-level GPT4All ``model``.
    """
    # Delegate directly to gpt4all; default generation parameters are used.
    return model.generate(input_text)
# Build and launch the Gradio UI.
# FIX: `gr.inputs.Textbox` / `gr.outputs.Textbox` were removed in Gradio 3.x;
# referencing them raises AttributeError at import on a current runtime (the
# likely cause of this Space's "Runtime error"). Components are now passed
# directly as `gr.Textbox(...)`.
text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
    title="GPT-4 Text Generation",
)
text_generation_interface.launch()