import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# NOTE(review): this names a GGML quantized binary. `transformers` cannot load
# GGML/GGUF files directly (those need llama.cpp bindings such as
# llama-cpp-python or ctransformers); `from_pretrained` expects a Hugging Face
# model id or a directory containing config.json — TODO confirm and point this
# at a compatible checkpoint.
model_name = "wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin"

# The tokenizer is required: model.generate() consumes token ids, not raw
# strings, and its output ids must be decoded back into text.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_text(input_text: str) -> str:
    """Generate a text continuation of *input_text* with the loaded model.

    Args:
        input_text: The prompt string entered by the user.

    Returns:
        The decoded model output as a plain string.
    """
    # Encode the prompt into token ids. The original passed the raw string
    # straight to generate(), which fails — generate() expects tensors.
    inputs = tokenizer(input_text, return_tensors="pt")
    output_ids = model.generate(**inputs)
    # Decode the first (and only) sequence back to human-readable text.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)


# gr.inputs / gr.outputs were deprecated and removed in current Gradio
# releases; the component classes now live directly on the gr namespace.
text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
    title="GPT-4 Text Generation",
)
text_generation_interface.launch()