"""Gradio demo that serves a local causal-LM checkpoint for text generation.

Loads the model and tokenizer once at startup, then answers each request
through a HuggingFace `text-generation` pipeline wrapped in a Gradio UI.
"""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Local path to the downloaded model checkpoint.
# NOTE(review): this points at a single shard of a 2-shard checkpoint
# ("00001-of-00002"); `from_pretrained` normally expects the model
# *directory* containing all shards plus config — confirm the path.
MODEL_PATH = "pytorch_model-00001-of-00002.bin"

# Load tokenizer and model once at module level: rebuilding them (or the
# pipeline) per request would reload gigabytes of weights on every call.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    device_map="auto",  # let accelerate place layers on available devices
)

# One shared generation pipeline, reused across all requests.
_pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=200,
)


def generate_text(input_text: str) -> str:
    """Generate a model response for *input_text*.

    The prompt is wrapped in Llama-style [INST] tags, which this
    instruction-tuned checkpoint expects.

    Args:
        input_text: The user's prompt.

    Returns:
        The generated text (including the prompt, as the pipeline echoes it).
    """
    # Bug fix: the original referenced an undefined name `prompt` here.
    result = _pipe(f"[INST] {input_text} [/INST]")
    # The pipeline returns a list of {"generated_text": ...} dicts;
    # return the text itself rather than the raw list.
    return result[0]["generated_text"]


# Modern Gradio API: gr.Textbox for both input and output components
# (the gr.inputs / gr.outputs namespaces were removed in Gradio 4.x,
# and an *output* component must not come from gr.inputs in any case).
text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Wizardlm_13b_v1",
)

if __name__ == "__main__":
    text_generation_interface.launch()