wizardlm_api / app.py
import os

import gradio as gr
from gpt4all import GPT4All
# Local path to the downloaded GGML model file (download it from the
# DR-Rakshitha/wizardlm_api Space on Hugging Face before starting the app)
model_path = "wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin"

# Check that the model file exists locally
if not os.path.exists(model_path):
    raise FileNotFoundError(
        f"Model file not found at {model_path}. Please download it manually."
    )
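
# A minimal sketch of fetching the weights automatically instead of downloading
# them by hand, assuming the file is hosted in the DR-Rakshitha/wizardlm_api
# Space; hf_hub_download comes from the huggingface_hub package, and the
# repo_id/filename below are taken from the original download URL:
#
#     from huggingface_hub import hf_hub_download
#     model_path = hf_hub_download(
#         repo_id="DR-Rakshitha/wizardlm_api",
#         repo_type="space",
#         filename="wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin",
#     )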
# Initialize the GPT4All model
model = GPT4All(model_path)


def generate_text(input_text):
    """Generate a completion for the given prompt with the local GGML model."""
    output = model.generate(input_text)
    return output
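
# Depending on the installed gpt4all version, generate() also accepts sampling
# parameters; the keyword names below (max_tokens, temp) follow the current
# gpt4all Python bindings and are an assumption, not part of the original app:
#
#     output = model.generate(input_text, max_tokens=200, temp=0.7)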

text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Input Text"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="WizardLM-13B",
).launch()
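
# Once the Space is running, the endpoint can also be called programmatically; a
# minimal sketch using the gradio_client package, assuming the Space name from
# this repo and Gradio's default api_name of "/predict":
#
#     from gradio_client import Client
#     client = Client("DR-Rakshitha/wizardlm_api")
#     print(client.predict("Hello, world!", api_name="/predict"))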