File size: 870 Bytes
8a97208
0010001
 
 
8384864
0010001
8384864
fd23bfc
0010001
8384864
 
 
0010001
8384864
 
0010001
 
8384864
0010001
8a97208
0010001
8a97208
0010001
 
 
 
 
8384864
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from gpt4all import GPT4All
import os

# Local filename of the GGML model weights.
# BUG FIX: the original value was a Hugging Face *web page* URL
# ("https://huggingface.co/.../blob/main/...") while the existence check below
# treats it as a filesystem path — os.path.exists(url) is always False, so the
# script unconditionally raised FileNotFoundError and never loaded the model.
# Download the weights next to this script first, e.g. from:
# https://huggingface.co/spaces/DR-Rakshitha/wizardlm_api/resolve/main/wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin
model_path = "wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin"

# Fail fast with a clear message if the weights are missing locally.
if not os.path.exists(model_path):
    raise FileNotFoundError(
        f"Model file not found at {model_path}. Please download it manually."
    )

# Initialize the GPT4All model from the local weights file.
model = GPT4All(model_path)

def generate_text(input_text):
    """Return the GPT4All model's completion for ``input_text``.

    Thin wrapper used as the Gradio callback; delegates directly to the
    module-level ``model`` loaded at import time.
    """
    return model.generate(input_text)

# Build and launch the Gradio UI.
# FIXES: (1) the deprecated pre-3.x ``gr.inputs.*`` namespace (removed in
# Gradio 3/4) is replaced by the top-level ``gr.Textbox`` component;
# (2) the original passed a ``gr.inputs.Textbox`` — an *input* class — as the
# ``outputs`` component, which was wrong even under the legacy API;
# (3) the title said "Falcon-7B Instruct" although the weights loaded above
# are WizardLM-13B, so the label is corrected to match the actual model.
text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Input Text"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="WizardLM-13B v1.1",
).launch()