import gradio as gr

# from gpt4all import GPT4All
# model = GPT4All("wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin")

#----------------------------------------------------------------------------------------------------------------------------
from transformers import AutoModelForCausalLM, AutoTokenizer, logging

# Path to the model directory (assuming it's in the same directory as your script)
model_directory = "./"
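
# Optional sanity check (a sketch, not in the original script): fail early with a
# clearer message if model_directory does not point at a checkpoint.
import os
if not os.path.exists(os.path.join(model_directory, "config.json")):
    raise FileNotFoundError(f"No config.json found in {model_directory}")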

# Load the model and tokenizer.
# from_tf=True loads TensorFlow-format weights, so TensorFlow must be installed
# alongside transformers.
model = AutoModelForCausalLM.from_pretrained(model_directory, from_tf=True)
tokenizer = AutoTokenizer.from_pretrained(model_directory, trust_remote_code=True)
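
# Optional (a sketch, not part of the original script): run on GPU when available.
# If enabled, generate_text below would also need input_ids.to(model.device).
# import torch
# model.to("cuda" if torch.cuda.is_available() else "cpu")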

# Direct generation example (kept commented for reference):
# prompt = "What is a large language model?"
# input_ids = tokenizer.encode(prompt, return_tensors="pt")

# output = model.generate(input_ids, max_length=200, num_return_sequences=1)
# generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
# print(generated_text)
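
# Sampling variant (a sketch; do_sample/temperature/top_p are standard
# transformers generate() kwargs, not settings from the original script):
# output = model.generate(input_ids, max_length=200, do_sample=True,
#                         temperature=0.7, top_p=0.9)
# print(tokenizer.decode(output[0], skip_special_tokens=True))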

# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Ignore warnings
logging.set_verbosity(logging.CRITICAL)

# Pipeline-based alternative (kept commented; uses the model/tokenizer loaded above)
# prompt = "What is a large language model?"
# pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
# result = pipe(f"<s>[INST] {prompt} [/INST]")
# print(result[0]['generated_text'])


def generate_text(prompt):
    # Tokenize the prompt, generate up to 200 tokens greedily, and decode the result.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=200, num_return_sequences=1)
    result = tokenizer.decode(output[0], skip_special_tokens=True)
    return result
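
# Quick local check (illustrative; kept commented so the script goes straight
# to the Gradio UI below):
# print(generate_text("What is a large language model?"))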

text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
    title="GPT-4 Text Generation",
)
text_generation_interface.launch()
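
# Note: launch() also accepts share=True (temporary public link) and
# server_name/server_port for network binding; both are standard
# gradio launch() options.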



# model_name = ""