File size: 1,839 Bytes
78749bb
 
 
 
9292d8a
78749bb
9292d8a
78749bb
 
 
 
 
 
 
 
 
2a34a73
78749bb
 
 
 
 
 
2a34a73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9292d8a
78749bb
2a34a73
 
 
 
 
 
 
9292d8a
78749bb
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch
import gradio as gr

# Hugging Face Hub repo id for the Falcon-7B-Instruct checkpoint.
model = "tiiuae/falcon-7b-instruct"

tokenizer = AutoTokenizer.from_pretrained(model)
# Text-generation pipeline. bfloat16 + device_map="auto" let accelerate place
# the weights across available devices; trust_remote_code is needed because
# the Falcon repo ships its own modelling code.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)


def generate_text(prompt, *, max_length=200, top_k=10, num_return_sequences=1):
    """Sample completions for *prompt* with the module-level pipeline.

    Args:
        prompt: The input text to continue.
        max_length: Total length (prompt + completion) in tokens.
        top_k: Top-k sampling cutoff.
        num_return_sequences: Number of independent samples to return.

    Returns:
        The pipeline's output: a list of dicts with a 'generated_text' key.
    """
    return pipeline(
        prompt,
        max_length=max_length,
        do_sample=True,
        top_k=top_k,
        num_return_sequences=num_return_sequences,
        eos_token_id=tokenizer.eos_token_id,
    )


# Demo run — same prompt and sampling settings as before, so the module still
# produces the same `sequences` binding and printed output when executed.
sequences = generate_text(
    "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
)

for seq in sequences:
    print(f"Result: {seq['generated_text']}")

# def generate_text(input_text):
#     sequences = pipeline(
#         input_text,
#         max_length=200,
#         do_sample=True,
#         top_k=10,
#         num_return_sequences=1,
#         eos_token_id=tokenizer.eos_token_id,
#     )
#     return sequences
    
    
# gr.Interface(
#     generate_text,
#     inputs="text",
#     outputs="text",
#     title="Text Generation App",
#     description="Generate text based on input.",
# ).launch()

# NOTE(review): in this draft `model` is the repo-id *string* defined above,
# so `model.generate(...)` would fail; it would need a loaded model, e.g.
# `AutoModelForCausalLM.from_pretrained(model)` — confirm before reviving.
# def generate_text(input_text):
#     input_ids = tokenizer.encode(input_text, return_tensors="pt")
#     output = model.generate(input_ids, max_length=50, num_return_sequences=1)
#     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
#     return generated_text