falcon-test-app / app.py
arshad615's picture
Update app.py
2a34a73
from transformers import AutoTokenizer, AutoModelForCausalLM
import transformers
import torch
import gradio as gr
# Hub checkpoint id; transformers.pipeline accepts the id string directly.
model = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model)

# bfloat16 + device_map="auto" lets accelerate place/shard the 7B weights;
# trust_remote_code is required because Falcon ships custom modeling code.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)


def generate_text(prompt, max_length=200):
    """Generate a sampled completion of *prompt* and return it as a string.

    Args:
        prompt: Input text for the model to continue.
        max_length: Cap on the total sequence length (prompt + completion
            tokens), forwarded to the pipeline.

    Returns:
        The ``generated_text`` of the single returned sequence (the prompt
        followed by the model's continuation).
    """
    sequences = pipeline(
        prompt,
        max_length=max_length,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )
    # num_return_sequences=1, so exactly one candidate comes back.
    return sequences[0]["generated_text"]


# One-off demo generation; the result lands in the Space build/run logs.
_DEMO_PROMPT = "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:"
print(f"Result: {generate_text(_DEMO_PROMPT)}")
# def generate_text(input_text):
# sequences = pipeline(
# "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
# max_length=200,
# do_sample=True,
# top_k=10,
# num_return_sequences=1,
# eos_token_id=tokenizer.eos_token_id,
# )
# return sequences;
# gr.Interface(
# generate_text,
# inputs="text",
# outputs="text",
# title="Text Generation App",
# description="Generate text based on input.",
# ).launch()
# def generate_text(input_text):
# input_ids = tokenizer.encode(input_text, return_tensors="pt")
# output = model.generate(input_ids, max_length=50, num_return_sequences=1)
# generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
# return generated_text