braindeck
committed on
Commit
·
2b3dbdf
1
Parent(s):
f5b2793
Use prompt structure from prompts directory
Browse files
app.py
CHANGED
|
@@ -2,6 +2,7 @@ import gradio as gr
|
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 3 |
import torch
|
| 4 |
import spaces
|
|
|
|
| 5 |
|
| 6 |
# Load the model and tokenizer
|
| 7 |
tokenizer = AutoTokenizer.from_pretrained("braindeck/text2text", trust_remote_code=True, subfolder="checkpoints/model")
|
|
@@ -12,7 +13,7 @@ def generate_response(prompt):
|
|
| 12 |
"""
|
| 13 |
Generates a response from the model.
|
| 14 |
"""
|
| 15 |
-
chat =
|
| 16 |
inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
|
| 17 |
input_length = inputs.shape[1]
|
| 18 |
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
|
|
@@ -49,4 +50,4 @@ with gr.Blocks() as demo:
|
|
| 49 |
)
|
| 50 |
|
| 51 |
if __name__ == "__main__":
|
| 52 |
-
demo.launch()
|
|
|
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 3 |
import torch
|
| 4 |
import spaces
|
| 5 |
+
from prompts.base_instruction import basic_instruction
|
| 6 |
|
| 7 |
# Load the model and tokenizer
|
| 8 |
tokenizer = AutoTokenizer.from_pretrained("braindeck/text2text", trust_remote_code=True, subfolder="checkpoints/model")
|
|
|
|
| 13 |
"""
|
| 14 |
Generates a response from the model.
|
| 15 |
"""
|
| 16 |
+
chat = basic_instruction(prompt, "braindeck/text2text")
|
| 17 |
inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
|
| 18 |
input_length = inputs.shape[1]
|
| 19 |
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
|
|
|
|
| 50 |
)
|
| 51 |
|
| 52 |
if __name__ == "__main__":
|
| 53 |
+
demo.launch()
|