Spaces: Runtime error
Commit · c12b438
Parent(s): 8272482
Update app.py
app.py CHANGED
@@ -18,23 +18,23 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 # pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
 # result = pipe(f"<s>[INST] {input_text} [/INST]")
 # return result[0]['generated_text']
-
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from fastapi import FastAPI
 
-
+# Specify the path to your fine-tuned model and tokenizer
+model_path = "./" # Assuming the model is in the same directory as your notebook
+model_name = "pytorch_model-00001-of-00002.bin" # Replace with your model name
 
-
+# Load the model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(model_path)
+tokenizer = AutoTokenizer.from_pretrained(model_path)
 
-
-
+# Example usage
+input_text = "Once upon a time"
+input_ids = tokenizer(input_text, return_tensors="pt").input_ids
+output = model.generate(input_ids, max_length=50, num_return_sequences=1)
+generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
-
-async def generate_text(prompt: str):
-    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
-    output = model.generate(input_ids, max_length=50, num_return_sequences=1)
-    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
-    return {"generated_text": generated_text}
+print(generated_text)
 
 
 text_generation_interface = gr.Interface(
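Note that in the committed version `model_name` is assigned but never used: `AutoModelForCausalLM.from_pretrained(model_path)` resolves sharded checkpoint files such as pytorch_model-00001-of-00002.bin through the directory's index file on its own. For reference, below is a minimal sketch of how the tokenize, generate, and decode sequence added here is typically wrapped in a function and wired into the `gr.Interface(` call the hunk ends at. The function name `generate`, the Textbox components, and the launch guard are illustrative assumptions, not part of this commit:

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load once at startup; model_path must contain config.json, tokenizer files,
# and the weight shards (the index file resolves the individual .bin pieces).
model_path = "./"
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

def generate(prompt: str) -> str:
    # Tokenize the prompt, generate up to 50 tokens total (the prompt counts
    # toward max_length), and decode the output ids back to text.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    output = model.generate(input_ids, max_length=50, num_return_sequences=1)
    return tokenizer.decode(output[0], skip_special_tokens=True)

text_generation_interface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Generated text"),
)

if __name__ == "__main__":
    text_generation_interface.launch()

If the Space's working directory is missing config.json or the tokenizer files, both from_pretrained calls raise at import time, which is one common way a Space ends up in the "Runtime error" state shown above.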