Update README.md
README.md
@@ -13,4 +13,39 @@ tags:
- 3d
- prompt
- español
---

**The model is still in the training phase. This is not the final version and may contain artifacts and perform poorly in some cases.**

## Setting Up

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel, PeftConfig

# Define the repository ID
repo_id = "Miguelpef/bart-base-lora-3DPrompt"

# Load the PEFT configuration from the Hub
peft_config = PeftConfig.from_pretrained(repo_id)

# Load the base model from the Hub
model = AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path)

# Load the tokenizer from the Hub
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# Wrap the base model with the PEFT adapter
model = PeftModel.from_pretrained(model, repo_id)

# Now you can use the model for inference
def generar_prompt_desde_objeto(objeto):
    # Tokenize the object description and generate a 3D prompt from it
    inputs = tokenizer(objeto, return_tensors='pt').to(model.device)
    outputs = model.generate(**inputs, max_length=100)
    prompt_generado = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return prompt_generado

mi_objeto = "Mesa grande marrón"  # Change this object
prompt_generado = generar_prompt_desde_objeto(mi_objeto)
print(prompt_generado)
```
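
Optionally, you can fold the LoRA weights into the base model so that inference no longer depends on the `peft` wrapper. Below is a minimal sketch using PEFT's `merge_and_unload()`; the `merged-bart-3dprompt` directory name is just an illustrative choice, not part of this repository:

```python
# Optional: merge the LoRA weights into the base model for standalone use.
# merge_and_unload() returns a plain transformers model with the adapter folded in.
merged_model = model.merge_and_unload()

# Save the merged model and tokenizer locally
# ("merged-bart-3dprompt" is a hypothetical path; any directory works).
merged_model.save_pretrained("merged-bart-3dprompt")
tokenizer.save_pretrained("merged-bart-3dprompt")
```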