Update README.md
Browse files

README.md — CHANGED

@@ -63,24 +63,38 @@ The model is fine-tuned on specific data (cryptocurrency news and price data) and
Old version (README lines 63–84, lines marked `-` were removed):

 63    To start using the model for predictions, you can follow the example code below:
 64
 65    ```python
 66  - from …                    [remainder of import line lost in page extraction]
 67  - import …                  [remainder of import line lost in page extraction]
 68
 69  - [removed line — content lost in page extraction]
 70  - [removed line — content lost in page extraction]
 71  - model = AutoModelForCausalLM.from_pretrained(model_name)
 72  - tokenizer = AutoTokenizer.from_pretrained(model_name)
 73
 74  - # Example input: news articles and price data
 75  - input_text = "[INST]Bitcoin price surges as ETF approval rumors circulate...[/INST]"
 76
 77  - [removed line — content lost in page extraction]
 78  - [removed line — content lost in page extraction]
 79  - [removed line — content lost in page extraction]
 80
 81  - [removed line — content lost in page extraction]
 82  - [removed line — content lost in page extraction]
 83  - [removed line — content lost in page extraction]
 84    ```
 85
 86    ## Training Details
New version (README lines 63–100, lines marked `+` were added):

 63    To start using the model for predictions, you can follow the example code below:
 64
 65    ```python
 66  + from peft import AutoPeftModelForCausalLM
 67  + from transformers import AutoTokenizer
 68
 69  + from huggingface_hub import login
 70  + login("YOUR TOKEN HERE")
 71
 72
 73  + PROMPT = "[INST]YOUR PROMPT HERE[/INST]"
 74  + MAX_LENGTH = 32768  # Do not change
 75  + DEVICE = "cpu"
 76
 77  +
 78  + model_id = "agarkovv/CryptoTrader-LM"
 79  + base_model_id = "mistralai/Ministral-8B-Instruct-2410"
 80  +
 81  + model = AutoPeftModelForCausalLM.from_pretrained(model_id)
 82  + tokenizer = AutoTokenizer.from_pretrained(base_model_id)
 83  +
 84  + model = model.to(DEVICE)
 85  + model.eval()
 86  + inputs = tokenizer(
 87  +     PROMPT, return_tensors="pt", padding=False, max_length=MAX_LENGTH, truncation=True
 88  + )
 89  + inputs = {key: value.to(model.device) for key, value in inputs.items()}
 90  +
 91  + res = model.generate(
 92  +     **inputs,
 93  +     use_cache=True,
 94  +     max_new_tokens=MAX_LENGTH,
 95  + )
 96  + output = tokenizer.decode(res[0], skip_special_tokens=True)
 97  + print(output)
 98    ```
 99
100    ## Training Details