Update README.md
Browse files
README.md
CHANGED
|
@@ -29,7 +29,7 @@ base_model:
|
|
| 29 |
pipeline_tag: text-generation
|
| 30 |
library_name: transformers
|
| 31 |
---
|
| 32 |
-
#
|
| 33 |
|
| 34 |
Merged model integrating advanced reasoning, vision processing, and tool-use capabilities. Designed for unrestricted, unfiltered, unbiased operation with extended context support up to 40k tokens and generation up to 45k tokens.
|
| 35 |
|
|
@@ -50,7 +50,7 @@ No ethical constraints, no biases imposed, complete compliance with any query.
|
|
| 50 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 51 |
import torch
|
| 52 |
|
| 53 |
-
model_id = "your-username/"
|
| 54 |
|
| 55 |
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=True)
|
| 56 |
|
|
|
|
| 29 |
pipeline_tag: text-generation
|
| 30 |
library_name: transformers
|
| 31 |
---
|
| 32 |
+
# Shay
|
| 33 |
|
| 34 |
Merged model integrating advanced reasoning, vision processing, and tool-use capabilities. Designed for unrestricted, unfiltered, unbiased operation with extended context support up to 40k tokens and generation up to 45k tokens.
|
| 35 |
|
|
|
|
| 50 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 51 |
import torch
|
| 52 |
|
| 53 |
+
model_id = "your-username/Shay"
|
| 54 |
|
| 55 |
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=True)
|
| 56 |
|