Adding `safetensors` variant of this model
#1
by SFconvertbot · opened
- .gitattributes +0 -4
- README.md +21 -43
- added_tokens.json +3 -0
- config.json +13 -18
- custom_tokenizer.json +1 -0
- generation_config.json +0 -7
- merges.txt +0 -0
- model.safetensors +0 -3
- model_card.yaml +17 -0
- special_tokens_map.json +13 -24
- tokenizer_config.json +57 -23
- vocab.json +0 -0
- vocab.txt +0 -0
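SFconvertbot is Hugging Face's automated conversion bot: it opens PRs that re-serialize pickle-based PyTorch checkpoints into the `safetensors` format, which stores raw tensors with no executable pickle payload. As a rough sketch of the core of such a conversion (not the bot's actual implementation, and ignoring tied-weight handling), assuming a `pytorch_model.bin` checkpoint is present:

```python
import torch
from safetensors.torch import save_file

# Illustrative pytorch_model.bin -> model.safetensors conversion,
# NOT SFconvertbot's actual code.
state_dict = torch.load("pytorch_model.bin", map_location="cpu", weights_only=True)

# safetensors requires contiguous tensors and stores no pickled code,
# which is what makes the resulting file safe to load.
state_dict = {k: v.contiguous() for k, v in state_dict.items()}
save_file(state_dict, "model.safetensors", metadata={"format": "pt"})
```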
.gitattributes
CHANGED
```diff
@@ -33,7 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-Downloaded_Repo-134M-F16.gguf filter=lfs diff=lfs merge=lfs -text
-KateAI.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
-KateAI.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
-KateAI.SOURCE.gguf filter=lfs diff=lfs merge=lfs -text
```
README.md
CHANGED
````diff
@@ -1,43 +1,21 @@
----
-language:
-- en
-pipeline_tag:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-- `model_type`: KateAIForCasualLM
-
-## Usage
-
-Please use the api of the space.
-```
-pip install gradio_client
-```
-```
-from gradio_client import Client
-
-client = Client("unamedai/Kate")
-result = client.predict(
-    message="Once,",
-    max_tokens=512,
-    temperature=0.8,
-    top_p=0.95,
-    api_name="/predict"
-)
-print(result)
-```
+---
+language:
+- en
+pipeline_tag: text2text-generation
+---
+# My Custom Model
+
+This is a custom model for text generation.
+
+## Model Details
+
+- `model_type`: Sparkoo
+
+## Usage
+
+```python
+from transformers import AutoModel, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("Sparkoo/KateAi")
+model = AutoModel.from_pretrained("Sparkoo/KateAi", from_safetensors=True)
+```
````
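One nit in the new usage snippet: `from_safetensors=True` is not an argument that `from_pretrained` accepts; the flag `transformers` actually supports is `use_safetensors=True`. A corrected sketch, keeping the repo id exactly as written in the diff:

```python
from transformers import AutoModel, AutoTokenizer

# `use_safetensors=True` forces loading from model.safetensors;
# `from_safetensors` is not a recognized from_pretrained argument.
tokenizer = AutoTokenizer.from_pretrained("Sparkoo/KateAi")
model = AutoModel.from_pretrained("Sparkoo/KateAi", use_safetensors=True)
```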
added_tokens.json
ADDED
```diff
@@ -0,0 +1,3 @@
+{
+  "[PAD]": 50257
+}
```
config.json
CHANGED
```diff
@@ -1,7 +1,7 @@
 {
   "activation_function": "gelu_new",
   "architectures": [
-    "…
+    "GPT2LMHeadModel"
   ],
   "attn_pdrop": 0.1,
   "bos_token_id": 50256,
@@ -9,28 +9,23 @@
   "eos_token_id": 50256,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
-  "model_type": "…
-  "n_ctx": …
+  "model_type": "gpt2",
+  "n_ctx": 1024,
   "n_embd": 768,
-  "…
-  "…
-  "…
-  "hidden_size": 768,
-  "pad_token_id": 50256,
-  "max_seq_length": 512,
-  "intermediate_size": 3072,
-  "dropout": 0.1,
-  "reorder_and_upcast_attn": false,
+  "n_head": 12,
+  "n_layer": 12,
+  "n_positions": 1024,
   "resid_pdrop": 0.1,
-  "scale_attn_by_inverse_layer_idx": false,
-  "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
   "summary_proj_to_labels": true,
   "summary_type": "cls_index",
   "summary_use_proj": true,
-  "…
-
-
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
   "vocab_size": 50257
-}
+}
```
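After this change, config.json describes a standard GPT-2 small checkpoint (12 layers, 12 heads, 768-dim embeddings, 1024-token context, roughly 124M parameters). A minimal sketch of sanity-checking that, assuming the `Sparkoo/KateAi` repo id from the README:

```python
from transformers import AutoConfig, GPT2LMHeadModel

config = AutoConfig.from_pretrained("Sparkoo/KateAi")
assert config.model_type == "gpt2"
assert (config.n_layer, config.n_head, config.n_embd) == (12, 12, 768)

# "architectures" now names GPT2LMHeadModel, so the concrete class applies.
model = GPT2LMHeadModel.from_pretrained("Sparkoo/KateAi", use_safetensors=True)
```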
custom_tokenizer.json
ADDED
```diff
@@ -0,0 +1 @@
+{"word_to_index": {"[PAD]": 0, "[UNK]": 1, "[SOS]": 2, "[EOS]": 3, "a": 4, "sentence.": 5, "are": 6, "language": 7, "this": 8, "is": 9, "test": 10, "another": 11, "example": 12, "transformers": 13, "powerful": 14, "models.": 15, "let's": 16, "train": 17, "simple": 18, "model.": 19, "models": 20, "amazing": 21, "at": 22, "generating": 23, "text.": 24}, "special_tokens": ["[PAD]", "[UNK]", "[SOS]", "[EOS]"]}
```
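custom_tokenizer.json is a toy whitespace vocabulary (25 entries drawn from a few training sentences), separate from the BPE files (vocab.json/merges.txt). The repo ships no code for it, but a hedged sketch of how such a `word_to_index` map would encode text, assuming whitespace splitting with an `[UNK]` fallback:

```python
import json

with open("custom_tokenizer.json") as f:
    tok = json.load(f)

word_to_index = tok["word_to_index"]
unk = word_to_index["[UNK]"]

def encode(text: str) -> list[int]:
    # Whitespace split with [UNK] fallback; punctuation stays glued to
    # words exactly as it appears in the vocabulary ("sentence.", "text.").
    return [word_to_index.get(w, unk) for w in text.lower().split()]

print(encode("this is a test sentence."))  # [8, 9, 4, 10, 5]
```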
generation_config.json
DELETED
```diff
@@ -1,7 +0,0 @@
-{
-  "_from_model_config": true,
-  "bos_token_id": 50256,
-  "eos_token_id": 50256,
-  "pad_token_id": 50256,
-  "transformers_version": "4.47.1"
-}
```
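With generation_config.json removed, `transformers` derives generation defaults (bos/eos/pad token ids and so on) from config.json instead; the `task_specific_params["text-generation"]` block added there becomes the sampling hint the text-generation pipeline consults. A hedged sketch of reconstructing the equivalent defaults, again assuming the `Sparkoo/KateAi` repo id:

```python
from transformers import AutoConfig, GenerationConfig

config = AutoConfig.from_pretrained("Sparkoo/KateAi")

# Without a generation_config.json in the repo, generation defaults
# come from the model config itself.
gen_config = GenerationConfig.from_model_config(config)
print(gen_config.eos_token_id)  # 50256
```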
merges.txt
CHANGED
The diff for this file is too large to render. See raw diff.
model.safetensors
DELETED
```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5f33c78bcb29e09bd4f398e95ae06c46770a137acfef824ecf28e017e7866aa5
-size 537377892
```
model_card.yaml
ADDED
```diff
@@ -0,0 +1,17 @@
+
+---
+model-index:
+- name: sparkoo
+  results: []
+model-type: sparkoo
+paperswithcode-id: null
+cn-model: null
+hf-hub-id: Sparkoo/sparkooKateAI
+release-notes: null
+ethical-source: null
+language: en
+libs-version: 4.41.2
+tags:
+- sparkoo
+- seq2seq
+---
```
special_tokens_map.json
CHANGED
```diff
@@ -1,24 +1,13 @@
-{
-  "bos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": "<|endoftext|>",
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
-}
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
```
tokenizer_config.json
CHANGED
```diff
@@ -1,23 +1,57 @@
-{
-  "…
-
-
-
-  "…
-  "…
-  "…
-  "…
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
```
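The new tokenizer_config switches `tokenizer_class` to BertTokenizer with WordPiece-style special tokens at BERT's conventional ids (PAD=0, UNK=100, CLS=101, SEP=102, MASK=103), even though the repo still carries GPT-2-style vocab.json/merges.txt and a config with `vocab_size: 50257` (and added_tokens.json maps `[PAD]` to 50257, disagreeing with the id 0 given here). A hedged sketch of what loading now resolves to:

```python
from transformers import AutoTokenizer

# tokenizer_class is "BertTokenizer", so AutoTokenizer takes the WordPiece
# path backed by vocab.txt rather than GPT-2's vocab.json/merges.txt.
tokenizer = AutoTokenizer.from_pretrained("Sparkoo/KateAi")

print(type(tokenizer).__name__)                  # BertTokenizer / BertTokenizerFast
print(tokenizer.convert_tokens_to_ids("[PAD]"))  # 0, per added_tokens_decoder
```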
vocab.json
CHANGED
The diff for this file is too large to render. See raw diff.
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.