claudiubarbu committed on
Commit
599bf18
·
verified ·
1 Parent(s): 42af5b7

Push model using huggingface_hub.

Browse files
Files changed (3) hide show
  1. README.md +4 -3
  2. model.safetensors +1 -1
  3. tokenizer_config.json +4 -0
README.md CHANGED
@@ -1,5 +1,6 @@
1
  ---
2
  license: apache-2.0
 
3
  tags:
4
  - trl
5
  - ppo
@@ -25,7 +26,7 @@ You can then generate text as follows:
25
  ```python
26
  from transformers import pipeline
27
 
28
- generator = pipeline("text-generation", model="claudiubarbu//tmp/tmp57jg9o2i/claudiubarbu/HW2-ppo")
29
  outputs = generator("Hello, my llama is cute")
30
  ```
31
 
@@ -35,8 +36,8 @@ If you want to use the model for training or to obtain the outputs from the valu
35
  from transformers import AutoTokenizer
36
  from trl import AutoModelForCausalLMWithValueHead
37
 
38
- tokenizer = AutoTokenizer.from_pretrained("claudiubarbu//tmp/tmp57jg9o2i/claudiubarbu/HW2-ppo")
39
- model = AutoModelForCausalLMWithValueHead.from_pretrained("claudiubarbu//tmp/tmp57jg9o2i/claudiubarbu/HW2-ppo")
40
 
41
  inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
42
  outputs = model(**inputs, labels=inputs["input_ids"])
 
1
  ---
2
  license: apache-2.0
3
+ library_name: transformers
4
  tags:
5
  - trl
6
  - ppo
 
26
  ```python
27
  from transformers import pipeline
28
 
29
+ generator = pipeline("text-generation", model="claudiubarbu/HW2-ppo")
30
  outputs = generator("Hello, my llama is cute")
31
  ```
32
 
 
36
  from transformers import AutoTokenizer
37
  from trl import AutoModelForCausalLMWithValueHead
38
 
39
+ tokenizer = AutoTokenizer.from_pretrained("claudiubarbu/HW2-ppo")
40
+ model = AutoModelForCausalLMWithValueHead.from_pretrained("claudiubarbu/HW2-ppo")
41
 
42
  inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
43
  outputs = model(**inputs, labels=inputs["input_ids"])
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:20385ac034306cbc59dbec1fc73d6952bb0c09f3846779e763d93b8b7a4a7693
3
  size 497777468
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c1d0de370ae0f121a8ff56e74e1eda0e35af6a6397e1075596486ca8533bc61
3
  size 497777468
tokenizer_config.json CHANGED
@@ -13,8 +13,12 @@
13
  "bos_token": "<|endoftext|>",
14
  "clean_up_tokenization_spaces": true,
15
  "eos_token": "<|endoftext|>",
 
16
  "model_max_length": 1024,
17
  "pad_token": "<|endoftext|>",
 
18
  "tokenizer_class": "GPT2Tokenizer",
 
 
19
  "unk_token": "<|endoftext|>"
20
  }
 
13
  "bos_token": "<|endoftext|>",
14
  "clean_up_tokenization_spaces": true,
15
  "eos_token": "<|endoftext|>",
16
+ "max_length": 512,
17
  "model_max_length": 1024,
18
  "pad_token": "<|endoftext|>",
19
+ "stride": 0,
20
  "tokenizer_class": "GPT2Tokenizer",
21
+ "truncation_side": "right",
22
+ "truncation_strategy": "longest_first",
23
  "unk_token": "<|endoftext|>"
24
  }