SSahas committed on
Commit
b4977fa
·
verified ·
1 Parent(s): 5077c81

Delete load_model.py

Browse files
Files changed (1) hide show
  1. load_model.py +0 -44
load_model.py DELETED
@@ -1,44 +0,0 @@
1
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

# Compare a fine-tuned GPT-2-medium chat model against the base
# "openai-community/gpt2-medium" checkpoint on the same prompt, decoding only
# the newly generated continuation from each model.

model_link = "SSahas/openai_community_med_e3"
tokenizer = AutoTokenizer.from_pretrained(model_link)
finetuned_model = AutoModelForCausalLM.from_pretrained(model_link)
original_model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")

# Single-turn chat prompt in the tokenizer's chat-template format.
# (An earlier multi-turn prompt was dead code: it was immediately
# overwritten by this assignment, so it has been removed.)
prompt = [{'role': 'user', 'content': 'Hello, My name is Sahas., How are you?'}]

# Render the chat messages to a plain string (tokenize=False) and append the
# generation prompt so the model continues as the assistant.
input_text = tokenizer.apply_chat_template(
    prompt, tokenize=False, truncation=False, add_generation_prompt=True
)
input_ids = tokenizer(input_text, padding=True, return_tensors="pt")

# 50256 is GPT-2's EOS token id; GPT-2 has no dedicated pad token, so it is
# reused as pad_token_id on BOTH configs (the original script passed it only
# to the fine-tuned model, causing a warning on the base-model call).
generation_config = GenerationConfig(
    max_new_tokens=20,
    pad_token_id=50256,
    temperature=0.5,
    do_sample=True,
)
finetuned_model_output = finetuned_model.generate(
    input_ids=input_ids['input_ids'], generation_config=generation_config
)
original_model_output = original_model.generate(
    input_ids=input_ids['input_ids'], generation_config=generation_config
)

# Slice off the prompt tokens so only the generated continuation is decoded.
prompt_len = input_ids['input_ids'].shape[1]
finetuned_model_output = tokenizer.decode(
    finetuned_model_output[0][prompt_len:], skip_special_tokens=True
)
original_model_output = tokenizer.decode(
    original_model_output[0][prompt_len:], skip_special_tokens=True
)

# Fixed typos ("outptut") and the escaped "\\n" that printed a literal
# backslash-n instead of a newline.
print("finetuned_model output\n")
print(finetuned_model_output)
print("original_model output\n")
print(original_model_output)