Girinath11 committed
Commit 0efa1fa · verified · 1 Parent(s): b8b9c16

Update model_usage.py

Files changed (1)
  1. model_usage.py +26 -14
model_usage.py CHANGED
@@ -1,18 +1,30 @@
-from transformers import AutoModelForCausalLM, GPT2Tokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 model = AutoModelForCausalLM.from_pretrained(
     "Girinath11/recursive-language-model-48m",
     trust_remote_code=True
 )
-tokenizer = GPT2Tokenizer.from_pretrained(
-    "Girinath11/recursive-language-model-48m"
-)
-print("Model loaded successfully!")
-prompt = "The future of artificial intelligence"
-input_ids = tokenizer.encode(prompt, return_tensors="pt")
-outputs = model.generate(
-    input_ids,
-    max_new_tokens=50,
-    temperature=0.8,
-    do_sample=True
-)
-print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+tokenizer = AutoTokenizer.from_pretrained("gpt2")
+tokenizer.pad_token = tokenizer.eos_token
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = model.to(device)
+model.eval()
+print(f"Model loaded on {device}\n")
+prompts = [
+    "The future of artificial intelligence",
+    "Once upon a time",
+    "The key to success is"
+]
+for prompt in prompts:
+    print(f"Prompt: {prompt}")
+    inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
+    with torch.no_grad():
+        outputs = model.generate(
+            inputs,
+            max_new_tokens=50,
+            temperature=0.8,
+            top_p=0.9,
+            do_sample=True
+        )
+    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    print(f"{text}\n")
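The updated script runs the prompts one at a time. As a minimal sketch (not part of the commit), the same three prompts could instead go through a single batched generate call, assuming the repo's remote model code accepts an attention mask the way standard Transformers causal LMs do. It reuses model, tokenizer, device, and prompts exactly as defined in the diff above; setting pad_token = eos_token there is what makes padding possible for the GPT-2 tokenizer.

# Hypothetical batched variant -- a sketch, not the committed code.
# Assumes model, tokenizer, device, and prompts are set up as above.
tokenizer.padding_side = "left"  # left-pad so new tokens continue the real text
batch = tokenizer(prompts, return_tensors="pt", padding=True).to(device)

with torch.no_grad():
    outputs = model.generate(
        **batch,  # supplies input_ids and attention_mask together
        max_new_tokens=50,
        temperature=0.8,
        top_p=0.9,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )

for seq in outputs:
    print(tokenizer.decode(seq, skip_special_tokens=True), "\n")

Left padding matters here: with right padding, the shorter prompts would have their sampled continuations appended after pad tokens rather than after the prompt text itself.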