gouthamsai78 commited on
Commit
e7ac24f
·
verified ·
1 Parent(s): 98dea4a

Add usage example file

Browse files
Files changed (1) hide show
  1. example_usage.py +44 -0
example_usage.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # STACKS Usage Example
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
+
5
def load_stacks():
    """Download and return the STACKS causal LM together with its tokenizer.

    The model is loaded in bfloat16 with ``device_map="auto"`` so weights are
    placed on the best available device(s) automatically; eager attention is
    requested explicitly.

    Returns:
        tuple: ``(model, tokenizer)`` ready for generation.
    """
    repo_id = "gouthamsai78/STACKS"
    load_kwargs = {
        "torch_dtype": torch.bfloat16,
        "device_map": "auto",
        "attn_implementation": "eager",
    }
    stacks_model = AutoModelForCausalLM.from_pretrained(repo_id, **load_kwargs)
    stacks_tokenizer = AutoTokenizer.from_pretrained(repo_id)
    return stacks_model, stacks_tokenizer
15
+
16
def generate_prompt(role, model, tokenizer, temperature=0.8):
    """Generate a creative prompt for someone acting as *role*.

    Args:
        role: Role name inserted into the task template (e.g. ``"chef"``).
        model: Causal LM exposing ``.generate()`` and ``.device``
            (as returned by ``load_stacks``).
        tokenizer: Tokenizer matching ``model``.
        temperature: Sampling temperature; higher values give more varied text.

    Returns:
        str: The generated prompt text, with the instruction prefix removed
        and surrounding whitespace stripped.
    """
    input_text = (
        f"### Task: Generate a creative prompt for someone acting as {role}\n"
        "### Generated Prompt:"
    )
    inputs = tokenizer(input_text, return_tensors="pt")
    # Fix: the model is loaded with device_map="auto" and may live on a GPU,
    # while the tokenizer always returns CPU tensors — move them over.
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=temperature,
            do_sample=True,
            top_p=0.9,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Fix: decode only the newly generated tokens. String-slicing the full
    # decode with response[len(input_text):] is fragile because
    # skip_special_tokens / tokenizer normalization can change the rendered
    # length of the prompt, clipping or leaking characters.
    prompt_len = inputs["input_ids"].shape[1]
    generated_ids = outputs[0][prompt_len:]
    return tokenizer.decode(generated_ids, skip_special_tokens=True).strip()
34
+
35
# Example usage: load STACKS once, then sample one prompt per role.
if __name__ == "__main__":
    stacks_model, stacks_tokenizer = load_stacks()

    # Generate prompts for different roles
    for persona in ("chef", "detective", "astronaut", "teacher", "artist"):
        generated = generate_prompt(persona, stacks_model, stacks_tokenizer)
        print(f"**{persona.title()}**: {generated}\n")