Add usage example file
Browse files — example_usage.py (+4, −4)
example_usage.py
CHANGED
|
@@ -17,7 +17,7 @@ def generate_prompt(role, model, tokenizer, temperature=0.8):
|
|
| 17 |
"""Generate creative prompt for given role"""
|
| 18 |
input_text = f"### Task: Generate a creative prompt for someone acting as {role}\n### Generated Prompt:"
|
| 19 |
inputs = tokenizer(input_text, return_tensors="pt")
|
| 20 |
-
|
| 21 |
with torch.no_grad():
|
| 22 |
outputs = model.generate(
|
| 23 |
**inputs,
|
|
@@ -28,17 +28,17 @@ def generate_prompt(role, model, tokenizer, temperature=0.8):
|
|
| 28 |
repetition_penalty=1.1,
|
| 29 |
pad_token_id=tokenizer.eos_token_id
|
| 30 |
)
|
| 31 |
-
|
| 32 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 33 |
return response[len(input_text):].strip()
|
| 34 |
|
| 35 |
# Example usage
|
| 36 |
if __name__ == "__main__":
|
| 37 |
model, tokenizer = load_stacks()
|
| 38 |
-
|
| 39 |
# Generate prompts for different roles
|
| 40 |
roles = ["chef", "detective", "astronaut", "teacher", "artist"]
|
| 41 |
-
|
| 42 |
for role in roles:
|
| 43 |
prompt = generate_prompt(role, model, tokenizer)
|
| 44 |
print(f"**{role.title()}**: {prompt}\n")
|
|
|
|
| 17 |
"""Generate creative prompt for given role"""
|
| 18 |
input_text = f"### Task: Generate a creative prompt for someone acting as {role}\n### Generated Prompt:"
|
| 19 |
inputs = tokenizer(input_text, return_tensors="pt")
|
| 20 |
+
|
| 21 |
with torch.no_grad():
|
| 22 |
outputs = model.generate(
|
| 23 |
**inputs,
|
|
|
|
| 28 |
repetition_penalty=1.1,
|
| 29 |
pad_token_id=tokenizer.eos_token_id
|
| 30 |
)
|
| 31 |
+
|
| 32 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 33 |
return response[len(input_text):].strip()
|
| 34 |
|
| 35 |
# Example usage
if __name__ == "__main__":
    # Load the fine-tuned model and its tokenizer once, up front.
    model, tokenizer = load_stacks()

    # Demonstrate generation across several personas.
    for role in ("chef", "detective", "astronaut", "teacher", "artist"):
        prompt = generate_prompt(role, model, tokenizer)
        print(f"**{role.title()}**: {prompt}\n")