clemsadand committed on
Commit
b37710e
·
verified ·
1 Parent(s): af20038

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -6
README.md CHANGED
@@ -35,7 +35,7 @@ The Quote Generator is built on top of the GPT-2 model, fine-tuned using the Low
35
 
36
  - **Repository:** [Quote Generator](https://huggingface.co/clemsadand/quote_generator/)
37
  <!-- - **Paper [optional]:** N/A -->
38
- <!-- - **Demo [optional]:** N/A -->
39
 
40
  ## Uses
41
 
@@ -77,7 +77,7 @@ Use the code below to get started with the model.
77
 
78
  ```python
79
  from peft import PeftModel, PeftConfig
80
- from transformers import AutoModelForCausalLM
81
 
82
  config = PeftConfig.from_pretrained("clemsadand/quote_generator")
83
  base_model = AutoModelForCausalLM.from_pretrained("gpt2")
@@ -85,10 +85,14 @@ model = PeftModel.from_pretrained(base_model, "clemsadand/quote_generator")
85
 
86
  tokenizer = AutoTokenizer.from_pretrained("gpt2")
87
 
88
- input_text = "Generate a quote about kindness with the keywords compassion, empathy, help, generosity, care"
89
- input_ids = tokenizer.encode(input_text, return_tensors="pt")
 
 
 
 
 
90
 
91
- output = model.generate(input_ids)
92
- generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
93
 
94
  print(generated_text)
 
35
 
36
  - **Repository:** [Quote Generator](https://huggingface.co/clemsadand/quote_generator/)
37
  <!-- - **Paper [optional]:** N/A -->
38
+ - **Demo [optional]:** N/A
39
 
40
  ## Uses
41
 
 
77
 
78
  ```python
79
  from peft import PeftModel, PeftConfig
80
+ from transformers import AutoModelForCausalLM, AutoTokenizer
81
 
82
  config = PeftConfig.from_pretrained("clemsadand/quote_generator")
83
  base_model = AutoModelForCausalLM.from_pretrained("gpt2")
 
85
 
86
  tokenizer = AutoTokenizer.from_pretrained("gpt2")
87
 
88
+ def generate_quote(input_text):
89
+     input_tensor = tokenizer(input_text, return_tensors="pt")
90
+     output = model.generate(input_tensor["input_ids"], attention_mask=input_tensor["attention_mask"],
91
+                             max_length=64, num_beams=5, no_repeat_ngram_size=2,
92
+                             early_stopping=True, pad_token_id=tokenizer.eos_token_id, do_sample=True, temperature=0.7)
93
+     output = tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
94
+     return output
95
 
96
+ input_text = "Generate a quote about kindness with the keywords compassion, empathy, help, generosity, care"
 
97
 
98
  print(generate_quote(input_text))