shing12345 committed on
Commit
5d37951
·
verified ·
1 Parent(s): aaaf3bd

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +82 -0
README.md CHANGED
@@ -54,5 +54,87 @@ def main():
54
  print("Summary:")
55
  print(summary)
56
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  if __name__ == "__main__":
58
  main()
 
54
  print("Summary:")
55
  print(summary)
56
 
57
+ if __name__ == "__main__":
58
+ main()
59
+ ---
60
+ import torch
61
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
62
+ import argparse
63
+ import sys
64
+
65
class AdvancedTextGenerator:
    """Wrapper around a GPT-2 model providing configurable sampling-based text generation."""

    def __init__(self, model_name="gpt2-medium"):
        """Load the model and tokenizer, placing the model on GPU when available.

        Args:
            model_name: Hugging Face model identifier to load.

        Exits the process with status 1 if loading fails (bad model name,
        no network, etc.) so the CLI fails fast instead of crashing later.
        """
        try:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            print(f"Using device: {self.device}")
            self.model = GPT2LMHeadModel.from_pretrained(model_name).to(self.device)
            self.tokenizer = GPT2Tokenizer.from_pretrained(model_name)
        except Exception as e:
            print(f"Error initializing the model: {e}")
            sys.exit(1)

    def generate_text(self, prompt, max_length=100, num_return_sequences=1,
                      temperature=1.0, top_k=50, top_p=0.95, repetition_penalty=1.0):
        """Generate sampled continuations of *prompt*.

        Args:
            prompt: Seed text to continue.
            max_length: Number of tokens to generate beyond the prompt
                (the prompt length is added back because ``generate``'s
                ``max_length`` counts prompt tokens too).
            num_return_sequences: Number of independent samples to return.
            temperature, top_k, top_p, repetition_penalty: Sampling knobs
                forwarded unchanged to ``model.generate``.

        Returns:
            A list of generated strings with the echoed prompt stripped,
            or a single-element list containing an error message on failure.
        """
        try:
            input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(self.device)

            # Configure output parameters; do_sample=True enables stochastic decoding.
            output_sequences = self.model.generate(
                input_ids=input_ids,
                max_length=max_length + len(input_ids[0]),
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                do_sample=True,
                num_return_sequences=num_return_sequences,
            )

            # Decode the prompt once (loop-invariant) so it can be stripped
            # from the front of every decoded sample.
            prompt_text = self.tokenizer.decode(
                input_ids[0], clean_up_tokenization_spaces=True
            )
            generated_sequences = []
            for generated_sequence in output_sequences:
                text = self.tokenizer.decode(
                    generated_sequence.tolist(), clean_up_tokenization_spaces=True
                )
                # Keep only the newly generated continuation.
                generated_sequences.append(text[len(prompt_text):])

            return generated_sequences
        except Exception as e:
            # Surface the failure to the caller as data instead of raising,
            # so the CLI can print it like a normal result.
            return [f"Error during text generation: {e}"]
103
+
104
def main():
    """CLI entry point: parse sampling options, obtain a prompt, and print samples."""
    parser = argparse.ArgumentParser(description="Advanced Text Generator")
    # Table-driven registration: (flag, type, default, help text).
    for flag, arg_type, default, description in [
        ("--prompt", str, None, "Starting prompt for text generation"),
        ("--max_length", int, 100, "Maximum length of generated text"),
        ("--num_sequences", int, 1, "Number of sequences to generate"),
        ("--temperature", float, 1.0, "Temperature for sampling"),
        ("--top_k", int, 50, "Top-k sampling parameter"),
        ("--top_p", float, 0.95, "Top-p sampling parameter"),
        ("--repetition_penalty", float, 1.0, "Repetition penalty"),
    ]:
        parser.add_argument(flag, type=arg_type, default=default, help=description)
    args = parser.parse_args()

    generator = AdvancedTextGenerator()

    # Fall back to interactive input when --prompt was not supplied.
    if args.prompt:
        prompt = args.prompt
    else:
        print("Please enter the prompt for text generation:")
        prompt = input().strip()

    samples = generator.generate_text(
        prompt,
        max_length=args.max_length,
        num_return_sequences=args.num_sequences,
        temperature=args.temperature,
        top_k=args.top_k,
        top_p=args.top_p,
        repetition_penalty=args.repetition_penalty,
    )

    print("\nGenerated Text(s):")
    for sequence_number, sample in enumerate(samples, 1):
        print(f"\n--- Sequence {sequence_number} ---")
        print(sample)
138
+
139
# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()