Sirclavin commited on
Commit
68daafb
·
1 Parent(s): 7b043bd

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +36 -2
README.md CHANGED
@@ -43,8 +43,42 @@ NeXGen can be directly employed for:
43
 
44
  ## Getting Started
45
 
46
- To use NeXGen, follow these steps:
47
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
 
50
 
 
43
 
44
  ## Getting Started
45
 
46
+ To download and run NeXGen, use the following code:
47
+ ```python
48
from transformers import AutoTokenizer, AutoModelForCausalLM

# Pull the NeXGen checkpoint from the Hugging Face Model Hub.
# The first call downloads and caches the weights; later calls reuse the cache.
model_name = "Sirclavin/NeXGen-based"  # Hub repository id

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
54
+
55
def generate_text(prompt, max_length=100, num_beams=5, no_repeat_ngram_size=2, top_k=50, top_p=0.95, temperature=0.7):
    """Generate a text continuation of *prompt* with the loaded NeXGen model.

    Args:
        prompt: Input string the model should continue.
        max_length: Total length (prompt + continuation) in tokens.
        num_beams: Beam-search width.
        no_repeat_ngram_size: Forbid repeating n-grams of this size.
        top_k, top_p, temperature: Sampling knobs. NOTE(review): without
            ``do_sample=True`` these are ignored by pure beam search — pass
            ``do_sample=True`` to ``generate`` if sampling is intended.

    Returns:
        The decoded output string, special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # A single un-padded prompt attends to every position, so the mask is all
    # ones. Deriving it via ``input_ids.ne(tokenizer.pad_token_id)`` crashes
    # when the tokenizer defines no pad token (pad_token_id is None, and
    # Tensor.ne(None) raises TypeError) — the common case for causal LMs.
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        attention_mask = input_ids.new_ones(input_ids.shape)
    else:
        attention_mask = input_ids.ne(pad_id).long()

    # Generate the continuation.
    output = model.generate(
        input_ids,
        max_length=max_length,
        num_beams=num_beams,
        no_repeat_ngram_size=no_repeat_ngram_size,
        top_k=top_k,
        top_p=top_p,
        temperature=temperature,
        attention_mask=attention_mask,  # explicit mask avoids a warning and ambiguity
    )

    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    return decoded_output
75
+
76
# Example: generate up to 200 tokens for a placeholder prompt.
sample_prompt = "Your prompt here"
result = generate_text(sample_prompt, max_length=200)

print("Generated Text:")
print(result)
82
 
83
 
84