Update README.md
README.md
CHANGED
@@ -9,81 +9,27 @@ This is a custom model for text generation.
 
 ## Model Details
 
-- `model_type`:
+- `model_type`: GPT2*
 
-##
+## GPT2
 
-```python
-import torch
-from transformers import GPT2LMHeadModel, GPT2Tokenizer
-
-def generate_text(prompt, model_name, max_length, num_return_sequences):
-    # Load the tokenizer and model
-    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
-    model = GPT2LMHeadModel.from_pretrained(model_name)
-    tokenizer.pad_token = tokenizer.eos_token
-
-    # Move model to GPU if available
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    model = model.to(device)
-    model.eval()
-
-    # Encode the input prompt
-    encoded_prompt = tokenizer(prompt, return_tensors="pt", padding=True).to(device)
-
-    # Generate text
-    print("\nGenerating text...")
-    with torch.no_grad():
-        output_sequences = model.generate(
-            input_ids=encoded_prompt["input_ids"],
-            attention_mask=encoded_prompt["attention_mask"],
-            max_length=max_length,
-            temperature=0.7,
-            top_k=50,
-            top_p=0.95,
-            do_sample=True,
-            num_return_sequences=num_return_sequences,
-            pad_token_id=tokenizer.eos_token_id,
-            eos_token_id=tokenizer.eos_token_id
-        )
-
-    # Decode and print the generated text
-    for idx, sequence in enumerate(output_sequences):
-        generated_text = tokenizer.decode(sequence, skip_special_tokens=True)
-        print(f"\nGenerated sequence {idx + 1}:")
-        print(f"{generated_text}")
-        print("-" * 50)
-
-
-prompts = [
-    "Once upon a time",
-    "The artificial intelligence",
-    "In the distant future",
-    "The scientist discovered"
-]
-
-model_name = "Sparkoo/KateAI50m"
-
-# Generate text for each prompt
-for prompt in prompts:
-    print("\n" + "="*50)
-    print(f"Prompt: {prompt}")
-    print("="*50)
-    generate_text(
-        prompt=prompt,
-        model_name=model_name,
-        max_length=200,  # Adjust as needed
-        num_return_sequences=3
-    )
+This model is **NOT a fine-tune!** It uses the GPT-2 architecture, but the weights are initialized randomly rather than fine-tuned from a pretrained checkpoint.
+
+```python
+from transformers import GPT2Config, GPT2LMHeadModel
+
+# Model configuration for a smaller GPT-2 style model
+config = GPT2Config(
+    vocab_size=50257,  # Standard GPT-2 vocabulary size
+    n_positions=512,   # Maximum sequence length
+    n_ctx=512,         # Context window size
+    n_embd=512,        # Embedding dimension
+    n_layer=6,         # Number of transformer layers
+    n_head=8,          # Number of attention heads
+    bos_token_id=50256,
+    eos_token_id=50256,
+    pad_token_id=50256,
+    _name_or_path=""   # No pretrained checkpoint is associated with this config
+)
+
+# Initialize model with random weights
+model = GPT2LMHeadModel(config)
 ```
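As a quick sanity check of the added snippet, the config can be instantiated and inspected before any training. The sketch below is illustrative rather than part of the repository: it assumes a standard `transformers` install, and the use of the stock `gpt2` tokenizer is an assumption, since the snippet above does not name a tokenizer.

```python
from transformers import GPT2Config, GPT2LMHeadModel, GPT2Tokenizer

# Core sizes copied from the README snippet above
config = GPT2Config(
    vocab_size=50257,
    n_positions=512,
    n_embd=512,
    n_layer=6,
    n_head=8,
    bos_token_id=50256,
    eos_token_id=50256,
    pad_token_id=50256,
)

# Constructing from a config gives randomly initialized weights;
# nothing is downloaded and no pretrained checkpoint is loaded.
model = GPT2LMHeadModel(config)
print(f"Parameters: {model.num_parameters():,}")

# Assumption: the stock GPT-2 tokenizer, which matches vocab_size=50257.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Untrained weights should emit gibberish, a cheap check that no
# pretrained GPT-2 weights slipped in.
ids = tokenizer("Once upon a time", return_tensors="pt").input_ids
out = model.generate(ids, max_length=30, do_sample=True,
                     pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

At these sizes the 50257 × 512 token embedding (roughly 26M weights) dominates the total parameter count, which lines up broadly with the "50m" in the Sparkoo/KateAI50m repository name.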