Update README.md
Browse files
README.md
CHANGED
@@ -75,13 +75,18 @@ GPT-124M is a lightweight generative language model fine-tuned on the `fineweb-edu` dataset

**Before:**

You can use this model for text generation using the `transformers` library.

```python
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

model_name = "samkeet/GPT_124M"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True, device="cpu")
result = pipe("Earth revolves around the", do_sample=True, max_length=40, temperature=0.9, top_p=0.5, top_k=50)
print(result)
```
**After:**

You can use this model for text generation using the `transformers` library.

```python
# Import necessary modules from transformers
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load tokenizer and model
model_name = "samkeet/GPT_124M"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

# Create text generation pipeline
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True, device="cpu")

# Generate text
result = pipe("Earth revolves around the", do_sample=True, max_length=40, temperature=0.9, top_p=0.5, top_k=50)
print(result)
```