yoon-eunbin commited on
Commit
00ef7ec
·
verified ·
1 Parent(s): 2562566

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +43 -0
README.md ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ datasets:
3
+ - grammarly/coedit
4
+ - Owishiboo/grammar-correction
5
+ language:
6
+ - en
7
+ base_model:
8
+ - google-t5/t5-base
9
+ pipeline_tag: text2text-generation
10
+ ---
11
+
12
+ # Quick Start (Python)
13
+
14
+ ### Installation
15
+ ```bash
16
+ pip install transformers torch sentencepiece
17
+ ```
18
+
19
+ ### Basic Usage
20
+ ```python
21
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
22
+ import torch
23
+
24
+ # Load model and tokenizer
25
+ model_name = "yoon-eunbin/t5-gec-model"
26
+ tokenizer = T5Tokenizer.from_pretrained(model_name)
27
+ model = T5ForConditionalGeneration.from_pretrained(model_name)
28
+
29
+ # Set device
30
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
31
+ model = model.to(device)
32
+
33
+ # Prepare input
34
+ text = "He has left the room when I came into the room."
35
+ inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=64).to(device)
36
+
37
+ # Generate correction
38
+ outputs = model.generate(**inputs, max_length=64)
39
+
40
+ # Decode output
41
+ corrected_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
42
+ print(f"Original: {text}")
43
+ print(f"Corrected: {corrected_text}")