Update README.md
Browse files
README.md
CHANGED
```diff
@@ -31,7 +31,7 @@ To use the SpanCNN model for toxic text classification, follow the example below
 from transformers import pipeline

 # Load the SpanCNN model
-classifier = pipeline("spancnn-classification", model="ZetangForward/
+classifier = pipeline("spancnn-classification", model="ZetangForward/SegmentCNN", trust_remote_code=True)

 # Example 1: Safe text
 pos_text = "You look good today~!"
```