Delete pipeline.py
pipeline.py  DELETED  (+0 -51)
@@ -1,51 +0,0 @@
-import numpy as np
-import tensorflow as tf
-import keras
-import json
-from keras.preprocessing.sequence import pad_sequences
-from huggingface_hub import hf_hub_download
-
-# Download model
-model_path = hf_hub_download(repo_id="NightPrince/Toxic_Classification", filename="toxic_classifier.keras")
-
-# Download tokenizer
-tokenizer_path = hf_hub_download(repo_id="NightPrince/Toxic_Classification", filename="tokenizer.json")
-
-# Load model
-model = keras.saving.load_model(model_path)
-
-# Load tokenizer
-from keras.preprocessing.text import tokenizer_from_json
-with open(tokenizer_path, "r", encoding="utf-8") as f:
-    tokenizer_json = f.read()
-tokenizer = tokenizer_from_json(tokenizer_json)
-
-# Label map (same as config)
-label_map = {
-    0: "Child Sexual Exploitation",
-    1: "Elections",
-    2: "Non-Violent Crimes",
-    3: "Safe",
-    4: "Sex-Related Crimes",
-    5: "Suicide & Self-Harm",
-    6: "Unknown S-Type",
-    7: "Violent Crimes",
-    8: "Unsafe"
-}
-
-# Pipeline function
-def classify_toxic(query, image_description):
-    max_len = 150  # Keep it same as training
-    text = query + " " + image_description
-    seq = tokenizer.texts_to_sequences([text])
-    pad = pad_sequences(seq, maxlen=max_len, padding='post', truncating='post')
-    pred = model.predict(pad, verbose=0)
-    pred_label = np.argmax(pred, axis=1)[0]
-    return label_map.get(pred_label, "Unknown")
-
-# Example usage
-if __name__ == "__main__":
-    query = "This is a dangerous post"
-    image_desc = "Knife shown in the image"
-    result = classify_toxic(query, image_desc)
-    print("Predicted Category:", result)