NightPrince committed on
Commit
4e92e71
·
verified ·
1 Parent(s): eefec60

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -15
app.py CHANGED
@@ -1,22 +1,18 @@
1
  import gradio as gr
2
- import pickle
3
  import tensorflow as tf
4
  from tensorflow.keras.preprocessing.sequence import pad_sequences
5
  import numpy as np
6
-
7
-
8
  import keras
9
-
10
-
11
- # Load tokenizer and model
12
-
13
- #model_path = r"models/toxic_classifier.keras"
14
  from tensorflow.keras.preprocessing.text import tokenizer_from_json
15
  import json
16
 
17
- with open("tokenizer.json", "r") as f:
18
- tokenizer = tokenizer_from_json(json.load(f))
 
 
 
19
 
 
20
  model = keras.saving.load_model("hf://NightPrince/Toxic-Predict")
21
 
22
  # Label map
@@ -32,6 +28,7 @@ label_map = {
32
  8: "Unsafe"
33
  }
34
 
 
35
  def classify_toxic(query, image_desc):
36
  max_len = 150
37
  text = query + " " + image_desc
@@ -41,6 +38,7 @@ def classify_toxic(query, image_desc):
41
  pred_label = np.argmax(pred, axis=1)[0]
42
  return label_map.get(pred_label, "Unknown")
43
 
 
44
  iface = gr.Interface(
45
  fn=classify_toxic,
46
  inputs=[
@@ -49,8 +47,4 @@ iface = gr.Interface(
49
  ],
50
  outputs=gr.Textbox(label="Predicted Toxic Category"),
51
  title="Toxic Category Classifier",
52
- description="Enter a query and image description to classify the prompt into one of the toxic categories"
53
- )
54
-
55
- if __name__ == "__main__":
56
- iface.launch()
 
1
  import gradio as gr
 
2
  import tensorflow as tf
3
  from tensorflow.keras.preprocessing.sequence import pad_sequences
4
  import numpy as np
 
 
5
  import keras
 
 
 
 
 
6
  from tensorflow.keras.preprocessing.text import tokenizer_from_json
7
  import json
8
 
9
# ---- Runtime assets ----
# Rebuild the fitted tokenizer from its JSON export (produced by
# Tokenizer.to_json() at training time); tokenizer_from_json expects
# the raw JSON string, not a parsed dict.
tokenizer_path = "tokenizer.json"
with open(tokenizer_path, "r", encoding="utf-8") as tok_file:
    tokenizer_json = tok_file.read()
tokenizer = tokenizer_from_json(tokenizer_json)

# Download the trained classifier straight from the Hugging Face Hub.
model = keras.saving.load_model("hf://NightPrince/Toxic-Predict")
17
 
18
  # Label map
 
28
  8: "Unsafe"
29
  }
30
 
31
+ # Inference function
32
  def classify_toxic(query, image_desc):
33
  max_len = 150
34
  text = query + " " + image_desc
 
38
  pred_label = np.argmax(pred, axis=1)[0]
39
  return label_map.get(pred_label, "Unknown")
40
 
41
+ # Gradio interface
42
  iface = gr.Interface(
43
  fn=classify_toxic,
44
  inputs=[
 
47
  ],
48
  outputs=gr.Textbox(label="Predicted Toxic Category"),
49
  title="Toxic Category Classifier",
50
+ description="Enter a query and image description