Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
import joblib
|
| 3 |
import numpy as np
|
| 4 |
import re
|
|
@@ -7,7 +7,7 @@ import re
|
|
| 7 |
model = joblib.load("ai_detector_model.pkl") # 確認路徑正確
|
| 8 |
|
| 9 |
# 自訂簡單分句函數
|
| 10 |
-
|
| 11 |
# 以句點、問號、驚嘆號拆分,保留句尾符號
|
| 12 |
sentences = re.split(r'(?<=[.!?])\s+', text.strip())
|
| 13 |
return [s for s in sentences if s]
|
|
@@ -101,7 +101,7 @@ import numpy as np
|
|
| 101 |
import pickle
|
| 102 |
|
| 103 |
# ---------------- 載入模型 ----------------
|
| 104 |
-
model = tf.keras.models.load_model("
|
| 105 |
with open("vectorizer.pkl", "rb") as f:
|
| 106 |
vectorizer = pickle.load(f)
|
| 107 |
with open("scaler.pkl", "rb") as f:
|
|
|
|
| 1 |
+
'''import gradio as gr
|
| 2 |
import joblib
|
| 3 |
import numpy as np
|
| 4 |
import re
|
|
|
|
| 7 |
model = joblib.load("ai_detector_model.pkl") # 確認路徑正確
|
| 8 |
|
| 9 |
# 自訂簡單分句函數
|
| 10 |
+
def simple_sent_tokenize(text):
    """Split *text* into sentences.

    Splits on whitespace that follows a period, question mark, or
    exclamation mark, so each sentence keeps its ending punctuation.
    Leading/trailing whitespace is stripped first and empty pieces
    are dropped (e.g. for empty input).
    """
    stripped = text.strip()
    pieces = re.split(r'(?<=[.!?])\s+', stripped)
    return [piece for piece in pieces if piece]
|
|
|
|
| 101 |
import pickle
|
| 102 |
|
| 103 |
# ---------------- 載入模型 ----------------
|
| 104 |
+
model = tf.keras.models.load_model("AIDetect.h5") # 你的模型資料夾
|
| 105 |
with open("vectorizer.pkl", "rb") as f:
|
| 106 |
vectorizer = pickle.load(f)
|
| 107 |
with open("scaler.pkl", "rb") as f:
|