Update app.py
Browse files
app.py
CHANGED
|
@@ -97,31 +97,32 @@ demo.launch()'''
|
|
| 97 |
|
| 98 |
import gradio as gr
|
| 99 |
import tensorflow as tf
|
| 100 |
-
import numpy as np
|
| 101 |
import pickle
|
| 102 |
|
| 103 |
# ---------------- 載入模型 ----------------
|
| 104 |
-
model = tf.keras.models.load_model("AIDetect.h5") # 你的模型
|
| 105 |
with open("vectorizer.pkl", "rb") as f:
|
| 106 |
vectorizer = pickle.load(f)
|
| 107 |
with open("scaler.pkl", "rb") as f:
|
| 108 |
scaler = pickle.load(f)
|
| 109 |
|
| 110 |
-
# ---------------- 特徵計算 ----------------
|
| 111 |
def compute_features(text):
|
| 112 |
words = text.split()
|
| 113 |
word_count = len(words)
|
| 114 |
unique_word_ratio = len(set(words)) / (word_count + 1e-6)
|
| 115 |
repeat_rate = 1 - unique_word_ratio
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
|
|
|
|
|
|
| 119 |
|
| 120 |
# ---------------- 生成解釋 ----------------
|
| 121 |
def explain_prediction(text):
|
| 122 |
# 文字向量化
|
| 123 |
-
seq = vectorizer
|
| 124 |
-
seq = tf.keras.
|
| 125 |
|
| 126 |
# 統計特徵
|
| 127 |
feat = compute_features(text)
|
|
@@ -134,11 +135,11 @@ def explain_prediction(text):
|
|
| 134 |
|
| 135 |
# 判斷依據
|
| 136 |
reasons = []
|
| 137 |
-
if feat[0
|
| 138 |
-
if feat[0
|
| 139 |
-
if feat[0
|
| 140 |
-
if feat[0
|
| 141 |
-
if feat[0
|
| 142 |
if not reasons: reasons.append("句子長度與用詞平均")
|
| 143 |
explanation = ";".join(reasons)
|
| 144 |
|
|
|
|
| 97 |
|
| 98 |
import gradio as gr
import tensorflow as tf
import pickle

# ---------------- Load model and preprocessing artifacts ----------------
# NOTE(review): pickle.load is only safe on trusted files — these artifacts
# are assumed to ship with the app; confirm they are never user-supplied.
model = tf.keras.models.load_model("AIDetect.h5")  # trained AI-text detector
with open("vectorizer.pkl", "rb") as f:
    vectorizer = pickle.load(f)  # text-vectorization object (callable on [text])
with open("scaler.pkl", "rb") as f:
    scaler = pickle.load(f)  # scaler for the hand-computed statistical features
|
| 108 |
|
| 109 |
+
# ---------------- 特徵計算(純 Python) ----------------
|
| 110 |
def compute_features(text):
    """Compute simple statistical features for *text* (pure Python, no numpy).

    Returns a single-row 2-D list:
    [[word_count, unique_word_ratio, repeat_rate, punctuation_ratio, avg_word_length]]
    so it can be fed straight to the scaler/model.
    """
    tokens = text.split()
    n_tokens = len(tokens)
    # Epsilon denominators keep every ratio finite on empty input.
    eps = 1e-6
    unique_ratio = len(set(tokens)) / (n_tokens + eps)
    repetition = 1 - unique_ratio
    n_punct = len([c for c in text if c in ".,!?;:"])
    punct_ratio = n_punct / (len(text) + eps)
    if tokens:
        mean_word_len = sum(map(len, tokens)) / (n_tokens + eps)
    else:
        mean_word_len = 0
    return [[n_tokens, unique_ratio, repetition, punct_ratio, mean_word_len]]
|
| 120 |
|
| 121 |
# ---------------- 生成解釋 ----------------
|
| 122 |
def explain_prediction(text):
|
| 123 |
# 文字向量化
|
| 124 |
+
seq = vectorizer([text])
|
| 125 |
+
seq = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=50, padding='pre')
|
| 126 |
|
| 127 |
# 統計特徵
|
| 128 |
feat = compute_features(text)
|
|
|
|
| 135 |
|
| 136 |
# 判斷依據
|
| 137 |
reasons = []
|
| 138 |
+
if feat[0][0] > 100: reasons.append("句子長度偏長")
|
| 139 |
+
if feat[0][2] > 0.3: reasons.append("重複率高")
|
| 140 |
+
if feat[0][1] < 0.2: reasons.append("詞彙多樣性低")
|
| 141 |
+
if feat[0][3] < 0.01: reasons.append("標點符號少")
|
| 142 |
+
if feat[0][4] > 6: reasons.append("平均詞長偏長")
|
| 143 |
if not reasons: reasons.append("句子長度與用詞平均")
|
| 144 |
explanation = ";".join(reasons)
|
| 145 |
|