David Fischinger
committed on
Commit
·
9a46ff1
1
Parent(s):
f5d6164
removed 3rd example image and only load models at startup
Browse files
app.py
CHANGED
|
@@ -19,8 +19,22 @@ IMG_SIZE=256
|
|
| 19 |
tf.experimental.numpy.experimental_enable_numpy_behavior()
|
| 20 |
#np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
def check_forgery_df(img):
|
| 26 |
shape_original = img.shape
|
|
@@ -30,7 +44,6 @@ def check_forgery_df(img):
|
|
| 30 |
pred1 = model_M1.predict(x, verbose=0)
|
| 31 |
pred2= model_M2.predict(x, verbose=0)
|
| 32 |
|
| 33 |
-
|
| 34 |
# # Ensure pred1 and pred2 are numpy arrays before proceeding
|
| 35 |
# if isinstance(pred1, dict):
|
| 36 |
# print("pred1 is dict!")
|
|
@@ -38,7 +51,6 @@ def check_forgery_df(img):
|
|
| 38 |
# if isinstance(pred2, dict):
|
| 39 |
# pred2 = pred2[next(iter(pred2))]
|
| 40 |
|
| 41 |
-
|
| 42 |
pred = np.max([pred1,pred2], axis=0)
|
| 43 |
|
| 44 |
pred = dfutils.create_mask(pred)
|
|
@@ -52,26 +64,8 @@ def evaluate(img):
|
|
| 52 |
pre_t = check_forgery_df(img)
|
| 53 |
st.image(pre_t, caption="White area indicates potential image manipulations.")
|
| 54 |
|
|
|
|
| 55 |
def start_evaluation(uploaded_file):
|
| 56 |
-
#load models
|
| 57 |
-
model_path1 = "IMVIP_Supplementary_Material/models/model1/"
|
| 58 |
-
model_path2 = "IMVIP_Supplementary_Material/models/model2/"
|
| 59 |
-
|
| 60 |
-
#tfsm_layer1 = tf.keras.layers.TFSMLayer(model_path1, call_endpoint='serving_default')
|
| 61 |
-
#tfsm_layer2 = tf.keras.layers.TFSMLayer(model_path2, call_endpoint='serving_default')
|
| 62 |
-
#
|
| 63 |
-
#input_shape = (256, 256, 3)
|
| 64 |
-
#inputs = Input(shape=input_shape)
|
| 65 |
-
|
| 66 |
-
##create the model
|
| 67 |
-
#outputs1 = tfsm_layer1(inputs)
|
| 68 |
-
#model_M1 = Model(inputs, outputs1)
|
| 69 |
-
|
| 70 |
-
#outputs2 = tfsm_layer2(inputs)
|
| 71 |
-
#model_M2 = Model(inputs, outputs2)
|
| 72 |
-
|
| 73 |
-
model_M1 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/") #tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/")
|
| 74 |
-
model_M2 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model2/")
|
| 75 |
|
| 76 |
# Convert the file to an opencv image.
|
| 77 |
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
|
@@ -86,9 +80,9 @@ st.markdown(DESCRIPTION)
|
|
| 86 |
|
| 87 |
img_path1 = "example_images/Sp_D_NRD_A_nat0095_art0058_0582"
|
| 88 |
img_path2 = "example_images/Sp_D_NRN_A_nat0083_arc0080_0445"
|
| 89 |
-
img_path3 = "example_images/Sp_D_NRN_A_ani0088_cha0044_0441"
|
| 90 |
-
image_paths = [img_path1+".jpg", img_path2+".jpg"
|
| 91 |
-
gt_paths = [img_path1+"_gt.png", img_path2+"_gt.png"
|
| 92 |
# Display images in a table format
|
| 93 |
img = None
|
| 94 |
for idx, image_path in enumerate(image_paths):
|
|
@@ -113,37 +107,6 @@ def reset_image_select():
|
|
| 113 |
img = None
|
| 114 |
|
| 115 |
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
def start_evaluation(uploaded_file):
|
| 119 |
-
#load models
|
| 120 |
-
model_path1 = "IMVIP_Supplementary_Material/models/model1/"
|
| 121 |
-
model_path2 = "IMVIP_Supplementary_Material/models/model2/"
|
| 122 |
-
|
| 123 |
-
#tfsm_layer1 = tf.keras.layers.TFSMLayer(model_path1, call_endpoint='serving_default')
|
| 124 |
-
#tfsm_layer2 = tf.keras.layers.TFSMLayer(model_path2, call_endpoint='serving_default')
|
| 125 |
-
#
|
| 126 |
-
#input_shape = (256, 256, 3)
|
| 127 |
-
#inputs = Input(shape=input_shape)
|
| 128 |
-
|
| 129 |
-
##create the model
|
| 130 |
-
#outputs1 = tfsm_layer1(inputs)
|
| 131 |
-
#model_M1 = Model(inputs, outputs1)
|
| 132 |
-
|
| 133 |
-
#outputs2 = tfsm_layer2(inputs)
|
| 134 |
-
#model_M2 = Model(inputs, outputs2)
|
| 135 |
-
|
| 136 |
-
model_M1 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/") #tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/")
|
| 137 |
-
model_M2 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model2/")
|
| 138 |
-
|
| 139 |
-
# Convert the file to an opencv image.
|
| 140 |
-
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
| 141 |
-
opencv_image = cv2.imdecode(file_bytes, 1)
|
| 142 |
-
reversed_image = opencv_image[:, :, ::-1]
|
| 143 |
-
st.image(reversed_image, caption="Input Image")
|
| 144 |
-
evaluate(reversed_image)
|
| 145 |
-
|
| 146 |
-
|
| 147 |
uploaded_file= None
|
| 148 |
uploaded_file = st.file_uploader("Please upload an image", type=["jpeg", "jpg", "png"], on_change=reset_image_select)
|
| 149 |
if (uploaded_file is not None) and (img is None):
|
|
|
|
| 19 |
tf.experimental.numpy.experimental_enable_numpy_behavior()
|
| 20 |
#np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
|
| 21 |
|
| 22 |
+
# function to load models
|
| 23 |
+
#@st.session_state better for hugging face?
|
| 24 |
+
@st.cache_resource
|
| 25 |
+
def load_models():
|
| 26 |
+
#load models
|
| 27 |
+
model_path1 = "IMVIP_Supplementary_Material/models/model1/"
|
| 28 |
+
model_path2 = "IMVIP_Supplementary_Material/models/model2/"
|
| 29 |
+
|
| 30 |
+
model_M1 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/") #tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/")
|
| 31 |
+
model_M2 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model2/")
|
| 32 |
+
|
| 33 |
+
return model_M1, model_M2
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
model_M1, model_M2 = load_models()
|
| 37 |
+
|
| 38 |
|
| 39 |
def check_forgery_df(img):
|
| 40 |
shape_original = img.shape
|
|
|
|
| 44 |
pred1 = model_M1.predict(x, verbose=0)
|
| 45 |
pred2= model_M2.predict(x, verbose=0)
|
| 46 |
|
|
|
|
| 47 |
# # Ensure pred1 and pred2 are numpy arrays before proceeding
|
| 48 |
# if isinstance(pred1, dict):
|
| 49 |
# print("pred1 is dict!")
|
|
|
|
| 51 |
# if isinstance(pred2, dict):
|
| 52 |
# pred2 = pred2[next(iter(pred2))]
|
| 53 |
|
|
|
|
| 54 |
pred = np.max([pred1,pred2], axis=0)
|
| 55 |
|
| 56 |
pred = dfutils.create_mask(pred)
|
|
|
|
| 64 |
pre_t = check_forgery_df(img)
|
| 65 |
st.image(pre_t, caption="White area indicates potential image manipulations.")
|
| 66 |
|
| 67 |
+
|
| 68 |
def start_evaluation(uploaded_file):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
# Convert the file to an opencv image.
|
| 71 |
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
|
|
|
|
| 80 |
|
| 81 |
img_path1 = "example_images/Sp_D_NRD_A_nat0095_art0058_0582"
|
| 82 |
img_path2 = "example_images/Sp_D_NRN_A_nat0083_arc0080_0445"
|
| 83 |
+
#img_path3 = "example_images/Sp_D_NRN_A_ani0088_cha0044_0441"
|
| 84 |
+
image_paths = [img_path1+".jpg", img_path2+".jpg"] #, img_path3+".jpg"]
|
| 85 |
+
gt_paths = [img_path1+"_gt.png", img_path2+"_gt.png"] #, img_path3+"_gt.png"]
|
| 86 |
# Display images in a table format
|
| 87 |
img = None
|
| 88 |
for idx, image_path in enumerate(image_paths):
|
|
|
|
| 107 |
img = None
|
| 108 |
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
uploaded_file= None
|
| 111 |
uploaded_file = st.file_uploader("Please upload an image", type=["jpeg", "jpg", "png"], on_change=reset_image_select)
|
| 112 |
if (uploaded_file is not None) and (img is None):
|