Spaces:
Runtime error
Runtime error
Commit ·
d1bbd7a
1
Parent(s): cddd4a4
Update Utils.py
Browse files
Utils.py
CHANGED
|
@@ -23,10 +23,12 @@ alpha =['A', 'A1', 'A2' ,'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L','
|
|
| 23 |
|
| 24 |
|
| 25 |
# labels from old arabic
|
| 26 |
-
characters = [' ', '.', '[', ']', '؟', 'ء', 'آ', 'أ', 'ؤ', 'إ', 'ئ', 'ا', 'ب', 'ة', 'ت', 'ث', 'ج', 'ح', 'خ', 'د', 'ذ', 'ر', 'ز', 'س', 'ش', 'ص', 'ض', 'ط', 'ظ', 'ع', 'غ', 'ـ', 'ف', 'ق', 'ك', 'ل', 'م', 'ن', 'ه', 'و', 'ى', 'ي', 'ً', 'ٌ', 'ٍ', 'َ', 'ُ', 'ِ', 'ّ', 'ْ', 'ٔ', 'ٕ', '١', '٢', '٣', '٤', '٥', '٧', '٨', 'ٮ', 'ٯ', 'ٰ', 'ڡ', 'ک', 'ں', 'ی', '۴', '\u202c', 'ﭐ', 'ﺟ', 'ﺣ', 'ﻛ', '�']
|
| 27 |
-
characters.
|
| 28 |
|
| 29 |
characters.sort()
|
|
|
|
|
|
|
| 30 |
max_length = 132
|
| 31 |
img_height, img_width = 1056,64
|
| 32 |
|
|
@@ -75,16 +77,30 @@ def decode_predictions(pred,greedy = True):
|
|
| 75 |
|
| 76 |
|
| 77 |
|
| 78 |
-
def encode_single_sample(path_dir, label=None):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
img = tf.io.read_file(path_dir)
|
| 80 |
-
img = tf.io.decode_jpeg(img,
|
| 81 |
-
img.set_shape([img.shape[0], img.shape[1],
|
| 82 |
img = tf.image.rot90(img, k=1, name=None)
|
|
|
|
| 83 |
img = tf.image.resize(img, [img_height, img_width])
|
| 84 |
-
|
| 85 |
-
img
|
| 86 |
-
|
| 87 |
-
|
|
|
|
|
|
|
| 88 |
|
| 89 |
batch_size = 16
|
| 90 |
|
|
@@ -105,4 +121,4 @@ def Loadlines(path_lines):
|
|
| 105 |
|
| 106 |
""" load model_finetuned """
|
| 107 |
def load_model():
|
| 108 |
-
return keras.models.load_model('/home/user/app/
|
|
|
|
| 23 |
|
| 24 |
|
| 25 |
# labels from old arabic
|
| 26 |
# labels from old arabic
# Character inventory the OCR model can emit, taken from the old-Arabic
# line transcriptions.  The list is sorted in place so each character's
# index is stable between label encoding and decoding.
characters = [' ', '.', '[', ']', '؟', 'ء', 'آ', 'أ', 'ؤ', 'إ', 'ئ', 'ا', 'ب', 'ة', 'ت', 'ث', 'ج', 'ح', 'خ', 'د', 'ذ', 'ر', 'ز', 'س', 'ش', 'ص', 'ض', 'ط', 'ظ', 'ع', 'غ', 'ـ', 'ف', 'ق', 'ك', 'ل', 'م', 'ن', 'ه', 'و', 'ى', 'ي', 'ً', 'ٌ', 'ٍ', 'َ', 'ُ', 'ِ', 'ّ', 'ْ', 'ٔ', 'ٕ', 'ٖ', '٠', '١', '٢', '٣', '٨', 'ٮ', 'ٰ', 'ڡ', 'ک', 'ں', 'ی', 'ݘ', '\u202c', 'ﭐ', 'ﺣ', 'ﻛ']
characters.sort()

# Maximum transcription length (characters per line image) and the input
# geometry the network expects (height x width, after the 90° rotation
# applied in encode_single_sample).
max_length = 132
img_height, img_width = 1056, 64
|
|
|
| 77 |
|
| 78 |
|
| 79 |
|
| 80 |
+
# def encode_single_sample(path_dir, label=None):
|
| 81 |
+
# img = tf.io.read_file(path_dir)
|
| 82 |
+
# img = tf.io.decode_jpeg(img, name=None)
|
| 83 |
+
# img.set_shape([img.shape[0], img.shape[1], img.shape[-1]])
|
| 84 |
+
# img = tf.image.rot90(img, k=1, name=None)
|
| 85 |
+
# img = tf.image.resize(img, [img_height, img_width])
|
| 86 |
+
# # img = tf.image.rgb_to_grayscale(img) # Convert image to grayscale
|
| 87 |
+
# img = img/255.0
|
| 88 |
+
# return img
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def encode_single_sample(path_dir):
    """Load one line-image from disk and prepare it for the OCR model.

    Parameters
    ----------
    path_dir : str or tf.Tensor
        Path to a JPEG image containing a single text line.

    Returns
    -------
    tf.Tensor
        Float image tensor resized to (img_height, img_width), rotated
        90° counter-clockwise and scaled into [0, 1].
    """
    img = tf.io.read_file(path_dir)
    img = tf.io.decode_jpeg(img, name=None)
    # NOTE(review): the previous version called
    # img.set_shape([img.shape[0], img.shape[1], img.shape[-1]]) — setting
    # the shape to its own shape is a no-op, so it is dropped here.  It also
    # drew an unused tf.random.uniform value (leftover augmentation hook),
    # removed as dead code.
    img = tf.image.rot90(img, k=1, name=None)  # line images are stored rotated
    img = tf.image.resize(img, [img_height, img_width])
    img = img / 255.0  # normalize pixel values into [0, 1]
    return img
|
| 105 |
batch_size = 16  # images per batch for the input pipeline — TODO confirm against the dataset code
|
| 106 |
|
|
|
|
| 121 |
|
| 122 |
""" load model_finetuned """
|
| 123 |
def load_model():
|
| 124 |
+
return keras.models.load_model('/home/user/app/RGB_old_low_resolution_v2.h5')
|