areeberg commited on
Commit
c19971d
·
verified ·
1 Parent(s): dc3be64

Upload 2408 files

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. .gitattributes +18 -0
  2. dfesa/Detection/callmethiago.py +4 -0
  3. dfesa/Detection/check_xml_label.py +43 -0
  4. dfesa/Detection/checkpoints/checkpoint +3 -0
  5. dfesa/Detection/checkpoints/eval_0/events.out.tfevents.1579781919.dfesa-PC +3 -0
  6. dfesa/Detection/checkpoints/events.out.tfevents.1579781609.dfesa-PC +3 -0
  7. dfesa/Detection/checkpoints/export/Servo/1579781919/saved_model.pb +3 -0
  8. dfesa/Detection/checkpoints/export/Servo/1579781919/variables/variables.data-00000-of-00001 +3 -0
  9. dfesa/Detection/checkpoints/export/Servo/1579781919/variables/variables.index +0 -0
  10. dfesa/Detection/checkpoints/graph.pbtxt +0 -0
  11. dfesa/Detection/checkpoints/model.ckpt-0.data-00000-of-00001 +3 -0
  12. dfesa/Detection/checkpoints/model.ckpt-0.index +0 -0
  13. dfesa/Detection/checkpoints/model.ckpt-0.meta +3 -0
  14. dfesa/Detection/checkpoints/model.ckpt-2000.data-00000-of-00001 +3 -0
  15. dfesa/Detection/checkpoints/model.ckpt-2000.index +0 -0
  16. dfesa/Detection/checkpoints/model.ckpt-2000.meta +3 -0
  17. dfesa/Detection/checkpoints2/checkpoint +4 -0
  18. dfesa/Detection/checkpoints2/eval_0/events.out.tfevents.1579782968.dfesa-PC +3 -0
  19. dfesa/Detection/checkpoints2/events.out.tfevents.1579782350.dfesa-PC +3 -0
  20. dfesa/Detection/checkpoints2/export/Servo/1579783100/saved_model.pb +3 -0
  21. dfesa/Detection/checkpoints2/export/Servo/1579783100/variables/variables.data-00000-of-00001 +3 -0
  22. dfesa/Detection/checkpoints2/export/Servo/1579783100/variables/variables.index +0 -0
  23. dfesa/Detection/checkpoints2/graph.pbtxt +0 -0
  24. dfesa/Detection/checkpoints2/model.ckpt-0.data-00000-of-00001 +3 -0
  25. dfesa/Detection/checkpoints2/model.ckpt-0.index +0 -0
  26. dfesa/Detection/checkpoints2/model.ckpt-0.meta +3 -0
  27. dfesa/Detection/checkpoints2/model.ckpt-4149.data-00000-of-00001 +3 -0
  28. dfesa/Detection/checkpoints2/model.ckpt-4149.index +0 -0
  29. dfesa/Detection/checkpoints2/model.ckpt-4149.meta +3 -0
  30. dfesa/Detection/checkpoints2/model.ckpt-5000.data-00000-of-00001 +3 -0
  31. dfesa/Detection/checkpoints2/model.ckpt-5000.index +0 -0
  32. dfesa/Detection/checkpoints2/model.ckpt-5000.meta +3 -0
  33. dfesa/Detection/csv2tfrecords.py +108 -0
  34. dfesa/Detection/det.png +3 -0
  35. dfesa/Detection/det1.png +3 -0
  36. dfesa/Detection/detect_object.py +170 -0
  37. dfesa/Detection/docker/2DJI_0707_02_01.jpg +3 -0
  38. dfesa/Detection/docker/Dockerfile +53 -0
  39. dfesa/Detection/docker/callmethiago.py +45 -0
  40. dfesa/Detection/docker/docker-compose.yml +15 -0
  41. dfesa/Detection/docker/frozen_inference_graph.pb +3 -0
  42. dfesa/Detection/docker/label_map.pbtxt +9 -0
  43. dfesa/Detection/docker/tuto.txt +11 -0
  44. dfesa/Detection/docker/v1Ad.py +157 -0
  45. dfesa/Detection/faster_rcnn_resnet50_coco.config +145 -0
  46. dfesa/Detection/faster_rcnn_resnet50_coco/checkpoint +2 -0
  47. dfesa/Detection/faster_rcnn_resnet50_coco/frozen_inference_graph.pb +3 -0
  48. dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.data-00000-of-00001 +3 -0
  49. dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.index +0 -0
  50. dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.meta +3 -0
.gitattributes CHANGED
@@ -57,3 +57,21 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ dfesa/Detection/checkpoints/export/Servo/1579781919/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
61
+ dfesa/Detection/checkpoints/model.ckpt-0.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
62
+ dfesa/Detection/checkpoints/model.ckpt-0.meta filter=lfs diff=lfs merge=lfs -text
63
+ dfesa/Detection/checkpoints/model.ckpt-2000.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
64
+ dfesa/Detection/checkpoints/model.ckpt-2000.meta filter=lfs diff=lfs merge=lfs -text
65
+ dfesa/Detection/checkpoints2/export/Servo/1579783100/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
66
+ dfesa/Detection/checkpoints2/model.ckpt-0.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
67
+ dfesa/Detection/checkpoints2/model.ckpt-0.meta filter=lfs diff=lfs merge=lfs -text
68
+ dfesa/Detection/checkpoints2/model.ckpt-4149.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
69
+ dfesa/Detection/checkpoints2/model.ckpt-4149.meta filter=lfs diff=lfs merge=lfs -text
70
+ dfesa/Detection/checkpoints2/model.ckpt-5000.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
71
+ dfesa/Detection/checkpoints2/model.ckpt-5000.meta filter=lfs diff=lfs merge=lfs -text
72
+ dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
73
+ dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.meta filter=lfs diff=lfs merge=lfs -text
74
+ dfesa/Detection/test.record filter=lfs diff=lfs merge=lfs -text
75
+ dfesa/Detection/tfgraph/model.ckpt.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
76
+ dfesa/Detection/tfgraph/model.ckpt.meta filter=lfs diff=lfs merge=lfs -text
77
+ dfesa/Detection/train.record filter=lfs diff=lfs merge=lfs -text
dfesa/Detection/callmethiago.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
"""Smoke-test the local Docker daemon by listing the running containers."""
import docker

# Connect using the environment's Docker configuration (DOCKER_HOST, etc.).
docker_client = docker.from_env()

# The result is intentionally discarded — this script only verifies that
# the daemon is reachable.
docker_client.containers.list()
# docker_client.containers.run("ubuntu", "echo hello world")
dfesa/Detection/check_xml_label.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import glob
3
+ import pandas as pd
4
+ import xml.etree.ElementTree as ET
5
+ import pdb
6
+
7
+
8
def xml_to_csv(path):
    """Normalize the <filename> tag of every Pascal-VOC XML file in *path*.

    For each ``*.xml`` annotation file directly inside *path*, the
    <filename> element is rewritten to ``<xml basename>.jpg`` so the
    annotation points at the image with the same base name, then the file
    is saved back in place.

    NOTE(review): despite its name this function does not build a CSV; it
    only rewrites annotation files (the CSV export in the callers is
    commented out).

    Args:
        path: Directory containing the XML annotation files.

    Returns:
        List of the XML file paths that were rewritten (callers that
        previously received None may safely ignore it).
    """
    rewritten = []
    for xml_file in glob.glob(os.path.join(path, '*.xml')):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # Derive the image name from the XML file's own base name.
        base = os.path.splitext(os.path.basename(xml_file))[0]
        root.find('filename').text = base + '.jpg'
        # BUG FIX: the original used ``path + basename`` string
        # concatenation, which silently wrote to a wrong location whenever
        # *path* had no trailing separator; os.path.join is always correct.
        tree.write(os.path.join(path, os.path.basename(xml_file)))
        rewritten.append(xml_file)
    return rewritten
+
27
# Rewrite the <filename> tags of the train and test annotation sets.
# The directory paths are absolute, so os.path.join(os.getcwd(), ...)
# returns them unchanged; the join is kept for parity with the original.
TRAIN_DIR = '/home/dfesa/Python_code/DFESA/Detection/train/'
TEST_DIR = '/home/dfesa/Documentos/Ts/teste/'

train_path = os.path.join(os.getcwd(), TRAIN_DIR)
print(train_path)
xml_to_csv(train_path)
print('Partial fim')

test_path = os.path.join(os.getcwd(), TEST_DIR)
xml_to_csv(test_path)
print('Fim')
dfesa/Detection/checkpoints/checkpoint ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ model_checkpoint_path: "model.ckpt-2000"
2
+ all_model_checkpoint_paths: "model.ckpt-0"
3
+ all_model_checkpoint_paths: "model.ckpt-2000"
dfesa/Detection/checkpoints/eval_0/events.out.tfevents.1579781919.dfesa-PC ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a45f60a417e26a4b2e6f88dd8b01aad1a294425e286f5c71af7803ab2b46d11c
3
+ size 19590388
dfesa/Detection/checkpoints/events.out.tfevents.1579781609.dfesa-PC ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9f5ddade70d067c015e1c5359048a1387196b5c2037869a9146750b3613a3d1
3
+ size 16772446
dfesa/Detection/checkpoints/export/Servo/1579781919/saved_model.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6eef0b100d381449f79ccd06e4386c00861bfb5652036e82c0e9fd11ed626528
3
+ size 2093058
dfesa/Detection/checkpoints/export/Servo/1579781919/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c2b0f7d9867a22b3393cfc583cb679a415ea42a0c907d669fd63813aaa39b97
3
+ size 173307988
dfesa/Detection/checkpoints/export/Servo/1579781919/variables/variables.index ADDED
Binary file (14.5 kB). View file
 
dfesa/Detection/checkpoints/graph.pbtxt ADDED
The diff for this file is too large to render. See raw diff
 
dfesa/Detection/checkpoints/model.ckpt-0.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75def7f119e6c6a832d0182fd013f89f40be5cc9dd89fe730dc30246d07521b2
3
+ size 286454432
dfesa/Detection/checkpoints/model.ckpt-0.index ADDED
Binary file (22.1 kB). View file
 
dfesa/Detection/checkpoints/model.ckpt-0.meta ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5455f6f4d5587834646dd37f307f4c983f548ba92da7bb788a6ec8aaccd70617
3
+ size 4635924
dfesa/Detection/checkpoints/model.ckpt-2000.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b472400b3a6d1dc439790bd79e9d3bb78b0686868e356328e9bcf8a9c7d88d38
3
+ size 286454432
dfesa/Detection/checkpoints/model.ckpt-2000.index ADDED
Binary file (22.1 kB). View file
 
dfesa/Detection/checkpoints/model.ckpt-2000.meta ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a949423d814e29e901a0851a337fa56f35f64050971f92676f6f4d7be3fc139b
3
+ size 4635924
dfesa/Detection/checkpoints2/checkpoint ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ model_checkpoint_path: "model.ckpt-5000"
2
+ all_model_checkpoint_paths: "model.ckpt-0"
3
+ all_model_checkpoint_paths: "model.ckpt-4149"
4
+ all_model_checkpoint_paths: "model.ckpt-5000"
dfesa/Detection/checkpoints2/eval_0/events.out.tfevents.1579782968.dfesa-PC ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2dc31bb1d5f95772b09d897e61d8b33dd68857cca1e69a93d49f5e41079c245
3
+ size 33693216
dfesa/Detection/checkpoints2/events.out.tfevents.1579782350.dfesa-PC ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:873979fac8b1d159876c7aeac13aadbc66430f54510135c4f5e8b5a701ad4ffd
3
+ size 16779067
dfesa/Detection/checkpoints2/export/Servo/1579783100/saved_model.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3530bce2d17f6bac71b213fe74b28efb992648ec71607204377604108a88ea0
3
+ size 2093058
dfesa/Detection/checkpoints2/export/Servo/1579783100/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59f193cbdbe97493e1b29d55790ddfa79be507ea4cea6007597894b08ea9428e
3
+ size 173307988
dfesa/Detection/checkpoints2/export/Servo/1579783100/variables/variables.index ADDED
Binary file (14.5 kB). View file
 
dfesa/Detection/checkpoints2/graph.pbtxt ADDED
The diff for this file is too large to render. See raw diff
 
dfesa/Detection/checkpoints2/model.ckpt-0.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f8b1fdb2a2bee54c653bc9cd9e899df9cd2ef67aa8cf12320a761b6c1b47c61
3
+ size 286454432
dfesa/Detection/checkpoints2/model.ckpt-0.index ADDED
Binary file (22.1 kB). View file
 
dfesa/Detection/checkpoints2/model.ckpt-0.meta ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31455795e5cb0933f546cf939d876cc370832f00b31624aeece4b52d5883968b
3
+ size 4635942
dfesa/Detection/checkpoints2/model.ckpt-4149.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbec6243a38712f5ba98590b2f842e05420e9cdc50be7ab82d750ea0dc4c9900
3
+ size 286454432
dfesa/Detection/checkpoints2/model.ckpt-4149.index ADDED
Binary file (22.1 kB). View file
 
dfesa/Detection/checkpoints2/model.ckpt-4149.meta ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff039f80aaf679ef53885ad74ddda3c9a55d190bfa23ebf105a17cc0d9e861b5
3
+ size 4635942
dfesa/Detection/checkpoints2/model.ckpt-5000.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bdfdd32cd644b986441ad9e32856dbcb4805c41264dfe107b9e60e86df7cf26
3
+ size 286454432
dfesa/Detection/checkpoints2/model.ckpt-5000.index ADDED
Binary file (22.1 kB). View file
 
dfesa/Detection/checkpoints2/model.ckpt-5000.meta ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ce5d36f27f7959b4deb87e687a8f83d69f2d03b49a281dd21bd4f1d53fba2b3
3
+ size 4635942
dfesa/Detection/csv2tfrecords.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Usage:
3
+ # From the data set dir
4
+ # Create train data:
5
+ python ../generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record
6
+ """
7
+ from __future__ import division
8
+ from __future__ import print_function
9
+ from __future__ import absolute_import
10
+
11
+ import os
12
+ import io
13
+ import pandas as pd
14
+ import tensorflow as tf
15
+ import pdb
16
+
17
+ from PIL import Image
18
+ from object_detection.utils import dataset_util
19
+ from collections import namedtuple, OrderedDict
20
+
21
+ flags = tf.app.flags
22
+ flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
23
+ flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
24
+ FLAGS = flags.FLAGS
25
+
26
+
27
# TO-DO replace this with label map
# Label-name -> integer id mapping; must stay in sync with label_map.pbtxt.
_CLASS_IDS = {'Falta': 1, 'Objeto': 2}


def class_text_to_int(row_label):
    """Map a class name from the CSV to its integer label id.

    Args:
        row_label: Class name string (e.g. 'Falta' or 'Objeto').

    Returns:
        The integer id for known labels, or None for unknown ones.
        (The original ``else: None`` branch was a bare expression — the
        function already returned None implicitly; this makes it explicit.)
    """
    return _CLASS_IDS.get(row_label)
35
+
36
+
37
def split(df, group):
    """Group *df* by *group* and wrap each group as a (filename, object) pair."""
    Record = namedtuple('data', ['filename', 'object'])
    grouped = df.groupby(group)
    # The groupby keys double as the filenames; fetch each sub-frame by key.
    return [Record(key, grouped.get_group(key)) for key in grouped.groups]
41
+
42
+
43
def create_tf_example(group, path):
    """Build a tf.train.Example for one image and its bounding boxes.

    Args:
        group: namedtuple with ``filename`` and ``object`` (a DataFrame of
            rows carrying xmin/xmax/ymin/ymax/class columns).
        path: Directory that contains the image file.

    Returns:
        A populated tf.train.Example proto.
    """
    image_path = os.path.join(path, '{}'.format(group.filename))
    with tf.gfile.GFile(image_path, 'rb') as image_file:
        encoded_jpg = image_file.read()
    # Re-open from the in-memory bytes to read the image dimensions.
    image = Image.open(io.BytesIO(encoded_jpg))
    width, height = image.size

    filename = group.filename.encode('utf8')
    image_format = b'jpg'

    # Normalize box coordinates to [0, 1] and collect per-object labels.
    rows = [row for _, row in group.object.iterrows()]
    xmins = [row['xmin'] / width for row in rows]
    xmaxs = [row['xmax'] / width for row in rows]
    ymins = [row['ymin'] / height for row in rows]
    ymaxs = [row['ymax'] / height for row in rows]
    classes_text = [row['class'].encode('utf8') for row in rows]
    classes = [class_text_to_int(row['class']) for row in rows]

    features = tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    })
    return tf.train.Example(features=features)
86
+
87
+
88
def main(_):
    """Convert the CSV referenced by --csv_input into a TFRecord file."""
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    # Images are expected under ./teste/ relative to the working directory.
    images_dir = os.path.join(os.getcwd()) + '/teste/'
    examples = pd.read_csv(FLAGS.csv_input)

    for group in split(examples, 'filename'):
        writer.write(create_tf_example(group, images_dir).SerializeToString())

    writer.close()
    output_path = os.path.join(os.getcwd(), FLAGS.output_path)
    print('Successfully created the TFRecords: {}'.format(output_path))
105
+
106
+
107
if __name__ == '__main__':
    # Delegate to main() through the flags-aware TF entry point.
    tf.app.run()
dfesa/Detection/det.png ADDED

Git LFS Details

  • SHA256: bb32f6714c63c9ffeb7a425909efd9e98e47363fc73f95b0c61bd23d9fd97daa
  • Pointer size: 132 Bytes
  • Size of remote file: 1.09 MB
dfesa/Detection/det1.png ADDED

Git LFS Details

  • SHA256: 9c75753ce2bd1ac24f6e8a3ffac6ef80886557d58ca7d2d890804125aca2d143
  • Pointer size: 131 Bytes
  • Size of remote file: 722 kB
dfesa/Detection/detect_object.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import os
3
+ import six.moves.urllib as urllib
4
+ import sys
5
+ import tarfile
6
+ import tensorflow as tf
7
+ import zipfile
8
+ import tkinter
9
+ import matplotlib
10
+ import pdb
11
+ import pandas as pd
12
+ from distutils.version import StrictVersion
13
+ from collections import defaultdict
14
+ from io import StringIO
15
+ from matplotlib import pyplot as plt
16
+ from PIL import Image
17
+
18
matplotlib.use('TkAgg')

# Bound GPU memory for the shared session: at most 80% of the card,
# allocated incrementally rather than up front.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("/home/dfesa/models/research/")
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util


# Exported model directory and label map used by detect_object() below.
MODEL_NAME = '/home/dfesa/Python_code/DFESA/Detection/tfgraph'

PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
#PATH_TO_FROZEN_GRAPH = '/frozen_inference_graph.pb'
PATH_TO_LABELS = '/home/dfesa/Python_code/DFESA/Detection/label_map.pbtxt'
37
+
38
+
39
+
40
def detect_object(image_file):
    """Detect objects in *image_file* with the frozen Faster R-CNN model.

    Loads the frozen graph from PATH_TO_FROZEN_GRAPH, runs one inference
    pass and keeps detections scoring above 0.8.

    Args:
        image_file: Path to the JPEG image to analyse.

    Returns:
        (N, 2) numpy array with one row per confident detection (see the
        NOTE below about how the two columns are computed); an empty
        (0, 2) array when no detection clears the threshold.
    """
    NUM_CLASSES = 2  # kept for parity with the training config (unused here)

    # Build the detection graph from the frozen .pb file.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    category_index = label_map_util.create_category_index_from_labelmap(
        PATH_TO_LABELS, use_display_name=True)

    def load_image_into_numpy_array(image):
        # PIL image -> (H, W, 3) uint8 array.
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)

    # Size, in inches, of the output images.
    IMAGE_SIZE = (12, 8)

    def run_inference_for_single_image(image, graph):
        # Single forward pass; returns the batch-stripped output tensors.
        with graph.as_default():
            with tf.Session() as sess:
                # Get handles to the output tensors this model exposes.
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in [
                        'num_detections', 'detection_boxes', 'detection_scores',
                        'detection_classes', 'detection_masks'
                ]:
                    tensor_name = key + ':0'
                    if tensor_name in all_tensor_names:
                        tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                            tensor_name)
                if 'detection_masks' in tensor_dict:
                    # The following processing is only for a single image.
                    detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                    # Reframe masks from box coordinates to image coordinates.
                    real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, image.shape[1], image.shape[2])
                    detection_masks_reframed = tf.cast(
                        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    # Add back the batch dimension by convention.
                    tensor_dict['detection_masks'] = tf.expand_dims(
                        detection_masks_reframed, 0)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

                # Run inference.
                output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image})

                # Outputs are float32 arrays: strip the batch dim, fix dtypes.
                output_dict['num_detections'] = int(output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.int64)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
        return output_dict

    # BUG FIX: the original ignored the ``image_file`` argument and always
    # opened a hard-coded sample path; the parameter is now honoured.
    image = Image.open(image_file)
    image_np = load_image_into_numpy_array(image)
    # The model expects images of shape [1, None, None, 3].
    image_np_expanded = np.expand_dims(image_np, axis=0)
    output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)

    dtaf = pd.DataFrame.from_dict(output_dict, orient="index")
    confident = dtaf.loc['detection_scores'][0] > 0.8

    if not np.any(confident):
        # BUG FIX: the original fell off the end and returned None here,
        # crashing callers that iterate the result; return an empty array.
        return np.empty((0, 2))

    boxes = dtaf.loc['detection_boxes'][0][confident]
    # NOTE(review): detection boxes are [ymin, xmin, ymax, xmax]; averaging
    # box[0] with box[1] mixes y and x.  Preserved from the original (it was
    # marked "TODO FIX THIS CENTERS") — confirm before relying on the values.
    centerx = np.asarray([(box[0] + box[1]) / 2 for box in boxes]).reshape(-1, 1)
    centery = np.asarray([(box[2] + box[3]) / 2 for box in boxes]).reshape(-1, 1)
    # Active pdb.set_trace() breakpoints removed — they halted every call.
    return np.concatenate((centerx, centery), axis=1)
164
+
165
# Demo invocation: run the detector on a sample image and print each center.
# FIX: module-level pdb.set_trace() breakpoints removed — they halted the
# script before and after the detection ran.
imagem2 = '/home/dfesa/Python_code/DFESA/Detection/teste/DJI_0652_01_02.jpg'
ave = detect_object(imagem2)
for i in ave:
    print(i)
dfesa/Detection/docker/2DJI_0707_02_01.jpg ADDED

Git LFS Details

  • SHA256: d567a979d60d5402495b7ceaa9fb45db840af1491e602ce56b6e47a724693e1a
  • Pointer size: 131 Bytes
  • Size of remote file: 424 kB
dfesa/Detection/docker/Dockerfile ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM ubuntu:18.04

ENV DEBIAN_FRONTEND=noninteractive
# Common, note that two updates are needed
RUN apt-get update

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install --assume-yes git && \
    apt-get install build-essential -y --no-install-recommends python3 python3-pip python3-virtualenv python3-tk protobuf-compiler python-pil python-lxml python-tk

ENV VIRTUAL_ENV=/opt/venv
# FIX: was "$VIRTUAL_ENV$" — the stray trailing "$" corrupted the venv path.
RUN python3 -m virtualenv --python=/usr/bin/python3 $VIRTUAL_ENV
ENV PATH="/opt/venv/bin:$PATH"

# Install dependencies
RUN pip3 install --upgrade pip
RUN pip3 install setuptools
RUN pip3 install sklearn
RUN pip3 install pandas
RUN pip3 install matplotlib
RUN pip3 install pillow
RUN pip3 install requests
RUN pip3 install Cython
RUN pip3 install contextlib2
RUN pip3 install lxml
#RUN pip3 install h5py
RUN pip3 install tensorflow==1.14
#RUN pip3 install xgboost
RUN pip3 install -U Flask

# Download tensorflow models
RUN git clone https://github.com/tensorflow/models.git /tensorflow/models
WORKDIR /tensorflow/models/research
RUN protoc object_detection/protos/*.proto --python_out=.
# FIX: "RUN export PYTHONPATH=..." does not survive the build layer; use ENV
# so the object_detection package is importable at runtime.
ENV PYTHONPATH="/tensorflow/models/research:/tensorflow/models/research/slim"

RUN pwd
RUN ls

# Run the application
RUN mkdir -p /tffiles
RUN mkdir -p /tempimgs

COPY ./label_map.pbtxt /tffiles/
COPY ./frozen_inference_graph.pb /tffiles/
COPY ./2DJI_0707_02_01.jpg /tffiles/
# FIX: the Flask service in this directory is v1Ad.py; "v1A.py" does not
# exist and would fail the COPY at build time.
COPY v1Ad.py .
CMD ["python3", "v1Ad.py"]
dfesa/Detection/docker/callmethiago.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Launch the detector container and POST one inference request to it."""
import docker
import requests
import time
import pdb
import json

client = docker.from_env()

print("uiases")

# FIX: the original read ``payload{'images': ...}`` — a SyntaxError; the
# assignment operator was missing, so this script could not run at all.
# NOTE(review): the service route (v1Ad.py) reads json_data['dir1']; confirm
# it accepts an 'images' list before relying on this payload shape.
payload = {'images': ['/tempimgs/DJI_0732_01_01.jpg', '/tempimgs/DJI_0732_01_01.jpg']}
json_string = json.dumps(payload)

# Start the service container with the Metashape temp folder mounted
# read-write at /tempimgs, and Flask's port published on localhost:5000.
objeto = client.containers.run(
    "upbeat",
    detach=True,
    ports={'5000/tcp': ('localhost', 5000)},
    volumes={
        '/home/dfesa/.local/share/Agisoft/Metashape Pro/Projects/temp_files/': {
            'bind': '/tempimgs/',
            'mode': 'rw',
        }
    },
)
# Give Flask a moment to come up before the first request.
time.sleep(3)

response_get = requests.post('http://localhost:5000/teste', json=json_string)
print(response_get.json())

print("asehsa2")
if response_get.status_code == 200:
    print(response_get.text)
else:
    print(response_get.status_code)
dfesa/Detection/docker/docker-compose.yml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: '2.3'
2
+
3
+ networks:
4
+ jupyter-newtwork:
5
+
6
+ services:
7
+ jupyter:
8
+ container_name: jupyter
9
+ build: .
10
+ ports:
11
+ - "8080:8080"
12
+ volumes:
13
+ - /home/dfesa/Python_code/DFESA/Detection/docker:/root/mount/
14
+ - "/home/dfesa/.local/share/Agisoft/Metashape Pro/Projects/temp_files:/tempimgs"
15
+ restart: always
dfesa/Detection/docker/frozen_inference_graph.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec109a26af464c42e6cbaf94fc2f63e460a80e1f632728d56fa8a71eca797470
3
+ size 114026995
dfesa/Detection/docker/label_map.pbtxt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ item {
2
+ id: 1
3
+ name: 'Falta'
4
+ }
5
+
6
+ item {
7
+ id: 2
8
+ name: 'Objeto'
9
+ }
dfesa/Detection/docker/tuto.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ docker build . -t upbeat
2
+ docker run upbeat
3
+
4
+ docker run -p 5000:5000 -d upbeat (flask)
5
+
6
+
7
+
8
+ docker ps
9
+ docker kill
10
+ docker build . -t upbeat
11
+
dfesa/Detection/docker/v1Ad.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import os
3
+ import six.moves.urllib as urllib
4
+ import sys
5
+ import tarfile
6
+ import tensorflow as tf
7
+ import zipfile
8
+ import tkinter
9
+ import matplotlib
10
+ import pdb
11
+ import pandas as pd
12
+ from distutils.version import StrictVersion
13
+ from collections import defaultdict
14
+ from io import StringIO
15
+ from matplotlib import pyplot as plt
16
+ from PIL import Image
17
+ from flask import Flask
18
+ import json
19
+ import requests
20
+ from flask import request
21
+ from flask import jsonify
22
+
23
+
24
#matplotlib.use('TkAgg')

# Bound GPU memory for the shared session: at most 80% of the card,
# allocated incrementally rather than up front.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

currentdir = os.getcwd()
print("asiufhsaueh")
print(currentdir)

# Make the TF object_detection package importable inside the container.
sys.path.append("/models/research/")

from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Model artefacts are baked into the image under /tffiles (see Dockerfile).
MODEL_NAME = '/tffiles'

PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
#PATH_TO_FROZEN_GRAPH = '/frozen_inference_graph.pb'
PATH_TO_LABELS = '/tffiles/label_map.pbtxt'

NUM_CLASSES = 2

# Load the frozen inference graph once at import time.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
61
+
62
def load_image_into_numpy_array(image):
    """Convert a PIL image into an (H, W, 3) uint8 numpy array."""
    im_width, im_height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((im_height, im_width, 3)).astype(np.uint8)


# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
69
+
70
+
71
def run_inference_for_single_image(image, graph):
    """Run one forward pass of *graph* on a single batched image.

    Args:
        image: numpy array of shape [1, H, W, 3].
        graph: tf.Graph holding the frozen detection model.

    Returns:
        Dict with num_detections (int), detection_classes (int64 array),
        detection_boxes, detection_scores and, when the model provides
        them, detection_masks — all for the single image in the batch.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Collect the output tensors this model actually exposes.
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)

            if 'detection_masks' in tensor_dict:
                # The following processing is only for a single image.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe masks from box coordinates to image coordinates.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[1], image.shape[2])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension.
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)

            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

            # Run inference.
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image})

            # Outputs are float32 arrays: strip the batch dim, fix dtypes.
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.int64)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
116
# FIX: removed a stray leftover fragment ``fy({'prediction': list(prediction)})``
# (apparently the tail of a jsonify(...) call); both names were undefined at
# module level, so importing this module raised NameError before the Flask
# app could even start.
121
app = Flask(__name__)


@app.route('/teste', methods=['POST'])
def find_failure():
    """Score one image and answer "1" if any detection exceeds 0.8."""
    # The client POSTs a JSON-encoded *string*, so the body is decoded
    # twice: request.json yields the string, json.loads yields the dict.
    json_data = json.loads(request.json)
    imagem = json_data['dir1']

    # -------------------------------------------------------------------------
    image = Image.open(imagem)
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)

    # Visualization of the results of a detection.
    scores_frame = pd.DataFrame.from_dict(output_dict, orient="index")
    largest_detection = scores_frame.loc['detection_scores'][0].max()
    print(largest_detection)

    yesno = "1" if largest_detection > 0.8 else "0"
    print(yesno)
    return jsonify(yesno)
151
+
152
+ #-------------------------------------------------------------------------------
153
+
154
+
155
if __name__ == '__main__':
    # FIX: port was the string '5000'; Flask/werkzeug expects an integer
    # port number for socket binding.
    app.run(host='0.0.0.0', port=5000, debug=True)
dfesa/Detection/faster_rcnn_resnet50_coco.config ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Faster R-CNN with Resnet-50 (v1), configuration for MSCOCO Dataset.
2
+ # Users should configure the fine_tune_checkpoint field in the train config as
3
+ # well as the label_map_path and input_path fields in the train_input_reader and
4
+ # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
5
+ # should be configured.
6
+
7
+ model {
8
+ faster_rcnn {
9
+ num_classes: 2
10
+ image_resizer {
11
+ keep_aspect_ratio_resizer {
12
+ min_dimension: 600
13
+ max_dimension: 1024
14
+ }
15
+ }
16
+ feature_extractor {
17
+ type: 'faster_rcnn_resnet50'
18
+ first_stage_features_stride: 16
19
+ }
20
+ first_stage_anchor_generator {
21
+ grid_anchor_generator {
22
+ scales: [0.25, 0.5, 1.0, 2.0]
23
+ aspect_ratios: [0.5, 1.0, 2.0]
24
+ height_stride: 16
25
+ width_stride: 16
26
+ }
27
+ }
28
+ first_stage_box_predictor_conv_hyperparams {
29
+ op: CONV
30
+ regularizer {
31
+ l2_regularizer {
32
+ weight: 0.0
33
+ }
34
+ }
35
+ initializer {
36
+ truncated_normal_initializer {
37
+ stddev: 0.01
38
+ }
39
+ }
40
+ }
41
+ first_stage_nms_score_threshold: 0.0
42
+ first_stage_nms_iou_threshold: 0.7
43
+ first_stage_max_proposals: 300
44
+ first_stage_localization_loss_weight: 2.0
45
+ first_stage_objectness_loss_weight: 1.0
46
+ initial_crop_size: 14
47
+ maxpool_kernel_size: 2
48
+ maxpool_stride: 2
49
+ second_stage_box_predictor {
50
+ mask_rcnn_box_predictor {
51
+ use_dropout: false
52
+ dropout_keep_probability: 1.0
53
+ fc_hyperparams {
54
+ op: FC
55
+ regularizer {
56
+ l2_regularizer {
57
+ weight: 0.0
58
+ }
59
+ }
60
+ initializer {
61
+ variance_scaling_initializer {
62
+ factor: 1.0
63
+ uniform: true
64
+ mode: FAN_AVG
65
+ }
66
+ }
67
+ }
68
+ }
69
+ }
70
+ second_stage_post_processing {
71
+ batch_non_max_suppression {
72
+ score_threshold: 0.0
73
+ iou_threshold: 0.6
74
+ max_detections_per_class: 100
75
+ max_total_detections: 300
76
+ }
77
+ score_converter: SOFTMAX
78
+ }
79
+ second_stage_localization_loss_weight: 2.0
80
+ second_stage_classification_loss_weight: 1.0
81
+ }
82
+ }
83
+
84
+ train_config: {
85
+ batch_size: 1
86
+ optimizer {
87
+ momentum_optimizer: {
88
+ learning_rate: {
89
+ manual_step_learning_rate {
90
+ initial_learning_rate: 0.0003
91
+ schedule {
92
+ step: 900000
93
+ learning_rate: .00003
94
+ }
95
+ schedule {
96
+ step: 1200000
97
+ learning_rate: .000003
98
+ }
99
+ }
100
+ }
101
+ momentum_optimizer_value: 0.9
102
+ }
103
+ use_moving_average: false
104
+ }
105
+ gradient_clipping_by_norm: 10.0
106
+ fine_tune_checkpoint: "/home/dfesa/Python_code/DFESA/Detection/faster_rcnn_resnet50_coco/model.ckpt"
107
+ from_detection_checkpoint: true
108
+ # Note: The below line limits the training process to 200K steps, which we
109
+ # empirically found sufficient for this dataset. Because 200K is below the
110
+ # first schedule step (900K), this effectively bypasses the learning rate
111
+ # schedule (the rate will never decay). Remove the line to train indefinitely.
112
+ num_steps: 200000
113
+ data_augmentation_options {
114
+ random_horizontal_flip {
115
+ }
116
+ }
117
+ data_augmentation_options {
118
+ random_image_scale {
119
+ }
120
+ }
121
+ }
122
+
123
+ train_input_reader: {
124
+ tf_record_input_reader {
125
+ input_path: "/home/dfesa/Python_code/DFESA/Detection/train.record"
126
+ }
127
+ label_map_path: "/home/dfesa/Python_code/DFESA/Detection/label_map.pbtxt"
128
+ }
129
+
130
+ eval_config: {
131
+ batch_size: 1
132
+ num_examples: 97
133
+ # Note: max_evals would limit the evaluation process to 10 evaluations;
135
+ # it is currently commented out, so evaluation runs indefinitely.
136
+ # max_evals: 10
136
+ }
137
+
138
+ eval_input_reader: {
139
+ tf_record_input_reader {
140
+ input_path: "/home/dfesa/Python_code/DFESA/Detection/test.record"
141
+ }
142
+ label_map_path: "/home/dfesa/Python_code/DFESA/Detection/label_map.pbtxt"
143
+ shuffle: false
144
+ num_readers: 1
145
+ }
dfesa/Detection/faster_rcnn_resnet50_coco/checkpoint ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ model_checkpoint_path: "model.ckpt"
2
+ all_model_checkpoint_paths: "model.ckpt"
dfesa/Detection/faster_rcnn_resnet50_coco/frozen_inference_graph.pb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e12cca9868c73f83940adafdcb18cda429ff398454505a721f95277b728c82af
3
+ size 120549957
dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:118f28f83927eb96e72edf5e3c85e397bb07a6f83a9527d95f75c1b7e02fe831
3
+ size 176914228
dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.index ADDED
Binary file (14.5 kB). View file
 
dfesa/Detection/faster_rcnn_resnet50_coco/model.ckpt.meta ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a248853b541d36bdf98550b6c94913e9c9a4e057fbad0cb7a8862068ef288aab
3
+ size 5675175