File size: 4,029 Bytes
9b4f4f7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
# Code and weights taken from
# https://github.com/maciejczyzewski/neural-chessboard/

import deps

import numpy as np
import cv2
import collections
import scipy
import scipy.cluster
import scipy.spatial
import tensorflow as tf
import os

# Try to load the complete pre-trained model from the .h5 file.
# compile=False skips the optimizer state stored in the file, which may be
# incompatible with the installed TensorFlow version.
model_h5_path = "data/laps_models/laps.h5"
try:
    NEURAL_MODEL = tf.keras.models.load_model(model_h5_path, compile=False)
    # Recompile with the current optimizer API so the model is usable for
    # prediction/training regardless of which TF version saved it.
    from tensorflow.keras.optimizers import RMSprop
    NEURAL_MODEL.compile(RMSprop(learning_rate=0.001),
                        loss='categorical_crossentropy',
                        metrics=['categorical_accuracy'])
except Exception as e:
    print(f"Warning: Could not load model from {model_h5_path}: {e}")
    # Fallback to creating model from deps.laps (architecture defined in code;
    # presumably loads its own weights — confirm against deps.laps).
    from deps.laps import model as NEURAL_MODEL


def laps_intersections(lines):
    """Collect every pairwise intersection point of the given line segments.

    Each element of *lines* is a pair of endpoints; only the first two
    coordinates of each endpoint are used.
    """
    segments = []
    for p, q in lines:
        segments.append([(p[0], p[1]), (q[0], q[1])])
    return deps.geometry.isect_segments(segments)


def laps_cluster(points, max_dist=10):
    """Merge groups of nearby points into their mean point.

    Points linked by single-linkage distance below ``max_dist`` collapse
    into one ``(mean_x, mean_y)`` tuple.

    Args:
        points: sequence of (x, y) pairs.
        max_dist: distance threshold below which points are merged.

    Returns:
        List of (x, y) float tuples, one per cluster.
    """
    # BUGFIX: pdist/linkage require at least two observations; the original
    # raised on empty or single-point input (which LAPS can produce).
    if len(points) < 2:
        return [tuple(map(float, p)) for p in points]

    # Single-linkage hierarchical clustering, cut at max_dist.
    dists = scipy.spatial.distance.pdist(points)
    linkage = scipy.cluster.hierarchy.single(dists)
    labels = scipy.cluster.hierarchy.fcluster(linkage, max_dist, 'distance')

    clusters = collections.defaultdict(list)
    for label, point in zip(labels, points):
        clusters[label].append(point)

    # Each cluster of near-duplicate points becomes one mean point.
    return [(np.mean(np.array(members)[:, 0]),
             np.mean(np.array(members)[:, 1]))
            for members in clusters.values()]


def laps_detector(img):
    """Decide whether an image patch contains a lattice point.

    Two detectors are tried in order: a cheap geometric test (exactly four
    quadrilateral contours around the centre) and, failing that, the neural
    network classifier.

    Args:
        img: BGR image patch (any size; it is resized to 21x21).

    Returns:
        Tuple ``(is_positive, score)`` where ``score`` is ``1`` for a
        geometric hit or the network's output vector otherwise.
    """
    # Normalise the patch: grayscale -> Otsu binarisation -> Canny edges
    # -> fixed 21x21 input size.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)[1]
    img = cv2.Canny(img, 0, 255)
    img = cv2.resize(img, (21, 21), interpolation=cv2.INTER_CUBIC)

    # Binary 21x21x1 tensor for the network: 1 where edge pixels are bright.
    X = np.where(img > int(255/2), 1, 0).ravel().reshape([-1, 21, 21, 1])

    # Geometric detector: dilate edges, frame with a white border, invert,
    # and look at the resulting background regions.
    dilated = cv2.dilate(img, None)
    mask = cv2.copyMakeBorder(dilated, top=1, bottom=1, left=1, right=1,
                              borderType=cv2.BORDER_CONSTANT, value=[255, 255, 255])
    mask = cv2.bitwise_not(mask)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)

    # Count small quadrilateral regions: a lattice point splits its
    # neighbourhood into exactly four quadrants.
    quads = 0
    for cnt in contours:
        _, radius = cv2.minEnclosingCircle(cnt)
        approx = cv2.approxPolyDP(cnt, 0.1*cv2.arcLength(cnt, True), True)
        if len(approx) == 4 and radius < 14:
            quads += 1

    if quads == 4:
        return (True, 1)

    # Neural detector: accept only a confident positive (class 0) with a
    # near-zero negative (class 1) score.
    pred = NEURAL_MODEL.predict(X)
    a, b = pred[0][0], pred[0][1]
    if a > b and b < 0.03 and a > 0.975:
        return (True, pred[0])
    return (False, pred[0])

################################################################################


def LAPS(img, lines, size=10):
    """Lattice point search: find chessboard lattice points in *img*.

    Intersects the detected *lines*, crops a window of roughly
    ``2*size+1`` pixels around each intersection, keeps the intersections
    that ``laps_detector`` accepts, and merges near-duplicates.

    Args:
        img: source image (numpy array, indexed [y, x]).
        lines: line segments as accepted by ``laps_intersections``.
        size: half-width of the analysis window around each intersection.

    Returns:
        List of clustered (x, y) lattice points (possibly empty).
    """
    points = []

    for pt in __points_iter(laps_intersections(lines)):
        # Skip intersections outside the image early (cheap check hoisted
        # before the expensive detector call; resulting set is unchanged).
        if pt[0] < 0 or pt[1] < 0:
            continue

        # Analysis window, clamped to the image's top-left bounds.
        lx1 = max(0, int(pt[0]-size-1))
        lx2 = max(0, int(pt[0]+size))
        ly1 = max(0, int(pt[1]-size))
        ly2 = max(0, int(pt[1]+size+1))

        # Crop for the detector; an empty crop means the window fell
        # entirely outside the image.
        dimg = img[ly1:ly2, lx1:lx2]
        dimg_shape = np.shape(dimg)
        if dimg_shape[0] <= 0 or dimg_shape[1] <= 0:
            continue

        if laps_detector(dimg)[0]:
            points.append(pt)

    # BUGFIX: the original passed an empty/singleton list straight to
    # laps_cluster, which raised inside scipy's linkage.
    if len(points) < 2:
        return points
    return laps_cluster(points)


def __points_iter(raw_points):
    """Yield each raw intersection with pixel coordinates cast to int."""
    for pt in raw_points:
        yield list(map(int, pt))