code stringlengths 17 6.64M |
|---|
def moveBothHands(handRight, handLeft, addX, addY):
    """Translate both hands so each wrist (the first point) moves by (addX, addY).

    handRight / handLeft: flat [x0, y0, x1, y1, ...] keypoint lists.
    Returns (rightFlat, rightPairs, leftFlat, leftPairs) where the *Flat lists
    mirror the input layout and the *Pairs lists hold (int, int) tuples.
    Left-hand coordinates equal to 0 (undetected points) are left untouched.
    """
    # --- right hand: shift every point by the wrist's offset from its target.
    refX = (handRight[0] + addX)
    refY = (handRight[1] + addY)
    handRightX = [handRight[i] for i in range(0, len(handRight), 2)]
    handRightY = [handRight[i] for i in range(1, len(handRight), 2)]
    distanceX = (handRightX[0] - refX)
    distanceY = (handRightY[0] - refY)
    handRightResults = []
    handRightPoints = []
    for i in range(len(handRightX)):
        handRightX[i] -= distanceX
        handRightY[i] -= distanceY
        handRightPoints.append((int(handRightX[i]), int(handRightY[i])))
        handRightResults.append(handRightX[i])
        handRightResults.append(handRightY[i])
    # --- left hand: same shift, but coordinates valued 0 stay 0 ("missing").
    refX = (handLeft[0] + addX)
    refY = (handLeft[1] + addY)
    handLeftX = [handLeft[i] for i in range(0, len(handLeft), 2)]
    handLeftY = [handLeft[i] for i in range(1, len(handLeft), 2)]
    distanceX = (handLeftX[0] - refX)
    distanceY = (handLeftY[0] - refY)
    handLeftResults = []
    handLeftPoints = []
    for i in range(len(handLeftX)):
        if (handLeftX[i] != 0):
            handLeftX[i] -= distanceX
        # BUG FIX: the original tested handLeftX here, so a 0-valued y-coord
        # (undetected) was shifted whenever its x-coord was nonzero. Test the
        # y-coordinate itself, matching moveBody's handling of missing points.
        if (handLeftY[i] != 0):
            handLeftY[i] -= distanceY
        handLeftPoints.append((int(handLeftX[i]), int(handLeftY[i])))
        handLeftResults.append(handLeftX[i])
        handLeftResults.append(handLeftY[i])
    return (handRightResults, handRightPoints, handLeftResults, handLeftPoints)
|
def move_to_wrist(handRight, wristX, wristY):
    """Translate flat [x0, y0, x1, y1, ...] keypoints so the first point
    (the wrist) lands exactly on (wristX, wristY).

    Returns (flatCoords, intPairs): the shifted coordinates in the input
    layout plus the same points as (int, int) tuples.
    """
    xs = [handRight[i] for i in range(0, len(handRight), 2)]
    ys = [handRight[i] for i in range(1, len(handRight), 2)]
    # Offset between the current wrist position and the requested one.
    shiftX = xs[0] - wristX
    shiftY = ys[0] - wristY
    xs = [v - shiftX for v in xs]
    ys = [v - shiftY for v in ys]
    flat = []
    pairs = []
    for i in range(len(xs)):
        pairs.append((int(xs[i]), int(ys[i])))
        flat.append(xs[i])
        flat.append(ys[i])
    return (flat, pairs)
|
def scaleBody(handRight, distance):
    """Uniformly rescale flat [x, y, ...] body keypoints so that `distance`
    maps onto a 200-unit reference length.

    Returns (flatCoords, intPairs). Raises ZeroDivisionError if distance is 0.
    """
    REF_LENGTH = 200
    factor = REF_LENGTH / distance
    xs = [handRight[i] * factor for i in range(0, len(handRight), 2)]
    ys = [handRight[i] * factor for i in range(1, len(handRight), 2)]
    flat = []
    pairs = []
    for i in range(len(xs)):
        pairs.append((int(xs[i]), int(ys[i])))
        flat.append(xs[i])
        flat.append(ys[i])
    return (flat, pairs)
|
def moveBody(handRight):
    """Translate flat [x, y, ...] body keypoints so that point #1 moves to
    the fixed anchor (1000, 400).

    Coordinates valued 0 (undetected points) are left unchanged on that axis.
    Returns (flatCoords, intPairs).
    """
    ANCHOR_X = 1000
    ANCHOR_Y = 400
    xs = [handRight[i] for i in range(0, len(handRight), 2)]
    ys = [handRight[i] for i in range(1, len(handRight), 2)]
    # Offset of point #1 from the anchor; subtracted from every visible point.
    dx = xs[1] - ANCHOR_X
    dy = ys[1] - ANCHOR_Y
    xs = [v if v == 0 else v - dx for v in xs]
    ys = [v if v == 0 else v - dy for v in ys]
    flat = []
    pairs = []
    for i in range(len(xs)):
        pairs.append((int(xs[i]), int(ys[i])))
        flat.append(xs[i])
        flat.append(ys[i])
    return (flat, pairs)
|
def dummyMoveBody(handRight):
    """Translate flat [x, y, ...] keypoints so that point #1 moves to the
    fixed anchor (400, 200); a smaller-canvas variant of moveBody.

    Coordinates valued 0 (undetected points) are left unchanged on that axis.
    Returns (flatCoords, intPairs).
    """
    ANCHOR_X = 400
    ANCHOR_Y = 200
    xs = [handRight[i] for i in range(0, len(handRight), 2)]
    ys = [handRight[i] for i in range(1, len(handRight), 2)]
    dx = xs[1] - ANCHOR_X
    dy = ys[1] - ANCHOR_Y
    xs = [v if v == 0 else v - dx for v in xs]
    ys = [v if v == 0 else v - dy for v in ys]
    flat = []
    pairs = []
    for i in range(len(xs)):
        pairs.append((int(xs[i]), int(ys[i])))
        flat.append(xs[i])
        flat.append(ys[i])
    return (flat, pairs)
|
def dummyScaleBody(handRight, distance):
    """Scale flat [x, y, ...] keypoints so `distance` maps to a 500-unit
    reference, then double every coordinate (net factor: 1000 / distance).

    Returns (flatCoords, intPairs). Raises ZeroDivisionError if distance is 0.
    """
    ref = 500
    scale = (ref / distance)
    handRightX = [handRight[i] for i in range(0, len(handRight), 2)]
    handRightY = [handRight[i] for i in range(1, len(handRight), 2)]
    handRightResults = []
    handRightPoints = []
    for i in range(len(handRightX)):
        # BUG FIX: the original doubling loop ran over len(handRightY) while
        # indexing handRightX, which breaks for odd-length input; scaling and
        # doubling are applied per element instead (same float results, since
        # multiplication by 2 is exact).
        handRightX[i] = handRightX[i] * scale * 2
        handRightY[i] = handRightY[i] * scale * 2
        handRightPoints.append((int(handRightX[i]), int(handRightY[i])))
        handRightResults.append(handRightX[i])
        handRightResults.append(handRightY[i])
    return (handRightResults, handRightPoints)
|
def plot_skeleton(fileName, background, isMove, isScale):
    """Draw the right-hand skeleton from an OpenPose JSON file.

    fileName: path to an OpenPose output JSON ('people' entries with
        'hand_right_keypoints_2d').
    background: image filename appended to a hard-coded absolute directory.
        NOTE(review): this Windows-specific path will break on other machines.
    isScale / isMove: toggle normalization via the external `scale` / `move`
        modules (semantics assumed from their names — TODO confirm).
    Returns the frame drawn for the last person in the file.
    """
    js = json.loads(open(fileName).read())
    for items in js['people']:
        handRight = items['hand_right_keypoints_2d']
        # helper.getCoordPoints / helper.removePoints are defined outside this
        # file — presumably they split the raw [x, y, confidence, ...] stream.
        handCoord = helper.getCoordPoints(handRight)
        handPoints = helper.removePoints(handRight)
        # Reference distance: point 0 (wrist) to point 9 (flat indices 18/19).
        p1 = [handPoints[0], handPoints[1]]
        p2 = [handPoints[18], handPoints[19]]
        distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
        if isScale:
            (handRightResult, handRightPoints) = scale.scalePoints(handPoints, distance)
        else:
            handRightResult = handPoints
            handRightPoints = handCoord
        if isMove:
            (handRightResult, handRightPoints) = move.centerPoints(handRightResult)
        # Re-measure the reference distance after normalization (the value is
        # not used again below — kept for parity with the original flow).
        p1 = [handRightResult[0], handRightResult[1]]
        p2 = [handRightResult[18], handRightResult[19]]
        distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
        frame = cv2.imread(('C:\\123Drive\\Python\\Sign_Language_Interpreter\\' + background))
        for pair in POSE_PAIRS:
            partA = pair[0]
            partB = pair[1]
            # Non-empty tuples are always truthy, so this effectively only
            # guards None/missing entries.
            if (handRightPoints[partA] and handRightPoints[partB]):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], (0, 255, 255), 2)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
    return frame
|
def plot_points(points, background):
    """Draw a hand skeleton from flat [x, y, ...] coordinates onto the image
    file `background` and return the resulting BGR frame."""
    xs = [points[i] for i in range(0, len(points), 2)]
    ys = [points[i] for i in range(1, len(points), 2)]
    coords = []
    for i in range(len(xs)):
        coords.append((int(xs[i]), int(ys[i])))
    frame = cv2.imread(background)
    for partA, partB in POSE_PAIRS:
        # Non-empty tuples are always truthy; this only guards missing entries.
        if coords[partA] and coords[partB]:
            cv2.line(frame, coords[partA], coords[partB], (0, 255, 255), 2)
            cv2.circle(frame, coords[partA], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
    return frame
|
def plot_db():
    """Render every right-hand sample stored in the local SQLite dataset.

    Reads columns x1..x21 / y1..y21 from rightHandDataset, draws each row's
    skeleton on the big background image and returns a list of RGB frames,
    one per database row.
    """
    frames = []
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    background = 'big_background.png'
    connection = sqlite3.connect('db\\main_dataset.db')
    cursor = connection.cursor()
    # Column names are generated from a fixed range — nothing user-supplied.
    query = 'SELECT x1,y1'
    for col in range(2, 22):
        query = query + ',x' + str(col) + ',y' + str(col)
    query = query + ' FROM rightHandDataset WHERE 1'
    cursor.execute(query)
    for row in cursor.fetchall():
        coords = []
        for i in range(0, len(row), 2):
            coords.append((int(row[i]), int(row[i + 1])))
        frame = cv2.imread(background)
        for partA, partB in POSE_PAIRS:
            if coords[partA] and coords[partB]:
                cv2.line(frame, coords[partA], coords[partB], (0, 255, 255), 2)
                cv2.circle(frame, coords[partA], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
                cv2.circle(frame, coords[partB], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    return frames
|
def plot_db_label(label):
    """Render every right-hand sample whose label matches `label`.

    label: class label; leading/trailing whitespace is stripped before matching.
    Returns a list of RGB frames, one per matching rightHandDataset row.
    """
    ret_frame = []
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    background = 'big_background.png'
    connection = sqlite3.connect('db\\main_dataset.db')
    crsr = connection.cursor()
    label = label.strip()
    # Column names come from a fixed range — safe to build into the SQL text.
    sql = 'SELECT x1,y1'
    for x in range(2, 22):
        sql = sql + ',x' + str(x) + ',y' + str(x)
    # SECURITY FIX: bind the label as a parameter instead of splicing the
    # quoted value into the SQL string (which broke on quotes and allowed
    # SQL injection).
    sql = sql + ' FROM rightHandDataset WHERE label = ?'
    crsr.execute(sql, (label,))
    feature_res = crsr.fetchall()
    for row in feature_res:
        handRightPoints = []
        for i in range(0, len(row), 2):
            handRightPoints.append((int(row[i]), int(row[i + 1])))
        frame = cv2.imread(background)
        for pair in POSE_PAIRS:
            partA = pair[0]
            partB = pair[1]
            if (handRightPoints[partA] and handRightPoints[partB]):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], (0, 255, 255), 2)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (0, 0, 255), thickness=(- 1), lineType=cv2.FILLED)
        ret_frame.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    return ret_frame
|
def plot_dataset(handRightPoints, color):
    """Draw a colored hand skeleton on a '<Color>_background.jpg' under PSL\\.

    handRightPoints: sequence of (x, y) int tuples, one per hand keypoint
        (OpenPose 21-point hand order assumed from the pair table — confirm
        against callers).
    color: background color name; 'white' selects the outlined drawing style.
    Returns the drawn BGR frame.
    """
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    color = color.capitalize()
    background = (color + '_background.jpg')
    frame = cv2.imread(('PSL\\' + background))
    count = 0  # advances with each pair so every limb gets its own color
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            if (color == 'White'):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    # FIX: the original also accumulated the frame into an unused ret_frame
    # list; removed — callers only receive the single drawn frame.
    return frame
|
def save_old_dataset(handRightPoints, color, name):
    """Draw a colored hand skeleton and save it as
    temp_old_dataset_processing/<name>.png.

    handRightPoints: sequence of (x, y) int tuples, one per hand keypoint.
    color: background color name; 'white' selects the outlined drawing style.
    name: output filename stem (no extension).
    """
    POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    color = color.capitalize()
    background = (color + '_background.jpg')
    frame = cv2.imread(background)
    count = 0  # advances with each pair so every limb gets its own color
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            if (color == 'White'):
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, colors[count], thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 15, (0, 0, 0), thickness=5, lineType=(- 1))
            else:
                cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
                cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
                cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    # FIX: write via an explicit path instead of os.chdir()/chdir('..') —
    # chdir mutates process-global state and was never restored if imwrite
    # raised, leaving the process in the wrong working directory.
    cv2.imwrite(os.path.join('temp_old_dataset_processing', name + '.png'), frame)
|
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw the upper-body pose plus both hand skeletons on the black
    background image and return the result as an RGB frame.

    posePoints / handRightPoints / handLeftPoints: sequences of (x, y) int
    tuples. Pose points with a 0 coordinate are treated as undetected and
    skipped.
    """
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    # The color is hard-wired to black in this function, so only the
    # dark-background drawing style is ever exercised.
    frame = cv2.imread('Black_background.jpg')

    def _draw(pairs, pts, thickA, thickB, require_nonzero):
        # Draw one limb set; the color advances with the pair index.
        count = 0
        for (partA, partB) in pairs:
            visible = (pts[partA] and pts[partB])
            if (visible and require_nonzero):
                # A 0 coordinate marks an undetected pose point — skip it.
                visible = ((pts[partA][0] != 0) and (pts[partA][1] != 0) and (pts[partB][0] != 0) and (pts[partB][1] != 0))
            if visible:
                cv2.line(frame, pts[partA], pts[partB], colors[count], 10)
                cv2.circle(frame, pts[partA], 5, (0, 0, 255), thickness=thickA, lineType=cv2.FILLED)
                cv2.circle(frame, pts[partB], 5, (255, 255, 255), thickness=thickB, lineType=cv2.FILLED)
            count += 1

    _draw(POSE_PAIRS, posePoints, 10, 15, True)
    _draw(HAND_PAIRS, handRightPoints, 3, 4, False)
    _draw(HAND_PAIRS, handLeftPoints, 3, 4, False)
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
def plotPoseDataset():
    """Plot every pose + two-hand sample stored in the pose dataset.

    Reads poseDataset rows laid out as Rx1..Ry21 (columns 0-41, right hand),
    Lx1..Ly21 (columns 42-83, left hand) and Px1..Py13 (columns 84-109, body
    pose), draws each sample on the black background image and shows it with
    matplotlib, one window per row.
    """
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 9], [9, 11], [0, 10], [10, 12]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    connection = sqlite3.connect('..\\data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Column names come from fixed ranges — safe to build into the SQL text.
    sql = 'SELECT Rx1,Ry1'
    for x in range(2, 22):
        sql = sql + ',Rx' + str(x) + ',Ry' + str(x)
    for x in range(1, 22):
        sql = sql + ',Lx' + str(x) + ',Ly' + str(x)
    for x in range(1, 14):
        sql = sql + ',Px' + str(x) + ',Py' + str(x)
    sql = sql + ' FROM poseDataset WHERE 1'
    crsr.execute(sql)
    features = crsr.fetchall()

    def _draw(frame, pairs, pts, thickA, thickB, require_nonzero):
        # Draw one limb set; the color advances with the pair index.
        count = 0
        for (partA, partB) in pairs:
            visible = (pts[partA] and pts[partB])
            if (visible and require_nonzero):
                # A 0 coordinate marks an undetected pose point — skip it.
                visible = ((pts[partA][0] != 0) and (pts[partA][1] != 0) and (pts[partB][0] != 0) and (pts[partB][1] != 0))
            if visible:
                cv2.line(frame, pts[partA], pts[partB], colors[count], 10)
                cv2.circle(frame, pts[partA], 5, (0, 0, 255), thickness=thickA, lineType=cv2.FILLED)
                cv2.circle(frame, pts[partB], 5, (255, 255, 255), thickness=thickB, lineType=cv2.FILLED)
            count += 1

    for row in features:
        posePoints = [(int(row[i]), int(row[(i + 1)])) for i in range(84, 110, 2)]
        handRightPoints = [(int(row[i]), int(row[(i + 1)])) for i in range(0, 42, 2)]
        # BUG FIX: the left-hand coordinates live in columns 42-83; the
        # original re-read the right-hand columns (0-41) here, so the left
        # hand was always drawn as a copy of the right hand.
        handLeftPoints = [(int(row[i]), int(row[(i + 1)])) for i in range(42, 84, 2)]
        # Color is fixed to black, so only the dark-background style is used.
        frame = cv2.imread('Black_background.jpg')
        _draw(frame, POSE_PAIRS, posePoints, 10, 15, True)
        _draw(frame, HAND_PAIRS, handRightPoints, 3, 4, False)
        _draw(frame, HAND_PAIRS, handLeftPoints, 3, 4, False)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        fig2 = plt.figure(figsize=(10, 10))
        ax3 = fig2.add_subplot(111)
        ax3.imshow(frame, interpolation='none')
        plt.imshow(frame)
        plt.show()
|
def rotate(point, angle, center_point=(0, 0)):
    """Rotate `point` counter-clockwise by `angle` degrees about
    `center_point` (origin by default).

    Returns the rotated point as a tuple of ints (truncated toward zero).
    """
    theta = radians(angle % 360)
    cos_t = cos(theta)
    sin_t = sin(theta)
    # Translate to the origin, apply the 2-D rotation matrix, translate back.
    dx = point[0] - center_point[0]
    dy = point[1] - center_point[1]
    rx = (dx * cos_t) - (dy * sin_t)
    ry = (dx * sin_t) + (dy * cos_t)
    return (int(rx + center_point[0]), int(ry + center_point[1]))
|
def rotate_file(fileName):
    """Load an OpenPose JSON file, normalize the right hand (scale then
    center via the external `scale` / `move` modules), rotate it -60 degrees
    about its first point and return the flattened result.

    When the file contains several people, the value for the last one wins.
    """
    data = json.loads(open(fileName).read())
    for person in data['people']:
        raw = person['hand_right_keypoints_2d']
        pts = helper.removePoints(raw)
        # Reference span: point 0 (wrist) to point 9 (flat indices 18/19).
        span = math.sqrt(((pts[0] - pts[18]) ** 2) + ((pts[1] - pts[19]) ** 2))
        flat_scaled, _pairs = scale.scalePoints(pts, span)
        _flat, centered = move.centerPoints(flat_scaled)
        # Keep the anchor point fixed; rotate everything else around it.
        anchor = centered[0]
        rotated = [anchor] + [rotate(p, (- 60), anchor) for p in centered[1:]]
        newPoints = helper.seperate_points(rotated)
    return newPoints
|
def rotate_points(points, angle):
    """Rotate every coordinate pair in `points` by `angle` degrees around
    the first pair, which itself stays fixed as the anchor.

    Returns the new list of pairs in the same order.
    """
    pairs = helper.join_points(points)
    anchor = pairs[0]
    return [anchor] + [rotate(p, angle, anchor) for p in pairs[1:]]
|
def rotate_line(origin, point, angle):
    """Rotate `point` counterclockwise by `angle` radians around `origin`.

    Returns the rotated point as a (float, float) tuple.
    """
    ox, oy = origin
    dx = point[0] - ox
    dy = point[1] - oy
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    qx = ox + (cos_a * dx) - (sin_a * dy)
    qy = oy + (sin_a * dx) + (cos_a * dy)
    return (qx, qy)
|
def scalePoints(handRight, distance):
    """Scale flat [x, y, ...] hand keypoints so `distance` maps to a 50-unit
    reference, then double every coordinate (net factor: 100 / distance).

    Per the callers, `distance` is the wrist-to-point-9 span of the hand.
    Returns (flatCoords, intPairs). Raises ZeroDivisionError if distance is 0.
    """
    ref = 50
    scale = (ref / distance)
    handRightX = [handRight[i] for i in range(0, len(handRight), 2)]
    handRightY = [handRight[i] for i in range(1, len(handRight), 2)]
    handRightResults = []
    handRightPoints = []
    for i in range(len(handRightX)):
        # BUG FIX: the original doubling loop ran over len(handRightY) while
        # indexing handRightX, which breaks for odd-length input; scaling and
        # doubling are applied per element instead (same float results, since
        # multiplication by 2 is exact).
        handRightX[i] = handRightX[i] * scale * 2
        handRightY[i] = handRightY[i] * scale * 2
        handRightPoints.append((int(handRightX[i]), int(handRightY[i])))
        handRightResults.append(handRightX[i])
        handRightResults.append(handRightY[i])
    return (handRightResults, handRightPoints)
|
def dummy_scalePoints(handRight, distance):
    """Scale flat [x, y, ...] hand keypoints so `distance` maps to a 200-unit
    reference, then double every coordinate (net factor: 400 / distance).

    Larger-canvas variant of scalePoints. Returns (flatCoords, intPairs).
    Raises ZeroDivisionError if distance is 0.
    """
    ref = 200
    scale = (ref / distance)
    handRightX = [handRight[i] for i in range(0, len(handRight), 2)]
    handRightY = [handRight[i] for i in range(1, len(handRight), 2)]
    handRightResults = []
    handRightPoints = []
    for i in range(len(handRightX)):
        # BUG FIX: the original doubling loop ran over len(handRightY) while
        # indexing handRightX, which breaks for odd-length input; scaling and
        # doubling are applied per element instead (same float results, since
        # multiplication by 2 is exact).
        handRightX[i] = handRightX[i] * scale * 2
        handRightY[i] = handRightY[i] * scale * 2
        handRightPoints.append((int(handRightX[i]), int(handRightY[i])))
        handRightResults.append(handRightX[i])
        handRightResults.append(handRightY[i])
    return (handRightResults, handRightPoints)
|
def synthesize(angle):
    """Augment the right-hand dataset with rotated copies of every sample.

    For each stored row, two new rows are inserted: the keypoints rotated by
    -angle and by +angle degrees (about the first point, via
    rotate.rotate_points), keeping the original label.
    """
    connection = sqlite3.connect('data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Column names x1..x21 / y1..y21 come from a fixed range — not user input.
    sql = 'SELECT x1,y1'
    for x in range(2, 22):
        sql = sql + ',x' + str(x) + ',y' + str(x)
    sql = sql + ' FROM rightHandDataset WHERE 1'
    crsr.execute(sql)
    features = crsr.fetchall()
    crsr.execute('SELECT label FROM rightHandDataset WHERE 1')
    labels = crsr.fetchall()
    # FIX: a single parameterized INSERT (NULL id + 42 coordinates + label)
    # replaces the original thousand-character string concatenation — safer
    # (no quoting/injection issues) and actually readable.
    insert_sql = ('INSERT INTO rightHandDataset VALUES (NULL,' + ','.join((['?'] * 43)) + ')')
    for x in range(len(features)):
        label = str(labels[x][0])
        for signed_angle in ((- angle), angle):
            rotated = rotate.rotate_points(features[x], signed_angle)
            handRightResults = helper.seperate_points(rotated)
            crsr.execute(insert_sql, (tuple(handRightResults[:42]) + (label,)))
    connection.commit()
    connection.close()
|
def synthesize_multiple(angle1, angle2):
    """Augment the right-hand dataset with rotations at two angles.

    Each stored row is re-inserted four times: rotated by -angle1, +angle1,
    -angle2 and +angle2 degrees (about the first point, via
    rotate.rotate_points), keeping the original label.
    """
    connection = sqlite3.connect('..\\..\\data\\db\\main_dataset.db')
    crsr = connection.cursor()
    # Column names x1..x21 / y1..y21 come from a fixed range — not user input.
    sql = 'SELECT x1,y1'
    for x in range(2, 22):
        sql = sql + ',x' + str(x) + ',y' + str(x)
    sql = sql + ' FROM rightHandDataset WHERE 1'
    crsr.execute(sql)
    features = crsr.fetchall()
    crsr.execute('SELECT label FROM rightHandDataset WHERE 1')
    labels = crsr.fetchall()
    # FIX: a single parameterized INSERT (NULL id + 42 coordinates + label)
    # replaces the four thousand-character string concatenations — safer
    # (no quoting/injection issues) and actually readable.
    insert_sql = ('INSERT INTO rightHandDataset VALUES (NULL,' + ','.join((['?'] * 43)) + ')')
    for x in range(len(features)):
        label = str(labels[x][0])
        for signed_angle in ((- angle1), angle1, (- angle2), angle2):
            rotated = rotate.rotate_points(features[x], signed_angle)
            handRightResults = helper.seperate_points(rotated)
            crsr.execute(insert_sql, (tuple(handRightResults[:42]) + (label,)))
    connection.commit()
    connection.close()
|
def re_train(mode):
    """Rebuild the dataset and retrain one of the two recognisers.

    mode 0: recreate/populate the alphabet table, synthesize extra samples,
    and retrain the alphabet model.  mode 1: recreate/populate the word
    (pose) table and retrain the word model.  Any other mode is a no-op.
    """
    if mode == 0:
        dbh.create_table()
        dbh.populate_db()
        synth.synthesize(20)
        alphabet_model.train_alphabets()
    elif mode == 1:
        dbh.create_pose_table()
        dbh.populate_words()
        word_model.train_words()
|
def match_ann(fileName):
    """Classify the sign in an OpenPose keypoint JSON file.

    Normalises the detected pose and hand keypoints (scaling plus
    translation onto the body's wrists), rebuilds the label encoding from
    the poseDataset table, and runs the module-level `model` on the
    resulting feature vector.

    Returns the predicted label string for the first person in the file,
    or 'no confidence' when the hand detections are too unreliable.
    """
    js = json.loads(open(fileName).read())
    for items in js['people']:
        pose = items['pose_keypoints_2d']
        handRight = items['hand_right_keypoints_2d']
        handLeft = items['hand_left_keypoints_2d']
        # per-hand confidence scores derived from the keypoint confidences
        RightConfPoints = helper.confidencePoints(handRight)
        LeftConfPoints = helper.confidencePoints(handLeft)
        RightConfidence = helper.confidence(RightConfPoints)
        LeftConfidence = helper.confidence(LeftConfPoints)
        # require a confident right hand, and a left hand that is either
        # confidently present (> 12) or confidently absent (< 2)
        if (RightConfidence > 12):
            if ((LeftConfidence > 12) or (LeftConfidence < 2)):
                pose_points = helper.removePoints(pose)
                # scale the body by the distance between its first two keypoints
                p1 = [pose_points[0], pose_points[1]]
                p2 = [pose_points[2], pose_points[3]]
                distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
                (scaled_results, scaled_points) = norm.scaleBody(pose_points, distance)
                (poseResults, posePoints) = norm.moveBody(scaled_results)
                # right hand: scale by the point0-point9 distance, then anchor
                # it to the body's right wrist (poseResults[8], poseResults[9])
                hand_right_points = helper.removePoints(handRight)
                p1 = [hand_right_points[0], hand_right_points[1]]
                p2 = [hand_right_points[18], hand_right_points[19]]
                distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
                (RightResult, Points) = scale.scalePoints(hand_right_points, distance)
                (handRightResults, handRightPoints) = norm.move_to_wrist(RightResult, poseResults[8], poseResults[9])
                if (LeftConfidence > 3):
                    # same normalisation for the left hand, anchored to the left wrist
                    hand_left_points = helper.removePoints(handLeft)
                    p1 = [hand_left_points[0], hand_left_points[1]]
                    p2 = [hand_left_points[18], hand_left_points[19]]
                    distance = math.sqrt((((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)))
                    if (distance != 0):
                        (LeftResult, Points) = scale.scalePoints(hand_left_points, distance)
                        (handLeftResults, handLeftPoints) = norm.move_to_wrist(LeftResult, poseResults[14], poseResults[15])
                    else:
                        # degenerate hand (zero reference distance): skip scaling
                        (handLeftResults, handLeftPoints) = norm.move_to_wrist(hand_left_points, poseResults[14], poseResults[15])
                else:
                    # left hand absent: substitute a zero vector (21 x/y pairs)
                    handLeftResults = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                # keep only the body coordinates used as features
                posePoints = []
                for x in range(18):
                    posePoints.append(poseResults[x])
                for x in range(30, 38):
                    posePoints.append(poseResults[x])
                results = ((handRightResults + handLeftResults) + posePoints)
                # rebuild the label encoder and feature scaler from the training
                # table so the predicted class index maps back to a text label.
                # NOTE(review): the connection is never closed on this path.
                connection = sqlite3.connect('data\\db\\main_dataset.db')
                crsr = connection.cursor()
                sql = 'SELECT Rx1,Ry1'
                for x in range(2, 22):
                    sql = ((((sql + ',Rx') + str(x)) + ',Ry') + str(x))
                for x in range(1, 22):
                    sql = ((((sql + ',Lx') + str(x)) + ',Ly') + str(x))
                for x in range(1, 14):
                    sql = ((((sql + ',Px') + str(x)) + ',Py') + str(x))
                sql = (sql + ' FROM poseDataset WHERE 1')
                crsr.execute(sql)
                feature_res = crsr.fetchall()
                feature_res = np.asarray(feature_res)
                features = []
                for x in feature_res:
                    features.append(x)
                crsr.execute('SELECT label FROM poseDataset WHERE 1')
                label_res = crsr.fetchall()
                labels = []
                for x in label_res:
                    labels.append(x)
                le = preprocessing.LabelEncoder()
                label_encoded = le.fit_transform(labels)
                label_encoded = to_categorical(label_encoded)
                # NOTE(review): this split is random on every call, so the fitted
                # scaler (and therefore the prediction) is not deterministic —
                # consider fitting on all features or fixing random_state.
                (X_train, X_test, y_train, y_test) = train_test_split(features, label_encoded, test_size=0.2)
                scaler = StandardScaler().fit(X_train)
                X_train = scaler.transform(X_train)
                X_test = scaler.transform(X_test)
                y_pred = model.predict(scaler.transform(np.array([results])))
                C = np.argmax(y_pred)
                result = le.inverse_transform([C])
                return result[0]
            else:
                return 'no confidence'
        else:
            return 'no confidence'
|
def signal_handler(signal, frame):
    """Ctrl-C handler: remove the temporary folders, stop OpenPose, and exit."""
    for temp_dir in ('Keypoints', 'gui\\Learn_images'):
        shutil.rmtree(temp_dir, ignore_errors=True, onerror=handleRemoveReadonly)
    os.system('taskkill /f /im OpenPoseDemo.exe')
    print('All done')
    sys.exit(0)
|
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror hook: clear read-only permissions and retry.

    Only a failed os.rmdir/os.remove caused by EACCES is retried; any
    other failure is re-raised as a bare Exception (original behaviour).
    """
    error = exc[1]
    retryable = func in (os.rmdir, os.remove) and error.errno == errno.EACCES
    if not retryable:
        raise Exception
    # make the path writable by everyone, then retry the failed call
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    func(path)
|
@eel.expose
def skip_Sign():
    """Eel-exposed hook: flag the current sign as skipped by the user."""
    global skip_sign
    print('skip_sign')
    skip_sign = True
|
@eel.expose
def openposelearn():
    """Launch OpenPoseDemo.exe for learn mode.

    Starts OpenPose with hand tracking enabled; per-frame keypoint JSON
    files are written to the temporary Keypoints folder.
    """
    print('Starting OpenPose')
    # the demo command below uses paths relative to bin\openpose
    os.chdir('bin\\openpose')
    subprocess.Popen('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints --net_resolution 128x128 --number_people_max 1', shell=True)
    os.chdir('..\\..')
|
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw a skeleton from body and hand keypoints onto a black frame.

    Renders limb segments and joint circles for the body pose and both
    hands, returning the resulting image.  Body points at (0, 0) are
    treated as undetected and not drawn.

    NOTE(review): this function is re-defined later in this module; the
    later definition shadows this one at import time.
    """
    # joint-index pairs defining which keypoints get connected by a segment
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # BGR colours, indexed per drawn segment
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    background = 'PSL\\BLACK_background.jpg'
    frame = cv2.imread(background)
    count = 0
    # body skeleton: skip any segment with an undetected endpoint
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
            cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
            cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
            cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    count = 0
    # right-hand skeleton
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
            cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    count = 0
    # left-hand skeleton
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handLeftPoints[partA] and handLeftPoints[partB]):
            cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
            cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    return frame
|
@eel.expose
def learning():
    """Interactive learning loop driven from the GUI.

    For each sign in the module-level ``fileNames``/``labels`` (presumably
    parallel lists — confirm against caller), keeps rendering the user's
    live keypoints and classifying them until the prediction matches the
    expected label, or the user presses skip.
    """
    global skip_sign
    '\n storing json files to temporary folder [Keypoints]\n Creating temp folder and initializing with zero padded json file\n '
    dirName = 'Keypoints'
    fileName = 'PSL\\000000000000_keypoints.json'
    try:
        os.mkdir(dirName)
        # seed the folder with a zero-padded placeholder keypoint file
        shutil.copy(fileName, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
    label = ''
    for x in range(len(fileNames)):
        skip_sign = False
        eel.get_Alphabet((x + 1))
        # loop until the user produces the expected sign (or skips)
        while (label != labels[x]):
            for entry in os.scandir('Keypoints'):
                if entry.is_file():
                    if (os.path.splitext(entry)[1] == '.json'):
                        filePlotName = entry.name
                        try:
                            js = json.loads(open(('Keypoints\\' + filePlotName)).read())
                        except ValueError:
                            # partially-written file from OpenPose; reuse previous js
                            print('Decoding JSON has failed')
                            pass
                        for items in js['people']:
                            pose = items['pose_keypoints_2d']
                            handRight = items['hand_right_keypoints_2d']
                            handLeft = items['hand_left_keypoints_2d']
                            pose_points = helper.removePoints(pose)
                            posePoints = helper.join_points(pose_points)
                            hand_right_Points = helper.removePoints(handRight)
                            handRightPoints = helper.join_points(hand_right_Points)
                            hand_left_points = helper.removePoints(handLeft)
                            handLeftPoints = helper.join_points(hand_left_points)
                            frame = plotPose(posePoints, handRightPoints, handLeftPoints)
                            # only publish frames where the right hand was detected
                            if (hand_right_Points[0] != 0):
                                cv2.imwrite((('gui\\Learn_images\\' + filePlotName) + '.jpg'), frame)
                            frame = cv2.imread('PSL\\BLACK_background.jpg')
                            eel.get_fileName(filePlotName)
                            eel.sleep(0.05)
            if (skip_sign == True):
                break
            try:
                # classify the latest keypoint files to update `label`
                for entry in os.scandir('Keypoints'):
                    if entry.is_file():
                        if (os.path.splitext(entry)[1] == '.json'):
                            fileName = entry.name
                            try:
                                label = alphabet.match_ann(('Keypoints\\' + fileName))
                            except:
                                pass
            except UnboundLocalError:
                print('UnboundLocalError')
        eel.get_status()
    print('end while')
    return True
|
def on_close(page, sockets):
    """Eel close callback: log the closed page and any sockets still open."""
    print(f'{page} closed')
    print(f'Still have sockets open to {sockets}')
|
def signal_handler(signal, frame):
    """Ctrl-C handler: remove the Keypoints temp folder, stop OpenPose, exit.

    NOTE(review): re-defines the earlier signal_handler in this module
    (which also removed gui\\Learn_images); this later definition is the
    one in effect at import time.
    """
    shutil.rmtree('Keypoints', ignore_errors=True, onerror=handleRemoveReadonly)
    os.system('taskkill /f /im OpenPoseDemo.exe')
    print('All done')
    sys.exit(0)
|
def handleRemoveReadonly(func, path, exc):
    """shutil.rmtree onerror hook: strip read-only permissions and retry.

    Retries only rmdir/remove failures caused by EACCES; anything else is
    re-raised as a bare Exception.  Duplicate of the earlier definition
    in this module.
    """
    excvalue = exc[1]
    if ((func in (os.rmdir, os.remove)) and (excvalue.errno == errno.EACCES)):
        # make the path writable by everyone, then retry the failed call
        os.chmod(path, ((stat.S_IRWXU | stat.S_IRWXG) | stat.S_IRWXO))
        func(path)
    else:
        raise Exception
|
def plotPose(posePoints, handRightPoints, handLeftPoints):
    """Draw a skeleton from body and hand keypoints onto a black frame.

    Duplicate of the earlier plotPose definition; being later in the
    module, this is the definition actually bound to the name.  Body
    points at (0, 0) are treated as undetected and not drawn.
    """
    # joint-index pairs defining which keypoints get connected by a segment
    POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [0, 15], [15, 17], [0, 16], [16, 18]]
    HAND_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
    # BGR colours, indexed per drawn segment
    colors = [[0, 0, 130], [0, 0, 175], [0, 0, 210], [0, 0, 250], [0, 200, 160], [0, 180, 150], [0, 230, 186], [0, 255, 255], [82, 201, 8], [82, 204, 0], [92, 230, 0], [102, 252, 6], [197, 88, 17], [204, 82, 0], [179, 71, 0], [227, 94, 5], [204, 0, 163], [200, 0, 163], [196, 0, 163], [230, 0, 184]]
    background = 'PSL\\BLACK_background.jpg'
    frame = cv2.imread(background)
    count = 0
    # body skeleton: skip any segment with an undetected endpoint
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (posePoints[partA] and posePoints[partB] and (posePoints[partA][0] != 0) and (posePoints[partA][1] != 0) and (posePoints[partB][0] != 0) and (posePoints[partB][1] != 0)):
            cv2.line(frame, posePoints[partA], posePoints[partB], colors[count], 10)
            cv2.circle(frame, posePoints[partA], 5, (0, 0, 255), thickness=10, lineType=cv2.FILLED)
            cv2.circle(frame, posePoints[partB], 5, (255, 255, 255), thickness=15, lineType=cv2.FILLED)
        count += 1
    count = 0
    # right-hand skeleton
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handRightPoints[partA] and handRightPoints[partB]):
            cv2.line(frame, handRightPoints[partA], handRightPoints[partB], colors[count], 10)
            cv2.circle(frame, handRightPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handRightPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    count = 0
    # left-hand skeleton
    for pair in HAND_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if (handLeftPoints[partA] and handLeftPoints[partB]):
            cv2.line(frame, handLeftPoints[partA], handLeftPoints[partB], colors[count], 10)
            cv2.circle(frame, handLeftPoints[partA], 5, (0, 0, 255), thickness=3, lineType=cv2.FILLED)
            cv2.circle(frame, handLeftPoints[partB], 5, (255, 255, 255), thickness=4, lineType=cv2.FILLED)
        count += 1
    return frame
|
@eel.expose
def exit_openpose():
    """Eel-exposed: force-kill the OpenPose demo process (Windows taskkill)."""
    os.system('taskkill /f /im OpenPoseDemo.exe')
|
@eel.expose
def openpose():
    """Launch OpenPoseDemo.exe and prepare the Keypoints temp folder.

    Starts OpenPose with hand tracking (JSON output to Keypoints), then
    creates the Keypoints folder seeded with a zero-padded placeholder
    keypoint file.
    """
    print('Starting OpenPose')
    # the demo command below uses paths relative to bin\openpose
    os.chdir('bin\\openpose')
    subprocess.Popen('bin\\OpenPoseDemo.exe --hand --write_json ..\\..\\Keypoints --net_resolution 128x128 --number_people_max 1', shell=True)
    os.chdir('..\\..')
    '\n Creating temp folder and initializing with zero padded json file\n '
    dirName = 'Keypoints'
    fileName = 'PSL\\000000000000_keypoints.json'
    try:
        os.mkdir(dirName)
        shutil.copy(fileName, dirName)
        print('Directory ', dirName, ' Created ')
    except FileExistsError:
        print('Directory ', dirName, ' already exists')
|
@eel.expose
def match(speech, mode):
    """Classify the current keypoint files and optionally speak the result.

    mode 0 uses the alphabet recogniser, mode 1 the word recogniser.
    When ``speech`` is 1 and a new, confident label is found, plays the
    matching mp3.  Returns the (module-level) last predicted label.
    """
    global label, lastLabel
    '\n Load each .json file from Keypoints folder and\n predict the label\n '
    for entry in os.scandir('Keypoints'):
        if entry.is_file():
            if (os.path.splitext(entry)[1] == '.json'):
                filePlotName = entry.name
                try:
                    js = json.loads(open(('Keypoints\\' + filePlotName)).read())
                    for items in js['people']:
                        pose = items['pose_keypoints_2d']
                        handRight = items['hand_right_keypoints_2d']
                        handLeft = items['hand_left_keypoints_2d']
                        pose_points = helper.removePoints(pose)
                        posePoints = helper.join_points(pose_points)
                        hand_right_Points = helper.removePoints(handRight)
                        handRightPoints = helper.join_points(hand_right_Points)
                        hand_left_points = helper.removePoints(handLeft)
                        handLeftPoints = helper.join_points(hand_left_points)
                        frame = plotPose(posePoints, handRightPoints, handLeftPoints)
                        cv2.imwrite((('gui\\Learn_images\\' + filePlotName) + '.jpg'), frame)
                        frame = cv2.imread('PSL\\BLACK_background.jpg')
                        eel.get_fileName(filePlotName)
                except:
                    # NOTE(review): bare except hides more than JSON errors
                    print('Decoding JSON has failed')
                    pass
                try:
                    if (mode == 0):
                        label = alphabet.match_ann(('Keypoints\\' + filePlotName))
                    if (mode == 1):
                        label = word.match_ann(('Keypoints\\' + filePlotName))
                    print(label)
                except Exception:
                    pass
    # only react to a fresh, confident prediction
    if ((label != 'no match') and (label != 'no confidence') and (label != lastLabel)):
        lastLabel = label
        if (speech == 1):
            try:
                mp3 = (('data\\speech\\' + label) + '.mp3')
                mixer.init()
                mixer.music.load(mp3)
                mixer.music.play()
            except:
                pass
    return label
|
def test_file1_method1():
    """Sanity check: x + 1 must equal y.

    BUG FIX: the original also asserted ``x == y`` (i.e. 5 == 6), which
    can never hold, so this test failed unconditionally.  Removed the
    contradictory assertion to match test_file1_method2.
    """
    x = 5
    y = 6
    assert ((x + 1) == y), 'test failed'
|
def test_file1_method2():
    """Sanity check: incrementing the smaller value yields the larger one."""
    smaller, larger = 5, 6
    assert ((smaller + 1) == larger), 'test failed'
|
class CheffAEModel():
    """Thin inference wrapper around a pretrained AutoencoderKL.

    Restores the checkpoint at ``model_path``, moves the model to
    ``device``, puts it in eval mode, and exposes gradient-free
    ``encode``/``decode`` helpers.
    """

    def __init__(self, model_path: str, device: Union[(str, int, torch.device)]='cuda') -> None:
        self.device = device
        # silence the checkpoint-restore prints emitted during construction
        with contextlib.redirect_stdout(None):
            self.model = AutoencoderKL(embed_dim=3, ckpt_path=model_path, ddconfig={'double_z': True, 'z_channels': 3, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': (1, 2, 4), 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}, lossconfig={'target': 'torch.nn.Identity'})
        self.model = self.model.to(device)
        self.model.eval()

    @torch.no_grad()
    def encode(self, x: Tensor) -> Tensor:
        """Encode ``x`` deterministically (mode of the latent posterior)."""
        return self.model.encode(x).mode()

    @torch.no_grad()
    def decode(self, z: Tensor) -> Tensor:
        """Decode latent ``z`` back to image space."""
        return self.model.decode(z)
|
class CheffLDM():
    """Inference wrapper for an unconditional latent diffusion model.

    Restores a LatentDiffusion checkpoint (optionally with a separate
    first-stage autoencoder checkpoint) and exposes DDIM-based sampling
    and inpainting helpers.
    """

    def __init__(self, model_path: str, ae_path: Optional[str]=None, device: Union[(str, int, torch.device)]='cuda') -> None:
        self.device = device
        # silence checkpoint-restore prints
        with contextlib.redirect_stdout(None):
            self.model = self._init_checkpoint(model_path, ae_path)
        self.model = self.model.to(self.device)
        self.model.model = self.model.model.to(self.device)
        self.model.eval()
        # latent-space sample shape: (channels, size, size) of the UNet
        self.sample_shape = [self.model.model.diffusion_model.in_channels, self.model.model.diffusion_model.image_size, self.model.model.diffusion_model.image_size]

    @torch.no_grad()
    def sample(self, batch_size: int=1, sampling_steps: int=100, eta: float=1.0, decode: bool=True, *args, **kwargs) -> Tensor:
        """Draw unconditional DDIM samples; decoded to image space unless ``decode`` is False."""
        ddim = DDIMSampler(self.model)
        (samples, _) = ddim.sample(sampling_steps, batch_size=batch_size, shape=self.sample_shape, eta=eta, verbose=False)
        if decode:
            samples = self.model.decode_first_stage(samples)
        return samples

    @torch.no_grad()
    def sample_inpaint(self, target_img: Tensor, mask: Tensor, sampling_steps: int=100, eta: float=1.0, decode: bool=True, *args, **kwargs) -> Tensor:
        """Inpaint ``target_img`` in the region given by ``mask`` via masked DDIM sampling."""
        # deterministic latent of the target image (posterior mode)
        target_enc = self.model.encode_first_stage(target_img).mode()
        ddim = DDIMSampler(self.model)
        (samples, _) = ddim.sample(sampling_steps, batch_size=target_img.shape[0], shape=self.sample_shape, eta=eta, verbose=False, mask=mask, x0=target_enc)
        if decode:
            samples = self.model.decode_first_stage(samples)
        return samples

    def _init_checkpoint(self, model_path: str, ae_path: Optional[str]=None) -> LatentDiffusion:
        """Build a LatentDiffusion from the hard-coded config and load its weights."""
        config_dict = self._get_config_dict(ae_path)
        model = LatentDiffusion(**config_dict)
        state_dict = torch.load(model_path, map_location=self.device)
        model.load_state_dict(state_dict['state_dict'], strict=False)
        return model

    @staticmethod
    def _get_config_dict(ae_path: Optional[str]=None) -> Dict:
        # top-level LatentDiffusion config (unconditional variant)
        return {'linear_start': 0.0015, 'linear_end': 0.0295, 'num_timesteps_cond': 1, 'log_every_t': 200, 'timesteps': 1000, 'first_stage_key': 'image', 'image_size': 64, 'channels': 3, 'monitor': 'val/loss_simple_ema', 'unet_config': CheffLDM._get_unet_config_dict(), 'first_stage_config': CheffLDM._get_first_stage_config_dict(ae_path), 'cond_stage_config': '__is_unconditional__'}

    @staticmethod
    def _get_unet_config_dict() -> Dict:
        return {'target': 'cheff.ldm.modules.diffusionmodules.openaimodel.UNetModel', 'params': {'image_size': 64, 'in_channels': 3, 'out_channels': 3, 'model_channels': 224, 'attention_resolutions': [8, 4, 2], 'num_res_blocks': 2, 'channel_mult': [1, 2, 3, 4], 'num_head_channels': 32}}

    @staticmethod
    def _get_first_stage_config_dict(ae_path: Optional[str]=None) -> Dict:
        # first-stage AutoencoderKL; ckpt_path may be None (weights then come
        # from the main LatentDiffusion state_dict)
        return {'target': 'cheff.ldm.models.autoencoder.AutoencoderKL', 'params': {'embed_dim': 3, 'ckpt_path': ae_path, 'ddconfig': {'double_z': True, 'z_channels': 3, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': (1, 2, 4), 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}, 'lossconfig': {'target': 'torch.nn.Identity'}}}
|
class CheffLDMT2I(CheffLDM):
    """Text-conditional (text-to-image) variant of CheffLDM.

    Embeds the ``conditioning`` string via the model's learned
    conditioner (a BERTEmbedder per the config) and passes it through
    the DDIM sampler; the UNet config is swapped for a cross-attention
    variant.
    """

    @torch.no_grad()
    def sample(self, sampling_steps: int=100, eta: float=1.0, decode: bool=True, conditioning: str='', *args, **kwargs) -> Tensor:
        """Sample a single image conditioned on the ``conditioning`` text."""
        conditioning = self.model.get_learned_conditioning(conditioning)
        ddim = DDIMSampler(self.model)
        (samples, _) = ddim.sample(sampling_steps, conditioning=conditioning, batch_size=1, shape=self.sample_shape, eta=eta, verbose=False)
        if decode:
            samples = self.model.decode_first_stage(samples)
        return samples

    @torch.no_grad()
    def sample_inpaint(self, target_img: Tensor, mask: Tensor, sampling_steps: int=100, eta: float=1.0, decode: bool=True, conditioning: str='', *args, **kwargs) -> Tensor:
        """Text-conditioned inpainting of ``target_img`` under ``mask`` (batch size 1 only)."""
        assert (target_img.shape[0] == 1), 'Method implemented only for batch size = 1.'
        target_enc = self.model.encode_first_stage(target_img).mode()
        conditioning = self.model.get_learned_conditioning(conditioning)
        ddim = DDIMSampler(self.model)
        (samples, _) = ddim.sample(sampling_steps, conditioning=conditioning, batch_size=1, shape=self.sample_shape, eta=eta, verbose=False, mask=mask, x0=target_enc)
        if decode:
            samples = self.model.decode_first_stage(samples)
        return samples

    @staticmethod
    def _get_config_dict(ae_path: Optional[str]=None) -> Dict:
        # adds caption conditioning (cross-attention) on top of the base config
        return {'linear_start': 0.0015, 'linear_end': 0.0295, 'num_timesteps_cond': 1, 'log_every_t': 200, 'timesteps': 1000, 'first_stage_key': 'image', 'cond_stage_key': 'caption', 'image_size': 64, 'channels': 3, 'cond_stage_trainable': True, 'conditioning_key': 'crossattn', 'monitor': 'val/loss_simple_ema', 'scale_factor': 0.18215, 'unet_config': CheffLDMT2I._get_unet_config_dict(), 'first_stage_config': CheffLDMT2I._get_first_stage_config_dict(ae_path), 'cond_stage_config': CheffLDMT2I._get_cond_config_dict()}

    @staticmethod
    def _get_cond_config_dict() -> Dict:
        return {'target': 'cheff.ldm.modules.encoders.modules.BERTEmbedder', 'params': {'n_embed': 1280, 'n_layer': 32}}

    @staticmethod
    def _get_unet_config_dict() -> Dict:
        return {'target': 'cheff.ldm.modules.diffusionmodules.openaimodel.UNetModel', 'params': {'image_size': 64, 'in_channels': 3, 'out_channels': 3, 'model_channels': 224, 'attention_resolutions': [8, 4, 2], 'num_res_blocks': 2, 'channel_mult': [1, 2, 4, 4], 'num_heads': 8, 'use_spatial_transformer': True, 'transformer_depth': 1, 'context_dim': 1280, 'use_checkpoint': True, 'legacy': False}}
|
class LambdaWarmUpCosineScheduler():
    """LR multiplier schedule: linear warm-up followed by cosine decay.

    note: use with a base_lr of 1.0
    """

    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        # warm-up ramps lr_start -> lr_max over warm_up_steps, then a cosine
        # decays lr_max -> lr_min until max_decay_steps (clamped afterwards).
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.0  # most recently returned multiplier (for logging)
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        """Return the learning-rate multiplier for global step ``n``."""
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f'current step: {n}, recent lr-multiplier: {self.last_lr}')
        if n < self.lr_warm_up_steps:
            # linear ramp from lr_start up to lr_max
            slope = (self.lr_max - self.lr_start) / self.lr_warm_up_steps
            lr = slope * n + self.lr_start
        else:
            # cosine decay from lr_max down to lr_min, clamped at t = 1
            t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            t = min(t, 1.0)
            lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (1 + np.cos(t * np.pi))
        self.last_lr = lr
        return lr

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
|
class LambdaWarmUpCosineScheduler2():
    """Repeating warm-up + cosine schedule with per-cycle parameter lists.

    supports repeated iterations, configurable via lists
    note: use with a base_lr of 1.0.
    """

    def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
        # all per-cycle parameter lists must line up
        assert (len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths))
        self.lr_warm_up_steps = warm_up_steps
        self.f_start = f_start
        self.f_min = f_min
        self.f_max = f_max
        self.cycle_lengths = cycle_lengths
        # cumulative cycle end boundaries, with a leading 0
        self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
        self.last_f = 0.0  # most recently returned multiplier (for logging)
        self.verbosity_interval = verbosity_interval

    def find_in_interval(self, n):
        """Return the index of the cycle containing global step ``n``."""
        for interval, boundary in enumerate(self.cum_cycles[1:]):
            if n <= boundary:
                return interval

    def schedule(self, n, **kwargs):
        """Return the multiplier for global step ``n`` using its cycle's parameters."""
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step relative to the current cycle
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f'current step: {n}, recent lr-multiplier: {self.last_f}, current cycle {cycle}')
        if n < self.lr_warm_up_steps[cycle]:
            # linear ramp from f_start up to f_max
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
        else:
            # cosine decay from f_max down to f_min, clamped at t = 1
            t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
            t = min(t, 1.0)
            f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (1 + np.cos(t * np.pi))
        self.last_f = f
        return f

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
|
class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
    """Variant with a linear (instead of cosine) decay after warm-up."""

    def schedule(self, n, **kwargs):
        """Return the multiplier for global step ``n`` using its cycle's parameters."""
        cycle = self.find_in_interval(n)
        n = n - self.cum_cycles[cycle]  # step relative to the current cycle
        if self.verbosity_interval > 0 and n % self.verbosity_interval == 0:
            print(f'current step: {n}, recent lr-multiplier: {self.last_f}, current cycle {cycle}')
        if n < self.lr_warm_up_steps[cycle]:
            # linear ramp from f_start up to f_max
            f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
        else:
            # linear decay towards f_min over the remainder of the cycle
            f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / self.cycle_lengths[cycle]
        self.last_f = f
        return f
|
class VQModel(pl.LightningModule):
    """VQGAN-style vector-quantised autoencoder (PyTorch Lightning).

    Pipeline: Encoder -> 1x1 quant_conv -> VectorQuantizer ->
    1x1 post_quant_conv -> Decoder.  Trained with two optimizers
    (autoencoder at index 0, discriminator at index 1, both driven
    through ``self.loss``), with optional EMA weights and optional
    per-batch input resizing.
    """

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, batch_resize_range=None, scheduler_config=None, lr_g_factor=1.0, remap=None, sane_index_shape=False, use_ema=False):
        # NOTE(review): ignore_keys=[] is a mutable default argument;
        # it is only read here, but a tuple default would be safer.
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        if (colorize_nlabels is not None):
            assert (type(colorize_nlabels) == int)
            # random projection used by to_rgb() for segmentation inputs
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if (monitor is not None):
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if (self.batch_resize_range is not None):
            print(f'{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.')
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self)
            print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.')
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

    @contextmanager
    def ema_scope(self, context=None):
        """Temporarily swap in EMA weights; training weights are restored on exit."""
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if (context is not None):
                print(f'{context}: Switched to EMA weights')
        try:
            (yield None)
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if (context is not None):
                    print(f'{context}: Restored training weights')

    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from a checkpoint, dropping keys matching any ignored prefix."""
        sd = torch.load(path, map_location='cpu')['state_dict']
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print('Deleting key {} from state_dict.'.format(k))
                    del sd[k]
        (missing, unexpected) = self.load_state_dict(sd, strict=False)
        print(f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys')
        if (len(missing) > 0):
            print(f'Missing Keys: {missing}')
            print(f'Unexpected Keys: {unexpected}')

    def on_train_batch_end(self, *args, **kwargs):
        # update EMA shadow weights after every training batch
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        """Encode to quantised latents; returns (quant, emb_loss, info)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        (quant, emb_loss, info) = self.quantize(h)
        return (quant, emb_loss, info)

    def encode_to_prequant(self, x):
        """Encode to continuous latents, skipping the quantiser."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, quant):
        """Decode quantised latents back to image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        """Decode directly from codebook indices."""
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input, return_pred_indices=False):
        """Full reconstruction pass; optionally also returns codebook indices."""
        (quant, diff, (_, _, ind)) = self.encode(input)
        dec = self.decode(quant)
        if return_pred_indices:
            return (dec, diff, ind)
        return (dec, diff)

    def get_input(self, batch, k):
        """Extract batch[k] as a float NCHW tensor, optionally resized per batch."""
        x = batch[k]
        if (len(x.shape) == 3):
            x = x[(..., None)]  # add a trailing channel axis
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if (self.batch_resize_range is not None):
            lower_size = self.batch_resize_range[0]
            upper_size = self.batch_resize_range[1]
            if (self.global_step <= 4):
                # use the largest size for the first steps (presumably to
                # reserve peak memory up front — confirm)
                new_resize = upper_size
            else:
                new_resize = np.random.choice(np.arange(lower_size, (upper_size + 16), 16))
            if (new_resize != x.shape[2]):
                x = F.interpolate(x, size=new_resize, mode='bicubic')
            x = x.detach()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        """Alternate autoencoder (idx 0) and discriminator (idx 1) updates."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss, ind) = self(x, return_pred_indices=True)
        if (optimizer_idx == 0):
            (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train', predicted_indices=ind)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss
        if (optimizer_idx == 1):
            (discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        """Validate with the raw weights and (when enabled) the EMA weights."""
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            # NOTE(review): evaluated for its logging side effects; the
            # returned value is otherwise unused.
            log_dict_ema = self._validation_step(batch, batch_idx, suffix='_ema')
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=''):
        """Compute and log validation losses; ``suffix`` tags the metric names."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss, ind) = self(x, return_pred_indices=True)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split=('val' + suffix), predicted_indices=ind)
        (discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split=('val' + suffix), predicted_indices=ind)
        rec_loss = log_dict_ae[f'val{suffix}/rec_loss']
        self.log(f'val{suffix}/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log(f'val{suffix}/aeloss', aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        if (version.parse(pl.__version__) >= version.parse('1.4.0')):
            # rec_loss was already logged individually above; drop it from
            # the dict before bulk-logging on newer Lightning versions
            del log_dict_ae[f'val{suffix}/rec_loss']
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        """Two Adam optimizers (AE and discriminator), plus optional LambdaLR schedulers."""
        lr_d = self.learning_rate
        lr_g = (self.lr_g_factor * self.learning_rate)
        print('lr_d', lr_d)
        print('lr_g', lr_g)
        opt_ae = torch.optim.Adam(((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quantize.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=lr_g, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9))
        if (self.scheduler_config is not None):
            scheduler = instantiate_from_config(self.scheduler_config)
            print('Setting up LambdaLR scheduler...')
            scheduler = [{'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1}, {'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1}]
            return ([opt_ae, opt_disc], scheduler)
        return ([opt_ae, opt_disc], [])

    def get_last_layer(self):
        # final decoder conv weight, passed to the loss as last_layer
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        """Build a dict of input/reconstruction images for the image logger."""
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log['inputs'] = x
            return log
        (xrec, _) = self(x)
        if (x.shape[1] > 3):
            # multi-channel (segmentation-style) input: project to RGB for display
            assert (xrec.shape[1] > 3)
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log['inputs'] = x
        log['reconstructions'] = xrec
        if plot_ema:
            with self.ema_scope():
                (xrec_ema, _) = self(x)
                if (x.shape[1] > 3):
                    xrec_ema = self.to_rgb(xrec_ema)
                log['reconstructions_ema'] = xrec_ema
        return log

    def to_rgb(self, x):
        """Project a segmentation tensor to a 3-channel image normalised to [-1, 1]."""
        assert (self.image_key == 'segmentation')
        if (not hasattr(self, 'colorize')):
            self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
        return x
|
class VQModelInterface(VQModel):
    """VQModel wrapper exposing pre-quantisation latents.

    ``encode`` stops before the quantiser; ``decode`` quantises on the
    way back in unless ``force_not_quantize`` is set.
    """

    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(*args, embed_dim=embed_dim, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        # encoder followed by the 1x1 pre-quant projection; no quantisation
        hidden = self.encoder(x)
        return self.quant_conv(hidden)

    def decode(self, h, force_not_quantize=False):
        if force_not_quantize:
            quant = h
        else:
            # run the latents through the codebook first
            (quant, _, _) = self.quantize(h)
        projected = self.post_quant_conv(quant)
        return self.decoder(projected)
|
class AutoencoderKL(pl.LightningModule):
    def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None):
        """KL-regularised autoencoder with a diagonal-Gaussian latent.

        NOTE(review): ignore_keys=[] is a mutable default argument; it is
        only read here, but a tuple default would be safer.
        """
        super().__init__()
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig['double_z']
        # double_z: the encoder emits 2*z_channels, consumed as Gaussian
        # moments by encode(), hence the 2*embed_dim output here
        self.quant_conv = torch.nn.Conv2d((2 * ddconfig['z_channels']), (2 * embed_dim), 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
        self.embed_dim = embed_dim
        if (colorize_nlabels is not None):
            assert (type(colorize_nlabels) == int)
            self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
        if (monitor is not None):
            self.monitor = monitor
        if (ckpt_path is not None):
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
    def init_from_ckpt(self, path, ignore_keys=list()):
        """Load weights from a checkpoint, dropping keys matching any ignored prefix."""
        sd = torch.load(path, map_location='cpu')['state_dict']
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print('Deleting key {} from state_dict.'.format(k))
                    del sd[k]
        # strict=False: tolerate missing/unexpected keys
        self.load_state_dict(sd, strict=False)
        print(f'Restored from {path}')
    def encode(self, x):
        """Encode ``x`` and return the diagonal-Gaussian latent posterior."""
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior
    def decode(self, z):
        """Decode latent ``z`` back to image space."""
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec
    def forward(self, input, sample_posterior=True):
        """Reconstruct ``input``; returns (reconstruction, posterior).

        When ``sample_posterior`` is True the latent is sampled from the
        posterior, otherwise its mode is used (deterministic).
        """
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return (dec, posterior)
    def get_input(self, batch, k):
        """Fetch the model input from a batch.

        NOTE(review): ``k`` is ignored and batch['img'] is always read —
        confirm this is intentional for the dataset in use.
        """
        x = batch['img']
        return x
def training_step(self, batch, batch_idx, optimizer_idx):
inputs = self.get_input(batch, self.image_key)
(reconstructions, posterior) = self(inputs)
if (optimizer_idx == 0):
(aeloss, log_dict_ae) = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
self.log('aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return aeloss
if (optimizer_idx == 1):
(discloss, log_dict_disc) = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
self.log('discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return discloss
def validation_step(self, batch, batch_idx):
inputs = self.get_input(batch, self.image_key)
(reconstructions, posterior) = self(inputs)
(aeloss, log_dict_ae) = self.loss(inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
(discloss, log_dict_disc) = self.loss(inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
self.log('val/rec_loss', log_dict_ae['val/rec_loss'])
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
opt_ae = torch.optim.Adam((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
return ([opt_ae, opt_disc], [])
def get_last_layer(self):
return self.decoder.conv_out.weight
@torch.no_grad()
def log_images(self, batch, only_inputs=False, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if (not only_inputs):
(xrec, posterior) = self(x)
if (x.shape[1] > 3):
assert (xrec.shape[1] > 3)
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log['samples'] = self.decode(torch.randn_like(posterior.sample()))
log['reconstructions'] = xrec
log['inputs'] = x
return log
def to_rgb(self, x):
assert (self.image_key == 'segmentation')
if (not hasattr(self, 'colorize')):
self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
return x
|
class IdentityFirstStage(torch.nn.Module):
    """No-op first-stage model: encode/decode/forward return the input unchanged.

    With `vq_interface=True`, `quantize` mimics a VQ model's
    (quantized, loss, info) return shape so downstream code can unpack it.
    """

    def __init__(self, *args, vq_interface=False, **kwargs):
        # Fix: call nn.Module.__init__ *before* setting any attributes, as
        # required for torch modules (the original assigned first).
        super().__init__()
        self.vq_interface = vq_interface

    def encode(self, x, *args, **kwargs):
        return x

    def decode(self, x, *args, **kwargs):
        return x

    def quantize(self, x, *args, **kwargs):
        if self.vq_interface:
            # match the (quant, emb_loss, info) triple of a real quantizer
            return (x, None, [None, None, None])
        return x

    def forward(self, x, *args, **kwargs):
        return x
|
class DDIMSampler(object):
    """DDIM sampler driving a trained diffusion model.

    Precomputes alpha/sigma schedules on a subsampled timestep grid from the
    model's full DDPM schedule, then iterates the DDIM update from noise
    (or a given x_T) back to a clean sample.
    """

    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Not a real nn.Module buffer: simply stores `attr` as an attribute.
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                # NOTE(review): deliberately leaves the tensor on its current
                # device (compare PLMSSampler.register_buffer, which moves it
                # to CUDA) -- confirm this asymmetry is intended.
                pass
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Build and cache all schedule tensors for `ddim_num_steps` DDIM steps.

        `ddim_eta` interpolates between deterministic DDIM (0) and
        DDPM-like stochastic sampling (1).
        """
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        # full DDPM schedule quantities
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # quantities for the q(x_t | x_0) forward posterior
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        # DDIM-specific alphas/sigmas on the subsampled timestep grid
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Entry point: draw `batch_size` samples of `shape` (C, H, W) in S DDIM steps.

        Returns (samples, intermediates) from `ddim_sampling`.
        """
        if (conditioning is not None):
            # sanity check: conditioning batch should match the requested batch size
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')
        (samples, intermediates) = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
        return (samples, intermediates)

    @torch.no_grad()
    def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """Run the reverse-time DDIM loop from x_T (or fresh noise) to x_0."""
        device = self.model.betas.device
        b = shape[0]
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # truncate the precomputed DDIM grid to the requested fraction
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        # iterate timesteps from high (noisy) to low (clean)
        time_range = (reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
        for (i, step) in enumerate(iterator):
            # `index` indexes the ddim_* schedule buffers (counts down)
            index = ((total_steps - i) - 1)
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            if (mask is not None):
                # inpainting: keep masked region consistent with a noised x0
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
            (img, pred_x0) = outs
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """One DDIM update x_t -> x_{t-1}; returns (x_prev, pred_x0)."""
        (b, *_, device) = (*x.shape, x.device)
        if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
            e_t = self.model.apply_model(x, t, c)
        else:
            # classifier-free guidance: run cond + uncond in one batched pass
            x_in = torch.cat(([x] * 2))
            t_in = torch.cat(([t] * 2))
            c_in = torch.cat([unconditional_conditioning, c])
            (e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
        if (score_corrector is not None):
            assert (self.model.parameterization == 'eps')
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
        # select DDPM or DDIM schedule buffers
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
        # predicted x_0 from the eps-prediction
        pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
        if quantize_denoised:
            (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
        # direction pointing back towards x_t
        dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
        noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
        if (noise_dropout > 0.0):
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
        return (x_prev, pred_x0)
|
class PLMSSampler(object):
    """PLMS (pseudo linear multistep) sampler for a trained diffusion model.

    Like DDIMSampler but replaces the single eps evaluation per step with a
    linear multistep (Adams-Bashforth style) combination of the current and
    up to three previous eps predictions. Requires eta == 0 (deterministic).
    """

    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Not a real nn.Module buffer: stores `attr` as an attribute,
        # moving tensors onto CUDA first.
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Build and cache schedule tensors; PLMS only supports ddim_eta == 0."""
        if (ddim_eta != 0):
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        # full DDPM schedule quantities
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # quantities for the q(x_t | x_0) forward posterior
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        # DDIM-style alphas/sigmas on the subsampled grid (sigmas are 0 here)
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Entry point: draw `batch_size` samples of `shape` (C, H, W) in S PLMS steps."""
        if (conditioning is not None):
            # sanity check: conditioning batch should match the requested batch size
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')
        (samples, intermediates) = self.plms_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
        return (samples, intermediates)

    @torch.no_grad()
    def plms_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """Run the reverse-time PLMS loop, maintaining a window of past eps values."""
        device = self.model.betas.device
        b = shape[0]
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # truncate the precomputed DDIM grid to the requested fraction
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        # iterate timesteps from high (noisy) to low (clean)
        time_range = (list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        print(f'Running PLMS Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        # sliding window of previous eps predictions for the multistep formula
        old_eps = []
        for (i, step) in enumerate(iterator):
            # `index` indexes the ddim_* schedule buffers (counts down)
            index = ((total_steps - i) - 1)
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # next (smaller) timestep, needed for the first-step corrector pass
            ts_next = torch.full((b,), time_range[min((i + 1), (len(time_range) - 1))], device=device, dtype=torch.long)
            if (mask is not None):
                # inpainting: keep masked region consistent with a noised x0
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next)
            (img, pred_x0, e_t) = outs
            old_eps.append(e_t)
            if (len(old_eps) >= 4):
                # only the last 3 eps values are ever used
                old_eps.pop(0)
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)

    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, old_eps=None, t_next=None):
        """One PLMS update x_t -> x_{t-1}; returns (x_prev, pred_x0, e_t)."""
        (b, *_, device) = (*x.shape, x.device)

        def get_model_output(x, t):
            # eps prediction, with optional classifier-free guidance
            if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat(([x] * 2))
                t_in = torch.cat(([t] * 2))
                c_in = torch.cat([unconditional_conditioning, c])
                (e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
            if (score_corrector is not None):
                assert (self.model.parameterization == 'eps')
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
            return e_t
        # select DDPM or DDIM schedule buffers
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)

        def get_x_prev_and_pred_x0(e_t, index):
            # standard DDIM step using the (possibly multistep-combined) eps
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
            pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
            if quantize_denoised:
                (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
            dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
            noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
            if (noise_dropout > 0.0):
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
            return (x_prev, pred_x0)
        e_t = get_model_output(x, t)
        # linear multistep combination; order grows with the history length
        if (len(old_eps) == 0):
            # bootstrap: Heun-style predictor-corrector using t_next
            (x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = ((e_t + e_t_next) / 2)
        elif (len(old_eps) == 1):
            # 2nd-order multistep coefficients
            e_t_prime = (((3 * e_t) - old_eps[(- 1)]) / 2)
        elif (len(old_eps) == 2):
            # 3rd-order multistep coefficients
            e_t_prime = ((((23 * e_t) - (16 * old_eps[(- 1)])) + (5 * old_eps[(- 2)])) / 12)
        elif (len(old_eps) >= 3):
            # 4th-order multistep coefficients
            e_t_prime = (((((55 * e_t) - (59 * old_eps[(- 1)])) + (37 * old_eps[(- 2)])) - (9 * old_eps[(- 3)])) / 24)
        (x_prev, pred_x0) = get_x_prev_and_pred_x0(e_t_prime, index)
        return (x_prev, pred_x0, e_t)
|
def exists(val):
    """Return True iff `val` is not None."""
    return not (val is None)
|
def uniq(arr):
    """Return the unique elements of `arr` (as a keys view), preserving first-seen order."""
    return dict.fromkeys(arr).keys()
|
def default(val, d):
    """Return `val` if it is not None, otherwise the fallback `d`.

    If the fallback is a function it is called lazily to produce the value.
    """
    if val is not None:
        return val
    return d() if isfunction(d) else d
|
def max_neg_value(t):
    """Most negative finite value representable in `t`'s dtype (used for masking)."""
    return -torch.finfo(t.dtype).max
|
def init_(tensor):
    """Uniform in-place init in [-1/sqrt(d), 1/sqrt(d)] where d is the last dim.

    Returns the tensor for chaining.
    """
    bound = 1 / math.sqrt(tensor.shape[-1])
    tensor.uniform_(-bound, bound)
    return tensor
|
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, (dim_out * 2))
def forward(self, x):
(x, gate) = self.proj(x).chunk(2, dim=(- 1))
return (x * F.gelu(gate))
|
class FeedForward(nn.Module):
    """Transformer MLP: expand by `mult`, activate (GELU or GEGLU), project back."""

    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        if glu:
            project_in = GEGLU(dim, inner_dim)
        else:
            project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            nn.Linear(inner_dim, dim_out),
        )

    def forward(self, x):
        return self.net(x)
|
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for param in module.parameters():
        with torch.no_grad():
            param.zero_()
    return module
|
def Normalize(in_channels):
    """GroupNorm with 32 groups and affine parameters, as used throughout the model."""
    return torch.nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-06, affine=True)
|
class LinearAttention(nn.Module):
    """Linear-complexity multi-head attention over 2D feature maps."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        inner = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, inner * 3, 1, bias=False)
        self.to_out = nn.Conv2d(inner, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        # single conv produces q, k, v; split heads and flatten spatial dims
        q, k, v = rearrange(self.to_qkv(x), 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
        k = k.softmax(dim=-1)
        # aggregate context first (linear attention trick), then query it
        context = torch.einsum('bhdn,bhen->bhde', k, v)
        out = torch.einsum('bhde,bhdn->bhen', context, q)
        out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
        return self.to_out(out)
|
class SpatialSelfAttention(nn.Module):
    """Single-head spatial self-attention over a 2D feature map, with residual."""

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels
        self.norm = Normalize(in_channels)
        # 1x1 convs produce per-pixel query/key/value and output projections
        self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)
        # flatten spatial dims: attention over h*w positions
        (b, c, h, w) = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        # attention logits w_[b, i, j] = <q_i, k_j>, scaled by 1/sqrt(c)
        w_ = torch.einsum('bij,bjk->bik', q, k)
        w_ = (w_ * (int(c) ** (- 0.5)))
        w_ = torch.nn.functional.softmax(w_, dim=2)
        # attend to values: transpose weights so positions aggregate over j
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)
        # residual connection
        return (x + h_)
|
class CrossAttention(nn.Module):
    """Multi-head attention; attends to `context`, or to x itself when none is given."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)
        self.scale = dim_head ** (-0.5)
        self.heads = heads
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))

    def forward(self, x, context=None, mask=None):
        num_heads = self.heads
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        # fold the head dimension into the batch dimension
        q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=num_heads) for t in (q, k, v))
        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        if exists(mask):
            # mask out disallowed key positions with the most negative value
            mask = rearrange(mask, 'b ... -> b (...)')
            neg_inf = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=num_heads)
            sim.masked_fill_(~mask, neg_inf)
        attn = sim.softmax(dim=-1)
        out = einsum('b i j, b j d -> b i d', attn, v)
        # unfold heads back into the feature dimension
        out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)
        return self.to_out(out)
|
class BasicTransformerBlock(nn.Module):
    """Pre-norm transformer block: self-attention, cross-attention, feed-forward.

    Each sub-layer has a residual connection; `attn1` receives no context and
    so attends to x itself, `attn2` attends to the conditioning `context`.
    """

    def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=True):
        super().__init__()
        # self-attention (no context_dim -> keys/values come from x)
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
        # cross-attention over the conditioning context
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout)
        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)
        self.norm3 = nn.LayerNorm(dim)
        # whether to trade compute for memory via gradient checkpointing
        self.checkpoint = checkpoint

    def forward(self, x, context=None):
        # NOTE(review): `checkpoint` here is the project's own utility (takes
        # params + enable flag), not torch.utils.checkpoint -- confirm signature.
        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None):
        x = (self.attn1(self.norm1(x)) + x)
        x = (self.attn2(self.norm2(x), context=context) + x)
        x = (self.ff(self.norm3(x)) + x)
        return x
|
class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data: project the (b, c, h, w) input to an
    inner dimension, flatten to (b, h*w, d) tokens, run `depth` transformer
    blocks (optionally cross-attending to `context`), then reshape back to an
    image and add a residual connection.
    """

    def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None):
        super().__init__()
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
        blocks = [
            BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
            for _ in range(depth)
        ]
        self.transformer_blocks = nn.ModuleList(blocks)
        # zero-initialized output projection: block starts as the identity
        self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))

    def forward(self, x, context=None):
        b, c, h, w = x.shape
        residual = x
        hidden = self.proj_in(self.norm(x))
        hidden = rearrange(hidden, 'b c h w -> b (h w) c')
        for block in self.transformer_blocks:
            hidden = block(hidden, context=context)
        hidden = rearrange(hidden, 'b (h w) c -> b c h w', h=h, w=w)
        return self.proj_out(hidden) + residual
|
class AbstractDistribution():
    """Minimal distribution interface; subclasses must implement sample() and mode()."""

    def sample(self):
        raise NotImplementedError()

    def mode(self):
        raise NotImplementedError()
|
class DiracDistribution(AbstractDistribution):
    """Point-mass distribution: sampling and mode both return the stored value."""

    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

    def mode(self):
        return self.value
|
class DiagonalGaussianDistribution(object):
    """Factorized Gaussian over latents, parameterized by mean and log-variance
    concatenated along channel dim 1 of `parameters`.

    With `deterministic=True` the variance is forced to zero, so sampling
    always returns the mean.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.deterministic = deterministic
        mean, logvar = torch.chunk(parameters, 2, dim=1)
        self.mean = mean
        # clamp for numerical stability of exp()
        self.logvar = torch.clamp(logvar, -30.0, 20.0)
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if deterministic:
            zeros = torch.zeros_like(self.mean).to(device=self.parameters.device)
            self.var = zeros
            self.std = zeros

    def sample(self):
        """Reparameterized sample: mean + std * eps."""
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL divergence to a standard normal (other=None) or to `other`, summed
        over dims (1, 2, 3)."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3])
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var - 1.0 - self.logvar + other.logvar,
            dim=[1, 2, 3])

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample`, summed over `dims`."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean
|
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases. At least one argument must be a tensor;
    scalar log-variances are promoted to tensors on that tensor's device/dtype.

    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    """
    tensor = next((obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, torch.Tensor)), None)
    assert tensor is not None, 'at least one argument must be a Tensor'
    logvar1, logvar2 = [
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]
    return 0.5 * (
        -1.0 + logvar2 - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
|
class LitEma(nn.Module):
    """Exponential moving average of a model's trainable parameters.

    Shadow copies are stored as buffers named after the parameter with the
    dots stripped (buffer names may not contain '.'); `m_name2s_name` maps
    parameter name -> shadow buffer name.
    """

    def __init__(self, model, decay=0.9999, use_num_upates=True):
        super().__init__()
        if ((decay < 0.0) or (decay > 1.0)):
            raise ValueError('Decay must be between 0 and 1')
        self.m_name2s_name = {}
        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
        # num_updates == -1 disables the warm-up decay ramp in forward()
        self.register_buffer('num_updates', (torch.tensor(0, dtype=torch.int) if use_num_upates else torch.tensor((- 1), dtype=torch.int)))
        for (name, p) in model.named_parameters():
            if p.requires_grad:
                # strip '.' so the name is a valid buffer name
                s_name = name.replace('.', '')
                self.m_name2s_name.update({name: s_name})
                self.register_buffer(s_name, p.clone().detach().data)
        # scratch space used by store()/restore()
        self.collected_params = []

    def forward(self, model):
        """Update the shadow parameters towards `model`'s current parameters."""
        decay = self.decay
        if (self.num_updates >= 0):
            self.num_updates += 1
            # warm-up: use a smaller effective decay early in training
            decay = min(self.decay, ((1 + self.num_updates) / (10 + self.num_updates)))
        one_minus_decay = (1.0 - decay)
        with torch.no_grad():
            m_param = dict(model.named_parameters())
            shadow_params = dict(self.named_buffers())
            for key in m_param:
                if m_param[key].requires_grad:
                    sname = self.m_name2s_name[key]
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    # in-place EMA step: shadow -= (1 - decay) * (shadow - param)
                    shadow_params[sname].sub_((one_minus_decay * (shadow_params[sname] - m_param[key])))
                else:
                    assert (not (key in self.m_name2s_name))

    def copy_to(self, model):
        """Copy the shadow (EMA) parameters into `model` in place."""
        m_param = dict(model.named_parameters())
        shadow_params = dict(self.named_buffers())
        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert (not (key in self.m_name2s_name))

    def store(self, parameters):
        """
        Save the current parameters for restoring later.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]

    def restore(self, parameters):
        """
        Restore the parameters stored with the `store` method.
        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.
        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored parameters.
        """
        for (c_param, param) in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)
|
class LPIPSWithDiscriminator(nn.Module):
    """KL-autoencoder training loss: pixel L1 + LPIPS perceptual + KL +
    adaptively weighted adversarial term (taming-transformers recipe).

    `forward` serves two optimizers:
      * optimizer_idx == 0 -> generator / autoencoder loss
      * optimizer_idx == 1 -> patch-discriminator loss
    """
    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_loss='hinge'):
        super().__init__()
        assert (disc_loss in ['hinge', 'vanilla'])
        self.kl_weight = kl_weight
        self.pixel_weight = pixelloss_weight
        # Frozen LPIPS network used purely as a perceptual metric.
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        # Learnable scalar log-variance of the reconstruction likelihood.
        self.logvar = nn.Parameter((torch.ones(size=()) * logvar_init))
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm).apply(weights_init)
        # Global step at which the adversarial term is switched on.
        self.discriminator_iter_start = disc_start
        self.disc_loss = (hinge_d_loss if (disc_loss == 'hinge') else vanilla_d_loss)
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional
    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Scale the GAN term so its gradient norm at the last decoder layer
        matches the reconstruction loss's gradient norm (clamped, detached)."""
        if (last_layer is not None):
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
        # Epsilon guards against a vanishing generator gradient.
        d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
        d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
        d_weight = (d_weight * self.discriminator_weight)
        return d_weight
    def forward(self, inputs, reconstructions, posteriors, optimizer_idx, global_step, last_layer=None, cond=None, split='train', weights=None):
        """Compute generator (optimizer_idx == 0) or discriminator
        (optimizer_idx == 1) loss. Returns (loss, log_dict)."""
        rec_loss = torch.abs((inputs.contiguous() - reconstructions.contiguous()))
        if (self.perceptual_weight > 0):
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
        # Gaussian NLL with the learned scalar log-variance.
        nll_loss = ((rec_loss / torch.exp(self.logvar)) + self.logvar)
        weighted_nll_loss = nll_loss
        if (weights is not None):
            weighted_nll_loss = (weights * nll_loss)
        # NOTE: sums over all elements but divides by batch size only.
        weighted_nll_loss = (torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0])
        nll_loss = (torch.sum(nll_loss) / nll_loss.shape[0])
        kl_loss = posteriors.kl()
        kl_loss = (torch.sum(kl_loss) / kl_loss.shape[0])
        if (optimizer_idx == 0):
            # Generator update: reconstruction + KL + adversarial term.
            if (cond is None):
                assert (not self.disc_conditional)
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = (- torch.mean(logits_fake))
            if (self.disc_factor > 0.0):
                try:
                    d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
                except RuntimeError:
                    # autograd.grad raises when no graph exists (eval mode);
                    # only then is a zero weight substituted.
                    assert (not self.training)
                    d_weight = torch.tensor(0.0)
            else:
                d_weight = torch.tensor(0.0)
            # Adversarial weight stays zero until discriminator_iter_start.
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = ((weighted_nll_loss + (self.kl_weight * kl_loss)) + ((d_weight * disc_factor) * g_loss))
            log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/logvar'.format(split): self.logvar.detach(), '{}/kl_loss'.format(split): kl_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
            return (loss, log)
        if (optimizer_idx == 1):
            # Discriminator update on detached real/fake pairs.
            if (cond is None):
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
            log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
            return (d_loss, log)
|
def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
    """Hinge discriminator loss with a per-exemplar weighting.

    Per-sample hinge terms are averaged over the spatial/channel dims, then
    combined as a weighted mean over the batch using `weights`.
    """
    assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
    per_sample_real = F.relu(1.0 - logits_real).mean(dim=[1, 2, 3])
    per_sample_fake = F.relu(1.0 + logits_fake).mean(dim=[1, 2, 3])
    weight_total = weights.sum()
    real_term = (weights * per_sample_real).sum() / weight_total
    fake_term = (weights * per_sample_fake).sum() / weight_total
    return 0.5 * (real_term + fake_term)
|
def adopt_weight(weight, global_step, threshold=0, value=0.0):
    """Return `value` before `threshold` steps have elapsed, else `weight`.

    Used to keep the adversarial loss switched off during warm-up.
    """
    return value if global_step < threshold else weight
|
def measure_perplexity(predicted_indices, n_embed):
    """Measure codebook usage for a batch of predicted code indices.

    Returns (perplexity, cluster_use): exp-entropy of the empirical code
    distribution and the number of distinct codes actually used.
    """
    one_hot = F.one_hot(predicted_indices, n_embed).float()
    usage = one_hot.reshape(-1, n_embed).mean(dim=0)
    # 1e-10 guards log(0) for unused codes.
    entropy = -(usage * torch.log(usage + 1e-10)).sum()
    return (entropy.exp(), (usage > 0).sum())
|
def l1(x, y):
    """Elementwise absolute (L1) difference between x and y."""
    return (x - y).abs()
|
def l2(x, y):
    """Elementwise squared (L2) difference between x and y."""
    return (x - y).pow(2)
|
class VQLPIPSWithDiscriminator(nn.Module):
    """VQ-autoencoder training loss: pixel + LPIPS perceptual + codebook
    commitment + adaptively weighted adversarial term.

    `forward` serves two optimizers:
      * optimizer_idx == 0 -> generator / autoencoder loss
      * optimizer_idx == 1 -> patch-discriminator loss
    """
    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_ndf=64, disc_loss='hinge', n_classes=None, perceptual_loss='lpips', pixel_loss='l1'):
        super().__init__()
        assert (disc_loss in ['hinge', 'vanilla'])
        # NOTE(review): 'clips' and 'dists' pass this assert but are rejected
        # below — only 'lpips' is actually implemented here.
        assert (perceptual_loss in ['lpips', 'clips', 'dists'])
        assert (pixel_loss in ['l1', 'l2'])
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        if (perceptual_loss == 'lpips'):
            print(f'{self.__class__.__name__}: Running with LPIPS.')
            self.perceptual_loss = LPIPS().eval()
        else:
            raise ValueError(f'Unknown perceptual loss: >> {perceptual_loss} <<')
        self.perceptual_weight = perceptual_weight
        if (pixel_loss == 'l1'):
            self.pixel_loss = l1
        else:
            self.pixel_loss = l2
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
        # Global step at which the adversarial term is switched on.
        self.discriminator_iter_start = disc_start
        if (disc_loss == 'hinge'):
            self.disc_loss = hinge_d_loss
        elif (disc_loss == 'vanilla'):
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f'VQLPIPSWithDiscriminator running with {disc_loss} loss.')
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional
        # Codebook size; required only when logging perplexity.
        self.n_classes = n_classes
    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Scale the GAN term so its gradient norm at the last decoder layer
        matches the reconstruction loss's gradient norm (clamped, detached)."""
        if (last_layer is not None):
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
        # Epsilon guards against a vanishing generator gradient.
        d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
        d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
        d_weight = (d_weight * self.discriminator_weight)
        return d_weight
    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, global_step, last_layer=None, cond=None, split='train', predicted_indices=None):
        """Compute generator (optimizer_idx == 0) or discriminator
        (optimizer_idx == 1) loss. Returns (loss, log_dict)."""
        if (not exists(codebook_loss)):
            codebook_loss = torch.tensor([0.0]).to(inputs.device)
        rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous())
        if (self.perceptual_weight > 0):
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
        else:
            # NOTE(review): this placeholder stays on CPU; it is only logged.
            p_loss = torch.tensor([0.0])
        nll_loss = rec_loss
        nll_loss = torch.mean(nll_loss)
        if (optimizer_idx == 0):
            # Generator update: reconstruction + adversarial + codebook term.
            if (cond is None):
                assert (not self.disc_conditional)
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = (- torch.mean(logits_fake))
            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # autograd.grad raises when no graph exists (eval mode).
                assert (not self.training)
                d_weight = torch.tensor(0.0)
            # Adversarial weight stays zero until discriminator_iter_start.
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = ((nll_loss + ((d_weight * disc_factor) * g_loss)) + (self.codebook_weight * codebook_loss.mean()))
            log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/quant_loss'.format(split): codebook_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/p_loss'.format(split): p_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
            if (predicted_indices is not None):
                assert (self.n_classes is not None)
                with torch.no_grad():
                    (perplexity, cluster_usage) = measure_perplexity(predicted_indices, self.n_classes)
                log[f'{split}/perplexity'] = perplexity
                log[f'{split}/cluster_usage'] = cluster_usage
            return (loss, log)
        if (optimizer_idx == 1):
            # Discriminator update on detached real/fake pairs.
            if (cond is None):
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
            log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
            return (d_loss, log)
|
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of strings as RGB image tensors for logging.

    Args:
        wh: (width, height) of each rendered image.
        xc: list of strings, one per batch element.
        size: font size in points.

    Returns:
        Tensor of shape (b, 3, h, w) with values scaled to [-1, 1].
    """
    b = len(xc)
    txts = list()
    # Font loading (disk I/O) and the wrap width depend only on `size`/`wh`,
    # so hoist them out of the per-image loop.
    font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
    nc = int((40 * (wh[0] / 256)))
    for bi in range(b):
        txt = Image.new('RGB', wh, color='white')
        draw = ImageDraw.Draw(txt)
        # Hard-wrap the caption every `nc` characters.
        lines = '\n'.join((xc[bi][start:(start + nc)] for start in range(0, len(xc[bi]), nc)))
        try:
            draw.text((0, 0), lines, fill='black', font=font)
        except UnicodeEncodeError:
            print('Cant encode string for logging. Skipping.')
        # HWC uint8 -> CHW float in [-1, 1].
        txt = ((np.array(txt).transpose(2, 0, 1) / 127.5) - 1.0)
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
|
def ismap(x):
    """True for 4-D tensors with more than 3 channels (map-like data)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] > 3
|
def isimage(x):
    """True for 4-D tensors with 1 or 3 channels (image-like data)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] in (1, 3)
|
def exists(x):
    """True unless x is None."""
    if x is None:
        return False
    return True
|
def default(val, d):
    """Return val when it is not None, else the fallback d.

    A callable fallback is invoked lazily; anything else is returned as-is.
    """
    if val is not None:
        return val
    if isfunction(d):
        return d()
    return d
|
def mean_flat(tensor):
    """Take the mean over all non-batch dimensions.

    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    """
    non_batch_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
|
def count_params(model, verbose=False):
    """Return the total parameter count of `model`, optionally printing it."""
    total_params = 0
    for p in model.parameters():
        total_params += p.numel()
    if verbose:
        print(f'{model.__class__.__name__} has {(total_params * 1e-06):.2f} M params.')
    return total_params
|
def instantiate_from_config(config):
    """Build the object named by config['target'] with config['params'].

    The sentinel strings '__is_first_stage__' / '__is_unconditional__'
    yield None; any other config without a 'target' key is an error.
    """
    if 'target' in config:
        return get_obj_from_str(config['target'])(**config.get('params', dict()))
    if config in ('__is_first_stage__', '__is_unconditional__'):
        return None
    raise KeyError('Expected key `target` to instantiate.')
|
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Name' to the named attribute.

    When `reload` is set, the containing module is re-imported first.
    """
    module_path, attr_name = string.rsplit('.', 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
|
def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
    """Worker body: apply `func` to `data`, push [idx, result] onto Q,
    then signal completion with a 'Done' sentinel."""
    res = func(data, worker_id=idx) if idx_to_fn else func(data)
    Q.put([idx, res])
    Q.put('Done')
|
def parallel_data_prefetch(func: callable, data, n_proc, target_data_type='ndarray', cpu_intensive=True, use_worker_id=False):
    """Apply `func` to `data` split across `n_proc` workers and gather results.

    Args:
        func: callable applied to each data shard (gets `worker_id=` kwarg
            when `use_worker_id` is set).
        data: np.ndarray or iterable to be partitioned.
        n_proc: number of worker processes/threads.
        target_data_type: 'ndarray' or 'list' — controls splitting and the
            shape of the gathered result.
        cpu_intensive: processes (True) vs. threads (False).
        use_worker_id: pass the shard index to `func` as `worker_id`.

    Returns:
        Concatenated ndarray, flattened list, or the raw per-worker results,
        depending on `target_data_type`.
    """
    if (isinstance(data, np.ndarray) and (target_data_type == 'list')):
        raise ValueError('list expected but function got ndarray.')
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.')
            data = list(data.values())
        if (target_data_type == 'ndarray'):
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(f'The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}.')
    # Processes for CPU-bound work; threads suffice for I/O-bound func.
    if cpu_intensive:
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    # Shard the data into n_proc roughly equal parts.
    if (target_data_type == 'ndarray'):
        arguments = [[func, Q, part, i, use_worker_id] for (i, part) in enumerate(np.array_split(data, n_proc))]
    else:
        step = (int(((len(data) / n_proc) + 1)) if ((len(data) % n_proc) != 0) else int((len(data) / n_proc)))
        arguments = [[func, Q, part, i, use_worker_id] for (i, part) in enumerate([data[i:(i + step)] for i in range(0, len(data), step)])]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    print(f'Start prefetching...')
    import time
    start = time.time()
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()
        # Each worker emits its result followed by a 'Done' sentinel;
        # stop once every worker has signalled completion.
        k = 0
        while (k < n_proc):
            res = Q.get()
            if (res == 'Done'):
                k += 1
            else:
                # res is [worker_idx, result]; keep results ordered by shard.
                gather_res[res[0]] = res[1]
    except Exception as e:
        print('Exception: ', e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
        print(f'Prefetching complete. [{(time.time() - start)} sec.]')
    if (target_data_type == 'ndarray'):
        if (not isinstance(gather_res[0], np.ndarray)):
            return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
        return np.concatenate(gather_res, axis=0)
    elif (target_data_type == 'list'):
        out = []
        for r in gather_res:
            out.extend(r)
        return out
    else:
        return gather_res
|
class ChestXrayDataset(Dataset):
    """Class for handling datasets in the MaCheX composition.

    Expects `root` to contain an 'index.json' file mapping sample keys to
    metadata dicts with at least a 'path' entry pointing at the image file.
    """
    def __init__(self, root: str, transforms: Optional[Compose]=None) -> None:
        """Initialize ChestXrayDataset."""
        self.root = root
        self.index_dict = ChestXrayDataset._load_json(os.path.join(self.root, 'index.json'))
        self.keys = list(self.index_dict.keys())
        # Default to a plain tensor conversion when no pipeline is supplied.
        self.transforms = ToTensor() if transforms is None else transforms
    @staticmethod
    def _load_json(file_path: str) -> Dict:
        """Load a json file as dictionary."""
        with open(file_path, 'r') as f:
            return json.load(f)
    def __len__(self):
        """Return length of the dataset."""
        return len(self.keys)
    def __getitem__(self, idx: int) -> Dict:
        """Get dataset element."""
        meta = self.index_dict[self.keys[idx]]
        image = self.transforms(Image.open(meta['path']))
        return {'img': image}
|
class MaCheXDataset(Dataset):
    """Massive chest X-ray dataset.

    Concatenation of every sub-dataset directory found under `root`.
    """
    def __init__(self, root: str, transforms: Optional[Compose]=None) -> None:
        """Initialize MaCheXDataset."""
        self.root = root
        subsets = [
            ChestXrayDataset(root=os.path.join(root, name), transforms=transforms)
            for name in os.listdir(self.root)
        ]
        self.ds = ConcatDataset(subsets)
    def __len__(self):
        """Return length of the dataset."""
        return len(self.ds)
    def __getitem__(self, idx: int) -> Dict:
        """Get dataset element."""
        return self.ds[idx]
|
class MimicT2IDataset(ChestXrayDataset):
    """Mimic subset with reports.

    Like ChestXrayDataset but anchored at the 'mimic' sub-directory and
    yielding the report text alongside each image.
    """
    def __init__(self, root: str, transforms: Optional[Compose]=None) -> None:
        super().__init__(os.path.join(root, 'mimic'), transforms)
    def __getitem__(self, idx: int) -> Dict:
        """Get dataset element."""
        meta = self.index_dict[self.keys[idx]]
        image = self.transforms(Image.open(meta['path']))
        return {'img': image, 'caption': meta['report']}
|
class LabelChestXrayDataset(ChestXrayDataset):
    """A Chest X-ray dataset that returns class labels.

    Samples without a 'class_label' entry in the index are dropped.
    """
    def __init__(self, root: str, transforms: Optional[Compose]=None) -> None:
        super().__init__(root, transforms)
        # Keep only entries that actually carry a class label.
        self.keys = [
            k for k in self.keys
            if self.index_dict[k].get('class_label') is not None
        ]
    def __getitem__(self, idx: int) -> Dict:
        """Get dataset element."""
        meta = self.index_dict[self.keys[idx]]
        image = self.transforms(Image.open(meta['path']))
        return {'img': image, 'class_label': torch.tensor(meta['class_label']).float()}
|
class CombinedLabelChestXrayDataset(Dataset):
    """Labelled chest X-rays from the 'mimic' and 'chexpert' subsets combined."""
    def __init__(self, root: str, transforms: Optional[Compose]=None) -> None:
        """Initialize the combined labelled dataset."""
        self.root = root
        parts = [
            LabelChestXrayDataset(root=os.path.join(root, name), transforms=transforms)
            for name in ['mimic', 'chexpert']
        ]
        self.ds = ConcatDataset(parts)
    def __len__(self):
        """Return length of the dataset."""
        return len(self.ds)
    def __getitem__(self, idx: int) -> Dict:
        """Get dataset element."""
        return self.ds[idx]
|
class Diffusor():
    """DDPM forward/reverse process around a noise-prediction model.

    Precomputed per-timestep constants come from `schedule`; `model(x, t)`
    is expected to predict the noise added at step t.
    """
    def __init__(self, model: nn.Module, schedule: BaseSchedule, device: Optional[torch.device]=None, clip_denoised: bool=True) -> None:
        """Initialize Diffusor.

        Args:
            model: noise-prediction network, called as model(x, t).
            schedule: precomputed beta/alpha schedule.
            device: sampling device; defaults to the schedule's device.
            clip_denoised: clamp the x0 estimate to [-1, 1] each step.
        """
        self.model = model
        self.schedule = schedule
        self.clip_denoised = clip_denoised
        if (device is None):
            self.device = schedule.device
        else:
            self.device = device
    @staticmethod
    def extract_vals(a: Tensor, t: Tensor, x_shape: Tuple) -> Tensor:
        """Gather per-sample schedule values at timesteps `t` and reshape to
        (batch, 1, 1, ...) so they broadcast against x of shape `x_shape`."""
        batch_size = t.shape[0]
        out = a.gather((- 1), t)
        return out.reshape(batch_size, *((1,) * (len(x_shape) - 1)))
    def q_sample(self, x_start: Tensor, t: Tensor, noise: Optional[Tensor]=None) -> Tensor:
        """Sample from the forward process q(x_t | x_0).

        Given `x_start` and timesteps `t`, return the noised images x_t;
        fresh Gaussian noise is drawn when `noise` is None.
        """
        if (noise is None):
            noise = torch.randn_like(x_start)
        sqrt_alphas_cumprod_t = Diffusor.extract_vals(self.schedule.sqrt_alphas_cumprod, t, x_start.shape)
        sqrt_one_minus_alphas_cumprod_t = Diffusor.extract_vals(self.schedule.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
        return ((sqrt_alphas_cumprod_t * x_start) + (sqrt_one_minus_alphas_cumprod_t * noise))
    def predict_start_from_noise(self, x_t: Tensor, t: Tensor, noise: Tensor) -> Tensor:
        """Recover the x_0 estimate from x_t and predicted noise."""
        sqrt_recip_alphas_cumprod_t = Diffusor.extract_vals(self.schedule.sqrt_recip_alphas_cumprod, t, x_t.shape)
        sqrt_recipm1_alphas_cumprod_t = Diffusor.extract_vals(self.schedule.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
        x_rec = ((sqrt_recip_alphas_cumprod_t * x_t) - (sqrt_recipm1_alphas_cumprod_t * noise))
        return x_rec
    def q_posterior(self, x_start: Tensor, x_t: Tensor, t: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        """Compute the posterior q(x_{t-1} | x_t, x_0).

        Returns (mean, variance, clipped log-variance).
        """
        posterior_mean_coef1 = Diffusor.extract_vals(self.schedule.post_mean_coef1, t, x_t.shape)
        posterior_mean_coef2 = Diffusor.extract_vals(self.schedule.post_mean_coef2, t, x_t.shape)
        post_var = Diffusor.extract_vals(self.schedule.post_var, t, x_t.shape)
        posterior_log_var_clipped = Diffusor.extract_vals(self.schedule.post_log_var_clipped, t, x_t.shape)
        posterior_mean = ((posterior_mean_coef1 * x_start) + (posterior_mean_coef2 * x_t))
        return (posterior_mean, post_var, posterior_log_var_clipped)
    def p_mean_variance(self, x: Tensor, t: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        """Predict noise, reconstruct x_0, and return the reverse-step
        posterior (mean, variance, clipped log-variance)."""
        noise_pred = self.model(x, t)
        x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
        if self.clip_denoised:
            x_recon.clamp_((- 1.0), 1.0)
        return self.q_posterior(x_start=x_recon, x_t=x, t=t)
    @torch.no_grad()
    def p_sample(self, x: Tensor, t: Tensor) -> Tensor:
        """Draw x_{t-1} from the reverse process; deterministic at t == 0."""
        (model_mean, _, model_log_variance) = self.p_mean_variance(x=x, t=t)
        noise = torch.randn_like(x)
        if (t[0] == 0):
            # Final step: return the mean without adding noise.
            return model_mean
        else:
            return (model_mean + ((0.5 * model_log_variance).exp() * noise))
    @torch.no_grad()
    def p_sample_loop(self, shape: Tuple) -> Tensor:
        """Generate a batch of images of `shape` from pure noise."""
        batch_size = shape[0]
        img = torch.randn(shape, device=self.device)
        pbar = tqdm(iterable=reversed(range(0, self.schedule.timesteps)), desc='sampling loop time step', total=self.schedule.timesteps, leave=False)
        for i in pbar:
            t = torch.full((batch_size,), i, device=self.device, dtype=torch.long)
            img = self.p_sample(img, t)
        return img
    @torch.no_grad()
    def p_sample_loop_with_steps(self, shape: Tuple, log_every_t: int) -> Tensor:
        """Generate images and return intermediate states every `log_every_t`
        steps, stacked along a new leading dimension."""
        batch_size = shape[0]
        img = torch.randn(shape, device=self.device)
        result = [img]
        pbar = tqdm(iterable=reversed(range(0, self.schedule.timesteps)), desc='sampling loop time step', total=self.schedule.timesteps, leave=False)
        for i in pbar:
            t = torch.full((batch_size,), i, device=self.device, dtype=torch.long)
            img = self.p_sample(img, t)
            if (((i % log_every_t) == 0) or (i == (self.schedule.timesteps - 1))):
                result.append(img)
        return torch.stack(result)
|
class SR3Diffusor(Diffusor):
    """SR3-style super-resolution diffusor.

    Conditions the noise model on the low-resolution image `sr` by channel
    concatenation with the current sample.
    """
    def p_mean_variance(self, x: Tensor, sr: Tensor, t: Tensor) -> Tuple[(Tensor, Tensor, Tensor)]:
        """As in Diffusor, but the model sees [x, sr] along channels."""
        x_in = torch.cat([x, sr], dim=1)
        noise_pred = self.model(x_in, t)
        x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
        if self.clip_denoised:
            x_recon.clamp_((- 1.0), 1.0)
        return self.q_posterior(x_start=x_recon, x_t=x, t=t)
    @torch.no_grad()
    def p_sample(self, x: Tensor, sr: Tensor, t: Tensor) -> Tensor:
        """Draw x_{t-1} conditioned on `sr`; deterministic at t == 0."""
        (model_mean, _, model_log_variance) = self.p_mean_variance(x=x, sr=sr, t=t)
        noise = torch.randn_like(x)
        if (t[0] == 0):
            return model_mean
        else:
            return (model_mean + ((0.5 * model_log_variance).exp() * noise))
    @torch.no_grad()
    def p_sample_loop(self, sr: Tensor) -> Tensor:
        """Generate a super-resolved batch conditioned on `sr`, starting from
        noise with the same shape as `sr`."""
        batch_size = sr.shape[0]
        img = torch.randn(sr.shape, device=self.device)
        pbar = tqdm(iterable=reversed(range(0, self.schedule.timesteps)), desc='sampling loop time step', total=self.schedule.timesteps, leave=False)
        for i in pbar:
            t = torch.full((batch_size,), i, device=self.device, dtype=torch.long)
            img = self.p_sample(img, sr, t)
        return img
    @torch.no_grad()
    def p_sample_loop_with_steps(self, sr: Tensor, log_every_t: int) -> Tensor:
        """Conditioned generation that also returns intermediate states every
        `log_every_t` steps, stacked along a new leading dimension."""
        batch_size = sr.shape[0]
        img = torch.randn(sr.shape, device=self.device)
        result = [img]
        pbar = tqdm(iterable=reversed(range(0, self.schedule.timesteps)), desc='sampling loop time step', total=self.schedule.timesteps, leave=False)
        for i in pbar:
            t = torch.full((batch_size,), i, device=self.device, dtype=torch.long)
            img = self.p_sample(img, sr, t)
            if (((i % log_every_t) == 0) or (i == (self.schedule.timesteps - 1))):
                result.append(img)
        return torch.stack(result)
|
class DDIMDiffusor(Diffusor):
    """DDIM sampler: deterministic (eta=0) or stochastic reverse process on a
    subsequence of the training timesteps."""
    def __init__(self, model: nn.Module, schedule: BaseSchedule, sampling_steps: Optional[int]=100, eta: Optional[float]=0.0, device: Optional[torch.device]=None, clip_denoised: bool=True) -> None:
        """Initialize DDIM sampler.

        Args:
            sampling_steps: number of DDIM steps (evenly strided subset of the
                full schedule; assumes timesteps is divisible by this).
            eta: stochasticity knob; 0 gives deterministic DDIM.
        """
        super().__init__(model, schedule, device, clip_denoised)
        self.sampling_steps = sampling_steps
        self.eta = eta
        # Evenly strided subset of the full timestep range.
        self.ddim_timesteps = torch.arange(start=0, end=schedule.timesteps, step=(schedule.timesteps // self.sampling_steps))
        self.ddim_alpha_cumprod = self.schedule.alphas_cumprod[self.ddim_timesteps]
        # Shift by one DDIM step; the first entry reuses alphas_cumprod[0].
        self.ddim_alphas_cumprod_prev = torch.cat([self.schedule.alphas_cumprod[0].unsqueeze(0), self.schedule.alphas_cumprod[self.ddim_timesteps[:(- 1)]]])
        # Eq. (16) of the DDIM paper: per-step noise scale.
        self.sigmas = (self.eta * torch.sqrt((((1 - self.ddim_alphas_cumprod_prev) / (1 - self.ddim_alpha_cumprod)) * (1 - (self.ddim_alpha_cumprod / self.ddim_alphas_cumprod_prev)))))
        self.ddim_sqrt_one_minus_alphas_cumprod = torch.sqrt((1.0 - self.ddim_alpha_cumprod))
    @torch.no_grad()
    def p_sample(self, x: Tensor, t: Tensor, index: int) -> Tensor:
        """One DDIM step: predict noise, form x0 estimate, step to x_{prev}.

        `index` addresses the precomputed per-DDIM-step constants, while `t`
        carries the original timestep values fed to the model.
        """
        b = x.shape[0]
        a_t = torch.full((b, 1, 1, 1), self.ddim_alpha_cumprod[index], device=self.device)
        a_prev = torch.full((b, 1, 1, 1), self.ddim_alphas_cumprod_prev[index], device=self.device)
        sigma_t = torch.full((b, 1, 1, 1), self.sigmas[index], device=self.device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), self.ddim_sqrt_one_minus_alphas_cumprod[index], device=self.device)
        e_t = self.model(x, t)
        pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
        dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
        noise = (sigma_t * torch.randn_like(x))
        x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
        return x_prev
    @torch.no_grad()
    def p_sample_loop(self, shape: Tuple) -> Tensor:
        """Generate a batch of images of `shape` from pure noise via DDIM."""
        batch_size = shape[0]
        img = torch.randn(shape, device=self.device)
        pbar = tqdm(iterable=torch.flip(self.ddim_timesteps, dims=(0,)), desc='DDIM sampling loop time step', total=len(self.ddim_timesteps), leave=False)
        for (i, step) in enumerate(pbar):
            index = ((len(self.ddim_timesteps) - i) - 1)
            t = torch.full((batch_size,), step, device=self.device, dtype=torch.long)
            img = self.p_sample(img, t, index)
        return img
    @torch.no_grad()
    def p_sample_loop_with_steps(self, shape: Tuple, log_every_t: int) -> Tensor:
        """DDIM generation that also returns intermediate states every
        `log_every_t` iterations, stacked along a new leading dimension."""
        batch_size = shape[0]
        img = torch.randn(shape, device=self.device)
        result = [img]
        pbar = tqdm(iterable=torch.flip(self.ddim_timesteps, dims=(0,)), desc='DDIM sampling loop time step', total=len(self.ddim_timesteps), leave=False)
        for (i, step) in enumerate(pbar):
            index = ((len(self.ddim_timesteps) - i) - 1)
            t = torch.full((batch_size,), step, device=self.device, dtype=torch.long)
            img = self.p_sample(img, t, index)
            # NOTE(review): `i` indexes the shortened DDIM step list, so the
            # second clause never fires when sampling_steps < timesteps —
            # presumably copied from the full-schedule loop; confirm intent.
            if (((i % log_every_t) == 0) or (i == (self.schedule.timesteps - 1))):
                result.append(img)
        return torch.stack(result)
|
class SR3DDIMDiffusor(DDIMDiffusor):
    """DDIM sampler for SR3 super-resolution: the noise model is conditioned
    on the low-resolution image `sr` via channel concatenation."""
    @torch.no_grad()
    def p_sample(self, x: Tensor, sr: Tensor, t: Tensor, index: int) -> Tensor:
        """One conditioned DDIM step (see DDIMDiffusor.p_sample)."""
        b = x.shape[0]
        a_t = torch.full((b, 1, 1, 1), self.ddim_alpha_cumprod[index], device=self.device)
        a_prev = torch.full((b, 1, 1, 1), self.ddim_alphas_cumprod_prev[index], device=self.device)
        sigma_t = torch.full((b, 1, 1, 1), self.sigmas[index], device=self.device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), self.ddim_sqrt_one_minus_alphas_cumprod[index], device=self.device)
        # Condition the model on the low-resolution input.
        x_in = torch.cat([x, sr], dim=1)
        e_t = self.model(x_in, t)
        pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
        dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
        noise = (sigma_t * torch.randn_like(x))
        x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
        return x_prev
    @torch.no_grad()
    def p_sample_loop(self, sr: Tensor) -> Tensor:
        """Generate a super-resolved batch conditioned on `sr` via DDIM."""
        batch_size = sr.shape[0]
        img = torch.randn(sr.shape, device=self.device)
        pbar = tqdm(iterable=torch.flip(self.ddim_timesteps, dims=(0,)), desc='DDIM sampling loop time step', total=len(self.ddim_timesteps), leave=False)
        for (i, step) in enumerate(pbar):
            index = ((len(self.ddim_timesteps) - i) - 1)
            t = torch.full((batch_size,), step, device=self.device, dtype=torch.long)
            img = self.p_sample(img, sr, t, index)
        return img
    @torch.no_grad()
    def p_sample_loop_with_steps(self, sr: Tensor, log_every_t: int) -> Tensor:
        """Conditioned DDIM generation that also returns intermediate states
        every `log_every_t` iterations."""
        batch_size = sr.shape[0]
        img = torch.randn(sr.shape, device=self.device)
        result = [img]
        pbar = tqdm(iterable=torch.flip(self.ddim_timesteps, dims=(0,)), desc='DDIM sampling loop time step', total=len(self.ddim_timesteps), leave=False)
        for (i, step) in enumerate(pbar):
            index = ((len(self.ddim_timesteps) - i) - 1)
            t = torch.full((batch_size,), step, device=self.device, dtype=torch.long)
            img = self.p_sample(img, sr, t, index)
            # NOTE(review): `i` indexes the shortened DDIM step list, so the
            # second clause never fires when sampling_steps < timesteps —
            # presumably copied from the full-schedule loop; confirm intent.
            if (((i % log_every_t) == 0) or (i == (self.schedule.timesteps - 1))):
                result.append(img)
        return torch.stack(result)
|
class CheffSRModel():
    """Super-resolution wrapper around a pretrained SR3 UNet.

    Loads the checkpoint, builds a cosine schedule, and exposes sampling from
    single images, tensors, or whole directories.
    """
    def __init__(self, model_path: str, device: Union[(str, int, torch.device)]='cuda') -> None:
        self.device = device
        # channels=2: noisy sample + low-res conditioning concatenated along
        # channels; out_dim=1: single-channel (grayscale) noise prediction.
        self.model = Unet(dim=16, channels=2, out_dim=1, dim_mults=(1, 2, 4, 8, 16, 32, 32, 32))
        state_dict = torch.load(model_path, map_location='cpu')
        self.model.load_state_dict(state_dict['model'])
        self.model.to(self.device)
        self.model.eval()
        self.schedule = ScheduleFactory.get_schedule(name='cosine', timesteps=2000, device=self.device)
    def sample_directory(self, source_dir: str, target_dir: str, batch_size: int=1, method: str='ddim', sampling_steps: int=100, eta: float=0.0) -> None:
        """Super-resolve every image in `source_dir` into `target_dir`,
        preserving filenames."""
        ds = DirectoryDataset(source_dir)
        loader = DataLoader(ds, batch_size=batch_size, pin_memory=True)
        os.makedirs(target_dir, exist_ok=True)
        for (f_names, imgs) in loader:
            imgs_sr = self.sample(imgs, method, sampling_steps, eta)
            for (f_name, img_sr) in zip(f_names, imgs_sr):
                path = os.path.join(target_dir, f_name)
                save_image(img_sr, path)
    def sample_path(self, path: str, method: str='ddim', sampling_steps: int=100, eta: float=0.0) -> Tensor:
        """Load a single image from disk and return its super-resolved tensor."""
        img = Image.open(path)
        img = to_tensor(to_grayscale(img)).unsqueeze(0)
        return self.sample(img, method, sampling_steps, eta)
    @torch.no_grad()
    def sample(self, img: Tensor, method: str='ddim', sampling_steps: int=100, eta: float=0.0) -> Tensor:
        """Super-resolve a batch of grayscale images in [0, 1].

        Args:
            img: (b, 1, h, w) tensor; values in [0, 1].
            method: 'ddim' for the fast sampler, anything else uses the full
                ancestral SR3 sampler.
            sampling_steps, eta: DDIM parameters (ignored for the full sampler).

        Returns:
            (b, 1, 1024, 1024) tensor with values back in [0, 1].
        """
        img = img.to(self.device)
        # Map [0, 1] -> [-1, 1], the range the diffusion model was trained on.
        img = ((img * 2) - 1)
        # Upsample the conditioning image to the target resolution.
        img = resize(img, [1024, 1024], InterpolationMode.BICUBIC)
        if (method == 'ddim'):
            diffusor = SR3DDIMDiffusor(model=self.model, schedule=self.schedule, sampling_steps=sampling_steps, eta=eta)
        else:
            diffusor = SR3Diffusor(model=self.model, schedule=self.schedule)
        img_sr = diffusor.p_sample_loop(sr=img)
        img_sr.clamp_((- 1), 1)
        # Map back to [0, 1] for saving/display.
        img_sr = ((img_sr + 1) / 2)
        return img_sr
|
class DirectoryDataset(Dataset):
    """Dataset over all files directly inside a directory.

    Yields (filename, grayscale image tensor) pairs.
    """
    def __init__(self, root: str) -> None:
        self.root = root
        self.files = os.listdir(root)
    def __len__(self):
        return len(self.files)
    def __getitem__(self, idx: int) -> Tuple[(str, Tensor)]:
        f_name = self.files[idx]
        image = Image.open(os.path.join(self.root, f_name))
        return (f_name, to_tensor(to_grayscale(image)))
|
class BaseSchedule(ABC):
    """Base class for deriving schedules.

    Precomputes every per-timestep constant needed by the forward (q) and
    reverse (p) diffusion processes from the beta schedule a subclass
    supplies via `_get_betas`.
    """
    def __init__(self, timesteps: int, device: Optional[torch.device]=None, *args, **kwargs) -> None:
        """Initialize BaseSchedule.

        Args:
            timesteps: number of diffusion steps T.
            device: target device for the schedule tensors; CPU when None.
        """
        self.timesteps = timesteps
        # Resolve the device first and use it uniformly below; the original
        # code passed the raw (possibly None) parameter to `.to()`.
        if (device is None):
            self.device = torch.device('cpu')
        else:
            self.device = device
        self.betas = self._get_betas(timesteps).to(self.device)
        self.alphas = (1.0 - self.betas)
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # \bar{alpha}_{t-1}, with \bar{alpha}_{-1} defined as 1.
        self.alphas_cumprod_prev = F.pad(self.alphas_cumprod[:(- 1)], (1, 0), value=1.0)
        self.sqrt_recip_alphas = torch.sqrt((1.0 / self.alphas))
        self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = torch.sqrt((1.0 - self.alphas_cumprod))
        self.log_one_minus_alphas_cumprod = torch.log((1.0 - self.alphas_cumprod))
        self.sqrt_recip_alphas_cumprod = torch.sqrt((1.0 / self.alphas_cumprod))
        self.sqrt_recipm1_alphas_cumprod = torch.sqrt(((1.0 / self.alphas_cumprod) - 1))
        # Posterior q(x_{t-1} | x_t, x_0): variance and mean coefficients.
        self.post_var = ((self.betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        # Clamp before log: the t=0 posterior variance is exactly zero.
        self.post_log_var_clipped = torch.log(torch.maximum(self.post_var, torch.tensor(1e-20)))
        self.post_mean_coef1 = ((self.betas * torch.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        self.post_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * torch.sqrt(self.alphas)) / (1.0 - self.alphas_cumprod))
    @abstractmethod
    def _get_betas(self, timesteps: int) -> Tensor:
        """Return the beta noise schedule as a 1-D tensor of length `timesteps`."""
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.