code stringlengths 17 6.64M |
|---|
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Resolve the column token at start_idx to a schema column id.

    :returns: (next idx, column id)
    """
    tok = toks[start_idx]
    if tok == '*':
        # wildcard maps straight through the schema id map
        return start_idx + 1, schema.idMap[tok]
    if '.' in tok:
        # qualified reference: alias.column -> table.column
        alias, col = tok.split('.')
        full_name = tables_with_alias[alias] + '.' + col
        return start_idx + 1, schema.idMap[full_name]
    assert default_tables is not None and len(default_tables) > 0, 'Default tables should not be None or empty'
    # unqualified column: search the FROM-clause tables in order
    for alias in default_tables:
        table = tables_with_alias[alias]
        if tok in schema.schema[table]:
            return start_idx + 1, schema.idMap[table + '.' + tok]
    assert False, 'Error col: {}'.format(tok)
|
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """
    :returns next idx, (agg_op id, col_id, isDistinct)
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    isDistinct = False
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    if (toks[idx] in AGG_OPS):
        # aggregated column: agg '(' [distinct] col ')'
        agg_id = AGG_OPS.index(toks[idx])
        idx += 1
        assert ((idx < len_) and (toks[idx] == '('))
        idx += 1
        if (toks[idx] == 'distinct'):
            idx += 1
            isDistinct = True
        (idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
        assert ((idx < len_) and (toks[idx] == ')'))
        idx += 1
        # NOTE(review): on this aggregate path a '(' consumed at the top
        # (isBlock) is never matched against a closing ')' — confirm inputs
        # never wrap an aggregate in extra parens
        return (idx, (agg_id, col_id, isDistinct))
    # plain column, optionally preceded by 'distinct'
    if (toks[idx] == 'distinct'):
        idx += 1
        isDistinct = True
    agg_id = AGG_OPS.index('none')
    (idx, col_id) = parse_col(toks, idx, tables_with_alias, schema, default_tables)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    return (idx, (agg_id, col_id, isDistinct))
|
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a value unit: col_unit [arithmetic-op col_unit].

    :returns: (next idx, (unit_op id, col_unit1, col_unit2))
    """
    idx = start_idx
    total = len(toks)
    wrapped = toks[idx] == '('
    if wrapped:
        idx += 1
    op_id = UNIT_OPS.index('none')
    second = None
    idx, first = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
    if idx < total and toks[idx] in UNIT_OPS:
        # arithmetic between two column units, e.g. a - b
        op_id = UNIT_OPS.index(toks[idx])
        idx += 1
        idx, second = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
    if wrapped:
        assert toks[idx] == ')'
        idx += 1
    return idx, (op_id, first, second)
|
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
    """Parse a table reference, skipping an optional 'as alias' suffix.

    :returns: (next idx, table id, table name)
    """
    idx = start_idx
    key = tables_with_alias[toks[idx]]
    # 'table as alias' spans three tokens, a bare table name just one
    has_alias = (idx + 1) < len(toks) and toks[idx + 1] == 'as'
    idx += 3 if has_alias else 1
    return idx, schema.idMap[key], key
|
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse one condition operand.

    :returns next idx, value — a nested sql dict (sub-select), the quoted
        string token, a float literal, or a col_unit tuple
    """
    idx = start_idx
    len_ = len(toks)
    isBlock = False
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    if (toks[idx] == 'select'):
        # nested sub-query used as the value
        (idx, val) = parse_sql(toks, idx, tables_with_alias, schema)
    elif ('"' in toks[idx]):
        # quoted string literal, kept verbatim (token includes its quotes)
        val = toks[idx]
        idx += 1
    else:
        try:
            val = float(toks[idx])
            idx += 1
        except:
            # not numeric: treat the span up to the next delimiter as a column unit
            end_idx = idx
            while ((end_idx < len_) and (toks[end_idx] != ',') and (toks[end_idx] != ')') and (toks[end_idx] != 'and') and (toks[end_idx] not in CLAUSE_KEYWORDS) and (toks[end_idx] not in JOIN_KEYWORDS)):
                end_idx += 1
            # NOTE(review): the slice starts at start_idx (not idx), so a '('
            # consumed above is re-included in the col_unit parse — confirm
            # this is intentional
            (idx, val) = parse_col_unit(toks[start_idx:end_idx], 0, tables_with_alias, schema, default_tables)
            idx = end_idx
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    return (idx, val)
|
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse a chain of comparisons joined by and/or.

    :returns next idx, conds — a flat list alternating condition tuples
        (not_op, op_id, val_unit, val1, val2) and 'and'/'or' connective strings
    """
    idx = start_idx
    len_ = len(toks)
    conds = []
    while (idx < len_):
        (idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        not_op = False
        if (toks[idx] == 'not'):
            not_op = True
            idx += 1
        assert ((idx < len_) and (toks[idx] in WHERE_OPS)), 'Error condition: idx: {}, tok: {}'.format(idx, toks[idx])
        op_id = WHERE_OPS.index(toks[idx])
        idx += 1
        val1 = val2 = None
        if (op_id == WHERE_OPS.index('between')):
            # BETWEEN takes two values separated by 'and'
            (idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
            assert (toks[idx] == 'and')
            idx += 1
            (idx, val2) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
        else:
            # every other operator takes a single right-hand value
            (idx, val1) = parse_value(toks, idx, tables_with_alias, schema, default_tables)
            val2 = None
        conds.append((not_op, op_id, val_unit, val1, val2))
        # stop at the next clause keyword, closing paren/semicolon, or join
        if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')) or (toks[idx] in JOIN_KEYWORDS))):
            break
        if ((idx < len_) and (toks[idx] in COND_OPS)):
            # keep the 'and'/'or' connective inline in the result list
            conds.append(toks[idx])
            idx += 1
    return (idx, conds)
|
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Parse the SELECT clause.

    :returns next idx, (isDistinct, [(agg_id, val_unit), ...])
    """
    idx = start_idx
    len_ = len(toks)
    assert (toks[idx] == 'select'), "'select' not found"
    idx += 1
    isDistinct = False
    if ((idx < len_) and (toks[idx] == 'distinct')):
        idx += 1
        isDistinct = True
    val_units = []
    while ((idx < len_) and (toks[idx] not in CLAUSE_KEYWORDS)):
        # optional aggregation wrapping the whole value unit
        agg_id = AGG_OPS.index('none')
        if (toks[idx] in AGG_OPS):
            agg_id = AGG_OPS.index(toks[idx])
            idx += 1
        (idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        val_units.append((agg_id, val_unit))
        if ((idx < len_) and (toks[idx] == ',')):
            idx += 1
    return (idx, (isDistinct, val_units))
|
def parse_from(toks, start_idx, tables_with_alias, schema):
    """
    Assume in the from clause, all table units are combined with join.

    :returns next idx, table_units, flattened ON conds, default_tables
        (table names usable for resolving unqualified columns)
    """
    assert ('from' in toks[start_idx:]), "'from' not found"
    len_ = len(toks)
    idx = (toks.index('from', start_idx) + 1)
    default_tables = []
    table_units = []
    conds = []
    while (idx < len_):
        isBlock = False
        if (toks[idx] == '('):
            isBlock = True
            idx += 1
        if (toks[idx] == 'select'):
            # sub-query used as a table source
            (idx, sql) = parse_sql(toks, idx, tables_with_alias, schema)
            table_units.append((TABLE_TYPE['sql'], sql))
        else:
            if ((idx < len_) and (toks[idx] == 'join')):
                idx += 1
            (idx, table_unit, table_name) = parse_table_unit(toks, idx, tables_with_alias, schema)
            table_units.append((TABLE_TYPE['table_unit'], table_unit))
            default_tables.append(table_name)
        if ((idx < len_) and (toks[idx] == 'on')):
            # merge every ON condition into one flat 'and'-joined list
            idx += 1
            (idx, this_conds) = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
            if (len(conds) > 0):
                conds.append('and')
            conds.extend(this_conds)
        if isBlock:
            assert (toks[idx] == ')')
            idx += 1
        if ((idx < len_) and ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';')))):
            break
    return (idx, table_units, conds, default_tables)
|
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional WHERE clause; returns (next idx, condition list)."""
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'where':
        # no WHERE clause at this position
        return idx, []
    return parse_condition(toks, idx + 1, tables_with_alias, schema, default_tables)
|
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional GROUP BY clause; returns (next idx, list of col_units)."""
    idx = start_idx
    total = len(toks)
    col_units = []
    if idx >= total or toks[idx] != 'group':
        return idx, col_units
    idx += 1
    assert toks[idx] == 'by'
    idx += 1
    # consume comma-separated columns until a clause keyword, ')' or ';'
    while idx < total:
        if toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (')', ';'):
            break
        idx, unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
        col_units.append(unit)
        if idx < total and toks[idx] == ',':
            idx += 1
        else:
            break
    return idx, col_units
|
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional ORDER BY clause.

    :returns next idx, [] when absent, else (order_type, val_units) with
        order_type 'asc' (default) or 'desc'
    """
    idx = start_idx
    len_ = len(toks)
    val_units = []
    order_type = 'asc'
    if ((idx >= len_) or (toks[idx] != 'order')):
        # NOTE(review): the absent case returns a bare list while the present
        # case returns a tuple — callers must handle both shapes
        return (idx, val_units)
    idx += 1
    assert (toks[idx] == 'by')
    idx += 1
    while ((idx < len_) and (not ((toks[idx] in CLAUSE_KEYWORDS) or (toks[idx] in (')', ';'))))):
        (idx, val_unit) = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        val_units.append(val_unit)
        if ((idx < len_) and (toks[idx] in ORDER_OPS)):
            # a single asc/desc applies to the whole clause (last one wins)
            order_type = toks[idx]
            idx += 1
        if ((idx < len_) and (toks[idx] == ',')):
            idx += 1
        else:
            break
    return (idx, (order_type, val_units))
|
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
    """Parse an optional HAVING clause; returns (next idx, condition list)."""
    idx = start_idx
    if idx >= len(toks) or toks[idx] != 'having':
        # no HAVING clause at this position
        return idx, []
    return parse_condition(toks, idx + 1, tables_with_alias, schema, default_tables)
|
def parse_limit(toks, start_idx):
    """Parse an optional LIMIT clause.

    :returns next idx, int limit value (None when no LIMIT is present)
    """
    idx = start_idx
    len_ = len(toks)
    if ((idx < len_) and (toks[idx] == 'limit')):
        idx += 2
        # Tokens are strings, so the original `type(tok) != int` check was
        # always true and every query got a fake limit of 1. Parse digit
        # tokens so the real LIMIT value is preserved.
        if toks[(idx - 1)].isdigit():
            return (idx, int(toks[(idx - 1)]))
        return (idx, 1)
    return (idx, None)
|
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Parse a (possibly parenthesised) SELECT statement into a nested dict.

    :returns next idx, sql dict with keys 'select', 'from', 'where',
        'groupBy', 'having', 'orderBy', 'limit' plus one key per SQL_OPS
        set operation mapping to a nested sql dict or None
    """
    isBlock = False
    len_ = len(toks)
    idx = start_idx
    sql = {}
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    # parse FROM first so default_tables is known when resolving SELECT columns
    (from_end_idx, table_units, conds, default_tables) = parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    (_, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
    idx = from_end_idx
    sql['select'] = select_col_units
    (idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
    sql['where'] = where_conds
    (idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['groupBy'] = group_col_units
    (idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
    sql['having'] = having_conds
    (idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['orderBy'] = order_col_units
    (idx, limit_val) = parse_limit(toks, idx)
    sql['limit'] = limit_val
    idx = skip_semicolon(toks, idx)
    if isBlock:
        assert (toks[idx] == ')')
        idx += 1
    idx = skip_semicolon(toks, idx)
    # at most one trailing INTERSECT / UNION / EXCEPT sub-query
    for op in SQL_OPS:
        sql[op] = None
    if ((idx < len_) and (toks[idx] in SQL_OPS)):
        sql_op = toks[idx]
        idx += 1
        (idx, IUE_sql) = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql
    return (idx, sql)
|
def load_data(fpath):
    """Load and return the JSON content of the file at fpath."""
    with open(fpath) as fp:
        return json.load(fp)
|
def get_sql(schema, query):
    """Tokenize a SQL query string and parse it into the nested sql dict."""
    tokens = tokenize(query)
    alias_map = get_tables_with_alias(schema.schema, tokens)
    _, parsed = parse_sql(tokens, 0, alias_map, schema)
    return parsed
|
def skip_semicolon(toks, start_idx):
    """Return the first index at or after start_idx whose token is not ';'."""
    idx = start_idx
    total = len(toks)
    while idx < total and toks[idx] == ';':
        idx += 1
    return idx
|
def clean_str_month(o):
    """Coerce o to a string (ints only) and replace month names via month2num."""
    text = str(o) if isinstance(o, int) else o
    for name, number in month2num.items():
        text = text.replace(name, number)
    return text
|
def date_parser(o):
    """Try each known datetime format against o.

    :returns: {'value': datetime, 'template': fmt} for the first format that
        parses, else {'value': None, 'template': 'N/A'}
    """
    fallback = {'value': None, 'template': 'N/A'}
    if o is None:
        return fallback
    cleaned = clean_str_month(o)
    for fmt in templates:
        try:
            parsed = datetime.datetime.strptime(cleaned, fmt)
        except ValueError:
            continue
        return {'value': parsed, 'template': fmt}
    return fallback
|
class cv_colors(Enum):
    # Drawing colors as BGR tuples (OpenCV channel order, not RGB)
    RED = (0, 0, 255)
    GREEN = (0, 255, 0)
    BLUE = (255, 0, 0)
    PURPLE = (247, 44, 200)
    ORANGE = (44, 162, 247)
    MINT = (239, 255, 66)
    YELLOW = (2, 255, 250)
|
def constraint_to_color(constraint_idx):
    """Map a constraint index (0-3) to its BGR drawing color."""
    palette = {
        0: cv_colors.PURPLE.value,
        1: cv_colors.ORANGE.value,
        2: cv_colors.MINT.value,
        3: cv_colors.YELLOW.value,
    }
    return palette[constraint_idx]
|
def create_2d_box(box_2d):
    """Expand a two-corner box ((x1, y1), (x2, y2)) into four corner points.

    :returns: (pt1, pt2, pt3, pt4) walking around the rectangle; pt1 and pt3
        are the original corners
    """
    first, second = box_2d
    pt1 = first
    pt2 = (first[0], second[1])
    pt3 = second
    pt4 = (second[0], first[1])
    return (pt1, pt2, pt3, pt4)
|
def project_3d_pt(pt, cam_to_img, calib_file=None):
    """Project a 3D camera-space point to integer 2D pixel coordinates.

    :param pt: 3D point (iterable of three numbers)
    :param cam_to_img: 3x4 camera projection matrix (ignored when calib_file is given)
    :param calib_file: optional calibration file that overrides cam_to_img
    :returns: np.ndarray of two int16 pixel coordinates

    Fix: the original also computed get_R0() and get_tr_to_velo() here but
    never used the results — dead code and wasted calib-file reads, removed.
    """
    if (calib_file is not None):
        cam_to_img = get_calibration_cam_to_image(calib_file)
    point = np.append(np.array(pt), 1)  # homogeneous coordinates
    point = np.dot(cam_to_img, point)
    point = (point[:2] / point[2])      # perspective divide
    # NOTE(review): int16 overflows for |coords| > 32767 — presumably fine for
    # image-sized outputs, confirm for far off-screen points
    point = point.astype(np.int16)
    return point
|
def plot_3d_pts(img, pts, center, calib_file=None, cam_to_img=None, relative=False, constraint_idx=None):
    """Project 3D points into the image and draw each as a filled circle.

    :param center: 3D offset added to every point when relative is True
    :param calib_file: optional calibration file; overrides cam_to_img
    :param constraint_idx: when given, selects the circle color (default red)
    """
    if (calib_file is not None):
        cam_to_img = get_calibration_cam_to_image(calib_file)
    for pt in pts:
        if relative:
            # shift from object-relative to camera coordinates
            pt = [(i + center[j]) for (j, i) in enumerate(pt)]
        point = project_3d_pt(pt, cam_to_img)
        color = cv_colors.RED.value
        if (constraint_idx is not None):
            color = constraint_to_color(constraint_idx)
        # thickness -1 draws a filled circle
        cv2.circle(img, (point[0], point[1]), 3, color, thickness=(- 1))
|
def plot_3d_box(img, cam_to_img, ry, dimension, center):
    """Draw a 3D bounding box: green wireframe plus a semi-transparent blue face.

    :param ry: rotation around the camera Y axis (yaw)
    :param dimension: 3D box dimensions
    :param center: 3D box center in camera coordinates
    """
    R = rotation_matrix(ry)
    corners = create_corners(dimension, location=center, R=R)
    # project all 8 corners to 2D pixel coordinates
    box_3d = []
    for corner in corners:
        point = project_3d_pt(corner, cam_to_img)
        box_3d.append(point)
    # long edges connecting non-adjacent projected corners
    cv2.line(img, (box_3d[0][0], box_3d[0][1]), (box_3d[2][0], box_3d[2][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[4][0], box_3d[4][1]), (box_3d[6][0], box_3d[6][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[0][0], box_3d[0][1]), (box_3d[4][0], box_3d[4][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[2][0], box_3d[2][1]), (box_3d[6][0], box_3d[6][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[1][0], box_3d[1][1]), (box_3d[3][0], box_3d[3][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[1][0], box_3d[1][1]), (box_3d[5][0], box_3d[5][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[7][0], box_3d[7][1]), (box_3d[3][0], box_3d[3][1]), cv_colors.GREEN.value, 2)
    cv2.line(img, (box_3d[7][0], box_3d[7][1]), (box_3d[5][0], box_3d[5][1]), cv_colors.GREEN.value, 2)
    # remaining edges, pairing consecutive even/odd corners
    for i in range(0, 7, 2):
        cv2.line(img, (box_3d[i][0], box_3d[i][1]), (box_3d[(i + 1)][0], box_3d[(i + 1)][1]), cv_colors.GREEN.value, 2)
    # shade one face: draw it on a blank frame, then alpha-blend onto img
    frame = np.zeros_like(img, np.uint8)
    cv2.fillPoly(frame, np.array([[[box_3d[0]], [box_3d[1]], [box_3d[3]], [box_3d[2]]]], dtype=np.int32), cv_colors.BLUE.value)
    alpha = 0.5
    mask = frame.astype(bool)
    img[mask] = cv2.addWeighted(img, alpha, frame, (1 - alpha), 0)[mask]
|
def plot_2d_box(img, box_2d):
    """Draw a 2D bounding box outline on img in blue (thickness 2)."""
    corners = create_2d_box(box_2d)
    # connect each corner to the next, wrapping back to the first
    for a, b in zip(corners, corners[1:] + corners[:1]):
        cv2.line(img, a, b, cv_colors.BLUE.value, 2)
|
@app.route('/')
def start_page():
    """Serve the landing page."""
    print('Start')
    return render_template('index.html')
|
@app.route('/upload', methods=['POST'])
def upload_file():
    """Handle an image upload, run 3D detection, and render the result page.

    Fixes: check for the 'image' field before reading request.files (the
    original indexed it unconditionally and would raise KeyError), always
    define the variable passed to the template (the original raised
    UnboundLocalError on the no-image path), and drop the unused FILENAME
    local.
    """
    detect = False
    to_send = None
    if ('image' in request.files):
        image = request.files['image']
        image.save('static/image_eval.png')
        detect = True
        detect3d(reg_weights='weights/epoch_10.pkl', model_select='resnet', source='static', calib_file='eval/camera_cal/calib_cam_to_cam.txt', save_result=True, show_result=False, output_path='static/')
        # embed the rendered detection image as a base64 data URI
        with open('static/000.png', 'rb') as image_file:
            img_encode = base64.b64encode(image_file.read())
        to_send = ('data:image/png;base64, ' + str(img_encode, 'utf-8'))
    return render_template('index.html', init=True, detect=detect, image_to_show=to_send)
|
def parse_opt():
    """Build and parse the CLI arguments for 3D detection inference."""
    p = argparse.ArgumentParser()
    p.add_argument('--weights', nargs='+', type=str, default=(ROOT / 'yolov5s.pt'), help='model path(s)')
    p.add_argument('--source', type=str, default=(ROOT / 'eval/image_2'), help='file/dir/URL/glob, 0 for webcam')
    p.add_argument('--data', type=str, default=(ROOT / 'data/coco128.yaml'), help='(optional) dataset.yaml path')
    p.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    p.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    p.add_argument('--classes', default=[0, 2, 3, 5], nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    p.add_argument('--reg_weights', type=str, default='weights/epoch_10.pkl', help='Regressor model weights')
    p.add_argument('--model_select', type=str, default='resnet', help='Regressor model list: resnet, vgg, eff')
    p.add_argument('--calib_file', type=str, default=(ROOT / 'eval/camera_cal/calib_cam_to_cam.txt'), help='Calibration file or path')
    p.add_argument('--show_result', action='store_true', help='Show Results with imshow')
    p.add_argument('--save_result', action='store_true', help='Save result')
    p.add_argument('--output_path', type=str, default=(ROOT / 'output'), help='Save output pat')
    opt = p.parse_args()
    # a single --imgsz value is duplicated to give (height, width)
    opt.imgsz *= (2 if (len(opt.imgsz) == 1) else 1)
    return opt
|
class CrossConv(nn.Module):
    """Cross convolution downsample: a 1xk conv followed by a kx1 conv,
    with an optional residual connection when channel counts match."""
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        super().__init__()
        hidden = int(c2 * e)  # hidden channel count
        self.cv1 = Conv(c1, hidden, (1, k), (1, s))
        self.cv2 = Conv(hidden, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2
    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
|
class Sum(nn.Module):
    """Sum of n inputs, optionally with learnable per-input weights."""
    def __init__(self, n, weight=False):
        super().__init__()
        self.weight = weight       # apply learned weights to inputs 1..n-1
        self.iter = range(n - 1)   # indices of the inputs added onto x[0]
        if weight:
            # learnable logits, squashed to (0, 2) by sigmoid*2 in forward
            self.w = nn.Parameter((- torch.arange(1.0, n)) / 2, requires_grad=True)
    def forward(self, x):
        total = x[0]
        if self.weight:
            scaled = torch.sigmoid(self.w) * 2
            for i in self.iter:
                total = total + x[i + 1] * scaled[i]
        else:
            for i in self.iter:
                total = total + x[i + 1]
        return total
|
class MixConv2d(nn.Module):
    """Mixed depth-wise conv: the c2 output channels are split across one
    branch per kernel size in k, then concatenated."""
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        # k: tuple of kernel sizes, one conv branch per entry
        super().__init__()
        n = len(k)
        if equal_ch:
            # spread c2 output channels (almost) equally across the n branches
            i = torch.linspace(0, (n - 1e-06), c2).floor()
            c_ = [(i == g).sum() for g in range(n)]
        else:
            # solve a linear system so each branch gets an equal parameter count
            b = ([c2] + ([0] * n))
            a = np.eye((n + 1), n, k=(- 1))
            a -= np.roll(a, 1, axis=1)
            a *= (np.array(k) ** 2)
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()
        # NOTE: the comprehension deliberately rebinds k and c_ to per-branch
        # scalars; (k // 2) keeps 'same' padding for odd kernel sizes
        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_), k, s, (k // 2), groups=math.gcd(c1, int(c_)), bias=False) for (k, c_) in zip(k, c_)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()
    def forward(self, x):
        # run every branch and concatenate along the channel dimension
        return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
|
class Ensemble(nn.ModuleList):
    """A list of models whose inference outputs are concatenated on dim 1,
    so downstream NMS sees every model's detections."""
    def __init__(self):
        super().__init__()
    def forward(self, x, augment=False, profile=False, visualize=False):
        # each member returns (inference_out, train_out); keep only the former
        outputs = [module(x, augment, profile, visualize)[0] for module in self]
        return torch.cat(outputs, 1), None  # (inference, train-output placeholder)
|
def attempt_load(weights, map_location=None, inplace=True, fuse=True):
    """Load one checkpoint as a model, or several as an Ensemble.

    :param weights: path or list of paths; missing files are fetched via
        attempt_download
    :param fuse: fuse Conv+BN layers for faster inference
    :returns: a single model when one weight is given, else an Ensemble
    """
    from models.yolo import Detect, Model
    model = Ensemble()
    for w in (weights if isinstance(weights, list) else [weights]):
        ckpt = torch.load(attempt_download(w), map_location=map_location)
        # prefer EMA weights when the checkpoint carries them
        if fuse:
            model.append(ckpt[('ema' if ckpt.get('ema') else 'model')].float().fuse().eval())
        else:
            model.append(ckpt[('ema' if ckpt.get('ema') else 'model')].float().eval())
    # compatibility fixes for checkpoints saved by older torch/yolov5 versions
    for m in model.modules():
        if (type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]):
            m.inplace = inplace
            if (type(m) is Detect):
                if (not isinstance(m.anchor_grid, list)):
                    # rebuild anchor_grid as a per-layer list of tensors
                    delattr(m, 'anchor_grid')
                    setattr(m, 'anchor_grid', ([torch.zeros(1)] * m.nl))
        elif (type(m) is Conv):
            m._non_persistent_buffers_set = set()
    if (len(model) == 1):
        return model[(- 1)]
    else:
        print(f'''Ensemble created with {weights}
''')
        for k in ['names']:
            setattr(model, k, getattr(model[(- 1)], k))
        # use the stride of the member with the largest maximum stride
        model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride
        return model
|
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as plain Python lists."""
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # defer to the stock encoder, which raises TypeError for unknown types
        return json.JSONEncoder.default(self, obj)
|
class ClassAverages():
    """Accumulates per-class 3D dimension totals/counts, persisted to
    class_averages.txt next to this module.

    Fixes: the mutable default argument (classes=[]) is replaced with None,
    and file handles are managed with context managers
    (load_items_from_file previously leaked its handle).
    """
    def __init__(self, classes=None):
        """:param classes: class names to track; when None/empty, previously
        saved averages are loaded from disk instead."""
        self.dimension_map = {}
        self.filename = (os.path.abspath(os.path.dirname(__file__)) + '/class_averages.txt')
        if classes is None:
            classes = []
        if (len(classes) == 0):
            self.load_items_from_file()
        for detection_class in classes:
            class_ = detection_class.lower()
            if (class_ in self.dimension_map.keys()):
                continue
            # start a fresh running total for this class
            self.dimension_map[class_] = {}
            self.dimension_map[class_]['count'] = 0
            self.dimension_map[class_]['total'] = np.zeros(3, dtype=np.double)
    def add_item(self, class_, dimension):
        """Add one observed dimension vector to the class's running total."""
        class_ = class_.lower()
        self.dimension_map[class_]['count'] += 1
        self.dimension_map[class_]['total'] += dimension
    def get_item(self, class_):
        """Return the mean dimension vector observed for class_."""
        class_ = class_.lower()
        return (self.dimension_map[class_]['total'] / self.dimension_map[class_]['count'])
    def dump_to_file(self):
        """Persist the running totals as JSON (numpy arrays become lists)."""
        with open(self.filename, 'w') as f:
            f.write(json.dumps(self.dimension_map, cls=NumpyEncoder))
    def load_items_from_file(self):
        """Restore totals from disk, converting lists back to numpy arrays."""
        with open(self.filename, 'r') as f:
            dimension_map = json.load(f)
        for class_ in dimension_map:
            dimension_map[class_]['total'] = np.asarray(dimension_map[class_]['total'])
        self.dimension_map = dimension_map
    def recognized_class(self, class_):
        """Return True when class_ (case-insensitive) is being tracked."""
        return (class_.lower() in self.dimension_map)
|
def train(epochs=10, batch_size=32, alpha=0.6, w=0.4, num_workers=2, lr=0.0001, save_epoch=10, train_path=(ROOT / 'dataset/KITTI/training'), model_path=(ROOT / 'weights/'), select_model='resnet18', api_key=''):
    """Train the 3D-box regressor on KITTI, logging metrics to comet.ml.

    Total loss = alpha * dim_loss + conf_loss + w * orient_loss. Resumes from
    the newest .pkl found in model_path, and saves a checkpoint every
    save_epoch epochs.

    :param alpha: weight of the dimension (MSE) loss
    :param w: weight of the orientation loss
    :param api_key: comet.ml API key
    """
    train_path = str(train_path)
    model_path = str(model_path)
    print('[INFO] Loading dataset...')
    dataset = Dataset(train_path)
    hyper_params = {'epochs': epochs, 'batch_size': batch_size, 'w': w, 'num_workers': num_workers, 'lr': lr, 'shuffle': True}
    experiment = Experiment(api_key, project_name='YOLO3D')
    experiment.log_parameters(hyper_params)
    data_gen = data.DataLoader(dataset, batch_size=hyper_params['batch_size'], shuffle=hyper_params['shuffle'], num_workers=hyper_params['num_workers'])
    base_model = model_factory[select_model]
    model = regressor_factory[select_model](model=base_model).cuda()
    opt_SGD = torch.optim.SGD(model.parameters(), lr=hyper_params['lr'], momentum=0.9)
    # three-headed loss: bin confidence (CE), dimensions (MSE), orientation
    conf_loss_func = nn.CrossEntropyLoss().cuda()
    dim_loss_func = nn.MSELoss().cuda()
    orient_loss_func = OrientationLoss
    latest_model = None
    first_epoch = 1
    if (not os.path.isdir(model_path)):
        os.mkdir(model_path)
    else:
        try:
            # newest checkpoint by lexicographic sort of filenames
            latest_model = [x for x in sorted(os.listdir(model_path)) if x.endswith('.pkl')][(- 1)]
        except:
            pass
    if (latest_model is not None):
        # NOTE(review): plain string concat assumes model_path ends with '/'
        checkpoint = torch.load((model_path + latest_model))
        model.load_state_dict(checkpoint['model_state_dict'])
        opt_SGD.load_state_dict(checkpoint['optimizer_state_dict'])
        first_epoch = checkpoint['epoch']
        loss = checkpoint['loss']
        print(f'[INFO] Using previous model {latest_model} at {first_epoch} epochs')
        print('[INFO] Resuming training...')
    total_num_batches = int((len(dataset) / hyper_params['batch_size']))
    with experiment.train():
        for epoch in range(first_epoch, (int(hyper_params['epochs']) + 1)):
            # NOTE(review): curr_batch and passes are never updated or read
            curr_batch = 0
            passes = 0
            with tqdm(data_gen, unit='batch') as tepoch:
                for (local_batch, local_labels) in tepoch:
                    tepoch.set_description(f'Epoch {epoch}')
                    truth_orient = local_labels['Orientation'].float().cuda()
                    truth_conf = local_labels['Confidence'].float().cuda()
                    truth_dim = local_labels['Dimensions'].float().cuda()
                    local_batch = local_batch.float().cuda()
                    [orient, conf, dim] = model(local_batch)
                    orient_loss = orient_loss_func(orient, truth_orient, truth_conf)
                    dim_loss = dim_loss_func(dim, truth_dim)
                    # CE expects class indices, so take argmax of one-hot conf
                    truth_conf = torch.max(truth_conf, dim=1)[1]
                    conf_loss = conf_loss_func(conf, truth_conf)
                    loss_theta = (conf_loss + (w * orient_loss))
                    loss = ((alpha * dim_loss) + loss_theta)
                    # `writer` is presumably a module-level SummaryWriter — confirm
                    writer.add_scalar('Loss/train', loss, epoch)
                    experiment.log_metric('Loss/train', loss, epoch=epoch)
                    opt_SGD.zero_grad()
                    loss.backward()
                    opt_SGD.step()
                    tepoch.set_postfix(loss=loss.item())
            if ((epoch % save_epoch) == 0):
                # checkpoint carries optimizer state and last batch loss for resume
                model_name = os.path.join(model_path, f'{select_model}_epoch_{epoch}.pkl')
                torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': opt_SGD.state_dict(), 'loss': loss}, model_name)
                print(f'[INFO] Saving weights as {model_name}')
    writer.flush()
    writer.close()
|
def parse_opt():
    """Build and parse the CLI arguments for regressor training."""
    ap = argparse.ArgumentParser(description='Regressor Model Training')
    ap.add_argument('--epochs', type=int, default=10, help='Number of epochs')
    ap.add_argument('--batch_size', type=int, default=32, help='Number of batch size')
    ap.add_argument('--alpha', type=float, default=0.6, help='Aplha default=0.6 DONT CHANGE')
    ap.add_argument('--w', type=float, default=0.4, help='w DONT CHANGE')
    ap.add_argument('--num_workers', type=int, default=2, help='Total # workers, for colab & kaggle use 2')
    ap.add_argument('--lr', type=float, default=0.0001, help='Learning rate')
    ap.add_argument('--save_epoch', type=int, default=10, help='Save model every # epochs')
    ap.add_argument('--train_path', type=str, default=(ROOT / 'dataset/KITTI/training'), help='Training path KITTI')
    ap.add_argument('--model_path', type=str, default=(ROOT / 'weights'), help='Weights path, for load and save model')
    ap.add_argument('--select_model', type=str, default='resnet18', help='Model selection: {resnet18, vgg11}')
    ap.add_argument('--api_key', type=str, default='', help='API key for comet.ml')
    return ap.parse_args()
|
def main(opt):
    """CLI entry point: forward parsed options to train()."""
    train(**vars(opt))
|
def train(train_path=(ROOT / 'dataset/KITTI/training'), checkpoint_path=(ROOT / 'weights/checkpoints'), model_select='resnet18', epochs=10, batch_size=32, num_workers=2, gpu=1, val_split=0.1, model_path=(ROOT / 'weights/'), api_key=''):
    """Train the regressor with PyTorch Lightning, logging to comet.ml.

    Keeps the 3 best checkpoints by validation loss; resumes from the newest
    .pkl found in model_path when one exists.

    :param gpu: number of GPUs passed to the Lightning Trainer
    :param api_key: comet.ml API key
    """
    comet_logger = CometLogger(api_key=api_key, project_name='YOLO3D')
    # keep the 3 lowest-val_loss checkpoints
    checkpoint_callback = ModelCheckpoint(monitor='val_loss', dirpath=checkpoint_path, filename='model_{epoch:02d}_{val_loss:.2f}', save_top_k=3, mode='min')
    trainer = Trainer(logger=comet_logger, callbacks=[checkpoint_callback], gpus=gpu, min_epochs=1, max_epochs=epochs)
    model = Model(model_select=model_select)
    try:
        # newest .pkl by lexicographic filename sort, if any
        latest_model = [x for x in sorted(os.listdir(model_path)) if x.endswith('.pkl')][(- 1)]
    except:
        latest_model = None
    if (latest_model is not None):
        # NOTE(review): load_from_checkpoint returns a new model and is called
        # without a directory prefix — confirm resuming actually works here
        model.load_from_checkpoint(latest_model)
        print(f'[INFO] Use previous model {latest_model}')
    dataset = KITTIDataModule(dataset_path=train_path, batch_size=batch_size, num_workers=num_workers, val_split=val_split)
    trainer.fit(model=model, datamodule=dataset)
|
def parse_opt():
    """Build and parse the CLI arguments for Lightning-based training."""
    ap = argparse.ArgumentParser(description='Regressor Model Training')
    ap.add_argument('--train_path', type=str, default=(ROOT / 'dataset_dummy/training'), help='Training path KITTI')
    ap.add_argument('--checkpoint_path', type=str, default=(ROOT / 'weights/checkpoint'), help='Checkpoint directory')
    ap.add_argument('--model_select', type=str, default='resnet18', help='Model selection: {resnet18, vgg11}')
    ap.add_argument('--epochs', type=int, default=10, help='Number of epochs')
    ap.add_argument('--batch_size', type=int, default=32, help='Number of batch size')
    ap.add_argument('--num_workers', type=int, default=2, help='Total # workers, for colab & kaggle use 2')
    ap.add_argument('--gpu', type=int, default=0, help='Numbers of GPU, default=1')
    ap.add_argument('--val_split', type=float, default=0.2, help='Validation split percentage')
    ap.add_argument('--model_path', type=str, default=(ROOT / 'weights'), help='Weights path, for load and save model')
    ap.add_argument('--api_key', type=str, default='', help='API key for comet.ml')
    return ap.parse_args()
|
def main(opt):
    """CLI entry point: forward parsed options to train()."""
    train(**vars(opt))
|
def notebook_init(verbose=True):
    """Check the notebook environment (Colab-aware) and print a system summary.

    :param verbose: when True, report CPU/RAM/disk usage
    :returns: the IPython display module, for later use by the caller
    """
    print('Checking setup...')
    import os
    import shutil
    from utils.general import check_requirements, emojis, is_colab
    from utils.torch_utils import select_device
    check_requirements(('psutil', 'IPython'))
    import psutil
    from IPython import display
    if is_colab():
        # remove Colab's /content/sample_data directory to free disk space
        shutil.rmtree('/content/sample_data', ignore_errors=True)
    if verbose:
        gib = (1 / (1024 ** 3))  # bytes -> GiB conversion factor
        ram = psutil.virtual_memory().total
        (total, used, free) = shutil.disk_usage('/')
        display.clear_output()
        s = f'({os.cpu_count()} CPUs, {(ram * gib):.1f} GB RAM, {((total - free) * gib):.1f}/{(total * gib):.1f} GB disk)'
    else:
        s = ''
    select_device(newline=False)
    print(emojis(f'Setup complete ✅ {s}'))
    return display
|
class SiLU(nn.Module):
    """Export-friendly SiLU: x * sigmoid(x), avoiding nn.SiLU."""
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)
|
class Hardswish(nn.Module):
    """Export-friendly Hardswish: x * hardtanh(x + 3, 0, 6) / 6."""
    @staticmethod
    def forward(x):
        gate = F.hardtanh(x + 3, 0.0, 6.0)
        return x * gate / 6.0
|
class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""
    @staticmethod
    def forward(x):
        return x * torch.tanh(F.softplus(x))
|
class MemoryEfficientMish(nn.Module):
    """Mish activation with a custom autograd Function that stores only the
    input and recomputes intermediates in backward, saving memory."""
    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            # keep only the input; everything else is recomputed in backward
            ctx.save_for_backward(x)
            # NOTE: F in these method bodies resolves to the module-level
            # functional import (presumably torch.nn.functional), not this
            # inner class — confirm against the file's imports
            return x.mul(torch.tanh(F.softplus(x)))
        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            # analytic d/dx [x * tanh(softplus(x))]
            return (grad_output * (fx + ((x * sx) * (1 - (fx * fx)))))
    def forward(self, x):
        return self.F.apply(x)
|
class FReLU(nn.Module):
    """Funnel ReLU: max(x, spatial condition of x) where the condition is a
    depthwise conv followed by batch norm."""
    def __init__(self, c1, k=3):
        super().__init__()
        # depthwise conv (groups=c1) computes the per-channel spatial condition
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)
    def forward(self, x):
        condition = self.bn(self.conv(x))
        return torch.max(x, condition)
|
class AconC(nn.Module):
    """ACON activation (activate or not).

    AconC: (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x, with p1, p2
    and beta learnable, per "Activate or Not: Learning Customized Activation"
    <https://arxiv.org/pdf/2009.04759.pdf>.
    """
    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
    def forward(self, x):
        delta = (self.p1 - self.p2) * x
        switch = torch.sigmoid(self.beta * delta)
        return delta * switch + self.p2 * x
|
class MetaAconC(nn.Module):
    """ACON activation (activate or not).

    MetaAconC: (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x, where beta
    is produced by a small channel-wise network, per "Activate or Not:
    Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """
    def __init__(self, c1, k=1, s=1, r=16):
        super().__init__()
        c2 = max(r, c1 // r)  # bottleneck width of the beta network
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
    def forward(self, x):
        # global average over H then W, keeping dims for the 1x1 convs
        pooled = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
        beta = torch.sigmoid(self.fc2(self.fc1(pooled)))
        delta = (self.p1 - self.p2) * x
        return delta * torch.sigmoid(beta * delta) + self.p2 * x
|
def check_train_batch_size(model, imgsz=640):
    """Estimate the optimal training batch size for model at image size imgsz.

    Runs autobatch on a training-mode copy under mixed precision.
    """
    with amp.autocast():
        # deepcopy so .train() and profiling never mutate the caller's model
        return autobatch(deepcopy(model).train(), imgsz)
|
def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
    """Estimate the best CUDA batch size for model at image size imgsz.

    Profiles a few candidate batch sizes, fits memory use linearly, and
    solves for the batch that fills `fraction` of the free GPU memory.

    :param batch_size: fallback value (CPU, or when profiling fails)
    :returns: suggested batch size (int)

    Fix: the original referenced `y` after a failed profile() call and
    crashed with NameError; it now falls back to the default batch size.
    """
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
    device = next(model.parameters()).device
    if (device.type == 'cpu'):
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size
    d = str(device).upper()
    properties = torch.cuda.get_device_properties(device)
    t = (properties.total_memory / (1024 ** 3))          # GiB total
    r = (torch.cuda.memory_reserved(device) / (1024 ** 3))
    a = (torch.cuda.memory_allocated(device) / (1024 ** 3))
    f = (t - (r + a))                                    # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes]
        y = profile(img, model, n=3, device=device)
    except Exception as e:
        LOGGER.warning(f'{prefix}{e}')
        return batch_size
    y = [x[2] for x in y if x]            # memory column of each successful run
    batch_sizes = batch_sizes[:len(y)]
    p = np.polyfit(batch_sizes, y, deg=1)  # linear fit: mem = p[0]*batch + p[1]
    b = int((((f * fraction) - p[1]) / p[0]))  # batch size hitting the memory target
    LOGGER.info(f'{prefix}Using batch-size {b} for {d} {(t * fraction):.2f}G/{t:.2f}G ({(fraction * 100):.0f}%)')
    return b
|
class Callbacks():
    """Handles all registered callbacks for YOLOv5 Hooks."""
    def __init__(self):
        # one empty action list per supported hook name
        hooks = ('on_pretrain_routine_start', 'on_pretrain_routine_end', 'on_train_start', 'on_train_epoch_start', 'on_train_batch_start', 'optimizer_step', 'on_before_zero_grad', 'on_train_batch_end', 'on_train_epoch_end', 'on_val_start', 'on_val_batch_start', 'on_val_image_end', 'on_val_batch_end', 'on_val_end', 'on_fit_epoch_end', 'on_model_save', 'on_train_end', 'on_params_update', 'teardown')
        self._callbacks = {hook: [] for hook in hooks}
    def register_action(self, hook, name='', callback=None):
        """Register a new action to a callback hook.

        Args:
            hook: the callback hook name to register the action to
            name: the name of the action for later reference
            callback: the callable to fire
        """
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})
    def get_registered_actions(self, hook=None):
        """Return the actions for one hook, or the whole registry when hook is None."""
        return self._callbacks[hook] if hook else self._callbacks
    def run(self, hook, *args, **kwargs):
        """Fire every callback registered on hook with the given arguments."""
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        for entry in self._callbacks[hook]:
            entry['callback'](*args, **kwargs)
|
def gsutil_getsize(url=''):
    """Return the size in bytes of a gs:// URL via `gsutil du` (0 if no output).

    The first whitespace-separated token of `gsutil du` output is a plain
    integer byte count, so it is parsed with int() — the original eval() was
    unnecessary and unsafe on subprocess output.
    """
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    return int(s.split(' ')[0]) if len(s) else 0
|
def safe_download(file, url, url2=None, min_bytes=1.0, error_msg=''):
    """Download `url` to `file`, falling back to curl on `url2` (or `url`) on failure.

    Any result smaller than `min_bytes` is treated as a failed download and
    removed; `error_msg` is appended to the final error report.
    """
    file = Path(file)
    assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
    try:
        # Primary attempt via the torch hub downloader.
        print(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file))
        assert (file.exists() and (file.stat().st_size > min_bytes)), assert_msg
    except Exception as e:
        # Fallback: drop any partial file and retry with curl (resumable, 3 retries).
        file.unlink(missing_ok=True)
        fallback = url2 or url
        print(f'''ERROR: {e}
Re-attempting {fallback} to {file}...''')
        os.system(f"curl -L '{fallback}' -o '{file}' --retry 3 -C -")
    finally:
        # Final size check: remove anything still too small and report the failure.
        too_small = (not file.exists()) or (file.stat().st_size < min_bytes)
        if too_small:
            file.unlink(missing_ok=True)
            print(f'''ERROR: {assert_msg}
{error_msg}''')
        print('')
|
def attempt_download(file, repo='ultralytics/yolov5'):
    """Locate `file` locally, or download it from a URL / GitHub release asset.

    Handles three cases:
      * the path already exists -> returned unchanged;
      * a URL -> downloaded (unless a local copy is already cached);
      * a bare asset name -> fetched from the repo's latest GitHub release.

    Args:
        file: path, URL, or release-asset filename.
        repo: GitHub 'owner/name' repository to pull release assets from.

    Returns:
        str: the local path of the (possibly downloaded) file.

    Fix vs original: the two bare `except:` clauses are narrowed to
    `except Exception:` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    file = Path(str(file).strip().replace("'", ''))
    if (not file.exists()):
        # URL-decode the filename, e.g. %2F -> '/'
        name = Path(urllib.parse.unquote(str(file))).name
        if str(file).startswith(('http:/', 'https:/')):
            url = str(file).replace(':/', '://')  # Pathlib collapses :// to :/
            file = name.split('?')[0]  # strip any auth/query string
            if Path(file).is_file():
                print(f'Found {url} locally at {file}')  # already cached
            else:
                safe_download(file=file, url=url, min_bytes=100000.0)
            return file
        # GitHub release asset
        file.parent.mkdir(parents=True, exist_ok=True)
        try:
            # Query the GitHub API for the latest release's assets and tag.
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()
            assets = [x['name'] for x in response['assets']]
            tag = response['tag_name']
        except Exception:  # API may be offline or rate-limited: fall back to known assets
            assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
            try:
                tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[(- 1)]
            except Exception:  # not a git repo / git unavailable
                tag = 'v6.0'
        if (name in assets):
            safe_download(file, url=f'https://github.com/{repo}/releases/download/{tag}/{name}', min_bytes=100000.0, error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')
    return str(file)
|
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    """Download a file from Google Drive via curl, handling the large-file confirm cookie.

    Args:
        id: Google Drive file id.
        file: local destination path; extracted and removed if it is a .zip.

    Returns:
        int: exit status of the final curl command (0 on success).
    """
    t = time.time()  # start time, used to report elapsed seconds
    file = Path(file)
    cookie = Path('cookie')  # curl cookie jar carrying Drive's confirm token
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove any previous/partial download
    cookie.unlink(missing_ok=True)  # remove stale cookie from a previous run
    out = ('NUL' if (platform.system() == 'Windows') else '/dev/null')  # discard curl output portably
    # First request only collects the cookie; large files need a confirm token.
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):
        # Large file: re-request with the confirm token extracted from the cookie.
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:
        # Small file: direct download, no confirmation step needed.
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # shell exit status of the download
    cookie.unlink(missing_ok=True)
    if (r != 0):
        file.unlink(missing_ok=True)  # don't leave a partial file behind
        print('Download error ')
        return r
    if (file.suffix == '.zip'):
        print('unzipping... ', end='')
        ZipFile(file).extractall(path=file.parent)
        file.unlink()  # remove the archive after extraction
    print(f'Done ({(time.time() - t):.1f}s)')
    return r
|
def get_token(cookie='./cookie'):
    """Extract the Google Drive download-confirmation token from a curl cookie file.

    Returns the last whitespace-separated field of the first line containing
    'download', or '' when no such line exists.
    """
    with open(cookie) as fh:
        tokens = (ln.split()[-1] for ln in fh if 'download' in ln)
        return next(tokens, '')
|
@app.route(DETECTION_URL, methods=['POST'])
def predict():
    """Flask endpoint: run YOLOv5 inference on an uploaded image.

    Expects a multipart/form-data POST with an 'image' file field and returns
    the detections as a JSON list of records (one dict per detected box).
    Returns None (empty response) if the method or the field is missing.
    """
    if (not (request.method == 'POST')):
        return
    if request.files.get('image'):
        image_file = request.files['image']
        image_bytes = image_file.read()
        img = Image.open(io.BytesIO(image_bytes))
        # size=640: inference resolution; `model` is a module-level YOLOv5 model
        results = model(img, size=640)
        # xyxy[0]: detections for the first (only) image as a pandas DataFrame
        return results.pandas().xyxy[0].to_json(orient='records')
|
def create_dataset_artifact(opt):
    """Create a W&B dataset artifact for the given options; log a hint if wandb is unavailable."""
    wb_logger = WandbLogger(opt, None, job_type='Dataset Creation')
    if not wb_logger.wandb:
        LOGGER.info('install wandb using `pip install wandb` to log the dataset')
|
def sweep():
    """Entry point for a W&B hyperparameter sweep: read the sampled
    hyperparameters from wandb.config and launch a YOLOv5 training run.
    """
    wandb.init()
    # W&B stores the sampled hyperparameters under the '_items' attribute.
    hyp_dict = vars(wandb.config).get('_items')
    opt = parse_opt(known=True)
    # Override the parsed CLI options with the sweep's sampled values.
    opt.batch_size = hyp_dict.get('batch_size')
    opt.save_dir = str(increment_path((Path(opt.project) / opt.name), exist_ok=(opt.exist_ok or opt.evolve)))
    opt.epochs = hyp_dict.get('epochs')
    opt.nosave = True  # sweeps don't keep intermediate checkpoints
    opt.data = hyp_dict.get('data')
    # Stringify path-like options so downstream code receives plain strings.
    opt.weights = str(opt.weights)
    opt.cfg = str(opt.cfg)
    opt.data = str(opt.data)
    opt.hyp = str(opt.hyp)
    opt.project = str(opt.project)
    device = select_device(opt.device, batch_size=opt.batch_size)
    train(hyp_dict, opt, device, callbacks=Callbacks())
|
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    is_master = local_rank in (-1, 0)
    if not is_master:
        dist.barrier(device_ids=[local_rank])  # non-masters wait for rank 0 here
    yield
    if local_rank == 0:
        dist.barrier(device_ids=[0])  # rank 0 releases the waiting processes
|
def date_modified(path=__file__):
    """Return the last-modified date of `path` as a 'YYYY-M-D' string (no zero padding)."""
    mtime = Path(path).stat().st_mtime
    stamp = datetime.datetime.fromtimestamp(mtime)
    return f'{stamp.year}-{stamp.month}-{stamp.day}'
|
def git_describe(path=Path(__file__).parent):
    """Return a human-readable git description of `path`, or '' if git fails
    (e.g. not a git repository)."""
    cmd = f'git -C {path} describe --tags --long --always'
    try:
        out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return out.decode()[:-1]  # strip the trailing newline
    except subprocess.CalledProcessError:
        return ''
|
def select_device(device='', batch_size=0, newline=True):
    """Select a torch device from a string spec and log a summary line.

    Args:
        device: '' (auto), 'cpu', or CUDA index string like '0' or '0,1,2,3'.
        batch_size: if > 0 and multiple GPUs are requested, must be divisible
            by the GPU count.
        newline: when False, strip the trailing newline from the log line.

    Returns:
        torch.device: 'cuda:0' if CUDA was selected, else 'cpu'.
    """
    s = f'YOLOv5 🚀 {(git_describe() or date_modified())} torch {torch.__version__} '
    device = str(device).strip().lower().replace('cuda:', '')  # normalize, e.g. 'cuda:0' -> '0'
    cpu = (device == 'cpu')
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() -> False
    elif device:
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # restrict visible GPUs to the request
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'
    cuda = ((not cpu) and torch.cuda.is_available())
    if cuda:
        devices = (device.split(',') if device else '0')  # e.g. '0,1' -> ['0', '1']
        n = len(devices)
        if ((n > 1) and (batch_size > 0)):  # multi-GPU needs an evenly divisible batch
            assert ((batch_size % n) == 0), f'batch-size {batch_size} not multiple of GPU count {n}'
        space = (' ' * (len(s) + 1))  # align continuation lines under the header
        for (i, d) in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f'''{('' if (i == 0) else space)}CUDA:{d} ({p.name}, {(p.total_memory / (1024 ** 2)):.0f}MiB)
'''
    else:
        s += 'CPU\n'
    if (not newline):
        s = s.rstrip()
    # Windows consoles may not handle the emoji; strip non-ASCII there.
    LOGGER.info((s.encode().decode('ascii', 'ignore') if (platform.system() == 'Windows') else s))
    return torch.device(('cuda:0' if cuda else 'cpu'))
|
def time_sync():
    """Return time.time() after synchronizing CUDA so timings are accurate."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for queued kernels before reading the clock
    return time.time()
|
def profile(input, ops, n=10, device=None):
    """Profile speed, memory, and FLOPs of one or more ops over one or more inputs.

    Args:
        input: a tensor or list of tensors to feed each op.
        ops: a module/callable or list of them to benchmark.
        n: number of timed iterations to average over.
        device: torch device; defaults to select_device().

    Returns:
        list: one row per (input, op) pair,
        [params, GFLOPs, mem_GB, forward_ms, backward_ms, shape_in, shape_out],
        or None for combinations that raised during profiling.

    Fix vs original: the two bare `except:` clauses are narrowed to
    `except Exception:` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    results = []
    device = device or select_device()
    print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}{'input':>24s}{'output':>24s}")
    for x in (input if isinstance(input, list) else [input]):
        x = x.to(device)
        x.requires_grad = True  # enable backward timing
        for m in (ops if isinstance(ops, list) else [ops]):
            m = m.to(device) if hasattr(m, 'to') else m
            # Match the op's dtype to half-precision inputs.
            m = m.half() if (hasattr(m, 'half') and isinstance(x, torch.Tensor) and (x.dtype is torch.float16)) else m
            (tf, tb, t) = (0, 0, [0, 0, 0])  # fwd ms, bwd ms, [t0, t1, t2] stamps
            try:
                # thop may be missing or unable to trace m; report 0 GFLOPs then.
                flops = (thop.profile(m, inputs=(x,), verbose=False)[0] / 1000000000.0) * 2
            except Exception:
                flops = 0
            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        _ = (sum((yi.sum() for yi in y)) if isinstance(y, list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception:  # some outputs have no grad path
                        t[2] = float('nan')
                    tf += ((t[1] - t[0]) * 1000) / n
                    tb += ((t[2] - t[1]) * 1000) / n
                mem = (torch.cuda.memory_reserved() / 1000000000.0) if torch.cuda.is_available() else 0  # GB
                s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
                s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
                p = sum(list((x.numel() for x in m.parameters()))) if isinstance(m, nn.Module) else 0  # params
                print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                print(e)
                results.append(None)
            torch.cuda.empty_cache()
    return results
|
def is_parallel(model):
    """True if model is exactly an nn.DataParallel or nn.DistributedDataParallel wrapper."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
|
def de_parallel(model):
    """Unwrap a DataParallel/DDP model to the underlying module (no-op otherwise)."""
    if is_parallel(model):
        return model.module
    return model
|
def initialize_weights(model):
    """Apply YOLOv5 init conventions: tuned BatchNorm eps/momentum, in-place activations.

    Conv2d layers are intentionally left at their PyTorch defaults.
    """
    for module in model.modules():
        kind = type(module)
        if kind is nn.BatchNorm2d:
            module.eps = 0.001
            module.momentum = 0.03
        elif kind in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU):
            module.inplace = True
        # nn.Conv2d: keep defaults
|
def find_modules(model, mclass=nn.Conv2d):
    """Return indices into model.module_list of layers that are instances of mclass."""
    return [idx for idx, layer in enumerate(model.module_list) if isinstance(layer, mclass)]
|
def sparsity(model):
    """Global sparsity of the model: fraction of parameter entries that are exactly zero."""
    total, zero_count = 0, 0
    for param in model.parameters():
        total += param.numel()
        zero_count += (param == 0).sum()
    return zero_count / total
|
def prune(model, amount=0.3):
    """L1-unstructured prune every Conv2d weight in the model to sparsity `amount`."""
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            prune.l1_unstructured(module, name='weight', amount=amount)
            prune.remove(module, 'weight')  # bake the pruning mask in permanently
    print(' %.3g global sparsity' % sparsity(model))
|
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d, returning one equivalent Conv2d.

    The returned layer always has a bias and requires no gradients.
    """
    fused = nn.Conv2d(conv.in_channels,
                      conv.out_channels,
                      kernel_size=conv.kernel_size,
                      stride=conv.stride,
                      padding=conv.padding,
                      groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)
    # Weights: W_fused = diag(gamma / sqrt(var + eps)) @ W_conv
    conv_w = conv.weight.clone().view(conv.out_channels, -1)
    bn_scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fused.weight.copy_(torch.mm(bn_scale, conv_w).view(fused.weight.shape))
    # Bias: b_fused = scale @ b_conv + (beta - gamma * mean / sqrt(var + eps))
    conv_b = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    bn_shift = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(bn_scale, conv_b.reshape(-1, 1)).reshape(-1) + bn_shift)
    return fused
|
def model_info(model, verbose=False, img_size=640):
    """Log a model summary: layer count, parameter/gradient counts, and (when
    thop is available) an estimated GFLOPs figure at `img_size`.

    Args:
        model: an nn.Module; optionally exposes .stride and .yaml (YOLOv5-style).
        verbose: also print a per-parameter table.
        img_size: int or [h, w] used to scale the FLOPs estimate.

    Fix vs original: `except (ImportError, Exception)` is redundant since
    ImportError is a subclass of Exception; narrowed to `except Exception`.
    """
    n_p = sum(x.numel() for x in model.parameters())  # number of parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number of gradients
    if verbose:
        print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:
        from thop import profile
        # Measure FLOPs on one stride-sized image, then scale to img_size.
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)
        flops = (profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1000000000.0) * 2  # stride GFLOPs
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]
        fs = ', %.1f GFLOPs' % ((((flops * img_size[0]) / stride) * img_size[1]) / stride)
    except Exception:  # thop missing or the model isn't traceable: skip FLOPs
        fs = ''
    LOGGER.info(f'Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}')
|
def scale_img(img, ratio=1.0, same_shape=False, gs=32):
    """Scale a (B, C, H, W) image batch by `ratio` and pad with value 0.447.

    Args:
        img: 4D image tensor.
        ratio: scale factor; 1.0 returns img unchanged.
        same_shape: if True, pad back to the original H x W; otherwise pad up to
            the nearest gs-multiple of the scaled size.
        gs: grid size (stride) the output dims are padded to when same_shape is False.
    """
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)
    if not same_shape:
        # Pad target becomes the nearest gs-multiple of the scaled dims.
        h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)
|
def copy_attr(a, b, include=(), exclude=()):
    """Copy instance attributes from object b onto object a.

    Skips private attributes (leading '_'), anything listed in `exclude`, and —
    when `include` is non-empty — anything not listed in `include`.
    """
    for key, value in b.__dict__.items():
        skip = (len(include) and key not in include) or key.startswith('_') or (key in exclude)
        if not skip:
            setattr(a, key, value)
|
class EarlyStopping():
    """Early-stopping helper: signals stop after `patience` epochs with no fitness improvement."""

    def __init__(self, patience=30):
        self.best_fitness = 0.0  # best fitness observed so far
        self.best_epoch = 0
        self.patience = patience or float('inf')  # patience=0 disables early stopping
        self.possible_stop = False  # True one epoch before a stop would trigger

    def __call__(self, epoch, fitness):
        """Record this epoch's fitness; return True when training should stop."""
        if fitness >= self.best_fitness:  # ties also refresh the best epoch
            self.best_epoch = epoch
            self.best_fitness = fitness
        delta = epoch - self.best_epoch  # epochs without improvement
        self.possible_stop = delta >= (self.patience - 1)
        stop = delta >= self.patience
        if stop:
            LOGGER.info(f'''Stopping training early as no improvement observed in last {self.patience} epochs. Best results observed at epoch {self.best_epoch}, best model saved as best.pt.
To update EarlyStopping(patience={self.patience}) pass a new patience value, i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.''')
        return stop
|
class ModelEMA():
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # Keep a de-parallelized deep copy in eval mode as the EMA model.
        self.ema = deepcopy((model.module if is_parallel(model) else model)).eval()
        self.updates = updates  # number of EMA updates performed so far
        # Effective decay ramps from 0 toward `decay` as updates grow (exponential ramp).
        self.decay = (lambda x: (decay * (1 - math.exp(((- x) / 2000)))))
        for p in self.ema.parameters():
            p.requires_grad_(False)  # EMA weights are never trained directly

    def update(self, model):
        """Blend the model's current state_dict into the EMA state_dict in place."""
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = (model.module.state_dict() if is_parallel(model) else model.state_dict())
            for (k, v) in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # only floating-point entries are averaged
                    v *= d
                    v += ((1 - d) * msd[k].detach())

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        """Copy (non-private) attributes from model onto the EMA model."""
        copy_attr(self.ema, model, include, exclude)
|
def get_weights(weights):
    """Download pretrained weights for the named backbone from Google Drive via gdown.

    Args:
        weights: one of 'resnet', 'resnet18', 'vgg11'.

    Raises:
        KeyError: if `weights` is not a known backbone name.
    """
    # Known backbone name -> Google Drive file id.
    weights_list = {'resnet': '1Bw4gUsRBxy8XZDGchPJ_URQjbHItikjw', 'resnet18': '1k_v1RrDO6da_NDhBtMZL5c0QSogCmiRn', 'vgg11': '1vZcB-NaPUCovVA-pH-g-3NNJuUA948ni'}
    url = f'https://drive.google.com/uc?id={weights_list[weights]}'
    output = f'./{weights}.pkl'  # saved next to the working directory
    gdown.download(url, output, quiet=False)
|
def geodesic_fps(points, n_samples):
    """Geodesic furthest-point-sample `n_samples` indices from a (V, 3) point array.

    Args:
        points: numpy array of shape (V, 3).
        n_samples: number of samples to draw; a warning is issued when it
            exceeds the number of points.

    Returns:
        1D numpy array of sampled point indices.

    Raises:
        ValueError: if points is not a numpy array of shape (V, 3).

    Fix vs original: input validation now runs *before* `points.shape` is
    touched, so a non-array input raises the documented ValueError instead
    of an AttributeError.
    """
    if type(points) is not np.ndarray:
        raise ValueError('`points` should be a numpy array')
    if (len(points.shape) != 2) or (points.shape[1] != 3):
        raise ValueError('`points` should have shape (V,3), shape is ' + str(points.shape))
    if n_samples > points.shape[0]:
        warnings.warn('Number of samples is larger than number of points.')
    sample_id = geodesicFPS(points, n_samples)
    return sample_id.squeeze()
|
def batch_dot(a, b):
    """Row-wise dot product of two (N, C) tensors, returned with shape (N, 1)."""
    rows = a.unsqueeze(1)      # (N, 1, C)
    cols = b.unsqueeze(-1)     # (N, C, 1)
    return torch.bmm(rows, cols).squeeze(-1)  # (N, 1)
|
class DeltaNetBase(torch.nn.Module):
    def __init__(self, in_channels, conv_channels, mlp_depth, num_neighbors, grad_regularizer, grad_kernel_width, centralize_first=True):
        """Feature-extractor backbone built from a stack of DeltaConv layers,
        shared by the classification and segmentation heads below.

        Args:
            in_channels (int): the number of channels provided as input.
            conv_channels (list[int]): the number of output channels of each convolution.
            mlp_depth (int): the depth of the MLPs of each convolution.
            num_neighbors (int): the number of neighbors to use in estimating the gradient.
            grad_regularizer (float): the regularizer value (\\lambda in the paper)
                used in the least-squares fitting procedure. Larger values give a
                smoother but less accurate gradient; should be > 0 (e.g., 1e-4)
                to prevent exploding values.
            grad_kernel_width (float): the width of the gaussian kernel used to
                weight the least-squares problem to approximate the gradient.
                Larger width includes more points ('smoother' gradient).
            centralize_first (bool, optional): whether to centralize the input
                features (default: True).
        """
        super().__init__()
        self.k = num_neighbors
        self.grad_regularizer = grad_regularizer
        self.grad_kernel_width = grad_kernel_width
        # Prepend the input width so conv i maps conv_channels[i] -> conv_channels[i + 1].
        conv_channels = ([in_channels] + conv_channels)
        self.convs = torch.nn.ModuleList()
        for i in range((len(conv_channels) - 1)):
            last_layer = (i == (len(conv_channels) - 2))
            # The final conv outputs scalars only (vector=False); only the very
            # first conv centralizes its input when centralize_first is set.
            self.convs.append(DeltaConv(conv_channels[i], conv_channels[(i + 1)], depth=mlp_depth, centralized=(centralize_first and (i == 0)), vector=(not last_layer)))

    def forward(self, data):
        """Run all DeltaConv layers and return the list of per-layer scalar outputs."""
        pos = data.pos
        batch = data.batch
        edge_index = knn_graph(pos, self.k, batch, loop=True, flow='target_to_source')
        if (hasattr(data, 'norm') and (data.norm is not None)):
            # Normals provided: build the tangent basis directly from them.
            normal = data.norm
            (x_basis, y_basis) = build_tangent_basis(normal)
        else:
            # No normals: estimate normals and basis from a 10-NN graph.
            edge_index_normal = knn_graph(pos, 10, batch, loop=True, flow='target_to_source')
            (normal, x_basis, y_basis) = estimate_basis(pos, edge_index_normal, orientation=pos)
        # Discretized gradient and divergence operators for this point cloud.
        (grad, div) = build_grad_div(pos, normal, x_basis, y_basis, edge_index, batch, kernel_width=self.grad_kernel_width, regularizer=self.grad_regularizer)
        # Scalar features default to positions when data.x is absent.
        x = (data.x if (hasattr(data, 'x') and (data.x is not None)) else pos)
        v = (grad @ x)  # initial vector features: gradient of the scalar features
        out = []
        for conv in self.convs:
            (x, v) = conv(x, v, grad, div, edge_index)
            out.append(x)
        return out
|
class DeltaNetClassification(torch.nn.Module):
    def __init__(self, in_channels, num_classes, conv_channels=[64, 64, 128, 256], num_neighbors=20, grad_regularizer=0.001, grad_kernel_width=1):
        """Classification of Point Clouds with DeltaConv.
        The architecture is based on the architecture used by DGCNN
        (https://dl.acm.org/doi/10.1145/3326362).

        Args:
            in_channels (int): the number of channels provided as input.
            num_classes (int): the number of classes to classify.
            conv_channels (list[int]): the number of output channels of each convolution.
            num_neighbors (int): the number of neighbors to use in estimating the gradient.
            grad_regularizer (float): the regularizer value (\\lambda in the paper)
                used in the least-squares fitting procedure; should be > 0 (e.g., 1e-4).
            grad_kernel_width (float): the width of the gaussian kernel used to
                weight the least-squares problem to approximate the gradient.
        """
        super().__init__()
        self.deltanet_base = DeltaNetBase(in_channels, conv_channels, 1, num_neighbors, grad_regularizer, grad_kernel_width)
        # Embed the concatenated multi-scale features into a single 1024-d vector.
        self.lin_embedding = MLP([sum(conv_channels), 1024])
        # 1024 * 2: max- and mean-pooled embeddings are concatenated (see forward).
        self.classification_head = Seq(MLP([(1024 * 2), 512]), Dropout(0.5), MLP([512, 256]), Dropout(0.5), Linear(256, num_classes))

    def forward(self, data):
        """Return per-graph class logits."""
        conv_out = self.deltanet_base(data)
        x = torch.cat(conv_out, dim=1)  # concatenate features from every conv layer
        x = self.lin_embedding(x)
        batch = data.batch
        # Global readout: max and mean pooling over each point cloud.
        x_max = global_max_pool(x, batch)
        x_mean = global_mean_pool(x, batch)
        x = torch.cat([x_max, x_mean], dim=1)
        return self.classification_head(x)
|
class DeltaNetSegmentation(torch.nn.Module):
    def __init__(self, in_channels, num_classes, conv_channels=[64, 128, 256], mlp_depth=2, embedding_size=1024, categorical_vector=False, num_neighbors=20, grad_regularizer=0.001, grad_kernel_width=1):
        """Segmentation of Point Clouds with DeltaConv.
        The architecture is based on the architecture used by DGCNN
        (https://dl.acm.org/doi/10.1145/3326362).

        Args:
            in_channels (int): the number of channels provided as input.
            num_classes (int): the number of classes to segment.
            conv_channels (list[int]): the number of output channels of each convolution.
            mlp_depth (int): the depth of the MLPs of each convolution.
            embedding_size (int): the embedding size before the segmentation head is applied.
            categorical_vector (bool, optional): whether to use the categorical
                encoding in the model. Many authors use this in their models for ShapeNet.
            num_neighbors (int): the number of neighbors to use in estimating the gradient.
            grad_regularizer (float): the regularizer value (\\lambda in the paper)
                used in the least-squares fitting procedure; should be > 0 (e.g., 1e-4).
            grad_kernel_width (float): the width of the gaussian kernel used to
                weight the least-squares problem to approximate the gradient.
        """
        super().__init__()
        self.categorical_vector = categorical_vector
        self.deltanet_base = DeltaNetBase(in_channels, conv_channels, mlp_depth, num_neighbors, grad_regularizer, grad_kernel_width)
        self.lin_global = MLP([sum(conv_channels), embedding_size])
        if categorical_vector:
            # Category encoding embedded from 16 -> 64 channels
            # (presumably a one-hot ShapeNet category vector — confirm with callers).
            self.lin_categorical = MLP([16, 64])
            self.segmentation_head = Seq(MLP([((embedding_size + sum(conv_channels)) + 64), 256]), Dropout(0.5), MLP([256, 256]), Dropout(0.5), Linear(256, 128), LeakyReLU(negative_slope=0.2), Linear(128, num_classes))
        else:
            self.segmentation_head = Seq(MLP([(embedding_size + sum(conv_channels)), 256]), Dropout(0.5), MLP([256, 256]), Dropout(0.5), Linear(256, 128), LeakyReLU(negative_slope=0.2), Linear(128, num_classes))

    def forward(self, data):
        """Return per-point class logits."""
        conv_out = self.deltanet_base(data)
        x = torch.cat(conv_out, dim=1)  # multi-scale per-point features
        x = self.lin_global(x)
        batch = data.batch
        # Broadcast the per-cloud max-pooled embedding back to every point.
        x_max = global_max_pool(x, batch)[batch]
        if self.categorical_vector:
            cat = self.lin_categorical(data.category)[batch]
            x_max = torch.cat([x_max, cat], dim=1)
        x = torch.cat(([x_max] + conv_out), dim=1)
        return self.segmentation_head(x)
|
class DeltaConv(torch.nn.Module):
    """DeltaConv convolution from the paper
    "DeltaConv: Anisotropic Operators for Geometric Deep Learning on Point Clouds".
    This convolution learns a combination of operators from vector calculus:
    grad, co-grad, div, curl; and their compositions Laplacian and Hodge-Laplacian
    and separates features into a scalar and vector stream.

    DeltaConv can be applied to any discretization. Simply provide the discretized
    gradient and divergence. Depending on the discretization, the implementation
    of the rotation matrix (J) and norm should be updated.

    Args:
        in_channels (int): the number of input channels of the features.
        out_channels (int): the number of output channels after the convolution.
        depth (int, optional): the depth of the MLPs (default: 1).
        centralized (bool, optional): centralizes the input features
            before maximum aggregation if set to True (default: False):
            p_j = p_j - p_i.
        vector (bool, optional): determines whether the vector stream is propagated;
            set this to false in the last layer of a network that only outputs
            scalars (default: True).
        aggr (string, optional): the type of aggregation used in the scalar
            stream (default: 'max').
    """

    def __init__(self, in_channels, out_channels, depth=1, centralized=False, vector=True, aggr='max'):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.centralized = centralized
        self.aggr = aggr
        # MLP applied before neighborhood aggregation in the scalar stream.
        self.s_mlp_max = MLP(([in_channels] + ([out_channels] * depth)))
        # Scalar-stream MLP input is 4 * in_channels: [x, div(v), curl(v), |v|] (see forward).
        self.s_mlp = MLP(([(in_channels * 4)] + ([out_channels] * depth)))
        if vector:
            # Vector-stream MLP over [v, Hodge-Laplacian(v), grad(x)] after I_J (see forward).
            self.v_mlp = VectorMLP(([((in_channels * 4) + (out_channels * 2))] + ([out_channels] * depth)))
        else:
            self.v_mlp = None

    def forward(self, x, v, grad, div, edge_index):
        """Apply one DeltaConv step; returns updated (scalar, vector) features."""
        if self.centralized:
            # Aggregate over centralized edge features x_j - x_i.
            x_edge = (x[edge_index[1]] - x[edge_index[0]])
            x_max = scatter(self.s_mlp_max(x_edge), edge_index[0], dim=0, reduce=self.aggr)
        else:
            x_max = scatter(self.s_mlp_max(x)[edge_index[1]], edge_index[0], dim=0, reduce=self.aggr)
        # Scalar stream combines x with the divergence, curl, and norm of v.
        x_cat = torch.cat([x, (div @ v), curl(v, div), norm(v)], dim=1)
        x = (x_max + self.s_mlp(x_cat))
        if (self.v_mlp is not None):
            # Vector stream combines v, its Hodge-Laplacian, and the gradient of x.
            v_cat = torch.cat([v, hodge_laplacian(v, grad, div), (grad @ x)], dim=1)
            v = self.v_mlp(I_J(v_cat))
        return (x, v)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.in_channels}, {self.out_channels})'
|
def MLP(channels, bias=False, nonlin=LeakyReLU(negative_slope=0.2)):
    """Build an MLP as a stack of [Linear -> BatchNorm1d -> nonlin] layers.

    Args:
        channels: list of layer widths; len(channels) - 1 layers are created.
        bias: whether the Linear layers use a bias term.
        nonlin: non-linearity module appended after each batch norm
            (note: the same module instance is shared by every layer).
    """
    layers = []
    for i in range(1, len(channels)):
        layers.append(Seq(Lin(channels[i - 1], channels[i], bias=bias),
                          BatchNorm1d(channels[i]),
                          nonlin))
    return Seq(*layers)
|
def VectorMLP(channels, batchnorm=True):
    """Build an MLP for vector features as [Linear(no bias) -> VectorNonLin] layers.

    Args:
        channels: list of layer widths; len(channels) - 1 layers are created.
        batchnorm: when True, a BatchNorm1d is handed to each VectorNonLin.
    """
    layers = []
    for i in range(1, len(channels)):
        bn = BatchNorm1d(channels[i]) if batchnorm else None
        layers.append(Seq(Lin(channels[i - 1], channels[i], bias=False),
                          VectorNonLin(channels[i], batchnorm=bn)))
    return Seq(*layers)
|
class ScalarVectorMLP(torch.nn.Module):
    """Apply an MLP to the scalar stream and, optionally, a VectorMLP to the vector stream."""

    def __init__(self, channels, nonlin=True, vector_stream=True):
        super(ScalarVectorMLP, self).__init__()
        act = LeakyReLU(negative_slope=0.2) if nonlin else torch.nn.Identity()
        self.scalar_mlp = MLP(channels, nonlin=act)
        self.vector_mlp = VectorMLP(channels) if vector_stream else None

    def forward(self, x):
        """x is a scalar tensor, or a (scalar, vector) tuple when a vector stream exists."""
        # A vector stream requires tuple input; scalar-only accepts either.
        assert (self.vector_mlp is None) or ((self.vector_mlp is not None) and (type(x) is tuple))
        vec = None
        if type(x) is tuple:
            x, vec = x
        x = self.scalar_mlp(x)
        if self.vector_mlp is not None:
            x = (x, self.vector_mlp(vec))
        return x
|
class ScalarVectorIdentity(torch.nn.Module):
    """Identity module that accepts (and ignores) arbitrary constructor arguments."""

    def __init__(self, *args, **kwargs):
        super(ScalarVectorIdentity, self).__init__()

    def forward(self, input):
        """Return the input unchanged (works for tensors and tuples alike)."""
        return input
|
class BatchNorm1d(torch.nn.Module):
    """Convenience wrapper around BatchNorm1d that transforms an
    input tensor from [N x C] to [1 x C x N] so that it uses the faster
    batch-wise implementation of PyTorch.
    """

    def __init__(self, in_channels, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
        super(BatchNorm1d, self).__init__()
        self.bn = torch.nn.BatchNorm1d(in_channels, eps, momentum, affine, track_running_stats)
        self.reset_parameters()

    def reset_parameters(self):
        self.bn.reset_parameters()

    def forward(self, x: Tensor) -> Tensor:
        # [N, C] -> [1, C, N]: normalize per channel over the whole point set.
        reshaped = x.unsqueeze(2).transpose(0, 2)
        normed = self.bn(reshaped)
        # [1, C, N] -> [N, C]
        return normed.transpose(0, 2).squeeze(2).contiguous()

    def __repr__(self):
        return f'{self.__class__.__name__}({self.bn.num_features})'
|
class VectorNonLin(torch.nn.Module):
    """Applies a non-linearity to the norm of vector features.

    Args:
        in_channels (int): the number of channels in the input tensor.
        nonlin (Module, optional): non-linearity that will be applied
            to the features (default: ReLU).
        batchnorm (Module, optional): batchnorm operation to call before
            the non-linearity is applied (default: None).
    """

    def __init__(self, in_channels, nonlin=torch.nn.ReLU(), batchnorm=None):
        super(VectorNonLin, self).__init__()
        # Learnable per-channel shift applied to the norms when no batchnorm is used.
        self.bias = torch.nn.Parameter(torch.Tensor(in_channels))
        self.nonlin = nonlin
        self.batchnorm = batchnorm
        self.reset_parameters()

    def reset_parameters(self):
        # `zeros` is an external initializer; presumably zero-fills the bias — confirm.
        zeros(self.bias)
        if (self.batchnorm is not None):
            self.batchnorm.reset_parameters()

    def forward(self, x: Tensor) -> Tensor:
        # [2N, C] -> [N, 2, C]: consecutive row pairs form 2-component vectors
        # (presumably the two tangent-basis components — confirm with callers).
        (N, C) = x.size()
        x = x.view((- 1), 2, C)
        norm = LA.norm(x, dim=1, keepdim=False)
        if (self.batchnorm is None):
            norm_shifted = (norm + self.bias.view(1, (- 1)))
        else:
            norm_shifted = self.batchnorm(norm)
        norm_nonlin = self.nonlin(norm_shifted)
        # Rescale each vector so its norm passes through the non-linearity;
        # clamping by EPS avoids division by zero for near-zero vectors.
        x_nonlin = (x * (norm_nonlin / norm.clamp(EPS)).unsqueeze(1))
        return x_nonlin.view(N, C).contiguous()

    def __repr__(self):
        return f'{self.__class__.__name__}(batchnorm={self.batchnorm.__repr__()})'
|
class GeodesicFPS(object):
    """Sample points using geodesic furthest point samples.

    Fix vs original: `__call__` no longer overwrites `self.n_samples` when it
    is None — the original froze the sample count to the size of the *first*
    cloud seen, which breaks datasets with variable-sized point clouds.
    """

    def __init__(self, n_samples=None, store_original=False):
        self.n_samples = n_samples  # None -> use each cloud's own point count
        self.store_original = store_original  # keep pos/y before subsampling

    def __call__(self, data):
        # Resolve the sample count per call (see class docstring).
        n_samples = self.n_samples if self.n_samples is not None else data.pos.size(0)
        idx = torch.from_numpy(geodesic_fps(data.pos.cpu().numpy(), n_samples)).long()
        if data.pos.size(0) < n_samples:
            # Fewer points than samples: tile the valid indices up to n_samples.
            idx = idx[:data.pos.size(0)].repeat(ceil(n_samples / data.pos.size(0)))
        idx = idx[:n_samples]
        assert idx.max() <= data.pos.size(0)
        assert idx.min() >= 0
        data.sample_idx = idx
        if self.store_original:
            data.pos_original = data.pos
            data.y_original = data.y
        data.pos = data.pos[idx]
        if hasattr(data, 'norm') and data.norm is not None:
            data.norm = data.norm[idx]
        if hasattr(data, 'normal') and data.normal is not None:
            # NOTE(review): 'normal' is written into data.norm (not data.normal),
            # apparently unifying the attribute name — confirm this is intended.
            data.norm = data.normal[idx]
        if hasattr(data, 'x') and data.x is not None:
            data.x = data.x[idx]
        # Only index y when it is a per-point tensor (not an int class label).
        if hasattr(data, 'y') and data.y is not None and type(data.y) is not int and data.y.size(0) > 1:
            data.y = data.y[idx]
        return data

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
|
class NormalizeScale(object):
    """Centers node positions and scales them to lie strictly inside the interval (-1, 1)."""

    def __init__(self, norm_ord=2, scaling_factor=None):
        self.norm_ord = norm_ord  # vector-norm order used to measure point radii
        self.scaling_factor = scaling_factor  # fixed scale; None -> fit the max radius

    def __call__(self, data):
        # Center on the bounding-box midpoint.
        bbox_center = (torch.max(data.pos, dim=0)[0] + torch.min(data.pos, dim=0)[0]) / 2
        data.pos = data.pos - bbox_center
        # The 0.999999 factor keeps every point strictly inside the unit range.
        if self.scaling_factor is None:
            scale = (1 / LA.norm(data.pos, ord=self.norm_ord, dim=1).max()) * 0.999999
        else:
            scale = (1 / self.scaling_factor) * 0.999999
        data.pos = data.pos * scale
        return data

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
|
class RandomNormals(object):
    """Jitters normals by a translation within a given interval.
    This is followed by normalization to ensure unit normals.

    Args:
        translate (sequence or float or int): Maximum translation in each
            dimension, defining the range (-translate, +translate) to sample
            from. If translate is a number instead of a sequence, the same
            range is used for each dimension.
    """

    def __init__(self, translate):
        self.translate = translate

    def __call__(self, data):
        # One jitter value per point per dimension, sampled uniformly.
        ((n, dim), t) = (data.pos.size(), self.translate)
        if isinstance(t, numbers.Number):
            t = list(repeat(t, times=dim))
        assert (len(t) == dim)
        ts = []
        for d in range(dim):
            ts.append(data.pos.new_empty(n).uniform_((- abs(t[d])), abs(t[d])))
        data.norm = (data.norm + torch.stack(ts, dim=(- 1)))
        # Re-normalize to unit length; the clamp avoids division by near-zero norms.
        # NOTE(review): `keepdims` (plural) is numpy-style — torch.linalg.norm
        # spells it `keepdim`; confirm which library LA refers to here.
        data.norm = (data.norm / LA.norm(data.norm, dim=(- 1), keepdims=True).clamp(1e-05))
        return data

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.translate)
|
class RandomRotate(object):
    """Rotates node positions (and normals, if present) around an axis by a
    randomly sampled angle within a given interval.

    Args:
        degrees (tuple or float): rotation interval the angle is sampled from;
            a number d means the interval [-d, d].
        axis (int, optional): the rotation axis (default: 0).
    """

    def __init__(self, degrees, axis=0):
        if isinstance(degrees, numbers.Number):
            degrees = (-abs(degrees), abs(degrees))
        assert isinstance(degrees, (tuple, list)) and len(degrees) == 2
        self.degrees = degrees
        self.axis = axis

    def __call__(self, data):
        angle = math.pi * random.uniform(*self.degrees) / 180.0  # degrees -> radians
        sin, cos = math.sin(angle), math.cos(angle)
        if data.pos.size(-1) == 2:
            rot = [[cos, sin], [-sin, cos]]  # planar rotation
        elif self.axis == 0:
            rot = [[1, 0, 0], [0, cos, sin], [0, -sin, cos]]
        elif self.axis == 1:
            rot = [[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]]
        else:
            rot = [[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]]
        rot = torch.Tensor(rot)
        data.pos = torch.matmul(data.pos, rot.to(data.pos.dtype).to(data.pos.device))
        if hasattr(data, 'norm'):
            data.norm = torch.matmul(data.norm, rot.to(data.norm.dtype).to(data.norm.device))
        return data

    def __repr__(self):
        return '{}({}, axis={})'.format(self.__class__.__name__, self.degrees, self.axis)
|
class RandomScale(object):
    """Scales node positions by a per-axis random factor sampled from `scales`.

    Normals, when present, are scaled by the inverse factors and re-normalized
    to unit length so they remain valid under anisotropic scaling.

    Args:
        scales (tuple): interval (a, b) each axis scale is sampled from.
    """

    def __init__(self, scales):
        assert isinstance(scales, (tuple, list)) and len(scales) == 2
        self.scales = scales

    def __call__(self, data):
        factors = data.pos.new_empty(3).uniform_(*self.scales)  # one factor per axis
        data.pos = data.pos * factors
        if hasattr(data, 'norm') and data.norm is not None:
            data.norm = data.norm * (1 / factors)
            data.norm = data.norm / LA.norm(data.norm, dim=1, keepdim=True)
        return data

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.scales)
|
class RandomTranslateGlobal(object):
    """Translates the entire shape by one random offset per dimension,
    preserving all local point relationships.

    Args:
        translate (sequence or float or int): maximum translation in each
            dimension, sampled from (-translate, +translate); a scalar applies
            the same range to every dimension.
    """

    def __init__(self, translate):
        self.translate = translate

    def __call__(self, data):
        (_, dim), t = data.pos.size(), self.translate
        if isinstance(t, numbers.Number):
            t = list(repeat(t, times=dim))
        assert len(t) == dim
        offsets = []
        for d in range(dim):
            # One scalar offset per dimension, shared by every point.
            offsets.append(data.pos.new_empty(1).uniform_(-abs(t[d]), abs(t[d])))
        data.pos = data.pos + torch.stack(offsets, dim=-1)
        return data

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.translate)
|
class ModelNet(InMemoryDataset):
    r"""The ModelNet10/40 datasets from the `"3D ShapeNets: A Deep
    Representation for Volumetric Shapes"
    <https://people.csail.mit.edu/khosla/papers/cvpr2015_wu.pdf>`_ paper,
    containing CAD models of 10 and 40 categories, respectively.

    .. note::
        Data objects hold mesh faces instead of edge indices.
        Use :obj:`torch_geometric.transforms.FaceToEdge` as
        :obj:`pre_transform` to obtain a graph, or
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform`
        to sample a fixed number of points on the mesh faces.

    Args:
        root (string): Root directory where the dataset should be saved.
        n_per_class (int, optional): If given, keep at most this many
            meshes per category. (default: :obj:`None`, keep all)
        name (string, optional): :obj:`"10"` for ModelNet10 or :obj:`"40"`
            for ModelNet40. (default: :obj:`"10"`)
        train (bool, optional): If :obj:`True`, loads the training split,
            otherwise the test split. (default: :obj:`True`)
        transform (callable, optional): Transform applied on every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): Transform applied once before
            saving to disk. (default: :obj:`None`)
        pre_filter (callable, optional): Predicate deciding whether a data
            object is kept. (default: :obj:`None`)
    """

    urls = {
        '10': 'http://vision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip',
        '40': 'http://modelnet.cs.princeton.edu/ModelNet40.zip',
    }

    def __init__(self, root, n_per_class=None, name='10', train=True,
                 transform=None, pre_transform=None, pre_filter=None):
        assert name in ['10', '40']
        self.name = name
        # Optional per-category cap on the number of loaded meshes.
        self.n_per_class = n_per_class
        super(ModelNet, self).__init__(root, transform, pre_transform,
                                       pre_filter)
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.data, self.slices = torch.load(path)

    @property
    def raw_file_names(self):
        # NOTE(review): these are the ModelNet10 category folders, used as
        # an existence check after extraction — TODO confirm this check is
        # also sufficient for ModelNet40.
        return ['bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor',
                'night_stand', 'sofa', 'table', 'toilet']

    @property
    def processed_file_names(self):
        return ['training.pt', 'test.pt']

    def download(self):
        # Download and unpack the archive, then move its contents into
        # `raw_dir`.
        path = download_url(self.urls[self.name], self.root)
        extract_zip(path, self.root)
        os.unlink(path)
        folder = osp.join(self.root, 'ModelNet{}'.format(self.name))
        shutil.rmtree(self.raw_dir)
        os.rename(folder, self.raw_dir)
        # Remove macOS metadata shipped inside the zip, if present.
        metadata_folder = osp.join(self.root, '__MACOSX')
        if osp.exists(metadata_folder):
            shutil.rmtree(metadata_folder)

    def process(self):
        torch.save(self.process_set('train'), self.processed_paths[0])
        torch.save(self.process_set('test'), self.processed_paths[1])

    def process_set(self, dataset):
        """Read all ``.off`` meshes of the given split (``'train'`` or
        ``'test'``), label them by sorted category index, and return the
        collated result."""
        categories = glob.glob(osp.join(self.raw_dir, '*', ''))
        categories = sorted([x.split(os.sep)[-2] for x in categories])
        data_list = []
        for target, category in enumerate(categories):
            folder = osp.join(self.raw_dir, category, dataset)
            paths = glob.glob('{}/{}_*.off'.format(folder, category))
            for i, path in enumerate(paths):
                # Enforce the per-class cap exactly: keep indices
                # 0 .. n_per_class-1.  The previous check (`i > n_per_class`
                # with `continue`) kept one extra sample per category.
                if self.n_per_class is not None and i >= self.n_per_class:
                    break
                data = read_off(path)
                data.y = torch.tensor([target])
                data_list.append(data)
        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]
        return self.collate(data_list)

    def __repr__(self):
        return '{}{}({})'.format(self.__class__.__name__, self.name,
                                 len(self))
|
class ScanObjectNN(InMemoryDataset):
    r"""The pre-processed ScanObjectNN dataset from the paper `"Revisiting
    Point Cloud Classification: A New Benchmark Dataset and Classification
    Model on Real-World Data" <https://arxiv.org/pdf/1908.04616.pdf>`_.

    The raw ``.h5`` files must be downloaded manually (see
    :obj:`download`), since the dataset requires accepting terms of use.

    Args:
        root (string): Root directory where the dataset should be saved.
        background (bool, optional): If :obj:`True`, use the variant that
            keeps background points. (default: :obj:`False`)
        augmentation (string, optional): One of the perturbation variants in
            :obj:`augmentation_variants`, or :obj:`None` for the vanilla
            split. (default: :obj:`None`)
        train (bool, optional): If :obj:`True`, loads the training split,
            otherwise the test split. (default: :obj:`True`)
        transform (callable, optional): Transform applied on every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): Transform applied once before
            saving to disk. (default: :obj:`None`)
        pre_filter (callable, optional): Predicate deciding whether a data
            object is kept. (default: :obj:`None`)
    """

    url = 'https://hkust-vgd.github.io/scanobjectnn/'

    def __init__(self, root, background=False, augmentation=None, train=True,
                 transform=None, pre_transform=None, pre_filter=None):
        assert augmentation in self.augmentation_variants
        self.augmentation = augmentation
        self.background = background
        self.bg_path = 'main_split' if background else 'main_split_nobg'
        super(ScanObjectNN, self).__init__(root, transform, pre_transform,
                                           pre_filter)
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.data, self.slices = torch.load(path)

    @property
    def class_names(self):
        return ['bag', 'bed', 'bin', 'box', 'cabinets', 'chair', 'desk',
                'display', 'door', 'pillow', 'shelves', 'sink', 'sofa',
                'table', 'toilet']

    @property
    def augmentation_variants(self):
        return [None, 'PB_T25', 'PB_T25_R', 'PB_T50_R', 'PB_T50_RS']

    @property
    def raw_file_dict(self):
        # Maps augmentation variant -> [training file, test file].
        return {
            None: ['training_objectdataset.h5',
                   'test_objectdataset.h5'],
            'PB_T25': ['training_objectdataset_augmented25_norot.h5',
                       'test_objectdataset_augmented25_norot.h5'],
            'PB_T25_R': ['training_objectdataset_augmented25rot.h5',
                         'test_objectdataset_augmented25rot.h5'],
            'PB_T50_R': ['training_objectdataset_augmentedrot.h5',
                         'test_objectdataset_augmentedrot.h5'],
            'PB_T50_RS': ['training_objectdataset_augmentedrot_scale75.h5',
                          'test_objectdataset_augmentedrot_scale75.h5'],
        }

    @property
    def raw_file_names(self):
        return [os.path.join(self.bg_path, filename)
                for filename in self.raw_file_dict[self.augmentation]]

    @property
    def processed_file_names(self):
        bg_string = 'bg' if self.background else 'nobg'
        augmentation_string = (self.augmentation
                               if self.augmentation is not None else 'vanilla')
        folder = bg_string + '_' + augmentation_string
        return [os.path.join(folder, 'training.pt'),
                os.path.join(folder, 'test.pt')]

    def download(self):
        # Cannot be fetched automatically (terms of use); instruct the user.
        if not os.path.exists(os.path.join(self.raw_dir,
                                           self.raw_file_names[0])):
            raise RuntimeError('Dataset not found, please download the dataset from {} and place the files in {}.'.format(self.url, self.raw_dir))
        return

    def process(self):
        for raw_path, path in zip(self.raw_paths, self.processed_paths):
            # NOTE(review): `raw_path` comes from `raw_paths`, which usually
            # already includes `raw_dir`; the extra join relies on
            # `raw_path` being absolute — confirm against the base class.
            data_list = []
            # Context manager guarantees the HDF5 handle is closed even on
            # error (the previous code never closed the file).
            with h5py.File(os.path.join(self.raw_dir, raw_path), 'r') as h5:
                for i, pos in enumerate(h5['data']):
                    y = h5['label'][i]
                    data_list.append(Data(pos=torch.from_numpy(pos),
                                          y=torch.Tensor([y]).long()))
            if self.pre_filter is not None:
                data_list = [d for d in data_list if self.pre_filter(d)]
            if self.pre_transform is not None:
                data_list = [self.pre_transform(d) for d in data_list]
            # exist_ok=True avoids the check-then-create race of the old
            # `if not exists: makedirs` pattern.
            os.makedirs(os.path.dirname(path), exist_ok=True)
            torch.save(self.collate(data_list), path)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, len(self))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.