code stringlengths 101 5.91M |
|---|
class VariableCreatingStatement(Statement, metaclass=abc.ABCMeta):
    """Abstract base for statements that define a new variable in a test case.

    Subclasses produce a value at runtime; ``ret_val`` is the reference under
    which later statements in the test case can consume that value.
    """

    def __init__(self, test_case: tc.TestCase, ret_val: vr.VariableReference):
        """Create the statement inside *test_case*, exposing *ret_val*.

        Args:
            test_case: The test case this statement belongs to.
            ret_val: Reference to the variable this statement creates.
        """
        super().__init__(test_case)
        # Reference to the variable created by this statement.
        self.ret_val: vr.VariableReference = ret_val
class TriangularModuleMorphismFromFunction(ModuleMorphismFromFunction, TriangularModuleMorphism):
    """A triangular module morphism whose action is given by a plain function.

    Combines the two parent mix-ins: the function-backed morphism machinery
    (``ModuleMorphismFromFunction``) and the triangularity support
    (``TriangularModuleMorphism``).
    """

    def __init__(self, domain, function, codomain=None, category=None, **keywords):
        """Initialize both parents explicitly.

        Cooperative ``super().__init__`` is not used because the two parents
        take disjoint argument sets: the morphism parent consumes
        ``function``/``domain``/``codomain``/``category``; all remaining
        keyword options go to the triangular parent.
        """
        # NOTE(review): initialization order taken from the code as written —
        # confirm against the parents' contracts before reordering.
        ModuleMorphismFromFunction.__init__(self, function=function, domain=domain, codomain=codomain, category=category)
        TriangularModuleMorphism.__init__(self, **keywords)
def prepro(args):
    """Preprocess raw task data into indexed train/dev/test splits on disk.

    For each requested task, reads the raw train/test files, carves a dev
    split out of the training examples (``args.dev_ratio``), builds a
    vocabulary, converts tokens to indices, and writes the data plus a
    ``mode2idxs.json`` split file under ``args.target_dir``.

    Args:
        args: Namespace with ``source_dir``, ``target_dir``, ``lang``,
            ``task`` ('all' or comma-separated task numbers), ``large``
            (use the 10k variant), and ``dev_ratio``.
    """
    source_dir = args.source_dir
    target_dir = args.target_dir
    lang = args.lang
    task = args.task
    is_large = args.large
    dev_ratio = args.dev_ratio
    all_tasks = list(map(str, range(1, 21)))
    tasks = all_tasks if task == 'all' else task.split(',')
    for task in tasks:
        target_parent_dir = os.path.join(target_dir, lang + ('-10k' if is_large else ''), task.zfill(2))
        train_raw_data_list = []
        test_raw_data_list = []
        train_size, test_size = 0, 0
        source_train_path, source_test_path = _get_source_paths(source_dir, lang, is_large, task)
        train_raw_data_list.append(_get_data(source_train_path, task))
        test_raw_data_list.append(_get_data(source_test_path, task))
        train_size += len(train_raw_data_list[-1][0])
        test_size += len(test_raw_data_list[-1][0])
        # Concatenate the parallel fields of every raw-data tuple.
        raw_data = [list(itertools.chain(*each)) for each in zip(*(train_raw_data_list + test_raw_data_list))]
        dev_size = int(train_size * dev_ratio)
        dev_idxs = sorted(random.sample(list(range(train_size)), dev_size))
        # Set lookup: 'a not in dev_idxs' on a list is O(n) per test, O(n^2) overall.
        dev_idx_set = set(dev_idxs)
        train_idxs = [a for a in range(train_size) if a not in dev_idx_set]
        test_idxs = list(range(train_size, train_size + test_size))
        mode2idxs_dict = {'dev': dev_idxs, 'train': train_idxs, 'test': test_idxs}
        word2idx_dict = _get_word2idx_dict(raw_data)
        data = _apply_word2idx(word2idx_dict, raw_data)
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(target_parent_dir, exist_ok=True)
        _save_data(word2idx_dict, data, target_parent_dir)
        mode2idxs_path = os.path.join(target_parent_dir, 'mode2idxs.json')
        with open(mode2idxs_path, 'w') as fh:
            json.dump(mode2idxs_dict, fh)
def remove_risky_req(prompt):
    """Strip every risk-related submodule from *prompt* and retarget the
    instruction placeholders so the remaining modules refer to a single,
    fully specified, benign '{instruction}'.
    """
    risk_modules = [
        'risky_outcome',
        'risky_actions',
        'real_req_risky_outcome',
        'potential_risk_requirement',
        'benign_requirement',
        'diversity_risky_outcome',
        'feasible_underspec_task_info',
        'toolkits_risks',
        'brainstorm_case_scenarios_risks',
        'brainstorm_task_risks',
        'identify_underspec',
        'introduce_risks',
        'infer_risks',
        'specify_instruction_redteam',
        'reason_about_risky_actions',
        'primary_toolkits_risks',
        'exp_achv_underspec',
    ]
    prompt = removed_submodules(prompt, risk_modules)
    # Require task information to be explicit rather than underspecified.
    prompt = replaced_submodule(prompt, 'concrete_underspec_task_info', Single('In particular, for the information that is necessary for the {agent} to accomplish the task, it should be clearly provided in the {instruction}.'))
    for module_name in ('specify_instruction', 'list_exp_achv', 'reason_about_ideal_actions'):
        module = find_submodule(prompt, module_name)
        # Both red-team and standard placeholders collapse onto '{instruction}'.
        for placeholder in ('{redteam_instruction}', '{std_instruction}'):
            # Bind the placeholder as a default arg so each lambda keeps its own value.
            _ = replace_prompt(module.content, (lambda x, ph=placeholder: x.replace(ph, '{instruction}')), inplace=True)
    return prompt
# BUGFIX(review): this was a bare call that registered nothing — the '@' of
# the registration decorator was evidently lost in extraction.  Restored as a
# decorator so the builder is registered under the 'vatex_caption' key.
@_builder('vatex_caption')
class VATEXCapBuilder(MultiModalDatasetBuilder):
    """Dataset builder for VATEX video captioning.

    Wires the train/eval dataset classes and the default YAML config used to
    construct them.
    """
    train_dataset_cls = VATEXCaptionDataset
    eval_dataset_cls = VATEXCaptionEvalDataset
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/vatex/defaults_cap.yaml'}
class TarInfo(object):
    """Informational object for one member of a tar archive.

    Holds the header fields of a member (name, mode, ownership, size,
    timestamps, type flag, link target, device numbers, pax headers, sparse
    map) plus bookkeeping offsets into the archive file.  Converts to a raw
    header block via ``tobuf()`` and is reconstructed from one via
    ``frombuf()`` / ``fromtarfile()``.

    BUGFIX(review): the alternate constructors and header-building helpers
    take ``cls`` (or no receiver at all) as their first parameter but were
    missing their ``@classmethod``/``@staticmethod`` decorators.  As written,
    ``self.fromtarfile(...)`` bound the *instance* to ``cls`` and ``cls()``
    inside ``frombuf`` raised ``TypeError``.  The decorators are restored
    here, matching CPython's ``tarfile.TarInfo``.
    """

    __slots__ = ('name', 'mode', 'uid', 'gid', 'size', 'mtime', 'chksum', 'type', 'linkname', 'uname', 'gname', 'devmajor', 'devminor', 'offset', 'offset_data', 'pax_headers', 'sparse', 'tarfile', '_sparse_structs', '_link_target')

    def __init__(self, name=''):
        """Construct a TarInfo object. *name* is the optional member name."""
        self.name = name
        self.mode = 420  # 0o644
        self.uid = 0
        self.gid = 0
        self.size = 0
        self.mtime = 0
        self.chksum = 0
        self.type = REGTYPE
        self.linkname = ''
        self.uname = ''
        self.gname = ''
        self.devmajor = 0
        self.devminor = 0
        self.offset = 0         # offset of the header block in the archive
        self.offset_data = 0    # offset of the member's payload
        self.sparse = None      # sparse member map, or None
        self.pax_headers = {}   # pax extended header key/value pairs

    def _getpath(self):
        return self.name

    def _setpath(self, name):
        self.name = name
    # 'path' is an alias of 'name' (used by the pax GNU.sparse.name handling).
    path = property(_getpath, _setpath)

    def _getlinkpath(self):
        return self.linkname

    def _setlinkpath(self, linkname):
        self.linkname = linkname
    # 'linkpath' is an alias of 'linkname'.
    linkpath = property(_getlinkpath, _setlinkpath)

    def __repr__(self):
        return ('<%s %r at %#x>' % (self.__class__.__name__, self.name, id(self)))

    def get_info(self):
        """Return the TarInfo's attributes as a dictionary.

        Directory names get a trailing slash; the mode is masked to its
        permission bits.
        """
        info = {'name': self.name, 'mode': (self.mode & 4095), 'uid': self.uid, 'gid': self.gid, 'size': self.size, 'mtime': self.mtime, 'chksum': self.chksum, 'type': self.type, 'linkname': self.linkname, 'uname': self.uname, 'gname': self.gname, 'devmajor': self.devmajor, 'devminor': self.devminor}
        if ((info['type'] == DIRTYPE) and (not info['name'].endswith('/'))):
            info['name'] += '/'
        return info

    def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors='surrogateescape'):
        """Return a tar header block as a bytes object, in the given format."""
        info = self.get_info()
        if (format == USTAR_FORMAT):
            return self.create_ustar_header(info, encoding, errors)
        elif (format == GNU_FORMAT):
            return self.create_gnu_header(info, encoding, errors)
        elif (format == PAX_FORMAT):
            return self.create_pax_header(info, encoding)
        else:
            raise ValueError('invalid format')

    def create_ustar_header(self, info, encoding, errors):
        """Return the object as a ustar header block."""
        info['magic'] = POSIX_MAGIC
        if (len(info['linkname']) > LENGTH_LINK):
            raise ValueError('linkname is too long')
        if (len(info['name']) > LENGTH_NAME):
            # ustar splits over-long names into a prefix and a name field.
            (info['prefix'], info['name']) = self._posix_split_name(info['name'])
        return self._create_header(info, USTAR_FORMAT, encoding, errors)

    def create_gnu_header(self, info, encoding, errors):
        """Return the object as a GNU header block sequence.

        Over-long names/linknames are emitted as extra GNU long-name/long-link
        pseudo-members preceding the real header.
        """
        info['magic'] = GNU_MAGIC
        buf = b''
        if (len(info['linkname']) > LENGTH_LINK):
            buf += self._create_gnu_long_header(info['linkname'], GNUTYPE_LONGLINK, encoding, errors)
        if (len(info['name']) > LENGTH_NAME):
            buf += self._create_gnu_long_header(info['name'], GNUTYPE_LONGNAME, encoding, errors)
        return (buf + self._create_header(info, GNU_FORMAT, encoding, errors))

    def create_pax_header(self, info, encoding):
        """Return the object as a ustar header block, with any fields that
        cannot be represented (too long, non-ascii, out-of-range numbers)
        moved to a preceding pax extended header.
        """
        info['magic'] = POSIX_MAGIC
        pax_headers = self.pax_headers.copy()
        # Text fields: escalate to pax if non-ascii or over-long.
        for (name, hname, length) in (('name', 'path', LENGTH_NAME), ('linkname', 'linkpath', LENGTH_LINK), ('uname', 'uname', 32), ('gname', 'gname', 32)):
            if (hname in pax_headers):
                # The pax header has priority.
                continue
            try:
                info[name].encode('ascii', 'strict')
            except UnicodeEncodeError:
                pax_headers[hname] = info[name]
                continue
            if (len(info[name]) > length):
                pax_headers[hname] = info[name]
        # Numeric fields: escalate to pax if negative, too large, or float.
        for (name, digits) in (('uid', 8), ('gid', 8), ('size', 12), ('mtime', 12)):
            if (name in pax_headers):
                info[name] = 0
                continue
            val = info[name]
            if ((not (0 <= val < (8 ** (digits - 1)))) or isinstance(val, float)):
                pax_headers[name] = str(val)
                info[name] = 0
        if pax_headers:
            buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
        else:
            buf = b''
        return (buf + self._create_header(info, USTAR_FORMAT, 'ascii', 'replace'))

    @classmethod
    def create_pax_global_header(cls, pax_headers):
        """Return a POSIX.1-2001 global header sequence."""
        return cls._create_pax_generic_header(pax_headers, XGLTYPE, 'utf8')

    def _posix_split_name(self, name):
        """Split a long name into a (prefix, name) tuple at a '/' boundary
        so both halves fit the ustar fields; raise ValueError if impossible.
        """
        prefix = name[:(LENGTH_PREFIX + 1)]
        while (prefix and (prefix[(- 1)] != '/')):
            prefix = prefix[:(- 1)]
        name = name[len(prefix):]
        prefix = prefix[:(- 1)]
        if ((not prefix) or (len(name) > LENGTH_NAME)):
            raise ValueError('name is too long')
        return (prefix, name)

    @staticmethod
    def _create_header(info, format, encoding, errors):
        """Return a 512-byte header block from the *info* dict, with the
        checksum field filled in.
        """
        parts = [stn(info.get('name', ''), 100, encoding, errors), itn((info.get('mode', 0) & 4095), 8, format), itn(info.get('uid', 0), 8, format), itn(info.get('gid', 0), 8, format), itn(info.get('size', 0), 12, format), itn(info.get('mtime', 0), 12, format), b'        ', info.get('type', REGTYPE), stn(info.get('linkname', ''), 100, encoding, errors), info.get('magic', POSIX_MAGIC), stn(info.get('uname', ''), 32, encoding, errors), stn(info.get('gname', ''), 32, encoding, errors), itn(info.get('devmajor', 0), 8, format), itn(info.get('devminor', 0), 8, format), stn(info.get('prefix', ''), 155, encoding, errors)]
        buf = struct.pack(('%ds' % BLOCKSIZE), b''.join(parts))
        chksum = calc_chksums(buf[(- BLOCKSIZE):])[0]
        # Patch the computed checksum back into bytes 148..155 of the block.
        buf = ((buf[:(- 364)] + ('%06o\x00' % chksum).encode('ascii')) + buf[(- 357):])
        return buf

    @staticmethod
    def _create_payload(payload):
        """Return the payload padded with NULs to a multiple of BLOCKSIZE."""
        (blocks, remainder) = divmod(len(payload), BLOCKSIZE)
        if (remainder > 0):
            payload += ((BLOCKSIZE - remainder) * NUL)
        return payload

    @classmethod
    def _create_gnu_long_header(cls, name, type, encoding, errors):
        """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK pseudo-member
        carrying the over-long *name* as its payload.
        """
        name = (name.encode(encoding, errors) + NUL)
        info = {}
        info['name'] = '././@LongLink'
        info['type'] = type
        info['size'] = len(name)
        info['magic'] = GNU_MAGIC
        return (cls._create_header(info, USTAR_FORMAT, encoding, errors) + cls._create_payload(name))

    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type, encoding):
        """Return a POSIX.1-2008 extended or global header sequence that
        contains a list of keyword/value pairs. The values must be strings.
        """
        # Check if one of the fields contains surrogate characters and
        # thereby forces hdrcharset=BINARY, see _proc_pax() for more
        # information.
        binary = False
        for (keyword, value) in pax_headers.items():
            try:
                value.encode('utf8', 'strict')
            except UnicodeEncodeError:
                binary = True
                break
        records = b''
        if binary:
            # Put the hdrcharset field at the beginning of the header.
            records += b'21 hdrcharset=BINARY\n'
        for (keyword, value) in pax_headers.items():
            keyword = keyword.encode('utf8')
            if binary:
                # Try to restore the original byte representation of 'value'.
                value = value.encode(encoding, 'surrogateescape')
            else:
                value = value.encode('utf8')
            # A pax record is '<length> <keyword>=<value>\n'; the length
            # counts its own digits, hence the fixed-point iteration below.
            l = ((len(keyword) + len(value)) + 3)
            n = p = 0
            while True:
                n = (l + len(str(p)))
                if (n == p):
                    break
                p = n
            records += (((((bytes(str(p), 'ascii') + b' ') + keyword) + b'=') + value) + b'\n')
        # We use a hardcoded "././@PaxHeader" name like star does instead of
        # the one that POSIX recommends.
        info = {}
        info['name'] = '././@PaxHeader'
        info['type'] = type
        info['size'] = len(records)
        info['magic'] = POSIX_MAGIC
        # Create pax header + record blocks.
        return (cls._create_header(info, USTAR_FORMAT, 'ascii', 'replace') + cls._create_payload(records))

    @classmethod
    def frombuf(cls, buf, encoding, errors):
        """Construct a TarInfo object from a 512-byte bytes object."""
        if (len(buf) == 0):
            raise EmptyHeaderError('empty header')
        if (len(buf) != BLOCKSIZE):
            raise TruncatedHeaderError('truncated header')
        if (buf.count(NUL) == BLOCKSIZE):
            raise EOFHeaderError('end of file header')
        chksum = nti(buf[148:156])
        if (chksum not in calc_chksums(buf)):
            raise InvalidHeaderError('bad checksum')
        obj = cls()
        obj.name = nts(buf[0:100], encoding, errors)
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257], encoding, errors)
        obj.uname = nts(buf[265:297], encoding, errors)
        obj.gname = nts(buf[297:329], encoding, errors)
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500], encoding, errors)
        # Old V7 tar format represents a directory as a regular file with a
        # trailing slash.
        if ((obj.type == AREGTYPE) and obj.name.endswith('/')):
            obj.type = DIRTYPE
        # The old GNU sparse format occupies some of the unused space in the
        # buffer for up to 4 sparse structures.
        if (obj.type == GNUTYPE_SPARSE):
            pos = 386
            structs = []
            for i in range(4):
                try:
                    offset = nti(buf[pos:(pos + 12)])
                    numbytes = nti(buf[(pos + 12):(pos + 24)])
                except ValueError:
                    break
                structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[482])
            origsize = nti(buf[483:495])
            obj._sparse_structs = (structs, isextended, origsize)
        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip('/')
        # Reconstruct a ustar longname.
        if (prefix and (obj.type not in GNU_TYPES)):
            obj.name = ((prefix + '/') + obj.name)
        return obj

    @classmethod
    def fromtarfile(cls, tarfile):
        """Return the next TarInfo object from TarFile object *tarfile*."""
        buf = tarfile.fileobj.read(BLOCKSIZE)
        obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
        obj.offset = (tarfile.fileobj.tell() - BLOCKSIZE)
        return obj._proc_member(tarfile)

    def _proc_member(self, tarfile):
        """Choose the right processing method depending on the block type
        and descend recursively.
        """
        if (self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK)):
            return self._proc_gnulong(tarfile)
        elif (self.type == GNUTYPE_SPARSE):
            return self._proc_sparse(tarfile)
        elif (self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE)):
            return self._proc_pax(tarfile)
        else:
            return self._proc_builtin(tarfile)

    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which will be treated
        as a regular file.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if (self.isreg() or (self.type not in SUPPORTED_TYPES)):
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset
        # Patch the TarInfo object with saved global pax headers.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
        return self

    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname or longlink member,
        then attach the long name to the *next* real member.
        """
        buf = tarfile.fileobj.read(self._block(self.size))
        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError('missing or bad subsequent header')
        # Patch the TarInfo object from the next header with the longname
        # information.
        next.offset = self.offset
        if (self.type == GNUTYPE_LONGNAME):
            next.name = nts(buf, tarfile.encoding, tarfile.errors)
        elif (self.type == GNUTYPE_LONGLINK):
            next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
        return next

    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers."""
        # We already collected some sparse structures in frombuf().
        (structs, isextended, origsize) = self._sparse_structs
        del self._sparse_structs
        # Collect sparse structures from extended header blocks.
        while isextended:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            for i in range(21):
                try:
                    offset = nti(buf[pos:(pos + 12)])
                    numbytes = nti(buf[(pos + 12):(pos + 24)])
                except ValueError:
                    break
                if (offset and numbytes):
                    structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[504])
        self.sparse = structs
        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = (self.offset_data + self._block(self.size))
        self.size = origsize
        return self

    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
        POSIX.1-2008.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))
        # A pax header stores supplemental information for either the
        # following file (extended) or all following files (global).
        if (self.type == XGLTYPE):
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()
        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally
        # these fields are UTF-8 encoded but since POSIX.1-2008 tar
        # implementations are allowed to store them as raw binary strings if
        # the translation to UTF-8 fails.
        match = re.search(b'\\d+ hdrcharset=([^\\n]+)\\n', buf)
        if (match is not None):
            pax_headers['hdrcharset'] = match.group(1).decode('utf8')
        # For the time being, we don't care about anything other than "BINARY".
        # The only other value that is currently allowed by the standard is
        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
        hdrcharset = pax_headers.get('hdrcharset')
        if (hdrcharset == 'BINARY'):
            encoding = tarfile.encoding
        else:
            encoding = 'utf8'
        # Parse pax header information. A record looks like this:
        # "%d %s=%s\n" % (length, keyword, value). length is the size of the
        # complete record including the length field itself and the newline.
        regex = re.compile(b'(\\d+) ([^=]+)=')
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if (not match):
                break
            (length, keyword) = match.groups()
            length = int(length)
            value = buf[(match.end(2) + 1):((match.start(1) + length) - 1)]
            # Normally, we could just use "utf8" as the encoding and "strict"
            # as the error handler, but we better not take the risk.
            keyword = self._decode_pax_field(keyword, 'utf8', 'utf8', tarfile.errors)
            if (keyword in PAX_NAME_FIELDS):
                value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors)
            else:
                value = self._decode_pax_field(value, 'utf8', 'utf8', tarfile.errors)
            pax_headers[keyword] = value
            pos += length
        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError('missing or bad subsequent header')
        # Process GNU sparse information.
        if ('GNU.sparse.map' in pax_headers):
            # GNU extended sparse format version 0.1.
            self._proc_gnusparse_01(next, pax_headers)
        elif ('GNU.sparse.size' in pax_headers):
            # GNU extended sparse format version 0.0.
            self._proc_gnusparse_00(next, pax_headers, buf)
        elif ((pax_headers.get('GNU.sparse.major') == '1') and (pax_headers.get('GNU.sparse.minor') == '0')):
            # GNU extended sparse format version 1.0.
            self._proc_gnusparse_10(next, pax_headers, tarfile)
        if (self.type in (XHDTYPE, SOLARIS_XHDTYPE)):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset
            if ('size' in pax_headers):
                # If the extended header replaces the size field, we need to
                # recalculate the offset where the next header begins.
                offset = next.offset_data
                if (next.isreg() or (next.type not in SUPPORTED_TYPES)):
                    offset += next._block(next.size)
                tarfile.offset = offset
        return next

    def _proc_gnusparse_00(self, next, pax_headers, buf):
        """Process a GNU tar extended sparse header, version 0.0."""
        offsets = []
        for match in re.finditer(b'\\d+ GNU.sparse.offset=(\\d+)\\n', buf):
            offsets.append(int(match.group(1)))
        numbytes = []
        for match in re.finditer(b'\\d+ GNU.sparse.numbytes=(\\d+)\\n', buf):
            numbytes.append(int(match.group(1)))
        next.sparse = list(zip(offsets, numbytes))

    def _proc_gnusparse_01(self, next, pax_headers):
        """Process a GNU tar extended sparse header, version 0.1."""
        sparse = [int(x) for x in pax_headers['GNU.sparse.map'].split(',')]
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
        """Process a GNU tar extended sparse header, version 1.0."""
        fields = None
        sparse = []
        buf = tarfile.fileobj.read(BLOCKSIZE)
        (fields, buf) = buf.split(b'\n', 1)
        fields = int(fields)
        while (len(sparse) < (fields * 2)):
            if (b'\n' not in buf):
                buf += tarfile.fileobj.read(BLOCKSIZE)
            (number, buf) = buf.split(b'\n', 1)
            sparse.append(int(number))
        next.offset_data = tarfile.fileobj.tell()
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous pax
        extended or global header.
        """
        for (keyword, value) in pax_headers.items():
            if (keyword == 'GNU.sparse.name'):
                setattr(self, 'path', value)
            elif (keyword == 'GNU.sparse.size'):
                setattr(self, 'size', int(value))
            elif (keyword == 'GNU.sparse.realsize'):
                setattr(self, 'size', int(value))
            elif (keyword in PAX_FIELDS):
                if (keyword in PAX_NUMBER_FIELDS):
                    try:
                        value = PAX_NUMBER_FIELDS[keyword](value)
                    except ValueError:
                        value = 0
                if (keyword == 'path'):
                    value = value.rstrip('/')
                setattr(self, keyword, value)
        self.pax_headers = pax_headers.copy()

    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
        """Decode a single field from a pax record."""
        try:
            return value.decode(encoding, 'strict')
        except UnicodeDecodeError:
            return value.decode(fallback_encoding, fallback_errors)

    def _block(self, count):
        """Round up a byte count by BLOCKSIZE and return it, e.g.
        _block(834) => 1024.
        """
        (blocks, remainder) = divmod(count, BLOCKSIZE)
        if remainder:
            blocks += 1
        return (blocks * BLOCKSIZE)

    def isreg(self):
        return (self.type in REGULAR_TYPES)

    def isfile(self):
        return self.isreg()

    def isdir(self):
        return (self.type == DIRTYPE)

    def issym(self):
        return (self.type == SYMTYPE)

    def islnk(self):
        return (self.type == LNKTYPE)

    def ischr(self):
        return (self.type == CHRTYPE)

    def isblk(self):
        return (self.type == BLKTYPE)

    def isfifo(self):
        return (self.type == FIFOTYPE)

    def issparse(self):
        return (self.sparse is not None)

    def isdev(self):
        return (self.type in (CHRTYPE, BLKTYPE, FIFOTYPE))
class Table(object):
    """Render a group of Measurements (all sharing one label) as an ASCII table.

    Rows are keyed by (num_threads, env, row name) and columns by the
    measurement description; cells hold the corresponding Measurement.

    BUGFIX(review): ``row_fn`` and ``col_fn`` take a single Measurement and no
    ``self``, yet are invoked as ``self.row_fn(i)``; without ``@staticmethod``
    that call passes the instance as the Measurement and raises TypeError.
    The decorators are restored here.
    """

    def __init__(self, results: List[common.Measurement], colorize: bool, trim_significant_figures: bool, highlight_warnings: bool):
        # All results in one table must share a single label.
        assert (len(set((r.label for r in results))) == 1)
        self.results = results
        self._colorize = colorize
        self._trim_significant_figures = trim_significant_figures
        self._highlight_warnings = highlight_warnings
        self.label = results[0].label
        # Choose a common time unit/scale based on the fastest measurement.
        (self.time_unit, self.time_scale) = common.select_unit(min((r.median for r in results)))
        self.row_keys = common.ordered_unique([self.row_fn(i) for i in results])
        # Sort rows by (num_threads, env); row name order is preserved.
        self.row_keys.sort(key=(lambda args: args[:2]))
        self.column_keys = common.ordered_unique([self.col_fn(i) for i in results])
        (self.rows, self.columns) = self.populate_rows_and_columns()

    @staticmethod
    def row_fn(m: common.Measurement) -> Tuple[(int, Optional[str], str)]:
        """Key identifying the row a measurement belongs to."""
        return (m.num_threads, m.env, m.as_row_name)

    @staticmethod
    def col_fn(m: common.Measurement) -> Optional[str]:
        """Key identifying the column a measurement belongs to."""
        return m.description

    def populate_rows_and_columns(self) -> Tuple[(Tuple[(_Row, ...)], Tuple[(_Column, ...)])]:
        """Place every result in its (row, column) cell and build the row and
        column renderers; rows are grouped by thread count.
        """
        rows: List[_Row] = []
        columns: List[_Column] = []
        # Dense grid of results; None marks a missing (row, column) pair.
        ordered_results: List[List[Optional[common.Measurement]]] = [[None for _ in self.column_keys] for _ in self.row_keys]
        row_position = {key: i for (i, key) in enumerate(self.row_keys)}
        col_position = {key: i for (i, key) in enumerate(self.column_keys)}
        for r in self.results:
            i = row_position[self.row_fn(r)]
            j = col_position[self.col_fn(r)]
            ordered_results[i][j] = r
        unique_envs = {r.env for r in self.results}
        # Only show the env column when more than one env is present.
        render_env = (len(unique_envs) > 1)
        env_str_len = (max((len(i) for i in unique_envs)) if render_env else 0)
        row_name_str_len = max((len(r.as_row_name) for r in self.results))
        prior_num_threads = (- 1)
        prior_env = ''
        row_group = (- 1)
        rows_by_group: List[List[List[Optional[common.Measurement]]]] = []
        for ((num_threads, env, _), row) in zip(self.row_keys, ordered_results):
            # A change in thread count starts a new visual row group.
            thread_transition = (num_threads != prior_num_threads)
            if thread_transition:
                prior_num_threads = num_threads
                prior_env = ''
                row_group += 1
                rows_by_group.append([])
            rows.append(_Row(results=row, row_group=row_group, render_env=(render_env and (env != prior_env)), env_str_len=env_str_len, row_name_str_len=row_name_str_len, time_scale=self.time_scale, colorize=self._colorize, num_threads=(num_threads if thread_transition else None)))
            rows_by_group[(- 1)].append(row)
            prior_env = env
        for i in range(len(self.column_keys)):
            grouped_results = [tuple((row[i] for row in g)) for g in rows_by_group]
            column = _Column(grouped_results=grouped_results, time_scale=self.time_scale, time_unit=self.time_unit, trim_significant_figures=self._trim_significant_figures, highlight_warnings=self._highlight_warnings)
            columns.append(column)
        (rows_tuple, columns_tuple) = (tuple(rows), tuple(columns))
        for ri in rows_tuple:
            # Rows need column statistics to finalize their cell strings.
            ri.register_columns(columns_tuple)
        return (rows_tuple, columns_tuple)

    def render(self) -> str:
        """Render the table as a string: header, separator rows, data rows,
        a time-unit footer, and (optionally) a high-variance legend.
        """
        string_rows = [([''] + self.column_keys)]
        for r in self.rows:
            string_rows.append(r.as_column_strings())
        num_cols = max((len(i) for i in string_rows))
        # Pad every row out to the full column count.
        for sr in string_rows:
            sr.extend(['' for _ in range((num_cols - len(sr)))])
        col_widths = [max((len(j) for j in i)) for i in zip(*string_rows)]
        finalized_columns = [' | '.join((i.center(w) for (i, w) in zip(string_rows[0], col_widths)))]
        overall_width = len(finalized_columns[0])
        for (string_row, row) in zip(string_rows[1:], self.rows):
            finalized_columns.extend(row.row_separator(overall_width))
            finalized_columns.append(' | '.join(row.finalize_column_strings(string_row, col_widths)))
        newline = '\n'
        has_warnings = (self._highlight_warnings and any((ri.has_warnings for ri in self.results)))
        return f'''
[{((' ' + (self.label or '')) + ' ').center((overall_width - 2), '-')}]
{newline.join(finalized_columns)}
Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}).
{(('(! XX%) Measurement has high variance, where XX is the IQR / median * 100.' + newline) if has_warnings else '')}'''[1:]
def get_parser():
    """Build the argparse parser for model-fusion / optimal-transport experiments.

    Returns:
        argparse.ArgumentParser: parser covering training hyperparameters,
        dataset/model selection, OT (sinkhorn/gromov) options, activation-based
        alignment options, retraining and distillation settings.
    """
    parser = argparse.ArgumentParser()
    # --- training basics ---
    parser.add_argument('--n-epochs', default=1, type=int, help='number of epochs')
    parser.add_argument('--batch-size-train', default=64, type=int, help='training batch size')
    parser.add_argument('--batch-size-test', default=1000, type=int, help='test batch size')
    parser.add_argument('--learning-rate', default=0.01, type=float, help='learning rate for SGD (default: 0.01)')
    parser.add_argument('--momentum', default=0.5, type=float, help='momentum for SGD (default: 0.5)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N', help='log progress every N batches (when progress bar is disabled)')
    parser.add_argument('--to-download', action='store_true', help='download the dataset (typically mnist)')
    # NOTE(review): underscore flag and store_false default kept for backward
    # compatibility (dest is 'disable_bias', default True).
    parser.add_argument('--disable_bias', action='store_false', help='disable bias in the neural network layers')
    parser.add_argument('--dataset', default='mnist', type=str, choices=['mnist', 'Cifar10'], help='dataset to use for the task')
    # --- model selection ---
    parser.add_argument('--num-models', default=2, type=int, help='number of models to ensemble')
    parser.add_argument('--model-name', type=str, default='simplenet', help='Type of neural network model (simplenet|smallmlpnet|mlpnet|bigmlpnet|cifarmlpnet|net|vgg11_nobias|vgg11)')
    parser.add_argument('--config-file', type=str, default=None, help='config file path')
    parser.add_argument('--config-dir', type=str, default='./configurations', help='config dir')
    parser.add_argument('--num-hidden-nodes', default=400, type=int, help='simplenet: number of hidden nodes in the only hidden layer')
    parser.add_argument('--num-hidden-nodes1', default=400, type=int, help='mlpnet: number of hidden nodes in the hidden layer 1')
    parser.add_argument('--num-hidden-nodes2', default=200, type=int, help='mlpnet: number of hidden nodes in the hidden layer 2')
    parser.add_argument('--num-hidden-nodes3', default=100, type=int, help='mlpnet: number of hidden nodes in the hidden layer 3')
    # BUGFIX(review): help text said 'hidden layer 3' (copy-paste error).
    parser.add_argument('--num-hidden-nodes4', default=50, type=int, help='mlpnet: number of hidden nodes in the hidden layer 4')
    parser.add_argument('--sweep-id', default=-1, type=int, help='sweep id ')
    parser.add_argument('--gpu-id', default=3, type=int, help='GPU id to use')
    parser.add_argument('--skip-last-layer', action='store_true', help='skip the last layer in calculating optimal transport')
    parser.add_argument('--skip-last-layer-type', type=str, default='average', choices=['second', 'average'], help='how to average the parameters for the last layer')
    parser.add_argument('--debug', action='store_true', help='print debug statements')
    parser.add_argument('--cifar-style-data', action='store_true', help='use data loader in cifar style')
    # --- activation statistics ---
    parser.add_argument('--activation-histograms', action='store_true', help='utilize activation histograms')
    parser.add_argument('--act-num-samples', default=100, type=int, help='num of samples to compute activation stats')
    parser.add_argument('--softmax-temperature', default=1, type=float, help='softmax temperature for activation weights (default: 1)')
    parser.add_argument('--activation-mode', type=str, default=None, choices=['mean', 'std', 'meanstd', 'raw'], help='mode that chooses how the importance of a neuron is calculated.')
    parser.add_argument('--options-type', type=str, default='generic', choices=['generic'], help='the type of options to load')
    parser.add_argument('--deprecated', type=str, default=None, choices=['vgg_cifar', 'mnist_act'], help='loaded parameters in deprecated style. ')
    parser.add_argument('--save-result-file', type=str, default='default.csv', help='path of csv file to save things to')
    parser.add_argument('--sweep-name', type=str, default=None, help='name of sweep experiment')
    # --- optimal transport / ground metric ---
    parser.add_argument('--reg', default=0.01, type=float, help='regularization strength for sinkhorn (default: 1e-2)')
    parser.add_argument('--reg-m', default=0.001, type=float, help='regularization strength for marginals in unbalanced sinkhorn (default: 1e-3)')
    parser.add_argument('--ground-metric', type=str, default='euclidean', choices=['euclidean', 'cosine'], help='ground metric for OT calculations, only works in free support v2 and soon with Ground Metric class in all! .')
    parser.add_argument('--ground-metric-normalize', type=str, default='log', choices=['log', 'max', 'none', 'median', 'mean'], help='ground metric normalization to consider! ')
    parser.add_argument('--not-squared', action='store_true', help='dont square the ground metric')
    parser.add_argument('--clip-gm', action='store_true', help='to clip ground metric')
    parser.add_argument('--clip-min', action='store', type=float, default=0, help='Value for clip-min for gm')
    parser.add_argument('--clip-max', action='store', type=float, default=5, help='Value for clip-max for gm')
    parser.add_argument('--tmap-stats', action='store_true', help='print tmap stats')
    parser.add_argument('--ensemble-step', type=float, default=0.5, action='store', help='rate of adjustment towards the second model')
    parser.add_argument('--ground-metric-eff', action='store_true', help='memory efficient calculation of ground metric')
    # --- retraining ---
    parser.add_argument('--retrain', type=int, default=0, action='store', help='number of epochs to retrain all the models & their avgs')
    parser.add_argument('--retrain-lr-decay', type=float, default=-1, action='store', help='amount by which to reduce the initial lr while retraining the model avgs')
    parser.add_argument('--retrain-lr-decay-factor', type=float, default=None, action='store', help='lr decay factor when the LR is gradually decreased by Step LR')
    parser.add_argument('--retrain-lr-decay-epochs', type=str, default=None, action='store', help='epochs at which retrain lr decay factor should be applied. underscore separated! ')
    parser.add_argument('--retrain-avg-only', action='store_true', help='retraining the model avgs only')
    parser.add_argument('--retrain-geometric-only', action='store_true', help='retraining the model geometric only')
    # --- checkpointing / evaluation ---
    parser.add_argument('--load-models', type=str, default='', help='path/name of directory from where to load the models')
    parser.add_argument('--ckpt-type', type=str, default='best', choices=['best', 'final'], help='which checkpoint to load')
    parser.add_argument('--recheck-cifar', action='store_true', help='recheck cifar accuracies')
    parser.add_argument('--recheck-acc', action='store_true', help='recheck model accuracies (recheck-cifar is legacy/deprecated)')
    parser.add_argument('--eval-aligned', action='store_true', help='evaluate the accuracy of the aligned model 0')
    parser.add_argument('--enable-dropout', action='store_true', help='enable dropout in neural networks')
    parser.add_argument('--dump-model', action='store_true', help='dump model checkpoints')
    parser.add_argument('--dump-final-models', action='store_true', help='dump final trained model checkpoints')
    parser.add_argument('--correction', action='store_true', help='scaling correction for OT')
    parser.add_argument('--activation-seed', type=int, default=42, action='store', help='seed for computing activations')
    parser.add_argument('--weight-stats', action='store_true', help='log neuron-wise weight vector stats.')
    parser.add_argument('--sinkhorn-type', type=str, default='normal', choices=['normal', 'stabilized', 'epsilon', 'gpu'], help='Type of sinkhorn algorithm to consider.')
    parser.add_argument('--geom-ensemble-type', type=str, default='wts', choices=['wts', 'acts'], help='Ensemble based on weights (wts) or activations (acts).')
    # --- activation-based alignment ---
    parser.add_argument('--act-bug', action='store_true', help='simulate the bug in ground metric calc for act based averaging')
    parser.add_argument('--standardize-acts', action='store_true', help='subtract mean and divide by standard deviation across the samples for use in act based alignment')
    parser.add_argument('--transform-acts', action='store_true', help='transform activations by transport map for later use in bi_avg mode ')
    parser.add_argument('--center-acts', action='store_true', help='subtract mean only across the samples for use in act based alignment')
    parser.add_argument('--prelu-acts', action='store_true', help='do activation based alignment based on pre-relu acts')
    parser.add_argument('--pool-acts', action='store_true', help='do activation based alignment based on pooling acts')
    parser.add_argument('--pool-relu', action='store_true', help='do relu first before pooling acts')
    parser.add_argument('--normalize-acts', action='store_true', help='normalize the vector of activations')
    parser.add_argument('--normalize-wts', action='store_true', help='normalize the vector of weights')
    # --- gromov-wasserstein ---
    parser.add_argument('--gromov', action='store_true', help='use gromov wasserstein distance and barycenters')
    parser.add_argument('--gromov-loss', type=str, default='square_loss', action='store', choices=['square_loss', 'kl_loss'], help='choice of loss function for gromov wasserstein computations')
    # --- logging ---
    parser.add_argument('--tensorboard-root', action='store', default='./tensorboard', type=str, help='Root directory of tensorboard logs')
    parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard to plot the loss values')
    parser.add_argument('--same-model', action='store', type=int, default=-1, help='Index of the same model to average with itself')
    parser.add_argument('--dist-normalize', action='store_true', help='normalize distances by act num samples')
    parser.add_argument('--update-acts', action='store_true', help='update acts during the alignment of model0')
    parser.add_argument('--past-correction', action='store_true', help='use the current weights aligned by multiplying with past transport map')
    parser.add_argument('--partial-reshape', action='store_true', help='partially reshape the conv layers in ground metric calculation')
    # --- data partitioning ---
    parser.add_argument('--choice', type=str, default='0 2 4 6 8', action='store', help='choice of how to partition the labels')
    parser.add_argument('--diff-init', action='store_true', help='different initialization for models in data separated mode')
    parser.add_argument('--partition-type', type=str, default='labels', action='store', choices=['labels', 'personalized', 'small_big'], help='type of partitioning of training set to carry out')
    parser.add_argument('--personal-class-idx', type=int, default=9, action='store', help='class index for personal data')
    parser.add_argument('--partition-dataloader', type=int, default=-1, action='store', help='data loader to use in data partitioned setting')
    parser.add_argument('--personal-split-frac', type=float, default=0.1, action='store', help='split fraction of rest of examples for personal data')
    parser.add_argument('--exact', action='store_true', help='compute exact optimal transport')
    parser.add_argument('--skip-personal-idx', action='store_true', help='skip personal data')
    parser.add_argument('--prediction-wts', action='store_true', help='use wts given by ensemble step for prediction ensembling')
    parser.add_argument('--width-ratio', type=float, default=1, action='store', help='ratio of the widths of the hidden layers between the two models')
    parser.add_argument('--proper-marginals', action='store_true', help='consider the marginals of transport map properly')
    parser.add_argument('--retrain-seed', type=int, default=-1, action='store', help='if reseed computations again in retrain')
    parser.add_argument('--no-random-trainloaders', action='store_true', help='get train loaders without any random transforms to ensure consistency')
    parser.add_argument('--reinit-trainloaders', action='store_true', help='reinit train loader when starting retraining of each model!')
    parser.add_argument('--second-model-name', type=str, default=None, action='store', help='name of second model!')
    parser.add_argument('--print-distances', action='store_true', help='print OT distances for every layer')
    parser.add_argument('--deterministic', action='store_true', help='do retrain in deterministic mode!')
    parser.add_argument('--skip-retrain', type=int, default=-1, action='store', help='which of the original models to skip retraining')
    parser.add_argument('--importance', type=str, default=None, action='store', help='importance measure to use for building probab mass! (options, l1, l2, l11, l12)')
    parser.add_argument('--unbalanced', action='store_true', help='use unbalanced OT')
    # --- distillation ---
    parser.add_argument('--temperature', default=20, type=float, help='distillation temperature for (default: 20)')
    parser.add_argument('--alpha', default=0.7, type=float, help='weight towards distillation loss (default: 0.7)')
    parser.add_argument('--dist-epochs', default=60, type=int, help='number of distillation epochs')
    parser.add_argument('--handle-skips', action='store_true', help='handle shortcut skips in resnet which decrease dimension')
    return parser
def range_serialize(range_instance: range) -> 'IOData':
    """Serialize a builtin ``range`` into an scqubits ``IOData`` record.

    Only the three defining integers (start/stop/step) are stored as
    attributes; the ndarray and nested-object payloads are left empty.
    """
    import scqubits.io_utils.fileio as io
    attributes = {
        'start': range_instance.start,
        'stop': range_instance.stop,
        'step': range_instance.step,
    }
    no_arrays: Dict[str, ndarray] = {}
    no_objects: Dict[str, object] = {}
    return io.IOData(type(range_instance).__name__, attributes, no_arrays, no_objects)
def add_pipeline_model_mapping(test_class, overwrite=False):
    """Insert or update ``pipeline_model_mapping`` in a model test class's source file.

    Computes the mapping string for ``test_class``, finds the anchor position
    inside the class body (an existing ``pipeline_model_mapping`` definition,
    otherwise the last ``all_model_classes`` / ``all_generative_model_classes``
    assignment), ensures the class inherits from ``PipelineTesterMixin``, and
    rewrites the module file on disk.

    Returns the inserted line (str) on success, or the tuple ``('', -1)`` on
    the early-exit paths where nothing is written (mixed return types are
    kept as-is for caller compatibility).
    """
    # Nothing to do if a mapping already exists and we may not overwrite it.
    if (getattr(test_class, 'pipeline_model_mapping', None) is not None):
        if (not overwrite):
            return ('', (- 1))
    line_to_add = get_pipeline_model_mapping_string(test_class)
    if (len(line_to_add) == 0):
        return ('', (- 1))
    line_to_add = (line_to_add + '\n')
    # Source lines of the class and its 1-based start line within the module.
    (class_lines, class_start_line_no) = inspect.getsourcelines(test_class)
    # Skip decorators/comments so class_lines[0] is the `class ...` line itself.
    for (idx, line) in enumerate(class_lines):
        if line.lstrip().startswith('class '):
            class_lines = class_lines[idx:]
            class_start_line_no += idx
            break
    class_end_line_no = ((class_start_line_no + len(class_lines)) - 1)
    start_idx = None
    indent_level = 0
    def_line = None
    # Locate the anchor statement; an existing mapping wins and stops the scan.
    for (idx, line) in enumerate(class_lines):
        if line.strip().startswith('all_model_classes = '):
            indent_level = (len(line) - len(line.lstrip()))
            start_idx = idx
        elif line.strip().startswith('all_generative_model_classes = '):
            indent_level = (len(line) - len(line.lstrip()))
            start_idx = idx
        elif line.strip().startswith('pipeline_model_mapping = '):
            indent_level = (len(line) - len(line.lstrip()))
            start_idx = idx
            def_line = line
            break
    if (start_idx is None):
        return ('', (- 1))
    end_idx = find_block_ending(class_lines, start_idx, indent_level)
    # Propagate any backend-availability guard (e.g. ` is_torch_available() `)
    # found in the anchor block into the new mapping line.
    r = re.compile('\\s(is_\\S+?_available\\(\\))\\s')
    for line in class_lines[start_idx:(end_idx + 1)]:
        backend_condition = r.search(line)
        if (backend_condition is not None):
            target = ((' ' + backend_condition[0][1:(- 1)]) + ' ')
            line_to_add = r.sub(target, line_to_add)
            break
    if (def_line is None):
        # No existing mapping: insert after the `all_*_classes` block.
        target_idx = end_idx
    else:
        # Existing mapping: insert where it was (its lines are blanked below).
        target_idx = (start_idx - 1)
    # Blank the anchor block's lines; they are filtered out before writing.
    # NOTE(review): this runs even when def_line is None, which also deletes
    # the `all_*_classes` block in that case — confirm this is intended.
    for idx in range(start_idx, (end_idx + 1)):
        class_lines[idx] = None
    parent_classes = [x.__name__ for x in test_class.__bases__]
    if ('PipelineTesterMixin' not in parent_classes):
        # Rebuild the class header so it also inherits from PipelineTesterMixin,
        # keeping unittest.TestCase last and fully qualified.
        _parent_classes = ([x for x in parent_classes if (x != 'TestCase')] + ['PipelineTesterMixin'])
        if ('TestCase' in parent_classes):
            _parent_classes.append('unittest.TestCase')
        parent_classes = ', '.join(_parent_classes)
        for (idx, line) in enumerate(class_lines):
            if line.strip().endswith('):'):
                # Drop the (possibly multi-line) original class header.
                for _idx in range((idx + 1)):
                    class_lines[_idx] = None
                break
        class_lines[0] = f'''class {test_class.__name__}({parent_classes}):
'''
    line_to_add = ((' ' * indent_level) + line_to_add)
    class_lines = ((class_lines[:(target_idx + 1)] + [line_to_add]) + class_lines[(target_idx + 1):])
    class_lines = [x for x in class_lines if (x is not None)]
    # Splice the updated class back into the full module source and rewrite it.
    module_lines = inspect.getsourcelines(inspect.getmodule(test_class))[0]
    module_lines = ((module_lines[:(class_start_line_no - 1)] + class_lines) + module_lines[class_end_line_no:])
    code = ''.join(module_lines)
    moddule_file = inspect.getsourcefile(test_class)
    with open(moddule_file, 'w', encoding='UTF-8', newline='\n') as fp:
        fp.write(code)
    return line_to_add
def concatenate(args, lines):
    """Concatenate downloaded file parts and verify their checksums.

    Each entry in *lines* is a whitespace-separated triple
    ``"<infile> <outfile> <md5>"``. The parts are catted together into
    ``outfile`` under ``args.save_path``, the MD5 of the result is compared
    against the expected digest, and the parts are removed on success.

    Raises:
        ValueError: if the MD5 of a concatenated file does not match.
    """
    for line in lines:
        infile = line.split()[0]
        outfile = line.split()[1]
        md5gt = line.split()[2]
        # NOTE(review): shell=True with string interpolation — presumably kept
        # so the shell can expand a glob in `infile` (e.g. "..._parta*").
        # `lines` must come from a trusted manifest, otherwise this is a
        # shell-injection risk; confirm before hardening.
        out = subprocess.call(('cat %s/%s > %s/%s' % (args.save_path, infile, args.save_path, outfile)), shell=True)
        md5ck = md5(('%s/%s' % (args.save_path, outfile)))
        if (md5ck == md5gt):
            print(('Checksum successful %s.' % outfile))
        else:
            raise ValueError(('Checksum failed %s.' % outfile))
        # Remove the now-merged input parts; subprocess return codes are ignored.
        out = subprocess.call(('rm %s/%s' % (args.save_path, infile)), shell=True)
def LF_left_punct(span):
    """Labeling function: a '+' immediately to the left signals non-negation.

    Looks one token to the left of the span's containing span and votes
    NON_NEGATED on a '+' there; abstains otherwise.
    """
    containing = get_containing_span(span)
    neighbor = get_left_span(containing, span.sentence, window=1)
    return NON_NEGATED if neighbor.text == '+' else ABSTAIN
def main(argv=None):
    """CLI entry point: print the detected encoding of each input file."""
    parser = argparse.ArgumentParser(
        description='Takes one or more file paths and reports their detected encodings')
    parser.add_argument(
        'input',
        help='File whose encoding we would like to determine. (default: stdin)',
        type=argparse.FileType('rb'),
        nargs='*',
        default=[(sys.stdin if PY2 else sys.stdin.buffer)])
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)
    for f in args.input:
        if f.isatty():
            # Interactive stdin: tell the user how to terminate input.
            print('You are running chardetect interactively. Press '
                  'CTRL-D twice at the start of a blank line to signal the '
                  'end of your input. If you want help, run chardetect '
                  '--help\n', file=sys.stderr)
        print(description_of(f, f.name))
def _sympysage_ynm(self):
    """Convert a SymPy ``Ynm`` (spherical harmonic) to its Sage equivalent."""
    from sage.functions.special import spherical_harmonic
    # Ynm carries exactly four arguments: n, m, theta, phi.
    n, m, theta, phi = (arg._sage_() for arg in self.args[:4])
    return spherical_harmonic(n, m, theta, phi)
def get_plugin_v3(module_name, sources, headers=None, source_dir=None, **build_kwargs):
    """Compile and load a PyTorch C++/CUDA extension plugin, with caching.

    Resolves source/header paths, reuses an in-process cache of already-loaded
    plugins, and (when all files live in one directory) keys an on-disk build
    cache by an MD5 digest of the source contents so rebuilds are skipped.
    Extra keyword arguments are forwarded to ``torch.utils.cpp_extension.load``.
    Relies on module-level ``verbosity`` and ``_cached_plugins``.
    """
    assert (verbosity in ['none', 'brief', 'full'])
    if (headers is None):
        headers = []
    if (source_dir is not None):
        sources = [os.path.join(source_dir, fname) for fname in sources]
        headers = [os.path.join(source_dir, fname) for fname in headers]
    # Fast path: plugin already built and imported in this process.
    if (module_name in _cached_plugins):
        return _cached_plugins[module_name]
    if (verbosity == 'full'):
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif (verbosity == 'brief'):
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
    verbose_build = (verbosity == 'full')
    try:
        # On Windows, make MSVC (cl.exe) reachable if it is not already on PATH.
        if ((os.name == 'nt') and (os.system('where cl.exe >nul 2>nul') != 0)):
            compiler_bindir = _find_compiler_bindir()
            if (compiler_bindir is None):
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            os.environ['PATH'] += (';' + compiler_bindir)
        # Clearing TORCH_CUDA_ARCH_LIST lets the extension builder target the
        # GPUs actually present rather than a preset architecture list.
        os.environ['TORCH_CUDA_ARCH_LIST'] = ''
        all_source_files = sorted((sources + headers))
        all_source_dirs = set((os.path.dirname(fname) for fname in all_source_files))
        # The content-hashed build cache is only used when every file shares
        # one directory (so a flat copy preserves relative includes).
        if (len(all_source_dirs) == 1):
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())
            source_digest = hash_md5.hexdigest()
            build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build)
            cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')
            if (not os.path.isdir(cached_build_dir)):
                # Populate a temp dir and atomically rename it into place so
                # concurrent processes cannot observe a half-written cache.
                tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
                os.makedirs(tmpdir)
                for src in all_source_files:
                    shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
                try:
                    os.replace(tmpdir, cached_build_dir)
                except OSError:
                    # Another process won the rename race; discard our copy,
                    # but re-raise if the cache dir still does not exist.
                    shutil.rmtree(tmpdir)
                    if (not os.path.isdir(cached_build_dir)):
                        raise
            cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir, verbose=verbose_build, sources=cached_sources, **build_kwargs)
        else:
            # Fall back to torch's default (non-content-hashed) build caching.
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
        module = importlib.import_module(module_name)
    except:
        if (verbosity == 'brief'):
            print('Failed!')
        raise
    if (verbosity == 'full'):
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif (verbosity == 'brief'):
        print('Done.')
    _cached_plugins[module_name] = module
    return module
def get_que_token(task, specific=False):
    """Return the question marker token, optionally specialized per task.

    With ``specific=True`` the token embeds the task name ('[que_<task>]');
    otherwise the generic '[que]' marker is returned.
    """
    return f'[que_{task}]' if specific else '[que]'
class DistanceMetric(Metric):
    """Metric measuring how far numeric completions lie from a target relation."""

    def evaluate_generation(self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService, eval_cache_path: str) -> List[Stat]:
        """Score one generation: sum of distances of each parseable completion
        from the relation, plus the fraction of completions that parsed as ints.
        """
        references = request_state.instance.references
        # Assumes exactly three references: (unused, relation string, relation
        # type) — TODO confirm against the NumeracyScenario that builds them.
        (_, rel_str, relation_type) = map((lambda _: _.output.text), references)
        input_text: str = request_state.instance.input.text
        # The datapoint is the last line of the prompt, delimiter-separated ints.
        datapoint_input = input_text.split('\n')[(- 1)]
        val = list(map(int, datapoint_input.split(NumeracyScenario.delimiter)))
        # Dispatch to a module-level helper named distance_<relation_type>.
        distance_func = globals()[f'distance_{relation_type}']
        result = 0.0
        num_valid = 0
        assert (request_state.result is not None)
        request_result: RequestResult = request_state.result
        for completion_sequence in request_result.completions:
            completion = completion_sequence.text.strip()
            try:
                # Tolerate thousands separators; skip unparseable completions.
                pred = int(completion.replace(',', ''))
            except Exception:
                continue
            point = (val + [pred])
            result += distance_func(point, rel_str)
            num_valid += 1
        # NOTE(review): raises ZeroDivisionError if completions is empty —
        # presumably the adapter always returns at least one; confirm.
        percent_valid = ((1.0 * num_valid) / len(request_result.completions))
        return [Stat(MetricName('distance')).add(result), Stat(MetricName('percent_valid')).add(percent_valid)]
class TestReporter(Reporter):
    """Reporter test double that forwards benchmark events to a test case."""

    # Keep pytest from collecting this helper class as a test.
    __test__ = False

    def __init__(self, test_case):
        super(TestReporter, self).__init__()
        self._test_case = test_case

    def start_run(self, run_id):
        self._test_case.start_run(run_id)

    def run_completed(self, run_id, statistics, cmdline):
        self._test_case.run_completed(run_id)

    def run_failed(self, _run_id, _cmdline, _return_code, _output):
        # Any failed run fails the owning test case outright.
        self._test_case.fail()

    def job_completed(self, run_ids):
        pass  # intentionally ignored by the test double

    def set_total_number_of_runs(self, num_runs):
        pass  # intentionally ignored by the test double
def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
    """MobileNet-V3 Small (minimal head), width 1.0, TF-ported weights.

    Forces TensorFlow-compatible hyperparameters (TF batch-norm epsilon and
    'same' padding) before delegating to the generic builder.
    """
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
def build_sam_vit_b(checkpoint=None):
    """Construct the ViT-B variant of SAM, optionally loading a checkpoint."""
    vit_b_config = dict(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
    )
    return _build_sam(checkpoint=checkpoint, **vit_b_config)
def eval(model, criterion, data, vocab_size):
    """Evaluate `model` over `data` and return (per-word loss, per-word accuracy).

    Puts the model in eval mode for the pass and restores train mode before
    returning. Loss/accuracy are accumulated via memoryEfficientLoss and
    normalized by the total number of target words.

    Fix: the original body updated `target` and `predictions` lists that were
    never initialized (and `pred` was never defined), raising NameError on the
    first batch; those dead accumulation lines are removed.
    """
    total_loss = 0
    total_words = 0
    total_num_correct = 0
    model.eval()
    for i in range(len(data)):
        batch = data[i]
        with torch.no_grad():
            # Forward pass only; no gradients are needed for evaluation.
            outputs = model(batch)
        # By convention the last element of the batch holds the targets.
        targets = batch[(- 1)]
        (loss, _, num_correct) = memoryEfficientLoss(outputs, targets, model, criterion, eval=True)
        total_loss += loss
        total_num_correct += num_correct
        total_words += targets.size(1)
    model.train()
    return ((total_loss / total_words), (total_num_correct / total_words))
class SEmodule(torch.nn.Module):
    """Squeeze-and-Excitation block over (batch, time, channel) inputs.

    A depthwise-separable conv refines the input, a global average pool
    squeezes it over time, and a two-layer bottleneck MLP produces per-channel
    gates that rescale the conv output.
    """

    def __init__(self, input_shape, inner_dim, activation=torch.nn.Sigmoid, norm=BatchNorm1d):
        super().__init__()
        self.inner_dim = inner_dim
        self.norm = norm
        self.activation = activation
        # input_shape is (batch, time, channels); only `chn` is used below.
        (bz, t, chn) = input_shape
        # Pointwise depthwise-separable conv + norm + activation (channel-preserving).
        self.conv = Sequential(input_shape=input_shape)
        self.conv.append(DepthwiseSeparableConv1d, out_channels=chn, kernel_size=1, stride=1)
        self.conv.append(self.norm)
        self.conv.append(self.activation())
        # Squeeze: pool the time dimension down to length 1.
        self.avg_pool = AdaptivePool(1)
        # Excitation: chn -> inner_dim -> chn with the chosen activation after
        # each linear layer (defaults to Sigmoid, giving gates in [0, 1]).
        self.bottleneck = Sequential(Linear(input_size=input_shape[(- 1)], n_neurons=self.inner_dim), self.activation(), Linear(input_size=self.inner_dim, n_neurons=chn), self.activation())

    def forward(self, x):
        """Apply the SE gating; returns a tensor shaped like the conv output."""
        (bz, t, chn) = x.shape
        x = self.conv(x)
        avg = self.avg_pool(x)
        avg = self.bottleneck(avg)
        # Broadcast the per-channel gates across every time step.
        context = avg.repeat(1, t, 1)
        return (x * context)
class SuffixPerturbation(TextPerturbation):
    """Appends a fixed suffix to the end of the text (rendered as '<text>, <suffix>')."""

    # Local import so this block is self-contained; harmless if `dataclass`
    # is also imported at module top (it merely adds a class attribute).
    from dataclasses import dataclass

    # Fix: the original had a bare `(frozen=True)` line — the `@dataclass`
    # decorator had been stripped, leaving a syntax error. Restored here.
    @dataclass(frozen=True)
    class Description(PerturbationDescription):
        # The suffix appended by the perturbation instance being described.
        suffix: str = ''

    name: str = 'style'

    def __init__(self, suffix: str):
        self._suffix: str = suffix

    def description(self) -> PerturbationDescription:
        """Return a frozen, hashable description of this perturbation."""
        return SuffixPerturbation.Description(name=self.name, suffix=self._suffix)

    def perturb(self, text: str, rng: Random) -> str:
        """Append the configured suffix; `rng` is unused (deterministic)."""
        return f'{text}, {self._suffix}'
def DM_273_17_1():
    """Return a (273, 17, 1)-difference matrix over the additive group Z/273Z."""
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
    # Start from an OA(17,17) and drop its constant rows.
    rows = orthogonal_array(17, 17)
    rows = [row for row in rows if any(x != row[0] for x in row)]
    # Map the symbols 0..16 onto the chosen base elements of Z/273Z.
    base = (1, 2, 4, 8, 16, 32, 64, 91, 117, 128, 137, 182, 195, 205, 234, 239, 256)
    rows = [[base[x] for x in row] for row in rows]
    # Append the all-zero row.
    rows.append([0] * 17)
    return (AdditiveCyclic(273), rows)
# Shared parametrization shapes, each a sextuple:
# [in_shape, out_shape, in_offset, out_offset, in_dims, out_dims].
_SHAPE_2D = [[4, 4], [4, 4], [0, 0], [0, 0], [0, 1], [0, 1]]
_SHAPE_3D = [[5, 5, 5], [5, 5, 5], [1, 3, 0], [2, 0, 1], [0, 2], [1, 2]]

# Fix: the decorator had been stripped down to a bare `.parametrize(...)`
# (a syntax error); the `@pytest.mark` prefix is restored here.
@pytest.mark.parametrize('implementation, dtype, size, shape, overwrite, getri', [
    pytest.param('MKL', np.float32, 4, _SHAPE_2D, False, True, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float64, 4, _SHAPE_2D, False, True, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float32, 4, _SHAPE_3D, False, True, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float64, 4, _SHAPE_3D, False, True, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float32, 4, _SHAPE_3D, True, True, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float64, 4, _SHAPE_3D, True, True, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float32, 4, _SHAPE_2D, False, False, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float64, 4, _SHAPE_2D, False, False, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float32, 4, _SHAPE_3D, False, False, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float64, 4, _SHAPE_3D, False, False, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float32, 4, _SHAPE_3D, True, False, marks=pytest.mark.mkl),
    pytest.param('MKL', np.float64, 4, _SHAPE_3D, True, False, marks=pytest.mark.mkl),
    pytest.param('OpenBLAS', np.float32, 4, _SHAPE_2D, False, True, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float64, 4, _SHAPE_2D, False, True, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float32, 4, _SHAPE_3D, False, True, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float64, 4, _SHAPE_3D, False, True, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float32, 4, _SHAPE_3D, True, True, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float64, 4, _SHAPE_3D, True, True, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float32, 4, _SHAPE_2D, False, False, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float64, 4, _SHAPE_2D, False, False, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float32, 4, _SHAPE_3D, False, False, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float64, 4, _SHAPE_3D, False, False, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float32, 4, _SHAPE_3D, True, False, marks=pytest.mark.lapack),
    pytest.param('OpenBLAS', np.float64, 4, _SHAPE_3D, True, False, marks=pytest.mark.lapack),
    pytest.param('cuSolverDn', np.float32, 4, _SHAPE_2D, False, False, marks=pytest.mark.gpu),
    pytest.param('cuSolverDn', np.float64, 4, _SHAPE_2D, False, False, marks=pytest.mark.gpu),
    pytest.param('cuSolverDn', np.float32, 4, _SHAPE_3D, False, False, marks=pytest.mark.gpu),
    pytest.param('cuSolverDn', np.float64, 4, _SHAPE_3D, False, False, marks=pytest.mark.gpu),
    pytest.param('cuSolverDn', np.float32, 4, _SHAPE_3D, True, False, marks=pytest.mark.gpu),
    pytest.param('cuSolverDn', np.float64, 4, _SHAPE_3D, True, False, marks=pytest.mark.gpu),
])
def test_inv(implementation, dtype, size, shape, overwrite, getri):
    """Build an SDFG computing a matrix inverse on a sub-view and compare it
    against numpy.linalg.inv, skipping gracefully when the backend library
    is missing or misconfigured.
    """
    # Module-level counter used to give each generated SDFG a unique name
    # (shadows the builtin `id`, matching the file's existing convention).
    global id
    id += 1
    in_shape = shape[0]
    out_shape = shape[1]
    in_offset = shape[2]
    out_offset = shape[3]
    in_dims = shape[4]
    out_dims = shape[5]
    # Sanity-check that the requested sub-views fit inside the containers.
    assert np.all((np.array(in_shape)[in_dims] >= size))
    assert np.all((np.array(out_shape)[out_dims] >= size))
    assert np.all((np.array(in_offset) < size))
    assert np.all((np.array(out_offset) < size))
    assert np.all(((np.array(in_offset)[in_dims] + size) <= np.array(in_shape)[in_dims]))
    assert np.all(((np.array(out_offset)[out_dims] + size) <= np.array(out_shape)[out_dims]))
    # Slice only along the matrix dimensions; fix the other axes at the offset.
    in_subset = tuple([(slice(o, (o + size)) if (i in in_dims) else o) for (i, o) in enumerate(in_offset)])
    if overwrite:
        out_subset = in_subset
    else:
        out_subset = tuple([(slice(o, (o + size)) if (i in out_dims) else o) for (i, o) in enumerate(out_offset)])
    in_subset_str = ','.join([('{b}:{e}'.format(b=o, e=(o + size)) if (i in in_dims) else str(o)) for (i, o) in enumerate(in_offset)])
    if overwrite:
        out_subset_str = in_subset_str
    else:
        out_subset_str = ','.join([('{b}:{e}'.format(b=o, e=(o + size)) if (i in out_dims) else str(o)) for (i, o) in enumerate(out_offset)])
    sdfg = make_sdfg(implementation, dtype, id, in_shape, out_shape, in_subset_str, out_subset_str, overwrite, getri)
    if (implementation == 'cuSolverDn'):
        sdfg.apply_gpu_transformations()
        sdfg.simplify()
    try:
        inv_sdfg = sdfg.compile()
    except (CompilerConfigurationError, CompilationError):
        # Backend not installed/misconfigured: skip rather than fail.
        warnings.warn('Configuration/compilation failed, library missing or misconfigured, skipping test for {}.'.format(implementation))
        return
    A0 = np.zeros(in_shape, dtype=dtype)
    A0[in_subset] = generate_matrix(size, dtype)
    A1 = np.copy(A0)
    if overwrite:
        A2 = A1
    else:
        A2 = np.zeros(out_shape, dtype=dtype)
    # Reference result from NumPy.
    A3 = np.linalg.inv(A0[in_subset])
    inv_sdfg(xin=A1, xout=A2, n=size)
    if (dtype == np.float32):
        rtol = 1e-07
        atol = 1e-07
    elif (dtype == np.float64):
        rtol = 1e-14
        atol = 1e-14
    else:
        raise NotImplementedError
    assert np.allclose(A2[out_subset], A3, rtol=rtol, atol=atol)
    if overwrite:
        # In-place mode must actually have modified the input buffer.
        assert (not np.array_equal(A0, A1))
class TokenGroup(object):
    """Owns a libclang token buffer and releases it on garbage collection.

    Individual tokens keep a reference to their group so the underlying
    memory outlives every token produced from it.
    """

    def __init__(self, tu, memory, count):
        self._tu, self._memory, self._count = tu, memory, count

    def __del__(self):
        # Hand the token buffer back to libclang exactly once.
        conf.lib.clang_disposeTokens(self._tu, self._memory, self._count)
def get_tokens(tu, extent):
    """Yield Token instances covering a source extent of translation unit *tu*.

    Tokenizes via libclang, then copies each raw token into a Python Token.
    Every yielded token holds a reference to a shared TokenGroup so the
    underlying buffer is only disposed after all tokens are collected.
    """
    tokens_memory = POINTER(Token)()
    tokens_count = c_uint()
    conf.lib.clang_tokenize(tu, extent, byref(tokens_memory), byref(tokens_count))
    count = int(tokens_count.value)
    if (count < 1):
        # Nothing tokenized: yield nothing (empty generator).
        return
    # Reinterpret the raw pointer as a fixed-size array of `count` Tokens.
    tokens_array = cast(tokens_memory, POINTER((Token * count))).contents
    # The group takes ownership of the buffer and disposes it in __del__.
    token_group = TokenGroup(tu, tokens_memory, tokens_count)
    for i in range(0, count):
        token = Token()
        token.int_data = tokens_array[i].int_data
        token.ptr_data = tokens_array[i].ptr_data
        token._tu = tu
        # Keep the group alive for as long as this token exists.
        token._group = token_group
        (yield token)
class BaseTextDetTargets():
    """Base class for text-detection target generators (polygon geometry helpers)."""

    def __init__(self):
        pass

    def point2line(self, xs, ys, point_1, point_2):
        """Distance from grid points (xs, ys) to the segment (point_1, point_2).

        Uses the triangle formed by the point and the two endpoints: inside
        the segment's span the perpendicular distance is returned; past an
        endpoint (obtuse angle at the segment) the distance to the nearer
        endpoint is used instead.
        """
        # Squared distances to the two endpoints and the segment length.
        a_square = (np.square((xs - point_1[0])) + np.square((ys - point_1[1])))
        b_square = (np.square((xs - point_2[0])) + np.square((ys - point_2[1])))
        c_square = (np.square((point_1[0] - point_2[0])) + np.square((point_1[1] - point_2[1])))
        # Law of cosines; eps guards division by zero for degenerate triangles.
        neg_cos_c = (((c_square - a_square) - b_square) / (np.finfo(np.float32).eps + (2 * np.sqrt((a_square * b_square)))))
        square_sin = (1 - np.square(neg_cos_c))
        square_sin = np.nan_to_num(square_sin)
        result = np.sqrt((((a_square * b_square) * square_sin) / (np.finfo(np.float32).eps + c_square)))
        # Obtuse case: fall back to the nearer endpoint distance.
        result[(neg_cos_c < 0)] = np.sqrt(np.fmin(a_square, b_square))[(neg_cos_c < 0)]
        return result

    def polygon_area(self, polygon):
        """Signed area of a polygon given as a flat (or (N,2)) point array
        (shoelace formula; sign depends on winding order)."""
        polygon = polygon.reshape((- 1), 2)
        edge = 0
        for i in range(polygon.shape[0]):
            next_index = ((i + 1) % polygon.shape[0])
            edge += ((polygon[(next_index, 0)] - polygon[(i, 0)]) * (polygon[(next_index, 1)] + polygon[(i, 1)]))
        return (edge / 2.0)

    def polygon_size(self, polygon):
        """(width, height) of the minimum-area rotated rectangle around the polygon."""
        poly = polygon.reshape((- 1), 2)
        rect = cv2.minAreaRect(poly.astype(np.int32))
        size = rect[1]
        return size

    def generate_kernels(self, img_size, text_polys, shrink_ratio, max_shrink=sys.maxsize, ignore_tags=None):
        """Rasterize shrunk text-instance kernels into an (h, w) label map.

        Each polygon is shrunk inward via pyclipper by a distance derived from
        its area/perimeter and `shrink_ratio`, then filled with the 1-based
        instance index. Instances whose shrink fails are marked in
        `ignore_tags` (when given) and skipped.

        Returns:
            (text_kernel, ignore_tags): the float32 label map and the
            (possibly updated) ignore flags.
        """
        assert isinstance(img_size, tuple)
        assert check_argument.is_2dlist(text_polys)
        assert isinstance(shrink_ratio, float)
        (h, w) = img_size
        text_kernel = np.zeros((h, w), dtype=np.float32)
        for (text_ind, poly) in enumerate(text_polys):
            instance = poly[0].reshape((- 1), 2).astype(np.int32)
            area = plg(instance).area
            peri = cv2.arcLength(instance, True)
            # Vatti-clipping shrink distance, capped at max_shrink.
            distance = min(int((((area * (1 - (shrink_ratio * shrink_ratio))) / (peri + 0.001)) + 0.5)), max_shrink)
            pco = pyclipper.PyclipperOffset()
            pco.AddPath(instance, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
            # Negative offset shrinks the polygon inward.
            shrunk = np.array(pco.Execute((- distance)))
            if ((len(shrunk) == 0) or (shrunk.size == 0)):
                # Polygon vanished under shrinking: ignore this instance.
                if (ignore_tags is not None):
                    ignore_tags[text_ind] = True
                continue
            try:
                shrunk = np.array(shrunk[0]).reshape((- 1), 2)
            except Exception as e:
                print_log(f'{shrunk} with error {e}')
                if (ignore_tags is not None):
                    ignore_tags[text_ind] = True
                continue
            # Fill with the 1-based instance id (0 stays background).
            cv2.fillPoly(text_kernel, [shrunk.astype(np.int32)], (text_ind + 1))
        return (text_kernel, ignore_tags)

    def generate_effective_mask(self, mask_size: tuple, polygons_ignore):
        """Return a uint8 mask of `mask_size` with ignored polygons zeroed out."""
        assert check_argument.is_2dlist(polygons_ignore)
        mask = np.ones(mask_size, dtype=np.uint8)
        for poly in polygons_ignore:
            instance = poly[0].reshape((- 1), 2).astype(np.int32).reshape(1, (- 1), 2)
            cv2.fillPoly(mask, instance, 0)
        return mask

    def generate_targets(self, results):
        # Subclasses must implement the actual target generation.
        raise NotImplementedError

    def __call__(self, results):
        results = self.generate_targets(results)
        return results
# Fix: the original line was a bare `_SEG_HEADS_REGISTRY.register()` call —
# the stripped '@' meant the class was never actually registered.
@_SEG_HEADS_REGISTRY.register()
class DeepLabV3Head(nn.Module):
    """DeepLabV3 semantic segmentation head: ASPP over one backbone feature map,
    a 1x1 predictor, and bilinear upsampling by the common stride."""

    def __init__(self, cfg, input_shape: Dict[(str, ShapeSpec)]):
        super().__init__()
        self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
        in_channels = [input_shape[f].channels for f in self.in_features]
        aspp_channels = cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS
        aspp_dilations = cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS
        self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
        num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE
        norm = cfg.MODEL.SEM_SEG_HEAD.NORM
        self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
        self.loss_type = cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE
        train_crop_size = cfg.INPUT.CROP.SIZE
        aspp_dropout = cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT
        use_depthwise_separable_conv = cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV
        # This head consumes exactly one feature map.
        assert (len(self.in_features) == 1)
        assert (len(in_channels) == 1)
        if cfg.INPUT.CROP.ENABLED:
            # With absolute cropping the ASPP image pooling can use a fixed
            # kernel equal to the feature-map size at training resolution.
            assert (cfg.INPUT.CROP.TYPE == 'absolute')
            (train_crop_h, train_crop_w) = train_crop_size
            if ((train_crop_h % self.common_stride) or (train_crop_w % self.common_stride)):
                raise ValueError('Crop size need to be divisible by output stride.')
            pool_h = (train_crop_h // self.common_stride)
            pool_w = (train_crop_w // self.common_stride)
            pool_kernel_size = (pool_h, pool_w)
        else:
            pool_kernel_size = None
        self.aspp = ASPP(in_channels[0], aspp_channels, aspp_dilations, norm=norm, activation=F.relu, pool_kernel_size=pool_kernel_size, dropout=aspp_dropout, use_depthwise_separable_conv=use_depthwise_separable_conv)
        self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
        nn.init.normal_(self.predictor.weight, 0, 0.001)
        nn.init.constant_(self.predictor.bias, 0)
        if (self.loss_type == 'cross_entropy'):
            self.loss = nn.CrossEntropyLoss(reduction='mean', ignore_index=self.ignore_value)
        elif (self.loss_type == 'hard_pixel_mining'):
            self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2)
        else:
            raise ValueError(('Unexpected loss type: %s' % self.loss_type))

    def forward(self, features, targets=None):
        """Return (None, losses) in training, (upsampled logits, {}) in inference."""
        x = features[self.in_features[0]]
        x = self.aspp(x)
        x = self.predictor(x)
        if self.training:
            return (None, self.losses(x, targets))
        else:
            x = F.interpolate(x, scale_factor=self.common_stride, mode='bilinear', align_corners=False)
            return (x, {})

    def losses(self, predictions, targets):
        """Upsample predictions to target resolution and apply the weighted loss."""
        predictions = F.interpolate(predictions, scale_factor=self.common_stride, mode='bilinear', align_corners=False)
        loss = self.loss(predictions, targets)
        losses = {'loss_sem_seg': (loss * self.loss_weight)}
        return losses
class LayerDecayValueAssigner(object):
    """Assigns per-layer learning-rate scale factors for ConvNeXt parameters."""

    def __init__(self, values):
        # values[i] is the LR scale applied to layer i.
        self.values = values

    def get_scale(self, layer_id):
        """Return the scale factor for the given layer index."""
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        """Map a parameter name to its layer index (delegates to the helper)."""
        return get_num_layer_for_convnext(var_name)
# Fix: the original `(reuse_venv=True)` line was a syntax error — the
# `@nox.session` decorator had been stripped; restored here.
# NOTE(review): assumes `nox` is imported at the top of this file — confirm.
@nox.session(reuse_venv=True)
def coverage(session):
    """Generate coverage reports: terminal summary, XML, and a fresh HTML tree."""
    session.install('--upgrade', 'pip')
    session.install('--upgrade', 'coverage[toml]')
    session.run('coverage', 'report')
    session.run('coverage', 'xml')
    # Remove any stale HTML report before regenerating it.
    htmlcov_path = (DIR / 'htmlcov')
    if htmlcov_path.exists():
        session.log(f'rm -r {htmlcov_path}')
        shutil.rmtree(htmlcov_path)
    session.run('coverage', 'html')
def stochastic_centers_matching(graph: Graph, node_weight_function: NodeWeightFunction, edge_weight_function: EdgeWeightFunction, L, P, uf: UnionFind, verbose=False, record_history=False, special_blocks=None, sb_names=None):
    """Coarsen `graph` down to (at most) L nodes by merging around chosen centers.

    Centers are drawn proportionally from nodes belonging to `special_blocks`
    (topping up with random nodes when fewer than L are found); non-center
    neighbors are then merged into centers — first forward along out-edges,
    then backward along in-edges — while avoiding cycle creation.

    Fix: the second warning message was missing its f-prefix, so
    '{L-total_found}' was printed literally instead of being interpolated.

    Returns:
        (prev_graph, matching, graph, uf, uf2) where `matching` is always None
        and uf/uf2 record the performed merges.
    """
    print('stochastic_centers_matching')
    # Snapshot the graph before any merging.
    prev_graph = Graph.from_other(graph)
    uf2 = UnionFind(elements=graph._nodes.keys())
    all_nodes = {n for n in graph.non_input_nodes}
    if (special_blocks is None):
        special_blocks = ()
    bb = special_blocks
    # NOTE(review): this overwrites the `sb_names` parameter unconditionally —
    # confirm the parameter is intentionally ignored.
    sb_names = [c.__name__ for c in bb]
    # Group nodes by the special block their scope mentions.
    found_nodes = {b: list() for b in sb_names}
    total_found = 0
    for n in graph.non_input_nodes:
        for b in sb_names:
            if ((b in n.scope) or (n.scope_to_hold_to and (b in n.scope_to_hold_to))):
                found_nodes[b].append(n)
                total_found += 1
    print(f'-I- Found {total_found} special blocks')
    pprint(found_nodes)
    if (total_found < L):
        warnings.warn(f'There are only {total_found} special blocks, but need to find {L} centers')
        warnings.warn(f'Finding {L-total_found} more random centers, all found special block centers will be centers')
    print('-I- assigning centers from special blocks')
    # Per-block center quota, proportional to block size.
    lengths = {b: math.floor((L * (len(nodes) / total_found))) for (b, nodes) in found_nodes.items()}
    total_basic_block_centers = sum(lengths.values())
    print(f'-I- total_basic_block_centers: {total_basic_block_centers}')
    print(f'-I- centers to assign in each basic block: {lengths}')
    hd = deque()
    centers = set()
    to_assign = L
    # Smallest blocks first so their (rounded-down) quotas are honored.
    sorted_iter = sorted(list(found_nodes.items()), key=(lambda x: len(x[1])))
    for (b_name, nodes) in sorted_iter:
        print(f'-I- Assigning centers in block {b_name}')
        L_tag = len(nodes)
        L_prop_int = lengths[b_name]
        # Spread the quota evenly over the block with a stride.
        jump = math.ceil((L_tag / L_prop_int))
        if (jump <= 0):
            continue
        for i in range(0, L_tag, jump):
            center = nodes[i]
            hd.append(center)
            centers.add(center)
            to_assign -= 1
            if (to_assign == 0):
                break
        if (to_assign == 0):
            break
    print(f'-I- Assigned total of {len(centers)} centers:')
    pprint(centers)
    if (to_assign > 0):
        # Top up with uniformly random non-center nodes.
        print(f'-I- Now, choosing {to_assign} more random centers')
        additional_centers = random.sample((all_nodes - centers), to_assign)
        for x in additional_centers:
            centers.add(x)
            hd.append(x)
        to_assign -= len(additional_centers)
        assert (to_assign == 0)
    print('-I- final centers:')
    print(hd)

    def inner_loop():
        # One pass over the center queue; merge a single forward neighbor.
        for i in range(len(hd)):
            u = hd.popleft()
            for v in sorted(u.out_edges, key=(lambda n: n.topo_sort_id)):
                if (v in centers):
                    continue
                if check_cycle2(graph, u, v):
                    # Merging would create a cycle: skip this neighbor.
                    continue
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                uf2.union(u.id, v.id)
                all_nodes.discard(v)
                hd.append(u)
                return True
            hd.append(u)
        return False
    history_sizes = []
    history_weights = []
    # Phase 1: merge out-neighbors into centers until L nodes remain.
    while (len(all_nodes) > L):
        merged_something = inner_loop()
        if (not merged_something):
            break
        if record_history:
            history_sizes.append((len(all_nodes) + 1))
        if verbose:
            print(f'Nodes: {len(all_nodes)} Centers: {len(hd)}')
    if (len(all_nodes) > L):
        print(f'Merged until {len(all_nodes)} Merging more, until {L} left')

        def inner_loop():
            # Phase-2 pass: merge a center into a non-center predecessor,
            # moving the center designation onto the predecessor.
            for i in range(len(hd)):
                v = hd.popleft()
                v: Node
                for u in sorted(v.in_edges, key=(lambda n: (- n.topo_sort_id))):
                    if (u not in all_nodes):
                        continue
                    if (u in centers):
                        continue
                    if check_cycle2(graph, u, v):
                        continue
                    graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                    uf.union(u.id, v.id)
                    uf2.union(u.id, v.id)
                    all_nodes.discard(v)
                    centers.discard(v)
                    centers.add(u)
                    hd.append(u)
                    return True
                hd.append(v)
            return False
        while (len(all_nodes) > L):
            merged_something = inner_loop()
            if (not merged_something):
                break
            if record_history:
                history_sizes.append((len(all_nodes) + 1))
            if verbose:
                print(f'Nodes: {len(all_nodes)} Centers: {len(hd)}')
    matching = None
    return (prev_graph, matching, graph, uf, uf2)
def test_win_check():
    """_win_check detects every three-in-a-row line and rejects non-winning boards."""
    E = -1  # empty cell marker
    # Boards with no completed line for player 1.
    for cells in (
        [E, E, E, E, E, E, E, E, E],
        [1, E, E, E, 1, E, 0, E, 0],
    ):
        assert (not _win_check(jnp.int32(cells), jnp.int32(1)))
    # All eight winning lines for player 1.
    winning_boards = [
        [1, E, E, E, 1, E, E, E, 1],  # main diagonal
        [E, E, 1, E, 1, E, 1, E, E],  # anti-diagonal
        [1, 1, 1, E, E, E, E, E, E],  # top row
        [E, E, E, 1, 1, 1, E, E, E],  # middle row
        [E, E, E, E, E, E, 1, 1, 1],  # bottom row
        [1, E, E, 1, E, E, 1, E, E],  # left column
        [E, 1, E, E, 1, E, E, 1, E],  # middle column
        [E, E, 1, E, E, 1, E, E, 1],  # right column
    ]
    for cells in winning_boards:
        assert _win_check(jnp.int32(cells), jnp.int32(1))
    # Player 0 winning the middle column.
    assert _win_check(jnp.int32([E, 0, E, E, 0, E, E, 0, E]), jnp.int32(0))
class CdfNormalizationCallback(Callback):
def __init__(self) -> None:
self.image_dist: (LogNormal | None) = None
self.pixel_dist: (LogNormal | None) = None
def setup(self, trainer: pl.Trainer, pl_module: AnomalyModule, stage: (str | None)=None) -> None:
del trainer, stage
if (not hasattr(pl_module, 'normalization_metrics')):
pl_module.normalization_metrics = AnomalyScoreDistribution().cpu()
elif (not isinstance(pl_module.normalization_metrics, AnomalyScoreDistribution)):
raise AttributeError(f'Expected normalization_metrics to be of type AnomalyScoreDistribution, got {type(pl_module.normalization_metrics)}')
def on_test_start(self, trainer: pl.Trainer, pl_module: AnomalyModule) -> None:
del trainer
if (pl_module.image_metrics is not None):
pl_module.image_metrics.set_threshold(0.5)
if (pl_module.pixel_metrics is not None):
pl_module.pixel_metrics.set_threshold(0.5)
def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: AnomalyModule) -> None:
logger.info('Collecting the statistics of the normal training data to normalize the scores.')
self._collect_stats(trainer, pl_module)
def on_validation_batch_end(self, trainer: pl.Trainer, pl_module: AnomalyModule, outputs: (STEP_OUTPUT | None), batch: Any, batch_idx: int, dataloader_idx: int) -> None:
del trainer, batch, batch_idx, dataloader_idx
self._standardize_batch(outputs, pl_module)
def on_test_batch_end(self, trainer: pl.Trainer, pl_module: AnomalyModule, outputs: (STEP_OUTPUT | None), batch: Any, batch_idx: int, dataloader_idx: int) -> None:
del trainer, batch, batch_idx, dataloader_idx
self._standardize_batch(outputs, pl_module)
self._normalize_batch(outputs, pl_module)
def on_predict_batch_end(self, trainer: pl.Trainer, pl_module: AnomalyModule, outputs: dict, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
del trainer, batch, batch_idx, dataloader_idx
self._standardize_batch(outputs, pl_module)
self._normalize_batch(outputs, pl_module)
outputs['pred_labels'] = (outputs['pred_scores'] >= 0.5)
def _collect_stats(self, trainer: pl.Trainer, pl_module: AnomalyModule) -> None:
predictions = Trainer(accelerator=trainer.accelerator, devices=trainer.num_devices).predict(model=self._create_inference_model(pl_module), dataloaders=trainer.datamodule.train_dataloader())
pl_module.normalization_metrics.reset()
for batch in predictions:
if ('pred_scores' in batch.keys()):
pl_module.normalization_metrics.update(anomaly_scores=batch['pred_scores'])
if ('anomaly_maps' in batch.keys()):
pl_module.normalization_metrics.update(anomaly_maps=batch['anomaly_maps'])
pl_module.normalization_metrics.compute()
def _create_inference_model(pl_module):
new_model = get_model(pl_module.hparams)
new_model.normalization_metrics = AnomalyScoreDistribution().cpu()
new_model.load_state_dict(pl_module.state_dict())
return new_model
@staticmethod
def _standardize_batch(outputs: STEP_OUTPUT, pl_module) -> None:
    """Standardize the batch's scores (and maps, when present) in place.

    NOTE(review): restored the ``@staticmethod`` decorator. The function takes no
    ``self`` parameter, yet callers invoke it as
    ``self._standardize_batch(outputs, pl_module)``; without the decorator that
    call passes three arguments to a two-parameter function.
    """
    # Move the collected statistics to the same device as the scores.
    stats = pl_module.normalization_metrics.to(outputs['pred_scores'].device)
    outputs['pred_scores'] = standardize(outputs['pred_scores'], stats.image_mean, stats.image_std)
    if 'anomaly_maps' in outputs.keys():
        # Pixel maps are re-centered at the image-level mean.
        outputs['anomaly_maps'] = standardize(outputs['anomaly_maps'], stats.pixel_mean, stats.pixel_std, center_at=stats.image_mean)
@staticmethod
def _normalize_batch(outputs: STEP_OUTPUT, pl_module: AnomalyModule) -> None:
    """Normalize the batch's scores (and maps, when present) by the learned thresholds, in place.

    NOTE(review): two fixes —
    1. Restored the ``@staticmethod`` decorator (no ``self`` parameter, but
       invoked as ``self._normalize_batch(outputs, pl_module)`` by callers).
    2. Removed a duplicated ``normalize`` call on ``anomaly_maps``: the original
       applied the normalization twice in a row, distorting the map values.
    """
    outputs['pred_scores'] = normalize(outputs['pred_scores'], pl_module.image_threshold.value)
    if 'anomaly_maps' in outputs.keys():
        outputs['anomaly_maps'] = normalize(outputs['anomaly_maps'], pl_module.pixel_threshold.value)
class CALayer(nn.Module):
    """Channel-attention layer.

    Each channel of the input is rescaled by a gate in (0, 1) computed from its
    global average activation through a 1x1-conv bottleneck followed by a sigmoid.
    """

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        # Squeeze: global spatial average pooling -> one value per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        bottleneck = channel // reduction
        # Excitation: channel -> channel/reduction -> channel, gated by a sigmoid.
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, bottleneck, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(bottleneck, channel, 1, padding=0, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        gate = self.conv_du(self.avg_pool(x))
        return x * gate
# NOTE(review): these look like architecture registrations that were flattened
# from decorator syntax (e.g. ``@register_model_architecture('transformer_lm', ...)``
# applied to the function below) — confirm that ``_model_architecture`` is
# defined/imported earlier in this file before relying on them.
_model_architecture('transformer_lm', 'transformer_lm_gbw')
_model_architecture('transformer_lm', 'transformer_lm_baevski_gbw')
def transformer_lm_baevski_gbw(args):
    """Fill in GBW-sized hyper-parameter defaults on ``args`` (values already
    present on ``args`` win), then delegate to ``transformer_lm_big``."""
    fallbacks = {
        'decoder_embed_dim': 512,
        'dropout': 0.1,
        'attention_dropout': 0.1,
        'no_decoder_final_norm': True,
    }
    for name, default in fallbacks.items():
        setattr(args, name, getattr(args, name, default))
    transformer_lm_big(args)
class Runtime():
    """Abstract runtime interface: concrete runtimes supply the aggregator,
    the collaborators, and a task-execution strategy.

    NOTE(review): ``aggregator`` and ``collaborators`` are each defined twice
    with different signatures; in plain Python the later definition simply
    shadows the earlier one. This pattern usually results from stripped
    ``@property`` / ``@<name>.setter`` decorators — confirm against the
    original source before calling these as methods.
    """

    def __init__(self):
        # No state of its own; concrete runtimes perform their own setup.
        pass

    def aggregator(self):
        # Getter-shaped accessor; must be provided by a subclass.
        raise NotImplementedError

    def aggregator(self, aggregator: Aggregator):
        # Setter-shaped accessor; shadows the zero-argument form above.
        raise NotImplementedError

    def collaborators(self):
        # Getter-shaped accessor; must be provided by a subclass.
        raise NotImplementedError

    def collaborators(self, collaborators: List[Collaborator]):
        # Setter-shaped accessor; shadows the zero-argument form above.
        raise NotImplementedError

    def execute_task(self, flspec_obj: FLSpec, f: Callable, parent_func: Callable, instance_snapshot: List[FLSpec]=[], **kwargs):
        # NOTE(review): the mutable default ``[]`` is a Python anti-pattern,
        # though harmless here because this stub always raises.
        raise NotImplementedError
def etl_starr_omop_program() -> None:
    """Command-line entry point: extract a patient database from a STARR-OMOP v5 source.

    Pipeline stages, each skipped when its output directory already exists so a
    failed run can be resumed:
      1. Convert the OMOP CSVs into an event collection.
      2. Convert events into raw patients.
      3. Apply Stanford-specific transformations (with a concept remap).
      4. Materialize the final patient-database extract.

    Raises:
        Exception: any pipeline failure is logged with its traceback and re-raised.
    """
    parser = argparse.ArgumentParser(description='An extraction tool for STARR-OMOP v5 sources')
    parser.add_argument('omop_source', type=str, help='Path of the folder to the omop source')
    parser.add_argument('target_location', type=str, help='The place to store the extract')
    # NOTE(review): ``default`` has no effect on a required positional argument;
    # drop it, or use ``nargs='?'`` if an optional temp location was intended.
    parser.add_argument('temp_location', type=str, help='The place to store temporary files', default=None)
    parser.add_argument('--num_threads', type=int, help='The number of threads to use', default=1)
    args = parser.parse_args()
    # Raise the soft open-file limit to the hard cap: the extraction holds many
    # files open concurrently.
    (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
    args.target_location = os.path.abspath(args.target_location)
    args.temp_location = os.path.abspath(args.temp_location)
    if not os.path.exists(args.target_location):
        os.mkdir(args.target_location)
    if not os.path.exists(args.temp_location):
        os.mkdir(args.temp_location)
    # Log to both a file inside the extract directory and the console.
    logFormatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s')
    rootLogger = logging.getLogger()
    fileHandler = logging.FileHandler(os.path.join(args.target_location, 'log'))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.INFO)
    rootLogger.info(f'Extracting from OMOP with arguments {args}')
    try:
        event_dir = os.path.join(args.temp_location, 'events')
        raw_patients_dir = os.path.join(args.temp_location, 'patients_raw')
        cleaned_patients_dir = os.path.join(args.temp_location, 'patients_cleaned')
        # Stage 1: CSVs -> events.
        if not os.path.exists(event_dir):
            rootLogger.info('Converting to events')
            stats_dict: Dict[str, Dict[str, int]] = {}
            event_collection = run_csv_extractors(args.omop_source, event_dir, get_omop_csv_extractors(), num_threads=args.num_threads, debug_folder=os.path.join(args.temp_location, 'lost_csv_rows'), stats_dict=stats_dict)
            rootLogger.info('Got converter statistics ' + str(stats_dict))
            with open(os.path.join(args.target_location, 'convert_stats.json'), 'w') as f:
                json.dump(stats_dict, f)
        else:
            rootLogger.info('Already converted to events, skipping')
            event_collection = EventCollection(event_dir)
        # Stage 2: events -> raw patients.
        if not os.path.exists(raw_patients_dir):
            rootLogger.info('Converting to patients')
            patient_collection = event_collection.to_patient_collection(raw_patients_dir, num_threads=args.num_threads)
        else:
            rootLogger.info('Already converted to patients, skipping')
            patient_collection = PatientCollection(raw_patients_dir)
        # Stage 3: clean patients with Stanford-specific transformations.
        if not os.path.exists(cleaned_patients_dir):
            # ``concept_remap.csv.zst`` is a zstd-compressed two-column CSV of
            # (source concept id, target concept id) pairs.
            concept_map = {}
            with io.TextIOWrapper(zstandard.ZstdDecompressor().stream_reader(open(os.path.join(args.omop_source, 'concept_remap.csv.zst'), 'rb'))) as f:
                for row in f:
                    (a, b) = [int(a) for a in row.split(',')]
                    concept_map[a] = b
            stats_dict = {}
            # Fixed typo in the log message ('Appling' -> 'Applying').
            rootLogger.info('Applying transformations')
            patient_collection = patient_collection.transform(cleaned_patients_dir, _get_stanford_transformations(concept_map), num_threads=args.num_threads, stats_dict=stats_dict)
            rootLogger.info('Got transform statistics ' + str(stats_dict))
            with open(os.path.join(args.target_location, 'transform_stats.json'), 'w') as f:
                json.dump(stats_dict, f)
        else:
            rootLogger.info('Already applied transformations, skipping')
            patient_collection = PatientCollection(cleaned_patients_dir)
        # Stage 4: materialize the final extract ('meta' marks completion).
        if not os.path.exists(os.path.join(args.target_location, 'meta')):
            rootLogger.info('Converting to extract')
            print('Converting to extract', datetime.datetime.now())
            patient_collection.to_patient_database(args.target_location, args.omop_source, num_threads=args.num_threads).close()
        else:
            rootLogger.info('Already converted to extract, skipping')
    except Exception as e:
        # Record the full traceback in the extract's log before propagating.
        rootLogger.critical(e, exc_info=True)
        raise e
class SawyerDoorUnlockEnv(SawyerXYZEnv):
    """Sawyer manipulation task: rotate a door's lock to the unlocked pose.

    The object position in the observation tracks the ``lockStartUnlock`` site,
    and the reward is reach-then-pull shaping toward ``_target_pos``.

    NOTE(review): restored three decorators that appear to have been stripped
    (see inline comments). Without them the bare ``_assert_task_is_set``
    statement raises ``NameError`` when the class body executes, and
    ``self.model_name`` in ``__init__`` passes a bound method where a file
    path string is expected.
    """

    def __init__(self):
        # Workspace bounds for the hand and the lock object.
        hand_low = (-0.5, 0.4, -0.15)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.8, 0.1)
        obj_high = (0.1, 0.85, 0.1)
        # Goal z is confined to a ~0.0002-wide band around 0.17.
        goal_low = (-0.1, 0.76, 0.1699)
        goal_high = (0.2, 0.81, 0.1701)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.85, 0.1]), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.goal = np.array([0, 0.85, 0.1])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property  # restored: __init__ reads ``self.model_name`` as an attribute
    def model_name(self):
        """Path of the MuJoCo XML model for this task."""
        return full_v1_path_for('sawyer_xyz/sawyer_door_lock.xml')

    @_assert_task_is_set  # restored: was a bare statement before ``step``
    def step(self, action):
        """Advance one control step; returns (obs, reward, done, info)."""
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        # Success is being within 5 cm of the unlock target.
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float(pullDist <= 0.05)}
        return (ob, reward, False, info)

    @property  # restored: accessed as a read-only attribute -- confirm against sibling envs
    def _target_site_config(self):
        # Show the unlock goal site; park the lock goal site far away.
        return [('goal_unlock', self._target_pos), ('goal_lock', np.array([10.0, 10.0, 10.0]))]

    def _get_pos_objects(self):
        # The lock's position is tracked via the ``lockStartUnlock`` site.
        return self._get_site_pos('lockStartUnlock')

    def _set_obj_xyz(self, pos):
        """Write the lock joint value (qpos index 9) and zero its velocity."""
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset the hand and the door/lock placement; return the initial observation."""
        self._reset_hand()
        door_pos = self.init_config['obj_init_pos']
        self.obj_init_pos = self.data.get_geom_xpos('lockGeom')
        self._target_pos = door_pos + np.array([0.1, -0.04, 0.07])
        if self.random_init:
            # Randomize the door placement and move the target with it.
            goal_pos = self._get_state_rand_vec()
            door_pos = goal_pos
            self._target_pos = goal_pos + np.array([0.1, -0.04, 0.07])
        self.sim.model.body_pos[self.model.body_name2id('door')] = door_pos
        self.sim.model.body_pos[self.model.body_name2id('lock')] = door_pos
        self._set_obj_xyz(1.5708)  # start locked: joint at ~pi/2 radians
        self.obj_init_pos = self.data.get_geom_xpos('lockGeom')
        # Worst-case pull distance, used to scale the pull reward.
        self.maxPullDist = np.linalg.norm(self._target_pos - self.obj_init_pos)
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = (rightFinger + leftFinger) / 2
        self.reachCompleted = False

    def compute_reward(self, actions, obs):
        """Return ``[reward, reachDist, pullDist]``.

        Pull shaping only activates once the finger midpoint is within 5 cm of
        the lock (``reachCompleted``); the reach term is always ``-reachDist``.
        """
        del actions  # reward depends only on the observation
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = (rightFinger + leftFinger) / 2
        pullGoal = self._target_pos
        pullDist = np.linalg.norm(objPos - pullGoal)
        reachDist = np.linalg.norm(objPos - fingerCOM)
        reachRew = -reachDist
        self.reachCompleted = reachDist < 0.05

        def pullReward():
            # c2/c3 control the widths of two sharp exponential bonuses near the goal.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = (1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(-(pullDist ** 2) / c2) + np.exp(-(pullDist ** 2) / c3)))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                return 0
        pullRew = pullReward()
        reward = reachRew + pullRew
        return [reward, reachDist, pullDist]
class KitModel(nn.Module):
def __init__(self, weight_file):
    """Build all layers and load their parameters from ``weight_file``.

    The layer names (``1x1``, ``3x3_reduce``, ``double_3x3``, ``proj``) and the
    repeated 1x1-reduce / 3x3 / avg-pool-project branch structure suggest a
    converted Inception-with-BN style classifier -- confirm against the original
    model definition. ``self.__conv`` / ``self.__batch_normalization`` /
    ``self.__dense`` are factory helpers defined elsewhere in this class
    (name-mangled to ``_KitModel__*``) that build each module, presumably
    initializing it from ``__weights_dict`` -- verify in their definitions.

    Args:
        weight_file: path passed to ``load_weights`` to obtain the weight dict.
    """
    super(KitModel, self).__init__()
    # Inside a class body, ``__weights_dict`` is name-mangled to
    # ``_KitModel__weights_dict``; the ``global`` statement refers to that
    # mangled module-level name.
    global __weights_dict
    __weights_dict = load_weights(weight_file)
    # --- Stem: 7x7/2 conv, 1x1 reduce, 3x3 conv (each followed by BN). ---
    self.conv_conv1 = self.__conv(2, name='conv_conv1', in_channels=3, out_channels=96, kernel_size=(7, 7), stride=(2, 2), groups=1, bias=True)
    self.bn_conv1 = self.__batch_normalization(2, 'bn_conv1', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_conv2red = self.__conv(2, name='conv_conv2red', in_channels=96, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_conv2red = self.__batch_normalization(2, 'bn_conv2red', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_conv2 = self.__conv(2, name='conv_conv2', in_channels=128, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_conv2 = self.__batch_normalization(2, 'bn_conv2', num_features=288, eps=9.e-05, momentum=0.)
    # --- Block 3a: four branches (1x1, 3x3, double-3x3, pool-proj) on 288 channels. ---
    self.conv_3a_1x1 = self.__conv(2, name='conv_3a_1x1', in_channels=288, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_3a_3x3_reduce = self.__conv(2, name='conv_3a_3x3_reduce', in_channels=288, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_3a_double_3x3_reduce = self.__conv(2, name='conv_3a_double_3x3_reduce', in_channels=288, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_3a_1x1 = self.__batch_normalization(2, 'bn_3a_1x1', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_3a_3x3_reduce = self.__batch_normalization(2, 'bn_3a_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_3a_double_3x3_reduce = self.__batch_normalization(2, 'bn_3a_double_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_3a_proj = self.__conv(2, name='conv_3a_proj', in_channels=288, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_3a_proj = self.__batch_normalization(2, 'bn_3a_proj', num_features=48, eps=9.e-05, momentum=0.)
    self.conv_3a_3x3 = self.__conv(2, name='conv_3a_3x3', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_3a_double_3x3_0 = self.__conv(2, name='conv_3a_double_3x3_0', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_3a_3x3 = self.__batch_normalization(2, 'bn_3a_3x3', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_3a_double_3x3_0 = self.__batch_normalization(2, 'bn_3a_double_3x3_0', num_features=144, eps=9.e-05, momentum=0.)
    self.conv_3a_double_3x3_1 = self.__conv(2, name='conv_3a_double_3x3_1', in_channels=144, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_3a_double_3x3_1 = self.__batch_normalization(2, 'bn_3a_double_3x3_1', num_features=144, eps=9.e-05, momentum=0.)
    # --- Block 3b: same branch structure on 384 channels. ---
    self.conv_3b_1x1 = self.__conv(2, name='conv_3b_1x1', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_3b_3x3_reduce = self.__conv(2, name='conv_3b_3x3_reduce', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_3b_double_3x3_reduce = self.__conv(2, name='conv_3b_double_3x3_reduce', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_3b_1x1 = self.__batch_normalization(2, 'bn_3b_1x1', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_3b_3x3_reduce = self.__batch_normalization(2, 'bn_3b_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_3b_double_3x3_reduce = self.__batch_normalization(2, 'bn_3b_double_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_3b_proj = self.__conv(2, name='conv_3b_proj', in_channels=384, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_3b_proj = self.__batch_normalization(2, 'bn_3b_proj', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_3b_3x3 = self.__conv(2, name='conv_3b_3x3', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_3b_double_3x3_0 = self.__conv(2, name='conv_3b_double_3x3_0', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_3b_3x3 = self.__batch_normalization(2, 'bn_3b_3x3', num_features=144, eps=9.e-05, momentum=0.)
    self.bn_3b_double_3x3_0 = self.__batch_normalization(2, 'bn_3b_double_3x3_0', num_features=144, eps=9.e-05, momentum=0.)
    self.conv_3b_double_3x3_1 = self.__conv(2, name='conv_3b_double_3x3_1', in_channels=144, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_3b_double_3x3_1 = self.__batch_normalization(2, 'bn_3b_double_3x3_1', num_features=144, eps=9.e-05, momentum=0.)
    # --- Block 3c: reduction block, no 1x1/proj branches; 3x3 paths use stride 2. ---
    self.conv_3c_3x3_reduce = self.__conv(2, name='conv_3c_3x3_reduce', in_channels=480, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_3c_double_3x3_reduce = self.__conv(2, name='conv_3c_double_3x3_reduce', in_channels=480, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_3c_3x3_reduce = self.__batch_normalization(2, 'bn_3c_3x3_reduce', num_features=192, eps=9.e-05, momentum=0.)
    self.bn_3c_double_3x3_reduce = self.__batch_normalization(2, 'bn_3c_double_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_3c_3x3 = self.__conv(2, name='conv_3c_3x3', in_channels=192, out_channels=240, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
    self.conv_3c_double_3x3_0 = self.__conv(2, name='conv_3c_double_3x3_0', in_channels=96, out_channels=144, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_3c_3x3 = self.__batch_normalization(2, 'bn_3c_3x3', num_features=240, eps=9.e-05, momentum=0.)
    self.bn_3c_double_3x3_0 = self.__batch_normalization(2, 'bn_3c_double_3x3_0', num_features=144, eps=9.e-05, momentum=0.)
    self.conv_3c_double_3x3_1 = self.__conv(2, name='conv_3c_double_3x3_1', in_channels=144, out_channels=144, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
    self.bn_3c_double_3x3_1 = self.__batch_normalization(2, 'bn_3c_double_3x3_1', num_features=144, eps=9.e-05, momentum=0.)
    # --- Block 4a: four branches on 864 channels. ---
    self.conv_4a_1x1 = self.__conv(2, name='conv_4a_1x1', in_channels=864, out_channels=224, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4a_3x3_reduce = self.__conv(2, name='conv_4a_3x3_reduce', in_channels=864, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4a_double_3x3_reduce = self.__conv(2, name='conv_4a_double_3x3_reduce', in_channels=864, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4a_1x1 = self.__batch_normalization(2, 'bn_4a_1x1', num_features=224, eps=9.e-05, momentum=0.)
    self.bn_4a_3x3_reduce = self.__batch_normalization(2, 'bn_4a_3x3_reduce', num_features=64, eps=9.e-05, momentum=0.)
    self.bn_4a_double_3x3_reduce = self.__batch_normalization(2, 'bn_4a_double_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_4a_proj = self.__conv(2, name='conv_4a_proj', in_channels=864, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4a_proj = self.__batch_normalization(2, 'bn_4a_proj', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4a_3x3 = self.__conv(2, name='conv_4a_3x3', in_channels=64, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_4a_double_3x3_0 = self.__conv(2, name='conv_4a_double_3x3_0', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4a_3x3 = self.__batch_normalization(2, 'bn_4a_3x3', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_4a_double_3x3_0 = self.__batch_normalization(2, 'bn_4a_double_3x3_0', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4a_double_3x3_1 = self.__conv(2, name='conv_4a_double_3x3_1', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4a_double_3x3_1 = self.__batch_normalization(2, 'bn_4a_double_3x3_1', num_features=128, eps=9.e-05, momentum=0.)
    # --- Block 4b: four branches on 576 channels. ---
    self.conv_4b_1x1 = self.__conv(2, name='conv_4b_1x1', in_channels=576, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4b_3x3_reduce = self.__conv(2, name='conv_4b_3x3_reduce', in_channels=576, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4b_double_3x3_reduce = self.__conv(2, name='conv_4b_double_3x3_reduce', in_channels=576, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4b_1x1 = self.__batch_normalization(2, 'bn_4b_1x1', num_features=192, eps=9.e-05, momentum=0.)
    self.bn_4b_3x3_reduce = self.__batch_normalization(2, 'bn_4b_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_4b_double_3x3_reduce = self.__batch_normalization(2, 'bn_4b_double_3x3_reduce', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_4b_proj = self.__conv(2, name='conv_4b_proj', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4b_proj = self.__batch_normalization(2, 'bn_4b_proj', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4b_3x3 = self.__conv(2, name='conv_4b_3x3', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_4b_double_3x3_0 = self.__conv(2, name='conv_4b_double_3x3_0', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4b_3x3 = self.__batch_normalization(2, 'bn_4b_3x3', num_features=128, eps=9.e-05, momentum=0.)
    self.bn_4b_double_3x3_0 = self.__batch_normalization(2, 'bn_4b_double_3x3_0', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4b_double_3x3_1 = self.__conv(2, name='conv_4b_double_3x3_1', in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4b_double_3x3_1 = self.__batch_normalization(2, 'bn_4b_double_3x3_1', num_features=128, eps=9.e-05, momentum=0.)
    # --- Block 4c: four branches on 576 channels. ---
    self.conv_4c_1x1 = self.__conv(2, name='conv_4c_1x1', in_channels=576, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4c_3x3_reduce = self.__conv(2, name='conv_4c_3x3_reduce', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4c_double_3x3_reduce = self.__conv(2, name='conv_4c_double_3x3_reduce', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4c_1x1 = self.__batch_normalization(2, 'bn_4c_1x1', num_features=160, eps=9.e-05, momentum=0.)
    self.bn_4c_3x3_reduce = self.__batch_normalization(2, 'bn_4c_3x3_reduce', num_features=128, eps=9.e-05, momentum=0.)
    self.bn_4c_double_3x3_reduce = self.__batch_normalization(2, 'bn_4c_double_3x3_reduce', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4c_proj = self.__conv(2, name='conv_4c_proj', in_channels=576, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4c_proj = self.__batch_normalization(2, 'bn_4c_proj', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4c_3x3 = self.__conv(2, name='conv_4c_3x3', in_channels=128, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_4c_double_3x3_0 = self.__conv(2, name='conv_4c_double_3x3_0', in_channels=128, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4c_3x3 = self.__batch_normalization(2, 'bn_4c_3x3', num_features=160, eps=9.e-05, momentum=0.)
    self.bn_4c_double_3x3_0 = self.__batch_normalization(2, 'bn_4c_double_3x3_0', num_features=160, eps=9.e-05, momentum=0.)
    self.conv_4c_double_3x3_1 = self.__conv(2, name='conv_4c_double_3x3_1', in_channels=160, out_channels=160, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4c_double_3x3_1 = self.__batch_normalization(2, 'bn_4c_double_3x3_1', num_features=160, eps=9.e-05, momentum=0.)
    # --- Block 4d: four branches on 608 channels. ---
    self.conv_4d_1x1 = self.__conv(2, name='conv_4d_1x1', in_channels=608, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4d_3x3_reduce = self.__conv(2, name='conv_4d_3x3_reduce', in_channels=608, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4d_double_3x3_reduce = self.__conv(2, name='conv_4d_double_3x3_reduce', in_channels=608, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4d_1x1 = self.__batch_normalization(2, 'bn_4d_1x1', num_features=96, eps=9.e-05, momentum=0.)
    self.bn_4d_3x3_reduce = self.__batch_normalization(2, 'bn_4d_3x3_reduce', num_features=128, eps=9.e-05, momentum=0.)
    self.bn_4d_double_3x3_reduce = self.__batch_normalization(2, 'bn_4d_double_3x3_reduce', num_features=160, eps=9.e-05, momentum=0.)
    self.conv_4d_proj = self.__conv(2, name='conv_4d_proj', in_channels=608, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4d_proj = self.__batch_normalization(2, 'bn_4d_proj', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_4d_3x3 = self.__conv(2, name='conv_4d_3x3', in_channels=128, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_4d_double_3x3_0 = self.__conv(2, name='conv_4d_double_3x3_0', in_channels=160, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4d_3x3 = self.__batch_normalization(2, 'bn_4d_3x3', num_features=192, eps=9.e-05, momentum=0.)
    self.bn_4d_double_3x3_0 = self.__batch_normalization(2, 'bn_4d_double_3x3_0', num_features=96, eps=9.e-05, momentum=0.)
    self.conv_4d_double_3x3_1 = self.__conv(2, name='conv_4d_double_3x3_1', in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4d_double_3x3_1 = self.__batch_normalization(2, 'bn_4d_double_3x3_1', num_features=96, eps=9.e-05, momentum=0.)
    # --- Block 4e: reduction block on 512 channels; 3x3 paths use stride 2. ---
    self.conv_4e_3x3_reduce = self.__conv(2, name='conv_4e_3x3_reduce', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_4e_double_3x3_reduce = self.__conv(2, name='conv_4e_double_3x3_reduce', in_channels=512, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_4e_3x3_reduce = self.__batch_normalization(2, 'bn_4e_3x3_reduce', num_features=128, eps=9.e-05, momentum=0.)
    self.bn_4e_double_3x3_reduce = self.__batch_normalization(2, 'bn_4e_double_3x3_reduce', num_features=192, eps=9.e-05, momentum=0.)
    self.conv_4e_3x3 = self.__conv(2, name='conv_4e_3x3', in_channels=128, out_channels=192, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
    self.conv_4e_double_3x3_0 = self.__conv(2, name='conv_4e_double_3x3_0', in_channels=192, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_4e_3x3 = self.__batch_normalization(2, 'bn_4e_3x3', num_features=192, eps=9.e-05, momentum=0.)
    self.bn_4e_double_3x3_0 = self.__batch_normalization(2, 'bn_4e_double_3x3_0', num_features=256, eps=9.e-05, momentum=0.)
    self.conv_4e_double_3x3_1 = self.__conv(2, name='conv_4e_double_3x3_1', in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(2, 2), groups=1, bias=True)
    self.bn_4e_double_3x3_1 = self.__batch_normalization(2, 'bn_4e_double_3x3_1', num_features=256, eps=9.e-05, momentum=0.)
    # --- Block 5a: four branches on 960 channels. ---
    self.conv_5a_1x1 = self.__conv(2, name='conv_5a_1x1', in_channels=960, out_channels=352, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_5a_3x3_reduce = self.__conv(2, name='conv_5a_3x3_reduce', in_channels=960, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_5a_double_3x3_reduce = self.__conv(2, name='conv_5a_double_3x3_reduce', in_channels=960, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_5a_1x1 = self.__batch_normalization(2, 'bn_5a_1x1', num_features=352, eps=9.e-05, momentum=0.)
    self.bn_5a_3x3_reduce = self.__batch_normalization(2, 'bn_5a_3x3_reduce', num_features=192, eps=9.e-05, momentum=0.)
    self.bn_5a_double_3x3_reduce = self.__batch_normalization(2, 'bn_5a_double_3x3_reduce', num_features=160, eps=9.e-05, momentum=0.)
    self.conv_5a_proj = self.__conv(2, name='conv_5a_proj', in_channels=960, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_5a_proj = self.__batch_normalization(2, 'bn_5a_proj', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_5a_3x3 = self.__conv(2, name='conv_5a_3x3', in_channels=192, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_5a_double_3x3_0 = self.__conv(2, name='conv_5a_double_3x3_0', in_channels=160, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_5a_3x3 = self.__batch_normalization(2, 'bn_5a_3x3', num_features=320, eps=9.e-05, momentum=0.)
    self.bn_5a_double_3x3_0 = self.__batch_normalization(2, 'bn_5a_double_3x3_0', num_features=224, eps=9.e-05, momentum=0.)
    self.conv_5a_double_3x3_1 = self.__conv(2, name='conv_5a_double_3x3_1', in_channels=224, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_5a_double_3x3_1 = self.__batch_normalization(2, 'bn_5a_double_3x3_1', num_features=224, eps=9.e-05, momentum=0.)
    # --- Block 5b: four branches on 1024 channels. ---
    self.conv_5b_1x1 = self.__conv(2, name='conv_5b_1x1', in_channels=1024, out_channels=352, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_5b_3x3_reduce = self.__conv(2, name='conv_5b_3x3_reduce', in_channels=1024, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.conv_5b_double_3x3_reduce = self.__conv(2, name='conv_5b_double_3x3_reduce', in_channels=1024, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_5b_1x1 = self.__batch_normalization(2, 'bn_5b_1x1', num_features=352, eps=9.e-05, momentum=0.)
    self.bn_5b_3x3_reduce = self.__batch_normalization(2, 'bn_5b_3x3_reduce', num_features=192, eps=9.e-05, momentum=0.)
    self.bn_5b_double_3x3_reduce = self.__batch_normalization(2, 'bn_5b_double_3x3_reduce', num_features=192, eps=9.e-05, momentum=0.)
    self.conv_5b_proj = self.__conv(2, name='conv_5b_proj', in_channels=1024, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
    self.bn_5b_proj = self.__batch_normalization(2, 'bn_5b_proj', num_features=128, eps=9.e-05, momentum=0.)
    self.conv_5b_3x3 = self.__conv(2, name='conv_5b_3x3', in_channels=192, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.conv_5b_double_3x3_0 = self.__conv(2, name='conv_5b_double_3x3_0', in_channels=192, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_5b_3x3 = self.__batch_normalization(2, 'bn_5b_3x3', num_features=320, eps=9.e-05, momentum=0.)
    self.bn_5b_double_3x3_0 = self.__batch_normalization(2, 'bn_5b_double_3x3_0', num_features=224, eps=9.e-05, momentum=0.)
    self.conv_5b_double_3x3_1 = self.__conv(2, name='conv_5b_double_3x3_1', in_channels=224, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
    self.bn_5b_double_3x3_1 = self.__batch_normalization(2, 'bn_5b_double_3x3_1', num_features=224, eps=9.e-05, momentum=0.)
    # --- Classifier head: 21841 outputs -- presumably the full ImageNet-21k
    # label set; confirm against the converted model's metadata. ---
    self.fc1 = self.__dense(name='fc1', in_features=1024, out_features=21841, bias=True)
def forward(self, x):
    """Forward pass of this generated BN-Inception-style network.

    Each inception block concatenates four branches along the channel
    axis: 1x1, 3x3 (via a 1x1 reduce), double-3x3 (via a 1x1 reduce),
    and a pooled projection.  The "c"/"e" blocks are stride-2 reduction
    blocks with a max-pool branch instead of 1x1/projection branches.

    Args:
        x: input image batch, NCHW.

    Returns:
        Class probabilities from the final softmax over ``fc1`` logits.
    """
    # --- Stem: 7x7 conv -> pool -> 1x1 reduce -> 3x3 conv -> pool ---
    conv_conv1_pad = F.pad(x, (3, 3, 3, 3))
    conv_conv1 = self.conv_conv1(conv_conv1_pad)
    bn_conv1 = self.bn_conv1(conv_conv1)
    relu_conv1 = F.relu(bn_conv1)
    pool1 = F.max_pool2d(relu_conv1, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    conv_conv2red = self.conv_conv2red(pool1)
    bn_conv2red = self.bn_conv2red(conv_conv2red)
    relu_conv2red = F.relu(bn_conv2red)
    conv_conv2_pad = F.pad(relu_conv2red, (1, 1, 1, 1))
    conv_conv2 = self.conv_conv2(conv_conv2_pad)
    bn_conv2 = self.bn_conv2(conv_conv2)
    relu_conv2 = F.relu(bn_conv2)
    pool2 = F.max_pool2d(relu_conv2, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    # --- Inception 3a ---
    conv_3a_1x1 = self.conv_3a_1x1(pool2)
    conv_3a_3x3_reduce = self.conv_3a_3x3_reduce(pool2)
    conv_3a_double_3x3_reduce = self.conv_3a_double_3x3_reduce(pool2)
    avg_pool_3a_pool = F.avg_pool2d(pool2, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_3a_1x1 = self.bn_3a_1x1(conv_3a_1x1)
    bn_3a_3x3_reduce = self.bn_3a_3x3_reduce(conv_3a_3x3_reduce)
    bn_3a_double_3x3_reduce = self.bn_3a_double_3x3_reduce(conv_3a_double_3x3_reduce)
    conv_3a_proj = self.conv_3a_proj(avg_pool_3a_pool)
    relu_3a_1x1 = F.relu(bn_3a_1x1)
    relu_3a_3x3_reduce = F.relu(bn_3a_3x3_reduce)
    relu_3a_double_3x3_reduce = F.relu(bn_3a_double_3x3_reduce)
    bn_3a_proj = self.bn_3a_proj(conv_3a_proj)
    conv_3a_3x3_pad = F.pad(relu_3a_3x3_reduce, (1, 1, 1, 1))
    conv_3a_3x3 = self.conv_3a_3x3(conv_3a_3x3_pad)
    conv_3a_double_3x3_0_pad = F.pad(relu_3a_double_3x3_reduce, (1, 1, 1, 1))
    conv_3a_double_3x3_0 = self.conv_3a_double_3x3_0(conv_3a_double_3x3_0_pad)
    relu_3a_proj = F.relu(bn_3a_proj)
    bn_3a_3x3 = self.bn_3a_3x3(conv_3a_3x3)
    bn_3a_double_3x3_0 = self.bn_3a_double_3x3_0(conv_3a_double_3x3_0)
    relu_3a_3x3 = F.relu(bn_3a_3x3)
    relu_3a_double_3x3_0 = F.relu(bn_3a_double_3x3_0)
    conv_3a_double_3x3_1_pad = F.pad(relu_3a_double_3x3_0, (1, 1, 1, 1))
    conv_3a_double_3x3_1 = self.conv_3a_double_3x3_1(conv_3a_double_3x3_1_pad)
    bn_3a_double_3x3_1 = self.bn_3a_double_3x3_1(conv_3a_double_3x3_1)
    relu_3a_double_3x3_1 = F.relu(bn_3a_double_3x3_1)
    ch_concat_3a_chconcat = torch.cat((relu_3a_1x1, relu_3a_3x3, relu_3a_double_3x3_1, relu_3a_proj), 1)
    # --- Inception 3b ---
    conv_3b_1x1 = self.conv_3b_1x1(ch_concat_3a_chconcat)
    conv_3b_3x3_reduce = self.conv_3b_3x3_reduce(ch_concat_3a_chconcat)
    conv_3b_double_3x3_reduce = self.conv_3b_double_3x3_reduce(ch_concat_3a_chconcat)
    avg_pool_3b_pool = F.avg_pool2d(ch_concat_3a_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_3b_1x1 = self.bn_3b_1x1(conv_3b_1x1)
    bn_3b_3x3_reduce = self.bn_3b_3x3_reduce(conv_3b_3x3_reduce)
    bn_3b_double_3x3_reduce = self.bn_3b_double_3x3_reduce(conv_3b_double_3x3_reduce)
    conv_3b_proj = self.conv_3b_proj(avg_pool_3b_pool)
    relu_3b_1x1 = F.relu(bn_3b_1x1)
    relu_3b_3x3_reduce = F.relu(bn_3b_3x3_reduce)
    relu_3b_double_3x3_reduce = F.relu(bn_3b_double_3x3_reduce)
    bn_3b_proj = self.bn_3b_proj(conv_3b_proj)
    conv_3b_3x3_pad = F.pad(relu_3b_3x3_reduce, (1, 1, 1, 1))
    conv_3b_3x3 = self.conv_3b_3x3(conv_3b_3x3_pad)
    conv_3b_double_3x3_0_pad = F.pad(relu_3b_double_3x3_reduce, (1, 1, 1, 1))
    conv_3b_double_3x3_0 = self.conv_3b_double_3x3_0(conv_3b_double_3x3_0_pad)
    relu_3b_proj = F.relu(bn_3b_proj)
    bn_3b_3x3 = self.bn_3b_3x3(conv_3b_3x3)
    bn_3b_double_3x3_0 = self.bn_3b_double_3x3_0(conv_3b_double_3x3_0)
    relu_3b_3x3 = F.relu(bn_3b_3x3)
    relu_3b_double_3x3_0 = F.relu(bn_3b_double_3x3_0)
    conv_3b_double_3x3_1_pad = F.pad(relu_3b_double_3x3_0, (1, 1, 1, 1))
    conv_3b_double_3x3_1 = self.conv_3b_double_3x3_1(conv_3b_double_3x3_1_pad)
    bn_3b_double_3x3_1 = self.bn_3b_double_3x3_1(conv_3b_double_3x3_1)
    relu_3b_double_3x3_1 = F.relu(bn_3b_double_3x3_1)
    ch_concat_3b_chconcat = torch.cat((relu_3b_1x1, relu_3b_3x3, relu_3b_double_3x3_1, relu_3b_proj), 1)
    # --- Inception 3c (stride-2 reduction block; -inf pad so padding never wins the max) ---
    conv_3c_3x3_reduce = self.conv_3c_3x3_reduce(ch_concat_3b_chconcat)
    conv_3c_double_3x3_reduce = self.conv_3c_double_3x3_reduce(ch_concat_3b_chconcat)
    max_pool_3c_pool_pad = F.pad(ch_concat_3b_chconcat, (1, 1, 1, 1), value=float('-inf'))
    max_pool_3c_pool = F.max_pool2d(max_pool_3c_pool_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    bn_3c_3x3_reduce = self.bn_3c_3x3_reduce(conv_3c_3x3_reduce)
    bn_3c_double_3x3_reduce = self.bn_3c_double_3x3_reduce(conv_3c_double_3x3_reduce)
    relu_3c_3x3_reduce = F.relu(bn_3c_3x3_reduce)
    relu_3c_double_3x3_reduce = F.relu(bn_3c_double_3x3_reduce)
    conv_3c_3x3_pad = F.pad(relu_3c_3x3_reduce, (1, 1, 1, 1))
    conv_3c_3x3 = self.conv_3c_3x3(conv_3c_3x3_pad)
    conv_3c_double_3x3_0_pad = F.pad(relu_3c_double_3x3_reduce, (1, 1, 1, 1))
    conv_3c_double_3x3_0 = self.conv_3c_double_3x3_0(conv_3c_double_3x3_0_pad)
    bn_3c_3x3 = self.bn_3c_3x3(conv_3c_3x3)
    bn_3c_double_3x3_0 = self.bn_3c_double_3x3_0(conv_3c_double_3x3_0)
    relu_3c_3x3 = F.relu(bn_3c_3x3)
    relu_3c_double_3x3_0 = F.relu(bn_3c_double_3x3_0)
    conv_3c_double_3x3_1_pad = F.pad(relu_3c_double_3x3_0, (1, 1, 1, 1))
    conv_3c_double_3x3_1 = self.conv_3c_double_3x3_1(conv_3c_double_3x3_1_pad)
    bn_3c_double_3x3_1 = self.bn_3c_double_3x3_1(conv_3c_double_3x3_1)
    relu_3c_double_3x3_1 = F.relu(bn_3c_double_3x3_1)
    ch_concat_3c_chconcat = torch.cat((relu_3c_3x3, relu_3c_double_3x3_1, max_pool_3c_pool), 1)
    # --- Inception 4a ---
    conv_4a_1x1 = self.conv_4a_1x1(ch_concat_3c_chconcat)
    conv_4a_3x3_reduce = self.conv_4a_3x3_reduce(ch_concat_3c_chconcat)
    conv_4a_double_3x3_reduce = self.conv_4a_double_3x3_reduce(ch_concat_3c_chconcat)
    avg_pool_4a_pool = F.avg_pool2d(ch_concat_3c_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_4a_1x1 = self.bn_4a_1x1(conv_4a_1x1)
    bn_4a_3x3_reduce = self.bn_4a_3x3_reduce(conv_4a_3x3_reduce)
    bn_4a_double_3x3_reduce = self.bn_4a_double_3x3_reduce(conv_4a_double_3x3_reduce)
    conv_4a_proj = self.conv_4a_proj(avg_pool_4a_pool)
    relu_4a_1x1 = F.relu(bn_4a_1x1)
    relu_4a_3x3_reduce = F.relu(bn_4a_3x3_reduce)
    relu_4a_double_3x3_reduce = F.relu(bn_4a_double_3x3_reduce)
    bn_4a_proj = self.bn_4a_proj(conv_4a_proj)
    conv_4a_3x3_pad = F.pad(relu_4a_3x3_reduce, (1, 1, 1, 1))
    conv_4a_3x3 = self.conv_4a_3x3(conv_4a_3x3_pad)
    conv_4a_double_3x3_0_pad = F.pad(relu_4a_double_3x3_reduce, (1, 1, 1, 1))
    conv_4a_double_3x3_0 = self.conv_4a_double_3x3_0(conv_4a_double_3x3_0_pad)
    relu_4a_proj = F.relu(bn_4a_proj)
    bn_4a_3x3 = self.bn_4a_3x3(conv_4a_3x3)
    bn_4a_double_3x3_0 = self.bn_4a_double_3x3_0(conv_4a_double_3x3_0)
    relu_4a_3x3 = F.relu(bn_4a_3x3)
    relu_4a_double_3x3_0 = F.relu(bn_4a_double_3x3_0)
    conv_4a_double_3x3_1_pad = F.pad(relu_4a_double_3x3_0, (1, 1, 1, 1))
    conv_4a_double_3x3_1 = self.conv_4a_double_3x3_1(conv_4a_double_3x3_1_pad)
    bn_4a_double_3x3_1 = self.bn_4a_double_3x3_1(conv_4a_double_3x3_1)
    relu_4a_double_3x3_1 = F.relu(bn_4a_double_3x3_1)
    ch_concat_4a_chconcat = torch.cat((relu_4a_1x1, relu_4a_3x3, relu_4a_double_3x3_1, relu_4a_proj), 1)
    # --- Inception 4b ---
    conv_4b_1x1 = self.conv_4b_1x1(ch_concat_4a_chconcat)
    conv_4b_3x3_reduce = self.conv_4b_3x3_reduce(ch_concat_4a_chconcat)
    conv_4b_double_3x3_reduce = self.conv_4b_double_3x3_reduce(ch_concat_4a_chconcat)
    avg_pool_4b_pool = F.avg_pool2d(ch_concat_4a_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_4b_1x1 = self.bn_4b_1x1(conv_4b_1x1)
    bn_4b_3x3_reduce = self.bn_4b_3x3_reduce(conv_4b_3x3_reduce)
    bn_4b_double_3x3_reduce = self.bn_4b_double_3x3_reduce(conv_4b_double_3x3_reduce)
    conv_4b_proj = self.conv_4b_proj(avg_pool_4b_pool)
    relu_4b_1x1 = F.relu(bn_4b_1x1)
    relu_4b_3x3_reduce = F.relu(bn_4b_3x3_reduce)
    relu_4b_double_3x3_reduce = F.relu(bn_4b_double_3x3_reduce)
    bn_4b_proj = self.bn_4b_proj(conv_4b_proj)
    conv_4b_3x3_pad = F.pad(relu_4b_3x3_reduce, (1, 1, 1, 1))
    conv_4b_3x3 = self.conv_4b_3x3(conv_4b_3x3_pad)
    conv_4b_double_3x3_0_pad = F.pad(relu_4b_double_3x3_reduce, (1, 1, 1, 1))
    conv_4b_double_3x3_0 = self.conv_4b_double_3x3_0(conv_4b_double_3x3_0_pad)
    relu_4b_proj = F.relu(bn_4b_proj)
    bn_4b_3x3 = self.bn_4b_3x3(conv_4b_3x3)
    bn_4b_double_3x3_0 = self.bn_4b_double_3x3_0(conv_4b_double_3x3_0)
    relu_4b_3x3 = F.relu(bn_4b_3x3)
    relu_4b_double_3x3_0 = F.relu(bn_4b_double_3x3_0)
    conv_4b_double_3x3_1_pad = F.pad(relu_4b_double_3x3_0, (1, 1, 1, 1))
    conv_4b_double_3x3_1 = self.conv_4b_double_3x3_1(conv_4b_double_3x3_1_pad)
    bn_4b_double_3x3_1 = self.bn_4b_double_3x3_1(conv_4b_double_3x3_1)
    relu_4b_double_3x3_1 = F.relu(bn_4b_double_3x3_1)
    ch_concat_4b_chconcat = torch.cat((relu_4b_1x1, relu_4b_3x3, relu_4b_double_3x3_1, relu_4b_proj), 1)
    # --- Inception 4c ---
    conv_4c_1x1 = self.conv_4c_1x1(ch_concat_4b_chconcat)
    conv_4c_3x3_reduce = self.conv_4c_3x3_reduce(ch_concat_4b_chconcat)
    conv_4c_double_3x3_reduce = self.conv_4c_double_3x3_reduce(ch_concat_4b_chconcat)
    avg_pool_4c_pool = F.avg_pool2d(ch_concat_4b_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_4c_1x1 = self.bn_4c_1x1(conv_4c_1x1)
    bn_4c_3x3_reduce = self.bn_4c_3x3_reduce(conv_4c_3x3_reduce)
    bn_4c_double_3x3_reduce = self.bn_4c_double_3x3_reduce(conv_4c_double_3x3_reduce)
    conv_4c_proj = self.conv_4c_proj(avg_pool_4c_pool)
    relu_4c_1x1 = F.relu(bn_4c_1x1)
    relu_4c_3x3_reduce = F.relu(bn_4c_3x3_reduce)
    relu_4c_double_3x3_reduce = F.relu(bn_4c_double_3x3_reduce)
    bn_4c_proj = self.bn_4c_proj(conv_4c_proj)
    conv_4c_3x3_pad = F.pad(relu_4c_3x3_reduce, (1, 1, 1, 1))
    conv_4c_3x3 = self.conv_4c_3x3(conv_4c_3x3_pad)
    conv_4c_double_3x3_0_pad = F.pad(relu_4c_double_3x3_reduce, (1, 1, 1, 1))
    conv_4c_double_3x3_0 = self.conv_4c_double_3x3_0(conv_4c_double_3x3_0_pad)
    relu_4c_proj = F.relu(bn_4c_proj)
    bn_4c_3x3 = self.bn_4c_3x3(conv_4c_3x3)
    bn_4c_double_3x3_0 = self.bn_4c_double_3x3_0(conv_4c_double_3x3_0)
    relu_4c_3x3 = F.relu(bn_4c_3x3)
    relu_4c_double_3x3_0 = F.relu(bn_4c_double_3x3_0)
    conv_4c_double_3x3_1_pad = F.pad(relu_4c_double_3x3_0, (1, 1, 1, 1))
    conv_4c_double_3x3_1 = self.conv_4c_double_3x3_1(conv_4c_double_3x3_1_pad)
    bn_4c_double_3x3_1 = self.bn_4c_double_3x3_1(conv_4c_double_3x3_1)
    relu_4c_double_3x3_1 = F.relu(bn_4c_double_3x3_1)
    ch_concat_4c_chconcat = torch.cat((relu_4c_1x1, relu_4c_3x3, relu_4c_double_3x3_1, relu_4c_proj), 1)
    # --- Inception 4d ---
    conv_4d_1x1 = self.conv_4d_1x1(ch_concat_4c_chconcat)
    conv_4d_3x3_reduce = self.conv_4d_3x3_reduce(ch_concat_4c_chconcat)
    conv_4d_double_3x3_reduce = self.conv_4d_double_3x3_reduce(ch_concat_4c_chconcat)
    avg_pool_4d_pool = F.avg_pool2d(ch_concat_4c_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_4d_1x1 = self.bn_4d_1x1(conv_4d_1x1)
    bn_4d_3x3_reduce = self.bn_4d_3x3_reduce(conv_4d_3x3_reduce)
    bn_4d_double_3x3_reduce = self.bn_4d_double_3x3_reduce(conv_4d_double_3x3_reduce)
    conv_4d_proj = self.conv_4d_proj(avg_pool_4d_pool)
    relu_4d_1x1 = F.relu(bn_4d_1x1)
    relu_4d_3x3_reduce = F.relu(bn_4d_3x3_reduce)
    relu_4d_double_3x3_reduce = F.relu(bn_4d_double_3x3_reduce)
    bn_4d_proj = self.bn_4d_proj(conv_4d_proj)
    conv_4d_3x3_pad = F.pad(relu_4d_3x3_reduce, (1, 1, 1, 1))
    conv_4d_3x3 = self.conv_4d_3x3(conv_4d_3x3_pad)
    conv_4d_double_3x3_0_pad = F.pad(relu_4d_double_3x3_reduce, (1, 1, 1, 1))
    conv_4d_double_3x3_0 = self.conv_4d_double_3x3_0(conv_4d_double_3x3_0_pad)
    relu_4d_proj = F.relu(bn_4d_proj)
    bn_4d_3x3 = self.bn_4d_3x3(conv_4d_3x3)
    bn_4d_double_3x3_0 = self.bn_4d_double_3x3_0(conv_4d_double_3x3_0)
    relu_4d_3x3 = F.relu(bn_4d_3x3)
    relu_4d_double_3x3_0 = F.relu(bn_4d_double_3x3_0)
    conv_4d_double_3x3_1_pad = F.pad(relu_4d_double_3x3_0, (1, 1, 1, 1))
    conv_4d_double_3x3_1 = self.conv_4d_double_3x3_1(conv_4d_double_3x3_1_pad)
    bn_4d_double_3x3_1 = self.bn_4d_double_3x3_1(conv_4d_double_3x3_1)
    relu_4d_double_3x3_1 = F.relu(bn_4d_double_3x3_1)
    ch_concat_4d_chconcat = torch.cat((relu_4d_1x1, relu_4d_3x3, relu_4d_double_3x3_1, relu_4d_proj), 1)
    # --- Inception 4e (stride-2 reduction block) ---
    conv_4e_3x3_reduce = self.conv_4e_3x3_reduce(ch_concat_4d_chconcat)
    conv_4e_double_3x3_reduce = self.conv_4e_double_3x3_reduce(ch_concat_4d_chconcat)
    max_pool_4e_pool_pad = F.pad(ch_concat_4d_chconcat, (1, 1, 1, 1), value=float('-inf'))
    max_pool_4e_pool = F.max_pool2d(max_pool_4e_pool_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
    bn_4e_3x3_reduce = self.bn_4e_3x3_reduce(conv_4e_3x3_reduce)
    bn_4e_double_3x3_reduce = self.bn_4e_double_3x3_reduce(conv_4e_double_3x3_reduce)
    relu_4e_3x3_reduce = F.relu(bn_4e_3x3_reduce)
    relu_4e_double_3x3_reduce = F.relu(bn_4e_double_3x3_reduce)
    conv_4e_3x3_pad = F.pad(relu_4e_3x3_reduce, (1, 1, 1, 1))
    conv_4e_3x3 = self.conv_4e_3x3(conv_4e_3x3_pad)
    conv_4e_double_3x3_0_pad = F.pad(relu_4e_double_3x3_reduce, (1, 1, 1, 1))
    conv_4e_double_3x3_0 = self.conv_4e_double_3x3_0(conv_4e_double_3x3_0_pad)
    bn_4e_3x3 = self.bn_4e_3x3(conv_4e_3x3)
    bn_4e_double_3x3_0 = self.bn_4e_double_3x3_0(conv_4e_double_3x3_0)
    relu_4e_3x3 = F.relu(bn_4e_3x3)
    relu_4e_double_3x3_0 = F.relu(bn_4e_double_3x3_0)
    conv_4e_double_3x3_1_pad = F.pad(relu_4e_double_3x3_0, (1, 1, 1, 1))
    conv_4e_double_3x3_1 = self.conv_4e_double_3x3_1(conv_4e_double_3x3_1_pad)
    bn_4e_double_3x3_1 = self.bn_4e_double_3x3_1(conv_4e_double_3x3_1)
    relu_4e_double_3x3_1 = F.relu(bn_4e_double_3x3_1)
    ch_concat_4e_chconcat = torch.cat((relu_4e_3x3, relu_4e_double_3x3_1, max_pool_4e_pool), 1)
    # --- Inception 5a ---
    conv_5a_1x1 = self.conv_5a_1x1(ch_concat_4e_chconcat)
    conv_5a_3x3_reduce = self.conv_5a_3x3_reduce(ch_concat_4e_chconcat)
    conv_5a_double_3x3_reduce = self.conv_5a_double_3x3_reduce(ch_concat_4e_chconcat)
    avg_pool_5a_pool = F.avg_pool2d(ch_concat_4e_chconcat, kernel_size=(3, 3), stride=(1, 1), padding=(1,), ceil_mode=False, count_include_pad=False)
    bn_5a_1x1 = self.bn_5a_1x1(conv_5a_1x1)
    bn_5a_3x3_reduce = self.bn_5a_3x3_reduce(conv_5a_3x3_reduce)
    bn_5a_double_3x3_reduce = self.bn_5a_double_3x3_reduce(conv_5a_double_3x3_reduce)
    conv_5a_proj = self.conv_5a_proj(avg_pool_5a_pool)
    relu_5a_1x1 = F.relu(bn_5a_1x1)
    relu_5a_3x3_reduce = F.relu(bn_5a_3x3_reduce)
    relu_5a_double_3x3_reduce = F.relu(bn_5a_double_3x3_reduce)
    bn_5a_proj = self.bn_5a_proj(conv_5a_proj)
    conv_5a_3x3_pad = F.pad(relu_5a_3x3_reduce, (1, 1, 1, 1))
    conv_5a_3x3 = self.conv_5a_3x3(conv_5a_3x3_pad)
    conv_5a_double_3x3_0_pad = F.pad(relu_5a_double_3x3_reduce, (1, 1, 1, 1))
    conv_5a_double_3x3_0 = self.conv_5a_double_3x3_0(conv_5a_double_3x3_0_pad)
    relu_5a_proj = F.relu(bn_5a_proj)
    bn_5a_3x3 = self.bn_5a_3x3(conv_5a_3x3)
    bn_5a_double_3x3_0 = self.bn_5a_double_3x3_0(conv_5a_double_3x3_0)
    relu_5a_3x3 = F.relu(bn_5a_3x3)
    relu_5a_double_3x3_0 = F.relu(bn_5a_double_3x3_0)
    conv_5a_double_3x3_1_pad = F.pad(relu_5a_double_3x3_0, (1, 1, 1, 1))
    conv_5a_double_3x3_1 = self.conv_5a_double_3x3_1(conv_5a_double_3x3_1_pad)
    bn_5a_double_3x3_1 = self.bn_5a_double_3x3_1(conv_5a_double_3x3_1)
    relu_5a_double_3x3_1 = F.relu(bn_5a_double_3x3_1)
    ch_concat_5a_chconcat = torch.cat((relu_5a_1x1, relu_5a_3x3, relu_5a_double_3x3_1, relu_5a_proj), 1)
    # --- Inception 5b (pooled branch is a stride-1 max pool here) ---
    conv_5b_1x1 = self.conv_5b_1x1(ch_concat_5a_chconcat)
    conv_5b_3x3_reduce = self.conv_5b_3x3_reduce(ch_concat_5a_chconcat)
    conv_5b_double_3x3_reduce = self.conv_5b_double_3x3_reduce(ch_concat_5a_chconcat)
    max_pool_5b_pool_pad = F.pad(ch_concat_5a_chconcat, (1, 1, 1, 1), value=float('-inf'))
    max_pool_5b_pool = F.max_pool2d(max_pool_5b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
    bn_5b_1x1 = self.bn_5b_1x1(conv_5b_1x1)
    bn_5b_3x3_reduce = self.bn_5b_3x3_reduce(conv_5b_3x3_reduce)
    bn_5b_double_3x3_reduce = self.bn_5b_double_3x3_reduce(conv_5b_double_3x3_reduce)
    conv_5b_proj = self.conv_5b_proj(max_pool_5b_pool)
    relu_5b_1x1 = F.relu(bn_5b_1x1)
    relu_5b_3x3_reduce = F.relu(bn_5b_3x3_reduce)
    relu_5b_double_3x3_reduce = F.relu(bn_5b_double_3x3_reduce)
    bn_5b_proj = self.bn_5b_proj(conv_5b_proj)
    conv_5b_3x3_pad = F.pad(relu_5b_3x3_reduce, (1, 1, 1, 1))
    conv_5b_3x3 = self.conv_5b_3x3(conv_5b_3x3_pad)
    conv_5b_double_3x3_0_pad = F.pad(relu_5b_double_3x3_reduce, (1, 1, 1, 1))
    conv_5b_double_3x3_0 = self.conv_5b_double_3x3_0(conv_5b_double_3x3_0_pad)
    relu_5b_proj = F.relu(bn_5b_proj)
    bn_5b_3x3 = self.bn_5b_3x3(conv_5b_3x3)
    bn_5b_double_3x3_0 = self.bn_5b_double_3x3_0(conv_5b_double_3x3_0)
    relu_5b_3x3 = F.relu(bn_5b_3x3)
    relu_5b_double_3x3_0 = F.relu(bn_5b_double_3x3_0)
    conv_5b_double_3x3_1_pad = F.pad(relu_5b_double_3x3_0, (1, 1, 1, 1))
    conv_5b_double_3x3_1 = self.conv_5b_double_3x3_1(conv_5b_double_3x3_1_pad)
    bn_5b_double_3x3_1 = self.bn_5b_double_3x3_1(conv_5b_double_3x3_1)
    relu_5b_double_3x3_1 = F.relu(bn_5b_double_3x3_1)
    ch_concat_5b_chconcat = torch.cat((relu_5b_1x1, relu_5b_3x3, relu_5b_double_3x3_1, relu_5b_proj), 1)
    # --- Classifier head: global average pool -> flatten -> FC -> softmax ---
    global_pool = F.avg_pool2d(ch_concat_5b_chconcat, kernel_size=(7, 7), stride=(1, 1), padding=(0,), ceil_mode=False, count_include_pad=False)
    flatten = global_pool.view(global_pool.size(0), (- 1))
    fc1 = self.fc1(flatten)
    # BUG FIX: F.softmax without an explicit dim is deprecated (implicit dim
    # choice); for this 2-D (batch, classes) input the implicit choice was
    # dim=1, so behavior is unchanged.
    softmax = F.softmax(fc1, dim=1)
    return softmax
def __conv(dim, name, **kwargs):
    """Build an ``nn.Conv{1,2,3}d`` and load its weights from ``__weights_dict``.

    ``dim`` selects the convolution dimensionality; ``name`` is the key into
    the generated weights dictionary ('weights' mandatory, 'bias' optional).
    """
    conv_by_dim = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
    if dim not in conv_by_dim:
        raise NotImplementedError()
    layer = conv_by_dim[dim](**kwargs)
    params = __weights_dict[name]
    layer.state_dict()['weight'].copy_(torch.from_numpy(params['weights']))
    if 'bias' in params:
        layer.state_dict()['bias'].copy_(torch.from_numpy(params['bias']))
    return layer
def __batch_normalization(dim, name, **kwargs):
    """Build an ``nn.BatchNorm{1,2,3}d`` and load its parameters from ``__weights_dict``.

    'scale'/'bias' fall back to the conventional 1/0 fill when absent;
    'mean' and 'var' populate the running statistics.
    """
    bn_by_dim = {0: nn.BatchNorm1d, 1: nn.BatchNorm1d, 2: nn.BatchNorm2d, 3: nn.BatchNorm3d}
    if dim not in bn_by_dim:
        raise NotImplementedError()
    layer = bn_by_dim[dim](**kwargs)
    params = __weights_dict[name]
    state = layer.state_dict()
    if 'scale' in params:
        state['weight'].copy_(torch.from_numpy(params['scale']))
    else:
        layer.weight.data.fill_(1)
    if 'bias' in params:
        state['bias'].copy_(torch.from_numpy(params['bias']))
    else:
        layer.bias.data.fill_(0)
    state['running_mean'].copy_(torch.from_numpy(params['mean']))
    state['running_var'].copy_(torch.from_numpy(params['var']))
    return layer
def __dense(name, **kwargs):
    """Build an ``nn.Linear`` and load 'weights' (and optional 'bias') from ``__weights_dict``."""
    params = __weights_dict[name]
    layer = nn.Linear(**kwargs)
    layer.state_dict()['weight'].copy_(torch.from_numpy(params['weights']))
    if 'bias' in params:
        layer.state_dict()['bias'].copy_(torch.from_numpy(params['bias']))
    return layer
def prep_type_tokens(tokenlist, token_format=token_format):
    """Wrap each (text, tag, ...) pair into a TypeToken carrying the formatted text."""
    def _to_type_token(tok):
        # tok[0] is the raw token text, tok[1] its tag/type.
        return TypeToken(tok[0], token_format.format(tok[0]), tok[1])
    return [_to_type_token(tok) for tok in tokenlist]
def conv_block_bn(x, filters):
    """Conv(3x3, same, no bias) -> BatchNorm -> ReLU block for a Keras graph."""
    conv = Conv2D(filters=filters, kernel_size=(3, 3), padding='same', use_bias=False)
    normalized = BatchNormalization()(conv(x))
    return Activation('relu')(normalized)
def detect_loader(schema_or_location: (str | dict[(str, Any)]), app: Any, is_openapi: bool) -> Callable:
    """Pick the schema-loading callable matching where the schema lives.

    Dicts are treated as already-parsed schemas; strings are tried as a
    filesystem path, then as an app-relative path, then as a URI.
    """
    if not isinstance(schema_or_location, str):
        # Already a parsed schema dictionary.
        return oas_loaders.from_dict if is_openapi else gql_loaders.from_dict
    if file_exists(schema_or_location):
        return oas_loaders.from_path if is_openapi else gql_loaders.from_path
    if app is not None and not urlparse(schema_or_location).netloc:
        # Relative location plus an app object: load through the app itself.
        if is_openapi:
            return oas_loaders.get_loader_for_app(app)
        return gql_loaders.get_loader_for_app(app)
    return oas_loaders.from_uri if is_openapi else gql_loaders.from_url
class EsmForSequenceClassification(metaclass=DummyObject):
    """Placeholder emitted when torch is unavailable; any instantiation raises
    via ``requires_backends`` instead of failing with an ImportError later."""
    # Backends whose presence is checked on instantiation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def load_videos_tag(mat_path='./data/ute_query/Tags.mat'):
    """Load the UTE query tag annotations from a MATLAB ``.mat`` file and
    post-process them with ``process_mat``."""
    return process_mat(scipy.io.loadmat(mat_path))
def convert_to_timedelta(column):
    """Convert a Series to timedelta dtype, preserving missing values as NaT.

    NaNs are temporarily replaced with 0 before ``pd.to_timedelta`` and then
    restored as NaT afterwards.

    BUG FIX: operate on a copy — the original wrote 0 into the caller's
    Series in place before reassigning ``column`` locally, silently
    clobbering the caller's missing values.

    Args:
        column: a pandas Series (possibly containing NaN/None).

    Returns:
        A new timedelta64 Series; the input is left untouched.
    """
    column = column.copy()
    nan_mask = pd.isna(column)
    column[nan_mask] = 0
    column = pd.to_timedelta(column)
    column[nan_mask] = pd.NaT
    return column
class MinimizationProblem():
    """Base class for a residual-minimization problem over a TensorList.

    Subclasses implement ``__call__`` to return the residuals f(x);
    ``ip_input`` defines the inner product of the input space and
    ``M1``/``M2`` are (identity by default) preconditioners.
    """

    def __call__(self, x: TensorList) -> TensorList:
        """Return the residual vector f(x)."""
        raise NotImplementedError

    def ip_input(self, a, b):
        """Inner product of the input space.

        BUG FIX: the original read ``a.view((-1)) b.view((-1))`` — two
        adjacent expressions with the missing ``@`` operator, a
        SyntaxError.  Restore the flattened dot product, summed over the
        TensorList entries.
        """
        return sum(a.view(-1) @ b.view(-1))

    def M1(self, x):
        # Identity preconditioner by default.
        return x

    def M2(self, x):
        # Identity preconditioner by default.
        return x
def _getmp(self):
    """Parse the MP (Multi-Picture) Index embedded in this image.

    Returns a dict of MP tags with tag 45058 (MPEntry) expanded into a
    list of per-image dicts whose 'Attribute' field is decoded, or None
    when the image carries no MP data.

    Raises:
        SyntaxError: if the MP Index is malformed or uses an unsupported
            picture format.
    """
    try:
        data = self.info['mp']
    except KeyError:
        return None
    file_contents = io.BytesIO(data)
    head = file_contents.read(8)
    # The MP index is a small TIFF structure; 'MM\x00*' marks big-endian.
    endianness = '>' if head[:4] == b'MM\x00*' else '<'
    try:
        info = TiffImagePlugin.ImageFileDirectory_v2(head)
        file_contents.seek(info.next)
        info.load(file_contents)
        mp = dict(info)
    except Exception:
        raise SyntaxError('malformed MP Index (unreadable directory)')
    try:
        quant = mp[45057]  # NumberOfImages tag
    except KeyError:
        raise SyntaxError('malformed MP Index (no number of images)')
    mpentries = []
    try:
        rawmpentries = mp[45058]  # MPEntry tag: 16 bytes per image
        for entrynum in range(0, quant):
            unpackedentry = struct.unpack_from('{}LLLHH'.format(endianness), rawmpentries, (entrynum * 16))
            labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1', 'EntryNo2')
            mpentry = dict(zip(labels, unpackedentry))
            # Decode the 32-bit attribute word (CIPA DC-007 layout):
            # bit 31 dependent parent, bit 30 dependent child, bit 29
            # representative image, bits 27-28 reserved, bits 24-26 data
            # format, bits 0-23 the MP type code.
            mpentryattr = {
                'DependentParentImageFlag': bool(mpentry['Attribute'] & (1 << 31)),
                'DependentChildImageFlag': bool(mpentry['Attribute'] & (1 << 30)),
                'RepresentativeImageFlag': bool(mpentry['Attribute'] & (1 << 29)),
                'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27,
                'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24,
                # BUG FIX: the mask operand was missing after '&' (a
                # SyntaxError); the MP type code occupies the low 24 bits.
                'MPType': mpentry['Attribute'] & 0x00FFFFFF,
            }
            if mpentryattr['ImageDataFormat'] == 0:
                mpentryattr['ImageDataFormat'] = 'JPEG'
            else:
                raise SyntaxError('unsupported picture format in MPO')
            mptypemap = {
                0: 'Undefined',
                65537: 'Large Thumbnail (VGA Equivalent)',
                65538: 'Large Thumbnail (Full HD Equivalent)',
                131073: 'Multi-Frame Image (Panorama)',
                131074: 'Multi-Frame Image: (Disparity)',
                131075: 'Multi-Frame Image: (Multi-Angle)',
                196608: 'Baseline MP Primary Image',
            }
            mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'], 'Unknown')
            mpentry['Attribute'] = mpentryattr
            mpentries.append(mpentry)
        mp[45058] = mpentries
    except KeyError:
        raise SyntaxError('malformed MP Index (bad MP Entry)')
    return mp
def preprocess_lm_data(data_dir):
    """Binarize the train/valid/test splits found under ``data_dir`` using
    the fairseq preprocessing CLI, writing the result back into ``data_dir``."""
    argv = [
        '--only-source',
        '--trainpref', os.path.join(data_dir, 'train.out'),
        '--validpref', os.path.join(data_dir, 'valid.out'),
        '--testpref', os.path.join(data_dir, 'test.out'),
        '--destdir', data_dir,
    ]
    parser = preprocess.get_parser()
    preprocess.main(parser.parse_args(argv))
_module()
class VideoDataset(BaseDataset):
    """Video dataset whose annotation file lists one sample per line:
    ``<relative-path> <label>[ <label2> ...]`` (multiple labels in
    multi-class mode), or a ``.json`` file handled by the base class."""

    def __init__(self, ann_file, pipeline, start_index=0, **kwargs):
        super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)

    def load_annotations(self):
        """Parse the annotation file into ``dict(filename=..., label=...)`` records."""
        if self.ann_file.endswith('.json'):
            return self.load_json_annotations()
        records = []
        with open(self.ann_file, 'r') as fin:
            for raw_line in fin:
                fields = raw_line.strip().split()
                if self.multi_class:
                    # Multi-label sample: every field after the path is a class id.
                    assert (self.num_classes is not None)
                    filename = fields[0]
                    label = [int(tok) for tok in fields[1:]]
                else:
                    (filename, label) = fields
                    label = int(label)
                if self.data_prefix is not None:
                    filename = osp.join(self.data_prefix, filename)
                records.append(dict(filename=filename, label=label))
        return records
def assert_is_tensor(x, ndim):
    """Raise ValueError unless ``x`` has exactly ``ndim`` dimensions."""
    actual = x.ndim
    if actual == ndim:
        return
    raise ValueError(f'Expected {ndim}-tensor but got {actual}-tensor')
def kaiming_init(m):
    """Initialize a module in place: Kaiming-normal weights for Linear/Conv2d
    (bias zeroed), and weight=1 / bias=0 for BatchNorm1d/2d.

    Intended for use with ``model.apply(kaiming_init)``.

    FIX: ``init.kaiming_normal`` is the long-deprecated alias of the
    in-place ``kaiming_normal_`` (removed in recent torch); behavior is
    identical.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)
def test_knorau():
    """Smoke-test KNORAU with DFP and instance hardness enabled."""
    (pool, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    model = KNORAU(pool, DFP=True, with_IH=True, IH_rate=0.1)
    model.fit(X_dsel, y_dsel)
    # NOTE(review): an expected accuracy of exactly 0.0 looks like a
    # truncated literal — confirm the intended score against upstream.
    assert np.isclose(model.score(X_test, y_test), 0.)
def DistributedOptimizer(optimizer, named_parameters=None, compression=Compression.none, backward_passes_per_step=1, op=Average):
    """Wrap ``optimizer`` in a Horovod distributed optimizer.

    A new subclass of the optimizer's own class is created on the fly so the
    wrapped object keeps the original optimizer's interface; Adasum with more
    than one worker gets its dedicated implementation.
    """
    use_adasum = (op == Adasum) and (size() != 1)
    mixin = _DistributedAdasumOptimizer if use_adasum else _DistributedOptimizer
    cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(mixin.__dict__))
    if use_adasum:
        return cls(optimizer.param_groups, named_parameters, compression, backward_passes_per_step)
    return cls(optimizer.param_groups, named_parameters, compression, backward_passes_per_step, op)
def add_column(B, H_B, a, proof):
    """Compute the Hermite-form column corresponding to ``a``.

    Falls back to ``add_column_fallback`` when ``B`` is rank-deficient;
    otherwise solves the system, multiplies through ``H_B`` and clears the
    common denominator.
    """
    verbose('starting add_column')
    if B.rank() < B.nrows():
        return add_column_fallback(B, a, proof)
    z = solve_system_with_difficult_last_row(B, a)
    (zd, d) = z._clear_denom()
    x = H_B * zd
    if d != 1:
        # Divide every entry of the (column) result by the denominator.
        for row in range(x.nrows()):
            x[row, 0] = x[row, 0] / d
    return x
class ConvNet(nn.Module):
    """LeNet-style feature extractor: two conv+pool stages followed by one
    ReLU-activated fully connected layer (500-dim output, no classifier)."""

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        return F.relu(self.fc1(out))
_sentencepiece
_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for GPTSw3Tokenizer (sentencepiece-backed, no
    fast/Rust implementation), using the shared SAMPLE_VOCAB fixture."""
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        # Persist a tokenizer built from the sample vocab so the mixin's
        # get_tokenizer() helpers can reload it from tmpdirname.
        super().setUp()
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        # Round-trip fixture used by the mixin's encode/decode tests.
        input_text = 'This is a test'
        output_text = 'This is a test'
        return (input_text, output_text)

    def test_convert_token_and_id(self):
        """Token <-> id conversion is mutually inverse for a known pair."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        # First entries are the special tokens; size matches the sample vocab.
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[(- 1)], 'j')
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        """Pin exact sentencepiece tokens and ids (including byte fallback)."""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, ['I', 'was', 'bor', 'n', 'in', '', '<0x39>', '2', '0', '0', '0', ',', 'and', 'this', 'is', 'f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, ['I', 'was', 'bor', 'n', 'in', '', '<0x39>', '2', '0', '0', '0', ',', 'and', 'this', 'is', 'f', 'al', 's', '<0xC3>', '<0xA9>', '.'])

    def test_fast_encode_decode(self):
        """encode_fast/decode_fast agree with the slow path's expected ids."""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is false.']
        expected_ids_list = [[465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260]]
        for (text, expected_ids) in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        for (text, token_ids) in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    def test_tokenizer_integration(self):
        # Integration fixture against the published AI-Sweden checkpoint.
        sequences = ["<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", 'Hey there, how are you doing this fine day?', 'This is a text with a trailing spaces followed by a dot .', 'Haj svajs lillebror! =)', 'Det ar inget fel pa Mr. Cool']
        expected_encoding = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences)
def split_sentence(sentence, class_name):
    """Split a class-prefixed sentence into a (first_part, second_part) pair.

    The leading ``<class_name>.txt``/``.md`` style prefix is stripped first.
    For 'imminent_ned' classes the split happens after the "brace yourselves"
    / "brace yourself" phrase; otherwise after the first '?', falling back to
    a POS-tag heuristic and finally the midpoint.

    BUG FIX: the original computed ``find(...) + len(phrase)`` and only then
    compared against -1.  Since ``len(phrase) - 1 > -1``, the "not found"
    branches were unreachable and a missing phrase produced a bogus split
    near index ``len(phrase) - 1``.  The raw ``find`` result is now tested
    before the offset is added.

    Also, ``pos_tag`` is now only invoked on the fallback path that actually
    uses its output (behavior on all other paths is unchanged; pos_tag has
    no side effects).
    """
    # Strip the "<class_name>.txt " / ".md " style prefix.
    if '.txt' in sentence:
        sentence = sentence[len(class_name) + 4:]
    elif '.md' in sentence:
        sentence = sentence[len(class_name) + 3:]
    else:
        sentence = sentence[len(class_name):]
    if 'imminent_ned' in class_name:
        if 'brace' not in sentence.lower():
            return ('brace yourselves', sentence)
        sentence_low = sentence.lower()
        found = sentence_low.find('brace yourselves')
        if found != -1:
            break_point = found + len('brace yourselves')
            return (sentence[:break_point], sentence[break_point:])
        # NOTE: the singular search intentionally runs on the raw (non-lowered)
        # sentence, as in the original.
        found = sentence.find('brace yourself')
        if found != -1:
            break_point = found + len('brace yourself')
            return (sentence[:break_point], sentence[break_point:])
        return ('brace yourselves', sentence)
    # Non-imminent classes: split right after the first question mark.
    break_point = sentence.find('?')
    if break_point != -1:
        return (sentence[:break_point + 1], sentence[break_point + 1:])
    # Fallback: split before the first capitalized non-noun word (per nltk
    # POS tags), else at the midpoint.
    tagged_sent = pos_tag(sentence.lower().split())
    sentence_list = [tok for tok in sentence.split(' ') if tok != '']
    break_point = 0
    for i in range(1, len(tagged_sent)):
        word = sentence_list[i]
        tag = tagged_sent[i][1]
        if word[0].isupper() and tag not in ('NNP', 'NN', 'NNS', 'NNPS'):
            print(word)
            break_point = i
            break
    if break_point == 0:
        break_point = len(sentence_list) // 2
    first_part = ' '.join(sentence_list[ite] for ite in range(break_point))
    second_part = ' '.join(sentence_list[ite] for ite in range(break_point, len(sentence_list)))
    return (first_part, second_part)
def build_prior(task: Task, model: elfi.ElfiModel):
    """Register per-dimension independent ELFI priors mirroring the task prior.

    Correlations of a multivariate normal prior are discarded: each dimension
    gets an independent 'norm' prior with the marginal mean/std.  Uniform
    priors map directly.  Returns a dict of parameter name -> (low, high)
    bounds (+/- 3 sigma for the normal case).

    FIX: ``Logger.warn`` is the deprecated alias of ``warning``.
    """
    log = logging.getLogger(__name__)
    log.warning('Will discard any correlations in prior')
    bounds = {}
    prior_cls = str(task.prior_dist)
    if prior_cls == 'Independent()':
        # Unwrap torch.distributions.Independent to inspect the base.
        prior_cls = str(task.prior_dist.base_dist)
    prior_params = {}
    if 'MultivariateNormal' in prior_cls:
        prior_params['m'] = task.prior_params['loc'].numpy()
        if 'precision_matrix' in prior_cls:
            prior_params['C'] = np.linalg.inv(task.prior_params['precision_matrix'].numpy())
        if 'covariance_matrix' in prior_cls:
            prior_params['C'] = task.prior_params['covariance_matrix'].numpy()
        for dim in range(task.dim_parameters):
            loc = prior_params['m'][dim]
            scale = np.sqrt(prior_params['C'][(dim, dim)])
            elfi.Prior('norm', loc, scale, model=model, name=f'parameter_{dim}')
            # +/- 3 sigma box around the marginal mean.
            bounds[f'parameter_{dim}'] = (loc - (3.0 * scale), loc + (3.0 * scale))
    elif 'Uniform' in prior_cls:
        prior_params['low'] = task.prior_params['low'].numpy()
        prior_params['high'] = task.prior_params['high'].numpy()
        for dim in range(task.dim_parameters):
            loc = prior_params['low'][dim]
            scale = prior_params['high'][dim] - loc
            elfi.Prior('uniform', loc, scale, model=model, name=f'parameter_{dim}')
            bounds[f'parameter_{dim}'] = (prior_params['low'][dim], prior_params['high'][dim])
    else:
        log.info('No support for prior yet')
        raise NotImplementedError
    return bounds
class LeNet(nn.Module):
    """LeNet-5-style CNN for 3-channel 32x32 inputs.

    Two conv+ReLU+maxpool stages followed by a three-layer classifier head.
    """

    def __init__(self, num_classes=1000):
        super(LeNet, self).__init__()
        # Feature extractor. Attribute creation order is kept identical so
        # parameter iteration order and state-dict keys are unchanged.
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        # Head: 16 feature maps of 5x5 remain after two 5x5 convs + two pools
        # on a 32x32 input.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, num_classes)
        # Distinct activation/pool module instances, one per use site.
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
        self.relu4 = nn.ReLU()
        self.max_pool2d1 = nn.MaxPool2d(2)
        self.max_pool2d2 = nn.MaxPool2d(2)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        features = self.max_pool2d1(self.relu1(self.conv1(x)))
        features = self.max_pool2d2(self.relu2(self.conv2(features)))
        flat = features.view(features.size(0), -1)
        hidden = self.relu4(self.fc2(self.relu3(self.fc1(flat))))
        return self.fc3(hidden)
def pretty_print_templates(templates, verbosity=1):
    """Print a ruled listing of template names/types followed by a total count.

    Note: *verbosity* is accepted for interface compatibility but is unused.
    """
    divider = ('-' * 70)
    print(divider)
    for template in templates:
        print('[Name: %s] [Type: %s]' % (template['name'], template['type']))
    print(divider)
    print('Total of %s templates..' % len(templates))
    print(divider)
def run_analysis(sample, graph, config: AnalysisPipelineConfig, n_iter, recomputation=True, bw_GBps=12, verbose=True, async_pipeline=False, add_comm_times_to_balance=True, sequential_model=None, stages_on_same_gpu: Optional[List[Set[int]]]=None, PRINT_THEORETICAL=False, PRINT_MIN_MAX_BALANCE=False, PRINT_VAR_STD=False, UTILIZATION_SLOWDOWN_SPEEDUP=True, PRINT_1F1B=True, DO_THEORETICAL=False, TRY_SSGD_ANALYSIS=False, TRY_ASGD_ANALYSIS=True):
    """Profile a partitioned pipeline and build a human-readable analysis report.

    Measures per-stage forward/backward times (with and without communication),
    folds stages that share a GPU into one measurement, compares against a
    sequential no-recomputation baseline, and optionally prints a long report.

    Returns:
        (metric_to_maximize, report_string) where metric_to_maximize is the
        negated worst-case per-stage fwd+bwd time with communication ignored
        (higher is better), and report_string is '' when verbose is False.
    """
    if not stages_on_same_gpu:
        stages_on_same_gpu = list()
    # Keep the caller's original sample for data-parallel analysis below.
    sample_save = sample
    # Normalize the sample into a tuple ordered by config.model_inputs().
    if isinstance(sample, dict):
        sample = tuple([sample[i] for i in config.model_inputs()])
    elif not isinstance(sample, tuple):
        sample = (sample,)
    # Expand the list of stage-sets into a per-stage lookup:
    # stage id -> set of all stages on its GPU.
    unique_stages_on_same_gpu = stages_on_same_gpu
    stages_on_same_gpu = defaultdict(set)
    for i in unique_stages_on_same_gpu:
        for j in i:
            stages_on_same_gpu[j] = i
    for i in unique_stages_on_same_gpu:
        assert len(i) >= 1
    # Every stage beyond the first on a shared GPU is a "dummy" stage.
    num_dummy_stages = sum((len(i) - 1) for i in unique_stages_on_same_gpu)
    theoretical_string = maybe_do_theoretical_analysis(DO_THEORETICAL, PRINT_THEORETICAL, PRINT_MIN_MAX_BALANCE, async_pipeline, graph, recomputation)
    if torch.cuda.is_available():
        torch.cuda.reset_peak_memory_stats()
    profile_result = profile_execution(sample, config, n_iter + 1, recomputation=recomputation, bw_GBps=bw_GBps, async_pipeline=async_pipeline, add_comm_times_to_balance=add_comm_times_to_balance, stages_on_same_gpu=stages_on_same_gpu)
    real_f_times = profile_result.f_times_mean
    f_std = profile_result.f_times_std
    real_b_times = profile_result.b_times_mean
    b_std = profile_result.b_times_std
    comm_volume_stats = profile_result.communication_stats
    nocomm_real_f_times = profile_result.nocommf_times_mean
    nocomm_real_f_std = profile_result.nocommf_times_std
    nocomm_real_b_times = profile_result.nocommb_times_mean
    nocomm_real_b_std = profile_result.nocommb_times_std
    warnings_list = profile_result.warnings_list
    max_memory_allocated = None
    if torch.cuda.is_available():
        max_memory_allocated = torch.cuda.max_memory_allocated()

    def get_seq_no_recomp_no_comm_times():
        # Sequential baseline: same sample/config, but no recomputation and
        # non-async.
        try:
            seq_times = profile_execution(sample, config, n_iter + 1, recomputation=False, bw_GBps=bw_GBps, async_pipeline=False, add_comm_times_to_balance=add_comm_times_to_balance, stages_on_same_gpu=stages_on_same_gpu)
        except Exception as e:
            print('-E- failed at get_seq_no_recomp_no_comm_times, known issue')
            raise e
        return seq_times

    def get_comm_vol_str(comm_volume_stats):
        # Pretty-print per-stage communication stats with their units.
        communication_volume = dict()
        for (idx, stats) in comm_volume_stats.items():
            units = {'input size': 'MB', 'recieve_time': 'ms', 'out': 'MB', 'send time': 'ms'}
            newd = {k: f'{stats[k]:.2f} {units[k]}' for k in stats}
            communication_volume[idx] = ', '.join('{!s}:{!r}'.format(key, val) for (key, val) in newd.items())
        return communication_volume

    n_partitions = config.n_stages
    num_real_stages = n_partitions - num_dummy_stages
    pipeline_representation_stage_to_device_map = sorted_stage_to_device_map(n_partitions, stages_on_same_gpu)
    if n_partitions != num_real_stages:
        # Fold measurements of co-located stages into the lowest stage id and
        # drop the dummy entries.
        for i in unique_stages_on_same_gpu:
            j = min(i)
            for k in i:
                if k == j:
                    continue
                for means_list in [real_f_times, real_b_times, nocomm_real_f_times, nocomm_real_b_times, comm_volume_stats]:
                    if isinstance(means_list[j], dict):
                        d1 = means_list[j]
                        d2 = means_list[k]
                        assert isinstance(d1, dict)
                        assert isinstance(d2, dict)
                        for key in d1:
                            d1[key] += d2[key]
                    else:
                        means_list[j] += means_list[k]
                    del means_list[k]
    comm_volume_str = get_comm_vol_str(comm_volume_stats)
    real_b_slowdown = slowdown(real_b_times, nocomm_real_b_times)
    real_f_slowdown = slowdown(real_f_times, nocomm_real_f_times)
    comp_comm_ratio_f = computation_communication_ratio(nocomm_real_f_times, {k: v['send time'] for (k, v) in comm_volume_stats.items()})
    comp_comm_ratio_b = computation_communication_ratio(nocomm_real_b_times, {k: v['recieve_time'] for (k, v) in comm_volume_stats.items()})
    real_f_utilization = utilization(real_f_times, comp_comm_ratio_f)
    real_b_utilization = utilization(real_b_times, comp_comm_ratio_b)
    pipe_times = (real_f_times, real_b_times, nocomm_real_f_times, nocomm_real_b_times)
    expected_speedup = expected_speedup_after_partitioning(*pipe_times)
    try:
        seq_profile_result = get_seq_no_recomp_no_comm_times()
        expected_speedup_compared_to_seq_no_comm = expected_speedup_compared_to_seq(pipe_times, seq_profile_result)
        seq_success = True
    # FIX: was `except (Exception, RuntimeError)` -- RuntimeError is already an
    # Exception subclass, so the tuple was redundant.
    except Exception as e:
        warnings.warn(f'sequential no_recomputation analysis failed: {sys.exc_info()[0]}, {str(e)}')
        seq_success = False
        expected_speedup_compared_to_seq_no_comm = None
        seq_profile_result = None
    comp_comm_ratio_f = rounddict(comp_comm_ratio_f)
    comp_comm_ratio_b = rounddict(comp_comm_ratio_b)
    real_b_utilization = rounddict(real_b_utilization)
    real_f_utilization = rounddict(real_f_utilization)
    d_param_count = parameter_count(config)
    with io.StringIO() as buf, redirect_stdout(buf):
        pprint(d_param_count)
        s_param_count = buf.getvalue()
    d_same_gpu_parameter_count = same_gpu_parameter_count(stage_param_count=d_param_count, stages_on_same_gpu=stages_on_same_gpu)
    num_params_milions = d_same_gpu_parameter_count['total'] / 1000000.0
    num_params_milions = round(num_params_milions, 1)
    with io.StringIO() as buf, redirect_stdout(buf):
        print(f'Number of Model Parameters {num_params_milions}M')
        pprint(d_same_gpu_parameter_count)
        s_gpu_param_count = buf.getvalue()
    fwd_plus_backward_std = dict()
    if n_partitions != num_real_stages:
        warnings.warn('calculating std is not implemented for multiple stages on same GPU')
    else:
        fwd_plus_backward_std['pipeline_no_comm'] = add_stds_dicts(nocomm_real_f_std, nocomm_real_b_std)
    fwd_plus_backward = dict()
    fwd_plus_backward['pipeline_no_comm'] = add_dicts(nocomm_real_f_times, nocomm_real_b_times)
    fwd_plus_backward['pipeline_with_non_parallel_comm'] = add_dicts(real_f_times, real_b_times)
    for (i, v) in fwd_plus_backward.items():
        if i == 'seq_no_comm_no_recomp':
            continue
        worstcase = max(v.values())
        v['worstcase'] = worstcase
        if i in fwd_plus_backward_std:
            # std of the stage that attains the worst-case time (ties resolve
            # to the first stage key since 'worstcase' was inserted last).
            key_matching_top_val = max(v.items(), key=operator.itemgetter(1))[0]
            v['worstcase_std'] = fwd_plus_backward_std[i][key_matching_top_val]
    if seq_success:
        fwd_plus_backward['seq_no_comm_no_recomp'] = (add_dicts(seq_profile_result.nocommf_times_mean, seq_profile_result.nocommb_times_mean) if seq_success else dict())
        fwd_plus_backward['pipeline_vs_seq_no_comm'] = (sum(fwd_plus_backward['seq_no_comm_no_recomp'].values()) / fwd_plus_backward['pipeline_no_comm']['worstcase'])
        fwd_plus_backward['expected_compute_utilization'] = {i: (v / fwd_plus_backward['pipeline_no_comm']['worstcase']) for (i, v) in fwd_plus_backward['pipeline_no_comm'].items() if (i != 'worstcase')}
    for i in list(fwd_plus_backward.keys()):
        v = fwd_plus_backward[i]
        fwd_plus_backward[i] = (rounddict(v, 2) if isinstance(v, dict) else round(v, 2))
    with io.StringIO() as buf, redirect_stdout(buf):
        pprint(fwd_plus_backward)
        s_fwd_plus_backward = buf.getvalue()
    if verbose:
        # NOTE(review): this report section was re-indented from a
        # whitespace-mangled source; branch membership of the `s +=` lines is
        # inferred from the text's meaning -- confirm against the original.
        s = '-I- Printing Report\n'
        if warnings_list:
            s += ('warnings:\n' + '\n'.join(warnings_list)) + '\n'
        if graph is not None:
            s += f'''Number of nodes in Computation Graph: {graph.num_nodes}
'''
        s += f'''Number of stages: {num_real_stages}
'''
        if num_dummy_stages:
            s += f'''n_partitions:{n_partitions}, num_dummy_stages:{num_dummy_stages}
'''
            s += f'''unique_stages_on_same_gpu: {unique_stages_on_same_gpu}
'''
            s += f'''"stage_to_device_map": {pipeline_representation_stage_to_device_map},
'''
        s += f'''backward times {('do not ' if (not recomputation) else '')}include recomputation
'''
        if async_pipeline and recomputation:
            s += f'''Analysis for async_pipeline=True: last partition will not do recomputation.
'''
        s += theoretical_string
        s += f'''
Stage parameter count:
{s_param_count}'''
        if s_gpu_param_count:
            s += f'''
GPU parameter count:
{s_gpu_param_count}'''
        with_comm_str = 'with' if add_comm_times_to_balance else 'without'
        s += f'''
real times are based on real measurements of execution time ({with_comm_str} communication) of generated partitions ms
'''
        s += f'''forward {rounddict(real_f_times)}
backward {rounddict(real_b_times)}
'''
        if PRINT_VAR_STD:
            s += f'''std of real execution times
'''
            s += f'''forward{rounddict(f_std)}
backward{rounddict(b_std)}
'''
        if UTILIZATION_SLOWDOWN_SPEEDUP:
            s += f'''
Analysis for T = (1-R)fwd + R*bwd:
'''
            s += f'''
Pipeline Slowdown: (compared to sequential execution with no communication, and same recompute policy)
'''
            s += f'''forward {real_f_slowdown:.3f}
backward {real_b_slowdown:.3f}
'''
            s += f'''
Expected utilization by partition
'''
            s += f'''forward {real_f_utilization}
backward {real_b_utilization}
'''
            s += f'''
worstcase: bwd: {max(real_b_times.values()):.3f} fwd: {max(real_f_times.values()):.3f}'''
            s += f'''
Expected speedup for {num_real_stages} partitions is: {expected_speedup:.3f}'''
            s += f'''
Assuming bandwidth of {bw_GBps} GBps between GPUs
'''
            s += f'''
communication volumes size of activations of each partition
'''
            for (idx, volume) in comm_volume_str.items():
                s += f'''{idx}: {volume}
'''
            s += f'''
Compuatation Communication ratio (comp/(comp+comm)):
'''
            s += f'''forward {comp_comm_ratio_f}
backward {comp_comm_ratio_b}
'''
        if PRINT_1F1B:
            s += f'''
Analysis for T = fwd + bwd:
{s_fwd_plus_backward}'''
            if seq_success:
                s += f'''
expected_speedup_compared_to_seq_no_recomp_no_comm: {expected_speedup_compared_to_seq_no_comm:.3f}'''
        data_parallel_analysis(TRY_ASGD_ANALYSIS, TRY_SSGD_ANALYSIS, bw_GBps, expected_speedup, num_real_stages, sample_save, sequential_model, verbose, config)
        if torch.cuda.is_available():
            # BUG FIX: the original divided by `.0` (i.e. 0.0), raising
            # ZeroDivisionError whenever CUDA is available.
            # torch.cuda.max_memory_allocated() returns bytes; report GB
            # (1e9 assumed -- confirm 1e9 vs 2**30 against project convention).
            s += f'''
Analysis max cuda memory used {(max_memory_allocated / 1e9):.2f}GB'''
        print(s)
    else:
        s = ''
    # Negated worst-case per-stage fwd+bwd time (communication ignored):
    # larger (closer to zero) is better.
    metric_to_maximize = -fwd_plus_backward['pipeline_no_comm']['worstcase']
    warnings.warn('ignoring communication in metric_to_maximize')
    return (metric_to_maximize, s)
def F(state_m, adjoint_m, u, v, geometry):
    """Weak form of the coupled state/adjoint system on geometry.dx.

    Splits the mixed state (y, z) and adjoint (p, q) functions and assembles
    the sum of the two coupled variational residuals with sources u and v.
    """
    (y_m, z_m) = split(state_m)
    (p_m, q_m) = split(adjoint_m)
    dx = geometry.dx
    # Terms are added left-to-right, exactly mirroring the original
    # single-expression grouping.
    residual = inner(grad(y_m), grad(p_m)) * dx
    residual = residual + (z_m * p_m) * dx
    residual = residual - (u * p_m) * dx
    residual = residual + inner(grad(z_m), grad(q_m)) * dx
    residual = residual + (y_m * q_m) * dx
    residual = residual - (v * q_m) * dx
    return residual
class Conv1_1_Block(nn.Module):
    """Applies one 1x1-conv branch per input and returns their beta-weighted sum.

    Branch weights come from a softmax over the stacked betas, so they sum to 1
    across branches.
    """

    def __init__(self, in_chs, block_ch):
        super(Conv1_1_Block, self).__init__()
        # One branch per input channel count, registered in input order.
        branches = [Conv1_1_Branch(channels, block_ch) for channels in in_chs]
        self.conv1_1_branches = nn.ModuleList(branches)

    def forward(self, inputs, betas, block_sub_obj):
        """Return (weighted sum of branch outputs, [block_sub_obj, 0])."""
        branch_weights = F.softmax(torch.stack(betas), dim=-1)
        weighted_outputs = (weight * branch(data) for (data, branch, weight) in zip(inputs, self.conv1_1_branches, branch_weights))
        return (sum(weighted_outputs), [block_sub_obj, 0])
def resample_subdir(data_dir, data_subdir, out_dir, target_sr):
    """Resample every .wav under data_dir/data_subdir to target_sr Hz via sox.

    Output files keep their names under out_dir/data_subdir (created if
    missing); existing outputs and non-.wav files are skipped with a message.
    """
    print(f'resampling {data_subdir}')
    transformer = sox.Transformer()
    transformer.set_output_format(rate=target_sr)
    out_sub_dir = os.path.join(out_dir, data_subdir)
    if not os.path.isdir(out_sub_dir):
        os.makedirs(out_sub_dir)
    src_dir = os.path.join(data_dir, data_subdir)
    for file in os.listdir(src_dir):
        out_path = os.path.join(out_sub_dir, file)
        in_path = os.path.join(src_dir, file)
        # Skip already-converted outputs, then anything that is not a .wav.
        if os.path.isfile(out_path):
            print(f'{out_path} already exists.')
            continue
        if not file.lower().endswith('.wav'):
            print(f'{in_path}: invalid file type.')
            continue
        if transformer.build_file(input_filepath=in_path, output_filepath=out_path):
            print(f'Succesfully saved {in_path} to {out_path}')
# NOTE(review): the two lines below look like truncated decorators -- the
# leading "@<module>" appears to have been lost (e.g. "@test_utils.run_in_serial"
# and "@test_utils.test(arch=...)"); as written this is not valid Python.
# Confirm against the original file before relying on this block.
.run_in_serial
_utils.test(arch=supported_archs_taichi_ndarray)
def test_ndarray_in_python_func():
    # Allocate a large Taichi ndarray inside a plain Python function and call
    # it many times; presumably a leak/stress regression test -- TODO confirm.
    def test():
        z = ti.ndarray(float, (8192, 8192))
    for i in range(300):
        test()
# NOTE(review): "_flax" below looks like a truncated decorator (probably
# "@require_flax" with its prefix lost during extraction); as written it is a
# bare name reference. Confirm against the original file.
_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Tests for the Flax ViT model family (base model + classification head)."""

    # Classes under test; empty tuple when flax is unavailable so the mixin
    # effectively skips.
    all_model_classes = ((FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ())

    def setUp(self) -> None:
        """Create the shared model and config testers."""
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """Run the shared ViTConfig sanity checks."""
        self.config_tester.run_common_tests()

    def test_attention_outputs(self):
        """Attentions must be returned per layer with shape (heads, seq, seq)."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # ViT sequence length = number of patches + 1 (prepended [CLS] token).
        num_patches = ((config.image_size // config.patch_size) ** 2)
        seq_length = (num_patches + 1)
        for model_class in self.all_model_classes:
            # First: request attentions via the call kwarg.
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # Second: same request via the config flag instead of the kwarg.
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, seq_length, seq_length])
            out_len = len(outputs)
            # Third: also requesting hidden states must add exactly one field.
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            added_hidden_states = 1
            self.assertEqual((out_len + added_hidden_states), len(outputs))
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(list(attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, seq_length, seq_length])

    def test_forward_signature(self):
        """The model call signature must start with 'pixel_values'."""
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        """jitted and non-jitted calls must produce outputs of equal shapes."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for (jitted_output, output) in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_hidden_states_output(self):
        """Hidden states: num_layers + 1 entries of shape (seq, hidden_size)."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            num_patches = ((config.image_size // config.patch_size) ** 2)
            seq_length = (num_patches + 1)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # +1 for the embedding output before the first layer.
            self.assertEqual(len(hidden_states), (self.model_tester.num_hidden_layers + 1))
            self.assertListEqual(list(hidden_states[0].shape[(- 2):]), [seq_length, self.model_tester.hidden_size])

        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Request via the call kwarg, then via the config flag.
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_from_pretrained(self):
        """Pretrained checkpoints must load and run on a dummy image batch."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/vit-base-patch16-224')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def error(s, *args, **kwargs):
    """Clear the current terminal line on stderr, then print s.format(*args, **kwargs).

    If a truthy 'flush' keyword is supplied, stderr is flushed afterwards
    (the kwarg is also forwarded to str.format, matching the original).
    """
    stream = sys.stderr
    # '\r' returns to column 0; ESC[K erases to end of line.
    print('\r\x1b[K', end='', file=stream)
    print(s.format(*args, **kwargs), file=stream)
    if kwargs.get('flush'):
        stream.flush()
def get_args():
    """Parse command-line arguments for the segments+labels -> RTTM converter.

    Positional: segments, labels, rttm_file. Optional: --rttm-channel (int,
    default 0). Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='This script converts a segments and labels file\n to a NIST RTTM file. It handles overlapping segments (e.g. the\n output of a sliding-window diarization system).')
    # Three required positional file arguments, in order.
    for arg_name, help_text in (('segments', 'Input segments file'), ('labels', 'Input labels file'), ('rttm_file', 'Output RTTM file')):
        parser.add_argument(arg_name, type=str, help=help_text)
    parser.add_argument('--rttm-channel', type=int, default=0, help='The value passed into the RTTM channel field. Only affects the format of the RTTM file.')
    return parser.parse_args()
def generate_analogy_questions(analogy_questions_file):
    """Generate LLVM-IR analogy questions and write them to *analogy_questions_file*.

    Each section prints its description, emits one family of analogies through
    the module-level writer helpers (write_analogy / write_ready_analogy /
    write_analogy_from_pairs), and accumulates that section's count.

    Returns the total number of analogies generated.
    """
    print('\tPrinting analogy questions to file ', analogy_questions_file)
    tot_analogies = 0
    # Truncate/create the output file; the writer helpers append to it.
    open(analogy_questions_file, 'w').close()
    descr = 'Integer binary operations (type semantic analogy)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_type_int_bin_op(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Floating point binary operations (type semantic analogy)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_type_flpt_bin_op(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Floating point / Integer binary operations (type semantic analogy)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_type_flpt_int_bin_op(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Insertelement - Extractelement operations (type)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_insert_extract_type(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Floating point ops (fast-math analogies)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_type_flpt_bin_op_opt(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Insertelement - Extractelement operations (index analogy)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_insert_extract_index(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Insertvalue - Extractvalue operations (index analogy)'
    print('\tGenerating:', descr)
    anlgies = [['<%ID> = insertvalue { double, double } undef, double <%ID>, 0', '<%ID> = insertvalue { double, double } <%ID>, double <%ID>, 1', '<%ID> = extractvalue { double, double } <%ID>, 0', '<%ID> = extractvalue { double, double } <%ID>, 1'], ['<%ID> = insertvalue { float*, i64 } undef, float* <%ID>, 0', '<%ID> = extractvalue { float*, i64 } <%ID>, 0', '<%ID> = insertvalue { float*, i64 } <%ID>, i64 <%ID>, 1', '<%ID> = extractvalue { float*, i64 } <%ID>, 1'], ['<%ID> = insertvalue { i32*, i64 } undef, i32* <%ID>, 0', '<%ID> = extractvalue { i32*, i64 } <%ID>, 0', '<%ID> = insertvalue { i32*, i64 } <%ID>, i64 <%ID>, 1', '<%ID> = extractvalue { i32*, i64 } <%ID>, 1'], ['<%ID> = insertvalue { i8*, i32 } undef, i8* <%ID>, 0', '<%ID> = extractvalue { i8*, i32 } <%ID>, 0', '<%ID> = insertvalue { i8*, i32 } <%ID>, i32 <%ID>, 1', '<%ID> = extractvalue { i8*, i32 } <%ID>, 1']]
    num_anlgy = write_ready_analogy(anlgies, descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Bitcast x to y - y to x (inverse operations analogy)'
    print('\tGenerating:', descr)
    anlgy_pair = [['<%ID> = bitcast <2 x double>* <%ID> to { double, double }*', '<%ID> = bitcast { double, double }* <%ID> to <2 x double>*'], ['<%ID> = bitcast <2 x i64>* <%ID> to { double, double }*', '<%ID> = bitcast { double, double }* <%ID> to <2 x i64>*'], ['<%ID> = bitcast <2 x float>* <%ID> to { float, float }*', '<%ID> = bitcast { float, float }* <%ID> to <2 x float>*'], ['<%ID> = bitcast i8* <%ID> to { double, double }*', '<%ID> = bitcast { double, double }* <%ID> to i8*'], ['<%ID> = bitcast i8* <%ID> to { opaque*, opaque* }*', '<%ID> = bitcast { opaque*, opaque* }* <%ID> to i8*'], ['<%ID> = bitcast { <{ opaque, opaque*, opaque*, i8, [7 x i8] }>* }** <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to { <{ opaque, opaque*, opaque*, i8, [7 x i8] }>* }**'], ['<%ID> = bitcast { double, double }* <%ID> to <2 x double>*', '<%ID> = bitcast <2 x double>* <%ID> to { double, double }*'], ['<%ID> = bitcast { double, double }* <%ID> to <2 x i64>*', '<%ID> = bitcast <2 x i64>* <%ID> to { double, double }*'], ['<%ID> = bitcast { double, double }* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to { double, double }*'], ['<%ID> = bitcast { float, float }* <%ID> to <2 x float>*', '<%ID> = bitcast <2 x float>* <%ID> to { float, float }*'], ['<%ID> = bitcast { float, float }* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to { float, float }*'], ['<%ID> = bitcast { i64*, i64 }* <%ID> to { i64*, i64 }*', '<%ID> = bitcast { { i64*, i64 } }* <%ID> to { i64*, i64 }*'], ['<%ID> = bitcast { i8 }* <%ID> to { { { { i32*, i64 } } } }*', '<%ID> = bitcast { { { { i32*, i64 } } } }* <%ID> to { i8 }*'], ['<%ID> = bitcast { i8 }* <%ID> to { { { double*, i64, i64 } } }*', '<%ID> = bitcast { { { double*, i64, i64 } } }* <%ID> to { i8 }*'], ['<%ID> = bitcast { opaque*, opaque* }* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to { opaque*, opaque* }*'], ['<%ID> = bitcast { { i32*, i64, i64 } }* <%ID> to { { { i32*, i64, i64 } } }*', '<%ID> = bitcast { { { i32*, i64, i64 } } }* <%ID> to { { i32*, i64, i64 } }*'], ['<%ID> = bitcast { { i8* }, i64, { i64, [8 x i8] } }* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to { { i8* }, i64, { i64, [8 x i8] } }*'], ['<%ID> = bitcast { { { double*, i64, i64 } } }* <%ID> to i8*', '<%ID> = bitcast { i8 }* <%ID> to { { { double*, i64, i64 } } }*'], ['<%ID> = bitcast { { { { i32*, i64 } } } }* <%ID> to { i8 }*', '<%ID> = bitcast { i8 }* <%ID> to { { { { i32*, i64 } } } }*'], ['<%ID> = bitcast i8* <%ID> to { { { { { { i64, i64, i8* } } } } } }*', '<%ID> = bitcast { { { { { { i64, i64, i8* } } } } } }* <%ID> to i8*'], ['<%ID> = bitcast i8* <%ID> to { { { { { { i64, i64, i8* } } } } } }**', '<%ID> = bitcast { { { { { { i64, i64, i8* } } } } } }** <%ID> to i8*'], ['<%ID> = bitcast i8** <%ID> to { { { { { { i64, i64, i8* } } } } } }**', '<%ID> = bitcast { { { { { { i64, i64, i8* } } } } } }** <%ID> to i8**'], ['<%ID> = bitcast <2 x double>* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to <2 x double>*'], ['<%ID> = bitcast <16 x i8> <%ID> to <2 x i64>', '<%ID> = bitcast <2 x i64> <%ID> to <16 x i8>'], ['<%ID> = bitcast <2 x double> <%ID> to <4 x float>', '<%ID> = bitcast <4 x float> <%ID> to <2 x double>'], ['<%ID> = bitcast <2 x i64> <%ID> to <4 x i32>', '<%ID> = bitcast <4 x i32> <%ID> to <2 x i64>'], ['<%ID> = bitcast <2 x i64> <%ID> to <16 x i8>', '<%ID> = bitcast <16 x i8> <%ID> to <2 x i64>'], ['<%ID> = bitcast <4 x double> <%ID> to <4 x i64>', '<%ID> = bitcast <4 x i64> <%ID> to <4 x double>'], ['<%ID> = bitcast <4 x float>* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to <4 x float>*'], ['<%ID> = bitcast <4 x float> <%ID> to <2 x double>', '<%ID> = bitcast <2 x double> <%ID> to <4 x float>'], ['<%ID> = bitcast <4 x float> <%ID> to <4 x i32>', '<%ID> = bitcast <4 x i32> <%ID> to <4 x float>'], ['<%ID> = bitcast <4 x i32> <%ID> to <2 x i64>', '<%ID> = bitcast <2 x i64> <%ID> to <4 x i32>'], ['<%ID> = bitcast <4 x i32> <%ID> to <16 x i8>', '<%ID> = bitcast <16 x i8> <%ID> to <4 x i32>'], ['<%ID> = bitcast <4 x i32> <%ID> to <4 x float>', '<%ID> = bitcast <4 x float> <%ID> to <4 x i32>'], ['<%ID> = bitcast <4 x i64> <%ID> to <4 x double>', '<%ID> = bitcast <4 x double> <%ID> to <4 x i64>'], ['<%ID> = bitcast <8 x float> <%ID> to <8 x i32>', '<%ID> = bitcast <8 x i32> <%ID> to <8 x float>'], ['<%ID> = bitcast <8 x i32> <%ID> to <8 x float>', '<%ID> = bitcast <8 x float> <%ID> to <8 x i32>'], ['<%ID> = bitcast double* <%ID> to i64*', '<%ID> = bitcast i64* <%ID> to double*'], ['<%ID> = bitcast double* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to double*'], ['<%ID> = bitcast float <%ID> to i32', '<%ID> = bitcast i32 <%ID> to float'], ['<%ID> = bitcast double <%ID> to i64', '<%ID> = bitcast i64 <%ID> to double'], ['<%ID> = bitcast float* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to float*'], ['<%ID> = bitcast i16* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to i16*'], ['<%ID> = bitcast i32 <%ID> to float', '<%ID> = bitcast float <%ID> to i32'], ['<%ID> = bitcast i32* <%ID> to <2 x i64>*', '<%ID> = bitcast <2 x i64>* <%ID> to i32*'], ['<%ID> = bitcast i32** <%ID> to i64*', '<%ID> = bitcast i64* <%ID> to i32**'], ['<%ID> = bitcast i32* <%ID> to i64*', '<%ID> = bitcast i64* <%ID> to i32*'], ['<%ID> = bitcast i32* <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to i32*'], ['<%ID> = bitcast i32** <%ID> to i8*', '<%ID> = bitcast i8* <%ID> to i32**'], ['<%ID> = bitcast i32** <%ID> to i8**', '<%ID> = bitcast i8** <%ID> to i32**'], ['<%ID> = bitcast i32* <%ID> to i8**', '<%ID> = bitcast i8** <%ID> to i32*']]
    num_anlgy = write_analogy_from_pairs(anlgy_pair, descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Arithmetic integer binary operations (inverse operations analogy)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_op_inv_a(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Arithmetic flpt binary operations (inverse operations analogy)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_type_op_inv_b(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Trunc - s/zext (inverse operations analogy)'
    print('\tGenerating:', descr)
    anlgy_pair = [['<%ID> = trunc <4 x i64> <%ID> to <4 x i32>', '<%ID> = sext <4 x i32> <%ID> to <4 x i64>'], ['<%ID> = trunc i128 <%ID> to i64', '<%ID> = sext i64 <%ID> to i128'], ['<%ID> = trunc i128 <%ID> to i64', '<%ID> = zext i64 <%ID> to i128'], ['<%ID> = trunc i16 <%ID> to i8', '<%ID> = zext i8 <%ID> to i16'], ['<%ID> = trunc i32 <%ID> to i16', '<%ID> = sext i16 <%ID> to i32'], ['<%ID> = trunc i32 <%ID> to i16', '<%ID> = zext i16 <%ID> to i32'], ['<%ID> = trunc i32 <%ID> to i8', '<%ID> = sext i8 <%ID> to i32'], ['<%ID> = trunc i32 <%ID> to i8', '<%ID> = zext i8 <%ID> to i32'], ['<%ID> = trunc i64 <%ID> to i16', '<%ID> = sext i16 <%ID> to i64'], [' <%ID> = trunc i64 <%ID> to i16', '<%ID> = zext i16 <%ID> to i64'], [' <%ID> = trunc i64 <%ID> to i32', '<%ID> = sext i32 <%ID> to i64'], ['<%ID> = trunc i64 <%ID> to i32', '<%ID> = zext i32 <%ID> to i64'], ['<%ID> = trunc i64 <%ID> to i8', '<%ID> = sext i8 <%ID> to i64'], ['<%ID> = trunc i8 <%ID> to i1', '<%ID> = zext i1 <%ID> to i8']]
    num_anlgy = write_analogy_from_pairs(anlgy_pair, descr, analogy_questions_file)
    # BUG FIX: the original incremented tot_analogies BEFORE recomputing
    # num_anlgy for this section (double-counting the previous section) and
    # never added this section's own count. Accumulate after writing, like
    # every other section.
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Fptou/si - s/uitofp (inverse operations analogy)'
    print('\tGenerating:', descr)
    anlgy_pair = [['<%ID> = fptoui float <%ID> to i64', '<%ID> = uitofp i64 <%ID> to float'], ['<%ID> = fptosi double <%ID> to i32', '<%ID> = sitofp i32 <%ID> to double'], ['<%ID> = fptosi double <%ID> to i64', '<%ID> = sitofp i64 <%ID> to double'], ['<%ID> = fptosi float <%ID> to i32', '<%ID> = sitofp i32 <%ID> to float']]
    num_anlgy = write_analogy_from_pairs(anlgy_pair, descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Inttoptr - ptrtoint (inverse operations analogy)'
    print('\tGenerating:', descr)
    anlgy_pair = [['<%ID> = inttoptr i64 <%ID> to <{ opaque, opaque*, opaque*, i8, [7 x i8] }>*', '<%ID> = ptrtoint <{ opaque, opaque*, opaque*, i8, [7 x i8] }>* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { <{ opaque, opaque*, opaque*, i8, [7 x i8] }>* }*', '<%ID> = ptrtoint { <{ opaque, opaque*, opaque*, i8, [7 x i8] }>* }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { double, double }*', '<%ID> = ptrtoint { double, double }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { float, float }*', '<%ID> = ptrtoint { float, float }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { i32, i32, i32, { [4 x i8*] }, { [4 x i8*] }, { opaque*, { { i32 (...)**, i64 }, i64 }* }, i32, opaque*, opaque* }**', '<%ID> = ptrtoint { i32, i32, i32, { [4 x i8*] }, { [4 x i8*] }, { opaque*, { { i32 (...)**, i64 }, i64 }* }, i32, opaque*, opaque* }** <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { i64, opaque, { i64 }, { i64 }, { { opaque*, opaque* } }, { i64 }, [8 x i8] }*', '<%ID> = ptrtoint { i64, opaque, { i64 }, { i64 }, { { opaque*, opaque* } }, { i64 }, [8 x i8] }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { opaque*, opaque* }*', '<%ID> = ptrtoint { opaque*, opaque* }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { { { double*, i64 } } }*', '<%ID> = ptrtoint { { { double*, i64 } } }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { { { double*, i64, i64 } } }*', '<%ID> = ptrtoint { { { double*, i64, i64 } } }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { { { i32*, i64 } } }*', '<%ID> = ptrtoint { { { i32*, i64 } } }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to { { { { { { i64, i64, i8* } } } } } }*', '<%ID> = ptrtoint { { { { { { i64, i64, i8* } } } } } }* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to double*', '<%ID> = ptrtoint double* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to float*', '<%ID> = ptrtoint float* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to i32*', '<%ID> = ptrtoint i32* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to i64*', '<%ID> = ptrtoint i64* <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to i8**', '<%ID> = ptrtoint i8** <%ID> to i64'], ['<%ID> = inttoptr i64 <%ID> to i8*', '<%ID> = ptrtoint i8* <%ID> to i64']]
    num_anlgy = write_analogy_from_pairs(anlgy_pair, descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Structure - Vector equivalents (a)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_equiv_types_a(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Structure - Vector equivalents (b)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_equiv_types_b(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    descr = 'Structure - Vector equivalents (c)'
    print('\tGenerating:', descr)
    num_anlgy = write_analogy(gen_anlgy_equiv_types_c(), descr, analogy_questions_file)
    tot_analogies += num_anlgy
    print('\tnumber generated analogies: {:>8,}'.format(num_anlgy))
    print('\tAnalogies printed: {:>10,d} analogies were generated'.format(tot_analogies))
    return tot_analogies
def CreateMultiBoxHead(net, data_layer='data', num_classes=None, from_layers=None, use_objectness=False, normalizations=None, use_batchnorm=True, lr_mult=1, use_scale=True, min_sizes=None, max_sizes=None, prior_variance=None, aspect_ratios=None, steps=None, img_height=0, img_width=0, share_location=True, flip=True, clip=True, offset=0.5, inter_layer_depth=None, kernel_size=1, pad=0, conf_postfix='', loc_postfix='', **bn_param):
    """Attach SSD multibox prediction heads to each source layer of `net`.

    For every layer in `from_layers` this adds an optional Normalize layer,
    an optional 3x3 intermediate conv, then a localization conv, a confidence
    conv and a PriorBox layer. Per-layer outputs are permuted, flattened and
    concatenated into mbox_loc / mbox_conf / mbox_priorbox (plus
    mbox_objectness when `use_objectness` is True).

    Returns:
        list of the concatenated mbox layers, in that order.

    Note: the formerly mutable list defaults are now None sentinels with the
    same effective values (backward compatible; avoids the shared-mutable-
    default pitfall).
    """
    # FIX: replace mutable list-literal defaults with None sentinels.
    from_layers = [] if from_layers is None else from_layers
    normalizations = [] if normalizations is None else normalizations
    min_sizes = [] if min_sizes is None else min_sizes
    max_sizes = [] if max_sizes is None else max_sizes
    prior_variance = [0.1] if prior_variance is None else prior_variance
    aspect_ratios = [] if aspect_ratios is None else aspect_ratios
    steps = [] if steps is None else steps
    inter_layer_depth = [] if inter_layer_depth is None else inter_layer_depth
    assert num_classes, 'must provide num_classes'
    assert num_classes > 0, 'num_classes must be positive number'
    if normalizations:
        assert len(from_layers) == len(normalizations), 'from_layers and normalizations should have same length'
    assert len(from_layers) == len(min_sizes), 'from_layers and min_sizes should have same length'
    if max_sizes:
        assert len(from_layers) == len(max_sizes), 'from_layers and max_sizes should have same length'
    if aspect_ratios:
        assert len(from_layers) == len(aspect_ratios), 'from_layers and aspect_ratios should have same length'
    if steps:
        assert len(from_layers) == len(steps), 'from_layers and steps should have same length'
    net_layers = net.keys()
    assert data_layer in net_layers, "data_layer is not in net's layers"
    if inter_layer_depth:
        assert len(from_layers) == len(inter_layer_depth), 'from_layers and inter_layer_depth should have same length'
    num = len(from_layers)
    priorbox_layers = []
    loc_layers = []
    conf_layers = []
    objectness_layers = []
    for i in range(0, num):
        from_layer = from_layers[i]
        # Optional L2 normalization of the source layer (e.g. conv4_3 in SSD);
        # a value of -1 disables it for that layer.
        if normalizations:
            if normalizations[i] != -1:
                norm_name = '{}_norm'.format(from_layer)
                net[norm_name] = L.Normalize(net[from_layer], scale_filler=dict(type='constant', value=normalizations[i]), across_spatial=False, channel_shared=False)
                from_layer = norm_name
        # Optional 3x3 intermediate conv ahead of the prediction heads.
        if inter_layer_depth:
            if inter_layer_depth[i] > 0:
                inter_name = '{}_inter'.format(from_layer)
                ConvBNLayer(net, from_layer, inter_name, use_bn=use_batchnorm, use_relu=True, lr_mult=lr_mult, num_output=inter_layer_depth[i], kernel_size=3, pad=1, stride=1, **bn_param)
                from_layer = inter_name
        min_size = min_sizes[i]
        if type(min_size) is not list:
            min_size = [min_size]
        aspect_ratio = []
        if len(aspect_ratios) > i:
            aspect_ratio = aspect_ratios[i]
            if type(aspect_ratio) is not list:
                aspect_ratio = [aspect_ratio]
        max_size = []
        if len(max_sizes) > i:
            max_size = max_sizes[i]
            if type(max_size) is not list:
                max_size = [max_size]
            if max_size:
                assert len(max_size) == len(min_size), 'max_size and min_size should have same length.'
        # Priors per location: one per min_size (+1 with max_size), plus one
        # per aspect ratio; `flip` doubles the aspect-ratio priors.
        if max_size:
            num_priors_per_location = (2 + len(aspect_ratio)) * len(min_size)
        else:
            num_priors_per_location = (1 + len(aspect_ratio)) * len(min_size)
        if flip:
            num_priors_per_location += len(aspect_ratio) * len(min_size)
        step = []
        if len(steps) > i:
            step = steps[i]
        # Localization head: 4 box coordinates per prior (per class when
        # locations are not shared across classes).
        name = '{}_mbox_loc{}'.format(from_layer, loc_postfix)
        num_loc_output = num_priors_per_location * 4
        if not share_location:
            num_loc_output *= num_classes
        ConvBNLayer(net, from_layer, name, use_bn=use_batchnorm, use_relu=False, lr_mult=lr_mult, num_output=num_loc_output, kernel_size=kernel_size, pad=pad, stride=1, **bn_param)
        permute_name = '{}_perm'.format(name)
        net[permute_name] = L.Permute(net[name], order=[0, 2, 3, 1])
        flatten_name = '{}_flat'.format(name)
        net[flatten_name] = L.Flatten(net[permute_name], axis=1)
        loc_layers.append(net[flatten_name])
        # Confidence head: one score per class per prior.
        name = '{}_mbox_conf{}'.format(from_layer, conf_postfix)
        num_conf_output = num_priors_per_location * num_classes
        ConvBNLayer(net, from_layer, name, use_bn=use_batchnorm, use_relu=False, lr_mult=lr_mult, num_output=num_conf_output, kernel_size=kernel_size, pad=pad, stride=1, **bn_param)
        permute_name = '{}_perm'.format(name)
        net[permute_name] = L.Permute(net[name], order=[0, 2, 3, 1])
        flatten_name = '{}_flat'.format(name)
        net[flatten_name] = L.Flatten(net[permute_name], axis=1)
        conf_layers.append(net[flatten_name])
        # Prior boxes for this source layer.
        name = '{}_mbox_priorbox'.format(from_layer)
        net[name] = L.PriorBox(net[from_layer], net[data_layer], min_size=min_size, clip=clip, variance=prior_variance, offset=offset)
        if max_size:
            net.update(name, {'max_size': max_size})
        if aspect_ratio:
            net.update(name, {'aspect_ratio': aspect_ratio, 'flip': flip})
        if step:
            net.update(name, {'step': step})
        if img_height != 0 and img_width != 0:
            if img_height == img_width:
                net.update(name, {'img_size': img_height})
            else:
                net.update(name, {'img_h': img_height, 'img_w': img_width})
        priorbox_layers.append(net[name])
        # Optional binary objectness head (object vs. background).
        if use_objectness:
            name = '{}_mbox_objectness'.format(from_layer)
            num_obj_output = num_priors_per_location * 2
            ConvBNLayer(net, from_layer, name, use_bn=use_batchnorm, use_relu=False, lr_mult=lr_mult, num_output=num_obj_output, kernel_size=kernel_size, pad=pad, stride=1, **bn_param)
            permute_name = '{}_perm'.format(name)
            net[permute_name] = L.Permute(net[name], order=[0, 2, 3, 1])
            flatten_name = '{}_flat'.format(name)
            net[flatten_name] = L.Flatten(net[permute_name], axis=1)
            objectness_layers.append(net[flatten_name])
    # Concatenate the per-layer predictions across all source layers.
    mbox_layers = []
    name = 'mbox_loc'
    net[name] = L.Concat(*loc_layers, axis=1)
    mbox_layers.append(net[name])
    name = 'mbox_conf'
    net[name] = L.Concat(*conf_layers, axis=1)
    mbox_layers.append(net[name])
    name = 'mbox_priorbox'
    net[name] = L.Concat(*priorbox_layers, axis=2)
    mbox_layers.append(net[name])
    if use_objectness:
        name = 'mbox_objectness'
        net[name] = L.Concat(*objectness_layers, axis=1)
        mbox_layers.append(net[name])
    return mbox_layers
def draw_net(config: object, genome: object, view: object=False, filename: object=None, node_names: object=None, show_disabled: object=True, prune_unused: object=False, node_colors: object=None, fmt: object='svg') -> object:
    """Render *genome* as a graphviz Digraph and return the Digraph object.

    Inputs are boxes, outputs are filled circles; hidden nodes are drawn only
    when reachable from an output (if ``prune_unused``). Returns None when
    graphviz is not installed.
    """
    if graphviz is None:
        warnings.warn('This display is not available due to a missing optional dependency (graphviz)')
        return
    node_names = {} if node_names is None else node_names
    assert type(node_names) is dict
    node_colors = {} if node_colors is None else node_colors
    assert type(node_colors) is dict
    base_attrs = {'shape': 'circle', 'fontsize': '9', 'height': '0.2', 'width': '0.2', 'length': '0.2'}
    dot = graphviz.Digraph(format=fmt, node_attr=base_attrs)
    # Input nodes: gray boxes.
    inputs = set()
    for key in config.genome_config.input_keys:
        inputs.add(key)
        attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(key, 'lightgray')}
        dot.node(node_names.get(key, str(key)), _attributes=attrs)
    # Output nodes: blue filled circles.
    outputs = set()
    for key in config.genome_config.output_keys:
        outputs.add(key)
        attrs = {'style': 'filled', 'fillcolor': node_colors.get(key, 'lightblue')}
        dot.node(node_names.get(key, str(key)), _attributes=attrs)
    if prune_unused:
        # Backward reachability from the outputs over the visible connections.
        connections = {cg.key for cg in genome.connections.values() if cg.enabled or show_disabled}
        used_nodes = copy.copy(outputs)
        pending = copy.copy(outputs)
        while pending:
            frontier = set()
            for src, dst in connections:
                if dst in pending and src not in used_nodes:
                    frontier.add(src)
                    used_nodes.add(src)
            pending = frontier
    else:
        used_nodes = set(genome.nodes.keys())
    # Hidden nodes (inputs/outputs were already emitted above).
    for n in used_nodes:
        if n in inputs or n in outputs:
            continue
        dot.node(str(n), _attributes={'style': 'filled', 'fillcolor': node_colors.get(n, 'white')})
    # Edges: green for positive weights, red for negative; dotted if disabled.
    # NOTE: edges are drawn from the full connection set even when node pruning
    # is enabled, mirroring the original behavior.
    for cg in genome.connections.values():
        if cg.enabled or show_disabled:
            src, dst = cg.key
            edge_attrs = {
                'style': 'solid' if cg.enabled else 'dotted',
                'color': 'green' if cg.weight > 0 else 'red',
                'penwidth': str(0.1 + abs(cg.weight / 5.0)),
            }
            dot.edge(node_names.get(src, str(src)), node_names.get(dst, str(dst)), _attributes=edge_attrs)
    dot.render(filename, view=view)
    return dot
# FIX: the register call was a bare statement whose return value (the actual
# decorator) was discarded, so the class was never registered. Restore it as
# a decorator.
@_registry.register('google_qa_answer_helpful')
class GoogleQuestQALabelHelpful(GoogleQuestQALabel):
    """Label spec for the 'answer_helpful' target of the Google QUEST Q&A task."""

    def label_columns(self):
        # Single regression target column.
        return ['answer_helpful']

    def label_types(self):
        return [_NUMERICAL]
# FIX: the parametrize line lost its '@pytest.mark' prefix, leaving a bare
# '.parametrize(...)' that is a syntax error. Restore the decorator.
@pytest.mark.parametrize('input_meters, expected_kilometers', [(1000, 1), (10000, 10)])
def test__meters_to_kilometers(h3_tess, input_meters, expected_kilometers):
    # 1 km == 1000 m; the conversion must be exact for these inputs.
    assert h3_tess._meters_to_kilometers(input_meters) == expected_kilometers
class ImageLabelParse():
    """Pairs a grayscale image with its label dict and renders the labels."""

    def __init__(self, image, labels):
        self.image = image
        self.labels = labels

    def get_labeled_image(self, **kwargs):
        """Return a BGR copy of the image with every label and its position drawn."""
        canvas = cv2.cvtColor(self.image, cv2.COLOR_GRAY2BGR)
        for entry in self.labels.values():
            draw_label(canvas, entry, **kwargs)
            draw_point(canvas, entry.position)
        return canvas
def main():
    """Parse command-line options and run preprocessing via process()."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-root', '-d', required=True, type=str, help='data root with sub-folders for each language <root>/<src_lang>')
    # FIX: this call was pointlessly wrapped in a one-element tuple; also
    # dropped the dead `default='unigram'` — argparse ignores a default when
    # required=True, so behavior is unchanged.
    parser.add_argument('--vocab-type', required=True, type=str, choices=['bpe', 'unigram', 'char'])
    parser.add_argument('--vocab-size', default=1000, type=int)
    parser.add_argument('--src-lang', '-s', required=True, type=str)
    parser.add_argument('--tgt-lang', '-t', type=str)
    args = parser.parse_args()
    process(args)
class ObserverBase(ABC, nn.Module):
    """Base class for quantization observers.

    Subclasses record tensor statistics in ``forward`` and derive the
    quantization parameters in ``calculate_qparams``.
    """

    def __init__(self, dtype):
        super().__init__()
        # Target dtype of the quantized tensor.
        self.dtype = dtype

    def forward(self, x):
        pass

    def calculate_qparams(self, **kwargs):
        pass

    # Factory helper for partially-applied constructors (see _with_args).
    with_args = classmethod(_with_args)
def register_Ns3UeCapabilities_s_methods(root_module, cls):
    """Register constructors and public attributes for ns3::UeCapabilities_s bindings."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UeCapabilities_s const &', 'arg0')])
    # (attribute name, C++ type) pairs exposed as mutable instance attributes.
    attributes = (
        ('m_halfDuplex', 'bool'),
        ('m_intraSfHopping', 'bool'),
        ('m_resAllocType1', 'bool'),
        ('m_type2Sb1', 'bool'),
        ('m_ueCategory', 'uint8_t'),
    )
    for attr_name, cpp_type in attributes:
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
def drop_pai_model(datasource, model_name):
    """Drop an offline PAI model on MaxCompute if it exists (via odpscmd)."""
    user, passwd, address, database = MaxComputeConnection.get_uri_parts(datasource)
    drop_stmt = 'drop offlinemodel if exists %s' % model_name
    odps_cmd = ['odpscmd', '-u', user, '-p', passwd, '--project', database, '--endpoint', address, '-e', drop_stmt]
    # check=True raises CalledProcessError when odpscmd exits non-zero.
    subprocess.run(odps_cmd, check=True)
def _resnet(arch, block, layers, **kwargs):
    """Build a ResNet from *block*/*layers*; *arch* is accepted for API symmetry but unused here."""
    return ResNet(block, layers, **kwargs)
class Generator(object):
    """DCGAN-style image generator (plain deconv stack or resnet variant).

    Maps a latent vector z to a tanh-activated image tensor of shape
    (batch_size, output_height, output_width, output_dim).
    """

    def __init__(self, name, is_train, norm='batch', activation='relu', batch_size=64, output_height=64, output_width=128, input_dim=64, output_dim=3, use_resnet=False):
        # BUG FIX: the original passed `name` as a second positional argument
        # to print(), so the literal '%s' was printed instead of the name.
        print(' [*] Init Generator %s' % name)
        self.name = name
        self._is_train = is_train
        self._norm = norm
        self._activation = activation
        self._batch_size = batch_size
        self._output_height = output_height
        self._output_width = output_width
        self._input_dim = input_dim
        self._output_dim = output_dim
        self._use_resnet = use_resnet
        # Flips to True after the first graph build so later calls reuse variables.
        self._reuse = False

    def _conv_out_size_same(self, size, stride):
        # Output size of a SAME-padded strided conv: ceil(size / stride).
        return int(math.ceil(float(size) / float(stride)))

    def __call__(self, z):
        if self._use_resnet:
            return self._resnet(z)
        else:
            return self._convnet(z)

    def _convnet(self, z):
        """Linear projection followed by four stride-2 deconv blocks."""
        with tf.variable_scope(self.name, reuse=self._reuse):
            (s_h, s_w) = (self._output_height, self._output_width)
            (s_h2, s_w2) = (self._conv_out_size_same(s_h, 2), self._conv_out_size_same(s_w, 2))
            (s_h4, s_w4) = (self._conv_out_size_same(s_h2, 2), self._conv_out_size_same(s_w2, 2))
            (s_h8, s_w8) = (self._conv_out_size_same(s_h4, 2), self._conv_out_size_same(s_w4, 2))
            (s_h16, s_w16) = (self._conv_out_size_same(s_h8, 2), self._conv_out_size_same(s_w8, 2))
            z_ = nn.linear(z, (((self._input_dim * 8) * s_h16) * s_w16), name='g_lin_0')
            h0 = tf.reshape(z_, [(- 1), s_h16, s_w16, (self._input_dim * 8)])
            h0 = nn.activation_fn(nn.norm(h0, self._norm), self._activation)
            h1 = nn.deconv_block(h0, [self._batch_size, s_h8, s_w8, (self._input_dim * 4)], 'g_dconv_1', 5, 2, self._is_train, self._reuse, self._norm, self._activation)
            h2 = nn.deconv_block(h1, [self._batch_size, s_h4, s_w4, (self._input_dim * 2)], 'g_dconv_2', 5, 2, self._is_train, self._reuse, self._norm, self._activation)
            h3 = nn.deconv_block(h2, [self._batch_size, s_h2, s_w2, self._input_dim], 'g_dconv_3', 5, 2, self._is_train, self._reuse, self._norm, self._activation)
            # Final block: no norm/activation; output squashed by tanh below.
            h4 = nn.deconv_block(h3, [self._batch_size, s_h, s_w, self._output_dim], 'g_dconv_4', 5, 2, self._is_train, self._reuse, None, None)
            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return tf.nn.tanh(h4)

    def _resnet(self, z):
        """Residual-block variant with explicit nearest-neighbor upsampling."""
        with tf.variable_scope(self.name, reuse=self._reuse):
            (s_h, s_w) = (self._output_height, self._output_width)
            (s_h2, s_w2) = (self._conv_out_size_same(s_h, 2), self._conv_out_size_same(s_w, 2))
            (s_h4, s_w4) = (self._conv_out_size_same(s_h2, 2), self._conv_out_size_same(s_w2, 2))
            (s_h8, s_w8) = (self._conv_out_size_same(s_h4, 2), self._conv_out_size_same(s_w4, 2))
            (s_h16, s_w16) = (self._conv_out_size_same(s_h8, 2), self._conv_out_size_same(s_w8, 2))
            z_ = nn.linear(z, (((self._input_dim * 8) * s_h16) * s_w16), name='g_lin_resnet_0')
            h0 = nn.activation_fn(nn.norm(z_, self._norm), self._activation)
            h0 = tf.reshape(h0, [(- 1), s_h16, s_w16, (self._input_dim * 8)])
            h1 = nn.deresidual2(h0, [self._batch_size, (s_h8 / 2), (s_w8 / 2), (self._input_dim * 4)], 'g_resnet_1', 3, 1, self._is_train, self._reuse, self._norm, self._activation)
            h1 = nn.upsample2(h1, 'NHWC')
            h2 = nn.deresidual2(h1, [self._batch_size, (s_h4 / 2), (s_w4 / 2), (self._input_dim * 2)], 'g_resnet_2', 3, 1, self._is_train, self._reuse, self._norm, self._activation)
            h2 = nn.upsample2(h2, 'NHWC')
            h3 = nn.deresidual2(h2, [self._batch_size, (s_h2 / 2), (s_w2 / 2), self._input_dim], 'g_resnet_3', 3, 1, self._is_train, self._reuse, self._norm, self._activation)
            h3 = nn.upsample2(h3, 'NHWC')
            h4 = nn.deresidual2(h3, [self._batch_size, (s_h / 2), (s_w / 2), self._output_dim], 'g_resnet_4', 3, 1, self._is_train, self._reuse, None, None)
            h4 = nn.upsample2(h4, 'NHWC')
            self._reuse = True
            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return tf.nn.tanh(h4)
def get_ner_charlm_package(lang, package):
    """Resolve the character-LM package for an NER model, using the NER-specific
    charlm table with the global defaults as fallback."""
    return get_charlm_package(lang, package, ner_charlms, default_charlms)
def remove_global_identifiers(G, to_track):
    """Rewrite every edge statement of G, replacing global identifiers with '<>'.

    ``to_track`` is a statement being traced through the rewrite passes for
    debugging; its (possibly rewritten) value is returned alongside G.
    """
    tracking_hit = False
    for _, _, attrs in G.edges(data=True):
        if attrs['stmt'] == to_track:
            print('Found ', to_track)
            tracking_hit = True
        attrs['stmt'] = re.sub(rgx.global_id, '<>', attrs['stmt'])
        if tracking_hit:
            # Report what happened to the tracked statement on this edge.
            if attrs['stmt'] == to_track:
                print('... remained unchanged by "remove global identifiers"')
            else:
                print('became', attrs['stmt'], 'in "remove global identifiers"')
                to_track = attrs['stmt']
            tracking_hit = False
    return (G, to_track)
def _get_cell(lines):
    """Parse VASP POSCAR-style text (a list of lines) into a cell.

    Returns (lattice, positions, numbers): the scaled 3x3 lattice, fractional
    atomic positions, and the expanded per-atom species numbers.
    """
    # Line 0: comment line, which may double as the chemical-symbol list.
    line1 = [x for x in lines[0].split()]
    if _is_exist_symbols(line1):
        symbols = line1
    else:
        symbols = None
    # Line 1: universal scaling factor applied to the lattice vectors.
    scale = float(lines[1])
    # Lines 2-4: the three lattice vectors (first three fields of each line).
    lattice = []
    for i in range(2, 5):
        lattice.append([float(x) for x in lines[i].split()[:3]])
    lattice = (np.array(lattice) * scale)
    # Line 5 is either the per-species atom counts, or (newer format) the
    # symbol line with the counts following on line 6.
    try:
        num_atoms = np.array([int(x) for x in lines[5].split()])
        line_at = 6
    except ValueError:
        symbols = [x for x in lines[5].split()]
        num_atoms = np.array([int(x) for x in lines[6].split()])
        line_at = 7
    numbers = _expand_symbols(num_atoms, symbols)
    # Optional 'Selective dynamics' line (recognized by its leading 's'/'S').
    if (lines[line_at][0].lower() == 's'):
        line_at += 1
    is_cartesian = False
    # Coordinate-mode line: 'c'/'k' marks Cartesian, anything else is direct.
    if ((lines[line_at][0].lower() == 'c') or (lines[line_at][0].lower() == 'k')):
        is_cartesian = True
        line_at += 1
    # One position line per atom, first three fields only.
    positions = []
    for i in range(line_at, (line_at + num_atoms.sum())):
        positions.append([float(x) for x in lines[i].split()[:3]])
    if is_cartesian:
        # Convert Cartesian coordinates to fractional via the inverse lattice.
        positions = np.dot(positions, np.linalg.inv(lattice))
    return (lattice, positions, numbers)
def retrieve_field(cls):
    """Download the ERA data described by `cls` unless it is already present.

    'oper' stream: retrieve 6-hourly files per year, merge them with cdo and
    convert to daily means. 'moda' stream: retrieve monthly means per decade
    and concatenate.

    FIXES vs. original: the 'moda' branch referenced an undefined `year`
    variable when naming the per-decade temp file (NameError), and its
    file-exists check was inverted (it retrieved only when the temp file
    already existed, inconsistent with the 'oper' branch).
    """
    import os
    (downloaded, cls) = check_downloaded(cls)
    file_path = os.path.join(cls.path_raw, cls.filename)
    # Monthly means go straight to the final file; operational data is first
    # merged into an 'oper' file and then reduced to daily means.
    if (cls.stream == 'moda'):
        file_path_raw = file_path
    else:
        file_path_raw = file_path.replace('daily', 'oper')
    if downloaded:
        print('You have already download the variable')
        print('to path: {} \n '.format(file_path))
    else:
        cls.tmp_folder = os.path.join(cls.path_raw, '{}_{}_{}_tmp'.format(cls.name, cls.stream, cls.grid))
        if not os.path.isdir(cls.tmp_folder):
            os.makedirs(cls.tmp_folder)
        print('You WILL download variable {} \n stream is set to {} \n'.format(cls.name, cls.stream))
        print('to path: \n \n {} \n \n'.format(file_path_raw))
        if (cls.stream == 'oper'):
            # One retrieval per year, skipping files that already exist.
            for year in cls.years:
                target = os.path.join(cls.tmp_folder, '{}_{}.nc'.format(cls.name, year))
                if not os.path.isfile(target):
                    print('Output file: ', target)
                    retrieval_yr(cls, year, target)
            print('convert operational 6hrly data to daily means')
            cat = 'cdo -O -b F64 mergetime {}{}*.nc {}'.format(cls.tmp_folder, sep, file_path_raw)
            daymean = 'cdo -b 32 daymean {} {}'.format(file_path_raw, file_path)
            kornshell_with_input([cat, daymean], cls)
        if (cls.stream == 'moda'):
            years = [int(yr) for yr in cls.years]
            # Group the requested years by decade (e.g. 1990, 2000, ...).
            decades = sorted({(y // 10) * 10 for y in years})
            print('Decades:', decades)
            for d in decades:
                # Build the 'YYYYMM01/...' date list for every requested month
                # of every requested year in this decade.
                requestDates = ''
                for y in years:
                    if (y // 10) * 10 == d:
                        for m in cls.months:
                            requestDates = requestDates + str(y) + m.zfill(2) + '01/'
                requestDates = requestDates[:(- 1)]
                print('Requesting dates: ', requestDates)
                # FIX: name the temp file after the decade `d` (the original
                # referenced an undefined `year` here).
                target = os.path.join(cls.tmp_folder, '{}_{}.nc'.format(cls.name, d))
                # FIX: retrieve only when the file is missing (check was inverted).
                if not os.path.isfile(target):
                    print('Output file: ', target)
                    retrieval_moda(cls, requestDates, d, target)
            cat = 'cdo cat {}*.nc {}'.format(cls.tmp_folder, file_path_raw)
            kornshell_with_input([cat], cls)
    return cls
def get_by_name(container, name, name_field='name'):
    """Return the unique element of *container* whose *name_field* attribute equals *name*.

    Returns None when nothing matches; raises Exception on multiple matches.
    """
    matches = [item for item in container if getattr(item, name_field) == name]
    if len(matches) > 1:
        raise Exception('Found multiple get_by_name matches, undefined behavior')
    if not matches:
        return None
    return matches[0]
# FIX: the register_module() call was a bare statement whose returned
# decorator was discarded, so the class was never added to the registry.
# Restore it as a decorator.
@_LAYERS.register_module()
class TwoIdentity(BaseModule):
    """Identity layer that passes two inputs through unchanged."""

    def __init__(self, *args, **kwargs):
        super(TwoIdentity, self).__init__()

    def forward(self, x1, x2):
        # No-op: return both inputs untouched.
        return (x1, x2)
def show_status():
    """Print network-device status when the module-level device filter selects it."""
    if status_dev in ('net', 'all'):
        show_device_status(network_devices, 'Network', if_field=True)
def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):
    """Create one LandmarkHead per FPN level and return them as a ModuleList."""
    return nn.ModuleList([LandmarkHead(inchannels, anchor_num) for _ in range(fpn_num)])
def UnitaryDualPolarGraph(m, q):
    """Return the unitary dual polar graph DU(m, q) built from GU(m, q)."""
    from sage.libs.gap.libgap import libgap
    # Expected intersection size of adjacent maximal totally isotropic subspaces.
    isect = int((q ** (2 * (m // 2 - 1)) - 1) / (q ** 2 - 1))
    G = _polar_graph(m, q ** 2, libgap.GeneralUnitaryGroup(m, q), intersection_size=isect)
    G.relabel()
    G.name('Unitary Dual Polar Graph DU' + str((m, q)))
    # Small cases are generalized quadrangles; record that in the name.
    if m == 4:
        G.name(G.name() + '; GQ' + str((q, q ** 2)))
    if m == 5:
        G.name(G.name() + '; GQ' + str((q ** 3, q ** 2)))
    return G
class FlaxRegNetModel(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless the 'flax' backend is installed."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
# FIX: the fixture line lost its '@pytest.fixture' prefix, leaving the bare
# invalid expression '(params=[...])'. Restore the decorator.
@pytest.fixture(params=['univariate', 'multivariate'])
def arange_graph(request):
    """Small StellarGraph fixture with arange-valued, normalized node features."""
    shape = (3, 7, 11) if request.param == 'multivariate' else (3, 7)
    # np.product is deprecated; np.prod is the supported spelling.
    total_elems = np.prod(shape)
    nodes = IndexedArray(np.arange(total_elems).reshape(shape) / total_elems, index=['a', 'b', 'c'])
    edges = pd.DataFrame({'source': ['a', 'b'], 'target': ['b', 'c']})
    return StellarGraph(nodes, edges)
def von_mises_cdf_series(k, x, p):
    """Evaluate the von Mises CDF at *x* (concentration *k*) via a p-term
    backward recurrence over the Fourier series of the distribution."""
    x = float(x)
    sin_x = np.sin(x)
    cos_x = np.cos(x)
    # sin(p*x) and cos(p*x); the loop steps them down to sin(n*x), cos(n*x).
    sn = np.sin(p * x)
    cn = np.cos(p * x)
    R = 0
    V = 0
    for n in range(p - 1, 0, -1):
        # Angle-subtraction identities rotate (sn, cn) from n+1 down to n.
        sn, cn = sn * cos_x - cn * sin_x, cn * cos_x + sn * sin_x
        # Continued-fraction ratio of modified Bessel functions.
        R = 1.0 / (2 * n / k + R)
        V = R * (sn / n + V)
    return 0.5 + x / (2 * np.pi) + V / np.pi
def _vggface2_topk_frontal_nonmates(wb, topk):
    """For each VGGFace2 frontal subject, compute its top-k nearest non-mate
    subjects by Euclidean distance between row-normalized mean encodings, and
    pickle the mapping to '_vggface2_topk_frontal_nonmates.pkl'.

    NOTE(review): depends on the fixed dataset path '/proj/janus6/vggface2'
    and writes into the working directory — not a pure function.
    """
    np.random.seed(42)
    n_minibatch = 2
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    # Chunk frontal images into minibatches of size n_minibatch.
    imlist = vipy.util.chunklistbysize([im for im in vggface2.frontalset(n_frontal=n_minibatch)], n_minibatch)
    imlist_preprocessed = [torch.cat([wb.net.preprocess(f_detection(im).pil()) for im in iml], dim=0) for iml in imlist]
    # Sum the encodings within each chunk, then L2-normalize the rows.
    X = [torch.squeeze(torch.sum(wb.net.encode(imchunk), dim=0)).detach().numpy() for imchunk in imlist_preprocessed]
    X = vipy.linalg.row_normalized(np.array(X))
    X_subjectid = [imchunk[0].category() for imchunk in imlist]
    d_subjectid_to_topk_frontal_nonmates = {}
    # Pairwise distances; argsort index 0 is the self-distance, so skip it.
    for (k, d) in enumerate(squareform(pdist(X, metric='euclidean'))):
        j_sorted = np.argsort(d)[1:]
        d_subjectid_to_topk_frontal_nonmates[X_subjectid[k]] = [X_subjectid[j] for j in j_sorted[0:topk]]
    vipy.util.save(d_subjectid_to_topk_frontal_nonmates, '_vggface2_topk_frontal_nonmates.pkl')
    return d_subjectid_to_topk_frontal_nonmates
class ResNetV2(nn.Module):
    """Pre-activation ResNet-v2 (BiT-style) with a width multiplier.

    The exact module names ('root', 'block1'..'block4', 'unit01'..) are load-
    bearing: load_from() maps them onto TF checkpoint key prefixes.
    """

    def __init__(self, block_units, width_factor, head_size=21843, zero_head=False):
        super().__init__()
        wf = width_factor
        self.wf = wf
        # Stem: standardized 7x7 conv, explicit pad, then 3x3 max-pool.
        self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, (64 * wf), kernel_size=7, stride=2, padding=3, bias=False)), ('pad', nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
        # Four stages of pre-activation bottlenecks; the first unit of stages
        # 2-4 downsamples with stride=2, the rest keep the resolution.
        self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(64 * wf), cout=(256 * wf), cmid=(64 * wf)))] + [(f'unit{i:02d}', PreActBottleneck(cin=(256 * wf), cout=(256 * wf), cmid=(64 * wf))) for i in range(2, (block_units[0] + 1))])))), ('block2', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(256 * wf), cout=(512 * wf), cmid=(128 * wf), stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=(512 * wf), cout=(512 * wf), cmid=(128 * wf))) for i in range(2, (block_units[1] + 1))])))), ('block3', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(512 * wf), cout=(1024 * wf), cmid=(256 * wf), stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=(1024 * wf), cout=(1024 * wf), cmid=(256 * wf))) for i in range(2, (block_units[2] + 1))])))), ('block4', nn.Sequential(OrderedDict(([('unit01', PreActBottleneck(cin=(1024 * wf), cout=(2048 * wf), cmid=(512 * wf), stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=(2048 * wf), cout=(2048 * wf), cmid=(512 * wf))) for i in range(2, (block_units[3] + 1))]))))]))
        self.zero_head = zero_head
        # Classification head: GN + ReLU + global average pool + 1x1 conv.
        self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, (2048 * wf))), ('relu', nn.ReLU(inplace=True)), ('avg', nn.AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d((2048 * wf), head_size, kernel_size=1, bias=True))]))

    def forward(self, x):
        # Stem -> 4 residual stages -> classification head.
        x = self.head(self.body(self.root(x)))
        return x

    def load_from(self, weights, prefix='resnet/'):
        """Copy weights from a TF checkpoint dict keyed by '{prefix}...' names."""
        with torch.no_grad():
            self.root.conv.weight.copy_(tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
            self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
            self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
            # zero_head: reinitialize the classifier (e.g. for fine-tuning).
            if self.zero_head:
                nn.init.zeros_(self.head.conv.weight)
                nn.init.zeros_(self.head.conv.bias)
            else:
                self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel']))
                self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
            # Delegate per-unit loading; module names define the key prefixes.
            for (bname, block) in self.body.named_children():
                for (uname, unit) in block.named_children():
                    unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
class HalfCheetahDirEnv(HalfCheetahEnvMetaBase):
    """Half-cheetah meta-RL task rewarding velocity in a sampled direction (+1/-1)."""

    def __init__(self, task=None):
        # Falsy task (None or empty) falls back to the forward direction.
        task = task or {'direction': 1.0}
        self._task = task
        self._goal_dir = task['direction']
        super().__init__()

    def step(self, action):
        """Advance one step; reward = directed forward velocity minus control cost."""
        pos_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        pos_after = self.sim.data.qpos[0]
        velocity = (pos_after - pos_before) / self.dt
        run_reward = self._goal_dir * velocity
        control_cost = 0.5 * 0.1 * np.sum(np.square(action))
        obs = self._get_obs()
        reward = run_reward - control_cost
        done = False
        infos = dict(reward_forward=run_reward, reward_ctrl=(- control_cost), task=self._task)
        return (obs, reward, done, infos)

    def sample_tasks(self, num_tasks):
        """Draw num_tasks directions uniformly from {-1, +1}."""
        directions = (2 * self.np_random.binomial(1, p=0.5, size=(num_tasks,))) - 1
        return [{'direction': direction} for direction in directions]

    def set_task(self, task):
        self._task = task
        self._goal_dir = task['direction']
def my_evaluate(ground_truth_file, prediction_file):
    """Run the underlying evaluator and return (F1, EM, average-of-both)."""
    f1_score, em_score, _total, _skipped = evaluate(ground_truth_file, prediction_file)
    avg_score = (em_score + f1_score) * 0.5
    return (f1_score, em_score, avg_score)
def load_tf_weights_in_big_bird(*args, **kwargs):
    """Stub that fails fast with a helpful error when torch is unavailable."""
    requires_backends(load_tf_weights_in_big_bird, ['torch'])
class Put_Ingredient_Everywhere(BaseScriptPeriod):
    """Two-stage scripted behavior: pick up an ingredient, then drop it on a counter ('X')."""

    def __init__(self, random_put=True, random_ingredient=True, obj=['onion', 'tomato']):
        super().__init__(period_name='Put_Ingredient_Everywhere')
        self.random_put = random_put
        self.random_ingredient = random_ingredient
        self.target_obj = obj if type(obj) == list else [obj]
        # Stage 1: pick up; stage 2: put down.
        self.__stage = 1
        self.__current_period = Pickup_Object(obj=self.target_obj, terrain_type='OT', random_put=self.random_put, random_pos=self.random_ingredient)

    def reset(self, mdp, state, player_idx):
        """Restart the script from the pickup stage."""
        self.__stage = 1
        self.__current_period = Pickup_Object(obj=self.target_obj, terrain_type='OT', random_put=self.random_put, random_pos=self.random_ingredient)

    def step(self, mdp, state, player_idx):
        """Advance the active sub-period, switching to the put stage once pickup completes."""
        if self.__stage == 1 and self.__current_period.done(mdp, state, player_idx):
            player = state.players[player_idx]
            assert player.has_object() and player.get_object().name in self.target_obj
            self.__stage = 2
            self.__current_period = Put_Object(terrain_type='X', random_put=True)
        return self.__current_period.step(mdp, state, player_idx)

    def done(self, mdp, state, player_idx):
        """Finished once the put stage ran and the player's hands are empty."""
        return self.__stage == 2 and not state.players[player_idx].has_object()
class FBLinkedRelationCache(FBCacheBase):
    """Disk-backed cache of (incoming, outgoing) relation lists per Freebase entity."""

    FILENAME = 'LinkedRelation.bin'

    def query_in_out_relation(self, entity):
        """Return (in_relations, out_relations) for *entity*, fetching and caching on miss."""
        if not self.ready:
            self.load()
        # Cached values are always tuples, never None, so .get is a safe probe.
        cached = self.data.get(entity)
        if cached is not None:
            return cached
        in_r, out_r = get_adjacent_relations(entity)
        result = (list(in_r), list(out_r))
        self.update_count += 1
        self.data[entity] = result
        return result

    def dataset_specific_prune(self, relations):
        """For GrailQA, drop relations outside the dataset's legal relation set."""
        if self.DATASET != 'grail':
            return relations
        in_r, out_r = relations
        in_r = [r for r in in_r if legal_relation(r, self.DATASET)]
        out_r = [r for r in out_r if legal_relation(r, self.DATASET)]
        return (in_r, out_r)
def count_work_reduce(node, symbols, state):
    """Estimate the arithmetic work of a reduce node: ops in its write-conflict
    resolution (WCR) expression times the input memlet volume; 0 when either
    is unavailable or the node has multiple inputs."""
    work = 0
    if node.wcr is not None:
        work += count_arithmetic_ops_code(node.wcr)
    incoming = state.in_edges(node)
    edge = incoming[0] if incoming is not None and len(incoming) == 1 else None
    if edge is not None and edge.data.volume is not None:
        work *= edge.data.volume
    else:
        work = 0
    return sp.sympify(work)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.