code stringlengths 281 23.7M |
|---|
def test_base_recognizer():
    """Check average_clip() config validation and its None/'score'/'prob' modes."""
    cls_score = torch.rand(5, 400)

    with pytest.raises(KeyError):
        # test_cfg must contain the key 'average_clips'.
        wrong_test_cfg = dict(clip='score')
        recognizer = ExampleRecognizer(None, wrong_test_cfg)
        recognizer.average_clip(cls_score)

    with pytest.raises(ValueError):
        # 'softmax' is not an accepted average_clips value.
        wrong_test_cfg = dict(average_clips='softmax')
        recognizer = ExampleRecognizer(None, wrong_test_cfg)
        recognizer.average_clip(cls_score)

    with pytest.raises(ValueError):
        recognizer = ExampleRecognizer(None, None)
        recognizer(torch.tensor(0))

    # average_clips=None: scores are returned unchanged.
    test_cfg = dict(average_clips=None)
    recognizer = ExampleRecognizer(None, test_cfg)
    score = recognizer.average_clip(cls_score, num_segs=5)
    assert torch.equal(score, cls_score)

    # 'score': raw class scores are averaged over clips.
    test_cfg = dict(average_clips='score')
    recognizer = ExampleRecognizer(None, test_cfg)
    score = recognizer.average_clip(cls_score, num_segs=5)
    assert torch.equal(score, cls_score.mean(dim=0, keepdim=True))

    # 'prob': softmax first, then averaged over clips.
    test_cfg = dict(average_clips='prob')
    recognizer = ExampleRecognizer(None, test_cfg)
    score = recognizer.average_clip(cls_score, num_segs=5)
    assert torch.equal(score,
                       F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True))
class Locker(BaseLocker):
    """In-memory test double for the poetry Locker: no file I/O, lock data is
    held in attributes and can be injected via mock_lock_data()."""

    def __init__(self, lock_path: Path) -> None:
        self._lock = lock_path / 'poetry.lock'
        self._written_data = None
        self._locked = False
        self._lock_data = None
        self._content_hash = self._get_content_hash()

    # NOTE(review): in the mangled source this may have carried a stripped
    # @property decorator — confirm against callers before changing.
    def written_data(self) -> dict[str, Any]:
        assert self._written_data is not None
        return self._written_data

    def set_lock_path(self, lock: Path) -> Locker:
        self._lock = lock / 'poetry.lock'
        return self

    def locked(self, is_locked: bool = True) -> Locker:
        self._locked = is_locked
        return self

    def mock_lock_data(self, data: dict[str, Any]) -> None:
        # Inject lock data directly, bypassing any write path.
        self._lock_data = data

    def is_locked(self) -> bool:
        return self._locked

    def is_fresh(self) -> bool:
        # The fake is always considered up to date.
        return True

    def _get_content_hash(self) -> str:
        return ''

    def _write_lock_data(self, data: dict[str, Any]) -> None:
        for package in data['package']:
            # Normalize python-versions to a plain string, as the real writer does.
            python_versions = str(package['python-versions'])
            package['python-versions'] = python_versions
        # Round-trip through JSON so the stored copy is detached from `data`.
        self._written_data = json.loads(json.dumps(data))
        self._lock_data = data
def test_points_from_angles():
    """Degree and radian inputs must produce the same Cartesian point."""
    distance = [1]
    elevation = [30]
    azimuth = [45]
    point1 = points_from_angles(distance=distance, elevation=elevation,
                                azimuth=azimuth, is_degree=True)
    assert point1.shape == (1, 3)
    assert point1.dtype == np.float64
    point2 = points_from_angles(distance=distance,
                                elevation=np.deg2rad(elevation),
                                azimuth=np.deg2rad(azimuth),
                                is_degree=False)
    assert point2.shape == (1, 3)
    assert point2.dtype == np.float64
    np.testing.assert_allclose(point1, point2)
class FilePathValidator(Validator):
    """Prompt validator: accepts empty input or a path to an existing file."""

    def validate(self, value):
        text = value.text
        # Empty input is allowed; non-empty input must name an existing file.
        if text and not os.path.isfile(text):
            raise ValidationError(message='File not found',
                                  cursor_position=len(text))
        return True
class SimpleLayer(caffe.Layer):
    """Caffe Python layer that multiplies its input by 10; the backward pass
    scales the incoming gradient by the same factor."""

    def setup(self, bottom, top):
        pass

    def reshape(self, bottom, top):
        # Output has the same shape as the input blob.
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        top[0].data[...] = 10 * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        bottom[0].diff[...] = 10 * top[0].diff
def test_preserve_keys_order(pytester: Pytester) -> None:
    """A dict stored in the pytest cache must come back in insertion order."""
    from _pytest.cacheprovider import Cache
    config = pytester.parseconfig()
    cache = Cache.for_config(config, _ispytest=True)
    cache.set('foo', {'z': 1, 'b': 2, 'a': 3, 'd': 10})
    read_back = cache.get('foo', None)
    assert list(read_back.items()) == [('z', 1), ('b', 2), ('a', 3), ('d', 10)]
def testPropEvo():
    """A Propagator applied to a state must reproduce sesolve's evolution."""
    a = destroy(5)
    H = a.dag() * a
    U = Propagator([H, [a + a.dag(), 'w*t']], args={'w': 1})
    # Fixed: the '@' matmul operator was lost in the mangled source —
    # applying the propagator QobjEvo to the initial state yields a
    # time-dependent state psi(t).
    psi = QobjEvo(U) @ basis(5, 4)
    tlist = np.linspace(0, 1, 6)
    psi_expected = sesolve([H, [a + a.dag(), 'w*t']], basis(5, 4),
                           tlist=tlist, args={'w': 1}).states
    for t, psi_t in zip(tlist, psi_expected):
        assert abs(psi(t).overlap(psi_t)) > 1 - 1e-06
def main(client, config):
    """Q02: count items co-viewed with the target item within user sessions
    and return the top `q02_limit` of them."""
    wcs_df = benchmark(read_tables, config=config,
                       compute_result=config['get_read_time'])
    f_wcs_df = wcs_df.map_partitions(pre_repartition_task)
    # Co-locate each user's clickstream so sessionization is partition-local.
    f_wcs_df = f_wcs_df.shuffle(on=['wcs_user_sk'])
    grouped_df = f_wcs_df.map_partitions(reduction_function,
                                         q02_session_timeout_inSec)
    items_value_counts = grouped_df.groupby(['i_item_sk']).cnt.sum()
    items_value_counts = items_value_counts.map_partitions(
        lambda ser: ser.sort_values(ascending=False))
    result_df = items_value_counts.reset_index(drop=False)
    result_df.columns = ['item_sk_1', 'cnt']
    result_df = result_df.head(q02_limit)
    result_df['item_sk_2'] = q02_item_sk
    result_order = ['item_sk_1', 'item_sk_2', 'cnt']
    result_df = result_df[result_order]
    return result_df
def test_lint():
    """Exercise the %-format-string linter's diagnostics."""
    assert_lints('%s', [])
    assert_lints('%.1%', ['using % combined with optional specifiers does not make sense'])
    assert_lints('%(a)s%s', ['cannot combine specifiers that require a mapping with those that do not'])
    assert_lints('%(a)*d', ['cannot combine specifiers that require a mapping with those that do not'])
    assert_lints('%(a).*d', ['cannot combine specifiers that require a mapping with those that do not'])
    assert_lints('%k', ['invalid conversion specifier in %k'])
    assert_lints('%b', ['the %b conversion specifier works only on Python 3 bytes patterns'])
def evaluate(args, model, tokenizer, prefix=''):
    """Run SQuAD evaluation for `model`, write prediction files, and return
    the metric dict produced by evaluate_on_squad()."""
    dataset, examples, features = load_and_cache_examples(
        args, tokenizer, evaluate=True, output_examples=True)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Sequential sampling in single-process mode; distributed otherwise.
    eval_sampler = (SequentialSampler(dataset) if args.local_rank == -1
                    else DistributedSampler(dataset))
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler,
                                 batch_size=args.eval_batch_size)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    all_results = []
    start_time = timeit.default_timer()
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1]}
            if args.model_type != 'distilbert':
                # XLM does not use token_type_ids.
                inputs['token_type_ids'] = (None if args.model_type == 'xlm'
                                            else batch[2])
            example_indices = batch[3]
            if args.model_type in ['xlnet', 'xlm']:
                inputs.update({'cls_index': batch[4], 'p_mask': batch[5]})
            outputs = model(**inputs)
        for i, example_index in enumerate(example_indices):
            eval_feature = features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            if args.model_type in ['xlnet', 'xlm']:
                # XLNet/XLM heads emit top-k start/end distributions plus a
                # CLS logit, collected in the extended result record.
                result = RawResultExtended(
                    unique_id=unique_id,
                    start_top_log_probs=to_list(outputs[0][i]),
                    start_top_index=to_list(outputs[1][i]),
                    end_top_log_probs=to_list(outputs[2][i]),
                    end_top_index=to_list(outputs[3][i]),
                    cls_logits=to_list(outputs[4][i]))
            else:
                result = RawResult(unique_id=unique_id,
                                   start_logits=to_list(outputs[0][i]),
                                   end_logits=to_list(outputs[1][i]))
            all_results.append(result)
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)',
                evalTime, evalTime / len(dataset))
    output_prediction_file = os.path.join(
        args.output_dir, 'predictions_{}.json'.format(prefix))
    output_nbest_file = os.path.join(
        args.output_dir, 'nbest_predictions_{}.json'.format(prefix))
    if args.version_2_with_negative:
        output_null_log_odds_file = os.path.join(
            args.output_dir, 'null_odds_{}.json'.format(prefix))
    else:
        output_null_log_odds_file = None
    if args.model_type in ['xlnet', 'xlm']:
        write_predictions_extended(
            examples, features, all_results, args.n_best_size,
            args.max_answer_length, output_prediction_file, output_nbest_file,
            output_null_log_odds_file, args.predict_file,
            model.config.start_n_top, model.config.end_n_top,
            args.version_2_with_negative, tokenizer, args.verbose_logging)
    else:
        write_predictions(
            examples, features, all_results, args.n_best_size,
            args.max_answer_length, args.do_lower_case, output_prediction_file,
            output_nbest_file, output_null_log_odds_file, args.verbose_logging,
            args.version_2_with_negative, args.null_score_diff_threshold)
    evaluate_options = EVAL_OPTS(data_file=args.predict_file,
                                 pred_file=output_prediction_file,
                                 na_prob_file=output_null_log_odds_file)
    results = evaluate_on_squad(evaluate_options)
    return results
class Processor():
    """Training/evaluation driver: wires together model, optimizer, data
    loaders, statistics and logging, driven by a parsed argument object."""

    def __init__(self, arg):
        self.arg = arg
        self.save_arg()
        if self.arg.random_fix:
            self.rng = RandomState(seed=self.arg.random_seed)
        self.device = GpuDataParallel()
        self.recoder = Recorder(self.arg.work_dir, self.arg.print_log)
        self.data_loader = {}
        self.topk = (1, 5)
        self.stat = Stat(self.arg.model_args['num_classes'], self.topk)
        self.model, self.optimizer = self.Loading()
        self.loss = self.criterion()

    def criterion(self):
        # reduction='none' so callers can average (train) or collect
        # per-sample losses (eval) explicitly.
        loss = nn.CrossEntropyLoss(reduction='none')
        return self.device.criterion_to_device(loss)

    def train(self, epoch):
        """Run one training epoch over the 'train' loader."""
        self.model.train()
        self.recoder.print_log('Training epoch: {}'.format(epoch + 1))
        loader = self.data_loader['train']
        loss_value = []
        self.recoder.timer_reset()
        current_learning_rate = [group['lr']
                                 for group in self.optimizer.optimizer.param_groups]
        for batch_idx, data in enumerate(loader):
            self.recoder.record_timer('dataloader')
            image = self.device.data_to_device(data[0])
            label = self.device.data_to_device(data[1])
            self.recoder.record_timer('device')
            output = self.model(image)
            self.recoder.record_timer('forward')
            loss = torch.mean(self.loss(output, label))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.recoder.record_timer('backward')
            loss_value.append(loss.item())
            if batch_idx % self.arg.log_interval == 0:
                self.recoder.print_log(
                    '\tEpoch: {}, Batch({}/{}) done. Loss: {:.8f} lr:{:.6f}'.format(
                        epoch, batch_idx, len(loader), loss.item(),
                        current_learning_rate[0]))
                self.recoder.print_time_statistics()
        self.optimizer.scheduler.step()
        self.recoder.print_log('\tMean training loss: {:.10f}.'.format(
            np.mean(loss_value)))

    def eval(self, loader_name):
        """Evaluate on each named loader, accumulating loss and top-k stats."""
        self.model.eval()
        for l_name in loader_name:
            loader = self.data_loader[l_name]
            loss_mean = []
            for batch_idx, data in enumerate(loader):
                image = self.device.data_to_device(data[0])
                label = self.device.data_to_device(data[1])
                with torch.no_grad():
                    output = self.model(image)
                loss_mean += self.loss(output, label).cpu().detach().numpy().tolist()
                self.stat.update_accuracy(output.data.cpu(), label.cpu(),
                                          topk=self.topk)
            self.recoder.print_log('mean loss: ' + str(np.mean(loss_mean)))

    def Loading(self):
        """Build model + optimizer; load pretrained weights or resume from a
        checkpoint (the RuntimeError fallback) when --weights is given."""
        self.device.set_device(self.arg.device)
        print('Loading model')
        if self.arg.model:
            model_class = import_class(self.arg.model)
            model = self.device.model_to_device(model_class(**self.arg.model_args))
            if self.arg.weights:
                try:
                    print('Loading pretrained model...')
                    state_dict = torch.load(self.arg.weights)
                    for w in self.arg.ignore_weights:
                        if state_dict.pop(w, None) is not None:
                            print('Sucessfully Remove Weights: {}.'.format(w))
                        else:
                            print('Can Not Remove Weights: {}.'.format(w))
                    model.load_state_dict(state_dict, strict=True)
                    optimizer = Optimizer(model, self.arg.optimizer_args)
                except RuntimeError:
                    # Not a plain state dict: treat it as a full checkpoint.
                    print('Loading from checkpoint...')
                    state_dict = torch.load(self.arg.weights)
                    self.rng.set_rng_state(state_dict['rng_state'])
                    self.arg.optimizer_args['start_epoch'] = state_dict['epoch'] + 1
                    self.recoder.print_log(
                        'Resuming from checkpoint: epoch {}'.format(
                            self.arg.optimizer_args['start_epoch']))
                    model = self.device.load_weights(model, self.arg.weights,
                                                     self.arg.ignore_weights)
                    optimizer = Optimizer(model, self.arg.optimizer_args)
                    optimizer.optimizer.load_state_dict(
                        state_dict['optimizer_state_dict'])
                    optimizer.scheduler.load_state_dict(
                        state_dict['scheduler_state_dict'])
            else:
                optimizer = Optimizer(model, self.arg.optimizer_args)
        else:
            raise ValueError('No Models.')
        print('Loading model finished.')
        self.load_data()
        return model, optimizer

    def load_data(self):
        """Create train/valid/test DataLoaders for whichever args are set."""
        print('Loading data')
        Feeder = import_class(self.arg.dataloader)
        self.data_loader = dict()
        if self.arg.train_loader_args != {}:
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_loader_args),
                batch_size=self.arg.batch_size,
                shuffle=True, drop_last=True,
                num_workers=self.arg.num_worker)
        if self.arg.valid_loader_args != {}:
            self.data_loader['valid'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.valid_loader_args),
                batch_size=self.arg.test_batch_size,
                shuffle=False, drop_last=False,
                num_workers=self.arg.num_worker)
        if self.arg.test_loader_args != {}:
            test_dataset = Feeder(**self.arg.test_loader_args)
            self.stat.test_size = len(test_dataset)
            self.data_loader['test'] = torch.utils.data.DataLoader(
                dataset=test_dataset,
                batch_size=self.arg.test_batch_size,
                shuffle=False, drop_last=False,
                num_workers=self.arg.num_worker)
        print('Loading data finished.')

    def start(self):
        """Entry point: run the train loop or a one-shot test evaluation."""
        if self.arg.phase == 'train':
            self.recoder.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
            for epoch in range(self.arg.optimizer_args['start_epoch'],
                               self.arg.num_epoch):
                save_model = (((epoch + 1) % self.arg.save_interval == 0)
                              or ((epoch + 1) == self.arg.num_epoch))
                eval_model = (((epoch + 1) % self.arg.eval_interval == 0)
                              or ((epoch + 1) == self.arg.num_epoch))
                self.train(epoch)
                if save_model:
                    model_path = '{}/epoch{}_model.pt'.format(
                        self.arg.work_dir, epoch + 1)
                    self.save_model(epoch, self.model, self.optimizer, model_path)
                if eval_model:
                    if self.arg.valid_loader_args != {}:
                        self.stat.reset_statistic()
                        self.eval(loader_name=['valid'])
                        self.print_inf_log(epoch + 1, 'Valid')
                    if self.arg.test_loader_args != {}:
                        self.stat.reset_statistic()
                        self.eval(loader_name=['test'])
                        self.print_inf_log(epoch + 1, 'Test')
        elif self.arg.phase == 'test':
            if self.arg.weights is None:
                raise ValueError('Please appoint --weights.')
            self.recoder.print_log('Model: {}.'.format(self.arg.model))
            self.recoder.print_log('Weights: {}.'.format(self.arg.weights))
            if self.arg.valid_loader_args != {}:
                self.stat.reset_statistic()
                self.eval(loader_name=['valid'])
                self.print_inf_log(self.arg.optimizer_args['start_epoch'], 'Valid')
            if self.arg.test_loader_args != {}:
                self.stat.reset_statistic()
                self.eval(loader_name=['test'])
                self.print_inf_log(self.arg.optimizer_args['start_epoch'], 'Test')
            self.recoder.print_log('Evaluation Done.\n')

    def print_inf_log(self, epoch, mode):
        """Log top-1/top-5 precision and dump the confusion matrix."""
        static = self.stat.show_accuracy(
            '{}/{}_confusion_mat'.format(self.arg.work_dir, mode))
        prec1 = (static[str(self.topk[0])] / self.stat.test_size) * 100
        prec5 = (static[str(self.topk[1])] / self.stat.test_size) * 100
        self.recoder.print_log(
            'Epoch {}, {}, Evaluation: prec1 {:.4f}, prec5 {:.4f}'.format(
                epoch, mode, prec1, prec5),
            '{}/{}.txt'.format(self.arg.work_dir, self.arg.phase))

    def save_model(self, epoch, model, optimizer, save_path):
        # Full checkpoint: model + optimizer + scheduler + RNG state so
        # Loading() can resume deterministically.
        torch.save({'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.optimizer.state_dict(),
                    'scheduler_state_dict': optimizer.scheduler.state_dict(),
                    'rng_state': self.rng.save_rng_state()}, save_path)

    def save_arg(self):
        """Persist the parsed arguments as YAML in the work directory."""
        arg_dict = vars(self.arg)
        if not os.path.exists(self.arg.work_dir):
            os.makedirs(self.arg.work_dir)
        with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
            yaml.dump(arg_dict, f)
def main(source, output, java, prefix_filter, exclude_filter, jars_list):
    """Unpack jacoco exec reports and source/class jars, run the jacoco agent
    to build an HTML report, and tar the result to `output`."""
    reports_dir = 'jacoco_reports_dir'
    mkdir_p(reports_dir)
    with tarfile.open(source) as tf:
        def is_within_directory(directory, target):
            abs_directory = os.path.abspath(directory)
            abs_target = os.path.abspath(target)
            prefix = os.path.commonprefix([abs_directory, abs_target])
            return prefix == abs_directory

        def safe_extract(tar, path='.', members=None, *, numeric_owner=False):
            # Guard against tar path-traversal (CVE-2007-4559-style members).
            for member in tar.getmembers():
                member_path = os.path.join(path, member.name)
                if not is_within_directory(path, member_path):
                    raise Exception('Attempted Path Traversal in Tar File')
            tar.extractall(path, members, numeric_owner=numeric_owner)

        safe_extract(tf, reports_dir)
    reports = [os.path.join(reports_dir, fname)
               for fname in os.listdir(reports_dir)]
    with open(jars_list) as f:
        jars = f.read().strip().split()
    src_dir = 'sources_dir'
    cls_dir = 'classes_dir'
    mkdir_p(src_dir)
    mkdir_p(cls_dir)
    agent_disposition = None
    for jar in jars:
        if jar.endswith('devtools-jacoco-agent.jar'):
            agent_disposition = jar
        with zipfile.ZipFile(jar) as jf:
            for entry in jf.infolist():
                if entry.filename.endswith('.java'):
                    dest = src_dir
                elif entry.filename.endswith('.class'):
                    dest = cls_dir
                else:
                    continue
                jf.extract(entry, dest)
    if not agent_disposition:
        # Fixed: the original line was unconverted Python-2
        # `print >> sys.stderr, ...` residue (a no-op tuple in Python 3).
        print("Can't find jacoco agent. Will not generate html report for java coverage.",
              file=sys.stderr)
    report_dir = 'java.report.temp'
    mkdir_p(report_dir)
    if agent_disposition:
        agent_cmd = [java, '-jar', agent_disposition, src_dir, cls_dir,
                     prefix_filter or '.', exclude_filter or '__no_exclude__',
                     report_dir]
        agent_cmd += reports
        subprocess.check_call(agent_cmd)
    with tarfile.open(output, 'w') as outf:
        outf.add(report_dir, arcname='.')
class NullSection(Section):
    """Kickstart section handler for unrecognized sections: captures the
    header args and body verbatim and re-serializes them on finalize()."""
    allLines = True

    def __init__(self, *args, **kwargs):
        Section.__init__(self, *args, **kwargs)
        self.sectionOpen = kwargs.get('sectionOpen')
        self._args = []
        self._body = []

    def handleHeader(self, lineno, args):
        self._args = args

    def handleLine(self, line):
        self._body.append(line)

    def finalize(self):
        # Rebuild the section as '<header args>\n<body>\n%end' and hand it
        # back to the handler, then reset for the next section.
        body = '\n'.join(self._body)
        if body:
            s = '%s\n%s\n%%end' % (' '.join(self._args), body)
        else:
            s = '%s\n%%end' % ' '.join(self._args)
        self.handler._null_section_strings.append(s)
        self._args = []
        self._body = []
@pytest.mark.parametrize('run_at, start, end, idle, with_warning', [
    ('document-start', True, False, False, False),
    ('document-end', False, True, False, False),
    ('document-idle', False, False, True, False),
    ('', False, True, False, False),
    ('bla', False, True, False, True),
])
def test_run_at(gm_manager, run_at, start, end, idle, with_warning, caplog):
    """Scripts are bucketed by their run-at directive; invalid values log a
    warning and fall back to document-end."""
    # NOTE(review): the '@pytest.mark' prefix and the '@name'/'@run-at'
    # tokens in the userscript were reconstructed — the mangled source had
    # lost '@'-prefixed tokens. Confirm against upstream.
    script = greasemonkey.GreasemonkeyScript.parse(textwrap.dedent(f'''
        // ==UserScript==
        // @name run-at-tester
        // @run-at {run_at}
        // ==/UserScript==
        return document.readyState;
    '''))
    if with_warning:
        with caplog.at_level(logging.WARNING):
            gm_manager.add_script(script)
        msg = 'Script run-at-tester has invalid run-at defined, defaulting to document-end'
        assert caplog.messages == [msg]
    else:
        gm_manager.add_script(script)
    assert gm_manager._run_start == ([script] if start else [])
    assert gm_manager._run_end == ([script] if end else [])
    assert gm_manager._run_idle == ([script] if idle else [])
class ComPort(object):
    """USB serial-port wrapper (CDC-ACM or FTDI) with a daemon reader thread
    that drains the IN endpoint into an internal queue."""

    def __init__(self, usb_device, start=True):
        self.device = usb_device
        self._isFTDI = False
        self._rxinterval = 0.005
        self._rxqueue = queue.Queue()
        self._rxthread = None
        self._rxactive = False
        self.baudrate = 9600
        self.parity = 0
        self.stopbits = 1
        self.databits = 8
        cfg = usb_device.get_active_configuration()
        if self.device.idVendor == 1027:
            # idVendor 1027 == 0x0403 (FTDI): vendor-specific class (0xFF),
            # no separate CDC command interface.
            self._isFTDI = True
            log.debug('Configuring as an FTDI device, no cmd itf')
            cmd_itfs = None
            data_itfs = list(usb.util.find_descriptor(
                cfg, find_all=True,
                custom_match=lambda e: e.bInterfaceClass == 255))
            data_itf = data_itfs[0]
            itf_num = data_itf.bInterfaceNumber
        else:
            # CDC data interface class is 10 (0x0A), command class is 2.
            data_itfs = list(usb.util.find_descriptor(
                cfg, find_all=True,
                custom_match=lambda e: e.bInterfaceClass == 10))
            if not data_itfs:
                print('Unable to connect. No data interfaces on device')
                exit()
            data_itf = data_itfs[0]
            cmd_itfs = list(usb.util.find_descriptor(
                cfg, find_all=True,
                custom_match=lambda e: e.bInterfaceClass == 2))
            itf_num = cmd_itfs[0].bInterfaceNumber
            if len(cmd_itfs) != len(data_itfs):
                log.debug('COM port data / command interface mismatch')
        ports = len(data_itfs)
        log.debug('found {0} COM port\n'.format(ports))
        try:
            self.device.detach_kernel_driver(itf_num)
        except usb.USBError:
            pass
        except NotImplementedError:
            pass
        # Bit 7 of bEndpointAddress (0x80 == 128) marks an IN endpoint.
        self._ep_in = usb.util.find_descriptor(
            data_itf, custom_match=lambda e: e.bEndpointAddress & 128)
        self._ep_out = usb.util.find_descriptor(
            data_itf, custom_match=lambda e: not (e.bEndpointAddress & 128))
        if start:
            self._startRx()

    def _startRx(self):
        # Don't spawn a second reader while one is active or still alive.
        # Fixed: Thread.isAlive() was removed in Python 3.9 -> is_alive().
        if self._rxthread is not None and (self._rxactive
                                           or self._rxthread.is_alive()):
            return
        self._rxactive = True
        self._rxthread = threading.Thread(target=self._read)
        self._rxthread.daemon = True
        self._rxthread.start()

    def _endRx(self):
        # Signal the reader loop to exit; _read checks this flag each pass.
        self._rxactive = False

    def _read(self):
        """Reader loop: pull packets from the IN endpoint into the queue."""
        while self._rxactive:
            try:
                rv = self._ep_in.read(self._ep_in.wMaxPacketSize)
                if self._isFTDI:
                    # FTDI prepends two modem-status bytes to each packet;
                    # (1, 96) appears to be the idle status here.
                    status = rv[:2]
                    if status[0] != 1 or status[1] != 96:
                        log.info('USB Status: 0x{0:02X} 0x{1:02X}'.format(*status))
                    rv = rv[2:]
                for rvi in rv:
                    self._rxqueue.put(rvi)
            except usb.USBError as e:
                log.warning('USB Error on _read {}'.format(e))
                return
            time.sleep(self._rxinterval)

    def _getRxLen(self):
        return self._rxqueue.qsize()
    # Number of received bytes currently buffered.
    rxlen = property(fget=_getRxLen)

    def readBytes(self):
        """Drain and return all currently queued received bytes (as ints)."""
        rx = []
        while not self._rxqueue.empty():
            rx.append(self._rxqueue.get())
        return rx

    def readText(self):
        return ''.join(chr(c) for c in self.readBytes())

    def write(self, data):
        """Write `data` to the OUT endpoint, logging short writes."""
        try:
            ret = self._ep_out.write(data)
        except usb.USBError as e:
            log.warning('USB Error on write {}'.format(e))
            return
        if len(data) != ret:
            log.error('Bytes written mismatch {0} vs {1}'.format(len(data), ret))
        else:
            log.debug('{} bytes written to ep'.format(ret))

    def setControlLineState(self, RTS=None, DTR=None):
        """Assert/deassert RTS and DTR (CDC SET_CONTROL_LINE_STATE or the
        FTDI vendor request, which needs enable masks in the high byte)."""
        ctrlstate = (2 if RTS else 0) + (1 if DTR else 0)
        if self._isFTDI:
            ctrlstate += (1 << 8) if DTR is not None else 0
            ctrlstate += (2 << 8) if RTS is not None else 0
        txdir = 0
        req_type = 2 if self._isFTDI else 1
        recipient = 0 if self._isFTDI else 1
        req_type = (txdir << 7) + (req_type << 5) + recipient
        wlen = self.device.ctrl_transfer(
            bmRequestType=req_type,
            bRequest=1 if self._isFTDI else CDC_CMDS['SET_CONTROL_LINE_STATE'],
            wValue=ctrlstate,
            wIndex=1 if self._isFTDI else 0,
            data_or_wLength=0)
        log.debug('Linecoding set, {}b sent'.format(wlen))

    def setLineCoding(self, baudrate=None, parity=None, databits=None,
                      stopbits=None):
        """Validate and apply line parameters, then push them to the device."""
        sbits = {1: 0, 1.5: 1, 2: 2}
        dbits = {5, 6, 7, 8, 16}
        pmodes = {0, 1, 2, 3, 4}
        brates = {300, 600, 1200, 2400, 4800, 9600, 14400, 19200,
                  28800, 38400, 57600, 115200, 230400}
        if stopbits is not None:
            if stopbits not in sbits:
                valid = ', '.join(str(k) for k in sorted(sbits.keys()))
                raise ValueError('Valid stopbits are ' + valid)
            self.stopbits = stopbits
        if databits is not None:
            if databits not in dbits:
                valid = ', '.join(str(d) for d in sorted(dbits))
                raise ValueError('Valid databits are ' + valid)
            self.databits = databits
        if parity is not None:
            if parity not in pmodes:
                valid = ', '.join(str(pm) for pm in sorted(pmodes))
                raise ValueError('Valid parity modes are ' + valid)
            self.parity = parity
        if baudrate is not None:
            # Fixed: original called brates.keys() on a set (AttributeError).
            if baudrate not in brates:
                brs = sorted(brates)
                dif = [abs(br - baudrate) for br in brs]
                best = brs[dif.index(min(dif))]
                raise ValueError(
                    'Invalid baudrates, nearest valid is {}'.format(best))
            self.baudrate = baudrate
        if self._isFTDI:
            self._setBaudFTDI(self.baudrate)
            self._setLineCodeFTDI(bits=self.databits,
                                  stopbits=sbits[self.stopbits],
                                  parity=self.parity, breaktype=0)
        else:
            # CDC line coding: dwDTERate (LE32), bCharFormat, bParityType,
            # bDataBits.
            linecode = [self.baudrate & 255,
                        (self.baudrate >> 8) & 255,
                        (self.baudrate >> 16) & 255,
                        (self.baudrate >> 24) & 255,
                        sbits[self.stopbits], self.parity, self.databits]
            txdir = 0
            req_type = 1
            recipient = 1
            req_type = (txdir << 7) + (req_type << 5) + recipient
            wlen = self.device.ctrl_transfer(
                req_type, CDC_CMDS['SET_LINE_CODING'],
                data_or_wLength=linecode)
            log.debug('Linecoding set, {}b sent'.format(wlen))

    def _setBaudFTDI(self, baudrate):
        if not self._isFTDI:
            return
        actual_baud, value, ndex = ftdi_to_clkbits(baudrate)
        log.debug('Actual baud: {}, Value 0x{:X}, Index {}'.format(
            actual_baud, value, ndex))
        txdir = 0
        req_type = 2
        recipient = 0
        req_type = (txdir << 7) + (req_type << 5) + recipient
        self.device.ctrl_transfer(bmRequestType=req_type, bRequest=3,
                                  wValue=value, wIndex=ndex, data_or_wLength=0)
        log.debug('FTDI Baudrate set to {}'.format(actual_baud))

    def _setLineCodeFTDI(self, bits, stopbits, parity, breaktype=0):
        if not self._isFTDI:
            return
        # FTDI SET_DATA packs bits/parity/stopbits/break into one word.
        value = bits
        value += parity << 8
        value += stopbits << 11
        value += breaktype << 14
        txdir = 0
        req_type = 2
        recipient = 0
        req_type = (txdir << 7) + (req_type << 5) + recipient
        wlen = self.device.ctrl_transfer(bmRequestType=req_type, bRequest=4,
                                         wValue=value, wIndex=1,
                                         data_or_wLength=0)
        return wlen

    def _resetFTDI(self):
        if not self._isFTDI:
            return
        txdir = 0
        req_type = 2
        recipient = 0
        req_type = (txdir << 7) + (req_type << 5) + recipient
        self.device.ctrl_transfer(bmRequestType=req_type, bRequest=0,
                                  wValue=0, wIndex=1, data_or_wLength=0)

    def _flushFTDI(self, rx=True, tx=True):
        if not self._isFTDI:
            return
        txdir = 0
        req_type = 2 if self._isFTDI else 1
        recipient = 0 if self._isFTDI else 1
        req_type = (txdir << 7) + (req_type << 5) + recipient
        if rx:
            self.device.ctrl_transfer(bmRequestType=req_type, bRequest=0,
                                      wValue=1, wIndex=1, data_or_wLength=0)
        if tx:
            self.device.ctrl_transfer(bmRequestType=req_type, bRequest=0,
                                      wValue=2, wIndex=1, data_or_wLength=0)

    def getLineCoding(self):
        """Query CDC GET_LINE_CODING and update/print the cached parameters."""
        if self._isFTDI:
            log.warning('FTDI does not support reading baud parameters')
        txdir = 1
        req_type = 1
        recipient = 1
        req_type = (txdir << 7) + (req_type << 5) + recipient
        buf = self.device.ctrl_transfer(
            bmRequestType=req_type, bRequest=CDC_CMDS['GET_LINE_CODING'],
            wValue=0, wIndex=0, data_or_wLength=255)
        self.baudrate = (buf[0] + (buf[1] << 8) + (buf[2] << 16)
                         + (buf[3] << 24))
        self.stopbits = 1 + buf[4] / 2.0
        self.parity = buf[5]
        self.databits = buf[6]
        print('LINE CODING:')
        print(' {0} baud, parity mode {1}'.format(self.baudrate, self.parity))
        print(' {0} data bits, {1} stop bits'.format(self.databits,
                                                     self.stopbits))

    def disconnect(self):
        """Stop the reader thread, release USB resources and try to re-attach
        the kernel driver."""
        self._endRx()
        # Busy-wait until the reader thread observes _rxactive and exits.
        while self._rxthread is not None and self._rxthread.is_alive():
            pass
        usb.util.dispose_resources(self.device)
        if self._rxthread is None:
            log.debug('Rx thread never existed')
        else:
            log.debug('Rx thread is {}'.format(
                'alive' if self._rxthread.is_alive() else 'dead'))
        attempt = 1
        while attempt < 10:
            try:
                self.device.attach_kernel_driver(0)
                log.debug('Attach kernal driver on attempt {0}'.format(attempt))
                break
            except usb.USBError:
                attempt += 1
                time.sleep(0.1)
        if attempt == 10:
            log.error('Could not attach kernal driver')
def set_initial_resolution(request: WSGIRequest) -> HttpResponse:
    """Parse a 'WIDTHxHEIGHT' value from the request and persist it."""
    value, response = extract_value(request.POST)
    resolution = tuple(map(int, value.split('x')))
    resolution = cast(Tuple[int, int], resolution)
    storage.put('initial_resolution', resolution)
    _notify_settings_changed('adjust_screen')
    return response
def test_multidim_register():
    """A shaped RIGHT-side register: index count, side flags, bits, adjoint."""
    r = Register('my_reg', bitsize=1, shape=(2, 3), side=Side.RIGHT)
    idxs = list(r.all_idxs())
    assert len(idxs) == 2 * 3
    assert not (r.side & Side.LEFT)
    assert r.side & Side.THRU
    assert r.total_bits() == 2 * 3
    # Taking the adjoint flips RIGHT to LEFT.
    assert r.adjoint() == Register('my_reg', bitsize=1, shape=(2, 3),
                                   side=Side.LEFT)
class InteractionTask(AbstractData):
    """Plain data holder describing a user-interaction task issued by a plugin."""
    __slots__ = ['iid', 'input', 'structure', 'preset', 'output',
                 'data', 'title', 'description', 'plugin']

    def __init__(self, iid=None, input=None, structure=None, preset=None,
                 output=None, data=None, title=None, description=None,
                 plugin=None):
        self.iid = iid
        self.input = input
        self.structure = structure
        self.preset = preset
        self.output = output
        self.data = data
        self.title = title
        self.description = description
        self.plugin = plugin
class CSVDIRBundle():
    """Zipline data-bundle adapter binding a CSV directory and time frames;
    ingest() forwards everything to the module-level csvdir_bundle()."""

    def __init__(self, tframes=None, csvdir=None):
        self.tframes = tframes
        self.csvdir = csvdir

    def ingest(self, environ, asset_db_writer, minute_bar_writer,
               daily_bar_writer, adjustment_writer, calendar, start_session,
               end_session, cache, show_progress, output_dir):
        # Delegate with the stored tframes/csvdir appended.
        csvdir_bundle(environ, asset_db_writer, minute_bar_writer,
                      daily_bar_writer, adjustment_writer, calendar,
                      start_session, end_session, cache, show_progress,
                      output_dir, self.tframes, self.csvdir)
@_register
class MBIDMassager(Massager):
    """Normalize MusicBrainz IDs into canonical 8-4-4-4-12 UUID form."""
    # Note: the '@' on the _register decorator was lost in the mangled source.
    tags = ['musicbrainz_trackid', 'musicbrainz_albumid',
            'musicbrainz_artistid', 'musicbrainz_albumartistid',
            'musicbrainz_trmid', 'musicip_puid']
    error = _('MusicBrainz IDs must be in UUID format.')

    def validate(self, value):
        # Drop non-ASCII, then strip separators and lowercase the hex digits.
        value = value.encode('ascii', 'replace').decode('ascii')
        value = ''.join(filter(str.isalnum, value.strip().lower()))
        try:
            int(value, 16)
        except ValueError as e:
            raise ValidationError from e
        else:
            if len(value) != 32:
                raise ValidationError
            else:
                return '-'.join([value[:8], value[8:12], value[12:16],
                                 value[16:20], value[20:]])
def test_wr_As_wr_At_disjoint():
    """Two update blocks writing disjoint slices of one wire must coexist."""
    # NOTE(review): the update-block decorators were presumably stripped by
    # the formatting mangle ('@'-prefixed tokens are lost throughout this
    # source); reconstructed as @update — confirm against upstream.
    class Top(ComponentLevel3):
        def construct(s):
            s.A = Wire(Bits32)

            @update
            def up_wr_As():
                s.A[1:3] = Bits2(2)

            @update
            def up_wr_At():
                s.A[5:7] = Bits2(2)

            @update
            def up_rd_A():
                z = s.A
    _test_model(Top)
class LLTM(nn.Module):
    """LLTM cell module: owns the fused gate parameters; the computation is
    delegated to LLTMFunction (a custom autograd Function)."""

    def __init__(self, input_features, state_size):
        super(LLTM, self).__init__()
        self.input_features = input_features
        self.state_size = state_size
        # One fused weight matrix for the three gates:
        # (3 * state_size, input_features + state_size).
        self.weights = nn.Parameter(
            torch.Tensor(3 * state_size, input_features + state_size))
        self.bias = nn.Parameter(torch.Tensor(1, 3 * state_size))
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-stdv, +stdv], scaled by the state size.
        stdv = 1.0 / math.sqrt(self.state_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, +stdv)

    def forward(self, input, state):
        return LLTMFunction.apply(input, self.weights, self.bias, *state)
class resnet_v1_101_fpn_dcn_rcnn_rep_noemb(Symbol):
def __init__(self):
self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight'))
self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias'))
self.constants_dict = {}
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
    """Build the FPN pyramid (p2..p6) from backbone stages c2..c5.

    Lateral 1x1 convs project each stage to `feature_dim` channels, a
    top-down pathway upsamples and element-wise sums them, and 3x3 convs
    smooth the merged maps. p6 is a stride-2 conv directly on c5.
    Returns (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6).
    """
    # Lateral 1x1 projections for levels 2..5.
    laterals = {}
    for level, feat in zip((2, 3, 4, 5), (c2, c3, c4, c5)):
        laterals[level] = mx.symbol.Convolution(
            data=feat, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
            num_filter=feature_dim, name='fpn_p%d_1x1' % level)
    # Top-down pathway: upsample the coarser merged map and add the lateral.
    merged = {5: laterals[5]}
    for level in (4, 3, 2):
        upsampled = mx.symbol.UpSampling(
            merged[level + 1], scale=2, sample_type='nearest',
            name='fpn_p%d_upsample' % (level + 1))
        merged[level] = mx.sym.ElementWiseSum(
            *[upsampled, laterals[level]], name='fpn_p%d_sum' % level)
    # Extra coarse level p6: stride-2 3x3 conv on c5.
    fpn_p6 = mx.sym.Convolution(
        data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2),
        num_filter=feature_dim, name='fpn_p6')
    # 3x3 smoothing convs reduce the aliasing introduced by upsampling.
    outputs = {}
    for level in (2, 3, 4, 5):
        outputs[level] = mx.symbol.Convolution(
            data=merged[level], kernel=(3, 3), pad=(1, 1), stride=(1, 1),
            num_filter=feature_dim, name='fpn_p%d' % level)
    return (outputs[2], outputs[3], outputs[4], outputs[5], fpn_p6)
def get_rpn_subnet(self, data, num_anchors, suffix):
    """Attach the RPN head (shared across pyramid levels) to one feature map.

    Weights/biases come from `self.shared_param_dict`, so every level uses
    the same parameters; `suffix` only disambiguates the symbol names.
    Returns (cls_score flattened for the loss, full-resolution cls prob,
    bbox pred flattened for the loss, raw bbox pred).
    """
    conv = mx.sym.Convolution(
        data=data, kernel=(3, 3), pad=(1, 1), num_filter=512,
        name=('rpn_conv_' + suffix),
        weight=self.shared_param_dict['rpn_conv_weight'],
        bias=self.shared_param_dict['rpn_conv_bias'])
    relu = mx.sym.Activation(data=conv, act_type='relu',
                             name=('rpn_relu_' + suffix))
    cls_score = mx.sym.Convolution(
        data=relu, kernel=(1, 1), pad=(0, 0), num_filter=(2 * num_anchors),
        name=('rpn_cls_score_' + suffix),
        weight=self.shared_param_dict['rpn_cls_score_weight'],
        bias=self.shared_param_dict['rpn_cls_score_bias'])
    bbox_pred = mx.sym.Convolution(
        data=relu, kernel=(1, 1), pad=(0, 0), num_filter=(4 * num_anchors),
        name=('rpn_bbox_pred_' + suffix),
        weight=self.shared_param_dict['rpn_bbox_pred_weight'],
        bias=self.shared_param_dict['rpn_bbox_pred_bias'])
    # (N, 2A, H, W) -> (N, 2, A*H, W): puts the fg/bg pair on its own axis
    # so the channel-mode softmax normalizes over exactly those two scores.
    cls_score_2ch = mx.sym.Reshape(data=cls_score, shape=(0, 2, (- 1), 0),
                                   name=('rpn_cls_score_t1_' + suffix))
    # Flattened (N, 2, A*H*W) view used by the classification loss.
    cls_score_flat = mx.sym.Reshape(data=cls_score_2ch, shape=(0, 2, (- 1)),
                                    name=('rpn_cls_score_t2_' + suffix))
    cls_prob = mx.sym.SoftmaxActivation(data=cls_score_2ch, mode='channel',
                                        name=('rpn_cls_prob_' + suffix))
    # Restore the (N, 2A, H, W) layout expected by the proposal op.
    cls_prob_full = mx.sym.Reshape(data=cls_prob,
                                   shape=(0, (2 * num_anchors), (- 1), 0),
                                   name=('rpn_cls_prob_t_' + suffix))
    bbox_pred_flat = mx.sym.Reshape(data=bbox_pred, shape=(0, 0, (- 1)),
                                    name=('rpn_bbox_pred_t_' + suffix))
    return (cls_score_flat, cls_prob_full, bbox_pred_flat, bbox_pred)
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
    """Deformable PS-ROI pooling with learned, ROI-specific bin offsets.

    A first no-trans pooling pass produces features from which a fully
    connected layer (parameters shared via `self.shared_param_dict` under
    `param_name`) predicts a (2, part_size, part_size) offset grid; a second
    pooling pass then samples with those offsets (trans_std=0.1).
    """
    # Pass 1: plain PS-ROI pooling (no_trans=True) to get per-ROI features
    # for offset prediction.
    pooled = mx.contrib.sym.DeformablePSROIPooling(
        name=(('offset_' + name) + '_t'), data=data, rois=rois,
        group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=True, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale)
    # FC predicts 2*part_size*part_size offsets; small lr_mult keeps the
    # offsets near zero early in training.
    offset_fc = mx.sym.FullyConnected(
        name=('offset_' + name), data=pooled,
        num_hidden=((part_size * part_size) * 2), lr_mult=0.01,
        weight=self.shared_param_dict[(('offset_' + param_name) + '_weight')],
        bias=self.shared_param_dict[(('offset_' + param_name) + '_bias')])
    offset_grid = mx.sym.Reshape(
        data=offset_fc, shape=((- 1), 2, part_size, part_size),
        name=('offset_reshape_' + name))
    # Pass 2: pooling driven by the predicted offsets.
    return mx.contrib.sym.DeformablePSROIPooling(
        name=('deformable_roi_pool_' + name), data=data, rois=rois,
        trans=offset_grid, group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=False, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
def get_constant_symbol(self, const_val):
    """Return a gradient-blocked constant symbol holding `const_val`.

    Symbols are memoized in `self.constants_dict` keyed by value, so the
    same constant is created (and named) only once per network.
    """
    try:
        return self.constants_dict[const_val]
    except KeyError:
        pass
    sym = mx.sym.BlockGrad(
        mx.sym.Variable('const_eq_{0}'.format(const_val), shape=(1,),
                        init=MyConstant(value=[const_val])))
    self.constants_dict[const_val] = sym
    return sym
def cos_sim_2_dist_generic(self, cos_sim, x=None, y=None, x_is_norm=True, y_is_norm=True):
    """Turn a similarity matrix into squared Euclidean distances.

    Uses ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2. When an input is already
    L2-normalized its squared norm is the constant 1; otherwise the raw
    symbol must be passed so its squared norms can be computed (columns are
    samples: reduction is over axis 0).
    """
    if x_is_norm:
        x_sq_norm = self.get_constant_symbol(1)
    else:
        assert (x is not None), 'if x is not L2 normalized then x must be provided'
        # Column-wise squared norms, transposed to broadcast over rows.
        x_sq_norm = mx.sym.transpose(
            mx.sym.sum_axis(mx.sym.square(x), axis=0, keepdims=True),
            axes=(1, 0))
    if y_is_norm:
        y_sq_norm = self.get_constant_symbol(1)
    else:
        assert (y is not None), 'if y is not L2 normalized then y must be provided'
        y_sq_norm = mx.sym.sum_axis(mx.sym.square(y), axis=0, keepdims=True)
    cross_term = mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)
    return mx.sym.broadcast_add(
        mx.sym.broadcast_sub(x_sq_norm, cross_term), y_sq_norm)
def cos_sim_2_dist(self, cfg=None, embd=None, reps=None, **_ignored_never_used):
    raise NotImplementedError
def cos_sim_2_dist(self, cos_sim, cfg=None, embd=None, reps=None):
    """Convert embedding/representative similarities to squared distances.

    Uses ||e - r||^2 = ||e||^2 - 2*e.r + ||r||^2, with squared norms
    replaced by the constant 1 when the corresponding side is L2-normalized
    per the config flags.

    NOTE(review): `cfg` defaults to None but is dereferenced unconditionally
    (`cfg.network...`); calling without a config raises AttributeError.
    """
    if cfg.network.EMBED_L2_NORM:
        embd_sq_norm = self.get_constant_symbol(1)
    else:
        assert (embd is not None), 'if embedding is not L2 normalized then embd must be provided'
        # Per-embedding squared norms, shaped (batch, 1, 1) for broadcasting.
        embd_sq_norm = mx.sym.reshape(
            mx.sym.sum_axis(mx.sym.square(embd), axis=1, keepdims=True),
            shape=(0, 1, 1))
    if cfg.network.REP_L2_NORM:
        reps_sq_norm = self.get_constant_symbol(1)
    else:
        assert (reps is not None), 'if representatives are not L2 normalized then reps must be provided'
        reps_sq_norm = mx.sym.sum_axis(mx.sym.square(reps), axis=0, keepdims=True)
    cross_term = mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)
    return mx.sym.broadcast_add(
        mx.sym.broadcast_sub(embd_sq_norm, cross_term), reps_sq_norm)
def get_symbol(self, cfg, is_train=True):
    """Build the full FPN + deformable-conv detection graph with a
    representative-based (embedding distance) classification head.

    In training mode this also wires the RPN losses, the proposal-target
    sampler, optional OHEM, the embedding margin loss and the
    representative-separation loss. In test mode it outputs rois, class
    probabilities, bbox deltas, the final embeddings and raw class scores.

    NOTE(review): `dict(a.items() + b.items())` and `cPickle` below are
    Python 2 idioms (dict views cannot be added in Python 3); this builder
    targets a Python 2 MXNet environment.
    """
    num_classes = cfg.dataset.NUM_CLASSES
    # Class-agnostic regression predicts only 2 sets of deltas (bg/fg).
    num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
    data = mx.sym.Variable(name='data')
    im_info = mx.sym.Variable(name='im_info')
    # Backbone (deformable convs enabled) and FPN pyramid P2..P6.
    (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
    (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
    # One RPN subnet per pyramid level.
    (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
    (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
    (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
    (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
    (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
    rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
    rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
    # Python 2 only: merges both dicts for the custom proposal op.
    arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
    if is_train:
        if (not cfg.network.base_net_lock):
            rpn_label = mx.sym.Variable(name='label')
            rpn_bbox_target = mx.sym.Variable(name='bbox_target')
            rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
        gt_boxes = mx.sym.Variable(name='gt_boxes')
        if (not cfg.network.base_net_lock):
            # RPN classification/regression losses, concatenated across levels.
            rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
            rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
            rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
            rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
        # Proposal generation with TRAIN-time NMS settings.
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
        # Sample/assign rois against ground truth for RCNN training.
        (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
    else:
        # Test-time proposal generation with TEST NMS settings.
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
    # Deformable ROI-pooling offset parameters, learned with a reduced LR.
    offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
    offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
    offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
    offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
    offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
    offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
    offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
    offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
    roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
    # Shared 2-layer FC head on top of pooled ROI features.
    fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
    fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
    fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
    fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
    if (is_train and cfg.network.base_net_lock):
        # Freeze everything below the embedding/representative head.
        fc_new_2_relu = mx.sym.BlockGrad(fc_new_2_relu)
        rois = mx.sym.BlockGrad(rois)
        label = mx.sym.BlockGrad(label)
        bbox_target = mx.sym.BlockGrad(bbox_target)
        bbox_weight = mx.sym.BlockGrad(bbox_weight)
    lr_mult = cfg.TRAIN.REPS_LR_MULT
    if cfg.network.SEPARABLE_REPS:
        # Representatives = per-class base vector + per-representative offset
        # (both realized as FC layers fed by the constant 1).
        base = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_base', num_hidden=(cfg.network.EMBEDDING_DIM * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
        offset = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_offset', num_hidden=(cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS), no_bias=True, lr_mult=lr_mult)
        base = mx.sym.reshape(base, shape=(cfg.network.EMBEDDING_DIM, 1, (num_classes - 1)))
        offset = mx.sym.reshape(offset, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
        representatives = mx.sym.broadcast_add(base, offset, name='fc_representatives')
    else:
        # One free (embedding_dim, reps_per_class, num_fg_classes) tensor.
        representatives = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives', num_hidden=((cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS) * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
        representatives = mx.sym.reshape(representatives, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, (num_classes - 1)))
    if cfg.network.REP_L2_NORM:
        # L2-normalize each representative along the embedding dimension.
        representatives = mx.sym.transpose(mx.sym.L2Normalization(mx.sym.transpose(representatives, axes=(1, 0, 2)), mode='channel'), axes=(1, 0, 2))
    extra_outputs = [mx.sym.BlockGrad(representatives)]
    eps = 1e-05
    # NOTE(review): this embedding MLP (`x`) is built but never consumed --
    # `batch_embed` below is taken directly from fc_new_2_relu. Presumably a
    # leftover experiment; confirm before removing.
    x = mx.sym.FullyConnected(name='embed_dense_1', data=fc_new_2_relu, num_hidden=2048)
    x = mx.sym.BatchNorm(name='embed_batchNorm_1', data=x, use_global_stats=True, fix_gamma=False, eps=eps)
    x = mx.sym.Activation(name='embed_relu_1', data=x, act_type='relu')
    x = mx.sym.FullyConnected(name='embed_dense_2', data=x, num_hidden=1024)
    x = mx.sym.BatchNorm(name='embed_batchNorm_2', data=x, use_global_stats=True, fix_gamma=False, eps=eps)
    x = mx.sym.Activation(name='embed_relu_2', data=x, act_type='relu')
    x = mx.sym.FullyConnected(name='embed_dense_3', data=x, num_hidden=1024)
    batch_embed = mx.sym.identity(name='batch_embed', data=fc_new_2_relu)
    if cfg.network.EMBED_L2_NORM:
        batch_embed = mx.sym.L2Normalization(data=batch_embed, name='batch_embed_nrm', mode='instance')
    # Cosine similarities between ROI embeddings and all representatives,
    # then converted to squared distances.
    cos_sim = mx.sym.dot(batch_embed, representatives, transpose_b=False)
    all_cls_rep_dist = self.cos_sim_2_dist(cos_sim, cfg, embd=batch_embed, reps=representatives)
    if (is_train and cfg.network.EMBED_LOSS_ENABLED):
        # Margin loss: distance to the true class' closest representative must
        # undercut the closest wrong-class distance by EMBED_LOSS_MARGIN.
        all_cls_min_dist = mx.sym.min_axis(all_cls_rep_dist, axis=1, keepdims=True)
        all_cls_min_dist = mx.sym.reshape(all_cls_min_dist, shape=(0, (num_classes - 1)))
        mod_true_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1, off_value=0), axis=1, begin=1, end=None)
        # on_value=1000 masks out the true class when taking the min below.
        mod_false_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1000, off_value=0), axis=1, begin=1, end=None)
        min_dist_true = mx.sym.sum_axis(mx.sym.broadcast_mul(all_cls_min_dist, mod_true_class), axis=1)
        min_dist_false = mx.sym.min_axis(mx.sym.broadcast_add(all_cls_min_dist, mod_false_class), axis=1)
        embed_loss_val = mx.sym.broadcast_sub(min_dist_true, min_dist_false)
        embed_loss_val = mx.sym.broadcast_add(embed_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
        embed_loss_val = mx.sym.relu(embed_loss_val)
        embed_loss_val = mx.sym.reshape(embed_loss_val, shape=(0, 1))
    if (is_train and cfg.network.REPS_CLS_LOSS):
        # Block-diagonal mask: 1 within a class' representatives, 0 across classes.
        mask_block_ones = mx.sym.ones(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
        mask_block_zeros = mx.sym.zeros(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
        mask = None
        for iC1 in range((num_classes - 1)):
            mask_row = None
            for iC2 in range((num_classes - 1)):
                if (iC1 == iC2):
                    cblock = mask_block_ones
                else:
                    cblock = mask_block_zeros
                if (mask_row is None):
                    mask_row = cblock
                else:
                    mask_row = mx.sym.concat(mask_row, cblock, dim=1)
            if (mask is None):
                mask = mask_row
            else:
                mask = mx.sym.concat(mask, mask_row, dim=0)
        # 1000 acts as +inf to exclude entries from the subsequent min ops.
        mask_NC = mx.sym.broadcast_mul(self.get_constant_symbol(1000), mask)
        mask_C = mx.sym.broadcast_sub(self.get_constant_symbol(1000), mask_NC)
        mask_C = mx.sym.BlockGrad(mask_C)
        mask_NC = mx.sym.BlockGrad(mask_NC)
        # Pairwise distances between all representatives.
        R = mx.sym.reshape(mx.sym.transpose(representatives, axes=(0, 2, 1)), shape=(0, (- 1)))
        R2R_cos_sim = mx.sym.dot(R, R, transpose_a=True)
        R2R = self.cos_sim_2_dist_generic(R2R_cos_sim, x=R, y=R, x_is_norm=cfg.network.REP_L2_NORM, y_is_norm=cfg.network.REP_L2_NORM)
        C2C = mx.sym.broadcast_add(R2R, mask_C)
        C2NC = mx.sym.broadcast_add(R2R, mask_NC)
        # k=2 then slice: skip each representative's zero distance to itself.
        min_dist_C = mx.sym.topk(C2C, axis=1, k=2, ret_typ='value', is_ascend=True)
        min_dist_C = mx.sym.slice_axis(min_dist_C, axis=1, begin=1, end=2)
        min_dist_NC = mx.sym.min_axis(C2NC, axis=1, keepdims=True)
        # Hinge: same-class reps should be closer together than to other classes.
        reps_cls_loss_val = mx.sym.broadcast_sub(min_dist_C, min_dist_NC)
        reps_cls_loss_val = mx.sym.broadcast_add(reps_cls_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
        reps_cls_loss_val = mx.sym.relu(reps_cls_loss_val)
    # Gaussian-kernel class scores from distances; background score is the
    # complement of the best foreground score.
    probs = mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(((- 0.5) / float((cfg.network.SIGMA ** 2))))))
    comb_cls_scores = mx.sym.max_axis(probs, axis=1, keepdims=False)
    comb_cls_scores = mx.sym.broadcast_add(comb_cls_scores, self.get_constant_symbol(1e-07))
    bg_scores = mx.sym.broadcast_sub(self.get_constant_symbol((1 + 1e-07)), mx.sym.max_axis(comb_cls_scores, axis=1, keepdims=True))
    cls_score = mx.sym.concat(bg_scores, comb_cls_scores, dim=1, name='bg_concat')
    cls_score = mx.sym.reshape(cls_score, shape=(0, (- 1)))
    if cfg.network.SOFTMAX_ENABLED:
        # Temperature scaling before the softmax loss.
        cls_score = mx.sym.broadcast_mul(self.get_constant_symbol(cfg.network.SOFTMAX_MUL), cls_score)
    if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
        cls_score_lin = mx.symbol.FullyConnected(name='cls_score_lin', data=fc_new_2_relu, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
    if is_train:
        if cfg.TRAIN.ENABLE_OHEM:
            # Online hard example mining re-weights labels/bbox weights.
            (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
            if cfg.network.SOFTMAX_ENABLED:
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            else:
                # Hand-built cross-entropy on the (already probability-like)
                # scores, ignoring OHEM-suppressed (-1) labels.
                zz = mx.sym.zeros_like(label)
                cls_prob = mx.sym.BlockGrad(cls_score)
                invalid = mx.sym.broadcast_equal(labels_ohem, self.get_constant_symbol((- 1)))
                minoh_labels = mx.sym.one_hot(mx.sym.broadcast_add(mx.sym.cast(invalid, dtype='float32'), labels_ohem), depth=num_classes, on_value=(- 1), off_value=0)
                ce_loss = mx.sym.where(invalid, x=zz, y=mx.sym.sum(mx.sym.broadcast_mul(minoh_labels, mx.sym.log(mx.sym.broadcast_add(cls_score, self.get_constant_symbol(1e-07)))), axis=1))
                ce_loss = mx.sym.MakeLoss(ce_loss, normalization='valid')
            bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
            rcnn_label = labels_ohem
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            if cfg.network.EMBED_LOSS_ENABLED:
                # Gate the embedding loss by OHEM-selected foreground weights.
                embed_loss_ = (mx.sym.slice_axis(bbox_weights_ohem, axis=1, begin=(- 1), end=None) * embed_loss_val)
                embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
        else:
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
            bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
            rcnn_label = label
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=label, normalization='valid')
            if cfg.network.EMBED_LOSS_ENABLED:
                embed_loss_ = (mx.sym.slice_axis(bbox_weight, axis=1, begin=0, end=1) * embed_loss_val)
                embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
        if cfg.network.EMBED_LOSS_ENABLED:
            extra_outputs += [embed_loss]
        if cfg.network.REPS_CLS_LOSS:
            extra_outputs += [mx.sym.MakeLoss(name='reps_cls_loss', data=reps_cls_loss_val, grad_scale=(1.0 / (cfg.network.REPS_PER_CLASS * (num_classes - 1))))]
        if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
            extra_outputs += [cls_prob_lin]
        # NOTE(review): ce_loss only exists when OHEM is on and SOFTMAX is off;
        # other config combinations would hit a NameError here -- confirm the
        # supported config space.
        if (not cfg.network.SOFTMAX_ENABLED):
            extra_outputs += [ce_loss]
        extra_outputs += [mx.sym.BlockGrad(rois), mx.sym.identity(mx.sym.BlockGrad(batch_embed), name='psp_final_embed')]
        rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
        if cfg.network.base_net_lock:
            # RPN outputs are omitted when the base network is frozen.
            group = mx.sym.Group(([cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
        else:
            group = mx.sym.Group(([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
    else:
        cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
        cls_score_orig = cls_score
        if cfg.network.SOFTMAX_ENABLED:
            # Undo the temperature scaling for the raw-score output.
            cls_score_orig = mx.sym.broadcast_div(cls_score_orig, self.get_constant_symbol(cfg.network.SOFTMAX_MUL))
        cls_score_orig = mx.sym.Reshape(data=cls_score_orig, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes))
        group = mx.sym.Group([rois, cls_prob, bbox_pred, mx.sym.identity(batch_embed, name='psp_final_embed'), mx.sym.identity(cls_score_orig, name='cls_score')])
    self.sym = group
    return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
    """Initialize the RCNN head parameters: shared FC layers, bbox regressor,
    the representatives tensor, and (optionally) the linear classifier."""
    # Gaussian weights, zero biases for the FC head and bbox regressor.
    for layer in ('fc_new_1', 'fc_new_2', 'bbox_pred'):
        arg_params[(layer + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(layer + '_weight')])
        arg_params[(layer + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(layer + '_bias')])
    name = 'fc_representatives'
    if cfg.network.SEPARABLE_REPS:
        # Separate base and offset factors of the representatives.
        for suffix in ('_base_weight', '_offset_weight'):
            arg_params[(name + suffix)] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + suffix)])
    elif cfg.network.SEPARABLE_REPS_INIT:
        # Single tensor, but initialized as per-class center + per-rep jitter.
        per_class = mx.random.normal(0, 0.1, shape=(cfg.network.EMBEDDING_DIM, 1, (cfg.dataset.NUM_CLASSES - 1)))
        per_rep = mx.random.normal(0, 0.05, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
        arg_params[(name + '_weight')] = mx.nd.reshape((per_class + per_rep), shape=((- 1), 1))
    else:
        arg_params[(name + '_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_weight')])
    if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
        arg_params['cls_score_lin_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_lin_weight'])
        arg_params['cls_score_lin_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_lin_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
    """Zero-initialize the deformable-convolution offset parameters so the
    deformable layers start out behaving like regular convolutions."""
    deformable_layers = ('res5a_branch2b', 'res5b_branch2b', 'res5c_branch2b', 'res3b3_branch2b', 'res4b22_branch2b')
    for layer in deformable_layers:
        for suffix in ('_offset_weight', '_offset_bias'):
            key = (layer + suffix)
            arg_params[key] = mx.nd.zeros(shape=self.arg_shape_dict[key])
def init_weight_fpn(self, cfg, arg_params, aux_params):
    """Initialize the FPN 3x3 output convs and 1x1 lateral convs:
    Gaussian weights, zero biases."""
    fpn_layers = ('fpn_p6', 'fpn_p5', 'fpn_p4', 'fpn_p3', 'fpn_p2',
                  'fpn_p5_1x1', 'fpn_p4_1x1', 'fpn_p3_1x1', 'fpn_p2_1x1')
    for layer in fpn_layers:
        arg_params[(layer + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(layer + '_weight')])
        arg_params[(layer + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(layer + '_bias')])
def init_weight(self, cfg, arg_params, aux_params):
    """Build fresh initializations for all new layers and merge them into the
    given param dicts.

    When ``pretrained_weights_are_priority`` is set, existing (pretrained)
    entries win unless they are missing or shape-mismatched; otherwise the
    fresh initialization always overwrites.
    """
    fresh_args = {}
    fresh_auxs = {}
    # Shared RPN-subnet params: offsets start at zero, the rest Gaussian.
    for name in self.shared_param_list:
        if ('offset' in name):
            fresh_args[(name + '_weight')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_weight')])
        else:
            fresh_args[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
        fresh_args[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
    self.init_deformable_convnet(cfg, fresh_args, fresh_auxs)
    self.init_weight_rcnn(cfg, fresh_args, fresh_auxs)
    self.init_weight_fpn(cfg, fresh_args, fresh_auxs)
    prefer_pretrained = cfg.network.pretrained_weights_are_priority
    for k in fresh_args:
        if (not prefer_pretrained) or (k not in arg_params) or (arg_params[k].shape != fresh_args[k].shape):
            arg_params[k] = fresh_args[k]
    for k in fresh_auxs:
        if (not prefer_pretrained) or (k not in aux_params):
            aux_params[k] = fresh_auxs[k]
def test_setting_list_option_completion(qtmodeltester, config_stub, configdata_stub, info):
    """The list-option completion model exposes the stubbed list options."""
    model = configmodel.list_option(info=info)
    model.set_pattern('')
    qtmodeltester.check(model)
    expected = {
        'List options': [
            ('completion.open_categories',
             'Which categories to show (in which order) in the :open completion.',
             '["searchengines", "quickmarks", "bookmarks", "history"]'),
        ],
    }
    _check_completions(model, expected)
class FC4_TestCase(CommandTest):
    """FC4 syntax: `mediacheck` takes no options and no arguments."""

    command = 'mediacheck'

    def runTest(self):
        # The bare command parses and round-trips unchanged.
        self.assert_parse('mediacheck', 'mediacheck\n')
        # Any option or positional argument is a parse error.
        for bad in ('mediacheck --cheese',
                    'mediacheck --crackers=CRUNCHY',
                    'mediacheck cheese crackers'):
            self.assert_parse_error(bad)
class CometCallback(TrainerCallback):
    """Trainer callback that logs metrics, parameters and (optionally)
    checkpoints to Comet ML."""

    def __init__(self):
        if not _has_comet:
            raise RuntimeError('CometCallback requires comet-ml to be installed. Run `pip install comet-ml`.')
        self._initialized = False
        self._log_assets = False

    def setup(self, args, state, model):
        """Create the Comet experiment (main process only) and log the
        training arguments and model config as parameters.

        Controlled by the COMET_MODE, COMET_PROJECT_NAME,
        COMET_OFFLINE_DIRECTORY and COMET_LOG_ASSETS environment variables.
        """
        self._initialized = True
        if os.getenv('COMET_LOG_ASSETS', 'FALSE').upper() in {'TRUE', '1'}:
            self._log_assets = True
        if not state.is_world_process_zero:
            return
        comet_mode = os.getenv('COMET_MODE', 'ONLINE').upper()
        experiment_kwargs = {'project_name': os.getenv('COMET_PROJECT_NAME', 'huggingface')}
        experiment = None
        if comet_mode == 'ONLINE':
            experiment = comet_ml.Experiment(**experiment_kwargs)
            experiment.log_other('Created from', 'transformers')
            logger.info('Automatic Comet.ml online logging enabled')
        elif comet_mode == 'OFFLINE':
            experiment_kwargs['offline_directory'] = os.getenv('COMET_OFFLINE_DIRECTORY', './')
            experiment = comet_ml.OfflineExperiment(**experiment_kwargs)
            experiment.log_other('Created from', 'transformers')
            logger.info('Automatic Comet.ml offline logging enabled; use `comet upload` when finished')
        if experiment is None:
            return
        experiment._set_model_graph(model, framework='transformers')
        experiment._log_parameters(args, prefix='args/', framework='transformers')
        if hasattr(model, 'config'):
            experiment._log_parameters(model.config, prefix='config/', framework='transformers')

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        if not self._initialized:
            self.setup(args, state, model)

    def on_log(self, args, state, control, model=None, logs=None, **kwargs):
        if not self._initialized:
            self.setup(args, state, model)
        if not state.is_world_process_zero:
            return
        experiment = comet_ml.config.get_global_experiment()
        if experiment is not None:
            experiment._log_metrics(logs, step=state.global_step, epoch=state.epoch, framework='transformers')

    def on_train_end(self, args, state, control, **kwargs):
        if not (self._initialized and state.is_world_process_zero):
            return
        experiment = comet_ml.config.get_global_experiment()
        if experiment is None:
            return
        if self._log_assets is True:
            logger.info('Logging checkpoints. This may take time.')
            experiment.log_asset_folder(args.output_dir, recursive=True, log_file_name=True, step=state.global_step)
        experiment.end()
class AdvertisingIntegrationTests(BaseApiTest):
    """End-to-end tests of the ad decision API together with the
    view/click/view-time tracking proxy endpoints."""

    def setUp(self):
        super().setUp()
        # Grant the test user and publisher group a second publisher so
        # per-publisher impression accounting can be exercised.
        self.user.publishers.add(self.publisher2)
        self.publisher_group.publishers.add(self.publisher2)
        # NOTE(review): this string literal is truncated in the source
        # (unterminated quote) -- the original page URL appears to have been
        # stripped; restore it before running.
        self.page_url = '
    def test_ad_view_and_tracking(self):
        """Offer an ad, bill its view via the proxy, then exercise valid and
        invalid view-time updates; a second publisher gets its own counters."""
        data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'user_ip': self.ip_address, 'user_ua': self.user_agent}
        resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
        self.assertEqual(resp.status_code, 200, resp.content)
        data = resp.json()
        nonce = data['nonce']
        # Decision and offer are counted immediately; the view only after the proxy hit.
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.views, 0)
        self.assertEqual(Offer.objects.filter(advertisement=self.ad, publisher=self.publisher1).count(), 1)
        # Hitting the view proxy with the offer nonce bills the view.
        view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        resp = self.proxy_client.get(view_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Billed view')
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.views, 1)
        self.assertEqual(View.objects.filter(advertisement=self.ad, publisher=self.publisher1).count(), 1)
        # Non-numeric view time is rejected.
        view_time_url = (reverse('view-time-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce}) + '?view_time=a')
        resp = self.proxy_client.get(view_time_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Invalid view time')
        # Unknown nonce is also rejected.
        view_time_url = (reverse('view-time-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': 'invalid-nonce'}) + '?view_time=10')
        resp = self.proxy_client.get(view_time_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Invalid view time')
        # A valid nonce + numeric time updates the recorded view time.
        view_time_url = (reverse('view-time-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce}) + '?view_time=10')
        resp = self.proxy_client.get(view_time_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Updated view time')
        # A decision for publisher2 is tracked separately from publisher1.
        data = {'placements': self.placements, 'publisher': self.publisher2.slug}
        resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
        self.assertEqual(resp.status_code, 200, resp.content)
        impression = self.ad.impressions.filter(publisher=self.publisher2).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.views, 0)
    def test_multiple_ad_offers_views(self):
        """Repeated offer+view cycles each increment the impression counters."""
        data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'user_ip': self.ip_address, 'user_ua': self.user_agent}
        times = 5
        for _ in range(times):
            # Each round: request a decision, then bill its view via the proxy.
            resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
            self.assertEqual(resp.status_code, 200, resp.content)
            view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': resp.json()['nonce']})
            resp = self.proxy_client.get(view_url)
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp['X-Adserver-Reason'], 'Billed view')
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, times)
        self.assertEqual(impression.offers, times)
        self.assertEqual(impression.views, times)
    def test_ad_views_for_forced_ads(self):
        """A forced ad returns the sentinel 'forced' nonce, whose view is never billed."""
        data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'force_ad': self.ad.slug}
        resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
        self.assertEqual(resp.status_code, 200, resp.content)
        data = resp.json()
        nonce = data['nonce']
        self.assertEqual(nonce, 'forced')
        # The view proxy does not recognize the 'forced' nonce as a real offer.
        view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        resp = self.proxy_client.get(view_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Unknown offer')
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.views, 0)
    def test_ad_click_and_tracking(self):
        """A click is only billed after the view; click metadata (IP, UA, URL)
        is recorded on the Click object."""
        data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'url': self.page_url, 'user_ip': self.ip_address, 'user_ua': self.user_agent}
        resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
        self.assertEqual(resp.status_code, 200, resp.content)
        data = resp.json()
        nonce = data['nonce']
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.clicks, 0)
        offer = Offer.objects.filter(advertisement=self.ad, publisher=self.publisher1).first()
        self.assertIsNotNone(offer)
        self.assertEqual(offer.url, self.page_url)
        # Clicking before the view is rejected (nonce not yet viewed).
        click_url = reverse('click-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        resp = self.proxy_client.get(click_url)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['X-Adserver-Reason'], 'Old/Invalid nonce')
        # Bill the view first...
        click_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        resp = self.proxy_client.get(click_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Billed view')
        # ...then the click goes through.
        click_url = reverse('click-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        resp = self.proxy_client.get(click_url)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['X-Adserver-Reason'], 'Billed click')
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.clicks, 1)
        clicks = Click.objects.filter(advertisement=self.ad, publisher=self.publisher1)
        self.assertEqual(clicks.count(), 1)
        click = clicks.first()
        # Presumably the stored IP is anonymized (last octets zeroed) -- the
        # expected value differs from self.ip_address; confirm against the
        # server's IP-anonymization logic.
        self.assertEqual(click.ip, '8.8.0.0')
        self.assertEqual(click.publisher, self.publisher1)
        self.assertEqual(click.advertisement, self.ad)
        self.assertEqual(click.os_family, 'Mac OS X')
        self.assertEqual(click.url, self.page_url)
    def test_user_geoip_passed_ip(self):
        """The caller-supplied user_ip is geolocated (mocked) and echoed back
        in the debug response headers."""
        new_ip = '255.255.255.255'
        data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'url': self.page_url, 'user_ip': new_ip, 'user_ua': self.user_agent}
        with mock.patch('adserver.utils.get_geoipdb_geolocation') as get_geo:
            get_geo.return_value = GeolocationData('US', 'NY')
            resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
            self.assertEqual(resp.status_code, 200, resp.content)
            self.assertEqual(resp['X-Adserver-RealIP'], new_ip)
            self.assertEqual(resp['X-Adserver-Country'], 'US')
            self.assertEqual(resp['X-Adserver-Region'], 'NY')
        # NOTE(review): this bare call looks like a displaced
        # @override_settings(ADSERVER_RECORD_VIEWS=False) decorator belonging
        # to the next test method (decompilation artifact) -- confirm against
        # the original source.
        _settings(ADSERVER_RECORD_VIEWS=False)
    def test_record_views_false(self):
        """With view recording disabled for the publisher, views are counted
        on the impression but no View rows are written."""
        self.publisher1.record_views = False
        self.publisher1.slug = 'readthedocs-test'
        self.publisher1.save()
        data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'user_ip': self.ip_address, 'user_ua': self.user_agent}
        resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
        self.assertEqual(resp.status_code, 200, resp.content)
        nonce = resp.json()['nonce']
        view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        resp = self.proxy_client.get(view_url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['X-Adserver-Reason'], 'Billed view')
        # Aggregated counters still increment...
        impression = self.ad.impressions.filter(publisher=self.publisher1).first()
        self.assertEqual(impression.decisions, 1)
        self.assertEqual(impression.offers, 1)
        self.assertEqual(impression.views, 1)
        # ...but no per-view database record is created.
        self.assertFalse(View.objects.filter(advertisement=self.ad, publisher=self.publisher1).exists())
        # NOTE(review): likely a displaced @override_settings decorator for the
        # next test method (decompilation artifact) -- confirm.
        _settings(ADSERVER_RECORD_VIEWS=False)
def test_record_views_ad_network(self):
self.publisher1.record_views = True
self.publisher1.record_placements = True
self.publisher1.save()
data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'user_ip': self.ip_address, 'user_ua': self.user_agent}
resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
self.assertEqual(resp.status_code, 200, resp.content)
nonce = resp.json()['nonce']
view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
resp = self.proxy_client.get(view_url)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['X-Adserver-Reason'], 'Billed view')
impression = self.ad.impressions.filter(publisher=self.publisher1).first()
self.assertEqual(impression.decisions, 1)
self.assertEqual(impression.offers, 1)
self.assertEqual(impression.views, 1)
self.assertTrue(View.objects.filter(advertisement=self.ad, publisher=self.publisher1).exists())
def test_record_uplift(self):
    """Proxying a view with ``uplift=True`` flags the offer as uplifted and viewed."""
    payload = {
        'placements': self.placements,
        'publisher': self.publisher1.slug,
        'user_ip': self.ip_address,
        'user_ua': self.user_agent,
    }
    resp = self.client.post(self.url, json.dumps(payload), content_type='application/json')
    self.assertEqual(resp.status_code, 200, resp.content)
    nonce = resp.json()['nonce']
    view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
    # No uplifted offer exists before the proxied view.
    uplifted = Offer.objects.filter(advertisement=self.ad, publisher=self.publisher1, uplifted=True)
    self.assertFalse(uplifted.exists())
    resp = self.proxy_client.get(view_url, {'uplift': True})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp['X-Adserver-Reason'], 'Billed view')
    # The uplifted offer is also marked viewed; no uplifted-but-unviewed rows remain.
    self.assertTrue(uplifted.filter(viewed=True).exists())
    self.assertFalse(uplifted.filter(viewed=False).exists())
def test_view_time(self):
    """View time is stored only for valid non-negative values; junk is ignored."""
    payload = {
        'placements': self.placements,
        'publisher': self.publisher1.slug,
        'user_ip': self.ip_address,
        'user_ua': self.user_agent,
    }
    resp = self.client.post(self.url, json.dumps(payload), content_type='application/json')
    self.assertEqual(resp.status_code, 200, resp.content)
    nonce = resp.json()['nonce']
    proxy_kwargs = {'advertisement_id': self.ad.pk, 'nonce': nonce}
    view_url = reverse('view-proxy', kwargs=proxy_kwargs)
    view_time_url = reverse('view-time-proxy', kwargs=proxy_kwargs)
    resp = self.proxy_client.get(view_url)
    self.assertEqual(resp.status_code, 200)
    offer = Offer.objects.filter(advertisement=self.ad, publisher=self.publisher1, viewed=True).first()
    self.assertIsNotNone(offer)
    # No view time recorded yet.
    self.assertIsNone(offer.view_time)
    # Non-numeric and negative values are silently ignored.
    for bad_value in ('invalid', '-1'):
        resp = self.proxy_client.get(view_time_url, {'view_time': bad_value})
        self.assertEqual(resp.status_code, 200)
        offer.refresh_from_db()
        self.assertIsNone(offer.view_time)
    seconds_viewed = 55
    resp = self.proxy_client.get(view_time_url, {'view_time': seconds_viewed})
    self.assertEqual(resp.status_code, 200)
    offer.refresh_from_db()
    self.assertEqual(offer.view_time, seconds_viewed)
def test_nullable_offers(self):
    """When no ad can be served, an Offer row with a null advertisement is logged."""
    self.ad.live = False
    self.ad.save()
    resp = self.client.post(self.url, json.dumps(self.data), content_type='application/json')
    self.assertEqual(resp.status_code, 200, resp.content)
    null_offers = Offer.objects.filter(advertisement=None, publisher=self.publisher1)
    self.assertEqual(null_offers.count(), 1)
def test_offer_url(self):
    """Offer URL prefers the posted 'url', falls back to Referer, rejects invalid values."""
    # NOTE(review): both URL literals appear truncated in this copy of the
    # file (unterminated strings) — restore the original values before use.
    referrer_url = '
    post_url = '
    # No 'url' in the POST body: the Referer header is used.
    resp = self.client.post(self.url, json.dumps(self.data), content_type='application/json', headers={'referer': referrer_url})
    self.assertEqual(resp.status_code, 200, resp.content)
    offer = Offer.objects.filter(id=resp.json()['nonce']).first()
    self.assertIsNotNone(offer)
    self.assertEqual(offer.url, referrer_url)
    # An explicit 'url' in the body takes precedence over the Referer.
    self.data['url'] = post_url
    resp = self.client.post(self.url, json.dumps(self.data), content_type='application/json', headers={'referer': referrer_url})
    self.assertEqual(resp.status_code, 200, resp.content)
    offer = Offer.objects.filter(id=resp.json()['nonce']).first()
    self.assertIsNotNone(offer)
    self.assertEqual(offer.url, post_url)
    # Invalid URLs are discarded rather than stored.
    self.data['url'] = 'invalid-url'
    resp = self.client.post(self.url, json.dumps(self.data), content_type='application/json')
    self.assertEqual(resp.status_code, 200, resp.content)
    offer = Offer.objects.filter(id=resp.json()['nonce']).first()
    self.assertIsNotNone(offer)
    self.assertIsNone(offer.url)
    # No URL at all (and no Referer header) leaves the field null.
    del self.data['url']
    resp = self.client.post(self.url, json.dumps(self.data), content_type='application/json')
    self.assertEqual(resp.status_code, 200, resp.content)
    offer = Offer.objects.filter(id=resp.json()['nonce']).first()
    self.assertIsNotNone(offer)
    self.assertIsNone(offer.url)
def test_publisher_daily_cap(self):
    """Once a publisher's daily earnings cap is reached, no further ads are offered."""
    # Cap sized so that two clicks at the flight's CPC fit under it —
    # presumably 2 * cpc <= 1.75 < 3 * cpc; verify against the fixture's CPC.
    self.publisher1.daily_cap = 1.75
    self.publisher1.save()
    data = {'placements': self.placements, 'publisher': self.publisher1.slug, 'user_ip': self.ip_address, 'user_ua': self.user_agent}
    # Two full decision/view/click cycles are billed normally.
    for i in range(2):
        resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
        self.assertEqual(resp.status_code, 200, resp.content)
        resp_data = resp.json()
        self.assertTrue(('nonce' in resp_data), resp.content)
        nonce = resp_data['nonce']
        view_url = reverse('view-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        click_url = reverse('click-proxy', kwargs={'advertisement_id': self.ad.pk, 'nonce': nonce})
        self.proxy_client.get(view_url)
        resp = self.proxy_client.get(click_url)
        self.assertEqual(resp['X-Adserver-Reason'], 'Billed click')
        # Earnings accumulate one CPC per click.
        self.assertAlmostEqual(self.publisher1.get_daily_earn(), ((i + 1) * self.flight.cpc))
    # The next decision request returns an empty body: the cap is exhausted.
    resp = self.client.post(self.url, json.dumps(data), content_type='application/json')
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.json(), {})
class TestDocumentDataRetrivalMethods(unittest.TestCase):
    """Tests for Document lookup helpers: ``get``, ``get_all`` and ``count``."""

    def test_get_method(self):
        """``get`` supports default, positive and negative occurrence indices."""
        doc = parse(USER_ONLY)
        # Default index returns the first 'user' section.
        self.assertEqual(['id = 1', "name = 'alex'"], doc.get('user').body)
        # Explicit index selects the nth occurrence ...
        self.assertEqual(['id = 2', "name = 'rustic'"], doc.get('user', 1).body)
        # ... and a negative index counts from the end.
        self.assertEqual(['id = 2', "name = 'rustic'"], doc.get('user', (- 1)).body)

    def test_get_all_method(self):
        """``get_all`` yields every 'user' section in document order."""
        doc = parse(USER_ONLY)
        expected_bodies = (['id = 1', "name = 'alex'"], ['id = 2', "name = 'rustic'"])
        for expected, user in zip(expected_bodies, doc.get_all('user')):
            self.assertEqual(expected, user.body)

    def test_count_method(self):
        """``count`` reports how many sections share the given name."""
        self.assertEqual(2, parse(USER_ONLY).count('user'))
def parse_args():
    """Parse command-line options for the distributed merge benchmark.

    Returns:
        The namespace produced by ``parse_benchmark_args`` extended with the
        benchmark-specific options declared below.
    """
    special_args = [
        {'name': ['-b', '--backend'],
         'choices': ['dask', 'explicit-comms', 'dask-noop'],
         'default': 'dask',
         'type': str,
         'help': 'The backend to use.'},
        {'name': ['-t', '--type'],
         'choices': ['cpu', 'gpu'],
         'default': 'gpu',
         'type': str,
         'help': 'Do merge with GPU or CPU dataframes'},
        {'name': ['-c', '--chunk-size'],
         'default': 1000000,
         'metavar': 'n',
         'type': int,
         'help': 'Chunk size (default 1_000_000)'},
        {'name': '--base-chunks',
         'default': None,
         'type': int,
         'help': 'Number of base-DataFrame partitions (default: n_workers)'},
        {'name': '--other-chunks',
         'default': None,
         'type': int,
         'help': 'Number of other-DataFrame partitions (default: n_workers)'},
        {'name': '--broadcast-join',
         'action': 'store_true',
         'help': 'Use broadcast join when possible.'},
        {'name': '--shuffle-join',
         'action': 'store_true',
         'help': "Use shuffle join (takes precedence over '--broadcast-join')."},
        # Help text fixed to match the actual default of '1 MiB' (was "'1 MB'").
        {'name': '--ignore-size',
         'default': '1 MiB',
         'metavar': 'nbytes',
         'type': parse_bytes,
         'help': "Ignore messages smaller than this (default '1 MiB')"},
        {'name': '--frac-match',
         'default': 0.3,
         'type': float,
         'help': 'Fraction of rows that matches (default 0.3)'},
        {'name': '--no-shuffle',
         'action': 'store_true',
         'help': "Don't shuffle the keys of the left (base) dataframe."},
        {'name': '--runs',
         'default': 3,
         'type': int,
         'help': 'Number of runs'},
        {'name': ['-s', '--set-index'],
         'action': 'store_true',
         'help': 'Call set_index on the key column to sort the joined dataframe.'},
    ]
    return parse_benchmark_args(description='Distributed merge (dask/cudf) benchmark', args_list=special_args)
def crosses(shape, other):
    """Return True if *shape* spatially crosses *other*.

    Both arguments must expose the geo interface (``__geo_interface__``).

    Raises:
        TypeError: if either argument lacks the geo interface attribute.
    """
    if not hasattr(shape, GEO_INTERFACE_ATTR):
        raise TypeError(SHAPE_TYPE_ERR % shape)
    if not hasattr(other, GEO_INTERFACE_ATTR):
        # Bug fix: the error message previously reported ``shape`` even
        # though it is ``other`` that failed the check.
        raise TypeError(SHAPE_TYPE_ERR % other)
    return geom.shape(shape).crosses(geom.shape(other))
class EvolutionSampler(BaseSampler):
    """Evolutionary subnet sampler driven by an NSGA-style search engine.

    On construction the sampler calibrates ``id_prop`` — the probability of
    sampling the identity/full choice for a block — so that randomly drawn
    subnets land inside either a FLOPs budget (``prop_search``) or a measured
    GPU-throughput budget (``prop_search_GPU``).  ``sample`` then runs the
    evolutionary search on rank 0 while the remaining ranks act as
    broadcast-synchronized evaluation workers.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if not hasattr(self, 'heads_share'):
            self.heads_share = False
        # Block-level operation requires shared heads.
        assert self.heads_share == True, 'We need share heads for Block operation'
        if not hasattr(self, 'GPU_search'):
            self.GPU_search = False
        self.init_evolution()
        # Calibrate id_prop against the configured budget before searching.
        if self.GPU_search:
            self.prop_search_GPU()
        else:
            self.prop_search()
        torch.distributed.barrier()
        self.num = 0
        assert self.side_scheme in ['BCNet', 'AutoSlim']

    def prop_search(self):
        """Tune ``id_prop`` until the mean FLOPs of ``ps_num`` random subnets
        falls inside ``[flops_min, flops_constraint]``.

        Raises:
            RuntimeError: if ``id_prop`` leaves (0, 1) without converging.
        """
        self.id_prop = 0.5
        if self.rank == 0:
            print(f'start prop_search: {self.id_prop}')
        while True:
            FLOPs_count = 0
            for i in range(self.ps_num):
                subnet = self.generate_subnet()
                FLOPs_count += count_flops(
                    self.model,
                    subnet_m=subnet[:self.len_block],
                    subnet_c=self.covert_channels(subnet[self.len_block:]),
                    heads_share=self.heads_share)
            FLOPs_count = FLOPs_count / self.ps_num
            if FLOPs_count > self.flops_constraint:
                # Too expensive: raise the identity probability (skip more ops).
                self.id_prop = self.id_prop + 0.01
                if self.rank == 0:
                    print(f'Too large, id_prop: {self.id_prop}, FLOPs_count: {FLOPs_count}, flops_constraint: {self.flops_constraint}')
            elif FLOPs_count < self.flops_min:
                # Too cheap: lower the identity probability.
                self.id_prop = self.id_prop - 0.01
                if self.rank == 0:
                    print(f'Too small, id_prop: {self.id_prop}, FLOPs_count: {FLOPs_count}, flops_constraint: {self.flops_min}')
            else:
                print('all_right, id_prop is {}'.format(self.id_prop))
                break
            if (self.id_prop >= 1) or (self.id_prop <= 0):
                raise RuntimeError(f'stop, id_prop: {self.id_prop}')

    def prop_search_GPU(self):
        """Tune ``id_prop`` using measured GPU throughput instead of FLOPs.

        Rank 0 measures and steers; every other rank runs the same
        measurements and waits on a broadcast ``finished`` flag so all ranks
        stay in lockstep.
        """
        self.id_prop = 0.5
        if self.rank == 0:
            print(f'start prop_search_GPU: {self.id_prop}')
        finished_GPU = torch.Tensor([0]).cuda()
        if self.rank == 0:
            while True:
                Pics_count = 0
                time_cost = []
                for i in range(self.ps_num):
                    subnet = self.generate_subnet()
                    subnet_m = subnet[:self.len_block]
                    subnet_c = self.covert_channels(subnet[self.len_block:])
                    subnet_m.extend(subnet_c)
                    subnet = subnet_m
                    time_cost.append(measure_model(self.model, subnet=subnet))
                # Average a central window of 6 measurements to damp outliers.
                Pics_count = sum(time_cost[((self.ps_num // 2) - 3):((self.ps_num // 2) + 3)]) / 6
                if Pics_count > self.GPUs_constraint:
                    self.id_prop = self.id_prop - 0.01
                    if self.rank == 0:
                        print(f'Too large, id_prop: {self.id_prop}, Pics_count: {Pics_count}, GPUs_constraint: {self.GPUs_constraint}')
                    finished_GPU = torch.Tensor([0]).cuda()
                    dist.broadcast(finished_GPU, 0)
                elif Pics_count < self.GPUs_min:
                    self.id_prop = self.id_prop + 0.01
                    if self.rank == 0:
                        print(f'Too small, id_prop: {self.id_prop}, Pics_count: {Pics_count}, GPUs_min: {self.GPUs_min}')
                    finished_GPU = torch.Tensor([0]).cuda()
                    dist.broadcast(finished_GPU, 0)
                else:
                    print('all_right, id_prop is {}'.format(self.id_prop))
                    finished_GPU = torch.Tensor([1]).cuda()
                    dist.broadcast(finished_GPU, 0)
                    break
                if (self.id_prop >= 1) or (self.id_prop <= 0):
                    raise RuntimeError(f'stop, id_prop: {self.id_prop}')
        else:
            # Worker ranks: mirror the measurements, then poll the flag.
            while True:
                finished_GPU = torch.Tensor([0]).cuda()
                for i in range(self.ps_num):
                    subnet = self.generate_subnet()
                    subnet_m = subnet[:self.len_block]
                    subnet_c = self.covert_channels(subnet[self.len_block:])
                    subnet_m.extend(subnet_c)
                    subnet = subnet_m
                    measure_model(self.model, subnet=subnet)
                dist.broadcast(finished_GPU, 0)
                if finished_GPU[0] == 1:
                    break

    def covert_channels(self, subnet_c):
        """Map channel-choice indices to channel-keep percentages.

        (Method name kept for compatibility; 'covert' is a historical typo
        of 'convert'.)
        """
        return [self.channel_percent[i] for i in subnet_c]

    def generate_subnet(self):
        """Randomly draw a subnet encoding and broadcast it from rank 0.

        Returns:
            A flat list: per-module operation choices followed by per-module
            channel choices, identical on every rank after the broadcasts.
        """
        assert self.id_prop is not None, 'id_prop should be a small number'
        subnet_m = []
        subnet_c = []
        for name, block in self.model.module.net.named_children():
            if ('id' in name) or ('Block' in name):
                if random.random() < self.id_prop:
                    # Pick the identity/full choice (offset by 3 head options
                    # when heads are shared).
                    if self.heads_share:
                        subnet_m.append((len(block) - 1) + 3)
                    else:
                        subnet_m.append(len(block) - 1)
                elif self.heads_share:
                    subnet_m.append(random.randint(0, (len(block) - 2) + 3))
                else:
                    subnet_m.append(random.randint(0, max(len(block) - 2, 0)))
            elif 'Patch_init' in name:
                subnet_m.append(random.randint(0, len(block) - 1))
            else:
                subnet_m.append(0)
            # Channel choices: one for Patch_init modules, two per Block/id.
            if 'Patch_init' in name:
                subnet_c.append(random.randint(0, len(self.channel_percent) - 1))
            if ('Block' in name) or ('id' in name):
                subnet_c.append(random.randint(0, len(self.channel_percent) - 1))
                subnet_c.append(random.randint(0, len(self.channel_percent) - 1))
        # Broadcast so all ranks evaluate the exact same subnet.
        subnet_m = torch.IntTensor(subnet_m).cuda()
        subnet_c = torch.IntTensor(subnet_c).cuda()
        dist.broadcast(subnet_m, 0)
        dist.broadcast(subnet_c, 0)
        subnet_m = subnet_m.tolist()
        subnet_c = subnet_c.tolist()
        subnet_m.extend(subnet_c)
        return subnet_m

    def init_evolution(self):
        """Record the search-space geometry (variable bounds) for the engine."""
        self.name_list = []
        self.block_list = []
        self.block_len = []
        self.channels_len = []
        for name, block in self.model.module.net.named_children():
            self.name_list.append(name)
            self.block_list.append(block)
            if ('id' in name) or ('Block' in name):
                if self.heads_share:
                    self.block_len.append((len(block) - 1) + 3)
                else:
                    self.block_len.append(len(block) - 1)
            else:
                self.block_len.append(len(block) - 1)
            if 'Patch_init' in name:
                self.channels_len.append(len(self.channel_percent) - 1)
            elif ('Block' in name) or ('id' in name):
                self.channels_len.append(len(self.channel_percent) - 1)
                self.channels_len.append(len(self.channel_percent) - 1)
        # Upper bounds: operation choices followed by channel choices.
        all_list = copy.deepcopy(self.block_len)
        all_list.extend(self.channels_len)
        self.n_var = len(all_list)
        self.lb = np.zeros(self.n_var)
        self.ub = np.array(all_list, dtype=float)
        self.len_block = len(self.name_list)

    def init_population(self):
        """Sample an initial population whose FLOPs satisfy the budget."""
        initial_pop = []
        if self.rank == 0:
            print('start init_population')
        while len(initial_pop) < self.pop_size:
            subnet = self.generate_subnet()
            flops = count_flops(self.model, subnet_m=subnet[:self.len_block], subnet_c=self.covert_channels(subnet[self.len_block:]), heads_share=self.heads_share)
            if (flops <= self.flops_constraint) and (flops >= self.flops_min):
                if self.rank == 0:
                    print('adopt subnet: {}, FLOPs: {}'.format(subnet, flops))
                initial_pop.append(subnet)
            elif self.rank == 0:
                print('not adopt subnet: {}, FLOPs: {}'.format(subnet, flops))
        # np.int was removed in NumPy 1.24; the builtin int is the replacement.
        initial_pop = np.array(initial_pop, dtype=int)
        if self.rank == 0:
            print('init_population done')
        return initial_pop

    def init_population_GPU(self):
        """Sample an initial population whose measured throughput fits the budget."""
        initial_pop = []
        if self.rank == 0:
            print('start init_population_GPU')
        while len(initial_pop) < self.pop_size:
            subnet = self.generate_subnet()
            subnet_m = subnet[:self.len_block]
            subnet_c = self.covert_channels(subnet[self.len_block:])
            subnet_m.extend(subnet_c)
            Pics_count = measure_model(self.model, subnet=subnet_m)
            # Broadcast rank-0's measurement so every rank makes the same decision.
            Pics_count = int(Pics_count)
            Pics_count = torch.IntTensor([Pics_count]).cuda()
            dist.broadcast(Pics_count, 0)
            Pics_count = Pics_count[0].item()
            if (Pics_count <= self.GPUs_constraint) and (Pics_count >= self.GPUs_min):
                if self.rank == 0:
                    print('adopt subnet: {}, Pics_count: {}'.format(subnet, Pics_count))
                initial_pop.append(subnet)
            elif self.rank == 0:
                print('not adopt subnet: {}, Pics_count: {}'.format(subnet, Pics_count))
        # np.int was removed in NumPy 1.24; the builtin int is the replacement.
        initial_pop = np.array(initial_pop, dtype=int)
        if self.rank == 0:
            print('init_population done')
        return initial_pop

    def eval_subnet_host(self, subnet):
        """Rank-0 evaluation hook: wake workers, share the subnet, evaluate."""
        finished = torch.Tensor([0]).cuda()
        dist.broadcast(finished, 0)
        dist.broadcast(torch.Tensor(subnet).cuda(), 0)
        score = self.eval_subnet(subnet)
        return score

    def sample(self, sampling=None):
        """Run the evolutionary search and record the top-k subnets.

        Rank 0 drives the NSGA engine; other ranks loop evaluating broadcast
        subnets until the finished flag is raised.  Always raises at the end
        (deliberate stop so results can be inspected).
        """
        if getattr(self, 'init_pop', True):
            if self.GPU_search:
                sampling = self.init_population_GPU()
            else:
                sampling = self.init_population()
        else:
            raise RuntimeError('need a init_population')
        if self.rank == 0:
            print('initial_population Done')
        subnet_eval_dict = {}
        if self.rank == 0:
            n_offspring = None
            nas_problem = NAS(n_var=self.n_var, n_obj=1, n_constr=0, lb=self.lb, ub=self.ub, eval_func=(lambda subnet: self.eval_subnet_host(subnet)), result_dict=subnet_eval_dict)
            if sampling is not None:
                method = engine.nsganet(pop_size=self.pop_size, n_offsprings=n_offspring, eliminate_duplicates=True, sampling=sampling)
            else:
                method = engine.nsganet(pop_size=self.pop_size, n_offsprings=n_offspring, eliminate_duplicates=True)
            res = minimize(nas_problem, method, callback=(lambda algorithm: self.generation_callback(algorithm)), termination=('n_gen', self.n_gens))
        else:
            # Worker loop: evaluate subnets pushed by rank 0 until finished.
            while True:
                finished = torch.Tensor([0]).cuda()
                dist.broadcast(finished, 0)
                if finished[0] == 1:
                    break
                subnet = torch.zeros([self.n_var]).cuda()
                dist.broadcast(subnet, 0)
                subnet = [int(x) for x in subnet.tolist()]
                self.eval_subnet(subnet)
        if self.rank == 0:
            # Release the workers from their evaluation loop.
            finished = torch.Tensor([1]).cuda()
            dist.broadcast(finished, 0)
        subnet_topk = []
        if self.rank == 0:
            sorted_subnet = sorted(subnet_eval_dict.items(), key=(lambda i: i[1]), reverse=True)
            sorted_subnet_key = [x[0] for x in sorted_subnet]
            subnet_topk = sorted_subnet_key[:self.sample_num]
            self.subnet_top1 = sorted_subnet_key[:1]
        if self.rank == 0:
            print('== search result ==')
            print(sorted_subnet)
            print('== best subnet ==')
            print(subnet_topk)
            print('== id_prop ==')
            print(self.id_prop)
        self.subnet_topk = subnet_topk
        # Intentional hard stop after the search completes.
        raise RuntimeError('sampling over, please check the answer')

    def generation_callback(self, algorithm):
        """Per-generation hook for the evolutionary engine (rank 0 only)."""
        gen = algorithm.n_gen
        # Fetched for inspection/debugging of the population state.
        pop_var = algorithm.pop.get('X')
        pop_obj = algorithm.pop.get('F')
        print(f'==Finished generation: {gen}')
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) GPT-2 BPE tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
        """Build the tokenizer; ``add_bos_token`` is explicitly unsupported here."""
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        # The fast backend cannot add a BOS token; direct users to the slow class.
        if kwargs.pop('add_bos_token', False):
            model_id = kwargs.pop('name_or_path', '')
            raise ValueError(f'''Currenty GPT2's fast tokenizer does NOT support adding a BOS token.Instead you should use GPT2's slow tokenizer class `GPT2Tokenizer` as follows:
`GPT2Tokenizer.from_pretrained('{model_id}')`
or
`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`
This issue will be fixed soon, see: so that the fast tokenizer works correctly.''')
        # Rebuild the backend pre-tokenizer if its serialized add_prefix_space
        # disagrees with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Encode a single input; same pretokenized-input constraint as above."""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the vocab/merges files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        """Serialize a conversation: each turn followed by EOS, truncated from the left."""
        input_ids = []
        for (is_user, text) in conversation.iter_texts():
            input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
        # Keep only the most recent model_max_length tokens.
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[(- self.model_max_length):]
        return input_ids
class TenluaVn(SimpleDownloader):
    """pyLoad downloader plugin for tenlua.vn using its JSON API."""
    __name__ = 'TenluaVn'
    __type__ = 'downloader'
    __version__ = '0.04'
    __status__ = 'testing'
    # NOTE(review): the pattern literal appears truncated in this copy of the
    # file (unterminated string) — restore the original regex before use.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Tenlua.vn downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    # NOTE(review): the API endpoint literal also appears truncated — verify.
    API_URL = '

    def api_request(self, method, **kwargs):
        """POST one API call ``method`` and return the decoded JSON reply.

        The session id ('sid'), when present, travels as a query parameter
        while the remaining kwargs form the JSON request body.
        """
        kwargs['a'] = method
        sid = kwargs.pop('sid', None)
        return json.loads(self.load(self.API_URL, get=({'sid': sid} if (sid is not None) else {}), post=json.dumps([kwargs])))

    def api_info(self, url):
        """Query file metadata for *url*; status 1 = offline, 2 = online."""
        file_id = re.match(self.__pattern__, url).group('ID')
        file_info = self.api_request('filemanager_builddownload_getinfo', n=file_id, r=gen_r())[0]
        if file_info['type'] == 'none':
            return {'status': 1}
        else:
            # Stash the download link and password flag for handle_download.
            return {'name': file_info['n'], 'size': file_info['real_size'], 'status': 2, 'tenlua': {'link': file_info['dlink'], 'password': bool(file_info['passwd'])}}

    def handle_free(self, pyfile):
        """Free download: no session id."""
        self.handle_download()

    def handle_premium(self, pyfile):
        """Premium download: pass the account's session id."""
        sid = self.account.info['data']['sid']
        self.handle_download(sid)

    def handle_download(self, sid=None):
        """Resolve the final download URL (handling passwords) and start it.

        Free downloads (no ``sid``) wait 30 seconds before starting.
        """
        if self.info['tenlua']['password']:
            password = self.get_password()
            if password:
                file_id = self.info['pattern']['ID']
                args = dict(n=file_id, p=password, r=gen_r())
                if sid is not None:
                    args['sid'] = sid
                password_status = self.api_request('filemanager_builddownload_checkpassword', **args)
                # The API reports the status as the string '0' on failure.
                if password_status['status'] == '0':
                    self.fail(self._('Wrong password'))
                else:
                    url = password_status['url']
            else:
                self.fail(self._('Download is password protected'))
        else:
            url = self.info['tenlua']['link']
        if sid is None:
            self.wait(30)
        self.link = url
# NOTE(review): the leading '@' of this registry decorator appears lost in
# this copy of the file — it presumably registers the class below; confirm.
_REGISTRY.register()
class REDSRecurrentDataset(data.Dataset):
    """REDS dataset for recurrent video super-resolution training.

    Yields fixed-length clips of paired LQ/GT frames with temporal-interval
    sampling, optional temporal reversal, paired random cropping and
    flip/rotation augmentation.
    """

    def __init__(self, opt):
        super(REDSRecurrentDataset, self).__init__()
        self.opt = opt
        (self.gt_root, self.lq_root) = (Path(opt['dataroot_gt']), Path(opt['dataroot_lq']))
        self.num_frame = opt['num_frame']
        self.keys = []
        # Each meta line: "<folder> <frame_num> <...>" — expand to per-frame keys.
        with open(opt['meta_info_file'], 'r') as fin:
            for line in fin:
                (folder, frame_num, _) = line.split(' ')
                self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])
        # Select the held-out clips by partition scheme.
        if (opt['val_partition'] == 'REDS4'):
            val_partition = ['000', '011', '015', '020']
        elif (opt['val_partition'] == 'official'):
            val_partition = [f'{v:03d}' for v in range(240, 270)]
        else:
            raise ValueError(f"Wrong validation partition {opt['val_partition']}.Supported ones are ['official', 'REDS4'].")
        # test_mode keeps only validation clips; training excludes them.
        if opt['test_mode']:
            self.keys = [v for v in self.keys if (v.split('/')[0] in val_partition)]
        else:
            self.keys = [v for v in self.keys if (v.split('/')[0] not in val_partition)]
        # File client is created lazily in __getitem__ (worker-process safe).
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if (self.io_backend_opt['type'] == 'lmdb'):
            self.is_lmdb = True
            if (hasattr(self, 'flow_root') and (self.flow_root is not None)):
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
            else:
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt']
        # Temporal augmentation options.
        self.interval_list = opt.get('interval_list', [1])
        self.random_reverse = opt.get('random_reverse', False)
        interval_str = ','.join((str(x) for x in self.interval_list))
        logger = get_root_logger()
        logger.info(f'Temporal augmentation interval list: [{interval_str}]; random reverse is {self.random_reverse}.')

    def __getitem__(self, index):
        if (self.file_client is None):
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        scale = self.opt['scale']
        gt_size = self.opt['gt_size']
        key = self.keys[index]
        (clip_name, frame_name) = key.split('/')
        interval = random.choice(self.interval_list)
        # Re-draw the start frame if the clip would run past frame 100
        # (REDS clips have 100 frames — presumably; confirm upstream).
        start_frame_idx = int(frame_name)
        if (start_frame_idx > (100 - (self.num_frame * interval))):
            start_frame_idx = random.randint(0, (100 - (self.num_frame * interval)))
        end_frame_idx = (start_frame_idx + (self.num_frame * interval))
        neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
        if (self.random_reverse and (random.random() < 0.5)):
            neighbor_list.reverse()
        img_lqs = []
        img_gts = []
        for neighbor in neighbor_list:
            if self.is_lmdb:
                img_lq_path = f'{clip_name}/{neighbor:08d}'
                img_gt_path = f'{clip_name}/{neighbor:08d}'
            else:
                img_lq_path = ((self.lq_root / clip_name) / f'{neighbor:08d}.png')
                img_gt_path = ((self.gt_root / clip_name) / f'{neighbor:08d}.png')
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = imfrombytes(img_bytes, float32=True)
            img_lqs.append(img_lq)
            img_bytes = self.file_client.get(img_gt_path, 'gt')
            img_gt = imfrombytes(img_bytes, float32=True)
            img_gts.append(img_gt)
        (img_gts, img_lqs) = paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path)
        # Concatenate so flips/rotations are applied consistently to LQ and GT,
        # then split the augmented list back into the two halves.
        img_lqs.extend(img_gts)
        img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
        img_results = img2tensor(img_results)
        img_gts = torch.stack(img_results[(len(img_lqs) // 2):], dim=0)
        img_lqs = torch.stack(img_results[:(len(img_lqs) // 2)], dim=0)
        return {'lq': img_lqs, 'gt': img_gts, 'key': key}

    def __len__(self):
        return len(self.keys)
def conduit_draw_color(conduit):
    """Return the fill color for a conduit row.

    An explicit ``draw_color`` column, when present, wins.  Otherwise the
    conduit is drawn neutral grey, shading towards red by peak flow once it
    has reached or exceeded capacity (``MaxQPerc >= 1``).
    """
    if 'draw_color' in conduit.axes[0]:
        return conduit.draw_color
    fill = '#787882'
    if ('MaxQPerc' in conduit) and (conduit.MaxQPerc >= 1):
        # Back out the full-flow capacity from the peak utilization ratio.
        # (An unused ``stress = MaxQ / capacity`` intermediate was removed.)
        capacity = conduit.MaxQ / conduit.MaxQPerc
        fill = gradient_grey_red(conduit.MaxQ * 100, 0, capacity * 300)
    return fill
# NOTE(review): the 'pytest.mark' prefix of this parametrize decorator appears
# lost in this copy of the file — confirm against the original.
.parametrize('basedirs, expected_basedirs, os_name', [(['foo', 'bar'], ['foo', 'bar'], 'posix'), (['foo:bar', 'foobar'], ['foo', 'bar', 'foobar'], 'posix'), (['foo:bar', 'foobar', 'one:two:three'], ['foo', 'bar', 'foobar', 'one', 'two', 'three'], 'posix'), (['foo:', ':bar'], ['foo', 'bar'], 'posix'), (['C:\\windows\\radish'], ['C:\\windows\\radish'], 'nt'), (['C:\\windows;radish'], ['C:\\windows', 'radish'], 'nt')])
def test_flattened_basedirs(mocker, basedirs, expected_basedirs, os_name):
    """Base dirs are split on the OS path separator (':' posix, ';' nt)."""
    mocker.patch('os.name', os_name)
    actual_basedirs = utils.flattened_basedirs(basedirs)
    assert (actual_basedirs == expected_basedirs)
def test_function_overloading():
    """Dispatch selects the correct bound C++ overload of ``test_function``."""
    assert (m.test_function() == 'test_function()')
    assert (m.test_function(7) == 'test_function(7)')
    assert (m.test_function(m.MyEnum.EFirstEntry) == 'test_function(enum=1)')
    assert (m.test_function(m.MyEnum.ESecondEntry) == 'test_function(enum=2)')
    # The repeated calls below are deliberate — presumably exercising repeat
    # dispatch/caching of already-resolved overloads; confirm upstream intent.
    assert (m.test_function() == 'test_function()')
    assert (m.test_function('abcd') == 'test_function(char *)')
    assert (m.test_function(1, 1.0) == 'test_function(int, float)')
    assert (m.test_function(1, 1.0) == 'test_function(int, float)')
    assert (m.test_function(2.0, 2) == 'test_function(float, int)')
# NOTE(review): the 'pytest.mark' prefix of this parametrize decorator appears
# lost in this copy of the file — confirm against the original.
.parametrize('tp', [str, *cond_list(HAS_PY_311, (lambda : [typing.LiteralString]))])
def test_str_loader_provider(strict_coercion, debug_trail, tp):
    """The str loader passes strings through; None handling depends on strictness."""
    retort = Retort(strict_coercion=strict_coercion, debug_trail=debug_trail)
    loader = retort.get_loader(tp)
    assert (loader('foo') == 'foo')
    if strict_coercion:
        # Strict mode rejects non-str input with a TypeLoadError.
        raises_exc(TypeLoadError(str, None), (lambda : loader(None)))
    else:
        # Lax mode stringifies: None -> 'None'.
        assert (loader(None) == 'None')
def date2juldate(val: date) -> float:
    """Convert a date or datetime to its serial day number.

    Plain dates map to an integer day count; datetimes additionally carry the
    time of day as a fractional part (microsecond resolution).
    """
    # Months elapsed since the epoch, split into whole years and a remainder.
    months = (12 * val.year + val.month) - 22803
    year_part, month_part = divmod(months, 12)
    # Day number from the year/month parts plus the day of month.
    day_number = ((153 * month_part + 302) // 5) + val.day + (1461 * year_part) // 4
    if not isinstance(val, datetime):
        return day_number
    # Fold hours/minutes/seconds/microseconds into a fraction of one day.
    seconds = val.second + 1e-06 * val.microsecond
    day_fraction = (val.hour + (val.minute + seconds / 60.0) / 60.0) / 24.0
    return day_number + day_fraction
class ApplyClassicalTest(Bloq):
    """Test bloq that XORs a 5-bit register with the constant pattern 10101.

    ``x`` is a pass-through register; ``z`` is a RIGHT (output-only) register
    carrying the XOR result.
    """

    def signature(self) -> 'Signature':
        return Signature([Register('x', 1, shape=(5,)), Register('z', 1, shape=(5,), side=Side.RIGHT)])

    def on_classical_vals(self, *, x: NDArray[np.uint8]) -> Dict[(str, NDArray[np.uint8])]:
        # z = x XOR 10101; x passes through unchanged.
        const = np.array([1, 0, 1, 0, 1], dtype=np.uint8)
        z = np.logical_xor(x, const).astype(np.uint8)
        return {'x': x, 'z': z}
class Uniform(BoundedContinuous):
    """Continuous uniform distribution on the interval [lower, upper].

    NOTE(review): ``dist``/``moment``/``logp``/``logcdf``/``icdf`` take
    ``cls``/``rv``/``value`` first arguments but carry no decorators in this
    copy of the file — upstream these are class/dispatch methods; confirm the
    decorators were not lost in extraction.
    """
    rv_op = uniform
    # Positions of (lower, upper) among the RV node inputs, used by
    # BoundedContinuous for automatic bound handling.
    bound_args_indices = (3, 4)

    def dist(cls, lower=0, upper=1, **kwargs):
        lower = pt.as_tensor_variable(floatX(lower))
        upper = pt.as_tensor_variable(floatX(upper))
        return super().dist([lower, upper], **kwargs)

    def moment(rv, size, lower, upper):
        # Midpoint of the interval, broadcast/tiled to the requested size.
        (lower, upper) = pt.broadcast_arrays(lower, upper)
        moment = ((lower + upper) / 2)
        if (not rv_size_is_none(size)):
            moment = pt.full(size, moment)
        return moment

    def logp(value, lower, upper):
        # log pdf = -log(upper - lower) inside the support, -inf outside.
        res = pt.switch(pt.bitwise_and(pt.ge(value, lower), pt.le(value, upper)), pt.fill(value, (- pt.log((upper - lower)))), (- np.inf))
        return check_parameters(res, (lower <= upper), msg='lower <= upper')

    def logcdf(value, lower, upper):
        # log CDF: -inf below, log((v - lower)/(upper - lower)) inside, 0 above.
        res = pt.switch(pt.lt(value, lower), (- np.inf), pt.switch(pt.lt(value, upper), (pt.log((value - lower)) - pt.log((upper - lower))), 0))
        return check_parameters(res, (lower <= upper), msg='lower <= upper')

    def icdf(value, lower, upper):
        # Quantile function: linear interpolation across the interval.
        res = (lower + ((upper - lower) * value))
        res = check_icdf_value(res, value)
        return check_icdf_parameters(res, (lower < upper))
def caffenet(lmdb, batch_size=256, include_acc=False):
    """Build the CaffeNet (AlexNet-style) network definition.

    Args:
        lmdb: path to the LMDB data source.
        batch_size: images per batch.
        include_acc: when True, also emit an Accuracy layer.

    Returns:
        The serialized net proto for (loss,) or (loss, accuracy).
    """
    (data, label) = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))
    # Five conv stages with pooling/LRN, mirroring the original AlexNet stack.
    (conv1, relu1) = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=0.0001, beta=0.75)
    (conv2, relu2) = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=0.0001, beta=0.75)
    (conv3, relu3) = conv_relu(norm2, 3, 384, pad=1)
    (conv4, relu4) = conv_relu(relu3, 3, 384, pad=1, group=2)
    (conv5, relu5) = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    # Two fully connected stages with dropout, then the 1000-way classifier.
    (fc6, relu6) = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    (fc7, relu7) = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)
    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
class GroupAll(nn.Module):
    """Group the entire point cloud into a single neighborhood.

    Output layout is ``(B, C', 1, N)`` so downstream per-group operators can
    treat the whole cloud as one group.
    """

    def __init__(self, use_xyz=True):
        super(GroupAll, self).__init__()
        # When True, raw coordinates are concatenated ahead of the features.
        self.use_xyz = use_xyz

    def forward(self, xyz, new_xyz, features=None):
        """Group all points into one group.

        Args:
            xyz: point coordinates — assumed (B, N, 3) per the transpose
                below; confirm against callers.
            new_xyz: ignored (kept for interface parity with other groupers).
            features: optional per-point features, assumed (B, C, N).

        Returns:
            Coordinates, features, or their channel-wise concatenation,
            each with a singleton group dimension inserted at dim 2.
        """
        coords = xyz.transpose(1, 2).unsqueeze(2)
        if features is None:
            return coords
        feats = features.unsqueeze(2)
        if not self.use_xyz:
            return feats
        return torch.cat([coords, feats], dim=1)
def main(args):
    """Run a wrapped command and flag a timeout-style crash in its meta file.

    Args:
        args: ``[meta_path, timeout_code, cmd...]`` — path to the JSON meta
            file written by the wrapped tool, the exit code that denotes a
            timeout, and the command line to execute.

    Returns:
        1 if the meta file records the timeout exit code, else 0.

    Raises:
        subprocess.CalledProcessError: if the wrapped command itself fails.
    """
    meta_path = os.path.abspath(args[0])
    timeout_code = int(args[1])
    subprocess.check_call(args[2:])
    with open(meta_path) as f:
        meta_info = json.load(f)
    if meta_info['exit_code'] == timeout_code:
        # Bug fix: the original used the Python 2 ``print >> sys.stderr``
        # chevron, which is a TypeError-raising expression on Python 3.
        print(meta_info['project'], 'crashed by timeout, use --test-disable-timeout option', file=sys.stderr)
        return 1
    return 0
class StereoDataset(BaseDataset):
    """Stereo-pair dataset with disparity ground truth and training augmentation."""

    def __init__(self, data_path, filenames_file, args, dataset, mode, ret_meta_info=False):
        """Set up the dataset; eval-mode KITTI uses a fixed 1280x384 resolution."""
        super(StereoDataset, self).__init__(data_path, filenames_file, dataset, mode, args.height, args.width)
        assert (self.dataset in ['sceneflow', 'kitti', 'cityscapes'])
        # Whether __getitem__ should also return augmentation meta info.
        self.ret_meta_info = ret_meta_info
        if (self.mode != 'train'):
            if (self.dataset == 'kitti'):
                (self.image_width, self.image_height) = (1280, 384)

    def __getitem__(self, idx):
        """Load a stereo pair + disparities, augment (train) or resize (eval)."""
        (left_fn, right_fn) = self.image_file_list[idx]
        left_image_path = os.path.join(self.data_path, left_fn)
        right_image_path = os.path.join(self.data_path, right_fn)
        left_image = self.load_image(left_image_path)
        right_image = self.load_image(right_image_path)
        # Keep unaugmented copies (resized to a fixed 1280x384) alongside
        # the training inputs.
        original_left_image = left_image.copy()
        original_right_image = right_image.copy()
        (original_height, original_width) = left_image.shape[:2]
        original_left_image = self.resize_img(original_left_image, 1280, 384)
        original_right_image = self.resize_img(original_right_image, 1280, 384)
        left_disp_gt = self.load_disp(left_fn, img_shape=left_image.shape)
        right_disp_gt = self.load_disp(right_fn, img_shape=right_image.shape)
        if (self.mode == 'train'):
            # Randomly swap+mirror the pair (a valid stereo augmentation).
            do_swap = (np.random.rand() > 0.5)
            if do_swap:
                (left_image, right_image, left_disp_gt, right_disp_gt) = self.augment_swap(left_image, right_image, left_disp_gt, right_disp_gt)
                (original_left_image, original_right_image) = (cv2.flip(original_right_image, 1), cv2.flip(original_left_image, 1))
            ret = self.augment_crop(left_image, right_image, left_disp_gt, right_disp_gt, ret_meta_info=self.ret_meta_info)
            (left_image, right_image, left_disp_gt, right_disp_gt) = ret[:4]
            if self.ret_meta_info:
                meta_info = ret[4]
            (left_image, right_image) = self.augment_color(left_image, right_image)
        else:
            # Evaluation: deterministic resize only, no augmentation.
            (left_image, right_image) = self.resize_lr_imgs(left_image, right_image)
            (left_disp_gt, right_disp_gt) = self.resize_lr_disps(left_disp_gt, right_disp_gt)
        sample = {'left': left_image, 'right': right_image, 'original_left': original_left_image, 'original_right': original_right_image, 'left_disp_gt': left_disp_gt, 'right_disp_gt': right_disp_gt, 'left_fn': left_fn, 'right_fn': right_fn}
        if ((self.mode == 'train') and self.ret_meta_info):
            sample.update(meta_info)
            sample.update({'original_height': original_height, 'original_width': original_width})
        return self.convert_to_tensor(sample)
def test_check_python_script(capsys):
    """check_python_script should run the target script with the given argv."""
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py zz')
    assert capsys.readouterr().out == 'hello zz!\n'
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py agent')
    assert capsys.readouterr().out == 'hello agent!\n'
    # Too many positional arguments: the hello script exits via SystemExit.
    with pytest.raises(SystemExit):
        mmcv.utils.check_python_script('./tests/data/scripts/hello.py li zz')
.isolated
def test_build_raises_build_backend_exception(mocker, package_test_flit):
    """A BuildBackendException raised by the backend must propagate verbatim."""
    mocker.patch('build.ProjectBuilder.get_requires_for_build',
                 side_effect=build.BuildBackendException(Exception('a')))
    mocker.patch('build.env.DefaultIsolatedEnv.install')
    # Python < 3.7 reprs single-arg exception tuples with a trailing comma.
    trailer = ',' if sys.version_info < (3, 7) else ''
    msg = f"Backend operation failed: Exception('a'{trailer})"
    with pytest.raises(build.BuildBackendException, match=re.escape(msg)):
        build.__main__.build_package(package_test_flit, '.', ['sdist'])
class Trainer():
    """Per-GPU training worker: wraps the model in DDP, runs epochs, and
    periodically saves snapshots so interrupted runs can resume."""

    def __init__(self, model: torch.nn.Module, train_data: DataLoader, optimizer: torch.optim.Optimizer, save_every: int, snapshot_path: str) -> None:
        self.gpu_id = int(os.environ['LOCAL_RANK'])
        self.model = model.to(self.gpu_id)
        self.train_data = train_data
        self.optimizer = optimizer
        self.save_every = save_every
        self.epochs_run = 0
        self.snapshot_path = snapshot_path
        # Restore weights before DDP wrapping so state_dict keys line up.
        if os.path.exists(snapshot_path):
            print('Loading snapshot')
            self._load_snapshot(snapshot_path)
        self.model = DDP(self.model, device_ids=[self.gpu_id])

    def _load_snapshot(self, snapshot_path):
        """Restore model weights and the epoch counter from disk."""
        device = f'cuda:{self.gpu_id}'
        snapshot = torch.load(snapshot_path, map_location=device)
        self.model.load_state_dict(snapshot['MODEL_STATE'])
        self.epochs_run = snapshot['EPOCHS_RUN']
        print(f'Resuming training from snapshot at Epoch {self.epochs_run}')

    def _run_batch(self, source, targets):
        """One optimizer step on a single batch."""
        self.optimizer.zero_grad()
        output = self.model(source)
        loss = F.cross_entropy(output, targets)
        loss.backward()
        self.optimizer.step()

    def _run_epoch(self, epoch):
        """Run one pass over the dataloader, moving batches to this GPU."""
        b_sz = len(next(iter(self.train_data))[0])
        print(f'[GPU{self.gpu_id}] Epoch {epoch} | Batchsize: {b_sz} | Steps: {len(self.train_data)}')
        # Re-seed the distributed sampler's shuffle for this epoch.
        self.train_data.sampler.set_epoch(epoch)
        for source, targets in self.train_data:
            self._run_batch(source.to(self.gpu_id), targets.to(self.gpu_id))

    def _save_snapshot(self, epoch):
        """Persist unwrapped (non-DDP) weights plus the epoch counter."""
        snapshot = {'MODEL_STATE': self.model.module.state_dict(), 'EPOCHS_RUN': epoch}
        torch.save(snapshot, self.snapshot_path)
        print(f'Epoch {epoch} | Training snapshot saved at {self.snapshot_path}')

    def train(self, max_epochs: int):
        """Train from the resumed epoch up to max_epochs; rank 0 snapshots."""
        for epoch in range(self.epochs_run, max_epochs):
            self._run_epoch(epoch)
            if (self.gpu_id == 0) and (epoch % self.save_every == 0):
                self._save_snapshot(epoch)
class TestTrialUnittest():
    """Integration tests for pytest's handling of twisted.trial TestCases."""

    def setup_class(cls):
        # Skips the whole class when twisted is not installed.
        cls.ut = pytest.importorskip('twisted.trial.unittest')
        cls.ignore_unclosed_socket_warning = ('-W', 'always')

    def test_trial_testcase_runtest_not_collected(self, pytester: Pytester) -> None:
        # Neither test_* methods nor runTest should be double-collected.
        pytester.makepyfile('\n from twisted.trial.unittest import TestCase\n\n class TC(TestCase):\n def test_hello(self):\n pass\n ')
        reprec = pytester.inline_run(*self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(passed=1)
        pytester.makepyfile('\n from twisted.trial.unittest import TestCase\n\n class TC(TestCase):\n def runTest(self):\n pass\n ')
        reprec = pytester.inline_run(*self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(passed=1)

    def test_trial_exceptions_with_skips(self, pytester: Pytester) -> None:
        # Mix of pytest skips/xfails and trial's own .skip/.todo attributes.
        pytester.makepyfile('\n from twisted.trial import unittest\n import pytest\n class TC(unittest.TestCase):\n def test_hello(self):\n pytest.skip("skip_in_method")\n .skipif("sys.version_info != 1")\n def test_hello2(self):\n pass\n .xfail(reason="iwanto")\n def test_hello3(self):\n assert 0\n def test_hello4(self):\n pytest.xfail("i2wanto")\n def test_trial_skip(self):\n pass\n test_trial_skip.skip = "trialselfskip"\n\n def test_trial_todo(self):\n assert 0\n test_trial_todo.todo = "mytodo"\n\n def test_trial_todo_success(self):\n pass\n test_trial_todo_success.todo = "mytodo"\n\n class TC2(unittest.TestCase):\n def setup_class(cls):\n pytest.skip("skip_in_setup_class")\n def test_method(self):\n pass\n ')
        result = pytester.runpytest('-rxs', *self.ignore_unclosed_socket_warning)
        result.stdout.fnmatch_lines_random(['*XFAIL*test_trial_todo*', '*trialselfskip*', '*skip_in_setup_class*', '*iwanto*', '*i2wanto*', '*sys.version_info*', '*skip_in_method*', '*1 failed*4 skipped*3 xfailed*'])
        assert (result.ret == 1)

    def test_trial_error(self, pytester: Pytester) -> None:
        # Deferred/reactor failures surface both at test time and teardown.
        pytester.makepyfile('\n from twisted.trial.unittest import TestCase\n from twisted.internet.defer import Deferred\n from twisted.internet import reactor\n\n class TC(TestCase):\n def test_one(self):\n crash\n\n def test_two(self):\n def f(_):\n crash\n\n d = Deferred()\n d.addCallback(f)\n reactor.callLater(0.3, d.callback, None)\n return d\n\n def test_three(self):\n def f():\n pass # will never get called\n reactor.callLater(0.3, f)\n # will crash at teardown\n\n def test_four(self):\n def f(_):\n reactor.callLater(0.3, f)\n crash\n\n d = Deferred()\n d.addCallback(f)\n reactor.callLater(0.3, d.callback, None)\n return d\n # will crash both at test time and at teardown\n ')
        result = pytester.runpytest('-vv', '-oconsole_output_style=classic', '-W', 'ignore::DeprecationWarning')
        result.stdout.fnmatch_lines(['test_trial_error.py::TC::test_four FAILED', 'test_trial_error.py::TC::test_four ERROR', 'test_trial_error.py::TC::test_one FAILED', 'test_trial_error.py::TC::test_three FAILED', 'test_trial_error.py::TC::test_two FAILED', '*ERRORS*', '*_ ERROR at teardown of TC.test_four _*', '*DelayedCalls*', '*= FAILURES =*', '*_ TC.test_four _*', '*NameError*crash*', '*_ TC.test_one _*', '*NameError*crash*', '*_ TC.test_three _*', '*DelayedCalls*', '*_ TC.test_two _*', '*NameError*crash*', '*= 4 failed, 1 error in *'])

    def test_trial_pdb(self, pytester: Pytester) -> None:
        # A failing assertion must drop into pdb when running interactively.
        p = pytester.makepyfile('\n from twisted.trial import unittest\n import pytest\n class TC(unittest.TestCase):\n def test_hello(self):\n assert 0, "hellopdb"\n ')
        child = pytester.spawn_pytest(str(p))
        child.expect('hellopdb')
        child.sendeof()

    def test_trial_testcase_skip_property(self, pytester: Pytester) -> None:
        testpath = pytester.makepyfile("\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n skip = 'dont run'\n def test_func(self):\n pass\n ")
        reprec = pytester.inline_run(testpath, '-s')
        reprec.assertoutcome(skipped=1)

    def test_trial_testfunction_skip_property(self, pytester: Pytester) -> None:
        testpath = pytester.makepyfile("\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n def test_func(self):\n pass\n test_func.skip = 'dont run'\n ")
        reprec = pytester.inline_run(testpath, '-s')
        reprec.assertoutcome(skipped=1)

    def test_trial_testcase_todo_property(self, pytester: Pytester) -> None:
        testpath = pytester.makepyfile("\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n todo = 'dont run'\n def test_func(self):\n assert 0\n ")
        reprec = pytester.inline_run(testpath, '-s')
        reprec.assertoutcome(skipped=1)

    def test_trial_testfunction_todo_property(self, pytester: Pytester) -> None:
        testpath = pytester.makepyfile("\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n def test_func(self):\n assert 0\n test_func.todo = 'dont run'\n ")
        reprec = pytester.inline_run(testpath, '-s', *self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(skipped=1)
class DetectionModel(metaclass=ABCMeta):
    """Abstract base for detection models.

    Stores the class count and registered ground truth; subclasses implement
    the preprocess -> predict -> postprocess/loss pipeline.

    Fixed: the Python 2 ``__metaclass__ = ABCMeta`` attribute was a no-op
    under Python 3; the metaclass is now declared in the class statement.
    """

    def __init__(self, num_classes):
        self._num_classes = num_classes
        self._groundtruth_lists = {}

    def num_classes(self):
        """Return the number of classes this model predicts."""
        return self._num_classes

    def groundtruth_lists(self, field):
        """Return the groundtruth tensors registered under ``field``.

        Raises RuntimeError when provide_groundtruth() has not supplied them.
        """
        if field not in self._groundtruth_lists:
            # Fixed: RuntimeError was previously given two positional args
            # (format string + field), so the message was never formatted.
            raise RuntimeError('Groundtruth tensor %s has not been provided' % field)
        return self._groundtruth_lists[field]

    def preprocess(self, inputs):
        pass

    def predict(self, preprocessed_inputs):
        pass

    def postprocess(self, prediction_dict, **params):
        pass

    def loss(self, prediction_dict):
        pass

    def provide_groundtruth(self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list=None, groundtruth_keypoints_list=None):
        """Register groundtruth tensors; masks and keypoints are optional."""
        self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list
        self._groundtruth_lists[fields.BoxListFields.classes] = groundtruth_classes_list
        if groundtruth_masks_list:
            self._groundtruth_lists[fields.BoxListFields.masks] = groundtruth_masks_list
        if groundtruth_keypoints_list:
            self._groundtruth_lists[fields.BoxListFields.keypoints] = groundtruth_keypoints_list

    def restore_map(self, from_detection_checkpoint=True):
        pass
def overlap_detection(radius_list, x, y, z, show_info=0):
    """Return 1 if any two spheres overlap, else 0.

    radius_list: sequence whose items carry the radius at index 0.
    x, y, z: parallel sequences of sphere-center coordinates.
    show_info: when nonzero, print every compared pair.

    Two spheres overlap when the sum of their radii strictly exceeds the
    center-to-center distance (touching spheres do not count).
    """
    n = len(radius_list)
    for ii in range(n):
        for jj in range(ii + 1, n):
            r1 = radius_list[ii][0]
            r2 = radius_list[jj][0]
            distance = math.sqrt(((x[ii] - x[jj]) ** 2) + ((y[ii] - y[jj]) ** 2) + ((z[ii] - z[jj]) ** 2))
            if show_info != 0:
                print('radius of NO.', ii, ': ', r1, '\tradius of NO.', jj, ': ', r2, '\tDistance: ', distance)
            if (r1 + r2) > distance:
                # Fixed: the original `break` only left the inner loop and
                # kept scanning all remaining pairs; one overlap is decisive.
                return 1
    return 0
def test_progress_with_exception(workspace, consumer):
    """An exception inside report_progress must still emit begin/end frames."""
    workspace._config.capabilities['window'] = {'workDoneProgress': True}

    class DummyError(Exception):
        pass

    try:
        with workspace.report_progress('some_title'):
            raise DummyError('something')
    except DummyError:
        # Expected: the context manager must not swallow the exception.
        pass

    init_call, *progress_calls = consumer.call_args_list
    assert init_call[0][0]['method'] == 'window/workDoneProgress/create'
    methods = [call[0][0]['method'] for call in progress_calls]
    assert methods == ['$/progress'] * len(methods)
    # Every notification carries the same progress token.
    tokens = {call[0][0]['params']['token'] for call in progress_calls}
    tokens.add(init_call[0][0]['params']['token'])
    assert len(tokens) == 1
    values = [call[0][0]['params']['value'] for call in progress_calls]
    assert values == [{'kind': 'begin', 'title': 'some_title'}, {'kind': 'end'}]
def test_read_only(tmpdir_cwd):
    """build_py must cope with read-only source and package-data files."""
    dist = Distribution(dict(
        script_name='setup.py',
        script_args=['build_py'],
        packages=['pkg'],
        package_data={'pkg': ['data.dat']},
    ))
    os.makedirs('pkg')
    # Create the module and data file, then strip write permission.
    for fn in ('pkg/__init__.py', 'pkg/data.dat'):
        open(fn, 'w').close()
        os.chmod(fn, stat.S_IREAD)
    dist.parse_command_line()
    dist.run_commands()
    shutil.rmtree('build')
class BILSTMCRF(object):
    """Char+word embedding BiLSTM-CRF sequence tagger (TF1 graph-mode)."""

    def __init__(self, params: dict):
        # Pretrained embedding matrices loaded from .npy files; kept trainable.
        self.char_embedding = tf.Variable(np.load(params['embedding_path']), dtype=tf.float32, name='input_char_embedding')
        self.word_embedding = tf.Variable(np.load(params['word_embedding_path']), dtype=tf.float32, name='input_word_embedding')
        self.dropout_rate = params['dropout_prob']
        self.num_labels = params['num_labels']
        self.rnn_size = params['rnn_size']
        self.num_layers = params['num_layers']
        self.hidden_units = params['hidden_units']

    def __call__(self, input_ids=None, input_word_ids=None, labels=None, text_length_list=None, is_training=True, is_testing=False):
        """Build the tagging graph.

        Returns (loss, trans, pred_ids, weight) for training/eval, or just
        pred_ids when is_testing is True.
        """
        input_char_embeddings = tf.nn.embedding_lookup(self.char_embedding, input_ids)
        input_word_embeddings = tf.nn.embedding_lookup(self.word_embedding, input_word_ids)
        # Char and word embeddings are summed, not concatenated.
        input_embeddings = (input_char_embeddings + input_word_embeddings)
        # NOTE(review): hard-coded rate=0.5 ignores self.dropout_rate from
        # params['dropout_prob'] — confirm whether this is intentional.
        input_embeddings = tf.layers.dropout(input_embeddings, rate=0.5, training=is_training)
        lstm_layer = BLSTM(input_embeddings, self.rnn_size, self.num_layers, (1.0 - self.dropout_rate), lengths=text_length_list, is_training=is_training)
        lstm_output = lstm_layer.blstm_layer(input_embeddings)
        lstm_project = tf.layers.dense(lstm_output, self.num_labels)
        # Per-position mask that zeroes out padded timesteps.
        weight = tf.sequence_mask(text_length_list, dtype=tf.float32, name='mask')
        crf_layer = CRF(self.num_labels, labels, text_length_list)
        (loss, trans) = crf_layer.crf_layer(lstm_project)
        pred_ids = crf_layer.crf_decoding(lstm_project, trans)
        if (not is_testing):
            return (loss, trans, pred_ids, weight)
        else:
            return pred_ids
class ForbiddenExtraKeysError(Exception):
    """Raised when a constructor for ``cl`` receives unexpected extra fields."""

    def __init__(self, message: Optional[str], cl: Type, extra_fields: Set[str]) -> None:
        self.cl = cl
        self.extra_fields = extra_fields
        # A falsy message falls back to a generated description.
        joined = ', '.join(extra_fields)
        super().__init__(message or f"Extra fields in constructor for {cl.__name__}: {joined}")
def default_errformat(val):
    """Format an element's error metadata as a multi-line string.

    Renders the first meta entry as a headline, then the remaining entries as
    indented ``key: value`` lines.  When the element has a creator, its
    formatted representation is appended; if formatting the creator itself
    raises, that failure's traceback is embedded instead of propagating.
    """
    it = val._e_metas()
    if (val.creator is not None):
        try:
            after = (os.linesep + format_element(val.creator))
        except Exception:
            # Formatting the creator must never mask the original error.
            after = ('Element Traceback of %r caused exception:%s' % (type(val.creator).__name__, os.linesep))
            after += indent(traceback.format_exc())
            after = (os.linesep + indent(after).rstrip())
    else:
        after = ''
    # next(it) consumes the headline entry; the rest are key/value pairs.
    return ((((next(it)[1] + os.linesep) + ' ') + (os.linesep + ' ').join((((k + ': ') + v) for (k, v) in it))) + after)
class Dequantization(nn.Module):
    """Flow-based dequantization network (flax ``nn.Module`` with
    dataclass-style hyperparameter fields): transforms Gaussian noise ``eps``
    conditioned on the image ``x`` through a stack of coupling layers."""

    # Hyperparameters of the coupling stack.
    filters: int = 96
    components: int = 4
    blocks: int = 5
    attn_heads: int = 4
    dropout_p: float = 0.0
    use_nin: bool = True
    use_ln: bool = True

    def __call__(self, eps, x, inverse=False, train=False):
        """Run the flow (or its inverse when ``inverse``) on ``eps``.

        Returns (h, logp) where logp is the accumulated flow log-det minus
        the standard-normal log-density of eps (summed over axes 1-3).
        """
        # log N(eps; 0, I), summed over the non-batch axes.
        logp_eps = jnp.sum((((- (eps ** 2)) / 2.0) - (0.5 * np.log((2 * np.pi)))), axis=(1, 2, 3))
        coupling_params = dict(filters=self.filters, blocks=self.blocks, components=self.components, heads=self.attn_heads, use_nin=self.use_nin, use_ln=self.use_ln)
        # Four Norm+coupling+flip stages between a checkerboard split/unsplit,
        # finished with a Sigmoid to land in (0, 1).
        modules = [CheckerboardSplit(), Norm(), MixLogisticCoupling(**coupling_params), TupleFlip(), Norm(), MixLogisticCoupling(**coupling_params), TupleFlip(), Norm(), MixLogisticCoupling(**coupling_params), TupleFlip(), Norm(), MixLogisticCoupling(**coupling_params), TupleFlip(), CheckerboardSplit(inverse_module=True), Sigmoid()]
        # Image-derived context that conditions every coupling layer.
        context = DeepProcessor(dropout_p=self.dropout_p)(x, train=train)
        if (not inverse):
            logp_sum = 0.0
            h = eps
            for module in modules:
                if isinstance(module, MixLogisticCoupling):
                    (h, logp) = module(h, context=context, inverse=inverse, train=train)
                else:
                    (h, logp) = module(h, inverse=inverse)
                # Some modules contribute no log-det and return logp=None.
                logp_sum = ((logp_sum + logp) if (logp is not None) else logp_sum)
            return (h, (logp_sum - logp_eps))
        else:
            # Inverse pass traverses the same modules in reverse order.
            logp_sum = 0.0
            h = eps
            for module in modules[::(- 1)]:
                if isinstance(module, MixLogisticCoupling):
                    (h, logp) = module(h, context=context, inverse=inverse, train=train)
                else:
                    (h, logp) = module(h, inverse=inverse)
                logp_sum = ((logp_sum + logp) if (logp is not None) else logp_sum)
            return (h, (logp_sum - logp_eps))
def get_versioned_symbols(libs):
    """Map each library's name to its {version: {symbols}} usage.

    ``libs`` maps a file path to the library's display name; only paths that
    are actual ELF files are inspected.
    """
    result = {}
    for path, elf in elf_file_filter(libs.keys()):
        versioned = defaultdict(set)
        for key, value in elf_find_versioned_symbols(elf):
            log.debug('path %s, key %s, value %s', path, key, value)
            versioned[key].add(value)
        result[libs[path]] = versioned
    return result
def test_configure_multiple_modules():
    """Bindings contributed by several configure callables should all apply."""
    def configure_a(binder):
        binder.bind(DependsOnEmptyClass)

    def configure_b(binder):
        binder.bind(EmptyClass)

    injector = Injector([configure_a, configure_b])
    instance = injector.get(DependsOnEmptyClass)
    assert isinstance(instance, DependsOnEmptyClass)
    # The dependency bound in the second module is injected too.
    assert isinstance(instance.b, EmptyClass)
class CLIPTextCfg():
    """Configuration for the CLIP text tower (dataclass-style fields)."""

    context_length: int = 77
    vocab_size: int = 49408
    width: int = 512
    heads: int = 8
    layers: int = 12
    # Layer-scale initial value; None disables layer scale.
    ls_init_value: Optional[float] = None
    # HuggingFace text-tower settings (fixed annotations: these default to
    # None, so they are Optional[str], not str).
    hf_model_name: Optional[str] = None
    hf_tokenizer_name: Optional[str] = None
    hf_model_pretrained: bool = True
    proj: str = 'mlp'
    pooler_type: str = 'mean_pooler'
    masked_language_modeling: bool = False
    fusedLN: bool = False
    xattn: bool = False
    attn_mask: bool = True
class INR(nn.Module):
    """Implicit neural representation built from Gaussian-activation layers.

    Optionally applies a positional encoding selected via
    ``pos_encode_configs['type']``; the output head may be a plain linear
    layer (``outermost_linear``) or another Gaussian layer.
    """

    def __init__(self, in_features, hidden_features, hidden_layers, out_features, outermost_linear=True, sigma=10.0, pos_encode_configs=None):
        super().__init__()
        # Fixed: the default used to be a shared mutable dict literal.
        if pos_encode_configs is None:
            pos_encode_configs = {'type': None, 'use_nyquist': None, 'scale_B': None, 'mapping_input': None}
        self.pos_encode = pos_encode_configs['type']
        if self.pos_encode in Encoding().encoding_dict.keys():
            self.positional_encoding = Encoding(self.pos_encode).run(in_features=in_features, pos_encode_configs=pos_encode_configs)
            # The encoding expands the effective input dimensionality.
            in_features = self.positional_encoding.out_dim
        elif self.pos_encode is None:
            self.pos_encode = False
        else:
            # Fixed: previously `assert '<message>'`, which always passed and
            # let an invalid value fall through to a confusing later failure.
            raise ValueError('Invalid pos_encode. Choose from: [frequency, Gaussian]')
        self.complex = False
        self.nonlin = GaussLayer
        self.net = []
        self.net.append(self.nonlin(in_features, hidden_features, sigma=sigma))
        for i in range(hidden_layers):
            self.net.append(self.nonlin(hidden_features, hidden_features, sigma=sigma))
        if outermost_linear:
            # self.complex is always False here, so the head is real-valued.
            dtype = torch.cfloat if self.complex else torch.float
            final_linear = nn.Linear(hidden_features, out_features, dtype=dtype)
            self.net.append(final_linear)
        else:
            self.net.append(self.nonlin(hidden_features, out_features, sigma=sigma))
        self.net = nn.Sequential(*self.net)

    def forward(self, coords):
        """Map input coordinates to outputs, encoding them first if enabled."""
        if self.pos_encode:
            coords = self.positional_encoding(coords)
        return self.net(coords)
def meta_next_trading_day(is_trading_day):
    """Build a function returning the first trading day strictly after a date.

    ``is_trading_day`` is a predicate on ``datetime.date`` values.
    """

    def next_trading_day(dt):
        # Normalise exact datetimes to dates (subclasses pass through as-is).
        if type(dt) is datetime.datetime:
            dt = dt.date()
        one_day = datetime.timedelta(days=1)
        candidate = dt + one_day
        while not is_trading_day(candidate):
            candidate = candidate + one_day
        return candidate

    return next_trading_day
class GdbExit(sublime_plugin.WindowCommand):
    """Sublime command that shuts down the running gdb session."""

    def run(self):
        global gdb_shutting_down
        # Flag read elsewhere so handlers know the exit is deliberate.
        gdb_shutting_down = True
        wait_until_stopped()
        run_cmd('-gdb-exit', True)
        # Also stop the gdbserver child process, if one was started.
        if gdb_server_process:
            gdb_server_process.terminate()

    def is_enabled(self):
        # Only meaningful while a gdb session is live.
        return is_running()

    def is_visible(self):
        return is_running()
def _test():
    """Smoke-test fbnet_cb: parameter count, output shape, and backward."""
    import torch
    pretrained = False
    for model in [fbnet_cb]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # Known parameter count for this architecture.
        assert (model != fbnet_cb) or (weight_count == 5572200)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        # Ensure gradients flow end to end.
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
class StochasticEncoderLayer(EncoderLayer):
    """Encoder layer with stochastic depth: during training the whole
    residual block is skipped with probability ``death_rate``."""

    def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, death_rate=0.0):
        super().__init__(h, d_model, p, d_ff, attn_p, version)
        self.death_rate = death_rate

    def forward(self, input, attn_mask):
        # The layer survives with probability (1 - death_rate) while
        # training; at eval time it always runs.
        keep = (not self.training) or (torch.rand(1)[0].item() >= self.death_rate)
        if not keep:
            return input
        # Self-attention sublayer.
        query = self.preprocess_attn(input)
        out, _ = self.multihead(query, query, query, attn_mask)
        if self.training:
            # Rescale so the expected residual matches eval-time magnitude.
            out = out / (1 - self.death_rate)
        input = self.postprocess_attn(out, input)
        # Feed-forward sublayer.
        out = self.feedforward(self.preprocess_ffn(input))
        if self.training:
            out = out / (1 - self.death_rate)
        input = self.postprocess_ffn(out, input)
        return input
class SemsegMeter(object):
    """Accumulates per-class TP/FP/FN counts for semantic segmentation and
    reports per-class Jaccard (IoU) scores plus their mean (mIoU)."""

    def __init__(self, num_classes, class_names, has_bg=True, ignore_index=255):
        # The background, when present, is counted as one extra class.
        self.num_classes = (num_classes + int(has_bg))
        self.class_names = class_names
        self.tp = ([0] * self.num_classes)
        self.fp = ([0] * self.num_classes)
        self.fn = ([0] * self.num_classes)
        # Only the conventional 255 "ignore" label is supported.
        assert (ignore_index == 255)
        self.ignore_index = ignore_index

    def update(self, pred, gt):
        """Accumulate stats from a prediction/ground-truth tensor pair."""
        # Positions labeled with ignore_index are excluded from all counts.
        valid = (gt != self.ignore_index)
        for i_part in range(0, self.num_classes):
            tmp_gt = (gt == i_part)
            tmp_pred = (pred == i_part)
            self.tp[i_part] += torch.sum(((tmp_gt & tmp_pred) & valid)).item()
            self.fp[i_part] += torch.sum((((~ tmp_gt) & tmp_pred) & valid)).item()
            self.fn[i_part] += torch.sum(((tmp_gt & (~ tmp_pred)) & valid)).item()

    def reset(self):
        """Clear all accumulated statistics."""
        self.tp = ([0] * self.num_classes)
        self.fp = ([0] * self.num_classes)
        self.fn = ([0] * self.num_classes)

    def return_score(self, verbose=True):
        """Return {'jaccards_all_categs': [...], 'mIoU': float}.

        Fixed: ``verbose`` used to be ignored and the scores were always
        printed; printing now only happens when ``verbose`` is truthy.
        """
        jac = ([0] * self.num_classes)
        for i_part in range(self.num_classes):
            # Guard against 0/0 for classes never seen.
            jac[i_part] = (float(self.tp[i_part]) / max(float(((self.tp[i_part] + self.fp[i_part]) + self.fn[i_part])), 1e-08))
        eval_result = dict()
        eval_result['jaccards_all_categs'] = jac
        eval_result['mIoU'] = np.mean(jac)
        if verbose:
            print('Evaluation of semantic segmentation ')
            print(('mIoU is %.2f' % (100 * eval_result['mIoU'])))
            for i_part in range(self.num_classes):
                print(('IoU class %s is %.2f' % (self.class_names[i_part], (100 * jac[i_part]))))
        return eval_result
def parse_args():
    """Parse CLI args in two phases: --cfg first (so the experiment config can
    supply defaults such as PRINT_FREQ), then the remaining options.

    Returns the fully-parsed argparse.Namespace.  Mutates the global config
    via update_config()/update_dir() as a side effect.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    # Phase 1: only --cfg is registered yet; load it before defining options
    # whose defaults come from the config (e.g. --frequent below).
    (args, rest) = parser.parse_known_args()
    update_config(args.cfg)
    parser.add_argument('--frequent', help='frequency of logging', default=config.PRINT_FREQ, type=int)
    parser.add_argument('--gpus', help='gpus', type=str)
    parser.add_argument('--workers', help='num of dataloader workers', type=int)
    parser.add_argument('--model-file', help='model state file', type=str)
    parser.add_argument('--use-detect-bbox', help='use detect bbox', action='store_true')
    parser.add_argument('--flip-test', help='use flip test', action='store_true')
    parser.add_argument('--post-process', help='use post process', action='store_true')
    parser.add_argument('--shift-heatmap', help='shift heatmap', action='store_true')
    parser.add_argument('--coco-bbox-file', help='coco detection bbox file', type=str)
    parser.add_argument('--modelDir', help='model directory', type=str, default='')
    parser.add_argument('--logDir', help='log directory', type=str, default='')
    parser.add_argument('--dataDir', help='data directory', type=str, default='')
    parser.add_argument('--data-format', help='data format', type=str, default='')
    parser.add_argument('--NoDebug', type=str, default='', help='create model without Debug')
    # Phase 2: full parse with all options registered.
    args = parser.parse_args()
    update_dir(args.modelDir, args.logDir, args.dataDir)
    return args
def test_resnext_backbone():
    """ResNeXt rejects unsupported depths and, for 50/32x4d, produces the
    usual four-stage feature pyramid with grouped convolutions."""
    with pytest.raises(KeyError):
        # depth=18 has no ResNeXt architecture settings.
        ResNeXt(depth=18)
    model = ResNeXt(depth=50, groups=32, base_width=4)
    print(model)
    # Every bottleneck's 3x3 conv must use the configured group count.
    for m in model.modules():
        if is_block(m):
            assert m.conv2.groups == 32
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    expected = [torch.Size([1, 256, 56, 56]), torch.Size([1, 512, 28, 28]), torch.Size([1, 1024, 14, 14]), torch.Size([1, 2048, 7, 7])]
    for level, shape in zip(feat, expected):
        assert level.shape == shape
def ComboBoxDroppedHeightTest(windows):
    """Flag ComboBox controls whose dropped-rect height differs from their
    reference control's dropped-rect height."""
    bugs = []
    for win in windows:
        if not win.ref:
            continue
        # Only compare when both the control and its reference are ComboBoxes.
        both_combo = (win.class_name() == 'ComboBox') and (win.ref.class_name() == 'ComboBox')
        if not both_combo:
            continue
        if win.dropped_rect().height() != win.ref.dropped_rect().height():
            # NOTE(review): `testname` appears to be a module-level global
            # defined outside this view — confirm.
            bugs.append(([win], {}, testname, 0))
    return bugs
def main():
    """Assemble a Flax vision-encoder/text-decoder model from two pretrained
    checkpoints, fix up its special-token configuration, and save the model,
    image processor and tokenizer to model_args.output_dir."""
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()
    # Configs may come from a dedicated config name or from the model path.
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
    # The decoder must run causally and cross-attend to the encoder output.
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config)
    # Fall back to bos/eos when the decoder config lacks explicit
    # decoder_start/pad token ids.
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if (decoder_start_token_id is None):
        decoder_start_token_id = decoder_config.bos_token_id
    if (pad_token_id is None):
        pad_token_id = decoder_config.eos_token_id
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    # Keep the tokenizer's pad token in sync with the model config.
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
('pypyr.moduleloader.get_module')
('unittest.mock.MagicMock', new=DeepCopyMagicMock)
def test_run_pipeline_steps_complex_swallow_true_error(mock_get_module):
    """With a truthy ``swallow``, a step error is logged and recorded in
    context['runErrors'] instead of propagating to the caller."""
    step = Step({'name': 'step1', 'swallow': 1})
    context = get_test_context()
    original_len = len(context)
    arb_error = ValueError('arb error here')
    with patch.object(Step, 'invoke_step', side_effect=arb_error) as mock_invoke_step:
        with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
            with patch_logger('pypyr.dsl', logging.ERROR) as mock_logger_error:
                step.run_step(context)
    mock_logger_debug.assert_any_call('done')
    mock_logger_error.assert_called_once_with('step1 Ignoring error because swallow is True for this step.\nValueError: arb error here')
    mock_invoke_step.assert_called_once_with(context={'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': [{'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2': 'value7'}], 'key5': False, 'key6': True, 'key7': 77})
    # The only context mutation is the appended runErrors entry.
    assert (len(context) == (original_len + 1))
    assert (context['runErrors'] == [{'col': None, 'customError': {}, 'description': 'arb error here', 'exception': arb_error, 'line': None, 'name': 'ValueError', 'step': step.name, 'swallowed': True}])
class HotelRoomReservationFactory(DjangoModelFactory):
    """Factory producing HotelRoomReservation fixtures for tests."""

    order_code = 'AAAABB'  # fixed placeholder order code
    room = factory.SubFactory(HotelRoomFactory)
    # Faker guarantees checkin < today < checkout.
    checkin = factory.Faker('past_date')
    checkout = factory.Faker('future_date')
    user = factory.SubFactory(UserFactory)

    class Meta():
        model = HotelRoomReservation
class TimeLabel(Gtk.Label):
    """Label showing a formatted time whose requested width never shrinks for
    a given character count, so the surrounding layout does not jitter."""

    def __init__(self, time_=0):
        Gtk.Label.__init__(self)
        # num_chars -> widest (min, natural) width request seen so far.
        self.__widths = {}
        self._disabled = False
        self.set_time(time_)

    def do_get_preferred_width(self):
        widths = Gtk.Label.do_get_preferred_width(self)
        num_chars = len(self.get_text())
        # Never report less than the widest request seen for this length.
        prev = self.__widths.get(num_chars, widths)
        widths = (max(widths[0], prev[0]), max(widths[1], prev[1]))
        self.__widths[num_chars] = widths
        return widths

    def set_time(self, time_):
        # Remember the value even while disabled so it can be restored.
        self._last_time = time_
        if not self._disabled:
            self.set_text(util.format_time_display(time_))

    def set_disabled(self, disabled):
        self._disabled = disabled
        if disabled:
            self.set_text('')
        else:
            self.set_time(self._last_time)
class Test_avl_iter(unittest.TestCase):
    """Exercise avl-tree iterators: iteration order, bidirectional stepping,
    index bookkeeping, and removal through a live iterator."""

    def setUp(self):
        self.n = 1000
        self.t = range_tree(0, self.n)
        self.orig = list(range(self.n))

    def testiter_forloop(self):
        # Iteration is sorted regardless of insertion order.
        # (`list` shadows the builtin here — pre-existing in this test.)
        list = self.orig[:]
        for i in range(5):
            random.shuffle(list)
            for k in avl.new(list):
                self.assertTrue((k == self.orig[k]))

    def testiter_forward(self):
        j = self.t.iter()
        for k in gen_ints(0, self.n):
            self.assertTrue(((next(j) == k) and (j.index() == k) and (j.cur() == k)))
        # Exhausted: next raises, cur errors, index sits one past the end.
        self.assertRaises(StopIteration, j.__next__)
        self.assertRaises(avl.Error, j.cur)
        self.assertTrue((j.index() == self.n))

    def testiter_backward(self):
        # iter(1) starts at the tail for reverse traversal.
        j = self.t.iter(1)
        for k in gen_ints(1, (self.n + 1)):
            self.assertTrue((((j.prev() + k) == self.n) and ((j.index() + k) == self.n) and ((j.cur() + k) == self.n)))
        self.assertRaises(StopIteration, j.prev)
        self.assertRaises(avl.Error, j.cur)
        self.assertTrue((j.index() == (- 1)))

    def testiter_basic(self):
        t = avl.new()
        j = iter(t)
        k = t.iter(1)
        # Both directions are immediately exhausted on an empty tree.
        self.assertRaises(StopIteration, j.__next__)
        self.assertRaises(StopIteration, j.prev)
        self.assertRaises(StopIteration, k.__next__)
        self.assertRaises(StopIteration, k.prev)
        t.insert('bb')
        # Existing iterators see elements inserted after their creation.
        self.assertRaises(StopIteration, j.prev)
        self.assertRaises(StopIteration, k.__next__)
        self.assertTrue((next(j) == 'bb'))
        self.assertTrue((k.prev() == 'bb'))
        self.assertRaises(StopIteration, j.__next__)
        self.assertRaises(StopIteration, k.prev)
        self.assertTrue((j.prev() == 'bb'))
        self.assertTrue((next(k) == 'bb'))
        self.assertTrue(((j.cur() == 'bb') and (k.cur() == 'bb')))
        t.insert('aa')
        self.assertTrue((j.prev() == 'aa'))
        t.insert('cc')
        self.assertTrue((next(k) == 'cc'))

    def testiter_remove(self):
        # Removing via the iterator keeps its index stable and the tree valid.
        for start in range(1, (self.n + 1)):
            u = avl.new(self.t)
            self.assertTrue((u.verify() == 1))
            j = iter(u)
            for i in range(start):
                next(j)
            index = j.index()
            self.assertTrue((index == (start - 1)))
            while (index < len(u)):
                j.remove()
                self.assertTrue((j.index() == index))
            self.assertRaises(avl.Error, j.remove)
            self.assertTrue((u.verify() == 1))

    def tearDown(self):
        self.t.clear()
        self.t = None
        self.orig = None
def main(client, config):
    """Compute mean cross-price elasticity between web and store sales per
    item, returning a dataframe of (ws_item_sk, cross_price_elasticity)."""
    # Load the four input tables (optionally timing the read phase).
    (ws_df, item_df, imp_df, ss_df) = benchmark(read_tables, config=config, compute_result=config['get_read_time'])
    item_imp_join_df = get_helper_query_table(imp_df, item_df)
    r_ss = get_ss(ss_df, item_imp_join_df)
    r_ws = get_ws(ws_df, item_imp_join_df)
    # Align store and web aggregates on (item, imp) pairs.
    result_df = r_ws.merge(r_ss, left_on=['ws_item_sk', 'imp_sk'], right_on=['ss_item_sk', 'imp_sk'], how='inner', suffixes=('ws', 'ss'))
    # Numerator: change in combined (store + web) quantity.
    result_df['cross_price_elasticity'] = (((result_df['current_ss_quant'] + result_df['current_ws_quant']) - result_df['prev_ss_quant']) - result_df['prev_ws_quant'])
    # Denominator: previous combined quantity times the price change.
    result_df['cross_price_elasticity'] = (result_df['cross_price_elasticity'] / ((result_df['prev_ss_quant'] + result_df['prev_ws_quant']) * result_df['price_change']))
    final_cols_2_keep = ['ws_item_sk', 'cross_price_elasticity']
    result_df = result_df[final_cols_2_keep]
    result_df = result_df.groupby(['ws_item_sk']).agg({'cross_price_elasticity': 'mean'})
    result_df = result_df.reset_index(drop=False)
    # Block until the distributed computation has materialized.
    wait(result_df)
    return result_df
_pipeline_test
_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
_torch
    def test_small_model_pt(self):
        """Tiny random CLIP checkpoint (PyTorch): scores tie at ~1/3 each."""
        image_classifier = pipeline(model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification')
        image = Image.open('./tests/fixtures/tests_samples/COCO/.png')
        output = image_classifier(image, candidate_labels=['a', 'b', 'c'])
        # With a random model the scores tie, so the sorted label order is
        # unstable; accept both observed orderings.
        self.assertIn(nested_simplify(output), [[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}], [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}]])
        # Batched call: five copies of the image with batch_size=2.
        output = image_classifier(([image] * 5), candidate_labels=['A', 'B', 'C'], batch_size=2)
        self.assertEqual(nested_simplify(output), [[{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}]])
_tf
def test_small_model_tf(self):
image_classifier = pipeline(model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', framework='tf')
image = Image.open('./tests/fixtures/tests_samples/COCO/.png')
output = image_classifier(image, candidate_labels=['a', 'b', 'c'])
self.assertEqual(nested_simplify(output), [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}])
output = image_classifier(([image] * 5), candidate_labels=['A', 'B', 'C'], batch_size=2)
self.assertEqual(nested_simplify(output), [[{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}], [{'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}, {'score': 0.333, 'label': ANY(str)}]])
_torch
def test_large_model_pt(self):
image_classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
image = Image.open('./tests/fixtures/tests_samples/COCO/.png')
output = image_classifier(image, candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(nested_simplify(output), [{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}])
output = image_classifier(([image] * 5), candidate_labels=['cat', 'plane', 'remote'], batch_size=2)
self.assertEqual(nested_simplify(output), ([[{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}]] * 5))
_tf
def test_large_model_tf(self):
image_classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', framework='tf')
image = Image.open('./tests/fixtures/tests_samples/COCO/.png')
output = image_classifier(image, candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(nested_simplify(output), [{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}])
output = image_classifier(([image] * 5), candidate_labels=['cat', 'plane', 'remote'], batch_size=2)
self.assertEqual(nested_simplify(output), ([[{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}]] * 5)) |
class TestAssertionWarnings:
    """Tests that assertion rewriting emits PytestAssertRewriteWarning where expected."""

    @staticmethod
    def assert_result_warns(result, msg) -> None:
        """Assert the run's stdout contains a PytestAssertRewriteWarning carrying *msg*.

        Fix: declared as a @staticmethod. Its first parameter is the pytest
        run result, so as a plain method the call
        ``self.assert_result_warns(result, msg)`` would bind ``self`` to
        ``result`` and pass ``result`` as ``msg``, breaking the check.
        """
        result.stdout.fnmatch_lines([('*PytestAssertRewriteWarning: %s*' % msg)])

    def test_tuple_warning(self, pytester: Pytester) -> None:
        """``assert (1, 2)`` is always true, so the rewriter should warn about it."""
        pytester.makepyfile(' def test_foo():\n assert (1,2)\n ')
        result = pytester.runpytest()
        self.assert_result_warns(result, 'assertion is always true, perhaps remove parentheses?')
def test_search_for_directory_setup_read_setup_with_extras(provider: Provider, mocker: MockerFixture, fixture_dir: FixtureDirGetter) -> None:
    """The provider reads setup metadata, including extras, from a directory dependency."""
    mocker.patch('poetry.utils.env.EnvManager.get', return_value=MockEnv())
    demo_path = fixture_dir('git') / 'github.com' / 'demo' / 'demo'
    dependency = DirectoryDependency('demo', demo_path, extras=['foo'])

    package = provider.search_for_direct_origin_dependency(dependency)

    assert package.name == 'demo'
    assert package.version.text == '0.1.2'
    # Mandatory requirements come straight from install_requires.
    mandatory = [dep for dep in package.requires if not dep.is_optional()]
    assert mandatory == [get_dependency('pendulum', '>=1.4.4')]
    # Optional requirements are those contributed by the declared extras.
    extra_deps = [dep for dep in package.requires if dep.is_optional()]
    assert extra_deps == [get_dependency('tomlkit'), get_dependency('cleo')]
    assert package.extras == {'foo': [get_dependency('cleo')], 'bar': [get_dependency('tomlkit')]}
class DescribeImagePart():
    """Unit tests for the docx ImagePart class ("describe"-style pytest suite).

    NOTE(review): the fixture methods at the bottom appear to have lost their
    @pytest.fixture decorators; only the bare ``(params=[...])`` argument
    lines survive for the parametrized ones. Restore the decorators from the
    original file before running this suite.
    """

    def it_is_used_by_PartFactory_to_construct_image_part(self, image_part_load_, partname_, blob_, package_, image_part_):
        # PartFactory should route image content types to ImagePart.load.
        content_type = CT.JPEG
        reltype = RT.IMAGE
        image_part_load_.return_value = image_part_
        part = PartFactory(partname_, content_type, reltype, blob_, package_)
        image_part_load_.assert_called_once_with(partname_, content_type, blob_, package_)
        assert (part is image_part_)

    def it_can_construct_from_an_Image_instance(self, image_, partname_, _init_):
        image_part = ImagePart.from_image(image_, partname_)
        # __init__ receives (partname, content_type, blob, image); ANY matches self.
        _init_.assert_called_once_with(ANY, partname_, image_.content_type, image_.blob, image_)
        assert isinstance(image_part, ImagePart)

    def it_knows_its_default_dimensions_in_EMU(self, dimensions_fixture):
        (image_part, cx, cy) = dimensions_fixture
        assert (image_part.default_cx == cx)
        assert (image_part.default_cy == cy)

    def it_knows_its_filename(self, filename_fixture):
        (image_part, expected_filename) = filename_fixture
        assert (image_part.filename == expected_filename)

    def it_knows_the_sha1_of_its_image(self):
        blob = b'fO0Bar'
        image_part = ImagePart(None, None, blob)
        assert (image_part.sha1 == '4921e7002ddfba690a937d54bda226a7b8bdeb68')

    # ---- fixtures ---------------------------------------------------------

    def blob_(self, request):
        return instance_mock(request, str)

    (params=['loaded', 'new'])
    def dimensions_fixture(self, request):
        # Both a part loaded from package bytes and one built from an Image
        # must report the same native size in EMU.
        image_file_path = test_file('monty-truth.png')
        image = Image.from_file(image_file_path)
        (expected_cx, expected_cy) = (1905000, 2717800)
        if (request.param == 'loaded'):
            partname = PackURI('/word/media/image1.png')
            content_type = CT.PNG
            image_part = ImagePart.load(partname, content_type, image.blob, None)
        elif (request.param == 'new'):
            image_part = ImagePart.from_image(image, None)
        return (image_part, expected_cx, expected_cy)

    (params=['loaded', 'new'])
    def filename_fixture(self, request, image_):
        partname = PackURI('/word/media/image666.png')
        if (request.param == 'loaded'):
            # A loaded part has no source image, so a generic name is expected.
            image_part = ImagePart(partname, None, None, None)
            expected_filename = 'image.png'
        elif (request.param == 'new'):
            image_.filename = 'foobar.PXG'
            image_part = ImagePart(partname, None, None, image_)
            expected_filename = image_.filename
        return (image_part, expected_filename)

    def image_(self, request):
        return instance_mock(request, Image)

    def _init_(self, request):
        return initializer_mock(request, ImagePart)

    def image_part_(self, request):
        return instance_mock(request, ImagePart)

    def image_part_load_(self, request):
        return method_mock(request, ImagePart, 'load', autospec=False)

    def package_(self, request):
        return instance_mock(request, Package)

    def partname_(self, request):
        return instance_mock(request, PackURI)
class Migration(migrations.Migration):
    """Comms app migration 0009: loosens Msg relation fields.

    Adds blank/null, related_name aliases, db_index and help_text to the
    hide-from, receiver, sender and tag relations of the Msg model.

    NOTE(review): the 'exixtence' typo and all field strings are preserved
    verbatim -- this is a historical migration. The db_sender_external
    help_text originally contained a literal line break whose exact
    surrounding whitespace could not be recovered from this copy; it is
    reconstructed below with a plain '\n' escape -- confirm against the
    original file.
    """

    dependencies = [('comms', '0008_auto__0902')]

    operations = [
        migrations.AlterField(model_name='msg', name='db_hide_from_channels', field=models.ManyToManyField(blank=True, null=True, related_name='hide_from_channels_set', to='comms.ChannelDB')),
        migrations.AlterField(model_name='msg', name='db_hide_from_objects', field=models.ManyToManyField(blank=True, null=True, related_name='hide_from_objects_set', to='objects.ObjectDB')),
        migrations.AlterField(model_name='msg', name='db_hide_from_accounts', field=models.ManyToManyField(blank=True, null=True, related_name='hide_from_accounts_set', to=settings.AUTH_USER_MODEL)),
        migrations.AlterField(model_name='msg', name='db_receivers_channels', field=models.ManyToManyField(blank=True, help_text='channel recievers', null=True, related_name='channel_set', to='comms.ChannelDB')),
        migrations.AlterField(model_name='msg', name='db_receivers_objects', field=models.ManyToManyField(blank=True, help_text='object receivers', null=True, related_name='receiver_object_set', to='objects.ObjectDB')),
        migrations.AlterField(model_name='msg', name='db_receivers_accounts', field=models.ManyToManyField(blank=True, help_text='account receivers', null=True, related_name='receiver_account_set', to=settings.AUTH_USER_MODEL)),
        migrations.AlterField(model_name='msg', name='db_sender_external', field=models.CharField(blank=True, db_index=True, help_text="identifier for external sender, for example a sender over an IRC connection (i.e. \nsomeone who doesn't have an exixtence in-game).", max_length=255, null=True, verbose_name='external sender')),
        migrations.AlterField(model_name='msg', name='db_sender_objects', field=models.ManyToManyField(blank=True, db_index=True, null=True, related_name='sender_object_set', to='objects.ObjectDB', verbose_name='sender(object)')),
        migrations.AlterField(model_name='msg', name='db_sender_accounts', field=models.ManyToManyField(blank=True, db_index=True, null=True, related_name='sender_account_set', to=settings.AUTH_USER_MODEL, verbose_name='sender(account)')),
        migrations.AlterField(model_name='msg', name='db_tags', field=models.ManyToManyField(blank=True, help_text='tags on this message. Tags are simple string markers to identify, group and alias messages.', null=True, to='typeclasses.Tag')),
    ]
def test_param_name_duplicates():
    """Constructing an InputShape with two params sharing the name 'a' must fail."""
    duplicate_msg = full_match_regex_str("Parameter names {'a'} are duplicated")
    with pytest.raises(ValueError, match=duplicate_msg):
        fields = (
            InputField(id='a1', type=int, default=NoDefault(), is_required=True, metadata={}, original=None),
            InputField(id='a2', type=int, default=NoDefault(), is_required=True, metadata={}, original=None),
        )
        # Two distinct field ids, but both exposed under the public name 'a'.
        params = (
            Param(field_id='a1', name='a', kind=ParamKind.POS_OR_KW),
            Param(field_id='a2', name='a', kind=ParamKind.POS_OR_KW),
        )
        InputShape(
            constructor=stub_constructor,
            kwargs=None,
            fields=fields,
            params=params,
            overriden_types=frozenset({'a1', 'a2'}),
        )
def logged_in_admin_user(e2e_tests_django_db_setup, page: Page) -> Page:
    """Log the Playwright page in as 'admin' and navigate to /management.

    Generator-style fixture: yields the logged-in page to the test.
    NOTE(review): presumably decorated with @pytest.fixture in the original;
    no decorator is visible in this copy.
    """
    page.goto('/account/login')
    # Explicit timeout on the first fill waits for the login form to render.
    page.get_by_label('Username').fill('admin', timeout=5000)
    page.get_by_label('Password').fill('admin')
    page.get_by_role('button', name='Login').click()
    page.goto('/management')
    (yield page)
class TestArtifactVersion(unittest.TestCase, ArchiveTestingMixin):
    """Tests writing artifacts pinned to specific archive format versions."""

    def setUp(self):
        # Fresh temp dir per test; provenance capture mimics a user import.
        prefix = 'qiime2-test-temp-'
        self.temp_dir = tempfile.TemporaryDirectory(prefix=prefix)
        self.provenance_capture = archive.ImportProvenanceCapture()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_nonexistent_archive_format(self):
        """Unknown version strings must be rejected up front."""
        with self.assertRaisesRegex(ValueError, 'Version foo not supported'):
            with artifact_version('foo'):
                pass

    def test_write_v0_archive(self):
        """v0 layout: data + metadata only, no provenance/ directory."""
        fp = os.path.join(self.temp_dir.name, 'artifact_v0.qza')
        with artifact_version(0):
            artifact = Artifact._from_view(FourInts, [(- 1), 42, 0, 43], list, self.provenance_capture)
            artifact.save(fp)
        root_dir = str(artifact.uuid)
        expected = {'VERSION', 'metadata.yaml', 'data/file1.txt', 'data/file2.txt', 'data/nested/file3.txt', 'data/nested/file4.txt'}
        self.assertArchiveMembers(fp, root_dir, expected)
        with zipfile.ZipFile(fp, mode='r') as zf:
            version = zf.read(os.path.join(root_dir, 'VERSION'))
            # VERSION file must record the archive format actually written.
            self.assertRegex(str(version), '^.*archive: 0.*$')

    def test_write_v4_archive(self):
        """v4 layout additionally carries provenance metadata and citations."""
        fp = os.path.join(self.temp_dir.name, 'artifact_v4.qza')
        with artifact_version(4):
            artifact = Artifact._from_view(FourInts, [(- 1), 42, 0, 43], list, self.provenance_capture)
            artifact.save(fp)
        root_dir = str(artifact.uuid)
        expected = {'VERSION', 'metadata.yaml', 'data/file1.txt', 'data/file2.txt', 'data/nested/file3.txt', 'data/nested/file4.txt', 'provenance/metadata.yaml', 'provenance/VERSION', 'provenance/citations.bib', 'provenance/action/action.yaml'}
        self.assertArchiveMembers(fp, root_dir, expected)
        with zipfile.ZipFile(fp, mode='r') as zf:
            version = zf.read(os.path.join(root_dir, 'VERSION'))
            self.assertRegex(str(version), '^.*archive: 4.*$')
def main(id: int, skip_crawling: bool, with_quote: bool):
    """Crawl a radio program by numeric id, download its assets, build the video.

    NOTE(review): `id` shadows the builtin -- presumably mirrors a CLI option
    name; confirm before renaming.
    """
    parsed_json = Crawler.crawl(id)
    cache_dir = os.sep.join([os.curdir, 'cache', str(id), 'data.json'])
    # NOTE(review): `file` is opened but never read -- the Radio is built from
    # `parsed_json`, not from the cached data.json. Either the open is a stale
    # leftover (it only proves the cache file exists) or load_from_json was
    # meant to consume `file`; confirm intent before changing.
    with open(cache_dir, 'r', encoding='utf-8') as file:
        radio = Radio.load_from_json(parsed_json)
    if (not skip_crawling):
        Crawler.download_assets(radio, ((os.curdir + os.sep) + 'cache'), with_quote)
    Video.create_video(radio)
# NOTE(review): the lines below look like stripped click decorators --
# originally @command(epilog=...), @click.option(...), @click.argument(...)
# and a pass-object decorator for `_obj`. Restore them from the original file.
(epilog=rgroup2smarts_epilog)
('--cut-rgroup', metavar='SMILES', multiple=True, help='R-group SMILES to use')
('--single', '-s', default=False, is_flag=True, help='Generate a SMARTS for each R-group SMILES (default: generate a single recursive SMARTS)')
('--check', '-c', default=False, is_flag=True, help='Check that the SMARTS strings are valid (default: assume they are valid)')
('--explain', is_flag=True, default=False, help='Write conversion and check details to stderr')
('rgroup_filename', metavar='FILENAME', required=False)
_obj
def rgroup2smarts(reporter, check, explain, cut_rgroup, rgroup_filename, single):
    """Convert R-group SMILES into SMARTS.

    Input comes from --cut-rgroup options, a filename argument, or stdin (in
    that priority order). In --single mode one SMARTS is written per input;
    otherwise all inputs are merged into one recursive SMARTS. With --check
    the merged SMARTS is verified to match every input molecule.
    """
    from rdkit import Chem
    from .. import rgroup2smarts as _rgroup2smarts
    reporter.set_explain(explain)
    explain = reporter.explain  # rebind: `explain` is now the logging callable
    close = None  # set when we open a file that must be closed on exit
    if cut_rgroup:
        if (rgroup_filename is not None):
            die('Cannot specify both an R-group filename and a --cut-rgroup')
        location = _rgroup2smarts.ListLocation('--cut-rgroup SMILES')
        location.save(recno=1)
        explain('Using --cut-rgroup SMILES from the command-line')
        record_reader = _rgroup2smarts.iter_smiles_list(cut_rgroup, location)
    elif (rgroup_filename is not None):
        explain(f'Reading R-group SMILES from {rgroup_filename!r}')
        location = _rgroup2smarts.FileLocation(rgroup_filename)
        try:
            f = open(rgroup_filename)
        except OSError as err:
            die(f'Cannot open input file: {err}')
        close = f.close
        record_reader = _rgroup2smarts.parse_rgroup_file(f, location)
    else:
        explain('Reading R-group SMILES from <stdin>')
        location = _rgroup2smarts.FileLocation('<stdin>')
        record_reader = _rgroup2smarts.parse_rgroup_file(sys.stdin, location)
    # With --check, keep the parsed molecules to verify the final SMARTS against.
    if check:
        all_mols = []
    else:
        all_mols = None
    outfile = sys.stdout
    iter_smarts = _rgroup2smarts.iter_smiles_as_smarts(record_reader, location, explain, all_mols)
    all_smarts = None
    try:
        if single:
            # One SMARTS per input SMILES, written as they are produced.
            for smarts in iter_smarts:
                outfile.write((smarts + '\n'))
        else:
            # Collect all SMARTS so they can be merged into one recursive SMARTS.
            all_smarts = []
            for smarts in iter_smarts:
                assert smarts.startswith('*-!'), (smarts, location)
                all_smarts.append(smarts)
            if (not all_smarts):
                die(f'Cannot make a SMARTS: no SMILES strings found in {location.filename!r}')
    except _rgroup2smarts.ParseError as err:
        die(f'Cannot parse input file: {err}')
    except _rgroup2smarts.ConversionError as err:
        die(str(err))
    finally:
        if (close is not None):
            close()
    if (not single):
        smarts = _rgroup2smarts.make_recursive_smarts(all_smarts)
        # NOTE(review): indentation was lost in this copy; the try/finally is
        # reconstructed as nested under `if not single` so the merged SMARTS is
        # written even when a --check failure dies -- confirm against the original.
        try:
            if check:
                explain('Checking that the SMARTS matches all of the input molecules')
                all_pat = Chem.MolFromSmarts(smarts)
                if (all_pat is None):
                    die(f'Cannot process final SMARTS: {smarts!r}')
                for (i, (mol, where, smiles)) in enumerate(all_mols):
                    if (not mol.HasSubstructMatch(all_pat)):
                        die(f'final SMARTS does not match SMILES from {where} ({smiles!r})')
                    explain(f'checked #{i}')
        finally:
            outfile.write((smarts + '\n'))
    outfile.flush()
def test_get_pipeline_path_raises_no_parent():
    """get_pipeline_path raises PipelineNotFoundError naming every searched dir.

    NOTE(review): relies on module-level `cwd` and `pypyr_path` defined
    elsewhere in this test module.
    """
    with pytest.raises(PipelineNotFoundError) as err:
        fileloader.get_pipeline_path('unlikelypipeherexyz', None)
    cwd_pipes_path = cwd.joinpath('pipelines')
    # The message lists lookup locations in search order: cwd, cwd/pipelines,
    # then the pypyr install dir.
    expected_msg = f'''unlikelypipeherexyz.yaml not found in any of the following:
{cwd}
{cwd_pipes_path}
{pypyr_path}'''
    assert (str(err.value) == expected_msg)
def _create_s3_backend(session: Session, bucket: str, table: str, region_name: str) -> None:
    """Create the S3 bucket and DynamoDB lock table for a Terraform S3 backend.

    Both resources are provisioned via a single CloudFormation stack (named
    after the bucket/table) after interactive confirmation, then the stack is
    polled until creation finishes.

    Raises:
        SystemExit: if the user declines, or the stack reaches a terminal
            non-success status.
    """
    account_id = get_account_id(session)
    bucket_arn = _get_s3_bucket_arn(region_name, account_id, bucket)
    table_arn = _get_dynamodb_table_arn(region_name, account_id, table)
    log.ok(f'backend: {bucket_arn}')
    log.ok(f'backend: {table_arn}')

    # Interactive confirmation before creating anything.
    if not log.accept('backend: create backend resources'):
        log.bad('backend: not created')
        raise SystemExit(1)

    # One stack manages both resources; collapse the name when they match.
    stack_name = bucket if bucket == table else f'{bucket}-{table}'
    stack_arn = _get_cloudformation_stack_arn(region_name, account_id, stack_name)
    log.ok(f'backend: creating {stack_arn}')

    cloudformation_client = session.client('cloudformation', region_name=region_name)
    cloudformation_client.create_stack(
        StackName=stack_name,
        ResourceTypes=['AWS::DynamoDB::Table', 'AWS::S3::Bucket'],
        TemplateBody=json.dumps({
            'Resources': {
                'Table': {
                    'Type': 'AWS::DynamoDB::Table',
                    'Properties': {
                        'TableName': table,
                        'AttributeDefinitions': [{'AttributeName': 'LockID', 'AttributeType': 'S'}],
                        'KeySchema': [{'AttributeName': 'LockID', 'KeyType': 'HASH'}],
                        'BillingMode': 'PAY_PER_REQUEST',
                    },
                },
                'Bucket': {
                    'Type': 'AWS::S3::Bucket',
                    'Properties': {
                        'AccessControl': 'Private',
                        'BucketName': bucket,
                        # Versioning preserves Terraform state history.
                        'VersioningConfiguration': {'Status': 'Enabled'},
                    },
                },
            },
        }),
    )

    log.ok('backend: please wait...')
    while True:
        sleep(10)
        response = cloudformation_client.describe_stacks(StackName=stack_name)
        for stack in response['Stacks']:
            status = stack['StackStatus']
            if status == 'CREATE_IN_PROGRESS':
                continue
            if status == 'CREATE_COMPLETE':
                log.ok('backend: create complete')
                return
            # Fix: any other status (CREATE_FAILED, ROLLBACK_*) is terminal;
            # previously this only logged and kept polling forever.
            log.bad(f'backend: {status}')
            log.bad(f"backend: {stack['StackStatusReason']}")
            raise SystemExit(1)
class MSVCCompiler(CCompiler):
    """Concrete CCompiler driving Microsoft Visual C++ (legacy toolchains).

    Tool locations are discovered either from the DISTUTILS_USE_SDK/MSSdk
    environment (Platform SDK build) or from the Visual Studio registry
    entries. Discovery is deferred to initialize() so construction is cheap.
    """

    compiler_type = 'msvc'
    # No static executable table: tool paths are resolved in initialize().
    executables = {}

    # Source-extension classes this compiler knows how to handle.
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']
    _rc_extensions = ['.rc']   # resource scripts
    _mc_extensions = ['.mc']   # message catalogs
    src_extensions = (((_c_extensions + _cpp_extensions) + _rc_extensions) + _mc_extensions)

    res_extension = '.res'
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    # Static and shared libraries use the same "basename + ext" naming.
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Record compiler version/architecture; defer tool lookup to initialize()."""
        super().__init__(verbose, dry_run, force)
        self.__version = get_build_version()
        self.__arch = get_build_architecture()
        if (self.__arch == 'Intel'):
            # x86: tool paths come from the Visual Studio registry tree.
            if (self.__version >= 7):
                self.__root = 'Software\\Microsoft\\VisualStudio'
                self.__macros = MacroExpander(self.__version)
            else:
                self.__root = 'Software\\Microsoft\\Devstudio'
            self.__product = ('Visual Studio version %s' % self.__version)
        else:
            # Win64 builds use the Platform SDK compiler instead.
            self.__product = ('Microsoft SDK compiler %s' % (self.__version + 6))
        self.initialized = False

    def initialize(self):
        """Locate the MSVC tools and set up the compile/link option sets."""
        self.__paths = []
        if (('DISTUTILS_USE_SDK' in os.environ) and ('MSSdk' in os.environ) and self.find_exe('cl.exe')):
            # SDK environment is set up: assume the tools are already on PATH.
            self.cc = 'cl.exe'
            self.linker = 'link.exe'
            self.lib = 'lib.exe'
            self.rc = 'rc.exe'
            self.mc = 'mc.exe'
        else:
            # Resolve absolute tool paths from the registry-derived path list.
            self.__paths = self.get_msvc_paths('path')
            if (len(self.__paths) == 0):
                raise DistutilsPlatformError(("Python was built with %s, and extensions need to be built with the same version of the compiler, but it isn't installed." % self.__product))
            self.cc = self.find_exe('cl.exe')
            self.linker = self.find_exe('link.exe')
            self.lib = self.find_exe('lib.exe')
            self.rc = self.find_exe('rc.exe')
            self.mc = self.find_exe('mc.exe')
            self.set_path_env_var('lib')
            self.set_path_env_var('include')
            # Extend PATH so the spawned tools can find their own DLLs.
            try:
                for p in os.environ['path'].split(';'):
                    self.__paths.append(p)
            except KeyError:
                pass
            self.__paths = normalize_and_reduce_paths(self.__paths)
            os.environ['path'] = ';'.join(self.__paths)
        self.preprocess_options = None
        if (self.__arch == 'Intel'):
            self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GX', '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', '/Z7', '/D_DEBUG']
        else:
            # /GS- : the SDK compilers default to buffer-security checks.
            self.compile_options = ['/nologo', '/O2', '/MD', '/W3', '/GS-', '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', '/Z7', '/D_DEBUG']
        self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
        if (self.__version >= 7):
            self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG']
        else:
            self.ldflags_shared_debug = ['/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG']
        self.ldflags_static = ['/nologo']
        self.initialized = True

    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Map source filenames to object (or .res) filenames under output_dir."""
        if (output_dir is None):
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            (base, ext) = os.path.splitext(src_name)
            # Strip drive letter and any leading separator so the path can be
            # re-rooted under output_dir.
            base = os.path.splitdrive(base)[1]
            base = base[os.path.isabs(base):]
            if (ext not in self.src_extensions):
                raise CompileError(("Don't know how to compile %s" % src_name))
            if strip_dir:
                base = os.path.basename(base)
            if (ext in self._rc_extensions):
                # Resource scripts compile to .res, not .obj.
                obj_names.append(os.path.join(output_dir, (base + self.res_extension)))
            elif (ext in self._mc_extensions):
                # Message catalogs also end up as .res (via a generated .rc).
                obj_names.append(os.path.join(output_dir, (base + self.res_extension)))
            else:
                obj_names.append(os.path.join(output_dir, (base + self.obj_extension)))
        return obj_names

    def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        """Compile each source with the tool appropriate to its extension.

        C/C++ go through cl.exe, .rc through rc.exe, and .mc through
        mc.exe + rc.exe. Returns the list of object filenames.
        """
        if (not self.initialized):
            self.initialize()
        compile_info = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
        (macros, objects, extra_postargs, pp_opts, build) = compile_info
        compile_opts = (extra_preargs or [])
        compile_opts.append('/c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)
        for obj in objects:
            try:
                (src, ext) = build[obj]
            except KeyError:
                # Object is up to date (not in the build map): skip it.
                continue
            if debug:
                # Absolute source paths make the debug info usable anywhere.
                src = os.path.abspath(src)
            if (ext in self._c_extensions):
                input_opt = ('/Tc' + src)
            elif (ext in self._cpp_extensions):
                input_opt = ('/Tp' + src)
            elif (ext in self._rc_extensions):
                # Resource script: compile directly with rc.exe.
                input_opt = src
                output_opt = ('/fo' + obj)
                try:
                    self.spawn(((([self.rc] + pp_opts) + [output_opt]) + [input_opt]))
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            elif (ext in self._mc_extensions):
                # Message catalog: mc.exe emits a header + .rc next to the
                # source/object, then the .rc is compiled with rc.exe.
                h_dir = os.path.dirname(src)
                rc_dir = os.path.dirname(obj)
                try:
                    self.spawn((([self.mc] + ['-h', h_dir, '-r', rc_dir]) + [src]))
                    (base, _) = os.path.splitext(os.path.basename(src))
                    rc_file = os.path.join(rc_dir, (base + '.rc'))
                    self.spawn((([self.rc] + [('/fo' + obj)]) + [rc_file]))
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            else:
                raise CompileError("Don't know how to compile {} to {}".format(src, obj))
            output_opt = ('/Fo' + obj)
            try:
                self.spawn((((([self.cc] + compile_opts) + pp_opts) + [input_opt, output_opt]) + extra_postargs))
            except DistutilsExecError as msg:
                raise CompileError(msg)
        return objects

    def create_static_lib(self, objects, output_libname, output_dir=None, debug=0, target_lang=None):
        """Bundle objects into a static .lib with lib.exe (if out of date)."""
        if (not self.initialized):
            self.initialize()
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        output_filename = self.library_filename(output_libname, output_dir=output_dir)
        if self._need_link(objects, output_filename):
            lib_args = (objects + [('/OUT:' + output_filename)])
            if debug:
                # No debug-specific lib.exe options are needed.
                pass
            try:
                self.spawn(([self.lib] + lib_args))
            except DistutilsExecError as msg:
                raise LibError(msg)
        else:
            log.debug('skipping %s (up-to-date)', output_filename)

    def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
        """Link objects into an executable or DLL with link.exe (if out of date)."""
        if (not self.initialized):
            self.initialize()
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
        (libraries, library_dirs, runtime_library_dirs) = fixed_args
        if runtime_library_dirs:
            # MSVC has no rpath equivalent; warn and ignore.
            self.warn(("I don't know what to do with 'runtime_library_dirs': " + str(runtime_library_dirs)))
        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries)
        if (output_dir is not None):
            output_filename = os.path.join(output_dir, output_filename)
        if self._need_link(objects, output_filename):
            if (target_desc == CCompiler.EXECUTABLE):
                # Executables drop the leading /DLL flag from the shared sets.
                if debug:
                    ldflags = self.ldflags_shared_debug[1:]
                else:
                    ldflags = self.ldflags_shared[1:]
            elif debug:
                ldflags = self.ldflags_shared_debug
            else:
                ldflags = self.ldflags_shared
            export_opts = []
            for sym in (export_symbols or []):
                export_opts.append(('/EXPORT:' + sym))
            ld_args = ((((ldflags + lib_opts) + export_opts) + objects) + [('/OUT:' + output_filename)])
            if (export_symbols is not None):
                # Place the import library next to the objects so temporary
                # build artifacts stay in the build tree.
                (dll_name, dll_ext) = os.path.splitext(os.path.basename(output_filename))
                implib_file = os.path.join(os.path.dirname(objects[0]), self.library_filename(dll_name))
                ld_args.append(('/IMPLIB:' + implib_file))
            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)
            self.mkpath(os.path.dirname(output_filename))
            try:
                self.spawn(([self.linker] + ld_args))
            except DistutilsExecError as msg:
                raise LinkError(msg)
        else:
            log.debug('skipping %s (up-to-date)', output_filename)

    def library_dir_option(self, dir):
        """Linker flag adding a library search directory."""
        return ('/LIBPATH:' + dir)

    def runtime_library_dir_option(self, dir):
        """MSVC has no runtime library search path (rpath) concept."""
        raise DistutilsPlatformError("don't know how to set runtime library search path for MSVC++")

    def library_option(self, lib):
        """Linker argument naming a library: just the .lib filename."""
        return self.library_filename(lib)

    def find_library_file(self, dirs, lib, debug=0):
        """Search dirs for the library; prefer the '_d' variant when debug."""
        if debug:
            try_names = [(lib + '_d'), lib]
        else:
            try_names = [lib]
        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename(name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # Not found in any of the directories.
            return None

    def find_exe(self, exe):
        """Return the absolute path of *exe* from self.__paths or PATH.

        Falls back to returning the bare name if not found anywhere.
        """
        for p in self.__paths:
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn
        for p in os.environ['Path'].split(';'):
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn
        return exe

    def get_msvc_paths(self, path, platform='x86'):
        """Read the ';'-separated directory list for *path* from the registry.

        Returns [] when the registry cannot be read or the key is absent.
        """
        if (not _can_read_reg):
            return []
        path = (path + ' dirs')
        if (self.__version >= 7):
            key = '{}\\{:0.1f}\\VC\\VC_OBJECTS_PLATFORM_INFO\\Win32\\Directories'.format(self.__root, self.__version)
        else:
            key = ('%s\\6.0\\Build System\\Components\\Platforms\\Win32 (%s)\\Directories' % (self.__root, platform))
        for base in HKEYS:
            d = read_values(base, key)
            if d:
                if (self.__version >= 7):
                    # VS7+ stores macro references that must be expanded.
                    return self.__macros.sub(d[path]).split(';')
                else:
                    return d[path].split(';')
        if (self.__version == 6):
            # VS6 only writes these keys after the GUI has run once.
            for base in HKEYS:
                if (read_values(base, ('%s\\6.0' % self.__root)) is not None):
                    self.warn('It seems you have Visual Studio 6 installed, but the expected registry settings are not present.\nYou must at least run the Visual Studio GUI once so that these entries are created.')
                    break
        return []

    def set_path_env_var(self, name):
        """Set os.environ[name] from the corresponding MSVC registry paths."""
        if (name == 'lib'):
            # The registry key is 'library dirs', not 'lib dirs'.
            p = self.get_msvc_paths('library')
        else:
            p = self.get_msvc_paths(name)
        if p:
            os.environ[name] = ';'.join(p)
def test_correctness_vertex_set_contiguity_distinct():
    """Vertex-set intersection and predicate-based contiguity must disagree here.

    The two boxes touch along a partial edge, a case where shared-vertex logic
    and geometric rook/queen predicates give different graphs.
    """
    polys = geopandas.GeoSeries((shapely.box(0, 0, 1, 1), shapely.box(0.5, 1, 1.5, 2)))

    # Rook: shared-vertex edges vs geometric rook predicate.
    heads_tails_rook = _vertex_set_intersection(polys, rook=True)
    assert set(zip(*heads_tails_rook, strict=True)) != set(zip(*_rook(polys), strict=True))

    # Queen: same comparison with corner contiguity allowed.
    heads_tails_queen = _vertex_set_intersection(polys, rook=False)
    assert set(zip(*heads_tails_queen, strict=True)) != set(zip(*_queen(polys), strict=True))
def get_interp_fun(variable_name, domain):
    """Build a pybamm.Interpolant of a COMSOL variable on the given domain's mesh.

    The COMSOL solution is resampled in space onto the pybamm mesh nodes; the
    returned Interpolant varies in time via pybamm.t.
    """
    raw = comsol_variables[variable_name]
    # Pick the COMSOL spatial grid that matches the requested domain.
    if domain == ['negative electrode']:
        comsol_x = comsol_variables['x_n']
    elif domain == ['positive electrode']:
        comsol_x = comsol_variables['x_p']
    elif domain == whole_cell:
        comsol_x = comsol_variables['x']
    # Resample in space onto the pybamm mesh nodes.
    nodes = mesh[domain].nodes
    resampled = interp.interp1d(comsol_x, raw, axis=0)(nodes)
    fun = pybamm.Interpolant(comsol_t, resampled.T, pybamm.t)
    fun.domains = {'primary': domain}
    fun.mesh = mesh[domain]
    fun.secondary_mesh = None
    return fun
(device=True)
def imt_func_o25(value, other_value):
    """Signed bilinear combination of components 1/2/6/29/30/31 of two operands.

    NOTE(review): looks like generated code (a single output component of some
    algebraic product); the `(device=True)` residue above suggests a stripped
    @cuda.jit(device=True) decorator -- confirm against the generator.
    Terms are accumulated in the original left-to-right order to keep
    floating-point results bit-identical.
    """
    acc = 0
    acc = acc + (1.0 * value[2]) * other_value[30]
    acc = acc + (1.0 * value[1]) * other_value[29]
    acc = acc + ((-1.0) * value[30]) * other_value[2]
    acc = acc + ((-1.0) * value[6]) * other_value[31]
    acc = acc + ((-1.0) * value[29]) * other_value[1]
    acc = acc + ((-1.0) * value[31]) * other_value[6]
    return acc
def load_data_for_worker(base_samples, batch_size, class_cond):
    """Yield batches of low-res conditioning images for this rank, cycling forever.

    Images (and labels when class_cond) are loaded once from an .npz blob,
    sharded across ranks by striding, normalized to [-1, 1], and emitted as
    NCHW tensors under the key 'low_res' (labels under 'y').
    """
    with bf.BlobFile(base_samples, 'rb') as f:
        npz = np.load(f)
        image_arr = npz['arr_0']
        if class_cond:
            label_arr = npz['arr_1']
    rank = dist.get_rank()
    num_ranks = dist.get_world_size()
    pending_images = []
    pending_labels = []
    while True:
        # Stride over the shard belonging to this rank.
        for idx in range(rank, len(image_arr), num_ranks):
            pending_images.append(image_arr[idx])
            if class_cond:
                pending_labels.append(label_arr[idx])
            if len(pending_images) != batch_size:
                continue
            batch = th.from_numpy(np.stack(pending_images)).float()
            batch = (batch / 127.5) - 1.0   # uint8 [0,255] -> float [-1,1]
            batch = batch.permute(0, 3, 1, 2)  # NHWC -> NCHW
            res = dict(low_res=batch)
            if class_cond:
                res['y'] = th.from_numpy(np.stack(pending_labels))
            yield res
            pending_images = []
            pending_labels = []
# NOTE(review): this line looks like a stripped decorator -- originally
# `@_funcify.register(Solve)`; as written it registers nothing useful.
_funcify.register(Solve)
def numba_funcify_Solve(op, node, **kwargs):
    """Build a Numba implementation of the Solve Op.

    Non-general systems (assume_a != 'gen') fall back to scipy's
    solve_triangular inside numba.objmode; general systems use
    np.linalg.solve in nopython mode with inputs cast to the output dtype.
    """
    assume_a = op.assume_a
    if (assume_a != 'gen'):
        lower = op.lower
        # NOTE(review): this warning text mentions `compute_uv` /
        # `numpy.linalg.svd`, which belong to the SVD dispatcher -- looks
        # copy-pasted; confirm against upstream before relying on it.
        warnings.warn('Numba will use object mode to allow the `compute_uv` argument to `numpy.linalg.svd`.', UserWarning)
        ret_sig = get_numba_type(node.outputs[0].type)
        # `_njit` below is presumably a stripped @numba.njit-style decorator.
        _njit
        def solve(a, b):
            # objmode escape hatch: solve_triangular has no nopython support.
            with numba.objmode(ret=ret_sig):
                ret = scipy.linalg.solve_triangular(a, b, lower=lower)
            return ret
    else:
        out_dtype = node.outputs[0].type.numpy_dtype
        inputs_cast = int_to_float_fn(node.inputs, out_dtype)
        _njit
        def solve(a, b):
            return np.linalg.solve(inputs_cast(a), inputs_cast(b)).astype(out_dtype)
    return solve
# NOTE(review): the bare expressions below (route string, `_if(...)` calls and
# the per-method operation-name strings / `_json_request` / `_scope` lines)
# look like stripped decorators (@resource, @show_if, @nickname,
# @validate_json_request, @require_scope) -- restore from the original file.
('/v1/organization/<orgname>/quota/<quota_id>/limit/<limit_id>')
_if(features.SUPER_USERS)
_if(features.QUOTA_MANAGEMENT)
class OrganizationQuotaLimit(ApiResource):
    """API resource for a single limit attached to an organization quota."""

    # JSON schema used to validate the PUT request payload.
    schemas = {'UpdateOrgQuotaLimit': {'type': 'object', 'description': 'Description of changing organization quota limit', 'properties': {'type': {'type': 'string', 'description': 'Type of quota limit: "Warning" or "Reject"'}, 'threshold_percent': {'type': 'integer', 'description': 'Quota threshold, in percent of quota'}}}}

    ('getOrganizationQuotaLimit')
    def get(self, orgname, quota_id, limit_id):
        """Return one quota limit; requires org membership."""
        orgperm = OrganizationMemberPermission(orgname)
        if (not orgperm.can()):
            raise Unauthorized()
        quota = get_quota(orgname, quota_id)
        quota_limit = model.namespacequota.get_namespace_quota_limit(quota, limit_id)
        if (quota_limit is None):
            raise NotFound()
        return limit_view(quota_limit)

    ('changeOrganizationQuotaLimit')
    _json_request('UpdateOrgQuotaLimit')
    _scope(scopes.SUPERUSER)
    def put(self, orgname, quota_id, limit_id):
        """Update the limit's type and/or threshold_percent; superuser only."""
        if (not SuperUserPermission().can()):
            raise Unauthorized()
        quota_limit_data = request.get_json()
        quota = get_quota(orgname, quota_id)
        quota_limit = model.namespacequota.get_namespace_quota_limit(quota, limit_id)
        if (quota_limit is None):
            raise NotFound()
        # Each field is optional; update only what the payload supplies.
        if ('type' in quota_limit_data):
            new_type = quota_limit_data['type']
            model.namespacequota.update_namespace_quota_limit_type(quota_limit, new_type)
        if ('threshold_percent' in quota_limit_data):
            new_threshold = quota_limit_data['threshold_percent']
            model.namespacequota.update_namespace_quota_limit_threshold(quota_limit, new_threshold)
        return quota_view(quota)

    ('deleteOrganizationQuotaLimit')
    _scope(scopes.SUPERUSER)
    def delete(self, orgname, quota_id, limit_id):
        """Delete the quota limit; superuser only. Returns 204 on success."""
        if (not SuperUserPermission().can()):
            raise Unauthorized()
        quota = get_quota(orgname, quota_id)
        quota_limit = model.namespacequota.get_namespace_quota_limit(quota, limit_id)
        if (quota_limit is None):
            raise NotFound()
        try:
            model.namespacequota.delete_namespace_quota_limit(quota_limit)
            return ('', 204)
        except model.DataModelException as ex:
            raise request_error(exception=ex)
class GroupElasticNet(ElasticNetConfig):
    """Config for a group elastic net penalty: a GroupLasso term mixed with Ridge.

    NOTE(review): __init__ stores nothing explicitly, yet _get_sum_configs reads
    self.groups / self.pen_val / etc. -- the ElasticNetConfig base presumably
    auto-captures constructor arguments as attributes; confirm in the base class.
    """

    def __init__(self, groups=None, pen_val=1, mix_val=0.5, lasso_weights=None, lasso_flavor=None, ridge_weights=None):
        pass

    def _get_sum_configs(self):
        """Return the (lasso, ridge) sub-configs; pen_val is split by mix_val."""
        lasso_config = GroupLasso(groups=self.groups, pen_val=(self.pen_val * self.mix_val), weights=self.lasso_weights, flavor=self.lasso_flavor)
        ridge_config = Ridge(pen_val=(self.pen_val * (1 - self.mix_val)), weights=self.ridge_weights)
        return (lasso_config, ridge_config)

    def get_sum_names(self):
        """Names matching the order of _get_sum_configs()."""
        return ['lasso', 'ridge']
def obtain_confused_result(out: defaultdict, threshole=0.6, confuse_value=0.15, num_template=2):
    """Collect answers whose vote distribution is low-confidence or confused.

    An entry is kept when its top vote count is at most `threshole` of the
    total vote scale, or when the gap to the runner-up is below
    `confuse_value` of that scale. Keys containing 'ner' are skipped.
    Returns {key: {answer: vote_count}} for the kept entries.
    """
    hard_answer = dict()
    # Thresholds scale with (number of prediction files) x (templates per file).
    # Multiplication order matches the original grouping for bit-identical floats.
    prob_floor = threshole * len(predict_result_files) * num_template
    gap_floor = confuse_value * len(predict_result_files) * num_template
    for key, votes in out.items():
        if 'ner' in key.lower():
            continue
        ranked = votes.most_common()
        top_count = ranked[0][1]
        runner_up_count = ranked[1][1]
        confused = (top_count <= prob_floor) or (abs(top_count - runner_up_count) < gap_floor)
        if confused:
            hard_answer[key] = dict(ranked)
    print('hard answer num: {}'.format(len(hard_answer.keys())))
    return hard_answer
class CheckInService():
    """Validates and performs guest check-in for a reservation."""

    # Check-in window opens this many hours before date_in...
    _EARLY_CHECK_IN_OFFSET: int = 3
    # ...and closes this many hours before date_out.
    # NOTE(review): despite the "LATE_CHECK_IN" name, the offset is applied to
    # date_out -- confirm that is the intended end of the window.
    _LATE_CHECK_IN_OFFSET: int = 6

    @staticmethod
    def _is_valid_date(reservation: Reservation) -> bool:
        """True iff "now" (naive UTC) falls inside the allowed check-in window.

        Fix: declared @staticmethod -- the helper takes the reservation as its
        first parameter, so calling it as a bound method with
        ``self._is_valid_date(reservation=reservation)`` would bind ``self``
        positionally AND pass ``reservation`` by keyword, raising a TypeError.
        """
        return ((reservation.date_in - timedelta(hours=CheckInService._EARLY_CHECK_IN_OFFSET)) <= datetime.utcnow() <= (reservation.date_out - timedelta(hours=CheckInService._LATE_CHECK_IN_OFFSET)))

    @staticmethod
    def _is_valid_guest(reservation: Reservation, mobile: mobile_type) -> bool:
        """True iff `mobile` matches the reservation guest's registered mobile.

        Fix: @staticmethod for the same reason as _is_valid_date.
        """
        return (reservation.guest.mobile == mobile)

    def check_in(self, reservation: Reservation, mobile: str) -> None:
        """Check the guest in, validating the time window and the caller's mobile.

        Raises:
            CheckInDateException: current time is outside the permitted window.
            CheckInAuthenticationException: mobile does not match the guest.
        """
        if (not self._is_valid_date(reservation=reservation)):
            raise CheckInDateException
        if (not self._is_valid_guest(reservation=reservation, mobile=mobile)):
            raise CheckInAuthenticationException
        reservation.check_in()
class InteriorSquirmer(DynSys):
    """Squirmer (micro-swimmer) flow inside a unit disk, with a periodically
    switched pair of mode amplitudes.

    State is (r, th, tt): polar position of a tracer plus an internal time
    used to drive the switching protocol. Reads self.a, self.g, self.n and
    self.tau (supplied by the DynSys base from model metadata — TODO confirm).
    """

    @staticmethod
    def _rhs_static(r, th, t, a, g, n):
        # BUG FIX: _rhs_static, _jac_static and _protocol were plain methods
        # without `self`, yet are invoked as bound calls (e.g.
        # self._protocol(t, self.tau)), which shifted every argument by one
        # (the instance landed in the first parameter). @staticmethod
        # restores the intended argument mapping.
        #
        # Velocity field summed over Fourier modes 1..n with amplitudes a, g.
        nvals = np.arange(1, (n + 1))
        (sinvals, cosvals) = (np.sin((th * nvals)), np.cos((th * nvals)))
        rnvals = (r ** nvals)
        vrn = ((g * cosvals) + (a * sinvals))
        vrn *= (((nvals * rnvals) * ((r ** 2) - 1)) / r)
        vth = ((2 * r) + ((((r ** 2) - 1) * nvals) / r))
        vth *= ((a * cosvals) - (g * sinvals))
        vth *= rnvals
        # Returns (dr/dt, dth/dt); the angular component is divided by r to
        # convert from arc speed to angular speed.
        return (np.sum(vrn), (np.sum(vth) / r))

    @staticmethod
    def _jac_static(r, th, t, a, g, n):
        # Analytic 2x2 Jacobian of (_rhs_static) with respect to (r, th).
        # Expressions are kept verbatim from the original implementation.
        nvals = np.arange(1, (n + 1))
        (sinvals, cosvals) = (np.sin((th * nvals)), np.cos((th * nvals)))
        rnvals = (r ** nvals)
        trigsum = ((a * sinvals) + (g * cosvals))
        trigskew = ((a * cosvals) - (g * sinvals))
        j11 = np.copy(trigsum)
        j11 *= ((nvals * rnvals) * ((2 * (r ** 2)) + (((r ** 2) - 1) * (nvals - 1))))
        j11 = ((1 / (r ** 2)) * np.sum(j11))
        j12 = np.copy(trigskew)
        j12 *= ((((- (nvals ** 2)) * rnvals) * (1 - (r ** 2))) / r)
        j12 = np.sum(j12)
        j21 = (((2 * rnvals) * ((2 * nvals) + 1)) * (- np.copy(trigskew)))
        j21 += (((((n * (1 - (r ** 2))) * rnvals) * (nvals - 1)) / (r ** 2)) * np.copy(((g * sinvals) + (a * cosvals))))
        j21 = (- np.sum(j21))
        j22 = np.copy(trigsum)
        j22 *= (((- nvals) * rnvals) * ((2 * r) + ((((r ** 2) - 1) * nvals) / r)))
        j22 = np.sum(j22)
        # Recompute vth to express d(vth/r)/dr via the quotient rule.
        vth = np.copy(trigskew)
        vth *= ((2 * r) + ((((r ** 2) - 1) * nvals) / r))
        vth *= rnvals
        vth = (np.sum(vth) / r)
        j21 = ((j21 / r) - (vth / r))
        j22 /= r
        return np.array([[j11, j12], [j21, j22]])

    @staticmethod
    def _protocol(t, tau, stiffness=20):
        # Smoothed square wave in [0, 1] with period tau; `stiffness`
        # controls how sharp the tanh switching is.
        return (0.5 + (0.5 * np.tanh(((tau * stiffness) * np.sin((((2 * np.pi) * t) / tau))))))

    def _postprocessing(self, r, th, tt):
        # Map polar state to Cartesian coordinates plus a bounded phase signal.
        return ((r * np.cos(th)), (r * np.sin(th)), np.sin((((2 * np.pi) * tt) / self.tau)))

    def jac(self, X, t):
        """Jacobian of the flow at state X, with mode amplitudes gated by the protocol."""
        (r, th) = (X[0], X[1])
        phase = self._protocol(t, self.tau)
        return self._jac_static(r, th, t, (self.a * phase), (self.g * (1 - phase)), self.n)

    def rhs(self, X, t):
        """Right-hand side: (dr, dth, dtt); the internal clock tt advances at unit rate."""
        (r, th, tt) = X
        phase = self._protocol(tt, self.tau)
        dtt = 1
        (dr, dth) = self._rhs_static(r, th, t, (self.a * phase), (self.g * (1 - phase)), self.n)
        return (dr, dth, dtt)
def clean_folder(folder):
    """Delete every entry inside ``folder`` (files, symlinks, directory trees)
    without removing ``folder`` itself; failures are reported, not raised."""
    for entry_name in os.listdir(folder):
        entry_path = os.path.join(folder, entry_name)
        try:
            if os.path.isdir(entry_path) and not os.path.islink(entry_path):
                # A real directory: remove the whole subtree.
                shutil.rmtree(entry_path)
            elif os.path.isfile(entry_path) or os.path.islink(entry_path):
                # Regular file, or any symlink (including links to dirs).
                os.unlink(entry_path)
        except Exception as e:
            # Best-effort cleanup: report and continue with the next entry.
            print('Failed to delete %s. Reason: %s' % (entry_path, e))
def batchify_data(batch):
    """Collate a list of per-example feature dicts into padded batch tensors.

    Returns two tuples:
      encoder side: (enc_input_ids, enc_lens, enc_batch_extend_vocab,
                     art_oovs, max_art_oovs)
      decoder side: (dec_input_ids, dec_output_ids, dec_lens, summ_sent)
    """
    pad_id = batch[0]['pad']

    # --- Encoder side ---------------------------------------------------
    padded_enc, enc_lengths = padded_sequence([ex['enc_input'] for ex in batch], pad_id)
    enc_input_ids = torch.tensor(padded_enc, dtype=torch.long)
    enc_lens = torch.tensor(enc_lengths, dtype=torch.long)

    # Encoder ids where in-article OOV words carry temporary extended-vocab
    # ids (used by the copy mechanism).
    padded_extend, _ = padded_sequence([ex['enc_input_extend'] for ex in batch], pad_id)
    enc_batch_extend_vocab = torch.tensor(padded_extend, dtype=torch.long)

    # Per-example OOV word lists and the largest OOV count in the batch.
    art_oovs = [ex['oovs'] for ex in batch]
    max_art_oovs = max(len(ex['oovs']) for ex in batch)

    # --- Decoder side ---------------------------------------------------
    dec_inputs = [ex['dec_input'] for ex in batch]
    dec_targets = [ex['dec_output'] for ex in batch]
    if config.pointer_gen:
        # Pointer-generator targets use the extended vocabulary instead.
        dec_targets = [ex['dec_output_extend'] for ex in batch]
    padded_dec_in, dec_lengths = padded_sequence(dec_inputs, pad_id)
    padded_dec_out, dec_lengths = padded_sequence(dec_targets, pad_id)
    dec_input_ids = torch.tensor(padded_dec_in, dtype=torch.long)
    dec_output_ids = torch.tensor(padded_dec_out, dtype=torch.long)
    dec_lens = torch.tensor(dec_lengths, dtype=torch.long)

    # Reference summary sentences (kept as raw text for evaluation/logging).
    summ_sent = [ex['summ_sent'] for ex in batch]

    return ((enc_input_ids, enc_lens, enc_batch_extend_vocab, art_oovs, max_art_oovs),
            (dec_input_ids, dec_output_ids, dec_lens, summ_sent))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.