code stringlengths 281 23.7M |
|---|
class RHEL5_Network(FC6_Network):
    """RHEL5 network command handler: identical to FC6, plus the RHEL5-only
    'query' value for --bootproto."""
    removedKeywords = FC6_Network.removedKeywords
    removedAttrs = FC6_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        FC6_Network.__init__(self, writePriority, *args, **kwargs)
        # RHEL5 additionally accepts bootproto=query.
        self.bootprotoList.append(BOOTPROTO_QUERY)

    def _getParser(self):
        op = FC6_Network._getParser(self)
        note = "\n\n .. versionchanged:: %s\n\n The 'query' value was added." % versionToLongString(RHEL5)
        # Amend only the --bootproto option's help text with the version note.
        for opt in op._actions:
            if '--bootproto' in opt.option_strings:
                opt.help += dedent(note)
                break
        return op
class InteractiveBrowser(RefBrowser):
    """Tkinter-based interactive browser over an object reference tree."""

    def __init__(self, rootobject, maxdepth=3, str_func=gui_default_str_function, repeat=True):
        # Guard clause: the GUI variant is unusable without Tkinter.
        if tkinter is None:
            raise ImportError('InteractiveBrowser requires Tkinter to be installed.')
        RefBrowser.__init__(self, rootobject, maxdepth, str_func, repeat)

    def main(self, standalone=False):
        """Open a Tk window showing the expanded reference tree.

        When `standalone` is true, enter the Tk main loop (blocks).
        """
        window = tkinter.Tk()
        scrolled = _TreeWidget.ScrolledCanvas(window, bg='white', highlightthickness=0, takefocus=1)
        scrolled.frame.pack(expand=1, fill='both')
        tree_item = _ReferrerTreeItem(window, self.get_tree(), self)
        _TreeNode(scrolled.canvas, None, tree_item).expand()
        if standalone:
            window.mainloop()
def build_prompt(cur_cls_name: str, sentence: str, event_ontology: dict, args) -> "tuple[str, str]":
    """Build a (text_prompt, instantiation_prompt) pair for one example.

    The prompt format depends on mutually exclusive flags on `args`
    (event_detection, pure_text_prompt, predict_event_type, ...); the first
    matching mode returns early.  NOTE: the annotated return type was `str`
    but every path returns a 2-tuple of strings; annotation corrected.
    """
    # Mode 1: event detection — code-style prompt asserting trigger words/type.
    if args.event_detection:
        # Double quotes would break the quoted sentence inside the prompt.
        sentence = sentence.replace('"', "'")
        text_prompt = 'def assert_event_trigger_words_and_type(event_text, trigger_words: List[str], event_type):\n # trigger word need to be a word in the original sentence\n for word in trigger_words:\n assert word in event_text.split()\n event = convert_text_to_event(event_text)\n assert event.trigger_words == trigger_words\n assert isinstance(event, event_type)\n'
        instantiation_prompt = f'''
assert_event_trigger_words_and_type(
"{sentence}", '''
        return (text_prompt, instantiation_prompt)
    # Mode 2: plain natural-language prompt (no code framing).
    if args.pure_text_prompt:
        if args.mark_trigger:
            text_prompt = f'''Translate the following sentence into an instance of {cur_cls_name} event. The trigger word(s) of the event is marked with **trigger word**.
"{sentence}"
'''
        else:
            text_prompt = f'''Translate the following sentence into an instance of {cur_cls_name} event:
{sentence}
'''
        # Seed the completion with the first (sorted) role name.
        instantiation_prompt = f"1. {_sort_unique_roles(event_ontology['event']['roles'])[0].lower()}: ("
        return (text_prompt, instantiation_prompt)
    # Mode 3: ask the model to predict the event type(s) as a Python list.
    if args.predict_event_type:
        text_prompt = f'''"""
Translate the following sentences to event(s):
"{sentence}"
"""
'''
        instantiation_prompt = f'''events: List[Event] = [
'''
        return (text_prompt, instantiation_prompt)
    # Default mode: docstring-style instruction, with optional variants.
    if args.reduce_hallucination:
        text_prompt = f'''"""
Translate the following sentence into an instance of {cur_cls_name}. Only use information that can be founded in the text as arguments. Use [] as arguments when no information about them is presented in the text.
"{sentence}"
'''
    elif args.mark_trigger:
        text_prompt = f'''"""
Translate the following sentence into an instance of {cur_cls_name}. The trigger word(s) of the event is marked with **trigger word**.
"{sentence}"
'''
    else:
        text_prompt = f'''"""
Translate the following sentence into an instance of {cur_cls_name}:
"{sentence}"
'''
    # Optionally append an AMR parse of the sentence (comment lines stripped).
    if args.add_amr:
        global AMR_STOG
        amr_graphs = AMR_STOG.parse_sents([sentence])
        assert (len(amr_graphs) == 1)
        amr_graph = amr_graphs[0]
        amr_str = '\n'.join(filter((lambda s: (not s.startswith('#'))), amr_graph.splitlines()))
        text_prompt += f'''
Abstract Meaning Representation of the given sentence:
{amr_str}
"""
'''
    else:
        # Close the opening docstring quote started above.
        text_prompt += '"""\n'
    # Seed the completion with the constructor call for the event class.
    instantiation_prompt = f'''{cur_cls_name.lower()}_event = {cur_cls_name}(
'''
    return (text_prompt, instantiation_prompt)
class OneSlackSSVM(BaseSSVM):
    """Structured SVM trained with the 1-slack cutting-plane algorithm.

    Maintains a working set of (joint_feature, loss) constraints, solves the
    dual QP over the working set with cvxopt, and optionally caches
    loss-augmented inference results to avoid redundant inference calls.
    """

    def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False,
                 verbose=0, negativity_constraint=None, n_jobs=1,
                 break_on_bad=False, show_loss_every=0, tol=0.001,
                 inference_cache=0, inactive_threshold=1e-05,
                 inactive_window=50, logger=None, cache_tol='auto',
                 switch_to=None):
        BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,
                          n_jobs=n_jobs, show_loss_every=show_loss_every,
                          logger=logger)
        # Indices of weight entries constrained to be non-positive (or None).
        self.negativity_constraint = negativity_constraint
        self.check_constraints = check_constraints
        self.break_on_bad = break_on_bad
        self.tol = tol
        self.cache_tol = cache_tol
        self.inference_cache = inference_cache
        self.inactive_threshold = inactive_threshold
        self.inactive_window = inactive_window
        self.switch_to = switch_to

    def _solve_1_slack_qp(self, constraints, n_samples):
        """Solve the dual QP over the current working set.

        Updates self.w from the dual solution and prunes inactive
        constraints. Returns the (negated) primal objective of the QP.
        """
        # BUG FIX: np.float was removed in NumPy >= 1.24; it was always just
        # an alias for the builtin float.
        C = float(self.C) * n_samples
        joint_features = [c[0] for c in constraints]
        losses = [c[1] for c in constraints]
        joint_feature_matrix = np.vstack(joint_features)
        n_constraints = len(joint_features)
        P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T))
        q = cvxopt.matrix(-np.array(losses, dtype=float))
        idy = np.identity(n_constraints)
        tmp1 = np.zeros(n_constraints)
        if self.negativity_constraint is None:
            # No extra inequalities beyond alpha >= 0.
            zero_constr = np.zeros(0)
            joint_features_constr = np.zeros((0, n_constraints))
        else:
            joint_features_constr = joint_feature_matrix.T[self.negativity_constraint]
            zero_constr = np.zeros(len(self.negativity_constraint))
        # G x <= h encodes alpha >= 0 plus the negativity constraints.
        G = cvxopt.sparse(cvxopt.matrix(np.vstack((-idy, joint_features_constr))))
        h = cvxopt.matrix(np.hstack((tmp1, zero_constr)))
        # Equality constraint: sum(alpha) == C.
        A = cvxopt.matrix(np.ones((1, n_constraints)))
        b = cvxopt.matrix([C])
        cvxopt.solvers.options['feastol'] = 1e-05
        try:
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        except ValueError:
            solution = {'status': 'error'}
        if solution['status'] != 'optimal':
            # Retry once with a small ridge on the Gram matrix.
            print('regularizing QP!')
            P = cvxopt.matrix(np.dot(joint_feature_matrix, joint_feature_matrix.T)
                              + 1e-08 * np.eye(joint_feature_matrix.shape[0]))
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
            if solution['status'] != 'optimal':
                raise ValueError('QP solver failed. Try regularizing your QP.')
        a = np.ravel(solution['x'])
        self.old_solution = solution
        self.prune_constraints(constraints, a)
        # Support vectors: constraints with non-negligible dual weight.
        sv = a > (self.inactive_threshold * C)
        if self.verbose > 1:
            print('%d support vectors out of %d points' % (np.sum(sv), n_constraints))
        # Primal weights are the alpha-weighted sum of constraint features.
        self.w = np.dot(a, joint_feature_matrix)
        return -solution['primal objective']

    def prune_constraints(self, constraints, a):
        """Record dual values and drop constraints that stayed inactive.

        Mutates `constraints` and `self.alphas` in place.
        """
        # One alpha-history list per constraint; add an empty one for the
        # constraint that was just appended to the working set.
        self.alphas.append([])
        assert len(self.alphas) == len(constraints)
        for alpha_history, alpha in zip(self.alphas, a):
            alpha_history.append(alpha)
        # (The original also rebound the loop variable to a slice here,
        # which had no effect; the windowing below is what matters.)
        if self.inactive_window != 0:
            max_active = [np.max(constr[-self.inactive_window:])
                          for constr in self.alphas]
            # Exclude the initial origin constraint (index 0) from the scale.
            strongest = np.max(max_active[1:])
            # BUG FIX: comparing a Python list to a scalar raises TypeError
            # on Python 3; convert to an array first.
            inactive = np.where(np.asarray(max_active)
                                < (self.inactive_threshold * strongest))[0]
            # Delete from the back so earlier indices stay valid.
            for idx in reversed(inactive):
                del constraints[idx]
                del self.alphas[idx]

    def _check_bad_constraint(self, violation, djoint_feature_mean, loss,
                              old_constraints, break_on_bad, tol=None):
        """Return True if the newly found constraint should be rejected.

        A constraint is rejected when it barely improves the current slack,
        duplicates an existing constraint, or (with check_constraints) is
        weaker than a constraint already in the working set.
        """
        violation_difference = violation - self.last_slack_
        if self.verbose > 1:
            print('New violation: %f difference to last: %f'
                  % (violation, violation_difference))
        if violation_difference < 0 and violation > 0 and break_on_bad:
            raise ValueError('Bad inference: new violation is smaller than old.')
        if tol is None:
            tol = self.tol
        if violation_difference < tol:
            if self.verbose:
                print('new constraint too weak.')
            return True
        # Reject exact duplicates of existing constraints.
        equals = [True for djoint_feature_, loss_ in old_constraints
                  if np.all(djoint_feature_ == djoint_feature_mean) and loss == loss_]
        if np.any(equals):
            return True
        if self.check_constraints:
            for con in old_constraints:
                violation_tmp = max(con[1] - np.dot(self.w, con[0]), 0)
                if self.verbose > 5:
                    print('violation old constraint: %f' % violation_tmp)
                if (violation - violation_tmp) < -1e-05:
                    if self.verbose:
                        print('bad inference: %f' % (violation_tmp - violation))
                    if break_on_bad:
                        raise ValueError('Bad inference: new violation is weaker than previous constraint.')
                    return True
        return False

    def constraint_equal(cls, y_1, y_2):
        """Compare two labelings (possibly relaxed marginals) for equality.

        NOTE: despite the `cls` parameter name this is invoked as a normal
        instance method (self binds to cls); the signature is kept for
        backward compatibility.
        """
        if isinstance(y_1, tuple):
            (u_m_1, pw_m_1) = y_1
            if isinstance(y_2, tuple):
                (u_m_2, pw_m_2) = y_2
                if isinstance(u_m_1, list):
                    return (all(np.all(_um1 == _um2)
                                for (_um1, _um2) in zip(u_m_1, u_m_2))
                            and all(np.all(_pw1 == _pw2)
                                    for (_pw1, _pw2) in zip(pw_m_1, pw_m_2)))
                else:
                    # BUG FIX: np.all(pw_m_1, pw_m_2) passed the second
                    # marginal as the `axis` argument; compare element-wise.
                    return np.all(u_m_1 == u_m_2) and np.all(pw_m_1 == pw_m_2)
            else:
                # A relaxed and a non-relaxed labeling never compare equal.
                return False
        return np.all(y_1 == y_2)

    def _update_cache(self, X, Y, Y_hat):
        """Remember joint features and losses of the latest inference results."""
        if self.inference_cache == 0:
            return
        if not hasattr(self, 'inference_cache_') or self.inference_cache_ is None:
            self.inference_cache_ = [[] for y in Y_hat]
        for sample, x, y, y_hat in zip(self.inference_cache_, X, Y, Y_hat):
            already_there = [self.constraint_equal(y_hat, cache[2])
                             for cache in sample]
            if np.any(already_there):
                continue
            if len(sample) > self.inference_cache:
                # FIFO eviction once the per-sample cache is full.
                sample.pop(0)
            sample.append((self.model.joint_feature(x, y_hat),
                           self.model.loss(y, y_hat), y_hat))

    def _constraint_from_cache(self, X, Y, joint_feature_gt, constraints):
        """Try to assemble a violated constraint from cached inference.

        Raises NoConstraint when the cache is empty, the last duality gap is
        too small, or the best cached constraint is too weak.
        """
        if (not getattr(self, 'inference_cache_', False)
                or self.inference_cache_ is False):
            if self.verbose > 10:
                print('Empty cache.')
            raise NoConstraint
        gap = self.primal_objective_curve_[-1] - self.objective_curve_[-1]
        if self.cache_tol == 'auto' and gap < self.cache_tol_:
            if self.verbose > 1:
                print('Last gap too small (%f < %f), not loading constraint from cache.'
                      % (gap, self.cache_tol_))
            raise NoConstraint
        Y_hat = []
        joint_feature_acc = np.zeros(self.model.size_joint_feature)
        loss_mean = 0
        for cached in self.inference_cache_:
            # Pick the cached labeling with maximum violation per sample.
            violations = [np.dot(joint_feature, self.w) + loss
                          for joint_feature, loss, _ in cached]
            joint_feature, loss, y_hat = cached[np.argmax(violations)]
            Y_hat.append(y_hat)
            joint_feature_acc += joint_feature
            loss_mean += loss
        djoint_feature = (joint_feature_gt - joint_feature_acc) / len(X)
        loss_mean = loss_mean / len(X)
        violation = loss_mean - np.dot(self.w, djoint_feature)
        if self._check_bad_constraint(violation, djoint_feature, loss_mean,
                                      constraints, break_on_bad=False):
            if self.verbose > 1:
                print('No constraint from cache.')
            raise NoConstraint
        return (Y_hat, djoint_feature, loss_mean)

    def _find_new_constraint(self, X, Y, joint_feature_gt, constraints, check=True):
        """Run loss-augmented inference to find a new violated constraint.

        Raises NoConstraint if the best constraint found is rejected.
        """
        if self.n_jobs != 1:
            verbose = max(0, self.verbose - 3)
            Y_hat = Parallel(n_jobs=self.n_jobs, verbose=verbose)(
                delayed(loss_augmented_inference)(self.model, x, y, self.w, relaxed=True)
                for (x, y) in zip(X, Y))
        else:
            Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w, relaxed=True)
        if getattr(self.model, 'rescale_C', False):
            djoint_feature = (joint_feature_gt
                              - self.model.batch_joint_feature(X, Y_hat, Y)) / len(X)
        else:
            djoint_feature = (joint_feature_gt
                              - self.model.batch_joint_feature(X, Y_hat)) / len(X)
        loss_mean = np.mean(self.model.batch_loss(Y, Y_hat))
        violation = loss_mean - np.dot(self.w, djoint_feature)
        if check and self._check_bad_constraint(violation, djoint_feature, loss_mean,
                                                constraints,
                                                break_on_bad=self.break_on_bad):
            raise NoConstraint
        return (Y_hat, djoint_feature, loss_mean)

    def fit(self, X, Y, constraints=None, warm_start=False, initialize=True):
        """Train the model with the 1-slack cutting-plane algorithm.

        Iterates: find/load a violated constraint, add it to the working
        set, re-solve the QP — until no sufficiently violated constraint
        remains or max_iter is reached. KeyboardInterrupt stops training
        gracefully. Returns self.
        """
        if self.verbose:
            print('Training 1-slack dual structural SVM')
        cvxopt.solvers.options['show_progress'] = self.verbose > 3
        if initialize:
            self.model.initialize(X, Y)
        if self.cache_tol is None or self.cache_tol == 'auto':
            self.cache_tol_ = self.tol
        else:
            self.cache_tol_ = self.cache_tol
        if not warm_start:
            # Fresh start: zero weights and a single origin constraint.
            self.w = np.zeros(self.model.size_joint_feature)
            constraints = []
            self.objective_curve_, self.primal_objective_curve_ = [], []
            self.cached_constraint_ = []
            self.alphas = []
            constraints.append((np.zeros(self.model.size_joint_feature), 0))
            self.alphas.append([self.C])
            self.inference_cache_ = None
            self.timestamps_ = [time()]
        elif warm_start == 'soft':
            # Keep curves/cache but restart weights and the working set.
            self.w = np.zeros(self.model.size_joint_feature)
            constraints = []
            self.alphas = []
            constraints.append((np.zeros(self.model.size_joint_feature), 0))
            self.alphas.append([self.C])
        else:
            constraints = self.constraints_
        self.last_slack_ = -1
        if getattr(self.model, 'rescale_C', False):
            joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
        else:
            joint_feature_gt = self.model.batch_joint_feature(X, Y)
        try:
            for iteration in range(self.max_iter):
                cached_constraint = False
                if self.verbose > 0:
                    print('iteration %d' % iteration)
                if self.verbose > 2:
                    print(self)
                try:
                    (Y_hat, djoint_feature, loss_mean) = self._constraint_from_cache(
                        X, Y, joint_feature_gt, constraints)
                    cached_constraint = True
                except NoConstraint:
                    try:
                        (Y_hat, djoint_feature, loss_mean) = self._find_new_constraint(
                            X, Y, joint_feature_gt, constraints)
                        self._update_cache(X, Y, Y_hat)
                    except NoConstraint:
                        if self.verbose:
                            print('no additional constraints')
                        # Optionally switch to a stronger inference method
                        # once the current one is exhausted.
                        if (self.switch_to is not None
                                and self.model.inference_method != self.switch_to):
                            if self.verbose:
                                print('Switching to %s inference' % str(self.switch_to))
                            self.model.inference_method_ = self.model.inference_method
                            self.model.inference_method = self.switch_to
                            continue
                        else:
                            break
                self.timestamps_.append(time() - self.timestamps_[0])
                self._compute_training_loss(X, Y, iteration)
                constraints.append((djoint_feature, loss_mean))
                # Current slack / primal objective before re-solving the QP.
                last_slack = -np.dot(self.w, djoint_feature) + loss_mean
                primal_objective = (self.C * len(X) * max(last_slack, 0)
                                    + np.sum(self.w ** 2) / 2)
                self.primal_objective_curve_.append(primal_objective)
                self.cached_constraint_.append(cached_constraint)
                objective = self._solve_1_slack_qp(constraints, n_samples=len(X))
                if self.cache_tol == 'auto' and not cached_constraint:
                    self.cache_tol_ = (primal_objective - objective) / 4
                self.last_slack_ = np.max([
                    -np.dot(self.w, djoint_feature) + loss_mean
                    for (djoint_feature, loss_mean) in constraints])
                self.last_slack_ = max(self.last_slack_, 0)
                if self.verbose > 0:
                    print('cutting plane objective: %f, primal objective %f'
                          % (objective, primal_objective))
                self.objective_curve_.append(objective)
                self.constraints_ = constraints
                if self.logger is not None:
                    self.logger(self, iteration)
                if self.verbose > 5:
                    print(self.w)
        except KeyboardInterrupt:
            pass
        if self.verbose and self.n_jobs == 1:
            print('calls to inference: %d' % self.model.inference_calls)
        self.timestamps_.append(time() - self.timestamps_[0])
        primal_objective = self._objective(X, Y)
        self.primal_objective_curve_.append(primal_objective)
        # NOTE: `objective` is unbound if the loop exits before the first QP
        # solve (pre-existing behavior, kept for compatibility).
        self.objective_curve_.append(objective)
        self.cached_constraint_.append(False)
        if self.logger is not None:
            self.logger(self, 'final')
        if self.verbose > 0:
            print('final primal objective: %f gap: %f'
                  % (primal_objective, primal_objective - objective))
        return self
def validator(xmlfile):
    """Validate that an uploaded file is a QGIS style XML document.

    Raises ValidationError on parse failure, wrong root tag, or when no
    known style element is present; otherwise rewinds the file and returns
    True.
    """
    try:
        tree = ET.parse(xmlfile)
    except ET.ParseError:
        raise ValidationError(_('Cannot parse the style file. Please ensure your file is correct.'))
    root = tree.getroot()
    # BUG FIX: an ElementTree Element with no children is falsy, so the old
    # `not root` / `if symbol:` truthiness checks misbehaved for childless
    # elements. Compare against None and check the tag explicitly.
    if root is None or root.tag != 'qgis_style':
        raise ValidationError(_('Invalid root tag of style file. Please ensure your file is correct.'))
    # One entry per supported style type; find() returns None when absent.
    style_elements = [
        root.find('./symbols/symbol'),
        root.find('./colorramps/colorramp'),
        root.find('./labelsettings/labelsetting'),
        root.find('./legendpatchshapes/legendpatchshape'),
        root.find('./symbols3d/symbol3d'),
        root.find('./textformats/textformat'),
    ]
    if all(element is None for element in style_elements):
        raise ValidationError(_('Undefined style type. Please register your style type.'))
    # Check name/type attributes on the first style element present.
    for element in style_elements:
        if element is not None:
            _check_name_type_attribute(element)
            break
    # Rewind so callers can re-read the validated file.
    xmlfile.seek(0)
    return True
class PytorchModuleHook(metaclass=ABCMeta):
    """Base class wrapping registration of a callback on a torch Module.

    Subclasses must override `hook` (the callback passed to torch) and the
    `hook_type` property (which register_* method to use).
    """

    def hook(self, *args, **kwargs):
        """Hook callback; signature depends on hook_type. Must be overridden."""
        # BUG FIX: the original method had no body (its decorator was lost),
        # which is a syntax error; raise until a subclass overrides it.
        raise NotImplementedError

    @property
    def hook_type(self) -> str:
        """One of 'forward', 'forward_pre' or 'backward'.

        NOTE: register() reads this without calling it, so it must be a
        property (the bodiless original strongly suggests a stripped
        decorator).
        """
        raise NotImplementedError

    def register(self, module):
        """Attach self.hook to `module` and return the removable handle.

        Raises ValueError for an unrecognized hook_type.
        """
        assert isinstance(module, torch.nn.Module)
        if self.hook_type == 'forward':
            h = module.register_forward_hook(self.hook)
        elif self.hook_type == 'forward_pre':
            h = module.register_forward_pre_hook(self.hook)
        elif self.hook_type == 'backward':
            h = module.register_backward_hook(self.hook)
        else:
            # BUG FIX: the message showed self.hook (the bound callback)
            # instead of the offending hook_type value.
            raise ValueError(f'Invalid hook type {self.hook_type}')
        return h
class TrainRegSet(torch.utils.data.Dataset):
    """ImageFolder-backed dataset yielding resized, normalized images with a
    dummy 'keypoints' target (regularization set, labels unused)."""

    def __init__(self, data_root, image_size):
        super().__init__()
        # Resize -> tensor -> normalize each channel to [-1, 1].
        preprocess = transforms.Compose([
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        self.transform = preprocess
        self.imgs = torchvision.datasets.ImageFolder(root=data_root, transform=preprocess)

    def __getitem__(self, idx):
        image, _label = self.imgs[idx]
        # Keypoints are a placeholder; only the image matters here.
        return {'img': image, 'keypoints': torch.tensor(0)}

    def __len__(self):
        return len(self.imgs)
class Effect8468(BaseEffect):
    """Passive effect copying black-ops jump passenger flags from the loaded
    charge onto the module itself."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # Both flags are forwarded the same way; iterate instead of repeating.
        for attr in ('isBlackOpsJumpPortalPassenger', 'isBlackOpsJumpConduitPassenger'):
            module.forceItemAttr(attr, module.getModifiedChargeAttr(attr), **kwargs)
class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """Base class for TF ConvNeXt models: ties config, dummy inputs and the
    serving signature together."""

    config_class = ConvNextConfig
    base_model_prefix = 'convnext'
    main_input_name = 'pixel_values'

    # NOTE(review): the bare (cls-free) accessor below lost its @property
    # decorator in transit; restored to match the usual dummy_inputs contract.
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """Random pixel values used to trace/build the network."""
        VISION_DUMMY_INPUTS = tf.random.uniform(
            shape=(3, self.config.num_channels, self.config.image_size, self.config.image_size),
            dtype=tf.float32)
        return {'pixel_values': tf.constant(VISION_DUMMY_INPUTS)}

    # BUG FIX: the decorator line was mangled into a bare parenthesised
    # expression `(input_signature=[...])` — a syntax error; restore the
    # tf.function serving decorator around that signature.
    @tf.function(input_signature=[{'pixel_values': tf.TensorSpec((None, None, None, None), tf.float32, name='pixel_values')}])
    def serving(self, inputs):
        """SavedModel serving entry point; forwards to call()."""
        return self.call(inputs)
def direct_junction_right_multi_lane_fixture():
    """Build a 3+3-lane main road, a 2-lane right-only ramp, and the direct
    junction creator connecting them."""
    creator = xodr.DirectJunctionCreator(id=400, name='second_highway_connection')
    highway = xodr.create_road(xodr.Line(200), 1, right_lanes=3, left_lanes=3)
    ramp = xodr.create_road(xodr.Line(200), 2, right_lanes=2, left_lanes=0)
    return (highway, ramp, creator)
class TLNK(TestCase):
    """Tests for the ID3v2.2 LNK frame and its upgrade to LINK."""

    def test_default(self):
        frame = LNK()
        self.assertEqual(frame.frameid, u'XXX')
        self.assertEqual(frame.url, u'')

    def test_upgrade(self):
        # BUG FIX: the URL literal was truncated to `url = '` in transit
        # (a syntax error); any absolute URL exercises the upgrade path —
        # confirm the exact value against upstream history.
        url = 'http://www.example.com/'
        frame = LNK(frameid='PIC', url=url, data=b'\x00')
        new = LINK(frame)
        # Known v2.2 frame ids map to their v2.3 equivalents...
        self.assertEqual(new.frameid, 'APIC')
        self.assertEqual(new.url, url)
        self.assertEqual(new.data, b'\x00')
        # ...while unknown ids are padded to four characters.
        frame = LNK(frameid='XYZ')
        new = LINK(frame)
        self.assertEqual(new.frameid, 'XYZ ')
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
    """Algorithm-level tests for portfolio position tracking."""

    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-01-06', tz='utc')
    SIM_PARAMS_CAPITAL_BASE = 1000
    ASSET_FINDER_EQUITY_SIDS = (1, 133)
    SIM_PARAMS_DATA_FREQUENCY = 'daily'

    # NOTE(review): the (cls, ...) signatures indicate these three fixture
    # factories lost their @classmethod decorators in transit; restored.
    @classmethod
    def make_equity_daily_bar_data(cls, country_code, sids):
        frame = pd.DataFrame(
            {'open': [90, 95, 100, 105], 'high': [90, 95, 100, 105],
             'low': [90, 95, 100, 105], 'close': [90, 95, 100, 105],
             'volume': 100},
            index=cls.equity_daily_bar_days)
        return ((sid, frame) for sid in sids)

    @classmethod
    def make_futures_info(cls):
        return pd.DataFrame.from_dict(
            {1000: {'symbol': 'CLF06', 'root_symbol': 'CL',
                    'start_date': cls.START_DATE, 'end_date': cls.END_DATE,
                    'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
                    'exchange': 'CMES', 'multiplier': 100}},
            orient='index')

    @classmethod
    def make_future_minute_bar_data(cls):
        trading_calendar = cls.trading_calendars[Future]
        sids = cls.asset_finder.futures_sids
        minutes = trading_calendar.minutes_for_sessions_in_range(
            cls.future_minute_bar_days[0], cls.future_minute_bar_days[-1])
        frame = pd.DataFrame(
            {'open': 2.0, 'high': 2.0, 'low': 2.0, 'close': 2.0, 'volume': 100},
            index=minutes)
        return ((sid, frame) for sid in sids)

    def test_portfolio_exited_position(self):
        """Opening then fully closing positions nets out to zero positions."""
        def initialize(context, sids):
            context.ordered = False
            context.exited = False
            context.sids = sids

        def handle_data(context, data):
            if not context.ordered:
                for s in context.sids:
                    context.order(context.sid(s), 1)
                context.ordered = True
            if not context.exited:
                amounts = [pos.amount for pos
                           in itervalues(context.portfolio.positions)]
                if len(amounts) > 0 and all([amount == 1 for amount in amounts]):
                    for stock in context.portfolio.positions:
                        context.order(context.sid(stock), -1)
                    context.exited = True
            context.record(num_positions=len(context.portfolio.positions))

        result = self.run_algorithm(initialize=initialize,
                                    handle_data=handle_data,
                                    sids=self.ASSET_FINDER_EQUITY_SIDS)
        expected_position_count = [0, 2, 0, 0]
        for i, expected in enumerate(expected_position_count):
            # BUG FIX: DataFrame.ix was removed from pandas; use iloc.
            self.assertEqual(result.iloc[i]['num_positions'], expected)

    def test_noop_orders(self):
        """Orders with unreachable limit/stop prices should never fill."""
        asset = self.asset_finder.retrieve_asset(1)

        def handle_data(algo, data):
            # Buys that cannot fill.
            algo.order(asset, 100, limit_price=1)
            # NOTE(review): the limit/stop values on the next two orders were
            # lost in transit (syntax errors); restored with prices far from
            # the market so the orders stay no-ops — confirm upstream.
            algo.order(asset, 100, stop_price=10000000)
            algo.order(asset, 100, limit_price=1, stop_price=10000000)
            algo.order(asset, 100, limit_price=1, stop_price=1)
            # Sells that cannot fill.
            algo.order(asset, -100, limit_price=1000000)
            algo.order(asset, -100, stop_price=1)
            algo.order(asset, -100, limit_price=1000000, stop_price=1000000)
            algo.order(asset, -100, limit_price=1, stop_price=1)
            # Orders with extreme near-zero prices.
            algo.order(asset, 100, limit_price=1e-08)
            algo.order(asset, -100, stop_price=1e-08)

        daily_stats = self.run_algorithm(handle_data=handle_data)
        empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
        self.assertTrue(empty_positions.all())

    def test_position_weights(self):
        """Recorded portfolio weights must track the equity/future positions."""
        sids = (1, 133, 1000)
        equity_1, equity_133, future_1000 = self.asset_finder.retrieve_all(sids)

        def initialize(algo, sids_and_amounts, *args, **kwargs):
            algo.ordered = False
            algo.sids_and_amounts = sids_and_amounts
            # Zero costs so the expected weights below are exact.
            algo.set_commission(us_equities=PerTrade(0), us_futures=PerTrade(0))
            algo.set_slippage(us_equities=FixedSlippage(0),
                              us_futures=FixedSlippage(0))

        def handle_data(algo, data):
            if not algo.ordered:
                for s, amount in algo.sids_and_amounts:
                    algo.order(algo.sid(s), amount)
                algo.ordered = True
            algo.record(position_weights=algo.portfolio.current_portfolio_weights)

        # BUG FIX: materialize the zip — a generator would be exhausted after
        # one traversal if it were ever iterated twice.
        daily_stats = self.run_algorithm(
            sids_and_amounts=list(zip(sids, [2, -1, 1])),
            initialize=initialize,
            handle_data=handle_data)
        expected_position_weights = [
            pd.Series({}),
            pd.Series({equity_1: 190.0 / ((190.0 - 95.0) + 905.0),
                       equity_133: -95.0 / ((190.0 - 95.0) + 905.0),
                       future_1000: 200.0 / ((190.0 - 95.0) + 905.0)}),
            pd.Series({equity_1: 200.0 / ((200.0 - 100.0) + 905.0),
                       equity_133: -100.0 / ((200.0 - 100.0) + 905.0),
                       future_1000: 200.0 / ((200.0 - 100.0) + 905.0)}),
            pd.Series({equity_1: 210.0 / ((210.0 - 105.0) + 905.0),
                       equity_133: -105.0 / ((210.0 - 105.0) + 905.0),
                       future_1000: 200.0 / ((210.0 - 105.0) + 905.0)}),
        ]
        for i, expected in enumerate(expected_position_weights):
            assert_equal(daily_stats.iloc[i]['position_weights'], expected)
def test_unique_uri_validator_serializer_create_error(db):
    """Creating a view whose URI collides with an existing one must raise."""
    validator = ViewUniqueURIValidator()
    serializer = ViewSerializer()
    existing = View.objects.filter(uri_prefix=settings.DEFAULT_URI_PREFIX).last()
    data = {
        'uri_prefix': settings.DEFAULT_URI_PREFIX,
        'uri_path': existing.uri_path,
    }
    with pytest.raises(RestFameworkValidationError):
        validator(data, serializer)
class VcfWriter():
    """Writes VCF text to an output stream, round-tripping each record
    through a Writer built from the given header string."""

    def __init__(self, output, header_str):
        # output: open text stream the VCF text is printed to.
        self.output = output
        self.header_str = header_str
        # NOTE(review): the NamedTemporaryFile object itself is passed to
        # Writer.from_string and not kept on self, so it may be closed and
        # deleted as soon as __init__ returns — confirm whether
        # Writer.from_string expects a path (tmp.name) or a file object.
        tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.vcf')
        self.vcf = Writer.from_string(tmp, self.header_str)
        # Emit the header verbatim (it already carries its own newlines).
        print(self.header_str, end='', file=self.output)

    def write(self, variant):
        # Parse the record through the Writer so it is normalized/validated,
        # then emit its string form.
        v = self.vcf.variant_from_string(str(variant))
        print(str(v), end='', file=self.output)
class discriminatorLoss(nn.Module):
    """Adversarial loss over pairs of feature maps.

    One small Discriminator per feature dimension classifies whether a
    (pooled) feature vector came from the first or the second network.
    NOTE: this module assumes CUDA is available (kept from the original).
    """

    # NOTE: the default `loss` module is created once at class-definition
    # time and shared by all instances; BCEWithLogitsLoss is stateless, so
    # this is harmless, but pass an explicit instance to be safe.
    def __init__(self, dim_ins, loss=nn.BCEWithLogitsLoss()):
        super(discriminatorLoss, self).__init__()
        # BUG FIX: a plain Python list hides the sub-discriminators from
        # nn.Module bookkeeping (parameters(), .to(), state_dict());
        # nn.ModuleList registers them properly.
        self.classifier = nn.ModuleList(
            Discriminator(dim_in=dim, dim_out=2).cuda() for dim in dim_ins)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.loss = loss

    def forward(self, features1, features2):
        """Sum the discriminator losses over all feature pairs.

        features1/features2 may be single tensors or parallel lists, one
        entry per discriminator.
        """
        gan_loss = torch.tensor(0.0).cuda()
        if not isinstance(features1, list):
            features1 = [features1]
            features2 = [features2]
        for i in range(len(self.classifier)):
            # Stack both sources into one batch: first half from features1,
            # second half from features2.
            inputs = torch.cat((features1[i], features2[i]), 0)
            if len(inputs.size()) > 2:
                # Spatial maps: global-average-pool down to vectors.
                inputs = self.avg_pool(inputs).view(inputs.size(0), -1)
            batch_size = inputs.size(0)
            half = batch_size // 2
            # One-hot targets: [1,0] for the first source, [0,1] for the second.
            target = torch.FloatTensor([[1, 0]] * half + [[0, 1]] * half).cuda()
            outputs = self.classifier[i](inputs)
            gan_loss += self.loss(outputs, target)
        return gan_loss
class biased_softplus(nn.Module):
    """Softplus with a pre-activation offset chosen so that forward(0)
    evaluates to `bias`, and an output floor of `min_val`."""

    def __init__(self, bias: float, min_val: float = 0.01) -> None:
        super().__init__()
        # Offset that makes softplus(0 + offset) + min_val == bias.
        self.bias = inv_softplus(bias - min_val)
        self.min_val = min_val

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shifted = x + self.bias
        return torch.nn.functional.softplus(shifted) + self.min_val
class Network():
def __init__(self, name=None, func=None, **static_kwargs):
    """Resolve the build function named by `func`, construct the template
    graph, and initialize all variables."""
    self._init_fields()
    self.static_kwargs = dict(static_kwargs)
    self.name = name
    # Keep the build module's source so the network can be pickled.
    module, self._build_func_name = import_module(func)
    self._build_module_src = inspect.getsource(module)
    self._build_func = find_obj_in_module(module, self._build_func_name)
    self._init_graph()
    self.reset_vars()
def _init_fields(self):
    """Reset every attribute to a pristine, empty state."""
    # Identity / configuration.
    self.name = None
    self.scope = None
    self.static_kwargs = dict()
    # Graph topology metadata.
    self.num_inputs = 0
    self.num_outputs = 0
    self.input_names = []
    self.output_names = []
    self.input_shapes = [[]]
    self.output_shapes = [[]]
    self.input_shape = []
    self.output_shape = []
    self.input_templates = []
    self.output_templates = []
    # Variable registries (localname -> tf.Variable).
    self.vars = OrderedDict()
    self.trainables = OrderedDict()
    # Build-function bookkeeping and run() graph cache.
    self._build_func = None
    self._build_func_name = None
    self._build_module_src = None
    self._run_cache = dict()
def _init_graph(self):
    """Build the template graph under a unique scope and collect metadata.

    Infers input names from the build function's required positional
    parameters, instantiates placeholder templates, and records
    input/output shapes plus variable registries.
    """
    # Required positional params of the build func become network inputs.
    self.input_names = []
    for param in inspect.signature(self._build_func).parameters.values():
        if ((param.kind == param.POSITIONAL_OR_KEYWORD) and (param.default is param.empty)):
            self.input_names.append(param.name)
    self.num_inputs = len(self.input_names)
    assert (self.num_inputs >= 1)
    # Choose a unique TF scope derived from the network name.
    if (self.name is None):
        self.name = self._build_func_name
    self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)
    # Build the template graph; control_dependencies(None) detaches it from
    # any surrounding control flow.
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        assert (tf.get_variable_scope().name == self.scope)
        with absolute_name_scope(self.scope):
            with tf.control_dependencies(None):
                self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
    # Normalize outputs to a list and collect names/shapes.
    assert (is_tf_expression(out_expr) or isinstance(out_expr, tuple))
    self.output_templates = ([out_expr] if is_tf_expression(out_expr) else list(out_expr))
    self.output_names = [t.name.split('/')[(- 1)].split(':')[0] for t in self.output_templates]
    self.num_outputs = len(self.output_templates)
    assert (self.num_outputs >= 1)
    self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates]
    self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates]
    self.input_shape = self.input_shapes[0]
    self.output_shape = self.output_shapes[0]
    # Index variables by their name local to this network's scope.
    self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables((self.scope + '/'))])
    self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables((self.scope + '/'))])
def reset_vars(self):
    """Re-run the initializer of every variable owned by this network."""
    initializers = [v.initializer for v in self.vars.values()]
    run(initializers)
def reset_trainables(self):
    """Re-run the initializer of every trainable variable only."""
    initializers = [v.initializer for v in self.trainables.values()]
    run(initializers)
def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):
    """Build output expressions for the given concrete input tensors.

    `dynamic_kwargs` override the stored static kwargs for this call only.
    """
    assert len(in_expr) == self.num_inputs
    call_kwargs = dict(self.static_kwargs)
    call_kwargs.update(dynamic_kwargs)
    with tf.variable_scope(self.scope, reuse=True):
        assert tf.get_variable_scope().name == self.scope
        # Alias inputs with their canonical names for readable graphs.
        named_inputs = [tf.identity(expr, name=name)
                        for expr, name in zip(in_expr, self.input_names)]
        out_expr = self._build_func(*named_inputs, **call_kwargs)
    assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
    if return_as_list:
        out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
    return out_expr
def get_var_localname(self, var_or_globalname):
    """Map a variable (or its global name) to its name local to this scope.

    E.g. 'G/mapping/w:0' -> 'mapping/w' when self.scope == 'G'.
    """
    assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str)
    if isinstance(var_or_globalname, str):
        globalname = var_or_globalname
    else:
        globalname = var_or_globalname.name
    assert globalname.startswith(self.scope + '/')
    # Strip the scope prefix and any ':0'-style output suffix.
    localname = globalname[len(self.scope) + 1:]
    return localname.split(':')[0]
def find_var(self, var_or_localname):
    """Resolve a local name to its variable; pass variables through as-is."""
    assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str)
    if isinstance(var_or_localname, str):
        return self.vars[var_or_localname]
    return var_or_localname
def get_var(self, var_or_localname):
    """Fetch the current value of a variable as a numpy array."""
    var = self.find_var(var_or_localname)
    return var.eval()
def set_var(self, var_or_localname, new_value):
    """Assign a new value to one variable."""
    target = self.find_var(var_or_localname)
    return set_vars({target: new_value})
def __getstate__(self):
    """Pickle support: capture build source and weights, not TF objects."""
    names = list(self.vars.keys())
    values = run(list(self.vars.values()))
    return {
        'version': 2,
        'name': self.name,
        'static_kwargs': self.static_kwargs,
        'build_module_src': self._build_module_src,
        'build_func_name': self._build_func_name,
        'variables': list(zip(names, values)),
    }
def __setstate__(self, state):
    """Unpickle support: re-exec the stored build source, rebuild the graph,
    then restore all variable values."""
    self._init_fields()
    # Let registered handlers migrate legacy pickle formats first.
    for handler in network_import_handlers:
        state = handler(state)
    assert (state['version'] == 2)
    self.name = state['name']
    self.static_kwargs = state['static_kwargs']
    self._build_module_src = state['build_module_src']
    self._build_func_name = state['build_func_name']
    # SECURITY: this exec's arbitrary Python source embedded in the pickle —
    # only unpickle networks from trusted sources.
    module = imp.new_module(('_tfutil_network_import_module_%d' % len(_network_import_modules)))
    exec(self._build_module_src, module.__dict__)
    self._build_func = find_obj_in_module(module, self._build_func_name)
    # Keep the module alive so the build function stays importable.
    _network_import_modules.append(module)
    self._init_graph()
    self.reset_vars()
    set_vars({self.find_var(name): value for (name, value) in state['variables']})
def clone(self, name=None):
    """Create an independent copy of this network (graph and weights)."""
    # Bypass __init__: all fields are populated manually below.
    net = object.__new__(Network)
    net._init_fields()
    net.name = self.name if name is None else name
    net.static_kwargs = dict(self.static_kwargs)
    net._build_module_src = self._build_module_src
    net._build_func_name = self._build_func_name
    net._build_func = self._build_func
    net._init_graph()
    net.copy_vars_from(self)
    return net
def copy_vars_from(self, src_net):
    """Copy every variable value from `src_net` into this network."""
    assert isinstance(src_net, Network)
    # Fetch all source values in one session run, then assign in one go.
    fetched = run({name: src_net.find_var(name) for name in self.vars.keys()})
    set_vars({self.find_var(name): value for name, value in fetched.items()})
def copy_trainables_from(self, src_net):
    """Copy only the trainable variable values from `src_net`."""
    assert isinstance(src_net, Network)
    fetched = run({name: src_net.find_var(name) for name in self.trainables.keys()})
    set_vars({self.find_var(name): value for name, value in fetched.items()})
def convert(self, name=None, func=None, **static_kwargs):
    """Build a new network from `func` and load this network's weights."""
    target = Network(name, func, **static_kwargs)
    target.copy_vars_from(self)
    return target
def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
    """Build a TF op that blends this net's variables toward src_net's.

    Trainables use `beta`; all other shared variables use
    `beta_nontrainable` (0.0 means they are copied outright).
    """
    assert isinstance(src_net, Network)
    with absolute_name_scope(self.scope):
        with tf.name_scope('MovingAvg'):
            assign_ops = []
            for name, var in self.vars.items():
                if name not in src_net.vars:
                    continue
                cur_beta = beta if name in self.trainables else beta_nontrainable
                blended = lerp(src_net.vars[name], var, cur_beta)
                assign_ops.append(var.assign(blended))
            return tf.group(*assign_ops)
def run(self, *in_arrays, return_as_list=False, print_progress=False, minibatch_size=None, num_gpus=1, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None, **dynamic_kwargs):
    """Feed numpy arrays through the network and return numpy outputs.

    Builds (and caches, keyed by the option combination) a multi-GPU
    evaluation graph with optional output scaling/offset, average-pool
    shrinking and dtype casting, then evaluates it in minibatches.
    """
    assert (len(in_arrays) == self.num_inputs)
    num_items = in_arrays[0].shape[0]
    if (minibatch_size is None):
        minibatch_size = num_items
    # Cache key covers everything that changes the constructed graph.
    key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])
    if (key not in self._run_cache):
        with absolute_name_scope((self.scope + '/Run')), tf.control_dependencies(None):
            # Split each input template across GPUs.
            in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))
            out_split = []
            for gpu in range(num_gpus):
                with tf.device(('/gpu:%d' % gpu)):
                    out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)
                    # Optional affine post-processing of the outputs.
                    if (out_mul != 1.0):
                        out_expr = [(x * out_mul) for x in out_expr]
                    if (out_add != 0.0):
                        out_expr = [(x + out_add) for x in out_expr]
                    # Optional spatial downscaling (assumes NCHW layout).
                    if (out_shrink > 1):
                        ksize = [1, 1, out_shrink, out_shrink]
                        out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]
                    # Optional cast; integer targets are rounded first.
                    if (out_dtype is not None):
                        if tf.as_dtype(out_dtype).is_integer:
                            out_expr = [tf.round(x) for x in out_expr]
                        out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]
                    out_split.append(out_expr)
            # Re-concatenate the per-GPU shards along the batch axis.
            self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
    # Evaluate in minibatches, writing into pre-allocated result arrays.
    out_expr = self._run_cache[key]
    out_arrays = [np.empty(([num_items] + shape_to_list(expr.shape)[1:]), expr.dtype.name) for expr in out_expr]
    for mb_begin in range(0, num_items, minibatch_size):
        if print_progress:
            print(('\r%d / %d' % (mb_begin, num_items)), end='')
        mb_end = min((mb_begin + minibatch_size), num_items)
        mb_in = [src[mb_begin:mb_end] for src in in_arrays]
        mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))
        for (dst, src) in zip(out_arrays, mb_out):
            dst[mb_begin:mb_end] = src
    if print_progress:
        print(('\r%d / %d' % (num_items, num_items)))
    # Single output is returned bare unless a list was requested.
    if (not return_as_list):
        out_arrays = (out_arrays[0] if (len(out_arrays) == 1) else tuple(out_arrays))
    return out_arrays
def list_layers(self):
    """Walk the TF graph under this network's scope and group ops into layers.

    Returns:
        List of (layer_name, layer_output, layer_trainables) tuples, where
        layer_name is relative to self.scope, layer_output is the last op's
        first output tensor, and layer_trainables are the trainable Variable
        tensors belonging to that layer.
    """
    # Helper/bookkeeping ops that should not count as layers of their own.
    patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat']
    all_ops = tf.get_default_graph().get_operations()
    all_ops = [op for op in all_ops if (not any(((p in op.name) for p in patterns_to_ignore)))]
    layers = []
    def recurse(scope, parent_ops, level):
        # Descend into sub-scopes until we reach a scope that directly contains
        # ops (i.e. not all remaining ops live in deeper sub-scopes); that scope
        # is treated as one layer.
        prefix = (scope + '/')
        ops = [op for op in parent_ops if ((op.name == scope) or op.name.startswith(prefix))]
        if ((level == 0) or all((('/' in op.name[len(prefix):]) for op in ops))):
            visited = set()
            for op in ops:
                suffix = op.name[len(prefix):]
                if ('/' in suffix):
                    suffix = suffix[:suffix.index('/')]
                if (suffix not in visited):
                    recurse((prefix + suffix), ops, (level + 1))
                    visited.add(suffix)
        else:
            layer_name = scope[(len(self.scope) + 1):]
            # The last op in graph order is taken as the layer's output.
            layer_output = ops[(- 1)].outputs[0]
            layer_trainables = [op.outputs[0] for op in ops if (op.type.startswith('Variable') and (self.get_var_localname(op.name) in self.trainables))]
            layers.append((layer_name, layer_output, layer_trainables))
    recurse(self.scope, all_ops, 0)
    return layers
def print_layers(self, title=None, hide_layers_with_no_params=False):
    """Print a formatted table of this network's layers.

    For each layer: parameter count, output shape, and (if the layer has
    exactly one '/weight:0' variable) the weight shape. Ends with a total.
    """
    row_fmt = '%-28s%-12s%-24s%-24s'
    header = self.name if title is None else title
    print()
    print(row_fmt % (header, 'Params', 'OutputShape', 'WeightShape'))
    print(row_fmt % (('---',) * 4))
    total_params = 0
    for layer_name, layer_output, layer_trainables in self.list_layers():
        weight_vars = [v for v in layer_trainables if v.name.endswith('/weight:0')]
        num_params = sum(np.prod(shape_to_list(v.shape)) for v in layer_trainables)
        total_params += num_params
        if hide_layers_with_no_params and num_params == 0:
            continue
        params_col = num_params if num_params else '-'
        weight_col = weight_vars[0].shape if len(weight_vars) == 1 else '-'
        print(row_fmt % (layer_name, params_col, layer_output.shape, weight_col))
    print(row_fmt % (('---',) * 4))
    print(row_fmt % ('Total', total_params, '', ''))
    print()
def setup_weight_histograms(self, title=None):
    """Register a TensorBoard histogram summary for every trainable variable.

    Summary names are built as '<title>_<leaf>/<joined-parents>' for nested
    variables and '<title>_toplevel/<name>' for top-level ones.
    """
    if title is None:
        title = self.name
    # Escape any enclosing scope/device/control-dependency context so the
    # summaries are created at graph top level.
    with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
        for localname, var in self.trainables.items():
            parts = localname.split('/')
            if len(parts) > 1:
                summary_name = title + '_' + parts[-1] + '/' + '_'.join(parts[:-1])
            else:
                summary_name = title + '_toplevel/' + localname
            tf.summary.histogram(summary_name, var)
class Effect3480(BaseEffect):
    # Passive hull bonus: boosts tracking speed of all modules requiring the
    # Large Energy Turret skill by the ship's 'shipBonusAB2' attribute,
    # scaled per level of the Amarr Battleship skill.
    type = 'passive'
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Energy Turret')), 'trackingSpeed', ship.getModifiedItemAttr('shipBonusAB2'), skill='Amarr Battleship', **kwargs)
class FC3_DisplayMode(KickstartCommand):
    """Handler for the kickstart graphical/text/cmdline display-mode commands."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        # One of the DISPLAY_MODE_* constants, or None when not set.
        self.displayMode = kwargs.get('displayMode', None)
        self.op = self._getParser()

    def __str__(self):
        """Emit the kickstart text for the selected display mode."""
        retval = KickstartCommand.__str__(self)
        if self.displayMode == DISPLAY_MODE_CMDLINE:
            return retval + 'cmdline\n'
        if self.displayMode == DISPLAY_MODE_GRAPHICAL:
            return retval + '# Use graphical install\ngraphical\n'
        if self.displayMode == DISPLAY_MODE_TEXT:
            return retval + '# Use text mode install\ntext\n'
        # Unset (None) or unrecognized mode: emit nothing extra.
        return retval

    def parse(self, args):
        """Parse the command line and map the command name to a display mode."""
        ns = self.op.parse_args(args=args, lineno=self.lineno)
        self.set_to_self(ns)
        modes = {'cmdline': DISPLAY_MODE_CMDLINE,
                 'graphical': DISPLAY_MODE_GRAPHICAL,
                 'text': DISPLAY_MODE_TEXT}
        if self.currentCmd not in modes:
            raise KickstartParseError(_('Unknown command %s') % self.currentCmd, lineno=self.lineno)
        self.displayMode = modes[self.currentCmd]
        return self

    def _getParser(self):
        op = KSOptionParser(prog='graphical|text|cmdline', version=FC3, description='\n Controls which display mode will be used for the\n installation and for the installed system. If ``text``\n or ``cmdline`` is chosen the system will boot in text\n mode. And when ``cmdline`` is used all required installation\n options must be configured via kickstart, otherwise the \n installation will fail.')
        return op
class AWSSession(Session):
    """Session holding AWS credentials/configuration for signed (or unsigned) S3 access.

    Credentials come from an explicit boto3 session, explicit keys, a profile,
    or the AWS_NO_SIGN_REQUEST environment variable (unsigned access).
    """

    def __init__(self, session=None, aws_unsigned=None, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name=None, profile_name=None, endpoint_url=None, requester_pays=False):
        """Create the session.

        Parameters:
            session: an existing boto3 Session to reuse.
            aws_unsigned: if True, make unsigned (anonymous) requests; when
                None, falls back to the AWS_NO_SIGN_REQUEST env var.
            aws_access_key_id / aws_secret_access_key / aws_session_token:
                explicit credentials for a new boto3 Session.
            region_name, profile_name: forwarded to boto3.
            endpoint_url: custom S3 endpoint.
            requester_pays: if True, mark requests as requester-pays.
        """
        if (aws_unsigned is None):
            aws_unsigned = parse_bool(os.getenv('AWS_NO_SIGN_REQUEST', False))
        if session:
            self._session = session
        elif aws_unsigned:
            # No real boto3 session needed for anonymous access; keep a stub
            # that still exposes region_name for credentials().
            self._session = SimpleNamespace(region_name=region_name)
        else:
            self._session = boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region_name, profile_name=profile_name)
        self.requester_pays = requester_pays
        self.unsigned = aws_unsigned
        self.endpoint_url = endpoint_url
        self._creds = (self._session.get_credentials() if (not self.unsigned) else None)

    # FIX: this takes `cls` but was not declared a classmethod, so calling it
    # on the class would treat the config dict as `cls`.
    @classmethod
    def hascreds(cls, config):
        """Return True if the given config mapping carries usable AWS credentials."""
        return ((('AWS_ACCESS_KEY_ID' in config) and ('AWS_SECRET_ACCESS_KEY' in config)) or ('AWS_NO_SIGN_REQUEST' in config))

    # FIX: get_credential_options() accesses `self.credentials.items()`, which
    # requires this to be a property; as a plain method it returned a bound
    # method object and .items() raised AttributeError.
    @property
    def credentials(self):
        """Dict of GDAL-style credential/config options for this session."""
        res = {}
        if self._creds:
            frozen_creds = self._creds.get_frozen_credentials()
            if frozen_creds.access_key:
                res['aws_access_key_id'] = frozen_creds.access_key
            if frozen_creds.secret_key:
                res['aws_secret_access_key'] = frozen_creds.secret_key
            if frozen_creds.token:
                res['aws_session_token'] = frozen_creds.token
        if self._session.region_name:
            res['aws_region'] = self._session.region_name
        if self.requester_pays:
            res['aws_request_payer'] = 'requester'
        if self.endpoint_url:
            res['aws_s3_endpoint'] = self.endpoint_url
        return res

    def get_credential_options(self):
        """Return uppercase-keyed credential options suitable for GDAL config."""
        if self.unsigned:
            opts = {'AWS_NO_SIGN_REQUEST': 'YES'}
            # Region and custom endpoint still apply to unsigned requests.
            opts.update({k.upper(): v for (k, v) in self.credentials.items() if (k in ('aws_region', 'aws_s3_endpoint'))})
            return opts
        else:
            return {k.upper(): v for (k, v) in self.credentials.items()}
def get_source_fields(fields=None):
    """Build (or extend) the torchtext field dict with source-side fields.

    Adds a 'src' text field (padded, EOS-terminated, with lengths) and an
    'indices' scalar long field. Mutates and returns the given dict.
    """
    fields = {} if fields is None else fields
    src_field = torchtext.data.Field(pad_token=Constants.PAD_WORD, eos_token=Constants.EOS_WORD, include_lengths=True)
    index_field = torchtext.data.Field(use_vocab=False, dtype=torch.long, sequential=False)
    fields['src'] = src_field
    fields['indices'] = index_field
    return fields
def get_all_metrics(test, gen, k=None, n_jobs=1, device='cpu', batch_size=512, test_scaffolds=None, ptest=None, ptest_scaffolds=None, pool=None, gpu=None, train=None):
    """Compute a suite of distribution-matching metrics for generated molecules.

    Args:
        test: reference (test) set of SMILES strings.
        gen: generated SMILES strings.
        k: int or list of ints — sample sizes for the uniqueness metric
            (defaults to [1000, 10000]).
        n_jobs: worker count used to create a multiprocessing pool when
            `pool` is None.
        device: torch device string used by the neural metrics (FCD, SNN).
        batch_size: batch size for the neural metrics.
        test_scaffolds: optional scaffold-split test set; enables */TestSF metrics.
        ptest, ptest_scaffolds: optional precomputed intermediate statistics.
        pool: existing multiprocessing pool to reuse (only pools created here
            are closed at the end).
        gpu: deprecated — use `device` instead.
        train: optional training set; enables the Novelty metric.

    Returns:
        Dict mapping metric name to value.
    """
    if (k is None):
        k = [1000, 10000]
    disable_rdkit_log()
    metrics = {}
    # Back-compat shim: translate the deprecated `gpu` index into a device string.
    if (gpu is not None):
        warnings.warn('parameter `gpu` is deprecated. Use `device`', DeprecationWarning)
        if (gpu == (- 1)):
            device = 'cpu'
        else:
            device = 'cuda:{}'.format(gpu)
    # Create a pool only when one wasn't supplied; remember whether to close it.
    close_pool = False
    if (pool is None):
        if (n_jobs != 1):
            pool = Pool(n_jobs)
            close_pool = True
        else:
            pool = 1
    metrics['valid'] = fraction_valid(gen, n_jobs=pool)
    gen = remove_invalid(gen, canonize=True)
    if (not isinstance(k, (list, tuple))):
        k = [k]
    for _k in k:
        # NOTE(review): key is just str(_k); upstream MOSES names this
        # 'unique@{}' — confirm which naming downstream consumers expect.
        metrics['{}'.format(_k)] = fraction_unique(gen, _k, pool)
    if (ptest is None):
        ptest = compute_intermediate_statistics(test, n_jobs=n_jobs, device=device, batch_size=batch_size, pool=pool)
    if ((test_scaffolds is not None) and (ptest_scaffolds is None)):
        ptest_scaffolds = compute_intermediate_statistics(test_scaffolds, n_jobs=n_jobs, device=device, batch_size=batch_size, pool=pool)
    mols = mapper(pool)(get_mol, gen)
    # FCD works on SMILES strings (and manages its own workers), so it gets
    # n_jobs directly; the other metrics take the pool and parsed mols.
    kwargs = {'n_jobs': pool, 'device': device, 'batch_size': batch_size}
    kwargs_fcd = {'n_jobs': n_jobs, 'device': device, 'batch_size': batch_size}
    metrics['FCD/Test'] = FCDMetric(**kwargs_fcd)(gen=gen, pref=ptest['FCD'])
    metrics['SNN/Test'] = SNNMetric(**kwargs)(gen=mols, pref=ptest['SNN'])
    metrics['Frag/Test'] = FragMetric(**kwargs)(gen=mols, pref=ptest['Frag'])
    metrics['Scaf/Test'] = ScafMetric(**kwargs)(gen=mols, pref=ptest['Scaf'])
    if (ptest_scaffolds is not None):
        metrics['FCD/TestSF'] = FCDMetric(**kwargs_fcd)(gen=gen, pref=ptest_scaffolds['FCD'])
        metrics['SNN/TestSF'] = SNNMetric(**kwargs)(gen=mols, pref=ptest_scaffolds['SNN'])
        metrics['Frag/TestSF'] = FragMetric(**kwargs)(gen=mols, pref=ptest_scaffolds['Frag'])
        metrics['Scaf/TestSF'] = ScafMetric(**kwargs)(gen=mols, pref=ptest_scaffolds['Scaf'])
    metrics['IntDiv'] = internal_diversity(mols, pool, device=device)
    metrics['IntDiv2'] = internal_diversity(mols, pool, device=device, p=2)
    metrics['Filters'] = fraction_passes_filters(mols, pool)
    # Distribution distance of simple molecular descriptors.
    for (name, func) in [('logP', logP), ('SA', SA), ('QED', QED), ('NP', NP), ('weight', weight)]:
        metrics[name] = FrechetMetric(func, **kwargs)(gen=mols, pref=ptest[name])
    if (train is not None):
        metrics['Novelty'] = novelty(mols, train, pool)
    enable_rdkit_log()
    if close_pool:
        pool.close()
        pool.join()
    return metrics
def format_args(args: Sequence[Any]=None, kwargs: Mapping[(str, Any)]=None) -> str:
    """Format a call's positional and keyword arguments for display.

    Each value's repr is truncated to 200 characters; keyword arguments are
    rendered as 'key=value'. Returns a single comma-separated string.
    """
    parts = []
    if args is not None:
        parts.extend(utils.compact_text(repr(a), 200) for a in args)
    if kwargs is not None:
        parts.extend('{}={}'.format(key, utils.compact_text(repr(val), 200))
                     for key, val in kwargs.items())
    return ', '.join(parts)
class Effect6683(BaseEffect):
    # Projected active effect: increases the target ship's signature radius,
    # scaled by the projecting module's optimal/falloff range at the
    # projection distance. Subject to stacking penalties.
    type = ('projected', 'active')
    def handler(fit, container, context, projectionRange, **kwargs):
        # Only applies when this effect is projected onto the fit.
        if ('projected' not in context):
            return
        # Targets immune to offensive modifiers are unaffected.
        if fit.ship.getModifiedItemAttr('disallowOffensiveModifiers'):
            return
        appliedBoost = container.getModifiedItemAttr('signatureRadiusBonus')
        # Attenuate the boost by the range factor at the projection distance.
        appliedBoost *= calculateRangeFactor(srcOptimalRange=container.getModifiedItemAttr('maxRange'), srcFalloffRange=container.getModifiedItemAttr('falloffEffectiveness'), distance=projectionRange)
        fit.ship.boostItemAttr('signatureRadius', appliedBoost, stackingPenalties=True, **kwargs)
def test_hrnet_backbone():
    """Exercise the HRNet backbone: forward shape, zero-init residual, frozen stages."""
    extra = dict(stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4,), num_channels=(64,)), stage2=dict(num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict(num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict(num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))
    # Basic forward pass: one output feature map at 1/4 resolution.
    model = HRNet(extra, in_channels=3)
    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 1)
    assert (feat[0].shape == torch.Size([2, 32, 56, 56]))
    # With zero_init_residual, every Bottleneck's last norm starts at zero
    # and the forward pass still produces the same output shape.
    model = HRNet(extra, in_channels=3, zero_init_residual=True)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert all_zeros(m.norm3)
    model.train()
    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert (len(feat) == 1)
    assert (feat[0].shape == torch.Size([2, 32, 56, 56]))
    # Frozen stages: the stem and the first `frozen_stages` stages (plus their
    # transition layers) must stay in eval mode with gradients disabled even
    # after model.train().
    frozen_stages = 3
    model = HRNet(extra, in_channels=3, frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    if (frozen_stages >= 0):
        assert (model.norm1.training is False)
        assert (model.norm2.training is False)
        for layer in [model.conv1, model.norm1, model.conv2, model.norm2]:
            for param in layer.parameters():
                assert (param.requires_grad is False)
    for i in range(1, (frozen_stages + 1)):
        if (i == 1):
            layer = getattr(model, 'layer1')
        else:
            layer = getattr(model, f'stage{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert (mod.training is False)
        for param in layer.parameters():
            assert (param.requires_grad is False)
        if (i < 4):
            layer = getattr(model, f'transition{i}')
            for mod in layer.modules():
                if isinstance(mod, _BatchNorm):
                    assert (mod.training is False)
            for param in layer.parameters():
                assert (param.requires_grad is False)
class BiLevelDatasetSpecification(collections.namedtuple('BiLevelDatasetSpecification', 'name, superclasses_per_split, classes_per_superclass, images_per_class, superclass_names, class_names, path, file_pattern')):
    """Specification of a dataset organized into superclasses of classes.

    Classes are numbered contiguously: first the train split's superclasses'
    classes, then validation's, then test's.
    """

    def initialize(self, restricted_classes_per_split=None):
        """Validate the spec and record any per-split class restriction.

        Raises:
            ValueError: if file_pattern is not one of the supported forms.
        """
        if (self.file_pattern not in ['{}.tfrecords', '{}_{}.tfrecords']):
            raise ValueError('file_pattern must be either "{}.tfrecords" or "{}_{}.tfrecords" to support shards or splits.')
        if (restricted_classes_per_split is not None):
            classes_per_split = {}
            for split in self.superclasses_per_split.keys():
                num_split_classes = self._count_classes_in_superclasses(self.get_superclasses(split))
                classes_per_split[split] = num_split_classes
            _check_validity_of_restricted_classes_per_split(restricted_classes_per_split, classes_per_split)
        # FIX: always set the attribute (even when the restriction is None) so
        # get_classes(), which relies on hasattr + direct access, never hits an
        # AttributeError after a default initialize() call.
        self.restricted_classes_per_split = restricted_classes_per_split

    def get_total_images_per_class(self, class_id=None, pool=None):
        """Return the image count for a class (delegates to the module helper)."""
        return get_total_images_per_class(self, class_id, pool=pool)

    def get_superclasses(self, split):
        """Return the superclass ids belonging to the given split."""
        return get_classes(split, self.superclasses_per_split)

    def _count_classes_in_superclasses(self, superclass_ids):
        # Total number of (leaf) classes across the given superclasses.
        return sum([self.classes_per_superclass[superclass_id] for superclass_id in superclass_ids])

    def _get_split_offset(self, split):
        """Return the id of the first class of the given split.

        Classes are numbered train-first, then valid, then test.
        """
        if (split == learning_spec.Split.TRAIN):
            offset = 0
        elif (split == learning_spec.Split.VALID):
            previous_superclasses = range(0, self.superclasses_per_split[learning_spec.Split.TRAIN])
            offset = self._count_classes_in_superclasses(previous_superclasses)
        elif (split == learning_spec.Split.TEST):
            previous_superclasses = range(0, (self.superclasses_per_split[learning_spec.Split.TRAIN] + self.superclasses_per_split[learning_spec.Split.VALID]))
            offset = self._count_classes_in_superclasses(previous_superclasses)
        else:
            raise ValueError('Invalid dataset split.')
        return offset

    def get_classes(self, split):
        """Return the range of absolute class ids for the given split."""
        # Lazily initialize if initialize() was never called.
        if (not hasattr(self, 'restricted_classes_per_split')):
            self.initialize()
        offset = self._get_split_offset(split)
        if ((self.restricted_classes_per_split is not None) and (split in self.restricted_classes_per_split)):
            num_split_classes = self.restricted_classes_per_split[split]
        else:
            num_split_classes = self._count_classes_in_superclasses(self.get_superclasses(split))
        return range(offset, (offset + num_split_classes))

    def get_class_ids_from_superclass_subclass_inds(self, split, superclass_id, class_inds):
        """Map (superclass, within-superclass indices) to absolute and split-relative class ids."""
        superclass_offset = self._count_classes_in_superclasses(range(superclass_id))
        class_ids = [(superclass_offset + class_ind) for class_ind in class_inds]
        rel_class_ids = [(class_id - self._get_split_offset(split)) for class_id in class_ids]
        return (rel_class_ids, class_ids)

    def to_dict(self):
        """Return a JSON-friendly dict representation (Split enums become names)."""
        ret_dict = self._asdict()
        ret_dict['__class__'] = self.__class__.__name__
        ret_dict['superclasses_per_split'] = {split.name: count for (split, count) in six.iteritems(ret_dict['superclasses_per_split'])}
        return ret_dict
def test_list_lid_groups():
    """Check the LID unit counts reported for selected subcatchment groups."""
    expected = {
        0: 'subcatchment 1 has 0 lid units',
        1: 'subcatchment 2 has 3 lid units',
        2: 'subcatchment 3 has 0 lid units',
        3: 'subcatchment 4 has 0 lid units',
        7: 'subcatchment 8 has 0 lid units',
    }
    with Simulation(MODEL_LIDS_PATH) as sim:
        for index, group in enumerate(LidGroups(sim)):
            if index in expected:
                described = 'subcatchment {} has {} lid units'.format(group, len(group))
                assert described == expected[index]
class Discriminator(BaseNetwork):
    """Convolutional discriminator built from spectral-normalized conv layers
    and downsampling residual blocks, exposing intermediate "style" features.
    """
    def __init__(self, style_dim=64, max_conv_dim=512):
        # NOTE(review): style_dim is accepted but unused in this constructor.
        super().__init__()
        dim_in = 64
        blocks = []
        blocks += [spectral_norm(nn.Conv2d(3, dim_in, 3, 1, 1))]
        # log2(256) - 2 = 6 downsampling residual blocks, doubling channels up
        # to max_conv_dim.
        repeat_num = (int(np.log2(256)) - 2)
        for _ in range(repeat_num):
            dim_out = min((dim_in * 2), max_conv_dim)
            blocks += [ResnetBlock(dim_in, dim_out, downsample=True)]
            dim_in = dim_out
        blocks += [nn.LeakyReLU(0.2)]
        blocks += [spectral_norm(nn.Conv2d(dim_out, dim_out, 4, 1, 0))]
        blocks += [nn.LeakyReLU(0.2)]
        # self.model has 10 sub-modules; index 10 (used as a slice bound in
        # forward) means "run the whole stack".
        self.model = nn.Sequential(*blocks)
        self.linear = spectral_norm(nn.Conv2d(dim_out, 1, 1, 1, 0))
    def forward(self, x, layer=10):
        """Run the discriminator, also returning pooled intermediate features.

        `layer` is either a single sub-module index or a list of indices;
        spatially averaged features are captured at each requested index, and
        the remaining sub-modules (up to 10) are then applied before the final
        1x1 conv produces the flattened logits.
        """
        if isinstance(layer, list):
            feat = self.model[0:layer[0]](x)
            style_feat = [feat.mean(3).mean(2)]
            for i in range((len(layer) - 1)):
                feat = self.model[layer[i]:layer[(i + 1)]](feat)
                style_feat += [feat.mean(3).mean(2)]
            if (layer[(- 1)] != 10):
                feat = self.model[layer[(- 1)]:10](feat)
        else:
            feat = self.model[0:layer](x)
            # Global average pool over spatial dims gives the style feature.
            style_feat = feat.mean(3).mean(2)
            if (layer != 10):
                feat = self.model[layer:10](feat)
        out = self.linear(feat)
        return (out.view((- 1)), style_feat)
class ReadPoTestCase(unittest.TestCase):
    """Tests for pofile.read_po: locale/domain handling, encodings, plurals,
    multiline strings, obsolete entries, message contexts, and invalid input.
    """
    def test_preserve_locale(self):
        buf = StringIO('msgid "foo"\nmsgstr "Voh"')
        catalog = pofile.read_po(buf, locale='en_US')
        assert (Locale('en', 'US') == catalog.locale)
    def test_locale_gets_overridden_by_file(self):
        buf = StringIO('\nmsgid ""\nmsgstr ""\n"Language: en_US\\n"')
        catalog = pofile.read_po(buf, locale='de')
        assert (Locale('en', 'US') == catalog.locale)
        buf = StringIO('\nmsgid ""\nmsgstr ""\n"Language: ko-KR\\n"')
        catalog = pofile.read_po(buf, locale='de')
        assert (Locale('ko', 'KR') == catalog.locale)
    def test_preserve_domain(self):
        buf = StringIO('msgid "foo"\nmsgstr "Voh"')
        catalog = pofile.read_po(buf, domain='mydomain')
        assert (catalog.domain == 'mydomain')
    def test_applies_specified_encoding_during_read(self):
        buf = BytesIO('\nmsgid ""\nmsgstr ""\n"Project-Id-Version: 3.15\\n"\n"Report-Msgid-Bugs-To: Fliegender Zirkus <>\\n"\n"POT-Creation-Date: 2007-09-27 11:19+0700\\n"\n"PO-Revision-Date: 2007-09-27 21:42-0700\\n"\n"Last-Translator: John <>\\n"\n"Language-Team: German Lang <>\\n"\n"Plural-Forms: nplurals=2; plural=(n != 1);\\n"\n"MIME-Version: 1.0\\n"\n"Content-Type: text/plain; charset=iso-8859-1\\n"\n"Content-Transfer-Encoding: 8bit\\n"\n"Generated-By: Babel 1.0dev-r313\\n"\n\nmsgid "foo"\nmsgstr "bar"'.encode('iso-8859-1'))
        catalog = pofile.read_po(buf, locale='de_DE')
        assert (catalog.get('foo').string == 'bar')
    def test_encoding_header_read(self):
        buf = BytesIO(b'msgid ""\nmsgstr ""\n"Content-Type: text/plain; charset=mac_roman\\n"\n')
        catalog = pofile.read_po(buf, locale='xx_XX')
        assert (catalog.charset == 'mac_roman')
    def test_plural_forms_header_parsed(self):
        buf = BytesIO(b'msgid ""\nmsgstr ""\n"Plural-Forms: nplurals=42; plural=(n % 11);\\n"\n')
        catalog = pofile.read_po(buf, locale='xx_XX')
        assert (catalog.plural_expr == '(n % 11)')
        assert (catalog.num_plurals == 42)
    def test_read_multiline(self):
        buf = StringIO('msgid ""\n"Here\'s some text that\\n"\n"includesareallylongwordthatmightbutshouldnt"\n" throw us into an infinite "\n"loop\\n"\nmsgstr ""')
        catalog = pofile.read_po(buf)
        assert (len(catalog) == 1)
        # Index 1: iterating a catalog yields the header message first.
        message = list(catalog)[1]
        assert (message.id == "Here's some text that\nincludesareallylongwordthatmightbutshouldnt throw us into an infinite loop\n")
    def test_fuzzy_header(self):
        buf = StringIO('\n# Translations template for AReallyReallyLongNameForAProject.\n# Copyright (C) 2007 ORGANIZATION\n# This file is distributed under the same license as the\n# AReallyReallyLongNameForAProject project.\n# FIRST AUTHOR <>, 2007.\n#\n#, fuzzy\n')
        catalog = pofile.read_po(buf)
        assert (len(list(catalog)) == 1)
        assert list(catalog)[0].fuzzy
    def test_not_fuzzy_header(self):
        buf = StringIO('\n# Translations template for AReallyReallyLongNameForAProject.\n# Copyright (C) 2007 ORGANIZATION\n# This file is distributed under the same license as the\n# AReallyReallyLongNameForAProject project.\n# FIRST AUTHOR <>, 2007.\n#\n')
        catalog = pofile.read_po(buf)
        assert (len(list(catalog)) == 1)
        assert (not list(catalog)[0].fuzzy)
    def test_header_entry(self):
        buf = StringIO('\n# SOME DESCRIPTIVE TITLE.\n# Copyright (C) 2007 THE PACKAGE\'S COPYRIGHT HOLDER\n# This file is distributed under the same license as the PACKAGE package.\n# FIRST AUTHOR <>, 2007.\n#\n#, fuzzy\nmsgid ""\nmsgstr ""\n"Project-Id-Version: 3.15\\n"\n"Report-Msgid-Bugs-To: Fliegender Zirkus <>\\n"\n"POT-Creation-Date: 2007-09-27 11:19+0700\\n"\n"PO-Revision-Date: 2007-09-27 21:42-0700\\n"\n"Last-Translator: John <>\\n"\n"Language: de\\n"\n"Language-Team: German Lang <>\\n"\n"Plural-Forms: nplurals=2; plural=(n != 1);\\n"\n"MIME-Version: 1.0\\n"\n"Content-Type: text/plain; charset=iso-8859-2\\n"\n"Content-Transfer-Encoding: 8bit\\n"\n"Generated-By: Babel 1.0dev-r313\\n"\n')
        catalog = pofile.read_po(buf)
        assert (len(list(catalog)) == 1)
        assert (catalog.version == '3.15')
        assert (catalog.msgid_bugs_address == 'Fliegender Zirkus <>')
        assert (datetime(2007, 9, 27, 11, 19, tzinfo=FixedOffsetTimezone((7 * 60))) == catalog.creation_date)
        assert (catalog.last_translator == 'John <>')
        assert (Locale('de') == catalog.locale)
        assert (catalog.language_team == 'German Lang <>')
        assert (catalog.charset == 'iso-8859-2')
        assert list(catalog)[0].fuzzy
    def test_obsolete_message(self):
        buf = StringIO('# This is an obsolete message\n#~ msgid "foo"\n#~ msgstr "Voh"\n\n# This message is not obsolete\n#: main.py:1\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf)
        assert (len(catalog) == 1)
        assert (len(catalog.obsolete) == 1)
        message = catalog.obsolete['foo']
        assert (message.id == 'foo')
        assert (message.string == 'Voh')
        assert (message.user_comments == ['This is an obsolete message'])
    def test_obsolete_message_ignored(self):
        buf = StringIO('# This is an obsolete message\n#~ msgid "foo"\n#~ msgstr "Voh"\n\n# This message is not obsolete\n#: main.py:1\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf, ignore_obsolete=True)
        assert (len(catalog) == 1)
        assert (len(catalog.obsolete) == 0)
    def test_multi_line_obsolete_message(self):
        buf = StringIO('# This is an obsolete message\n#~ msgid ""\n#~ "foo"\n#~ "foo"\n#~ msgstr ""\n#~ "Voh"\n#~ "Vooooh"\n\n# This message is not obsolete\n#: main.py:1\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf)
        assert (len(catalog.obsolete) == 1)
        message = catalog.obsolete['foofoo']
        assert (message.id == 'foofoo')
        assert (message.string == 'VohVooooh')
        assert (message.user_comments == ['This is an obsolete message'])
    def test_unit_following_multi_line_obsolete_message(self):
        buf = StringIO('# This is an obsolete message\n#~ msgid ""\n#~ "foo"\n#~ "fooooooo"\n#~ msgstr ""\n#~ "Voh"\n#~ "Vooooh"\n\n# This message is not obsolete\n#: main.py:1\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf)
        assert (len(catalog) == 1)
        message = catalog['bar']
        assert (message.id == 'bar')
        assert (message.string == 'Bahr')
        assert (message.user_comments == ['This message is not obsolete'])
    def test_unit_before_obsolete_is_not_obsoleted(self):
        buf = StringIO('\n# This message is not obsolete\n#: main.py:1\nmsgid "bar"\nmsgstr "Bahr"\n\n# This is an obsolete message\n#~ msgid ""\n#~ "foo"\n#~ "fooooooo"\n#~ msgstr ""\n#~ "Voh"\n#~ "Vooooh"\n')
        catalog = pofile.read_po(buf)
        assert (len(catalog) == 1)
        message = catalog['bar']
        assert (message.id == 'bar')
        assert (message.string == 'Bahr')
        assert (message.user_comments == ['This message is not obsolete'])
    def test_with_context(self):
        buf = BytesIO(b'# Some string in the menu\n#: main.py:1\nmsgctxt "Menu"\nmsgid "foo"\nmsgstr "Voh"\n\n# Another string in the menu\n#: main.py:2\nmsgctxt "Menu"\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf, ignore_obsolete=True)
        assert (len(catalog) == 2)
        message = catalog.get('foo', context='Menu')
        assert (message.context == 'Menu')
        message = catalog.get('bar', context='Menu')
        assert (message.context == 'Menu')
        # Round-trip: writing the catalog back must reproduce the input.
        out_buf = BytesIO()
        pofile.write_po(out_buf, catalog, omit_header=True)
        assert (out_buf.getvalue().strip() == buf.getvalue().strip())
    def test_obsolete_message_with_context(self):
        buf = StringIO('\n# This message is not obsolete\nmsgid "baz"\nmsgstr "Bazczch"\n\n# This is an obsolete message\n#~ msgctxt "other"\n#~ msgid "foo"\n#~ msgstr "Voh"\n\n# This message is not obsolete\n#: main.py:1\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf)
        assert (len(catalog) == 2)
        assert (len(catalog.obsolete) == 1)
        message = catalog.obsolete['foo']
        assert (message.context == 'other')
        assert (message.string == 'Voh')
    def test_multiline_context(self):
        buf = StringIO('\nmsgctxt "a really long "\n"message context "\n"why?"\nmsgid "mid"\nmsgstr "mst"\n ')
        catalog = pofile.read_po(buf)
        assert (len(catalog) == 1)
        message = catalog.get('mid', context='a really long message context why?')
        assert (message is not None)
        assert (message.context == 'a really long message context why?')
    def test_with_context_two(self):
        buf = BytesIO(b'msgctxt "Menu"\nmsgid "foo"\nmsgstr "Voh"\n\nmsgctxt "Mannu"\nmsgid "bar"\nmsgstr "Bahr"\n')
        catalog = pofile.read_po(buf, ignore_obsolete=True)
        assert (len(catalog) == 2)
        message = catalog.get('foo', context='Menu')
        assert (message.context == 'Menu')
        message = catalog.get('bar', context='Mannu')
        assert (message.context == 'Mannu')
        out_buf = BytesIO()
        pofile.write_po(out_buf, catalog, omit_header=True)
        assert (out_buf.getvalue().strip() == buf.getvalue().strip()), out_buf.getvalue()
    def test_single_plural_form(self):
        buf = StringIO('msgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"')
        catalog = pofile.read_po(buf, locale='ja_JP')
        assert (len(catalog) == 1)
        assert (catalog.num_plurals == 1)
        message = catalog['foo']
        assert (len(message.string) == 1)
    def test_singular_plural_form(self):
        buf = StringIO('msgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Vohs"')
        catalog = pofile.read_po(buf, locale='nl_NL')
        assert (len(catalog) == 1)
        assert (catalog.num_plurals == 2)
        message = catalog['foo']
        assert (len(message.string) == 2)
    def test_more_than_two_plural_forms(self):
        buf = StringIO('msgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Vohs"\nmsgstr[2] "Vohss"')
        catalog = pofile.read_po(buf, locale='lv_LV')
        assert (len(catalog) == 1)
        assert (catalog.num_plurals == 3)
        message = catalog['foo']
        assert (len(message.string) == 3)
        assert (message.string[2] == 'Vohss')
    def test_plural_with_square_brackets(self):
        buf = StringIO('msgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh [text]"\nmsgstr[1] "Vohs [text]"')
        catalog = pofile.read_po(buf, locale='nb_NO')
        assert (len(catalog) == 1)
        assert (catalog.num_plurals == 2)
        message = catalog['foo']
        assert (len(message.string) == 2)
    def test_obsolete_plural_with_square_brackets(self):
        buf = StringIO('#~ msgid "foo"\n#~ msgid_plural "foos"\n#~ msgstr[0] "Voh [text]"\n#~ msgstr[1] "Vohs [text]"\n')
        catalog = pofile.read_po(buf, locale='nb_NO')
        assert (len(catalog) == 0)
        assert (len(catalog.obsolete) == 1)
        assert (catalog.num_plurals == 2)
        message = catalog.obsolete[('foo', 'foos')]
        assert (len(message.string) == 2)
        assert (message.string[0] == 'Voh [text]')
        assert (message.string[1] == 'Vohs [text]')
    def test_missing_plural(self):
        buf = StringIO('msgid ""\nmsgstr ""\n"Plural-Forms: nplurals=3; plural=(n < 2) ? n : 2;\n"\n\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh [text]"\nmsgstr[1] "Vohs [text]"\n')
        catalog = pofile.read_po(buf, locale='nb_NO')
        assert (len(catalog) == 1)
        assert (catalog.num_plurals == 3)
        message = catalog['foo']
        # Missing trailing plural forms are padded with empty strings.
        assert (len(message.string) == 3)
        assert (message.string[0] == 'Voh [text]')
        assert (message.string[1] == 'Vohs [text]')
        assert (message.string[2] == '')
    def test_missing_plural_in_the_middle(self):
        buf = StringIO('msgid ""\nmsgstr ""\n"Plural-Forms: nplurals=3; plural=(n < 2) ? n : 2;\n"\n\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh [text]"\nmsgstr[2] "Vohs [text]"\n')
        catalog = pofile.read_po(buf, locale='nb_NO')
        assert (len(catalog) == 1)
        assert (catalog.num_plurals == 3)
        message = catalog['foo']
        assert (len(message.string) == 3)
        assert (message.string[0] == 'Voh [text]')
        assert (message.string[1] == '')
        assert (message.string[2] == 'Vohs [text]')
    def test_abort_invalid_po_file(self):
        invalid_po = '\n msgctxt ""\n "{"checksum": , "cxt": "collector_thankyou", "id": "\n "}"\n msgid ""\n "Thank you very much for your time.\n"\n "If you have any questions regarding this survey, please contact Fulano "\n "at "\n msgstr "Merci de prendre le temps de remplir le sondage.\n Pour toute question, veuillez communiquer avec Fulano a \n "\n '
        invalid_po_2 = '\n msgctxt ""\n "{"checksum": , "cxt": "collector_thankyou", "id": "\n "}"\n msgid ""\n "Thank you very much for your time.\n"\n "If you have any questions regarding this survey, please contact Fulano "\n "at ."\n msgstr "Merci de prendre le temps de remplir le sondage.\n Pour toute question, veuillez communiquer avec Fulano a \n "\n '
        # Without abort_invalid, invalid input still yields a Catalog.
        buf = StringIO(invalid_po)
        output = pofile.read_po(buf, locale='fr', abort_invalid=False)
        assert isinstance(output, Catalog)
        buf = StringIO(invalid_po_2)
        with pytest.raises(pofile.PoFileError):
            pofile.read_po(buf, locale='fr', abort_invalid=True)
        buf = StringIO(invalid_po_2)
        output = pofile.read_po(buf, locale='fr', abort_invalid=False)
        assert isinstance(output, Catalog)
        buf = StringIO(invalid_po_2)
        with pytest.raises(pofile.PoFileError):
            pofile.read_po(buf, locale='fr', abort_invalid=True)
    def test_invalid_pofile_with_abort_flag(self):
        parser = pofile.PoFileParser(None, abort_invalid=True)
        lineno = 10
        line = 'Algo esta mal'
        msg = 'invalid file'
        with pytest.raises(pofile.PoFileError):
            parser._invalid_pofile(line, lineno, msg)
class SAM(nn.Module):
    """Supervised attention module: produces a restored image from features
    and uses it to gate the features passed to the next stage.
    """
    def __init__(self, n_feat, kernel_size, bias):
        super(SAM, self).__init__()
        # Feature transform, feature->image projection, image->attention map.
        self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
        self.conv2 = conv(n_feat, 1, kernel_size, bias=bias)
        self.conv3 = conv(1, n_feat, kernel_size, bias=bias)

    def forward(self, x, x_img):
        """Return (gated features with residual, restored image)."""
        features = self.conv1(x)
        img = self.conv2(x) + x_img
        attention = torch.sigmoid(self.conv3(img))
        features = features * attention + x
        return features, img
def main() -> None:
    """Lint the given files with usort in parallel, printing one JSON lint
    message per line.

    Raises:
        RuntimeError: if linting any file fails, naming the file and chaining
            the original exception as the cause.
    """
    parser = argparse.ArgumentParser(description='Format files with usort.', fromfile_prefix_chars='')
    parser.add_argument('filenames', nargs='+', help='paths to lint')
    args = parser.parse_args()
    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count(), thread_name_prefix='Thread') as executor:
        futures = {executor.submit(check_file, filename): filename for filename in args.filenames}
        for future in concurrent.futures.as_completed(futures):
            try:
                for lint_message in future.result():
                    print(json.dumps(lint_message._asdict()), flush=True)
            except Exception as err:
                # Chain the underlying error explicitly so both the failing
                # filename and the root cause appear in the traceback.
                raise RuntimeError(f'Failed at {futures[future]}') from err
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
    """Build a tf-slim Dataset for one split of the dataset.

    Args:
        split_name: split to load; must be a key of SPLITS_TO_SIZES.
        dataset_dir: directory containing the TFRecord files.
        file_pattern: optional filename pattern with a '%s' for the split name
            (defaults to _FILE_PATTERN).
        reader: optional TF reader class (defaults to tf.TFRecordReader).

    Returns:
        A slim.dataset.Dataset for the requested split.

    Raises:
        ValueError: if split_name is not recognized.
    """
    if (split_name not in SPLITS_TO_SIZES):
        raise ValueError(('split name %s was not recognized.' % split_name))
    if (not file_pattern):
        file_pattern = _FILE_PATTERN
    file_pattern = os.path.join(dataset_dir, (file_pattern % split_name))
    if (not reader):
        reader = tf.TFRecordReader
    # How raw tf.Example features are stored on disk...
    keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='png'), 'image/class/label': tf.FixedLenFeature([], tf.int64, default_value=tf.zeros([], dtype=tf.int64))}
    # ...and how they are decoded into tensors (32x32 RGB images + labels).
    items_to_handlers = {'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]), 'label': slim.tfexample_decoder.Tensor('image/class/label')}
    decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
    return slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES)
def direct_junction_left_lane_fixture():
    """Fixture: a 3+3-lane main road, a single-left-lane side road, and a
    direct-junction creator connecting them."""
    creator = xodr.DirectJunctionCreator(id=400, name='second_highway_connection')
    main = xodr.create_road(xodr.Line(200), 1, right_lanes=3, left_lanes=3)
    side = xodr.create_road(xodr.Line(200), 2, right_lanes=0, left_lanes=1)
    return (main, side, creator)
def main():
    """Entry point: seed RNGs if requested, derive distributed settings, and
    launch one worker per GPU (or a single worker)."""
    args = parser.parse_args()
    if args.seed is not None:
        # Deterministic mode: seed both RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    if args.dist_url == 'env://' and args.world_size == -1:
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    gpus_on_node = torch.cuda.device_count()
    print('using GPUs', gpus_on_node)
    if not args.multiprocessing_distributed:
        main_worker(args.gpu, gpus_on_node, args)
    else:
        # world_size becomes the total process count across all nodes.
        args.world_size = gpus_on_node * args.world_size
        mp.spawn(main_worker, nprocs=gpus_on_node, args=(gpus_on_node, args))
def test_typeddict_attribute_errors(c: Converter) -> None:
    """TypedDict structuring errors are rendered with the right field paths.

    Uses ``raises`` throughout (as the final case already did) so the test
    fails loudly if ``structure`` unexpectedly succeeds — the original
    ``try/except`` + assert pattern passed vacuously when nothing raised.
    """
    class C(TypedDict):
        a: int
        b: int
    with raises(Exception) as exc:
        c.structure({}, C)
    assert (transform_error(exc.value) == ['required field missing $.a', 'required field missing $.b'])
    with raises(Exception) as exc:
        c.structure({'b': 1}, C)
    assert (transform_error(exc.value) == ['required field missing $.a'])
    with raises(Exception) as exc:
        c.structure({'a': 1, 'b': 'str'}, C)
    assert (transform_error(exc.value) == ['invalid value for type, expected int $.b'])
    class D(TypedDict):
        c: C
    with raises(Exception) as exc:
        c.structure({}, D)
    assert (transform_error(exc.value) == ['required field missing $.c'])
    with raises(Exception) as exc:
        c.structure({'c': {}}, D)
    assert (transform_error(exc.value) == ['required field missing $.c.a', 'required field missing $.c.b'])
    with raises(Exception) as exc:
        c.structure({'c': 1}, D)
    assert (transform_error(exc.value) == ['expected a mapping $.c'])
    with raises(Exception) as exc:
        c.structure({'c': {'a': 'str'}}, D)
    assert (transform_error(exc.value) == ['invalid value for type, expected int $.c.a', 'required field missing $.c.b'])
    class E(TypedDict):
        a: Optional[int]
    with raises(Exception) as exc:
        c.structure({'a': 'str'}, E)
    # The rendered union name differs across Python versions.
    tn = (Optional[int].__name__ if hasattr(Optional[int], '__name__') else repr(Optional[int]))
    assert (transform_error(exc.value) == [f'invalid value for type, expected {tn} $.a'])
def nufft_adjoint(input, coord, oshape, oversamp=1.25, width=4):
    """Adjoint non-uniform FFT: grid non-Cartesian k-space samples back to an image.

    Args:
        input: non-Cartesian samples to be gridded.
        coord: sample coordinates; the last axis has length ``ndim``.
        oshape: desired output (image-domain) shape.
        oversamp: grid oversampling factor.
        width: Kaiser-Bessel interpolation kernel width.

    Returns:
        Image-domain array resized to ``oshape``.
    """
    ndim = coord.shape[(- 1)]
    # Kaiser-Bessel shape parameter derived from width and oversampling.
    beta = (np.pi * (((((width / oversamp) * (oversamp - 0.5)) ** 2) - 0.8) ** 0.5))
    oshape = list(oshape)
    os_shape = _get_oversamp_shape(oshape, ndim, oversamp)
    # Map coordinates onto the oversampled grid.
    coord = _scale_coord(coord, oshape, oversamp)
    output = interp.gridding(input, coord, os_shape, kernel='kaiser_bessel', width=width, param=beta)
    # Normalize by the kernel footprint before transforming.
    output /= (width ** ndim)
    # Inverse FFT over the gridded (trailing ndim) axes, then crop.
    output = ifft(output, axes=range((- ndim), 0), norm=None)
    output = util.resize(output, oshape)
    output *= (util.prod(os_shape[(- ndim):]) / (util.prod(oshape[(- ndim):]) ** 0.5))
    # Undo the apodization introduced by the gridding kernel.
    output = _apodize(output, ndim, oversamp, width, beta)
    return output
class RaveberryTest(TransactionTestCase):
    """Base test case: starts one celery worker per test class and logs in an
    admin client before every test."""
    # Handle to the class-wide celery worker context manager.
    celery_worker: Any

    @classmethod
    def setUpClass(cls) -> None:
        # Must be a classmethod — unittest invokes setUpClass on the class
        # itself, so without the decorator the call fails.
        super().setUpClass()
        cls.celery_worker = start_worker(app, perform_ping_check=False)
        cls.celery_worker.__enter__()
        logging.getLogger().setLevel(logging.WARNING)

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        cls.celery_worker.__exit__(None, None, None)

    def setUp(self) -> None:
        self.client = Client()
        util.admin_login(self.client)
        redis.start()

    def _poll_state(self, state_url: str, break_condition: Callable[[dict], bool], timeout: float = 1) -> dict:
        """Poll ``state_url`` every 0.1s until ``break_condition`` holds or
        ``timeout`` seconds elapse; fail the test on timeout."""
        state: dict = {}  # defined up front so the fail message below is safe
        timeout *= 10
        counter = 0
        while (counter < timeout):
            state = json.loads(self.client.get(reverse(state_url)).content)
            if break_condition(state):
                break
            time.sleep(0.1)
            counter += 1
        else:
            # while/else: only reached when the loop exhausted without break.
            self.fail(f'enqueue timeout. state: {state}')
        return state

    def _poll_musiq_state(self, break_condition: Callable[[dict], bool], timeout: float = 1) -> dict:
        return self._poll_state('musiq-state', break_condition, timeout=timeout)

    def _poll_lights_state(self, break_condition: Callable[[dict], bool], timeout: float = 1) -> dict:
        return self._poll_state('lights-state', break_condition, timeout=timeout)
def test_change_level_undo(pytester: Pytester) -> None:
    """A ``caplog.set_level`` in one test must be undone before the next test."""
    source = "\n import logging\n\n def test1(caplog):\n caplog.set_level(logging.INFO)\n # using + operator here so fnmatch_lines doesn't match the code in the traceback\n logging.info('log from ' + 'test1')\n assert 0\n\n def test2(caplog):\n # using + operator here so fnmatch_lines doesn't match the code in the traceback\n logging.info('log from ' + 'test2')\n assert 0\n "
    pytester.makepyfile(source)
    outcome = pytester.runpytest()
    # test1's INFO record is captured; test2's must not be (level restored).
    outcome.stdout.fnmatch_lines(['*log from test1*', '*2 failed in *'])
    outcome.stdout.no_fnmatch_line('*log from test2*')
def test_canonicalize_vcf(shared_datadir, tmp_path):
    """Canonicalization reorders INFO fields into the canonical key order."""
    source_path = path_for_test(shared_datadir, 'sample.vcf.gz')
    target = tmp_path.joinpath('vcf.zarr').as_posix()
    canonicalize_vcf(source_path, target)
    # The input keeps its original INFO ordering...
    with gzip.open(source_path, 'rt') as fh:
        assert 'NS=3;DP=9;AA=G;AN=6;AC=3,1' in fh.read()
    # ...while the canonicalized output uses the canonical ordering.
    with open(target, 'r') as fh:
        assert 'NS=3;AN=6;AC=3,1;DP=9;AA=G' in fh.read()
def m3u8_to_mp3(url, name):
    """Download the stream at ``url`` and write it to ``name`` as an mp3.

    Args:
        url: m3u8/ts stream URL passed to ``get_ts``.
        name: destination audio file path.

    Raises:
        TypeError: if the downloaded content is empty.
    """
    ts_content = get_ts(url)
    if ts_content is None:
        raise TypeError('Empty mp3 content to save.')
    tmp_file = NamedTemporaryFile(delete=False, suffix='.mp3')
    try:
        tmp_file.write(ts_content)
        tmp_file.close()
        audioclip = AudioFileClip(tmp_file.name)
        try:
            audioclip.write_audiofile(name, bitrate='3000k')
        finally:
            audioclip.close()
    finally:
        # delete=False means nothing else cleans this up — remove the temp
        # file even when decoding or writing fails (the original leaked it).
        os.unlink(tmp_file.name)
# NOTE(review): the original line here read ``(shared_memory=True)`` — it looks
# like a decorator whose ``@name`` prefix was lost (e.g. ``@<runner>(shared_memory=True)``);
# restore it against the original source.
def createvectors(smm=None, sm=None, vec_size=(10 ** 6)):
    """Create two random float32 vectors and allocate matching shared-memory
    blocks for them, returning the metadata needed to reattach elsewhere.

    Args:
        smm: a started ``SharedMemoryManager`` used to allocate the blocks.
        sm: unused here; kept for interface compatibility.
        vec_size: number of elements per vector. The original hard-coded value
            was lost in extraction (the line read ``vec_size =``), so it is now
            a parameter with a default.

    Returns:
        Tuple of (block-a name, block-b name, a.shape, b.shape, a.dtype, b.dtype).
    """
    start = timer()
    # NOTE(review): ``a = b = ...`` makes a and b the *same* array; two
    # independent vectors may have been intended — confirm against the original.
    a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
    shma = smm.SharedMemory(a.nbytes)
    shmb = smm.SharedMemory(b.nbytes)
    names = (shma.name, shmb.name, a.shape, b.shape, a.dtype, b.dtype)
    duration = (timer() - start)
    print('Create Vectors Time: ', duration)
    return names
def _get_best_indexes(logits, n_best_size):
index_and_score = sorted(enumerate(logits), key=(lambda x: x[1]), reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if (i >= n_best_size):
break
best_indexes.append(index_and_score[i][0])
return best_indexes |
def main(cfg, args):
    """Evaluate a pretrained Token3d model on ``args.eval_ds``/``args.eval_set``.

    Loads weights from ``args.pretrained`` (exits if the file is missing),
    resizes the temporal positional embedding when the checkpoint disagrees,
    then runs ``Evaluator`` over the test dataloader.
    """
    print(f'...Evaluating on {args.eval_ds.lower()} {args.eval_set.lower()} set...')
    device = 'cuda'
    model = Token3d(num_blocks=cfg.MODEL.ENCODER.NUM_BLOCKS, num_heads=cfg.MODEL.ENCODER.NUM_HEADS, st_mode=cfg.MODEL.ENCODER.SPA_TEMP_MODE, mask_ratio=cfg.MODEL.MASK_RATIO, temporal_layers=cfg.MODEL.TEMPORAL_LAYERS, temporal_num_heads=cfg.MODEL.TEMPORAL_NUM_HEADS, enable_temp_modeling=cfg.MODEL.ENABLE_TEMP_MODELING, enable_temp_embedding=cfg.MODEL.ENABLE_TEMP_EMBEDDING)
    print('model params:{:.3f}M (/1000^2)'.format((sum([p.numel() for p in model.parameters()]) / (1000 ** 2))))
    if ((args.pretrained != '') and os.path.isfile(args.pretrained)):
        checkpoint = torch.load(args.pretrained, map_location='cpu')
        # 'history_best_peformance' (sic) is the key actually written by the
        # training code; fall back to 'performance' for older checkpoints.
        history_best_performance = (checkpoint['history_best_peformance'] if ('history_best_peformance' in checkpoint) else checkpoint['performance'])
        state_dict = {}
        # Strip a DataParallel 'module.' prefix; otherwise keep only keys the
        # current model actually has.
        for (k, w) in checkpoint['state_dict'].items():
            if k.startswith('module.'):
                state_dict[k[len('module.'):]] = w
            elif (k in model.state_dict()):
                state_dict[k] = w
            else:
                continue
        # Recreate the temporal positional embedding with the checkpoint's
        # sequence length so load_state_dict can copy it.
        temp_embedding_shape = state_dict['temporal_pos_embedding'].shape
        if (model.temporal_pos_embedding.shape[1] != temp_embedding_shape[1]):
            model.temporal_pos_embedding = torch.nn.Parameter(torch.zeros(1, temp_embedding_shape[1], temp_embedding_shape[2]))
        model.load_state_dict(state_dict, strict=False)
        print(f'==> Loaded pretrained model from {args.pretrained}...')
        print(f'==> History best Performance on 3DPW test set {history_best_performance}')
    else:
        # A pretrained model is mandatory for evaluation.
        print(f'{args.pretrained} is not a pretrained model!!!!')
        exit()
    model = model.to(device)
    transforms = torchvision.transforms.Compose([CropVideo(cfg.DATASET.HEIGHT, cfg.DATASET.WIDTH, default_bbox_scale=cfg.EVAL.BBOX_SCALE), StackFrames(), ToTensorVideo(), NormalizeVideo()])
    test_db = VideoDataset(args.eval_ds.lower(), set=args.eval_set.lower(), transforms=transforms, sample_pool=cfg.EVAL.SAMPLE_POOL, random_sample=False, random_start=False, verbose=True, debug=cfg.DEBUG)
    test_loader = DataLoader(dataset=test_db, batch_size=cfg.EVAL.BATCH_SIZE, shuffle=False, num_workers=cfg.NUM_WORKERS)
    Evaluator().run(model=model, dataloader=test_loader, seqlen=cfg.EVAL.SEQLEN, interp=cfg.EVAL.INTERPOLATION, save_path=args.output_path, device=cfg.DEVICE)
def test_DecisionMatrix_self_eq(data_values):
    """A DecisionMatrix is identical with, and compares equal to, itself."""
    mtx, objectives, weights, alternatives, criteria = data_values(seed=42)
    decision_matrix = data.mkdm(matrix=mtx, objectives=objectives, weights=weights, alternatives=alternatives, criteria=criteria)
    alias = decision_matrix
    assert alias is decision_matrix
    assert decision_matrix.equals(alias)
def add_aoi_metadata_to_map(aoi, map):
    """Draw the given AOI's bounding polygon and metadata tooltip onto ``map``."""
    row = dataset.loc[aoi]
    outline_style = {'color': '#c0392b', 'fill': False}
    # NOTE(review): ``eval`` on a stored string — fine for trusted data, but
    # consider ast.literal_eval if 'bounds' can come from untrusted sources.
    polygon = Polygon(AOIGenerator.bounds_to_bounding_box(*eval(row['bounds'])))
    geojson_layer = folium.GeoJson(polygon, style_function=(lambda x: outline_style))
    geojson_layer.add_to(map)
    tooltip_html = f"<strong>AOI:</strong> {row.name} <br> <strong>IPCC:</strong> {row['IPCC']} <br> <strong>LCCS:</strong> {row['SMOD Class']}"
    folium.Tooltip(tooltip_html).add_to(map)
    # Clicking anywhere pops up the clicked coordinates.
    folium.LatLngPopup().add_to(map)
    return map
def _walk_refs(log):
for (i, line) in enumerate(log.split('\n')):
for ref in line.split(', '):
match = re.fullmatch('origin/chromium/(\\d+)', ref.strip())
if match:
return (int(match.group(1)), i)
assert False, 'Failed to find versioned commit - log too small?' |
def ADMM_bqp_unconstrained(A, b, all_params=None):
    """ADMM solver for the unconstrained binary quadratic program
    min_x x^T A x + b^T x with x binarized via two splittings (box + Lp ball).

    Args:
        A: (n, n) quadratic cost matrix.
        b: (n, 1) linear cost vector.
        all_params: optional parameter dict; missing keys are filled from the
            defaults below. Must provide 'x0', the initial iterate.

    Returns:
        (best_sol, x_sol, y1, y2, time_elapsed): iterate with best binarized
        objective, final relaxed iterate, the two splitting variables, and
        accumulated per-iteration wall time.
    """
    initial_params = {'std_threshold': 1e-06, 'gamma_val': 1.0, 'gamma_factor': 0.99, 'initial_rho': 5, 'learning_fact': (1 + (3 / 100)), 'rho_upper_limit': 1000, 'history_size': 5, 'rho_change_step': 5, 'rel_tol': 1e-05, 'stop_threshold': 0.001, 'max_iters': 10000.0, 'projection_lp': 2, 'pcg_tol': 0.001, 'pcg_maxiters': 1000.0}
    # Merge caller parameters over the defaults ('is None', not '== None').
    if (all_params is None):
        all_params = initial_params
    else:
        for k in initial_params.keys():
            if (k not in all_params.keys()):
                all_params[k] = initial_params[k]
    n = b.size
    stop_threshold = all_params['stop_threshold']
    std_threshold = all_params['std_threshold']
    max_iters = all_params['max_iters']
    initial_rho = all_params['initial_rho']
    rho_change_step = all_params['rho_change_step']
    gamma_val = all_params['gamma_val']
    learning_fact = all_params['learning_fact']
    history_size = all_params['history_size']
    projection_lp = all_params['projection_lp']
    gamma_factor = all_params['gamma_factor']
    pcg_tol = all_params['pcg_tol']
    pcg_maxiters = all_params['pcg_maxiters']
    x_sol = all_params['x0']
    y1 = x_sol
    y2 = x_sol
    z1 = np.zeros_like(y1)
    z2 = np.zeros_like(y2)
    rho1 = initial_rho
    rho2 = rho1
    obj_list = []
    std_obj = 1
    prev_idx = x_sol
    best_sol = prev_idx
    best_bin_obj = compute_cost(best_sol, A, b)
    time_elapsed = 0
    # Loop variable renamed from 'iter' to avoid shadowing the builtin.
    for it in range(int(max_iters)):
        t1 = time.time()
        # y-updates: project onto the box [0,1]^n and the shifted Lp ball.
        y1 = project_box((x_sol + (z1 / rho1)))
        y2 = project_shifted_Lp_ball((x_sol + (z2 / rho2)), projection_lp)
        # x-update: CG solve of (2A + (rho1+rho2)I) x = rho1*y1 + rho2*y2 - (b + z1 + z2),
        # warm-started at y1.
        row = np.array(range(n))
        colum = np.array(range(n))
        data = ((rho1 + rho2) * np.ones(n))
        sparse_matrix = csc_matrix((data, (row, colum)), shape=(n, n))
        # NOTE(review): SciPy renamed cg's 'tol' to 'rtol' (removed in 1.14) —
        # update if the SciPy pin changes.
        (x_sol, cg_flag) = linalg.cg(((2 * A) + sparse_matrix), (((rho1 * y1) + (rho2 * y2)) - ((b + z1) + z2)), y1, tol=pcg_tol, maxiter=pcg_maxiters)
        x_sol = x_sol.reshape((- 1), 1)
        # Dual variable updates.
        z1 = (z1 + ((gamma_val * rho1) * (x_sol - y1)))
        z2 = (z2 + ((gamma_val * rho2) * (x_sol - y2)))
        t2 = time.time()
        time_elapsed = (time_elapsed + (t2 - t1))
        # Periodically increase the penalties; gamma is floored at 1.
        if (np.mod((it + 2), rho_change_step) == 0):
            rho1 = (learning_fact * rho1)
            rho2 = (learning_fact * rho2)
            gamma_val = max((gamma_val * gamma_factor), 1)
        # Stop when both relative splitting residuals are small...
        temp1 = (np.linalg.norm((x_sol - y1)) / max(np.linalg.norm(x_sol), 2.2204e-16))
        temp2 = (np.linalg.norm((x_sol - y2)) / max(np.linalg.norm(x_sol), 2.2204e-16))
        if (max(temp1, temp2) <= stop_threshold):
            print(('iter: %d, stop_threshold: %.6f' % (it, max(temp1, temp2))))
            break
        obj_list.append(compute_cost(x_sol, A, b))
        # ...or when the recent objective history has flat-lined.
        if (len(obj_list) >= history_size):
            std_obj = compute_std_obj(obj_list, history_size)
            if (std_obj <= std_threshold):
                print(('iter: %d, std_threshold: %.6f' % (it, std_obj)))
                break
        # Track the best solution under the binarized (x >= 0.5) objective.
        cur_idx = (x_sol >= 0.5)
        prev_idx = cur_idx
        cur_obj = compute_cost(prev_idx, A, b)
        if (best_bin_obj >= cur_obj):
            best_bin_obj = cur_obj
            best_sol = x_sol
    return (best_sol, x_sol, y1, y2, time_elapsed)
def test_no_keys_with_formatting():
    """debug step with format=True logs the whole context with {k} substitution applied."""
    context = Context({'k1': 'v1', 'k2': 'x{k1}x', 'k3': [0, 1, 2], 'debug': {'format': True}})
    with patch_logger('pypyr.steps.debug', logging.INFO) as logger_mock:
        debug.run_step(context)
    expected = call("\n{'debug': {'format': True}, 'k1': 'v1', 'k2': 'xv1x', 'k3': [0, 1, 2]}")
    assert logger_mock.mock_calls == [expected]
class OHMultiView(object):
    """Produce one augmented view of an image per character in ``views``:
    'w' = weak augmentation, 's' = strong augmentation, anything else ('t')
    = deterministic test-time transform."""

    def __init__(self, mean, std, views='ww', aug='auto'):
        assert all((v in 'wst') for v in views)
        assert aug in ['rand', 'auto']
        self.views = views
        normalize = transforms.Normalize(mean=mean, std=std)
        # Strong view adds RandAugment or AutoAugment on top of the weak recipe.
        heavy_aug = RandAugmentMC(n=2, m=10) if (aug == 'rand') else ImageNetPolicy()
        self.weak = transforms.Compose([transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
        self.strong = transforms.Compose([transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), heavy_aug, transforms.ToTensor(), normalize])
        self.test = transforms.Compose([transforms.Resize((256, 256)), transforms.CenterCrop(224), transforms.ToTensor(), normalize])

    def __call__(self, x):
        # Dispatch each requested view; anything not 'w'/'s' gets the test transform.
        pipelines = {'w': self.weak, 's': self.strong}
        outputs = [pipelines.get(v, self.test)(x) for v in self.views]
        return outputs if len(outputs) > 1 else outputs[0]
def _do_download(version, download_base, to_dir, download_delay):
    # Bootstrap helper: obtain the setuptools egg (downloading and building it
    # if not already present) and make it importable in this process.
    egg = os.path.join(to_dir, ('setuptools-%s-py%d.%d.egg' % (version, sys.version_info[0], sys.version_info[1])))
    if (not os.path.exists(egg)):
        archive = download_setuptools(version, download_base, to_dir, download_delay)
        _build_egg(egg, archive, to_dir)
    # Put the egg first on sys.path so its packages win on import.
    sys.path.insert(0, egg)
    # Drop any already-imported pkg_resources so the egg's copy is used.
    if ('pkg_resources' in sys.modules):
        del sys.modules['pkg_resources']
    import setuptools
    # Record where setuptools should install itself from.
    setuptools.bootstrap_install_from = egg
def sorino_ratio(qf_series: QFSeries, frequency: Frequency, risk_free: float=0) -> float:
    """Sortino-style ratio: excess annualised growth over annualised downside
    (negative-return-only) volatility.

    NOTE(review): if the series has no negative returns the downside volatility
    may be zero/NaN and the division blows up — confirm upstream guarantees.
    """
    growth = cagr(qf_series, frequency)
    downside_returns = qf_series[qf_series < 0]
    downside_vol = get_volatility(downside_returns, frequency, annualise=True)
    return (growth - risk_free) / downside_vol
def test_bpe_codes_adapter():
    """adapt_bpe_codes strips trailing spaces and keeps the stream seekable."""
    raw = StringIO('#version:2.0\ne n \ne r \ne n</w> ')
    adapted = adapt_bpe_codes(raw)
    # Trailing spaces before the newline are removed.
    for want in ('#version:2.0\n', 'e n\n', 'e r\n'):
        assert adapted.readline() == want
    for line in adapted:
        assert line == 'e n</w>'
    # Rewinding restarts iteration from the first line.
    adapted.seek(0)
    for line in adapted:
        assert line == '#version:2.0\n'
        break
def test_jedi_completion_ordering(config, workspace):
    """Public completions must sort before private (underscore) ones."""
    position = {'line': 8, 'character': 0}
    document = Document(DOC_URI, workspace, DOC)
    config.update({'plugins': {'jedi_completion': {'resolve_at_most': math.inf}}})
    completions = pylsp_jedi_completions(config, document, position)
    sort_text_by_label = {item['label']: item['sortText'] for item in completions}
    assert sort_text_by_label['hello()'] < sort_text_by_label['_a_hello()']
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convs, each with BN) where
    the 3x3 stage supports stride/dilation with matching padding."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Padding tracks stride (1 for stride 1, 0 for stride 2)...
        padding = 2 - stride
        if downsample is not None and dilation > 1:
            # ...and is halved together with dilation when downsampling.
            dilation = dilation // 2
            padding = dilation
        assert stride == 1 or dilation == 1, 'stride and dilation must have one equals to zero at least'
        if dilation > 1:
            padding = dilation
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=padding, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Three conv stages plus the (optionally downsampled) shortcut."""
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class KnownValues(unittest.TestCase):
    """EA-ADC regression tests driven by the module-level ``myadc`` object.

    NOTE(review): every reference value below is a literal 0.0 / -0.0 — these
    look like scrubbed placeholders rather than real benchmark numbers;
    restore the actual reference values before trusting these assertions.
    """
    def test_ea_adc2(self):
        # EA-ADC(2): correlation energy plus three attachment roots.
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel(nroots=3)
        e_corr = myadc.e_corr
        self.assertAlmostEqual(e_corr, (- 0.), 6)
        self.assertAlmostEqual(e[0], (- 0.), 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 0., 6)
        self.assertAlmostEqual(p[1], 0., 6)
        self.assertAlmostEqual(p[2], 0., 6)
    def test_ea_adc2_oneroot(self):
        # Single-root solve should reproduce the lowest root above.
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel(nroots=1)
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(p[0], 0., 6)
    def test_ea_adc2x(self):
        # Extended ADC(2)-x variant.
        myadc.method = 'adc(2)-x'
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel(nroots=3)
        self.assertAlmostEqual(e[0], (- 0.), 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 0., 6)
        self.assertAlmostEqual(p[1], 0., 6)
        self.assertAlmostEqual(p[2], 0., 6)
    def test_ea_adc3(self):
        # ADC(3): check the ground-state solve, then the EA roots + analysis.
        myadc.method = 'adc(3)'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel(nroots=3)
        myadc.analyze()
        self.assertAlmostEqual(e[0], (- 0.), 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 0., 6)
        self.assertAlmostEqual(p[1], 0., 6)
        self.assertAlmostEqual(p[2], 0., 6)
class ThreeInterpolate(Function):
    """Autograd function for weighted feature interpolation backed by the
    ``_ext`` CUDA kernels (presumably three-nearest-neighbour interpolation —
    confirm against the kernel)."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """Interpolate (B, c, m) source features onto n target points.

        ``forward``/``backward`` are decorated @staticmethod as required by
        torch.autograd.Function (apply() calls them unbound).
        """
        # The CUDA kernels require contiguous inputs.
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        (B, c, m) = features.size()
        n = idx.size(1)
        # Stash what backward needs (m = number of source points).
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)
        _ext.three_interpolate(B, c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """Scatter grad_out back onto the m source features; idx and weight
        receive no gradients."""
        (idx, weight, m) = ctx.three_interpolate_for_backward
        (B, c, n) = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()
        _ext.three_interpolate_grad(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return (grad_features, None, None)
def AugmentedLayer(pad_zeros):
    """stax-style layer that widens the last axis by ``pad_zeros`` zero channels."""
    def init_fun(rng, input_shape):
        # Only the trailing dimension grows; the layer has no parameters.
        augmented_shape = input_shape[:-1] + (pad_zeros + input_shape[-1],)
        return (augmented_shape, ())

    def apply_fun(params, inputs, **kwargs):
        return _augment(inputs, pad_zeros)

    return (init_fun, apply_fun)
# NOTE(review): this bare call reads like a registration decorator whose
# ``@register...`` prefix was lost during extraction — confirm against the
# original source.
_transform('ImgPilToMultiCrop')
class ImgPilToMultiCrop(ClassyTransform):
    # Multi-crop augmentation: returns ``total_num_crops`` random resized crops
    # of one input image — ``num_crops[i]`` crops of size ``size_crops[i]``
    # drawn with scale range ``crop_scales[i]``.
    def __init__(self, total_num_crops, num_crops, size_crops, crop_scales):
        # The per-group lists must be parallel and their counts sum to total.
        assert (np.sum(num_crops) == total_num_crops)
        assert (len(size_crops) == len(num_crops))
        assert (len(size_crops) == len(crop_scales))
        trans = []
        # One RandomResizedCrop pipeline per crop, repeated per size group.
        for (i, sc) in enumerate(size_crops):
            trans.extend(([pth_transforms.Compose([pth_transforms.RandomResizedCrop(sc, scale=crop_scales[i])])] * num_crops[i]))
        self.transforms = trans
    def __call__(self, image):
        # Apply every crop transform to the same source image.
        return list(map((lambda trans: trans(image)), self.transforms))
    # NOTE(review): takes ``cls`` and acts as an alternate constructor —
    # conventionally a @classmethod; the decorator may have been stripped.
    def from_config(cls, config: Dict[(str, Any)]) -> 'ImgPilToMultiCrop':
        return cls(**config)
# NOTE(review): bare call — likely a registry decorator whose ``@`` prefix was
# lost during extraction; confirm against the original source.
_module()
class SCNetMaskHead(FCNMaskHead):
    # FCN mask head variant that can swap its plain conv stack for residual blocks.
    def __init__(self, conv_to_res=True, **kwargs):
        """
        Args:
            conv_to_res: when True, replace the ``num_convs`` 3x3 convs with
                ``num_convs // 2`` SimplifiedBasicBlock residual blocks.
        """
        super(SCNetMaskHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if conv_to_res:
            # Residual conversion assumes the standard 3x3 kernel.
            assert (self.conv_kernel_size == 3)
            self.num_res_blocks = (self.num_convs // 2)
            self.convs = ResLayer(SimplifiedBasicBlock, self.in_channels, self.conv_out_channels, self.num_res_blocks, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
class ExportToFolderDialog(Dialog):
    """Dialog asking for a destination folder and a filename pattern for
    exporting a playlist; Export is enabled only once both are provided."""
    def __init__(self, parent, pattern):
        super().__init__(title=_('Export Playlist to Folder'), transient_for=parent, use_header_bar=True)
        self.set_default_size(400, (- 1))
        self.set_resizable(True)
        self.set_border_width(6)
        self.vbox.set_spacing(6)
        self.add_button(_('_Cancel'), Gtk.ResponseType.CANCEL)
        self.add_button(_('_Export'), Gtk.ResponseType.OK)
        self.set_default_response(Gtk.ResponseType.OK)
        box = Gtk.VBox(spacing=6)
        destination_label = Gtk.Label(_('Destination folder:'))
        destination_label.set_line_wrap(True)
        destination_label.set_xalign(0.0)
        box.pack_start(destination_label, False, False, 0)
        frame = Gtk.Frame()
        self.directory_chooser = Gtk.FileChooserWidget(action=Gtk.FileChooserAction.SELECT_FOLDER)
        self.directory_chooser.set_select_multiple(False)
        self.directory_chooser.set_border_width(1)
        frame.add(self.directory_chooser)
        frame.set_shadow_type(Gtk.ShadowType.IN)
        frame.set_border_width(0)
        box.pack_start(frame, True, True, 0)
        pattern_label = Gtk.Label(_('Filename pattern:'))
        pattern_label.set_line_wrap(True)
        pattern_label.set_xalign(0.0)
        box.pack_start(pattern_label, False, False, 0)
        self.pattern_entry = UndoEntry()
        self.pattern_entry.set_text(pattern)
        box.pack_start(self.pattern_entry, False, False, 0)
        self.vbox.pack_start(box, True, True, 0)
        # Start disabled until a folder is chosen and a pattern is entered.
        self.set_response_sensitive(Gtk.ResponseType.OK, False)
        def changed(*args):
            # BUG FIX: the original called set_response_sensitive twice, so
            # the pattern check silently overrode the folder check; OK must
            # require BOTH a selected folder AND a non-empty pattern.
            has_directory = (self.directory_chooser.get_filename() is not None)
            has_pattern = bool(self.pattern_entry.get_text())
            self.set_response_sensitive(Gtk.ResponseType.OK, (has_directory and has_pattern))
        self.directory_chooser.connect('selection-changed', changed)
        self.pattern_entry.connect('changed', changed)
        self.get_child().show_all()
class ParameterQuantizer(torch.autograd.Function):
    """Autograd function that quantize-dequantizes a wrapper module's weights
    in forward and produces encoding-min/max gradients in backward.

    NOTE(review): the helpers below are invoked through the class
    (``ParameterQuantizer.compute_gradients``) and take no ``self`` — they read
    like staticmethods whose decorators were stripped; confirm against the
    original source.
    """
    def compute_gradients(tensor: torch.Tensor, grad: torch.Tensor, intermediate_result: IntermediateResult, channel_axis: int) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Compute encoding min/max gradients for ``tensor``; as a side effect
        stores the masked incoming gradient on ``tensor.grad``."""
        tensor_grad = (intermediate_result.mask_tensor * grad)
        (tensor_encoding_min_grad, tensor_encoding_max_grad) = grad_fn.calculate_gradients(tensor, grad, intermediate_result, channel_axis)
        tensor.grad = tensor_grad
        return (tensor_encoding_min_grad, tensor_encoding_max_grad)
    def quantize_parameters(trainable_wrapper, encoding_params: List):
        """Quantize-dequantize every enabled parameter of the wrapper in place,
        using the interleaved (min, max) values in ``encoding_params``."""
        for (index, named_param) in enumerate(trainable_wrapper.get_named_parameters()):
            (name, param) = named_param
            if trainable_wrapper.param_quantizers[name].enabled:
                # encoding_params is laid out as [min0, max0, min1, max1, ...].
                encoding_min = encoding_params[(index * 2)]
                encoding_max = encoding_params[((index * 2) + 1)]
                param_quantizer = trainable_wrapper.param_quantizers[name]
                (param_quantizer.scaling, param_quantizer.offset) = param_quantizer.compute_scaling_offset(encoding_min, encoding_max)
                # Replicas (e.g. DataParallel copies) must not mutate shared data.
                if (hasattr(trainable_wrapper, '_is_replica') and trainable_wrapper._is_replica):
                    param_tensor = param.data.clone()
                else:
                    param_tensor = param.data
                param.data = param_quantizer.quantize_dequantize(param_tensor, encoding_min, encoding_max)
    def backward_pass_for_parameters(trainable_wrapper) -> List[Union[(None, torch.Tensor)]]:
        """Collect encoding min/max gradients for each enabled parameter.

        NOTE(review): the early ``return [None, None]`` below aborts the whole
        loop on the first float/32-bit quantizer instead of appending per-param
        placeholders and continuing — confirm this is intentional.
        """
        param_encoding_grads = []
        for (name, param) in trainable_wrapper.get_named_parameters():
            param_quantizer = trainable_wrapper.param_quantizers[name]
            if (param_quantizer.enabled and (param.grad is not None)):
                if ((param_quantizer.bitwidth == 32) or (param_quantizer.data_type == QuantizationDataType.float)):
                    return [None, None]
                encoding_min = getattr(trainable_wrapper, f'{name}_encoding_min')
                encoding_max = getattr(trainable_wrapper, f'{name}_encoding_max')
                (_, intermediate_result) = grad_fn.calculate_forward_pass(param, param_quantizer, encoding_min, encoding_max)
                (param_encoding_min_grad, param_encoding_max_grad) = ParameterQuantizer.compute_gradients(param, param.grad, intermediate_result, param_quantizer.channel_axis)
                param_encoding_grads.append(param_encoding_min_grad)
                param_encoding_grads.append(param_encoding_max_grad)
            elif param_quantizer.enabled:
                # Enabled quantizer but no gradient: keep positional None slots.
                param_encoding_grads.append(None)
                param_encoding_grads.append(None)
        return param_encoding_grads
    def forward(ctx, input_tensor: torch.Tensor, trainable_wrapper, *encoding_params):
        """Quantize the wrapper's parameters, stash the wrapper for backward,
        and pass the activation tensor through unchanged."""
        ParameterQuantizer.quantize_parameters(trainable_wrapper, encoding_params)
        ctx.trainable_module = trainable_wrapper
        return input_tensor
    def backward(ctx, *output_grad):
        # Gradient order mirrors forward's inputs: activation grads, None for
        # the wrapper argument, then the per-parameter encoding grads.
        trainable_wrapper = ctx.trainable_module
        param_encoding_grads = ParameterQuantizer.backward_pass_for_parameters(trainable_wrapper)
        return (*output_grad, None, *param_encoding_grads)
class DebugMode(contextlib.AbstractContextManager):
    """Context manager that keeps setuptools_scm's ``defer_to_pytest`` logging
    context active between ``enable()`` and ``disable()`` (or a ``with`` span)."""
    # Class-private alias for the logging module; the leading double underscore
    # name-mangles it to ``_DebugMode__module``.
    from setuptools_scm import _log as __module
    def __init__(self) -> None:
        # ExitStack lets enable()/disable() open and close the context without
        # being lexically nested inside a ``with`` statement.
        self.__stack = contextlib.ExitStack()
    def __enter__(self) -> Self:
        self.enable()
        return self
    def __exit__(self, exc_type: (type[BaseException] | None), exc_val: (BaseException | None), exc_tb: (TracebackType | None)) -> None:
        self.disable()
    def enable(self) -> None:
        self.__stack.enter_context(self.__module.defer_to_pytest())
    def disable(self) -> None:
        # Closing the stack exits the deferred-logging context.
        self.__stack.close()
def extract_javascript(fileobj: _FileObj, keywords: Mapping[(str, _Keyword)], comment_tags: Collection[str], options: _JSOptions, lineno: int=1) -> Generator[(_ExtractionResult, None, None)]:
    """Extract localizable messages from JavaScript/JSX source.

    Walks the token stream once, tracking calls to any of ``keywords`` and
    collecting their string arguments plus adjacent translator comments, and
    yields ``(lineno, funcname, messages, comments)`` tuples.

    Args:
        fileobj: byte stream of the source file.
        keywords: gettext-like function names to look for.
        comment_tags: prefixes that mark a comment as a translator comment.
        options: extraction options ('encoding', 'jsx', 'template_string',
            'parse_template_string').
        lineno: line number the stream starts at.
    """
    from babel.messages.jslexer import Token, tokenize, unquote_string
    funcname = message_lineno = None
    messages = []
    last_argument = None
    translator_comments = []
    concatenate_next = False
    encoding = options.get('encoding', 'utf-8')
    last_token = None
    # call_stack == -1: not inside a keyword call; 0: directly inside one.
    call_stack = (- 1)
    dotted = any((('.' in kw) for kw in keywords))
    for token in tokenize(fileobj.read().decode(encoding), jsx=options.get('jsx', True), template_string=options.get('template_string', True), dotted=dotted, lineno=lineno):
        # Tagged template literal used as a keyword call, e.g. gettext`...`.
        if (funcname and (last_token and (last_token.type == 'name')) and (token.type == 'template_string')):
            message_lineno = token.lineno
            messages = [unquote_string(token.value)]
            call_stack = 0
            # Synthesize a ')' so the normal call-closing logic below fires.
            token = Token('operator', ')', token.lineno)
        if (options.get('parse_template_string') and (not funcname) and (token.type == 'template_string')):
            # Recurse into template strings to pick up embedded calls.
            (yield from parse_template_string(token.value, keywords, comment_tags, options, token.lineno))
        elif ((token.type == 'operator') and (token.value == '(')):
            if funcname:
                message_lineno = token.lineno
                call_stack += 1
        elif ((call_stack == (- 1)) and (token.type == 'linecomment')):
            value = token.value[2:].strip()
            # A '//' comment directly under another continues the same block.
            if (translator_comments and (translator_comments[(- 1)][0] == (token.lineno - 1))):
                translator_comments.append((token.lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    translator_comments.append((token.lineno, value.strip()))
                    break
        elif (token.type == 'multilinecomment'):
            # A new block comment resets any previously collected comments.
            translator_comments = []
            value = token.value[2:(- 2)].strip()
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    lines = value.splitlines()
                    if lines:
                        lines[0] = lines[0].strip()
                        lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
                    for (offset, line) in enumerate(lines):
                        translator_comments.append(((token.lineno + offset), line))
                    break
        elif (funcname and (call_stack == 0)):
            if ((token.type == 'operator') and (token.value == ')')):
                # Keyword call closed: finalize the collected arguments.
                if (last_argument is not None):
                    messages.append(last_argument)
                if (len(messages) > 1):
                    messages = tuple(messages)
                elif messages:
                    messages = messages[0]
                else:
                    messages = None
                # Drop translator comments that are not adjacent to the call.
                if (translator_comments and (translator_comments[(- 1)][0] < (message_lineno - 1))):
                    translator_comments = []
                if (messages is not None):
                    (yield (message_lineno, funcname, messages, [comment[1] for comment in translator_comments]))
                funcname = message_lineno = last_argument = None
                concatenate_next = False
                translator_comments = []
                messages = []
                call_stack = (- 1)
            elif (token.type in ('string', 'template_string')):
                new_value = unquote_string(token.value)
                # 'a' + 'b' concatenation folds into one argument.
                if concatenate_next:
                    last_argument = ((last_argument or '') + new_value)
                    concatenate_next = False
                else:
                    last_argument = new_value
            elif (token.type == 'operator'):
                if (token.value == ','):
                    if (last_argument is not None):
                        messages.append(last_argument)
                        last_argument = None
                    else:
                        # Non-string argument: keep position with a None slot.
                        messages.append(None)
                    concatenate_next = False
                elif (token.value == '+'):
                    concatenate_next = True
        elif ((call_stack > 0) and (token.type == 'operator') and (token.value == ')')):
            # Closing a nested call inside the keyword call's arguments.
            call_stack -= 1
        elif (funcname and (call_stack == (- 1))):
            # Keyword name not followed by '(' — not a call after all.
            funcname = None
        elif ((call_stack == (- 1)) and (token.type == 'name') and (token.value in keywords) and ((last_token is None) or (last_token.type != 'name') or (last_token.value != 'function'))):
            # Candidate keyword call (excluding 'function <keyword>' defs).
            funcname = token.value
        last_token = token
def simxGetObjectPosition(clientID, objectHandle, relativeToObjectHandle, operationMode):
    """Remote-API wrapper: fetch an object's position relative to another object.

    Returns (return code, [x, y, z] as a plain Python list).
    """
    position = (ct.c_float * 3)()
    ret = c_GetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode)
    # Copy the ctypes array into a regular list for the caller.
    return (ret, [position[i] for i in range(3)])
def GetUnit(itemsets):
    """Intersect the per-sequence position lists of every item in ``itemsets``
    (looked up in the module-level ``S`` table across ``SeqNum`` sequences).

    Returns (total number of surviving positions, per-sequence sorted lists).
    """
    unit = S[str(itemsets[0])][:]
    for item in itemsets[1:]:
        item_positions = S[str(item)]
        for seq_idx in range(SeqNum):
            unit[seq_idx] = sorted(set(unit[seq_idx]) & set(item_positions[seq_idx]))
    total = sum(len(unit[seq_idx]) for seq_idx in range(SeqNum))
    return (total, unit)
def main() -> None:
    """CLI entry: validate the input/output directories, build the feature
    extractor, and extract features from every video found."""
    args = _get_command_line_arguments()
    logging.getLogger().setLevel(logging.DEBUG)
    videos_dir = Path(args[Args.INPUT_VIDEOS_DIR])
    if not videos_dir.is_dir():
        raise ValueError(f'Input directory failed is_dir(): {videos_dir}')
    output_dir = Path(args[Args.FEATURES_DIR])
    output_dir.mkdir(parents=True, exist_ok=True)
    extractor = create_feature_extractor(args[Args.FEATURES], Path(args[Args.FEATURES_MODELS_DIR]))
    extract_features_from_videos(list_video_paths(videos_dir), output_dir, extractor)
class AddNewModelCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
add_new_model_parser = parser.add_parser('add-new-model')
add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.')
add_new_model_parser.add_argument('--testing_file', type=str, help='Configuration file on which to run.')
add_new_model_parser.add_argument('--path', type=str, help='Path to cookiecutter. Should only be used for testing purposes.')
add_new_model_parser.set_defaults(func=add_new_model_command_factory)
def __init__(self, testing: bool, testing_file: str, path=None, *args):
self._testing = testing
self._testing_file = testing_file
self._path = path
def run(self):
warnings.warn("The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. It is not actively maintained anymore, so might give a result that won't pass all tests and quality checks, you should use `transformers-cli add-new-model-like` instead.")
if (not _has_cookiecutter):
raise ImportError('Model creation dependencies are required to use the `add_new_model` command. Install them by running the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n')
directories = [directory for directory in os.listdir() if ('cookiecutter-template-' == directory[:22])]
if (len(directories) > 0):
raise ValueError('Several directories starting with `cookiecutter-template-` in current working directory. Please clean your directory by removing all folders starting with `cookiecutter-template-` or change your working directory.')
path_to_transformer_root = (Path(__file__).parent.parent.parent.parent if (self._path is None) else Path(self._path).parent.parent)
path_to_cookiecutter = ((path_to_transformer_root / 'templates') / 'adding_a_new_model')
if (not self._testing):
cookiecutter(str(path_to_cookiecutter))
else:
with open(self._testing_file, 'r') as configuration_file:
testing_configuration = json.load(configuration_file)
cookiecutter(str((path_to_cookiecutter if (self._path is None) else self._path)), no_input=True, extra_context=testing_configuration)
directory = [directory for directory in os.listdir() if ('cookiecutter-template-' in directory[:22])][0]
with open((directory + '/configuration.json'), 'r') as configuration_file:
configuration = json.load(configuration_file)
lowercase_model_name = configuration['lowercase_modelname']
generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'{directory}/configuration.json')
output_pytorch = ('PyTorch' in generate_tensorflow_pytorch_and_flax)
output_tensorflow = ('TensorFlow' in generate_tensorflow_pytorch_and_flax)
output_flax = ('Flax' in generate_tensorflow_pytorch_and_flax)
model_dir = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(model_dir, exist_ok=True)
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}', exist_ok=True)
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py', 'w'):
pass
shutil.move(f'{directory}/__init__.py', f'{model_dir}/__init__.py')
shutil.move(f'{directory}/configuration_{lowercase_model_name}.py', f'{model_dir}/configuration_{lowercase_model_name}.py')
def remove_copy_lines(path):
    # Rewrite the file at `path` in place, dropping every line that carries a
    # "# Copied from transformers." marker and keeping all others in order.
    with open(path, 'r') as source:
        kept = [ln for ln in source.readlines() if '# Copied from transformers.' not in ln]
    with open(path, 'w') as sink:
        sink.writelines(kept)
if output_pytorch:
if (not self._testing):
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py')
shutil.move(f'{directory}/modeling_{lowercase_model_name}.py', f'{model_dir}/modeling_{lowercase_model_name}.py')
shutil.move(f'{directory}/test_modeling_{lowercase_model_name}.py', f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py')
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py')
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py')
if output_tensorflow:
if (not self._testing):
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py')
shutil.move(f'{directory}/modeling_tf_{lowercase_model_name}.py', f'{model_dir}/modeling_tf_{lowercase_model_name}.py')
shutil.move(f'{directory}/test_modeling_tf_{lowercase_model_name}.py', f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py')
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py')
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py')
if output_flax:
if (not self._testing):
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py')
shutil.move(f'{directory}/modeling_flax_{lowercase_model_name}.py', f'{model_dir}/modeling_flax_{lowercase_model_name}.py')
shutil.move(f'{directory}/test_modeling_flax_{lowercase_model_name}.py', f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py')
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py')
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py')
shutil.move(f'{directory}/{lowercase_model_name}.mdx', f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.mdx')
shutil.move(f'{directory}/tokenization_{lowercase_model_name}.py', f'{model_dir}/tokenization_{lowercase_model_name}.py')
shutil.move(f'{directory}/tokenization_fast_{lowercase_model_name}.py', f'{model_dir}/tokenization_{lowercase_model_name}_fast.py')
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
    """Insert `lines_to_copy` into `original_file` directly below every line
    containing `line_to_copy_below`.

    The file is rewritten via a temp file from mkstemp(); the original's
    permission bits are copied onto the replacement before it is moved into
    place. Raises ValueError if no line matched, leaving the original file
    untouched.
    """
    (fh, abs_path) = mkstemp()
    line_found = False
    with fdopen(fh, 'w') as new_file:
        with open(original_file) as old_file:
            for line in old_file:
                new_file.write(line)
                if (line_to_copy_below in line):
                    line_found = True
                    for line_to_copy in lines_to_copy:
                        new_file.write(line_to_copy)
    if (not line_found):
        # Bug fix: previously the mkstemp() file was orphaned on this error
        # path; delete it before raising.
        remove(abs_path)
        raise ValueError(f'Line {line_to_copy_below} was not found in file.')
    copymode(original_file, abs_path)
    remove(original_file)
    move(abs_path, original_file)
def skip_units(line):
    # A control line that mentions generating a framework we did not select
    # marks its whole section/snippet as skippable.
    framework_flags = (
        ('generating PyTorch', output_pytorch),
        ('generating TensorFlow', output_tensorflow),
        ('generating Flax', output_flax),
    )
    return any(tag in line and not enabled for (tag, enabled) in framework_flags)
def replace_in_files(path_to_datafile):
    # Parse a "to_replace_*" data file and splice its snippets into the target
    # files it names. Simple line protocol (control lines containing '##' are
    # treated as plain content):
    #   # To replace in: "<file>"  -> start a section targeting <file>
    #   # Below: "<line>"          -> anchor line to insert the snippet under
    #   # End.                     -> flush the collected snippet via replace()
    #   # Replace with             -> discard the snippet collected so far
    #   anything else              -> collected as snippet content
    # Sections for frameworks that were not generated are skipped (skip_units).
    # The data file itself is deleted once fully processed.
    with open(path_to_datafile) as datafile:
        lines_to_copy = []
        skip_file = False
        skip_snippet = False
        for line in datafile:
            if (('# To replace in: ' in line) and ('##' not in line)):
                file_to_replace_in = line.split('"')[1]
                skip_file = skip_units(line)
            elif (('# Below: ' in line) and ('##' not in line)):
                line_to_copy_below = line.split('"')[1]
                skip_snippet = skip_units(line)
            elif (('# End.' in line) and ('##' not in line)):
                if ((not skip_file) and (not skip_snippet)):
                    # NOTE(review): relies on '# To replace in:' and '# Below:'
                    # having appeared earlier; a malformed data file would hit a
                    # NameError here.
                    replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                lines_to_copy = []
            elif (('# Replace with' in line) and ('##' not in line)):
                lines_to_copy = []
            elif ('##' not in line):
                lines_to_copy.append(line)
    remove(path_to_datafile)
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py')
os.rmdir(directory) |
class RSAPrivateKey(PrivateKey):
    """2048-bit RSA private key supporting signing, pickling, and a lazily
    created process-wide shared instance."""

    def __init__(self):
        # Standard public exponent 65537 with a 2048-bit modulus.
        self._private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    # Process-wide singleton storage; created on first use in process_wide().
    _process_wide_key = None
    _process_wide_key_lock = threading.RLock()
    def process_wide(cls) -> RSAPrivateKey:
        # NOTE(review): takes `cls` but no @classmethod decorator is visible —
        # confirm the decorator was not lost in transit.
        # Double-checked locking: the unlocked read avoids contention once the
        # key exists; the locked re-check prevents creating it twice.
        if (cls._process_wide_key is None):
            with cls._process_wide_key_lock:
                if (cls._process_wide_key is None):
                    cls._process_wide_key = cls()
        return cls._process_wide_key
    def sign(self, data: bytes) -> bytes:
        # Returns the signature base64-encoded, not raw bytes.
        signature = self._private_key.sign(data, _RSA_PADDING, _RSA_HASH_ALGORITHM)
        return base64.b64encode(signature)
    def get_public_key(self) -> RSAPublicKey:
        return RSAPublicKey(self._private_key.public_key())
    def __getstate__(self):
        # Pickle support: the key object itself is not picklable, so serialize
        # it to unencrypted OpenSSH PEM bytes in the state dict.
        state = self.__dict__.copy()
        state['_private_key'] = self._private_key.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.OpenSSH, encryption_algorithm=serialization.NoEncryption())
        return state
    def __setstate__(self, state):
        # Inverse of __getstate__: reload the key object from the PEM bytes.
        self.__dict__.update(state)
        self._private_key = serialization.load_ssh_private_key(self._private_key, password=None)
class Effect604(BaseEffect):
    """Passive hull effect: boosts the 'speed' attribute of fitted modules that
    require the Large Projectile Turret skill, by the hull's shipBonusMB2
    amount, scaled by the Minmatar Battleship skill."""
    # Effect category consumed by the fitting engine.
    type = 'passive'
    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): no `self` parameter — handlers appear to be invoked as
        # plain functions; confirm against BaseEffect's dispatch convention.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Projectile Turret')), 'speed', ship.getModifiedItemAttr('shipBonusMB2'), skill='Minmatar Battleship', **kwargs)
def process_dataset(fasta_dir: Path, h5_dir: Optional[Path], glob_pattern: str, num_workers: int, tokenizer_file: Path, tokenizer_blocksize: int, kmer_size: int, train_val_test_split: Optional[Dict[str, float]], node_rank: int, num_nodes: int, subsample: int) -> None:
    """Tokenize FASTA files into per-file HDF5 datasets with a process pool.

    Already-converted inputs (a matching ``*.h5`` anywhere under ``h5_dir``,
    including the train/val/test subdirectories) are skipped, so re-running
    resumes the job. With ``num_nodes > 1`` the file list is sharded into
    contiguous chunks by ``node_rank``; the last node absorbs the remainder.

    Raises
    ------
    ValueError
        If a required path argument is falsy, or every input file has already
        been processed.
    """
    if (not fasta_dir):
        raise ValueError('Fasta dir not present')
    if (not tokenizer_file):
        raise ValueError('Tokenizer file not present')
    if (not h5_dir):
        raise ValueError('Output dir not present')
    h5_dir.mkdir(exist_ok=True)
    tokenizer = PreTrainedTokenizerFast(tokenizer_object=Tokenizer.from_file(str(tokenizer_file)))
    # The raw tokenizer has no pad token; downstream batching needs one.
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    files = list(fasta_dir.glob(glob_pattern))
    out_files = [(h5_dir / f'{f.stem}.h5') for f in files]
    # Resume support: recursive glob also catches files placed in the
    # train/val/test subdirectories on a previous run.
    already_done = set((f.name for f in h5_dir.glob('**/*.h5')))
    if (len(already_done) == len(files)):
        raise ValueError(f'Already processed all files in {fasta_dir}')
    (files, out_files) = zip(*[(fin, fout) for (fin, fout) in zip(files, out_files) if (fout.name not in already_done)])
    if (train_val_test_split is not None):
        (h5_dir / 'train').mkdir(exist_ok=True)
        (h5_dir / 'test').mkdir(exist_ok=True)
        (h5_dir / 'val').mkdir(exist_ok=True)
    if (num_nodes > 1):
        chunk_size = (len(files) // num_nodes)
        start_idx = (node_rank * chunk_size)
        end_idx = (start_idx + chunk_size)
        if ((node_rank + 1) == num_nodes):
            end_idx = len(files)
        # Bug fix: the message previously had an unbalanced '(' — closing
        # parenthesis added.
        print(f'Node {node_rank}/{num_nodes} starting at {start_idx}, ending at {end_idx} (len(files)={len(files)!r})')
        files = files[start_idx:end_idx]
        out_files = out_files[start_idx:end_idx]
    print(f'Processing {len(files)} files from {fasta_dir}...')
    func = functools.partial(H5Dataset.preprocess, tokenizer=tokenizer, block_size=tokenizer_blocksize, train_val_test_split=train_val_test_split, subsample=subsample, kmer_size=kmer_size)
    with ProcessPoolExecutor(max_workers=num_workers) as pool:
        # Drain the iterator so worker exceptions propagate to the caller.
        for _ in pool.map(func, files, out_files):
            pass
    print(f'Completed, saved files to {h5_dir}')
def _parse_converter(ctx: mypy.plugin.ClassDefContext, converter_expr: (Expression | None)) -> (Converter | None):
    """Analyze the ``converter=`` argument of an attrs field.

    Extracts the type the generated ``__init__`` should accept for the field
    (``init_type``) and, when safe, the type the attribute ends up with
    (``ret_type``). Returns None when no converter expression is given; for
    unsupported converter forms an error is reported and init_type falls back
    to Any.
    """
    if (not converter_expr):
        return None
    converter_info = Converter()
    # attr.converters.optional(inner): unwrap to the inner converter and
    # remember to union None back into the accepted type at the end.
    if (isinstance(converter_expr, CallExpr) and isinstance(converter_expr.callee, RefExpr) and (converter_expr.callee.fullname in attr_optional_converters) and converter_expr.args and converter_expr.args[0]):
        converter_expr = converter_expr.args[0]
        is_attr_converters_optional = True
    else:
        is_attr_converters_optional = False
    converter_type: (Type | None) = None
    if (isinstance(converter_expr, RefExpr) and converter_expr.node):
        if isinstance(converter_expr.node, FuncDef):
            if (converter_expr.node.type and isinstance(converter_expr.node.type, FunctionLike)):
                converter_type = converter_expr.node.type
            else:
                # Unannotated function: accept anything, infer nothing.
                converter_info.init_type = AnyType(TypeOfAny.unannotated)
                return converter_info
        elif (isinstance(converter_expr.node, OverloadedFuncDef) and is_valid_overloaded_converter(converter_expr.node)):
            converter_type = converter_expr.node.type
        elif isinstance(converter_expr.node, TypeInfo):
            from mypy.checkmember import type_object_type
            # A class used as a converter: its constructor signature is the
            # converter's callable type.
            converter_type = type_object_type(converter_expr.node, ctx.api.named_type)
    elif (isinstance(converter_expr, IndexExpr) and isinstance(converter_expr.analyzed, TypeApplication) and isinstance(converter_expr.base, RefExpr) and isinstance(converter_expr.base.node, TypeInfo)):
        # Generic class with explicit type application, e.g. ``list[int]``:
        # substitute the applied type arguments into the constructor type.
        from mypy.checkmember import type_object_type
        converter_type = type_object_type(converter_expr.base.node, ctx.api.named_type)
        if isinstance(converter_type, CallableType):
            converter_type = apply_generic_arguments(converter_type, converter_expr.analyzed.types, ctx.api.msg.incompatible_typevar_value, converter_type)
        else:
            converter_type = None
    if isinstance(converter_expr, LambdaExpr):
        # Lambdas are accepted but not analyzed.
        converter_info.init_type = AnyType(TypeOfAny.unannotated)
        return converter_info
    if (not converter_type):
        ctx.api.fail('Unsupported converter, only named functions, types and lambdas are currently supported', converter_expr)
        converter_info.init_type = AnyType(TypeOfAny.from_error)
        return converter_info
    converter_type = get_proper_type(converter_type)
    if (isinstance(converter_type, CallableType) and converter_type.arg_types):
        # __init__ accepts the converter's first parameter type; the attribute
        # gets the converter's return type (unless optional-wrapped).
        converter_info.init_type = converter_type.arg_types[0]
        if (not is_attr_converters_optional):
            converter_info.ret_type = converter_type.ret_type
    elif isinstance(converter_type, Overloaded):
        types: list[Type] = []
        for item in converter_type.items:
            num_arg_types = len(item.arg_types)
            if (not num_arg_types):
                continue
            # Skip overloads that require more than one positional argument.
            if ((num_arg_types > 1) and any(((kind == ARG_POS) for kind in item.arg_kinds[1:]))):
                continue
            types.append(item.arg_types[0])
        if types:
            converter_info.init_type = make_simplified_union(types)
    if (is_attr_converters_optional and converter_info.init_type):
        # optional(converter): None must also be accepted by __init__.
        converter_info.init_type = UnionType.make_union([converter_info.init_type, NoneType()])
    return converter_info
class AllowMoveAZPSimulatedAnnealing(AllowMoveStrategy):
    """Simulated-annealing acceptance rule for AZP region moves.

    Objective-improving moves are always accepted; worsening moves are
    accepted with probability exp(-diff / t). Observers can subscribe to
    "a move was made" and "the SA move budget was reached" events.
    """

    def __init__(self, init_temperature, sa_moves_term=float('inf')):
        """
        Parameters
        ----------
        init_temperature
            Initial annealing temperature (controls acceptance probability of
            worsening moves); updatable later via update_temperature().
        sa_moves_term
            Number of accepted worsening (SA) moves after which the
            min-sa-moves observers are notified. float('inf') (the default)
            means "never notify".
        """
        self.observers_min_sa_moves = []
        self.observers_move_made = []
        self.t = init_temperature
        # Bug fix: float('inf') is not a numbers.Integral, so the previous
        # check rejected the documented default value. Infinity is now
        # accepted explicitly (it disables the notification).
        if sa_moves_term != float('inf') and (
                (not isinstance(sa_moves_term, numbers.Integral)) or (sa_moves_term < 1)):
            raise ValueError('The sa_moves_term argument must be a positive integer.')
        self.sa_moves_term = sa_moves_term
        self.sa = 0  # count of accepted worsening moves so far
        super().__init__()

    def __call__(self, moving_area, new_region, labels):
        """Decide whether moving `moving_area` to `new_region` is allowed."""
        diff = self.objective_func.update(moving_area, new_region, labels, self.attr)
        if (diff <= 0):
            # Improvement (or neutral): always accept.
            self.objective_val += diff
            self.notify_move_made()
            return True
        else:
            # Worsening move: Metropolis acceptance at temperature self.t.
            prob = math.exp(((- diff) / self.t))
            move_allowed = (random.random() < prob)
            if move_allowed:
                self.notify_move_made()
                self.sa += 1
                if (self.sa >= self.sa_moves_term):
                    self.notify_min_sa_moves()
                self.objective_val += diff
                return True
            return False

    def register_sa_moves_term(self, observer_func):
        """Subscribe `observer_func` to the "SA move budget reached" event."""
        if callable(observer_func):
            self.observers_min_sa_moves.append(observer_func)
        else:
            raise ValueError('The observer_func must be callable.')

    def register_move_made(self, observer_func):
        """Subscribe `observer_func` to the "a move was accepted" event."""
        if callable(observer_func):
            self.observers_move_made.append(observer_func)
        else:
            raise ValueError('The observer_func must be callable.')

    def notify_min_sa_moves(self):
        for observer_func in self.observers_min_sa_moves:
            observer_func()

    def notify_move_made(self):
        for observer_func in self.observers_move_made:
            observer_func()

    def update_temperature(self, temp):
        """Set a new annealing temperature for subsequent decisions."""
        self.t = temp

    def reset(self):
        """Reset the accepted-SA-move counter (e.g. for a new annealing pass)."""
        self.sa = 0
def test_geographic_crs__from_methods():
    # Every alternate constructor should return a GeographicCRS instance
    # (not a plain CRS), preserving the subclass through the maker machinery.
    assert_maker_inheritance_valid(GeographicCRS.from_epsg(4326), GeographicCRS)
    assert_maker_inheritance_valid(GeographicCRS.from_string('EPSG:4326'), GeographicCRS)
    assert_maker_inheritance_valid(GeographicCRS.from_proj4('+proj=latlon'), GeographicCRS)
    assert_maker_inheritance_valid(GeographicCRS.from_user_input(GeographicCRS.from_string('EPSG:4326')), GeographicCRS)
    assert_maker_inheritance_valid(GeographicCRS.from_json(CRS(4326).to_json()), GeographicCRS)
    assert_maker_inheritance_valid(GeographicCRS.from_json_dict(CRS(4326).to_json_dict()), GeographicCRS)
    # EPSG:6933 is not a geographic CRS, so constructing a GeographicCRS
    # from it must raise.
    with pytest.raises(CRSError, match='Invalid type'):
        GeographicCRS.from_epsg(6933)
class Pick(Object):
    """QuakeML Pick element: an onset-time measurement on a waveform, with
    conversion helpers to pyrocko marker objects."""
    public_id = ResourceReference.T(xmlstyle='attribute', xmltagname='publicID')
    comment_list = List.T(Comment.T())
    time = TimeQuantity.T()
    waveform_id = WaveformStreamID.T(xmltagname='waveformID')
    filter_id = ResourceReference.T(optional=True, xmltagname='filterID')
    method_id = ResourceReference.T(optional=True, xmltagname='methodID')
    horizontal_slowness = RealQuantity.T(optional=True)
    backazimuth = RealQuantity.T(optional=True)
    slowness_method_id = ResourceReference.T(optional=True, xmltagname='slownessMethodID')
    onset = PickOnset.T(optional=True)
    phase_hint = Phase.T(optional=True)
    polarity = PickPolarity.T(optional=True)
    evaluation_mode = EvaluationMode.T(optional=True)
    evaluation_status = EvaluationStatus.T(optional=True)
    creation_info = CreationInfo.T(optional=True)

    def pyrocko_polarity(self):
        """Map the QuakeML polarity value through polarity_choices
        (None when unset or unmapped)."""
        return polarity_choices.get(self.polarity, None)

    def get_pyrocko_phase_marker(self, event=None):
        """Build a pyrocko PhaseMarker at this pick's time.

        Falls back to phase name 'undefined' (with a warning) when
        phase_hint is missing.
        """
        if (not self.phase_hint):
            logger.warning(('Pick %s: phase_hint undefined' % self.public_id))
            phasename = 'undefined'
        else:
            phasename = self.phase_hint.value
        # Bug fix: pyrocko_polarity was previously passed as a bound method
        # (missing call parentheses), handing the marker a function object
        # instead of the polarity value.
        return marker.PhaseMarker(event=event, nslc_ids=[self.waveform_id.nslc_id], tmin=self.time.value, tmax=self.time.value, phasename=phasename, polarity=self.pyrocko_polarity(), automatic=self.evaluation_mode)
class CanineConfig(PretrainedConfig):
    """Configuration for the CANINE character-level model.

    All constructor arguments are stored verbatim as attributes; the special
    token ids are forwarded to PretrainedConfig.
    """
    model_type = 'canine'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=57344, eos_token_id=57345, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Core transformer geometry.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularization and initialization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Embedding table sizes.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # CANINE-specific character hashing and down/up-sampling knobs.
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
class COR(IntFlag):
    """Condition/operation status register bit flags (bit 14 down to bit 0)."""
    COMPARISON_RESULT_HI = 0x4000                      # bit 14
    COMPARISON_RESULT_GO = 0x2000                      # bit 13
    COMPARISON_RESULT_LO = 0x1000                      # bit 12
    OVERHEAT_DETECTION = 0x0800                        # bit 11
    OVERLOAD_DETECTION = 0x0400                        # bit 10
    OSCILLATION_DETECTION = 0x0200                     # bit 9
    COMPLIANCE_DETECTION = 0x0100                      # bit 8
    SYNCHRONOUS_OPERATION_MASTER_CHANNEL = 0x0080      # bit 7
    MEASUREMENT_DATA_OUTPUT_SPECIFICATION = 0x0040     # bit 6
    HAS_MEASUREMENT_DATA = 0x0020                      # bit 5
    SELF_TEST_ERROR_ANALOG_SECTION = 0x0010            # bit 4
    MEASUREMENT_DATA_BUFFER_FULL = 0x0008              # bit 3
    WAITING_FOR_TRIGGER = 0x0004                       # bit 2
    END_OF_SWEEP = 0x0002                              # bit 1
    OPERATED_STATE = 0x0001                            # bit 0
class RegistryQueryCommand(ops.cmd.DszCommand):
    """Wrapper around the 'registryquery' Dsz plugin command.

    Exposes the plugin's options (hive, key, value, recursive, target,
    wow64/wow32, chunksize) as validated properties backed by ``self.optdict``.
    """

    def __init__(self, plugin='registryquery', prefixes=None, arglist=None, dszquiet=True, hive='l', **optdict):
        # `prefixes`/`arglist` kept for interface compatibility; not used here.
        # (prefixes previously defaulted to a mutable [] — the shared-default
        # pitfall; None is equivalent since the value is never read.)
        ops.cmd.DszCommand.__init__(self, plugin=plugin, dszquiet=dszquiet, **optdict)
        self.hive = hive
        if ('key' in optdict):
            self.key = optdict['key']
        if ('value' in optdict):
            self.value = optdict['value']

    @staticmethod
    def _quote(val):
        # Shared quoting rule for key/value strings: wrap values containing a
        # space in double quotes (unless already quoted), then collapse any
        # doubled quotes produced by callers.
        if ((val.find(' ') > (- 1)) and (val[0] != '"')):
            val = ('"%s"' % val)
        if (val.find('""') > (- 1)):
            val = val.replace('""', '"')
        return val

    def _getHive(self):
        return self.optdict['hive']

    def _setHive(self, val):
        # Hive is mandatory and must be one of the known HIVES abbreviations.
        if (val is None):
            raise OpsCommandException('You must set hive, hive cannot be None')
        if (val.lower() in HIVES):
            self.optdict['hive'] = val.lower()
        else:
            raise OpsCommandException(('Invalid hive %s' % val))
    hive = property(_getHive, _setHive)

    def _getKey(self):
        return self.optdict.get('key')

    def _setKey(self, val):
        # None or blank clears the option entirely.
        if ((val is None) or (val.strip() == '')):
            self.optdict.pop('key', None)
            return
        self.optdict['key'] = self._quote(val)
    key = property(_getKey, _setKey)

    def _getValue(self):
        return self.optdict.get('value')

    def _setValue(self, val):
        if (val is None):
            self.optdict.pop('value', None)
            return
        self.optdict['value'] = self._quote(val)
    value = property(_getValue, _setValue)

    def _getRecursive(self):
        # Normalize to a strict True/False regardless of the stored value.
        return bool(self.optdict.get('recursive'))

    def _setRecursive(self, val):
        # Truthy -> store True; falsy -> drop the option from the dict.
        if val:
            self.optdict['recursive'] = True
        else:
            self.optdict.pop('recursive', None)
    recursive = property(_getRecursive, _setRecursive)

    def _getTarget(self):
        return self.optdict.get('target')

    def _setTarget(self, val):
        if (val is None):
            self.optdict.pop('target', None)
            return
        self.optdict['target'] = val
    target = property(_getTarget, _setTarget)

    def _getWow64(self):
        return bool(self.optdict.get('wow64'))

    def _setWow64(self, val):
        if val:
            self.optdict['wow64'] = val
        else:
            self.optdict.pop('wow64', None)
    wow64 = property(_getWow64, _setWow64)

    def _getWow32(self):
        return bool(self.optdict.get('wow32'))

    def _setWow32(self, val):
        if val:
            self.optdict['wow32'] = val
        else:
            self.optdict.pop('wow32', None)
    wow32 = property(_getWow32, _setWow32)

    def _getChunksize(self):
        return self.optdict.get('chunksize')

    def _setChunksize(self, val):
        if (val is None):
            self.optdict.pop('chunksize', None)
            return
        # Exact int check kept intentionally (isinstance would also accept bool).
        if (type(val) is int):
            self.optdict['chunksize'] = val
        else:
            raise OpsCommandException('chunksize is required to be an integer')
    chunksize = property(_getChunksize, _setChunksize)
# NOTE(review): the two bare strings below look like the arguments of stripped
# @mock.patch(...) class decorators ('auditwheel.elfutils.open' /
# 'auditwheel.elfutils.ELFFile') — the test methods expect the corresponding
# mocks as parameters; confirm against the original source.
('auditwheel.elfutils.open')
('auditwheel.elfutils.ELFFile')
class TestElfFileFilter():
    """Tests for elf_file_filter: it should yield only .so files that parse
    as valid ELF objects."""

    def test_filter(self, elffile_mock, open_mock):
        # Two .so files, both parse as ELF -> both pass through.
        result = elf_file_filter(['file1.so', 'file2.so'])
        assert (len(list(result)) == 2)
    def test_some_py_files(self, elffile_mock, open_mock):
        # .py files are excluded before any ELF parsing happens.
        result = elf_file_filter(['file1.py', 'file2.so', 'file3.py'])
        assert (len(list(result)) == 1)
    def test_not_elf(self, elffile_mock, open_mock):
        # Files raising ELFError on parse are excluded entirely.
        elffile_mock.side_effect = ELFError
        result = elf_file_filter(['file1.notelf', 'file2.notelf'])
        assert (len(list(result)) == 0)
class A2C_ACKTR():
    """Advantage Actor-Critic (A2C), optionally with ACKTR (K-FAC) optimization.

    With acktr=True a KFACOptimizer is used and Fisher statistics are refreshed
    every optimizer.Ts steps; otherwise plain RMSprop with gradient clipping.
    An optional `dril` object receives a bc_update() after each policy update.
    """

    def __init__(self, actor_critic, value_loss_coef, entropy_coef, lr=None, eps=None, alpha=None, max_grad_norm=None, acktr=False, dril=None):
        self.actor_critic = actor_critic
        self.acktr = acktr
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        if acktr:
            self.optimizer = KFACOptimizer(actor_critic)
        else:
            self.optimizer = optim.RMSprop(actor_critic.parameters(), lr, eps=eps, alpha=alpha)
        self.dril = dril

    def update(self, rollouts):
        """Run one A2C/ACKTR update over a rollout batch.

        Returns (value_loss, action_loss, dist_entropy) as Python floats.
        """
        obs_shape = rollouts.obs.size()[2:]
        action_shape = rollouts.actions.size()[(- 1)]
        (num_steps, num_processes, _) = rollouts.rewards.size()
        # Re-evaluate the stored actions under the current policy parameters.
        (values, action_log_probs, dist_entropy, _) = self.actor_critic.evaluate_actions(rollouts.obs[:(- 1)].view((- 1), *obs_shape), rollouts.recurrent_hidden_states[0].view((- 1), self.actor_critic.recurrent_hidden_state_size), rollouts.masks[:(- 1)].view((- 1), 1), rollouts.actions.view((- 1), action_shape))
        values = values.view(num_steps, num_processes, 1)
        action_log_probs = action_log_probs.view(num_steps, num_processes, 1)
        advantages = (rollouts.returns[:(- 1)] - values)
        value_loss = advantages.pow(2).mean()
        # Policy gradient: advantages are detached so only log-probs carry grad.
        action_loss = (- (advantages.detach() * action_log_probs).mean())
        if (self.acktr and ((self.optimizer.steps % self.optimizer.Ts) == 0)):
            # Periodic K-FAC statistics refresh via a dedicated Fisher-loss
            # backward pass (noise makes the value-Fisher term well-defined).
            self.actor_critic.zero_grad()
            pg_fisher_loss = (- action_log_probs.mean())
            value_noise = torch.randn(values.size())
            if values.is_cuda:
                value_noise = value_noise.cuda()
            sample_values = (values + value_noise)
            vf_fisher_loss = (- (values - sample_values.detach()).pow(2).mean())
            fisher_loss = (pg_fisher_loss + vf_fisher_loss)
            self.optimizer.acc_stats = True
            fisher_loss.backward(retain_graph=True)
            self.optimizer.acc_stats = False
        self.optimizer.zero_grad()
        (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
        # Idiom fix: `self.acktr == False` -> `not self.acktr`. Gradient
        # clipping only applies on the plain RMSprop path.
        if not self.acktr:
            nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
        self.optimizer.step()
        if self.dril:
            self.dril.bc_update()
        return (value_loss.item(), action_loss.item(), dist_entropy.item())
# NOTE(review): this line looks like a truncated `@pytest.mark.parametrize`
# decorator for the test below — confirm against the original file.
.parametrize('num_workers', [1, 2])
def test_train_client(tmpdir, start_ray_client_server_2_cpus, num_workers):
    # Smoke test: a BoringModel trains through a Ray client connection with a
    # RayStrategy of `num_workers` workers.
    assert ray.util.client.ray.is_connected()
    model = BoringModel()
    strategy = RayStrategy(num_workers=num_workers)
    trainer = get_trainer(tmpdir, strategy=strategy)
    train_test(trainer, model)
class CifarResNet(nn.Module):
    """ResNet for 32x32 CIFAR-style inputs (depth in {20, 32, 44, 56, 110}).

    forward() returns a dict with the three stage feature maps ('fmaps') and
    the pooled feature vector ('features'); the `fc` head is exposed but not
    applied in forward().
    """

    def __init__(self, block, depth, channels=3):
        super(CifarResNet, self).__init__()
        assert (((depth - 2) % 6) == 0), 'depth should be one of 20, 32, 44, 56, 110'
        blocks_per_stage = (depth - 2) // 6
        # Stem: 3x3 conv + BN on the raw input channels.
        self.conv_1_3x3 = nn.Conv2d(channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(16)
        self.inplanes = 16
        # Three residual stages at widths 16/32/64; stages 2 and 3 stride by 2.
        self.stage_1 = self._make_layer(block, 16, blocks_per_stage, 1)
        self.stage_2 = self._make_layer(block, 32, blocks_per_stage, 2)
        self.stage_3 = self._make_layer(block, 64, blocks_per_stage, 2)
        self.avgpool = nn.AvgPool2d(8)
        self.out_dim = 64 * block.expansion
        self.fc = nn.Linear(64 * block.expansion, 10)
        # He-style init for convs, unit BN scale, Kaiming for the linear head.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                nn.init.kaiming_normal_(module.weight)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; only its first block may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.conv_1_3x3(x)
        out = F.relu(self.bn_1(out), inplace=True)
        feat_1 = self.stage_1(out)
        feat_2 = self.stage_2(feat_1)
        feat_3 = self.stage_3(feat_2)
        pooled = self.avgpool(feat_3)
        flat = pooled.view(pooled.size(0), -1)
        return {'fmaps': [feat_1, feat_2, feat_3], 'features': flat}

    def last_conv(self):
        # Final conv of the last residual block.
        # NOTE(review): assumes each block exposes a `conv_b` attribute.
        return self.stage_3[-1].conv_b
class ThreeParallelBloqs(Bloq):
    """Composite bloq: applies TestParallelCombo three times in sequence to
    the 3-qubit 'stuff' register."""

    def signature(self) -> Signature:
        return Signature.build(stuff=3)

    def build_composite_bloq(self, bb: 'BloqBuilder', stuff: 'SoquetT') -> Dict[str, 'SoquetT']:
        # Thread the soquet through three consecutive TestParallelCombo adds.
        for _ in range(3):
            stuff = bb.add(TestParallelCombo(), reg=stuff)
        return {'stuff': stuff}
class nnUNetTrainerVanillaAdam3en4(nnUNetTrainerVanillaAdam):
    """nnUNetTrainerVanillaAdam variant whose only change is an initial
    learning rate of 3e-4."""

    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool=True, device: torch.device=torch.device('cuda')):
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
        # Sole override relative to the parent trainer.
        self.initial_lr = 3e-4
def test_read_tmy3_no_coerce_year():
    """With coerce_year=None the TMY3 index keeps its native years."""
    coerce_year = None
    (data, _) = tmy.read_tmy3(TMY3_TESTFILE, coerce_year=coerce_year, map_variables=False)
    # Bug fix: the old assertion `1997 and (1999 in ...)` only checked 1999,
    # since the bare int 1997 is always truthy. Check both years explicitly.
    assert (1997 in data.index.year) and (1999 in data.index.year)
    assert (data.index[(- 2)] == pd.Timestamp('1998-12-31 23:00:00-09:00'))
    assert (data.index[(- 1)] == pd.Timestamp('1999-01-01 00:00:00-09:00'))
def os_stat():
    """Generator that patches os.stat so selected paths are transparently
    redirected to replacement paths.

    Yields an ``add_mapping(original, replacement)`` callable; each mapping is
    one-shot (popped on first use). After the patched block exits, leftover
    mappings mean an expected os.stat call never happened, which is reported
    as an error.

    NOTE(review): the bare ``yield`` inside ``with`` indicates this is meant
    to be used as a generator fixture/context manager — its decorator
    (e.g. @pytest.fixture or @contextmanager) is not visible here; confirm.
    """
    orig_os_stat = os.stat
    file_mappings = {}
    def add_mapping(original, replacement):
        # Register a one-shot redirect: stat(original) -> stat(replacement).
        file_mappings[original] = replacement
    def my_os_stat(*args, **kwargs):
        # Consume the mapping on first hit; otherwise defer to the real stat.
        if (args[0] in file_mappings):
            args = ((file_mappings.pop(args[0]),) + args[1:])
        return orig_os_stat(*args, **kwargs)
    with mock.patch('os.stat', my_os_stat):
        (yield add_mapping)
    if file_mappings:
        raise NotImplementedError(('Orphaned os.stat calls: ' + ', '.join(file_mappings.keys())))
class OpsTestIndicesToDenseVector(tf.test.TestCase):
    """TF1 session tests for ops.indices_to_dense_vector: scattering indicator
    values into a dense vector of a given size."""

    def test_indices_to_dense_vector(self):
        # Random subset of indices -> float32 one-hot-style indicator.
        size = 10000
        num_indices = np.random.randint(size)
        rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
        expected_output = np.zeros(size, dtype=np.float32)
        expected_output[rand_indices] = 1.0
        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
        with self.test_session() as sess:
            output = sess.run(indicator)
            self.assertAllEqual(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
    def test_indices_to_dense_vector_size_at_inference(self):
        # Size supplied as a dynamic tensor (tf.shape of a placeholder).
        size = 5000
        num_indices = 250
        all_indices = np.arange(size)
        rand_indices = np.random.permutation(all_indices)[0:num_indices]
        expected_output = np.zeros(size, dtype=np.float32)
        expected_output[rand_indices] = 1.0
        tf_all_indices = tf.placeholder(tf.int32)
        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices, tf.shape(tf_all_indices)[0])
        feed_dict = {tf_all_indices: all_indices}
        with self.test_session() as sess:
            output = sess.run(indicator, feed_dict=feed_dict)
            self.assertAllEqual(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
    def test_indices_to_dense_vector_int(self):
        # Explicit int64 dtype with integer fill value.
        size = 500
        num_indices = 25
        rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
        expected_output = np.zeros(size, dtype=np.int64)
        expected_output[rand_indices] = 1
        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices, size, 1, dtype=tf.int64)
        with self.test_session() as sess:
            output = sess.run(indicator)
            self.assertAllEqual(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
    def test_indices_to_dense_vector_custom_values(self):
        # Custom indices_value/default_value (floats -> assertAllClose).
        size = 100
        num_indices = 10
        rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
        indices_value = np.random.rand(1)
        default_value = np.random.rand(1)
        expected_output = np.float32((np.ones(size) * default_value))
        expected_output[rand_indices] = indices_value
        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices, size, indices_value=indices_value, default_value=default_value)
        with self.test_session() as sess:
            output = sess.run(indicator)
            self.assertAllClose(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
    def test_indices_to_dense_vector_all_indices_as_input(self):
        # Every index present -> all-ones vector.
        size = 500
        num_indices = 500
        rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
        expected_output = np.ones(size, dtype=np.float32)
        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
        with self.test_session() as sess:
            output = sess.run(indicator)
            self.assertAllEqual(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
    def test_indices_to_dense_vector_empty_indices_as_input(self):
        # Empty index list -> all-zeros vector.
        size = 500
        rand_indices = []
        expected_output = np.zeros(size, dtype=np.float32)
        tf_rand_indices = tf.constant(rand_indices)
        indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
        with self.test_session() as sess:
            output = sess.run(indicator)
            self.assertAllEqual(output, expected_output)
            self.assertEqual(output.dtype, expected_output.dtype)
def test_simple_unittest(pytester: Pytester) -> None:
    """A unittest.TestCase run via pytest reports pass/fail per method."""
    # One passing and one failing TestCase method, written to a temp test file.
    source_path = pytester.makepyfile("\n import unittest\n class MyTestCase(unittest.TestCase):\n def testpassing(self):\n self.assertEqual('foo', 'foo')\n def test_failing(self):\n self.assertEqual('foo', 'bar')\n ")
    run_record = pytester.inline_run(source_path)
    assert run_record.matchreport('testpassing').passed
    assert run_record.matchreport('test_failing').failed
def RewireTails():
    """Redirect single-character edge targets in the global `Graph`.

    For each edge src -> t with len(t) == 1, search the suffixes of src + t
    (longest first, length >= 2) for an existing graph node; when one is
    found, move the edge's weight onto it and drop the old edge. Mutations
    are staged and applied after the scan so the dict is not modified while
    being iterated.
    """
    additions = []
    removals = []
    for src in Graph:
        for tgt in Graph[src]:
            if len(tgt) != 1:
                continue
            candidate = src + tgt
            while len(candidate) > 1:
                if candidate in Graph:
                    additions.append((src, candidate, Graph[src][tgt]))
                    removals.append((src, tgt))
                    break
                candidate = candidate[1:]
    for (src, new_tgt, weight) in additions:
        Graph[src][new_tgt] = weight
    for (src, old_tgt) in removals:
        del Graph[src][old_tgt]
class GeneratorHubInterface(nn.Module):
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
for model in self.models:
model.make_generation_fast_(beamable_mm_beam_size=(None if getattr(args, 'no_beamable_mm', False) else getattr(args, 'beam', 5)), need_attn=getattr(args, 'print_alignment', False))
self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
def device(self):
return self._float_tensor.device
def translate(self, sentence: str, beam: int=5, verbose: bool=False, **kwargs) -> str:
return self.sample(sentence, beam, verbose, **kwargs)
def sample(self, sentence: str, beam: int=1, verbose: bool=False, **kwargs) -> str:
input = self.encode(sentence)
hypo = self.generate(input, beam, verbose, **kwargs)[0]['tokens']
return self.decode(hypo)
def generate(self, tokens: torch.LongTensor, beam: int=5, verbose: bool=False, **kwargs) -> torch.LongTensor:
sample = self._build_sample(tokens)
gen_args = copy.copy(self.args)
gen_args.beam = beam
for (k, v) in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(gen_args)
translations = self.task.inference_step(generator, self.models, sample)
if verbose:
src_str_with_unk = self.string(tokens)
print('S\t{}'.format(src_str_with_unk))
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
hypos = translations[0]
if verbose:
for hypo in hypos:
hypo_str = self.decode(hypo['tokens'])
print('H\t{}\t{}'.format(hypo['score'], hypo_str))
print('P\t{}'.format(' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
if ((hypo['alignment'] is not None) and getarg('print_alignment', False)):
print('A\t{}'.format(' '.join(map((lambda x: str(utils.item(x))), hypo['alignment'].int().cpu()))))
return hypos
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if (self.tokenizer is not None):
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if (self.tokenizer is not None):
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if (self.bpe is not None):
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if (self.bpe is not None):
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_sample(self, src_tokens: torch.LongTensor):
    """Wrap a single source-token tensor into a collated batch on self.device."""
    assert torch.is_tensor(src_tokens)
    dataset = self.task.build_dataset_for_inference([src_tokens], [src_tokens.numel()])
    batch = dataset.collater([dataset[0]])
    # Recursively move every tensor in the (nested) sample to the model device.
    return utils.apply_to_sample(lambda tensor: tensor.to(self.device), batch)
class ELF_Phdr():
    """One ELF program header (Elf_Phdr) record.

    Field names mirror the C struct: segment type, file offset, virtual and
    physical load addresses, size in file, size in memory, flags, alignment.
    """

    def __init__(self, p_type, p_offset, p_vaddr, p_paddr, p_filesz, p_memsz, p_flags, p_align):
        self.p_type = p_type
        self.p_offset = p_offset
        self.p_vaddr = p_vaddr
        self.p_paddr = p_paddr
        self.p_filesz = p_filesz
        self.p_memsz = p_memsz
        self.p_flags = p_flags
        self.p_align = p_align

    def __repr__(self):
        # Debug-friendly dump of all header fields (added for inspectability;
        # no existing behavior changed).
        return ('ELF_Phdr(p_type=%r, p_offset=%r, p_vaddr=%r, p_paddr=%r, '
                'p_filesz=%r, p_memsz=%r, p_flags=%r, p_align=%r)' % (
                    self.p_type, self.p_offset, self.p_vaddr, self.p_paddr,
                    self.p_filesz, self.p_memsz, self.p_flags, self.p_align))
def test_validate_manifest_with_unencoded_unicode():
    """The schema-1 parser must accept a manifest containing raw unicode."""
    fixture = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'manifest_unencoded_unicode.json')
    with open(fixture, 'r') as fp:
        raw = fp.read()
    manifest = DockerSchema1Manifest(Bytes.for_string_or_unicode(raw))
    # Digest and creation timestamp must both be computable from the fixture.
    assert manifest.digest == 'sha256:5d8a0f34744a39bf566ba430251adc0cc86587f86aed3ac2acfb897f349777bc'
    assert manifest.created_datetime
    assert list(manifest.get_layers(None))[-1].author == 'Some guy'
def find_boundaries(s, w):
    """Find the [start, end) token span of the number-like phrase around *w*.

    Args:
        s: assumed to be a spaCy Doc/Span whose tokens expose ``.text``,
           ``.ent_iob`` (spaCy integer coding: 1=I, 2=O, 3=B), ``.pos_``,
           ``.like_num`` and ``.lemma_`` -- TODO confirm against callers.
        w: a token of *s*; ``w.i`` is its index within *s*.

    Returns:
        A pair ``(start, end)`` of token indices into *s*, end-exclusive.
    """
    ind = w.i
    # Height-like patterns such as 5 ' 11: return all three tokens together,
    # whichever of the three *w* happens to be.
    if (((ind + 2) < len(s)) and (s[(ind + 1)].text == "'") and s[(ind + 2)].like_num):
        return (ind, (ind + 3))
    if (((ind - 2) >= 0) and (s[(ind - 1)].text == "'") and s[(ind - 2)].like_num):
        return ((ind - 2), (ind + 1))
    # Token outside any entity (ent_iob == 2, i.e. 'O'): span is just this token.
    if (s[ind].ent_iob == 2):
        return (ind, (ind + 1))
    # Walk right while tokens continue the entity (ent_iob == 1, 'I') and the
    # numeric context continues (token is numeric, or the next one is).
    if (ind != (len(s) - 1)):
        i = (ind + 1)
        while ((s[i].ent_iob == 1) and ((s[i].pos_ == 'NUM') or s[i].like_num or (((i + 1) < len(s)) and ((s[(i + 1)].pos_ == 'NUM') or s[(i + 1)].like_num)))):
            i += 1
            if (i == len(s)):
                break
        # Keep the final token only if it is itself numeric (or lemma 'one').
        if ((s[(i - 1)].pos_ == 'NUM') or s[(i - 1)].like_num or (s[(i - 1)].lemma_ in ['one'])):
            end = i
        else:
            end = (i - 1)
    else:
        end = (ind + 1)
    # Entity-initial token (ent_iob == 3, 'B'): the span cannot extend left.
    if (s[ind].ent_iob == 3):
        return (ind, end)
    # Walk left while still inside an entity and the numeric context continues.
    # NOTE(review): when i reaches 0 the condition reads s[i - 1] == s[-1],
    # silently wrapping to the LAST token -- looks unintended; confirm.
    i = (ind - 1)
    while ((s[i].ent_iob != 2) and ((s[i].pos_ == 'NUM') or s[i].like_num or (s[(i - 1)].pos_ == 'NUM') or s[(i - 1)].like_num)):
        i -= 1
        if (i == (- 1)):
            break
    i += 1
    # If we overshot onto a non-numeric token, step back right by one.
    if ((s[i].pos_ != 'NUM') and (not s[i].like_num)):
        i += 1
    return (i, end)
class TrackNumbers(Gtk.VBox):
    """Properties-dialog page that renumbers the 'tracknumber' tag of many songs.

    Shows the selected files in a reorderable list, previews new numbers
    computed from a start offset and an optional total, and writes them back
    to the files on Save.
    """

    def __init__(self, prop, library):
        """Build the page UI and wire it to *prop* (the properties window)
        and *library* (the song library used for save/reload notifications).
        """
        super().__init__(spacing=6)
        self.title = _('Track Numbers')
        self.set_border_width(12)
        # "Start from" spinner with its mnemonic label.
        label_start = Gtk.Label(label=_('Start fro_m:'), halign=Gtk.Align.END)
        label_start.set_use_underline(True)
        spin_start = Gtk.SpinButton()
        spin_start.set_range(0, 999)
        spin_start.set_increments(1, 10)
        spin_start.set_value(1)
        label_start.set_mnemonic_widget(spin_start)
        # "Total tracks" spinner; a value of 0 means "no total" (see preview).
        label_total = Gtk.Label(label=_('_Total tracks:'), halign=Gtk.Align.END)
        label_total.set_use_underline(True)
        spin_total = Gtk.SpinButton()
        spin_total.set_range(0, 999)
        spin_total.set_increments(1, 10)
        label_total.set_mnemonic_widget(spin_total)
        preview = qltk.Button(_('_Preview'), Icons.VIEW_REFRESH)
        # Lay out the two labelled spinners plus the preview button in a grid.
        grid = Gtk.Grid(row_spacing=4, column_spacing=4)
        grid.add(label_start)
        grid.attach_next_to(spin_start, label_start, Gtk.PositionType.RIGHT, 1, 1)
        grid.attach_next_to(label_total, label_start, Gtk.PositionType.BOTTOM, 1, 1)
        grid.attach_next_to(spin_total, label_total, Gtk.PositionType.RIGHT, 1, 1)
        grid.attach_next_to(Align(preview, halign=Gtk.Align.END), spin_start, Gtk.PositionType.RIGHT, 1, 1)
        preview.props.hexpand = True
        model = ObjectStore()
        view = HintedTreeView(model=model)
        self.pack_start(grid, False, True, 0)
        # First column: read-only file name.
        render = Gtk.CellRendererText()
        column = TreeViewColumn(title=_('File'))
        column.pack_start(render, True)
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        def cell_data_file(column, cell, model, iter_, data):
            # Render the entry's file name for the File column.
            entry = model.get_value(iter_)
            cell.set_property('text', entry.name)
        column.set_cell_data_func(render, cell_data_file)
        view.append_column(column)
        # Second column: the track number, editable in place.
        render = Gtk.CellRendererText()
        render.set_property('editable', True)
        column = TreeViewColumn(title=_('Track'))
        column.pack_start(render, True)
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        def cell_data_track(column, cell, model, iter_, data):
            # Render the entry's (possibly previewed) track number.
            entry = model.get_value(iter_)
            cell.set_property('text', entry.tracknumber)
        column.set_cell_data_func(render, cell_data_track)
        view.append_column(column)
        # Rows can be dragged to reorder; a re-preview follows drag-end below.
        view.set_reorderable(True)
        w = Gtk.ScrolledWindow()
        w.set_shadow_type(Gtk.ShadowType.IN)
        w.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        w.add(view)
        self.pack_start(w, True, True, 0)
        # Save / Revert button row.
        bbox = Gtk.HButtonBox()
        bbox.set_spacing(6)
        bbox.set_layout(Gtk.ButtonBoxStyle.END)
        save = Button(_('_Save'), Icons.DOCUMENT_SAVE)
        self.save = save
        connect_obj(save, 'clicked', self.__save_files, prop, model, library)
        revert = Button(_('_Revert'), Icons.DOCUMENT_REVERT)
        self.revert = revert
        bbox.pack_start(revert, True, True, 0)
        bbox.pack_start(save, True, True, 0)
        self.pack_start(bbox, False, True, 0)
        # Shared handler arguments: any change re-previews; Revert re-populates.
        preview_args = [spin_start, spin_total, model, save, revert]
        preview.connect('clicked', self.__preview_tracks, *preview_args)
        connect_obj(revert, 'clicked', self.__update, None, *preview_args[1:])
        spin_total.connect('value-changed', self.__preview_tracks, *preview_args)
        spin_start.connect('value-changed', self.__preview_tracks, *preview_args)
        connect_obj(view, 'drag-end', self.__class__.__preview_tracks, self, *preview_args)
        render.connect('edited', self.__row_edited, model, preview, save)
        # Refresh the page whenever the properties window's song set changes.
        connect_obj(prop, 'changed', self.__class__.__update, self, spin_total, model, save, revert)
        for child in self.get_children():
            child.show_all()

    def __row_edited(self, render, path, new, model, preview, save):
        """Handle a manual edit of a Track cell: store it and mark the page dirty."""
        path = Gtk.TreePath.new_from_string(path)
        row = model[path]
        entry = row[0]
        if (entry.tracknumber != new):
            entry.tracknumber = new
            preview.set_sensitive(True)
            save.set_sensitive(True)
            model.path_changed(path)

    def __save_files(self, parent, model, library):
        """Write the previewed track numbers to the files, with a progress window."""
        win = WritingWindow(parent, len(model))
        was_changed = set()
        all_done = False
        for entry in model.values():
            (song, track) = (entry.song, entry.tracknumber)
            if (song.get('tracknumber') == track):
                # Already correct: just advance the progress bar.
                win.step()
                continue
            if (not song.valid()):
                # File changed on disk since it was loaded; ask before overwriting.
                win.hide()
                dialog = OverwriteWarning(self, song)
                resp = dialog.run()
                win.show()
                if (resp != OverwriteWarning.RESPONSE_SAVE):
                    break
            song['tracknumber'] = track
            try:
                song.write()
            except AudioFileError:
                # Write failed: report, resync the library copy, and stop.
                util.print_exc()
                WriteFailedError(self, song).run()
                library.reload(song, changed=was_changed)
                break
            was_changed.add(song)
            if win.step():
                # User cancelled via the progress window.
                break
        else:
            # Loop completed without a break: everything was written.
            all_done = True
        library.changed(was_changed)
        win.destroy()
        self.save.set_sensitive((not all_done))
        self.revert.set_sensitive((not all_done))

    def __preview_tracks(self, ctx, start, total, model, save, revert):
        """Recompute each row's displayed number as 'n/total' (plain 'n' if total is 0)."""
        start = start.get_value_as_int()
        total = total.get_value_as_int()
        for row in model:
            if total:
                s = ('%d/%d' % ((row.path.get_indices()[0] + start), total))
            else:
                s = str((row.path.get_indices()[0] + start))
            entry = row[0]
            entry.tracknumber = s
            model.row_changed(row.path, row.iter)
        save.set_sensitive(True)
        revert.set_sensitive(True)

    def __update(self, songs, total, model, save, revert):
        """Repopulate the list; ``songs=None`` reuses the rows already shown."""
        if (songs is None):
            songs = [e.song for e in model.values()]
        else:
            songs = list(songs)
        def sort_key(song):
            # Order by current track number, then file basename, then the song.
            return (song('~#track', 0), song('~basename'), song)
        songs.sort(key=sort_key)
        model.clear()
        total.set_value(len(songs))
        for song in songs:
            if (not song.can_change('tracknumber')):
                # Any read-only file disables the whole page.
                self.set_sensitive(False)
                break
        else:
            self.set_sensitive(True)
        for song in songs:
            model.append([Entry(song)])
        save.set_sensitive(False)
        revert.set_sensitive(False)
def parse_args():
    """Parse the command-line options for anchor-box k-means clustering."""
    ap = argparse.ArgumentParser(description='kmeans for anchor box')
    # Dataset location and choice.
    ap.add_argument('-root', '--data_root', default='/mnt/share/ssd2/dataset', help='dataset root')
    ap.add_argument('-d', '--dataset', default='coco', help='coco, voc.')
    # Clustering and network-input parameters.
    ap.add_argument('-na', '--num_anchorbox', default=9, type=int, help='number of anchor box.')
    ap.add_argument('-size', '--input_size', default=416, type=int, help='input size.')
    ap.add_argument('--scale', action='store_true', default=False, help='divide the sizes of anchor boxes by 32 .')
    return ap.parse_args()
def test_DecisionMatrixStatsAccessor_dir(decision_matrix):
    """dir() on the stats accessor must expose every whitelisted DataFrame method."""
    dm = decision_matrix(seed=42, min_alternatives=10, max_alternatives=10, min_criteria=3, max_criteria=3)
    accessor = data.DecisionMatrixStatsAccessor(dm)
    whitelist = set(data.DecisionMatrixStatsAccessor._DF_WHITELIST)
    # Equivalent to "whitelist - dir(accessor) is empty".
    assert whitelist.issubset(dir(accessor))
def init_from_config(conf: 'configmodule.ConfigContainer') -> None:
    """Apply log levels from *conf*, unless CLI flags already override them."""
    assert _args is not None
    if _args.debug:
        # --debug wins over every configured level; leave handlers untouched.
        init.debug('--debug flag overrides log configs')
        return
    if ram_handler:
        ram_level_name = conf.logging.level.ram
        init.debug('Configuring RAM loglevel to %s', ram_level_name)
        ram_handler.setLevel(LOG_LEVELS[ram_level_name.upper()])
    if not console_handler:
        return
    console_level_name = conf.logging.level.console
    if _args.loglevel:
        # An explicit --loglevel beats the config value for the console.
        init.debug('--loglevel flag overrides logging.level.console')
        return
    init.debug('Configuring console loglevel to %s', console_level_name)
    level = LOG_LEVELS[console_level_name.upper()]
    console_handler.setLevel(level)
    change_console_formatter(level)
def prenet(inputs, is_training, layer_sizes, scope=None):
    """Stack of dense+ReLU layers with dropout (Tacotron-style prenet).

    Dropout is active (rate 0.5) only while training; at inference the rate
    is 0 so the layers pass activations through unchanged.
    """
    drop_rate = 0.5 if is_training else 0.0
    output = inputs
    with tf.variable_scope(scope or 'prenet'):
        for (idx, units) in enumerate(layer_sizes, start=1):
            hidden = tf.layers.dense(output, units=units, activation=tf.nn.relu, name=('dense_%d' % idx))
            output = tf.layers.dropout(hidden, rate=drop_rate, training=is_training, name=('dropout_%d' % idx))
    return output
@_ARCH_REGISTRY.register()
class Distiller(Baseline):
    """Student re-ID model trained by knowledge distillation from a frozen teacher.

    The teacher is built from its own config (cfg.KD.MODEL_CONFIG) with weights
    from cfg.KD.MODEL_WEIGHTS; its parameters are frozen. FIX: the original
    source had a bare ``_ARCH_REGISTRY.register()`` statement before the class
    (the ``@`` of the decorator was lost), so the architecture never got
    registered -- restored as a decorator.
    """

    def __init__(self, cfg):
        super(Distiller, self).__init__(cfg)
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        feat_dim = cfg.MODEL.BACKBONE.FEAT_DIM
        norm_type = cfg.MODEL.HEADS.NORM  # NOTE(review): read but never used -- confirm before removing
        # Build the teacher network from its own config file.
        cfg_t = get_cfg()
        cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG)
        model_t = build_model(cfg_t)
        logger.info('Teacher model:\n{}'.format(model_t))
        # Freeze the teacher entirely; it is only ever run under no_grad.
        for param in model_t.parameters():
            param.requires_grad_(False)
        logger.info('Loading teacher model weights ...')
        Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS)
        # Stored in a plain list so the teacher is NOT registered as a
        # submodule (kept out of state_dict saving and optimizer params).
        self.model_t = [model_t.backbone, model_t.heads]
        # Separate classifier producing logits for the unlabeled branch.
        self.classifier = CircleSoftmax(cfg, feat_dim, num_classes)
        self.classifier.apply(weights_init_classifier)
        self.loss = nn.MSELoss()

    def forward(self, batched_inputs, unbatched_inputs):
        """Training: run student and teacher on labeled and unlabeled batches
        and return the loss dict; evaluation falls through to Baseline.
        """
        if not self.training:
            return super().forward(batched_inputs, unbatched_inputs)
        targets = batched_inputs['targets'].to(self.device)
        untargets = unbatched_inputs['targets'].to(self.device)
        images = self.preprocess_image(batched_inputs)
        unimages = self.preprocess_image(unbatched_inputs)
        # Student pass on the labeled batch.
        s_feat = self.backbone(images)
        s_outputs = self.heads(s_feat, targets)
        # NOTE(review): duplicate of the preprocess call above -- kept for
        # exact behavior; confirm preprocess_image is deterministic before
        # deduplicating.
        unimages = self.preprocess_image(unbatched_inputs)
        # Student pass on the unlabeled batch, bypassing the head's classifier:
        # pool -> bottleneck -> squeeze spatial dims -> circle-softmax logits.
        uns_feat = self.backbone(unimages)
        uns_feat = self.heads.pool_layer(uns_feat)
        uns_feat = self.heads.bottleneck(uns_feat)
        uns_feat = uns_feat[..., 0, 0]
        pred_class_logits = self.classifier.s * F.linear(F.normalize(uns_feat), F.normalize(self.classifier.weight))
        uns_outputs = {'pred_class_logits': pred_class_logits, 'features': uns_feat}
        # Teacher passes (frozen, no gradients).
        with torch.no_grad():
            t_feat = self.model_t[0](images)
            t_outputs = self.model_t[1](t_feat, targets)
            unt_feat = self.model_t[0](unimages)
            unt_outputs = self.model_t[1](unt_feat, untargets)
        return self.losses(s_outputs, t_outputs, uns_outputs, unt_outputs, targets)

    def losses(self, s_outputs, t_outputs, uns_outputs, unt_outputs, gt_labels):
        """Baseline supervised losses plus two KL-divergence distillation terms."""
        loss_dict = super(Distiller, self).losses(s_outputs, gt_labels)
        t_logits = t_outputs['pred_class_logits'].detach()
        s_logits = s_outputs['pred_class_logits']
        unt_feat = unt_outputs['features'].detach()
        uns_feat = uns_outputs['features']
        unt_logits = unt_outputs['pred_class_logits'].detach()
        uns_logits = uns_outputs['pred_class_logits']
        # NOTE(review): these pairwise distance matrices are computed but never
        # used (presumably a leftover relational-distillation term) -- confirm.
        t_dist = compute_cosine_distance(unt_feat, unt_feat)
        s_dist = compute_cosine_distance(uns_feat, uns_feat)
        # Unlabeled-branch KD at temperature 6; labeled-branch KD at 16.
        loss_dict['ukl_loss'] = self.kl_loss(uns_logits, unt_logits, gt_labels, 6)
        loss_dict['loss_kldiv'] = self.kl_loss(s_logits, t_logits, gt_labels, 16)
        return loss_dict

    def kl_loss(self, y_s, y_t, gt_labels, t):
        """Temperature-scaled KL(student || teacher), averaged over the batch.

        ``gt_labels`` is accepted but unused. FIX: the original named the first
        parameter ``cls`` without ``@classmethod``; it is always called as
        ``self.kl_loss(...)`` so it receives the instance -- renamed to
        ``self`` with identical behavior.
        """
        p_s = F.log_softmax(y_s / t, dim=1)
        p_t = F.softmax(y_t / t, dim=1)
        # t**2 keeps gradient magnitudes comparable across temperatures
        # (Hinton et al. distillation convention).
        return F.kl_div(p_s, p_t, reduction='sum') * (t ** 2) / y_s.shape[0]

    def cross_entropy_loss(self, pred_class_outputs, gt_classes):
        """Soft cross-entropy at temperature 3, averaged over rows with
        nonzero loss.

        NOTE(review): ``gt_classes`` is softmaxed, so it is expected to be
        teacher logits rather than integer labels -- confirm against callers.
        """
        log_probs = F.log_softmax(pred_class_outputs / 3, dim=1)
        with torch.no_grad():
            soft_targets = F.softmax(gt_classes / 3, dim=1)
        loss = (-soft_targets * log_probs).sum(dim=1)
        with torch.no_grad():
            non_zero_cnt = max(loss.nonzero(as_tuple=False).size(0), 1)
        return loss.sum() / non_zero_cnt

    def updata_parameter(self):
        """EMA-update the teacher from the student with momentum 0.99.

        (Misspelled name kept for backward compatibility with existing callers.)
        """
        for param_q, param_k in zip(self.backbone.parameters(), self.model_t[0].parameters()):
            param_k.data = param_k.data * 0.99 + param_q.data * 0.01
        for param_q, param_k in zip(self.heads.parameters(), self.model_t[1].parameters()):
            param_k.data = param_k.data * 0.99 + param_q.data * 0.01
def fetch_RW(path):
    """Fetch (downloading if necessary) the Rare Word (RW) similarity dataset.

    Args:
        path: directory that holds (or will hold) ``rw/rw.txt``.

    Returns:
        dict with ``word_pairs`` (N x 2 array of words) and ``scores``
        (N similarity judgements), read from the tab-separated rw.txt.
    """
    data_path = os.path.join(path, 'rw/rw.txt')
    if not os.path.exists(data_path):
        os.makedirs(path, exist_ok=True)
        archive_path = os.path.join(path, 'rw.zip')
        # FIX: the original call was ``download(' archive_path)`` -- the URL
        # literal was lost, leaving a syntax error. NOTE(review): restored with
        # the canonical Stanford RW archive (Luong et al. 2013); confirm it
        # matches the expected rw/rw.txt layout.
        download('', archive_path)
        with zipfile.ZipFile(archive_path, 'r') as zip_ref:
            zip_ref.extractall(path)
    data = pd.read_csv(data_path, sep='\t', header=None)
    return dict(word_pairs=data[[0, 1]].values, scores=data[2].values)
def test_resolve_module_exports_from_file_log_on_max_depth(caplog):
    """Hitting the depth limit logs exactly one 'max depth reached' record."""
    path = JS_FIXTURES_DIR / 'export-resolution' / 'index.js'
    # Depth 0: nothing can be resolved, one warning emitted.
    assert resolve_module_exports_from_file(path, 0) == set()
    assert len(caplog.records) == 1
    assert caplog.records[0].message.endswith('max depth reached')
    caplog.records.clear()
    # Depth 2: direct exports resolve, deeper re-exports are still truncated.
    assert resolve_module_exports_from_file(path, 2) == {'Index', 'One'}
    assert len(caplog.records) == 1
    assert caplog.records[0].message.endswith('max depth reached')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.