code stringlengths 281 23.7M |
|---|
class _TopologicalLattice(Generic[TQubit], metaclass=ABCMeta):
    """Abstract base class for a topological-code lattice built on a QuantumCircuit.

    Subclasses supply the register layout, the qubit-index/stabilizer mapping,
    and the gate-level primitives (reset, logical X/Z, readout, ...).
    NOTE(review): method bodies of the abstract hooks appear to have been
    stripped in this dump; docstrings are added here so the class parses.
    """

    def H(self) -> int:
        # NOTE(review): DH is not defined in this view — presumably a module-level constant.
        return DH

    def W(self) -> int:
        # NOTE(review): DW is not defined in this view — presumably a module-level constant.
        return DW

    def SYNX(self) -> int:
        # Index tag for X-syndrome qubits/registers.
        return 0

    def SYNZ(self) -> int:
        # Index tag for Z-syndrome qubits/registers.
        return 1

    def __init__(self, params: Dict[(str, Any)], name: str, circ: QuantumCircuit):
        """Validate params, create quantum/classical registers, attach them to
        `circ`, and derive the qubit-index/stabilizer bookkeeping.

        Args:
            params: lattice configuration parameters (validated/extended by
                `_params_validate_and_generate`).
            name: identifier for this lattice instance.
            circ: circuit the lattice registers are added to (mutated in place).
        """
        self.name = name
        self.circ = circ
        self.params: Dict[(str, Any)] = params
        self._params_validate_and_generate()
        self.qregisters: Dict[(str, QuantumRegister)] = {}
        self.cregisters: Dict[(str, ClassicalRegister)] = {}
        self._gen_registers()
        assert ('data' in self.qregisters), 'There should be a data qubits register.'
        registers = (list(self.qregisters.values()) + list(self.cregisters.values()))
        self.circ.add_register(*registers)
        (self.qubit_indices, self.stabilizers) = self._gen_qubit_indices_and_stabilizers()

    def _params_validate_and_generate(self) -> None:
        """Validate `self.params` and derive dependent parameters (abstract; body stripped)."""

    def _gen_registers(self) -> None:
        """Populate `self.qregisters` / `self.cregisters` (abstract; body stripped)."""

    def _gen_qubit_indices_and_stabilizers(self) -> Tuple[(List[List[Qubit]], List[Type[Any]])]:
        """Return (qubit index groups, matching stabilizer classes) (abstract; body stripped)."""

    def entangle(self, qubit_indices: Optional[List[List[Qubit]]]=None, stabilizers: Optional[List[Type[_Stabilizer]]]=None) -> None:
        """Entangle each stabilizer with its qubit group, defaulting to the
        lattice's own `qubit_indices`/`stabilizers`."""
        qubit_indices = (qubit_indices if qubit_indices else self.qubit_indices)
        stabilizers = (stabilizers if stabilizers else self.stabilizers)
        for (i, stabilizer_cls) in enumerate(stabilizers):
            stabilizer = stabilizer_cls(self.circ, qubit_indices[i])
            stabilizer.entangle()
            # NOTE(review): reconstructed indentation — barrier assumed per
            # stabilizer round; verify against upstream.
            self.circ.barrier()

    def reset_x(self) -> None:
        """Reset lattice into the X basis (abstract; body stripped)."""

    def reset_z(self) -> None:
        """Reset lattice into the Z basis (abstract; body stripped)."""

    def x(self) -> None:
        """Apply a logical X (abstract; body stripped)."""

    def z(self) -> None:
        """Apply a logical Z (abstract; body stripped)."""

    def x_c_if(self, classical: ClassicalRegister, val: int) -> None:
        """Classically-controlled logical X on `classical == val` (abstract; body stripped)."""

    def z_c_if(self, classical: ClassicalRegister, val: int) -> None:
        """Classically-controlled logical Z on `classical == val` (abstract; body stripped)."""

    def cx(self, control: Optional[Qubit]=None, target: Optional[Qubit]=None):
        """Logical CNOT with this lattice (abstract; body stripped)."""

    def readout_x(self, readout_creg: Optional[ClassicalRegister]=None) -> None:
        """Measure logical X into `readout_creg` (abstract; body stripped)."""

    def readout_z(self, readout_creg: Optional[ClassicalRegister]=None) -> None:
        """Measure logical Z into `readout_creg` (abstract; body stripped)."""

    def lattice_readout_x(self) -> None:
        """Measure every lattice qubit in the X basis (abstract; body stripped)."""

    def lattice_readout_z(self) -> None:
        """Measure every lattice qubit in the Z basis (abstract; body stripped)."""

    def parse_readout(self, readout_string: str, readout_type: Optional[str]=None) -> Tuple[(int, Dict[(str, List[TQubit])])]:
        """Parse a measurement bitstring into (logical value, syndrome hits) (abstract; body stripped)."""
_factory
def factory():
    """Build a LangChain chat-zero-shot ReAct agent with a SerpAPI search tool.

    Returns:
        The initialized agent executor (verbose mode on).
    """
    search = SerpAPIWrapper()
    tools = [Tool(name='Search', func=search.run, description='useful for when you need to answer questions about current events. You should ask targeted questions')]
    # NOTE(review): 'gpt-3.5-turbo' is a chat model but is passed to the
    # completion-style OpenAI wrapper — confirm this is intentional.
    llm = OpenAI(temperature=0, model_name='gpt-3.5-turbo')
    agent = initialize_agent(tools, llm=llm, agent='chat-zero-shot-react-description', verbose=True)
    return agent
def inference_run(model, hparams, output_dir):
    """Restore the latest checkpoint from `output_dir` and decode a file (TF1 graph mode).

    Raises:
        NotFittedError: if no checkpoint exists under `output_dir`.
    """
    tf.logging.info('Build Model...')
    model_fn_inference = model_builder_inference(model, hparams=hparams)
    tf.logging.info('Build Graph...')
    checkpoint_path = saver.latest_checkpoint(output_dir)
    if (not checkpoint_path):
        raise NotFittedError(("Couldn't find trained model at %s." % output_dir))
    with ops.Graph().as_default() as g:
        tf.train.create_global_step(g)
        # [batch, time] int32 token ids fed at decode time.
        inputs_ph = tf.placeholder(tf.int32, [None, None])
        features = {'inputs': inputs_ph}
        labels = None
        infer_ops = model_fn_inference(features, labels)
        predictions = infer_ops[0]
        mon_sess = tf.train.MonitoredSession(session_creator=tf.train.ChiefSessionCreator(checkpoint_filename_with_path=checkpoint_path, config=session_config(gpu_mem_fraction=FLAGS.gpu_mem_fraction)))

        def predict_func(feed_fn=None):
            # Generator: run one batch and yield a per-example prediction dict.
            with ops.Graph().as_default() as g:
                inputs = feed_fn['inputs']
                feed = {inputs_ph: inputs}
                preds = mon_sess.run(predictions, feed)
                first_tensor = list(preds.values())[0]
                batch_length = first_tensor.shape[0]
                for i in range(batch_length):
                    (yield {key: value[i] for (key, value) in six.iteritems(preds)})
        tf.logging.info('Begin Decoding...')
        inference.decode_from_file(predict_func, hparams, FLAGS.decode_from_file, FLAGS.decode_to_file, FLAGS.decode_batch_size, FLAGS.decode_beam_size, FLAGS.decode_return_beams)
class AttrVI_ATTR_SRC_INCREMENT(RangeAttribute):
    """VISA attribute VI_ATTR_SRC_INCREMENT: number of elements by which the
    source offset advances after each transfer (0 = FIFO read, 1 = normal)."""
    # Interface types this attribute applies to.
    resources = [(constants.InterfaceType.pxi, 'INSTR'), (constants.InterfaceType.pxi, 'MEMACC'), (constants.InterfaceType.vxi, 'INSTR'), (constants.InterfaceType.vxi, 'MEMACC')]
    py_name = 'source_increment'
    visa_name = 'VI_ATTR_SRC_INCREMENT'
    visa_type = 'ViInt32'
    default = 1
    # Attribute is readable, writable, and local.
    (read, write, local) = (True, True, True)
    # Valid range is [0, 1]; no enumerated value set.
    (min_value, max_value, values) = (0, 1, None)
def orthoFrames2Versor_dist(A, B, eps=None):
    """Find a versor R mapping orthogonal frame B onto frame A via successive
    reflections, greedily reflecting the worst-matched pair first.

    Args:
        A, B: equal-length sequences of frame vectors (multivector-like,
            supporting -, **, abs and the geometric product `gp`).
        eps: convergence tolerance on the squared distance; defaults to
            `global_eps()`.

    Returns:
        (R, r_list): the composed versor and the list of reflection vectors.
    """
    # Work on copies so caller's frames are not mutated.
    A = A[:]
    B = B[:]
    if (len(A) != len(B)):
        raise ValueError('len(A)!=len(B)')
    if (eps is None):
        eps = global_eps()
    r_list = []
    dist = [abs(((a - b) ** 2)) for (a, b) in zip(A, B)]
    k = dist.index(max(dist))
    while (dist[k] >= eps):
        # Unit reflection vector between the current worst-matched pair.
        r = ((A[k] - B[k]) / abs((A[k] - B[k])))
        r_list.append(r)
        # NOTE(review): the FIRST pair is dropped each iteration even though
        # index k was selected — verify against the reference algorithm.
        A = A[1:]
        B = B[1:]
        if (len(A) == 0):
            break
        # Reflect the remaining A vectors: a -> -r a r.
        for j in range(len(A)):
            A[j] = (((- r) * A[j]) * r)
        dist = [abs(((a - b) ** 2)) for (a, b) in zip(A, B)]
        k = dist.index(max(dist))
    # Compose reflections in reverse order into a single versor.
    R = reduce(gp, r_list[::(- 1)])
    return (R, r_list)
@pytest.mark.fast
def test_Morse_Potential_effect_CO(T=3000, rtol=0.0001, verbose=True, warnings=True, *args, **kwargs):
    """Check that adding Morse-potential vibrational levels changes the CO
    partition function by less than `rtol` at temperature `T`.

    Fix: the original decorator had been mangled to a bare `.fast`
    (a syntax error); restored to `@pytest.mark.fast`.
    """
    vmax = 11
    vmax_morse = 48
    jmax = 300
    iso = 1
    S = Molecules['CO'][iso]['X']
    # Reference: harmonic/Dunham levels only (no Morse extension).
    db = PartFunc_Dunham(S, vmax=vmax, vmax_morse=0, Jmax=jmax, use_cached=False)
    Q_nomorse = db.at(T)
    # Same calculation with Morse-potential levels up to vmax_morse.
    db = PartFunc_Dunham(S, vmax=vmax, vmax_morse=vmax_morse, Jmax=jmax, use_cached=False)
    Q_morse = db.at(T)
    if verbose:
        printm('Morse vs no Morse potential (T={0}K)'.format(T))
        printm('Q_morse: {0:.3f}'.format(Q_morse))
        printm('Q_nomorse: {0:.3f}'.format(Q_nomorse))
        printm('Difference: {0:.4f}%'.format(((abs((Q_nomorse - Q_morse)) / Q_morse) * 100)))
    assert ((abs((Q_nomorse - Q_morse)) / Q_morse) < rtol)
class Effect5757(BaseEffect):
    """Overheat effect: boosts a sensor module's targeting attributes while
    the module is overloaded."""
    type = 'overheat'

    def handler(fit, module, context, projectionRange, **kwargs):
        """Apply the overload sensor-strength bonus to range, scan resolution,
        and all four sensor-strength attributes (no `self`: pyfa calls effect
        handlers as plain functions)."""
        module.boostItemAttr('maxTargetRangeBonus', module.getModifiedItemAttr('overloadSensorModuleStrengthBonus'), **kwargs)
        module.boostItemAttr('scanResolutionBonus', module.getModifiedItemAttr('overloadSensorModuleStrengthBonus'), stackingPenalties=True, **kwargs)
        for scanType in ('Gravimetric', 'Magnetometric', 'Radar', 'Ladar'):
            module.boostItemAttr('scan{}StrengthPercent'.format(scanType), module.getModifiedItemAttr('overloadSensorModuleStrengthBonus'), stackingPenalties=True, **kwargs)
def DenseNet201(pretrained=False, **kwargs):
    """Construct a DenseNet-201 model, optionally loading pretrained weights.

    The key-rewriting regex works around the renaming of nested layer
    parameters in newer torchvision checkpoints (norm.1 -> norm1 etc.).
    """
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs)
    if pretrained:
        pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet201'], model_dir='models/pretrained')
        # Rewrite legacy '...norm.1.weight' keys to '...norm1.weight' style.
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = (res.group(1) + res.group(2))
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        # strict=False: tolerate classifier-head mismatches.
        model.load_state_dict(state_dict, strict=False)
    return model
class SawyerHandlePullV1Policy(Policy):
    """Scripted policy for the Sawyer handle-pull-v1 task.

    Fix: the `@staticmethod`/`@assert_fully_parsed` decorators had been
    mangled to a bare `_fully_parsed` token (a NameError at class creation);
    restored here. `_desired_pos` is likewise a staticmethod — it takes the
    parsed observation dict, not `self`.
    """

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the raw observation vector into named components."""
        return {'hand_pos': obs[:3], 'handle_pos': obs[3:6], 'unused_info': obs[6:]}

    def get_action(self, obs):
        """Return a 4-vector action (xyz delta + grab effort) toward the handle."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = 1.0
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Staged target: align in x above the handle, then in y, then pull."""
        pos_curr = o_d['hand_pos']
        pos_button = (o_d['handle_pos'] + np.array([0.0, (- 0.02), 0.0]))
        if (abs((pos_curr[0] - pos_button[0])) > 0.04):
            return (pos_button + np.array([0.0, 0.0, 0.2]))
        elif (abs((pos_curr[2] - pos_button[2])) > 0.03):
            return (pos_button + np.array([0.0, (- 0.1), (- 0.01)]))
        elif (abs((pos_curr[1] - pos_button[1])) > 0.01):
            return np.array([pos_button[0], (pos_button[1] + 0.04), pos_curr[2]])
        else:
            return (pos_button + np.array([0.0, 0.04, 0.1]))
class OrbitController(PanZoomController):
    """Camera controller that orbits around a target point (azimuth/elevation)
    while keeping pan/zoom behavior from PanZoomController."""
    # Default input bindings: (action, mode, multiplier).
    _default_controls = {'mouse1': ('rotate', 'drag', (0.005, 0.005)), 'mouse2': ('pan', 'drag', (1, 1)), 'mouse4': ('quickzoom', 'peek', 2), 'wheel': ('zoom', 'push', (- 0.001)), 'alt+wheel': ('fov', 'push', (- 0.01))}

    def rotate(self, delta: Tuple, rect: Tuple, *, animate=False):
        """Rotate the camera by `delta` = (azimuth, elevation) radians.

        With animate=True an animated action is queued instead of an
        immediate update.
        """
        if animate:
            action_tuple = ('rotate', 'push', (1.0, 1.0))
            action = self._create_action(None, action_tuple, (0.0, 0.0), None, rect)
            action.set_target(delta)
            action.snap_distance = 0.01
            action.done = True
        elif self._cameras:
            self._update_rotate(delta)
        return self._update_cameras()

    def _update_rotate(self, delta):
        """Apply an (azimuth, elevation) rotation about the orbit target,
        clamping elevation to +/-89 degrees and keeping the target fixed."""
        assert (isinstance(delta, tuple) and (len(delta) == 2))
        (delta_azimuth, delta_elevation) = delta
        camera_state = self._get_camera_state()
        position = camera_state['position']
        rotation = camera_state['rotation']
        up = camera_state['reference_up']
        # Current elevation = angle between view direction and up, minus 90deg.
        forward = la.vec_transform_quat((0, 0, (- 1)), rotation)
        elevation = (la.vec_angle(forward, up) - (0.5 * np.pi))
        new_elevation = (elevation + delta_elevation)
        # Clamp so the camera never flips over the poles.
        bounds = ((((- 89) * np.pi) / 180), ((89 * np.pi) / 180))
        if (new_elevation < bounds[0]):
            delta_elevation = (bounds[0] - elevation)
        elif (new_elevation > bounds[1]):
            delta_elevation = (bounds[1] - elevation)
        # Azimuth about the world up axis, elevation about the camera x axis.
        r_azimuth = la.quat_from_axis_angle(up, (- delta_azimuth))
        r_elevation = la.quat_from_axis_angle((1, 0, 0), (- delta_elevation))
        rot1 = rotation
        rot2 = la.quat_mul(r_azimuth, la.quat_mul(rot1, r_elevation))
        # Move the position so the orbit target stays in place.
        pos1 = position
        pos2target1 = self._get_target_vec(camera_state, rotation=rot1)
        pos2target2 = self._get_target_vec(camera_state, rotation=rot2)
        pos2 = ((pos1 + pos2target1) - pos2target2)
        new_camera_state = {'position': pos2, 'rotation': rot2}
        self._set_camera_state(new_camera_state)
def simxLoadModel(clientID, modelPathAndName, options, operationMode):
    """CoppeliaSim remote-API wrapper: load a model file on the server.

    Returns:
        (return_code, base_handle): the C API status code and the handle of
        the loaded model's base object.
    """
    baseHandle = ct.c_int()
    # The C API expects bytes; encode str paths on Python 3.
    if ((sys.version_info[0] == 3) and (type(modelPathAndName) is str)):
        modelPathAndName = modelPathAndName.encode('utf-8')
    return (c_LoadModel(clientID, modelPathAndName, options, ct.byref(baseHandle), operationMode), baseHandle.value)
@pytest.mark.parametrize('names,expect', [([1, 2, 3], ['1', '2', '3']), (['', np.nan], ['', '']), (['', np.nan], ['', '']), (['', '', np.nan], ['', '', '']), (repair_names(['', '', np.nan], repair='minimal'), ['', '', ''])])
def test_minimal(names, expect):
    """'minimal' repair coerces names to str and maps NaN to ''.

    Fix: the decorator had been mangled to a bare `.parametrize` (syntax
    error); restored to `@pytest.mark.parametrize`.
    """
    assert (repair_names(names, repair='minimal') == expect)
class IPTest(object):
.parametrize('value', ['200.8.9.10', '127.0.0.1', '2001:db8:85a3::8a2e:370:7334', '::1'])
def test_valid_value(self, value):
assert (inputs.ip(value) == value)
.parametrize('value', ['foo', ' ' ' ' ' ' ' ' 'foo bar baz', 'foo ', ' ' ' '127.0'])
def test_bad_value(self, value):
with pytest.raises(ValueError):
inputs.ip(value)
def test_schema(self):
assert (inputs.ip.__schema__ == {'type': 'string', 'format': 'ip'}) |
class Path(object):
    """Locate the best-CCC checkpoint for a given label and cross-validation fold."""

    def __init__(self, label_name, fold_id):
        """
        Args:
            label_name: 'arousal' or 'valence'; selects the model root.
            fold_id: cross-validation fold whose checkpoint to find.
        """
        self.label_name = label_name
        self.fold_id = fold_id
        self.phase_path = {}
        # NOTE(review): a_root's path says 'valence_loss_type' and v_root's
        # says 'arousal_loss_type', yet a_root is chosen for 'arousal' —
        # the variable names and/or paths look swapped; verify on disk.
        a_root = '/media/newssd/Aff-Wild_experiments/phase_diff_5_fold/valence_loss_type:ccc_batch_size:64_alpha:1.0/model'
        v_root = '/media/newssd/Aff-Wild_experiments/phase_diff_5_fold/arousal_loss_type:ccc_batch_size:256_alpha:1.0/model'
        self.root_dir = (a_root if (label_name == 'arousal') else v_root)

    def path(self):
        """Return the single '*best_ccc*' checkpoint path for this fold.

        Raises:
            AssertionError: if zero or multiple matching checkpoints exist.
        """
        model_path = glob.glob(os.path.join(self.root_dir, '*best_ccc*'))
        model_path = [path for path in model_path if ('fold_{}'.format(self.fold_id) in path)]
        assert (len(model_path) == 1)
        return model_path[0]
def get_semanal_options(program_text: str, testcase: DataDrivenTestCase) -> Options:
    """Build mypy Options for a semantic-analysis-only test run.

    Parses per-test flags from `program_text`, then forces the settings the
    semanal test suite relies on (builtins fixtures, tracebacks, Python 3).
    """
    options = parse_options(program_text, testcase, 1)
    options.use_builtins_fixtures = True
    options.semantic_analysis_only = True
    options.show_traceback = True
    options.python_version = PYTHON3_VERSION
    options.force_uppercase_builtins = True
    return options
@pytest.mark.parametrize('screen,location,attribute', parameters)
def test_default_settings(manager_nospawn, minimal_conf_noscreen, screen, location, attribute):
    """Each bar widget's height/width matches the bar's own attribute.

    Fix: the decorator had been mangled to a bare `.parametrize` (syntax
    error); restored to `@pytest.mark.parametrize`.
    """
    config = minimal_conf_noscreen
    config.screens = [screen]
    manager_nospawn.start(config)
    bar = manager_nospawn.c.bar[location]
    info = bar.info()
    for dimension in ['height', 'width']:
        assert (info['widgets'][0][dimension] == info[attribute])
class TanhBlurBlock(nn.Module):
    """Soft-clip (scaled tanh) -> ReLU -> anti-aliasing blur block."""

    def __init__(self, in_filters, temp=10.0, sfilter=(1, 1), pad_mode='constant', **kwargs):
        """
        Args:
            in_filters: number of input channels (blur is per-channel).
            temp: temperature of the scaled tanh; larger = closer to identity.
            sfilter: separable blur filter taps.
            pad_mode: padding mode used by the blur layer.
        """
        super(TanhBlurBlock, self).__init__()
        self.temp = temp
        self.relu = layers.relu()
        self.tanh = nn.Tanh()
        self.blur = layers.blur(in_filters, sfilter=sfilter, pad_mode=pad_mode)

    def forward(self, x):
        # temp * tanh(x / temp): smooth saturation at +/-temp.
        x = (self.temp * self.tanh((x / self.temp)))
        x = self.relu(x)
        x = self.blur(x)
        return x

    def extra_repr(self):
        """Show the temperature in the module repr."""
        return ('temp=%.3e' % self.temp)
@pytest.mark.parametrize('properties', [{}, create_test_properties()])
def test_object_features(properties: dict):
    """n_features counts the scalar elements of every selected property.

    Fix: the decorator had been mangled to a bare `.parametrize` (syntax
    error); restored to `@pytest.mark.parametrize`.
    """
    (obj, _) = create_test_object()
    obj.properties = properties
    assert (obj.n_features == 0)
    keys = list(properties.keys())
    obj.set_features(keys)
    # Arrays contribute their full element count, not 1.
    n_keys = sum((np.asarray(p).size for p in properties.values()))
    assert (obj.n_features == n_keys)
def test_select_column_in_subquery_with_two_parenthesis_and_union_v2():
    """Column lineage through a parenthesized UNION ALL subquery: col1 of
    tab1 should trace back to col1 of both tab2 and tab3."""
    sql = 'INSERT INTO tab1\nSELECT col1\nFROM (\n    SELECT col1 FROM tab2\n    UNION ALL\n    SELECT col1 FROM tab3\n) dt'
    assert_column_lineage_equal(sql, [(ColumnQualifierTuple('col1', 'tab2'), ColumnQualifierTuple('col1', 'tab1')), (ColumnQualifierTuple('col1', 'tab3'), ColumnQualifierTuple('col1', 'tab1'))])
def tar_archive(context_tar):
    """Create one tar archive per entry in context_tar['archive'].

    Each entry maps 'in' (source path, archived under '.') to 'out'
    (destination archive file). Write/compression mode comes from the
    context via get_file_mode_for_writing.
    """
    logger.debug('start')
    write_mode = get_file_mode_for_writing(context_tar)
    for entry in context_tar['archive']:
        target_file = entry['out']
        source_path = entry['in']
        with tarfile.open(target_file, write_mode) as tar_out:
            logger.debug("Archiving '%s' to '%s'", source_path, target_file)
            tar_out.add(source_path, arcname='.')
        logger.info("Archived '%s' to '%s'", source_path, target_file)
    logger.debug('end')
def setup_checkpoint_file_name_prefix(args):
    """Build a checkpoint-file prefix by joining selected arg values with '-'.

    The attributes named in ``args.checkpoint_file_name_save_list`` are read
    from ``args``, stringified, and joined — e.g. ['lr', 'seed'] with lr=0.1,
    seed=3 yields '0.1-3'. An empty list yields ''.

    Improvement: replaces the manual accumulate-and-separator loop with the
    equivalent (and linear-time) str.join idiom; behavior is unchanged.
    """
    return '-'.join(str(getattr(args, name)) for name in args.checkpoint_file_name_save_list)
def makeUpdateMatrixDis(qnnArch, qnnArchGen, unitaries, storedStates, storedStatesDis, lda, ep, l, j, trainingData):
    """Build the unitary update e^K for perceptron j in layer l of a
    dissipative quantum neural network (QuTiP objects throughout).

    Args:
        qnnArch, qnnArchGen: layer widths of the network / generator.
        unitaries: current per-layer perceptron unitaries.
        storedStates, storedStatesDis: cached layer states (with dissipation).
        lda: regularization parameter lambda; ep: learning-rate epsilon.
        l, j: layer index and perceptron index being updated.
        trainingData: training input/output pairs.

    Returns:
        The matrix exponential of the averaged, scaled commutator sum.
    """
    numInputQubits = qnnArch[(l - 1)]
    summ = 0
    # Average the update commutator over all training samples.
    for x in range(len(storedStates)):
        firstPart = updateMatrixFirstPartDis(qnnArch, qnnArchGen, unitaries, storedStates, storedStatesDis, l, j, x)
        secondPart = updateMatrixSecondPartDis(qnnArch, qnnArchGen, unitaries, l, j, x, trainingData)
        mat = qt.commutator(firstPart, secondPart)
        # Trace out everything except the layer inputs and this perceptron's output qubit.
        keep = list(range(numInputQubits))
        keep.append((numInputQubits + j))
        mat = partialTraceKeep(mat, keep)
        summ = (summ + mat)
    # Scale: -epsilon * 2^m / (lambda * N), per the dissipative DQNN update rule.
    summ = ((((- ep) * (2 ** numInputQubits)) / (lda * len(storedStates))) * summ)
    return summ.expm()
def fill_template(template, *args):
    """Expand a lib2to3 node template into a list of cloned child nodes.

    Tokens \\x01-\\x05 in the template are placeholders substituted with the
    positional `args` (their prefix is cleared); any other token becomes a
    Name leaf. Empty matches are skipped.
    """
    parts = TEMPLATE_PATTERN.findall(template)
    kids = []
    for p in parts:
        if (p == ''):
            continue
        elif (p in '\x01\x02\x03\x04\x05'):
            # Placeholder byte -> corresponding positional argument (1-based).
            p = args[(ord(p) - 1)]
            p.prefix = ''
        else:
            p = Name(p)
        # Clone so the template/args can be reused safely.
        kids.append(p.clone())
    return kids
class ListSponsorsTemplateTag(TestCase):
    """Tests for the `list_sponsors` template tag."""

    def test_filter_sponsorship_with_logo_placement_benefits(self):
        """Only sponsorships with a logo benefit for the requested placement
        ('download') appear in the tag's context."""
        sponsorship = baker.make_recipe('sponsors.tests.finalized_sponsorship')
        baker.make_recipe('sponsors.tests.logo_at_download_feature', sponsor_benefit__sponsorship=sponsorship)
        context = list_sponsors('download')
        self.assertEqual('download', context['logo_place'])
        self.assertEqual(1, len(context['sponsorships']))
        self.assertIn(sponsorship, context['sponsorships'])
def infer_conv_output_attrs(module, input_channels, input_dim, batch_size=1, max_length=8):
    """Probe `module` with a dummy batch to discover its output shape.

    A random tensor of shape (batch_size, input_channels, max_length,
    input_dim) is pushed through the module; the channel count (dim 1) and
    last-dimension size of the result are returned as a tuple.
    """
    probe = torch.randn(batch_size, input_channels, max_length, input_dim)
    result = module(probe)
    return (result.shape[1], result.shape[-1])
class RectROI(ROI):
    """Rectangular ROI with a corner scale handle and optional side scalers."""

    def __init__(self, pos, size, centered=False, sideScalers=False, **args):
        """
        Args:
            pos, size: rectangle origin and extent (passed to ROI).
            centered: if True, scaling is anchored at the rectangle center
                instead of the opposite corner.
            sideScalers: if True, add extra handles on the right and top edges.
        """
        ROI.__init__(self, pos, size, **args)
        if centered:
            center = [0.5, 0.5]
        else:
            center = [0, 0]
        # Corner handle at (1, 1), anchored at `center`.
        self.addScaleHandle([1, 1], center)
        if sideScalers:
            self.addScaleHandle([1, 0.5], [center[0], 0.5])
            self.addScaleHandle([0.5, 1], [0.5, center[1]])
def _add_perm(caller, perm, **kwargs):
    """Add or (with delete=... in kwargs) remove a permission on the caller's
    prototype, case-insensitively; returns a status message.

    NOTE(review): if `perm` is falsy this returns None. Also, the delete
    branch mutates `perms` but (per the reconstructed structure) only the
    add branch calls `_set_prototype_value` — verify deletions persist.
    """
    if perm:
        perm_low = perm.lower()
        perms = _caller_permissions(caller)
        perms_low = [prm.lower() for prm in perms]
        if ('delete' in kwargs):
            try:
                ind = perms_low.index(perm_low)
                del perms[ind]
                text = "Removed Permission '{}'.".format(perm)
            except ValueError:
                text = 'Found no Permission to remove.'
        elif (perm_low in perms_low):
            text = 'Permission already set.'
        else:
            perms.append(perm)
            _set_prototype_value(caller, 'permissions', perms)
            text = "Added Permission '{}'".format(perm)
        return text
class IPTW():
    """Inverse probability of treatment weighting (IPTW) estimator.

    Fits a marginal structural model by GEE with treatment (and optionally
    missing-outcome) weights. Binary outcomes yield risk difference/ratio and
    odds ratio; continuous outcomes yield the average treatment effect.

    NOTE(review): indentation in this dump was flattened; block structure
    below is reconstructed and should be verified against upstream zEpid.
    """

    def __init__(self, df, treatment, outcome, weights=None, standardize='population'):
        """
        Args:
            df: input pandas DataFrame.
            treatment: binary exposure column name.
            outcome: outcome column name.
            weights: optional column of pre-existing sampling weights.
            standardize: target population — 'population', 'exposed' (ATT)
                or 'unexposed' (ATU).

        Raises:
            ValueError: if `standardize` is not a supported scheme.
        """
        self.treatment = treatment
        self.outcome = outcome
        self._missing_indicator = '__missing_indicator__'
        (self.df, self._miss_flag, self._continuous_outcome) = check_input_data(data=df, exposure=treatment, outcome=outcome, estimator='IPTW', drop_censoring=False, drop_missing=True, binary_exposure_only=True)
        self.average_treatment_effect = None
        self.risk_difference = None
        self.risk_ratio = None
        self.odds_ratio = None
        if (standardize in ['population', 'exposed', 'unexposed']):
            self.standardize = standardize
            if (standardize in ['exposed', 'unexposed']):
                # Robust-variance CIs can be too narrow for ATT/ATU.
                warnings.warn('For the ATT and the ATU, confidence intervals calculated using the robust-variance approach (what is currently done in zEpid) may underestimate the variance. Therefore when requesting the ATT or the ATU, it is recommended to use bootstrapped confidence intervals instead.', UserWarning)
        else:
            raise ValueError(('Please specify one of the currently supported weighting schemes: ' + 'population, exposed, unexposed'))
        self._weight_ = weights
        self.iptw = None
        self.ipmw = None
        self.ms_model = None
        self.__mdenom = None
        self._fit_missing_ = False
        self._miss_model = None
        self._continuous_y_type = None
        # Positivity diagnostics (filled by `positivity`).
        self._pos_avg = None
        self._pos_min = None
        self._pos_max = None
        self._pos_sd = None

    def treatment_model(self, model_denominator, model_numerator='1', stabilized=True, bound=False, print_results=True):
        """Estimate the propensity-score model and compute the IPT weights."""
        self.__mdenom = model_denominator
        (self.df['__denom__'], self.df['__numer__'], self.iptw) = iptw_calculator(df=self.df, treatment=self.treatment, model_denom=model_denominator, model_numer=model_numerator, weight=self._weight_, stabilized=stabilized, standardize=self.standardize, bound=bound, print_results=print_results)

    def missing_model(self, model_denominator, model_numerator=None, stabilized=True, bound=False, print_results=True):
        """Fit an inverse-probability-of-missing model for the outcome and
        store the resulting IPM weights in `self.ipmw`.

        Raises:
            ValueError: if the data contain no missing outcomes.
        """
        if (not self._miss_flag):
            raise ValueError('No missing outcome data is present in the data set')
        if (self.treatment not in model_denominator):
            warnings.warn('For the specified missing outcome model, the exposure variable should be included in the model', UserWarning)
        self._miss_model = ((self._missing_indicator + ' ~ ') + model_denominator)
        fitmodel = propensity_score(self.df, self._miss_model, print_results=print_results)
        if stabilized:
            # Stabilized weights: numerator model defaults to treatment only.
            if (model_numerator is None):
                mnum = self.treatment
            else:
                mnum = model_numerator
            numerator_model = propensity_score(self.df, ((self._missing_indicator + ' ~ ') + mnum), weights=self._weight_, print_results=print_results)
            n = numerator_model.predict(self.df)
        else:
            n = 1
        if bound:
            d = probability_bounds(fitmodel.predict(self.df), bounds=bound)
        else:
            d = fitmodel.predict(self.df)
        # Weight only observed outcomes; missing rows get NaN.
        self.ipmw = np.where((self.df[self._missing_indicator] == 1), (n / d), np.nan)
        self._fit_missing_ = True

    def marginal_structural_model(self, model):
        """Specify the right-hand side of the marginal structural model."""
        if (self.treatment not in model):
            raise ValueError('The treatment variable must be specified in the marginal structural model')
        self.ms_model = model

    def fit(self, continuous_distribution='gaussian'):
        """Fit the weighted GEE and populate the effect-estimate DataFrames.

        Raises:
            ValueError: if the treatment or marginal structural model has not
                been specified, or an unsupported distribution is requested.
        """
        if (self.__mdenom is None):
            raise ValueError('No model has been fit to generated predicted probabilities')
        if (self.ms_model is None):
            raise ValueError('No marginal structural model has been specified')
        if (self._miss_flag and (not self._fit_missing_)):
            warnings.warn('All missing outcome data is assumed to be missing completely at random. To relax this assumption to outcome data is missing at random please use the `missing_model()` function', UserWarning)
        ind = sm.cov_struct.Independence()
        full_msm = ((self.outcome + ' ~ ') + self.ms_model)
        df = self.df.copy()
        # Combined weight = IPTW * (IPMW if fitted) * (sampling weights if given).
        if (self.ipmw is None):
            if (self._weight_ is None):
                df['_ipfw_'] = self.iptw
            else:
                df['_ipfw_'] = (self.iptw * self.df[self._weight_])
        elif (self._weight_ is None):
            df['_ipfw_'] = (self.iptw * self.ipmw)
        else:
            df['_ipfw_'] = ((self.iptw * self.ipmw) * self.df[self._weight_])
        df = df.dropna()
        if self._continuous_outcome:
            if ((continuous_distribution == 'gaussian') or (continuous_distribution == 'normal')):
                f = sm.families.family.Gaussian()
            elif (continuous_distribution == 'poisson'):
                f = sm.families.family.Poisson()
            else:
                raise ValueError("Only 'gaussian' and 'poisson' distributions are supported")
            self._continuous_y_type = continuous_distribution
            fm = smf.gee(full_msm, df.index, df, cov_struct=ind, family=f, weights=df['_ipfw_']).fit()
            self.average_treatment_effect = pd.DataFrame()
            self.average_treatment_effect['labels'] = np.asarray(fm.params.index)
            self.average_treatment_effect.set_index(keys=['labels'], inplace=True)
            self.average_treatment_effect['ATE'] = np.asarray(fm.params)
            self.average_treatment_effect['SE(ATE)'] = np.asarray(fm.bse)
            self.average_treatment_effect['95%LCL'] = np.asarray(fm.conf_int()[0])
            self.average_treatment_effect['95%UCL'] = np.asarray(fm.conf_int()[1])
        else:
            # NOTE(review): reconstructed indentation — the catch_warnings
            # block is assumed to span all three binary-outcome fits
            # (identity and log links emit DomainWarning); verify upstream.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DomainWarning)
                # Risk difference: binomial family with identity link.
                f = sm.families.family.Binomial(sm.families.links.identity())
                fm = smf.gee(full_msm, df.index, df, cov_struct=ind, family=f, weights=df['_ipfw_']).fit()
                self.risk_difference = pd.DataFrame()
                self.risk_difference['labels'] = np.asarray(fm.params.index)
                self.risk_difference.set_index(keys=['labels'], inplace=True)
                self.risk_difference['RD'] = np.asarray(fm.params)
                self.risk_difference['SE(RD)'] = np.asarray(fm.bse)
                self.risk_difference['95%LCL'] = np.asarray(fm.conf_int()[0])
                self.risk_difference['95%UCL'] = np.asarray(fm.conf_int()[1])
                # Risk ratio: binomial family with log link (exponentiated).
                f = sm.families.family.Binomial(sm.families.links.log())
                fm = smf.gee(full_msm, df.index, df, cov_struct=ind, family=f, weights=df['_ipfw_']).fit()
                self.risk_ratio = pd.DataFrame()
                self.risk_ratio['labels'] = np.asarray(fm.params.index)
                self.risk_ratio.set_index(keys=['labels'], inplace=True)
                self.risk_ratio['RR'] = np.exp(np.asarray(fm.params))
                self.risk_ratio['SE(log(RR))'] = np.asarray(fm.bse)
                self.risk_ratio['95%LCL'] = np.exp(np.asarray(fm.conf_int()[0]))
                self.risk_ratio['95%UCL'] = np.exp(np.asarray(fm.conf_int()[1]))
                # Odds ratio: binomial family with default logit link.
                f = sm.families.family.Binomial()
                fm = smf.gee(full_msm, df.index, df, cov_struct=ind, family=f, weights=df['_ipfw_']).fit()
                self.odds_ratio = pd.DataFrame()
                self.odds_ratio['labels'] = np.asarray(fm.params.index)
                self.odds_ratio.set_index(keys=['labels'], inplace=True)
                self.odds_ratio['OR'] = np.exp(np.asarray(fm.params))
                self.odds_ratio['SE(log(OR))'] = np.asarray(fm.bse)
                self.odds_ratio['95%LCL'] = np.exp(np.asarray(fm.conf_int()[0]))
                self.odds_ratio['95%UCL'] = np.exp(np.asarray(fm.conf_int()[1]))

    def summary(self, decimal=3):
        """Print a text summary of the setup and fitted effect estimates."""
        print('')
        print(' Inverse Probability of Treatment Weights ')
        print('')
        fmt = 'Treatment: {:<15} No. Observations: {:<20}'
        print(fmt.format(self.treatment, self.df.shape[0]))
        fmt = 'Outcome: {:<15} No. Missing Outcome: {:<20}'
        print(fmt.format(self.outcome, np.sum(self.df[self.outcome].isnull())))
        fmt = 'g-Model: {:<15} Missing Model: {:<20}'
        if self._fit_missing_:
            m = 'Logistic'
        else:
            m = 'None'
        print(fmt.format('Logistic', m))
        print('')
        if self._continuous_outcome:
            print('Average Treatment Effect')
            print('')
            print(np.round(self.average_treatment_effect, decimals=decimal))
        else:
            print('Risk Difference')
            print('')
            print(np.round(self.risk_difference, decimals=decimal))
            print('')
            print('Risk Ratio')
            print(np.round(self.risk_ratio, decimals=decimal))
            print('')
            print('Odds Ratio')
            print(np.round(self.odds_ratio, decimals=decimal))
        print('')

    def run_diagnostics(self, iptw_only=True):
        """Print positivity + balance diagnostics and show KDE/Love plots."""
        self.positivity(iptw_only=iptw_only)
        print('\n')
        print(' Standardized Mean Differences')
        print('')
        print(self.standardized_mean_differences(iptw_only=iptw_only).set_index(keys='labels'))
        print('')
        plt.figure(figsize=[9, 4])
        plt.subplot(122)
        self.plot_kde()
        plt.title('Kernel Density of Propensity Scores')
        plt.subplot(121)
        self.plot_love(iptw_only=iptw_only)
        plt.title('Love Plot')
        plt.tight_layout()
        plt.show()

    def plot_kde(self, measure='probability', bw_method='scott', fill=True, color_e='b', color_u='r'):
        """Kernel density of the estimated propensity scores by treatment arm."""
        ax = plot_kde(df=self.df, treatment=self.treatment, probability='__denom__', measure=measure, bw_method=bw_method, fill=fill, color_e=color_e, color_u=color_u)
        return ax

    def plot_boxplot(self, measure='probability'):
        """Boxplot of the estimated propensity scores by treatment arm."""
        ax = plot_boxplot(df=self.df, treatment=self.treatment, probability='__denom__', measure=measure)
        return ax

    def positivity(self, decimal=3, iptw_only=True):
        """Print weight summary statistics as a positivity diagnostic."""
        # Note: assigning '_ipfw_' mutates self.df in place.
        if iptw_only:
            df = self.df
            df['_ipfw_'] = self.iptw
        else:
            df = self.df
            df['_ipfw_'] = (self.iptw * self.ipmw)
        (self._pos_avg, self._pos_sd, self._pos_min, self._pos_max) = positivity(df=df, weights='_ipfw_')
        print('')
        print(' Weight Positivity Diagnostics')
        print('')
        print('If the mean of the weights is far from either the min or max, this may\n indicate the model is incorrect or positivity is violated')
        print('Average weight should be')
        print('\t1.0 for stabilized')
        print('\t2.0 for unstabilized')
        print('')
        print('Mean weight: ', round(self._pos_avg, decimal))
        print('Standard Deviation: ', round(self._pos_sd, decimal))
        print('Minimum weight: ', round(self._pos_min, decimal))
        print('Maximum weight: ', round(self._pos_max, decimal))
        print('')

    def standardized_mean_differences(self, iptw_only=True):
        """Return weighted/unweighted standardized mean differences for the
        covariates of the denominator model."""
        if iptw_only:
            df = self.df
            df['_ipfw_'] = self.iptw
        else:
            df = self.df
            df['_ipfw_'] = (self.iptw * self.ipmw)
        s = standardized_mean_differences(df=df, treatment=self.treatment, weight='_ipfw_', formula=self.__mdenom)
        return s

    def plot_love(self, color_unweighted='r', color_weighted='b', shape_unweighted='o', shape_weighted='o', iptw_only=True):
        """Love plot of standardized mean differences before/after weighting."""
        if iptw_only:
            df = self.df
            df['_ipfw_'] = self.iptw
        else:
            df = self.df
            df['_ipfw_'] = (self.iptw * self.ipmw)
        # Passing self.df is equivalent here: df above IS self.df (in-place column add).
        ax = plot_love(df=self.df, treatment=self.treatment, weight='_ipfw_', formula=self.__mdenom, color_unweighted=color_unweighted, color_weighted=color_weighted, shape_unweighted=shape_unweighted, shape_weighted=shape_weighted)
        return ax
def test_back_and_forth_ito():
    """Integrating an Ito SDE forward then through its time-reflection should
    recover the initial state (round-trip check)."""
    (f, g, b, y0, ts, dt) = make_example_sde(dt=0.0001)
    (fr, gr, br, tr) = time_reflect_ito(f, g, b, ts)
    ys = ito_integrate(f, g, y0, ts, b, dt)
    # Integrate backwards from the forward trajectory's endpoint.
    rys = ito_integrate(fr, gr, ys[(- 1)], tr, br, dt)[::(- 1)]
    assert np.allclose(ys[0], rys[0], rtol=0.001, atol=0.001)
def extra_english(corpus_path, split):
    """Extract the English column from an `all_talks_<split>.tsv` TED dump
    into `all_talks_<split>.en`, then de-tokenize the result."""
    split_type_file_path = os.path.join(corpus_path, f'all_talks_{split}.tsv')
    output_split_type_file_path = os.path.join(corpus_path, f'all_talks_{split}.en')
    with io.open(split_type_file_path, 'r', encoding='utf8') as fp, io.open(output_split_type_file_path, 'w', encoding='utf8') as fw:
        # QUOTE_NONE: talk text may contain quote characters that are not CSV quoting.
        reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
        for row in reader:
            line = row['en']
            fw.write((line + '\n'))
    de_tok(output_split_type_file_path, 'en')
@pytest.mark.parametrize('setting, third_party, accepted', [('all', False, True), ('never', False, False), ('no-3rdparty', False, True), ('no-3rdparty', True, False)])
def test_accept_cookie(config_stub, filter_request, setting, third_party, accepted):
    """Cookie accept policy vs third-party flag yields the expected decision.

    Fix: the decorator had been mangled to a bare `.parametrize` (syntax
    error); restored to `@pytest.mark.parametrize`.
    """
    config_stub.val.content.cookies.accept = setting
    filter_request.thirdParty = third_party
    assert (cookies._accept_cookie(filter_request) == accepted)
def setup(args):
    """Build and freeze a detectron2 config from the CLI args, run the
    default setup, and attach a 'mask2former' logger."""
    cfg = get_cfg()
    add_deeplab_config(cfg)
    add_maskformer2_config(cfg)
    cfg.merge_from_file(args.config_file)
    # Command-line overrides take precedence over the config file.
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name='mask2former')
    return cfg
def _accuracy_param_check(average: Optional[str], num_classes: Optional[int], k: int) -> None:
average_options = ('micro', 'macro', 'none', None)
if (average not in average_options):
raise ValueError(f'`average` was not in the allowed value of {average_options}, got {average}.')
if ((average != 'micro') and ((num_classes is None) or (num_classes <= 0))):
raise ValueError(f'num_classes should be a positive number when average={average}. Got num_classes={num_classes}.')
if (type(k) != int):
raise TypeError(f'Expected `k` to be an integer, but {type(k)} was provided.')
if (k < 1):
raise ValueError(f'Expected `k` to be an integer greater than 0, but {k} was provided.') |
def get_normalize_layer(dataset: str, diff=None, vit=None) -> torch.nn.Module:
    """Return the NormalizeLayer matching the dataset / model variant.

    Precedence: `diff` (diffusion stats) > `vit` (ViT CIFAR-10 stats) >
    dataset name. Returns None for an unrecognized dataset (unchanged
    behavior).

    Fix: a stray `print('vit norm')` sat after the `return` of the
    'cifar10_vit' branch and immediately before the next `elif`, which is
    both dead code and — at any plausible indentation — a syntax error.
    Removed.
    """
    if diff:
        return NormalizeLayer(_DIFF_MEAN, _DIFF_STD)
    if vit:
        return NormalizeLayer(_CIFAR10_MEAN_VIT, _CIFAR10_STDDEV_VIT)
    if dataset == 'imagenet':
        return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
    elif dataset == 'cifar10':
        return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
    elif dataset == 'cifar10_vit':
        return NormalizeLayer(_CIFAR10_MEAN_VIT, _CIFAR10_STDDEV_VIT)
    elif dataset == 'imagenet32':
        # ImageNet-32 reuses the full ImageNet statistics.
        return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
def prettify_print_name(name):
    """Convert a plain variable name into a LaTeX-ish display name by peeling
    off known sub-/superscript suffixes, applying overrides and Greek-letter
    escapes, and re-attaching scripts.

    Names already containing '{' or '\\' (or None) are returned untouched.
    """
    if ((name is None) or ('{' in name) or ('\\' in name)):
        return name
    subscripts = []
    superscripts = []
    average = False
    processing = True
    # Repeatedly strip one recognized suffix per pass until none match.
    while processing:
        processing = False
        for superscript in ['init', 'ref', 'typ', 'max', '0', 'surf']:
            if ((f'_{superscript}_' in name) or name.endswith(f'_{superscript}')):
                superscripts.append(superscript)
                name = name.replace(f'_{superscript}', '')
                processing = True
                break
        # NOTE(review): this second pass matches a bare '0' ANYWHERE in the
        # name (not only as a suffix) — e.g. 'c0x' would lose its '0'; and
        # '0' already appears in the suffix list above. Verify intent.
        for superscript in ['0']:
            if (superscript in name):
                superscripts.append(superscript)
                name = name.replace(superscript, '')
                processing = True
                break
        for subscript in ['cc', 'dl', 'R', 'e', 's', 'n', 'p', 'amb']:
            if ((f'_{subscript}_' in name) or name.endswith(f'_{subscript}')):
                subscripts.append(subscript)
                name = name.replace(f'_{subscript}', '')
                processing = True
                break
        # '_av'/'_xav' mark an averaged quantity (rendered as an overline).
        for av in ['av', 'xav']:
            if ((f'_{av}_' in name) or name.endswith(f'_{av}')):
                average = True
                name = name.replace(f'_{av}', '')
                processing = True
                break
    if (name in PRINT_NAME_OVERRIDES):
        name = PRINT_NAME_OVERRIDES[name]
    if (name == 'eps'):
        name = '\\epsilon'
    if (name == 'eps_c'):
        name = '(\\epsilon c)'
    if (name.lower() in GREEK_LETTERS):
        name = ('\\' + name)
    if average:
        name = (('\\overline{' + name) + '}')
    if subscripts:
        name += (('_{\\mathrm{' + ','.join(subscripts)) + '}}')
    if superscripts:
        name += (('^{\\mathrm{' + ','.join(superscripts)) + '}}')
    return name
def ComputeCoverage(p, bias, norm):
    """Count how many 20-degree angular bins (of 360) are covered by the
    calibration points `p` after removing `bias` and aligning `norm` to +z.

    Returns:
        Number of distinct bins hit (0..18); higher means better coverage.
    """
    # Rotation taking the plane normal onto the +z axis.
    q = quaternion.vec2vec2quat(norm, [0, 0, 1])

    def ang(p):
        # Project a (position, direction) sample into the normal plane and
        # return its in-plane angle in degrees.
        c = quaternion.rotvecquat(vector.sub(p[:3], bias), q)
        d = quaternion.rotvecquat(p[3:6], q)
        v = quaternion.rotvecquat(c, quaternion.vec2vec2quat(d, [0, 0, 1]))
        v = vector.normalize(v)
        return math.degrees(math.atan2(v[1], v[0]))
    spacing = 20
    angles = ([False] * int((360 / spacing)))
    count = 0
    for a in lmap(ang, p):
        i = int((resolv(a, 180) / spacing))
        if (not angles[i]):
            angles[i] = True
            count += 1
    return count
class CeleryRouterConfigTest(harness.CustomRouterMixin, TestCase):
    """Configuration tests for CeleryRouter's per-backend `is_eager` flag."""
    router_class = 'rapidsms.router.celery.CeleryRouter'

    def test_eager_invalid_backend(self):
        """Unknown backend names are not eager."""
        self.backends = {'mockbackend': {'ENGINE': harness.MockBackend}}
        self.set_backends()
        router = get_router()
        self.assertFalse(router.is_eager('foo'))

    def test_eager_not_set(self):
        """A backend without 'router.celery.eager' defaults to non-eager."""
        self.backends = {'mockbackend': {'ENGINE': harness.MockBackend}}
        self.set_backends()
        router = get_router()
        self.assertFalse(router.is_eager('mockbackend'))

    def test_outgoing(self):
        """Setting 'router.celery.eager': True marks the backend eager."""
        self.backends = {'mockbackend': {'ENGINE': harness.MockBackend, 'router.celery.eager': True}}
        self.set_backends()
        router = get_router()
        self.assertTrue(router.is_eager('mockbackend'))
def ceaf(clusters, gold_clusters, phi_similarity):
    """CEAF coreference score components via optimal cluster alignment.

    Builds the pairwise phi-similarity matrix between gold and system
    clusters, finds the one-to-one alignment maximizing total similarity
    (Hungarian algorithm), and returns the precision/recall components
    (p_num, p_den, r_num, r_den) = (sim, |clusters|, sim, |gold_clusters|).
    """
    n_gold = len(gold_clusters)
    n_sys = len(clusters)
    scores = np.zeros((n_gold, n_sys))
    for gi in range(n_gold):
        for si in range(n_sys):
            scores[(gi, si)] = phi_similarity(gold_clusters[gi], clusters[si])
    # Maximize total similarity == minimize its negation.
    (row_ind, col_ind) = linear_sum_assignment(-scores)
    similarity = sum(scores[(row_ind, col_ind)])
    return (similarity, n_sys, similarity, n_gold)
def filter_lines(lines, n_jobs, isomeric):
    """Process raw SMILES lines in parallel and return a deduplicated frame.

    Parameters: `lines` — iterable of raw input lines; `n_jobs` — worker
    process count; `isomeric` — forwarded to `process_molecule`.
    Returns a DataFrame with 'ID', 'SMILES' and 'scaffold' columns.
    """
    logger.info('Filtering SMILES')
    with Pool(n_jobs) as pool:
        process_molecule_p = partial(process_molecule, isomeric=isomeric)
        # Drop molecules that failed processing (None results).
        dataset = [x for x in tqdm(pool.imap_unordered(process_molecule_p, lines), total=len(lines), miniters=1000) if (x is not None)]
        dataset = pd.DataFrame(dataset, columns=['ID', 'SMILES'])
        # Sort before dropping duplicates so the kept row per key is deterministic.
        dataset = dataset.sort_values(by=['ID', 'SMILES'])
        dataset = dataset.drop_duplicates('ID')
        dataset = dataset.sort_values(by='ID')
        dataset = dataset.drop_duplicates('SMILES')
        dataset['scaffold'] = pool.map(compute_scaffold, dataset['SMILES'].values)
    return dataset
class linear_attribute_model(nn.Module):
    """MLP head mapping attribute features to a joint embedding space.

    Architecture: input_dim -> 1024 -> 512 -> 256 -> output_dim with ReLU
    between layers; `logit_scale` is a learnable temperature initialized to
    log(1/0.07). `args` is accepted for interface compatibility but unused.
    """

    def __init__(self, args, input_dim=1536, output_dim=128):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, 1024, bias=True)
        self.fc2 = nn.Linear(1024, 512, bias=True)
        self.fc3 = nn.Linear(512, 256, bias=True)
        self.fc4 = nn.Linear(256, output_dim, bias=True)
        # Learnable temperature, stored in log space.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def forward(self, x):
        # ReLU after each hidden layer; the final projection stays linear.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        return self.fc4(x)
# NOTE(review): this bare call looks like a mangled decorator — presumably
# `@time_machine.travel('2020-10-10 10:00:00', tick=False)` applied to the
# test below; confirm against version control.
_machine.travel('2020-10-10 10:00:00', tick=False)
def test_send_voucher_via_email(rf, grant_factory, conference_factory, mocker):
    """The admin action queues one voucher email per grant of the conference."""
    mocker.patch('grants.admin.messages')
    mock_send_email = mocker.patch('grants.admin.send_grant_voucher_email')
    conference = conference_factory(pretix_speaker_voucher_quota_id=123)
    grant = grant_factory(status=Grant.Status.confirmed, conference=conference, pretix_voucher_id=2345, voucher_code='GRANT-532VCT')
    # Run the admin action over every grant belonging to the conference.
    send_voucher_via_email(None, rf.get('/'), queryset=Grant.objects.filter(conference=conference))
    # The Celery task must have been scheduled for the confirmed grant.
    mock_send_email.delay.assert_has_calls([call(grant_id=grant.id)])
def as_tuple(tr, dataquality='D'):
    """Flatten a pyrocko trace into the tuple layout expected by mseed_ext.

    Times are converted to integer high-precision timestamps; the sampling
    rate is derived from the trace's sample spacing.
    """
    from pyrocko import mseed_ext
    tmin_hpt = int(round(tr.tmin * mseed_ext.HPTMODULUS))
    tmax_hpt = int(round(tr.tmax * mseed_ext.HPTMODULUS))
    sampling_rate = 1.0 / tr.deltat
    return (tr.network, tr.station, tr.location, tr.channel,
            tmin_hpt, tmax_hpt, sampling_rate, dataquality, tr.get_ydata())
class TerminusFindTerminalMixin():
    """Mixin that locates the most relevant Terminus terminal in a window."""

    def find_terminal(self, window, tag=None, panel_only=False, visible_only=False):
        """Return the best-matching Terminal, or None.

        Search order: explicitly tagged terminal; most recently used visible
        view, then panel; any visible Terminus panel/view; then — unless
        `visible_only` — the same sequence without the visibility requirement.
        `panel_only` restricts matches to terminals shown in a panel.
        """
        # A tagged terminal wins outright, regardless of visibility.
        if tag:
            terminal = Terminal.from_tag(tag)
            if terminal:
                return terminal
        view = None
        recency_manager = RecencyManager.from_window(window)
        if (not recency_manager):
            return
        # Pass 1: the most recently used terminal view, if acceptable.
        if (not view):
            view = recency_manager.recent_view()
        if view:
            terminal = Terminal.from_id(view.id())
            if ((not terminal) or (panel_only and (not terminal.show_in_panel))):
                view = None
        if view:
            # Reject the candidate when its panel/tab is not currently shown.
            if terminal.show_in_panel:
                if (not panel_is_visible(view)):
                    view = None
            elif (not view_is_visible(view)):
                view = None
        # Pass 2: the most recently used output panel, if it hosts a terminal.
        if (not view):
            panel_name = recency_manager.recent_panel()
            if panel_name:
                view = window.find_output_panel(panel_name)
        if view:
            terminal = Terminal.from_id(view.id())
            if (not terminal):
                view = None
        if view:
            if terminal.show_in_panel:
                if (not panel_is_visible(view)):
                    view = None
            elif (not view_is_visible(view)):
                view = None
        # Pass 3: any currently visible Terminus panel, then visible view.
        if (not view):
            view = self.get_terminus_panel(window, visible_only=True)
        if ((not view) and (not panel_only)):
            view = self.get_terminus_view(window, visible_only=True)
        # Pass 4: repeat the search without the visibility requirement.
        if (not visible_only):
            if (not view):
                view = recency_manager.recent_view()
            if view:
                terminal = Terminal.from_id(view.id())
                if ((not terminal) or (panel_only and (not terminal.show_in_panel))):
                    view = None
            if (not view):
                panel_name = recency_manager.recent_panel()
                if panel_name:
                    view = window.find_output_panel(panel_name)
            if view:
                terminal = Terminal.from_id(view.id())
                if (not terminal):
                    view = None
            if (not view):
                view = self.get_terminus_panel(window, visible_only=False)
            if ((not view) and (not panel_only)):
                view = self.get_terminus_view(window, visible_only=False)
        # Resolve the final view (if any) back to its Terminal object.
        if view:
            terminal = Terminal.from_id(view.id())
        else:
            terminal = None
        return terminal

    def get_terminus_panel(self, window, visible_only=False):
        """Return the first output-panel view hosting a terminal, or None."""
        if visible_only:
            # Only the active panel can be visible.
            active_panel = window.active_panel()
            panels = ([active_panel] if active_panel else [])
        else:
            panels = window.panels()
        for panel in panels:
            panel_name = panel.replace('output.', '')
            # The build-results panel is never a terminal host.
            if (panel_name == EXEC_PANEL):
                continue
            panel_view = window.find_output_panel(panel_name)
            if panel_view:
                terminal = Terminal.from_id(panel_view.id())
                if terminal:
                    return panel_view
        return None

    def get_terminus_view(self, window, visible_only=False):
        """Return the first (optionally visible) tab view hosting a terminal."""
        for view in window.views():
            if visible_only:
                if (not view_is_visible(view)):
                    continue
            terminal = Terminal.from_id(view.id())
            if terminal:
                return view
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four residual stages, 4x4 average pool,
    then a linear classifier. `features(x)` exposes the pooled embedding."""

    def __init__(self, block, num_blocks, num_classes=10, num_of_channels=3):
        super().__init__()
        self.in_planes = 64
        # Stem keeps the spatial resolution, suited to 32x32 inputs.
        self.conv1 = nn.Conv2d(num_of_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one may downsample."""
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.linear(self.features(x))

    def features(self, x):
        """Return the flattened feature vector before the classifier."""
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return out.view(out.size(0), -1)
def test_emit_warning_when_event_loop_is_explicitly_requested_in_coroutine_method(pytester: Pytester):
    """A coroutine test requesting `event_loop` passes but emits a warning.

    The embedded test module previously contained a bare `.asyncio` where
    the `@pytest.mark.asyncio` decorator belongs — a syntax error in the
    generated file that would prevent the expected `passed=1` outcome.
    """
    pytester.makepyfile(
        dedent(
            """\
            import pytest

            class TestEmitsWarning:
                @pytest.mark.asyncio
                async def test_coroutine_emits_warning(self, event_loop):
                    pass
            """
        )
    )
    result = pytester.runpytest('--asyncio-mode=strict', '-W default')
    result.assert_outcomes(passed=1, warnings=1)
    result.stdout.fnmatch_lines(['*is asynchronous and explicitly requests the "event_loop" fixture*'])
class GroundStateTest(unittest.TestCase):
    """Sanity check for get_ground_state on a small Hermitian operator."""

    def test_get_ground_state_hermitian(self):
        # Two-qubit Hermitian operator with known ground energy -2.
        operator = get_sparse_operator(QubitOperator('Y0 X1') + QubitOperator('Z0 Z1'))
        ground = get_ground_state(operator)
        # Expected ground state: (i|01> + |10>) / sqrt(2).
        expected_state = csc_matrix(([1j, 1], ([1, 2], [0, 0])), shape=(4, 1), dtype=numpy.complex128).A
        expected_state /= numpy.sqrt(2.0)
        self.assertAlmostEqual(ground[0], -2)
        # |<expected|ground>| == 1 means equality up to a global phase.
        overlap = numpy.absolute(expected_state.T.conj().dot(ground[1]))[0]
        self.assertAlmostEqual(overlap, 1.0)
def _get_layer_input(layer: tf.keras.layers.Layer, model_layers_connections: ModelLayerConnectionsProperties.TYPE) -> tf.keras.layers.Layer:
    """Look up the input tensor(s) of `layer` from the recorded connections.

    When the layer's inbound nodes are unknown, falls back to the most
    recently added output tensor and records that assumption back into the
    connections map.
    """
    try:
        inbound_names = model_layers_connections[ModelLayerConnectionsProperties.INBOUND_NODES][layer.name]
        layer_input = [model_layers_connections[ModelLayerConnectionsProperties.OUTPUT_TENSORS][name] for name in inbound_names]
        # A single-input layer gets the tensor itself rather than a list.
        if len(layer_input) == 1:
            layer_input = layer_input[0]
    except KeyError:
        # Unknown connection: assume the layer consumes the latest output.
        layer_input = _get_most_recently_added_output_tensor(model_layers_connections)
        model_layers_connections[ModelLayerConnectionsProperties.INBOUND_NODES].update({layer.name: [layer_input.name]})
        _logger.warning('Could not find input tensor for layer: %s. Using %s as input, the most recent output tensor.', layer.name, layer_input.name)
    return layer_input
class SplitAttentionConv2d(nn.Module):
    """Split-Attention Conv2d (ResNeSt).

    A grouped conv produces `radix` feature splits; a pooled, two-layer
    attention MLP gates each split and the weighted sum is returned.
    """

    def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, radix=2, reduction_factor=4, conv_cfg=None, norm_cfg=dict(type='BN')):
        super().__init__()
        # Bottleneck width of the attention MLP, never below 32 channels.
        inter_channels = max((in_channels * radix) // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.conv = build_conv_layer(conv_cfg, in_channels, channels * radix, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups * radix, bias=False)
        self.norm0_name, norm0 = build_norm_layer(norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        self.fc1 = build_conv_layer(None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the norm layer registered under the configurable
        ``norm0`` name.

        Must be a property: ``forward`` invokes ``self.norm0(x)``, which
        would pass ``x`` as an unexpected argument to the bare method.
        """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the norm layer registered under the ``norm1`` name."""
        return getattr(self, self.norm1_name)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)
        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            # Sum the radix splits to form the global descriptor input.
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        gap = self.norm1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        # Radix-wise softmax produces per-split attention weights.
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
def load_resnet_encoder(checkpoint_path, device):
    """Build a ResNetModel in eval mode and load weights from a checkpoint.

    Checkpoint keys are assumed to carry a 6-character prefix (e.g.
    'model.') that is stripped before loading — TODO confirm the prefix.
    """
    model = ResNetModel(512).eval().to(device)
    checkpoint = torch.load(checkpoint_path, map_location=device)
    stripped = {}
    for key, value in checkpoint.items():
        try:
            stripped[key[6:]] = checkpoint[key]
        except KeyError:
            # NOTE(review): keys come from .items(), so this branch looks
            # unreachable; kept for behavioral parity.
            stripped[key] = value
    model.load_state_dict(stripped)
    return model
def nice_time_diff(time_base: datetime, time_now: datetime) -> Tuple[(str, float)]:
    """Format the offset of `time_now` from `time_base` for display.

    Returns (formatted_string, elapsed_seconds): microsecond/millisecond
    units below one second, six-decimal seconds below ten, and an absolute
    ISO timestamp beyond that.
    """
    elapsed = time_now - time_base
    total_seconds = elapsed.total_seconds()
    if total_seconds < 0.001:
        # NOTE(review): prints the raw microsecond count with an ' s' unit —
        # presumably intended as microseconds; confirm before changing.
        return (f'+ {elapsed.microseconds: 10.0f} s', total_seconds)
    if total_seconds < 1:
        return (f'+ {(elapsed.microseconds / 1000): 10.3f} ms', total_seconds)
    if total_seconds < 10:
        padded = f'{total_seconds: 9.6f}'
        # Insert a thin gap between the first three and last three decimals.
        return (f'+ {padded[:6]} {padded[6:]} s', total_seconds)
    return (time_now.isoformat(), total_seconds)
def convert_label_map_to_categories(label_map, max_num_classes, use_display_name=True):
    """Translate a StringIntLabelMap proto into a COCO-style category list.

    Args:
        label_map: label map proto, or a falsy value to generate placeholder
            names 'category_1' ... 'category_<max_num_classes>'.
        max_num_classes: largest id (inclusive) to keep.
        use_display_name: prefer an item's display_name over its name.

    Returns:
        A list of {'id': ..., 'name': ...} dicts, one per unique label id.
    """
    # Without a label map, synthesize sequential placeholder categories.
    if not label_map:
        label_id_offset = 1
        return [
            {'id': class_id + label_id_offset, 'name': 'category_{}'.format(class_id + label_id_offset)}
            for class_id in range(max_num_classes)
        ]
    categories = []
    seen_ids = []
    for item in label_map.item:
        if not (0 < item.id <= max_num_classes):
            logging.info('Ignore item %d since it falls outside of requested label range.', item.id)
            continue
        if use_display_name and item.HasField('display_name'):
            name = item.display_name
        else:
            name = item.name
        # Keep only the first occurrence of each id.
        if item.id not in seen_ids:
            seen_ids.append(item.id)
            categories.append({'id': item.id, 'name': name})
    return categories
def get_parser(parser=None):
    """Build (or extend) the CLI parser for the VAE model and trainer.

    A fresh ArgumentParser is created when none is supplied, so the
    function can also augment an existing parser with these groups.
    """
    if parser is None:
        parser = argparse.ArgumentParser()

    # --- Model architecture -------------------------------------------------
    model = parser.add_argument_group('Model')
    model.add_argument('--q_cell', type=str, default='gru', choices=['gru'], help='Encoder rnn cell type')
    model.add_argument('--q_bidir', default=False, action='store_true', help='If to add second direction to encoder')
    model.add_argument('--q_d_h', type=int, default=256, help='Encoder h dimensionality')
    model.add_argument('--q_n_layers', type=int, default=1, help='Encoder number of layers')
    model.add_argument('--q_dropout', type=float, default=0.5, help='Encoder layers dropout')
    model.add_argument('--d_cell', type=str, default='gru', choices=['gru'], help='Decoder rnn cell type')
    model.add_argument('--d_n_layers', type=int, default=3, help='Decoder number of layers')
    model.add_argument('--d_dropout', type=float, default=0, help='Decoder layers dropout')
    model.add_argument('--d_z', type=int, default=128, help='Latent vector dimensionality')
    model.add_argument('--d_d_h', type=int, default=512, help='Decoder hidden dimensionality')
    model.add_argument('--freeze_embeddings', default=False, action='store_true', help='If to freeze embeddings while training')

    # --- Training schedule --------------------------------------------------
    train = parser.add_argument_group('Train')
    train.add_argument('--n_batch', type=int, default=512, help='Batch size')
    train.add_argument('--clip_grad', type=int, default=50, help='Clip gradients to this value')
    train.add_argument('--kl_start', type=int, default=0, help='Epoch to start change kl weight from')
    train.add_argument('--kl_w_start', type=float, default=0, help='Initial kl weight value')
    train.add_argument('--kl_w_end', type=float, default=0.05, help='Maximum kl weight value')
    train.add_argument('--lr_start', type=float, default=(3 * 0.0001), help='Initial lr value')
    train.add_argument('--lr_n_period', type=int, default=10, help='Epochs before first restart in SGDR')
    train.add_argument('--lr_n_restarts', type=int, default=10, help='Number of restarts in SGDR')
    train.add_argument('--lr_n_mult', type=int, default=1, help='Mult coefficient after restart in SGDR')
    train.add_argument('--lr_end', type=float, default=(3 * 0.0001), help='Maximum lr weight value')
    train.add_argument('--n_last', type=int, default=1000, help='Number of iters to smooth loss calc')
    train.add_argument('--n_jobs', type=int, default=1, help='Number of threads')
    train.add_argument('--n_workers', type=int, default=1, help='Number of workers for DataLoaders')
    return parser
def _interpolate_get_scales(g, scale_factor, dim):
    """Build the ONNX 'scales' input for Resize: [1, 1, s, s, ...].

    The two leading ones leave batch and channel dimensions unscaled;
    a scalar scale factor is broadcast across all spatial dimensions.
    """
    offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
    if isinstance(scale_factor.type(), torch._C.ListType):
        # Already a per-dimension list: just prepend the leading ones.
        return g.op('Concat', offsets, scale_factor, axis_i=0)
    # Scalar: promote to rank 1, cast to float, repeat per spatial dim.
    scale_factor = _unsqueeze_helper(g, scale_factor, 0)
    scale_factor = g.op('Cast', scale_factor, to_i=cast_pytorch_to_onnx['Float'])
    spatial_scales = [scale_factor] * (dim - 2)
    return g.op('Concat', offsets, *spatial_scales, axis_i=0)
class Migration(migrations.Migration):
    """Drop the unused per-language 'verbose_name_plural' fields."""

    dependencies = [('questions', '0091_alter_questionset_options')]

    # Remove verbose_name_plural_lang1..lang5 from each of the three models,
    # in the same order as the original hand-written list.
    operations = [
        migrations.RemoveField(model_name=model_name, name='verbose_name_plural_lang{}'.format(index))
        for model_name in ('page', 'question', 'questionset')
        for index in range(1, 6)
    ]
def make_recursive_list(fn):
    """Wrap `fn` so it can be applied leaf-wise over nested containers.

    The returned function takes a sequence of identically-structured
    lists/tuples/dicts (with tensors or plain values at the leaves) and
    applies `fn` across corresponding leaves, preserving the structure.
    """
    def recursive_map(tensors):
        if tensors is None:
            return tensors
        head = tensors[0]
        if isinstance(head, (list, tuple)):
            # Recurse element-wise, rebuilding the same container type.
            return type(head)(map(recursive_map, zip(*tensors)))
        if isinstance(head, dict):
            return map_dict(recursive_map, listdict2dictlist(tensors))
        if isinstance(head, torch.Tensor):
            return fn(*tensors)
        # Unknown leaf type: attempt to apply fn directly.
        try:
            return fn(*tensors)
        except Exception as e:
            print('The following error was raised when recursively applying a function:')
            print(e)
            raise ValueError('Type {} not supported for recursive map'.format(type(tensors)))
    return recursive_map
# NOTE(review): this bare call looks like a decorator whose '@with' prefix was
# lost — presumably `@with_fixtures(WebFixture, QueryStringFixture,
# ResponsiveWidgetScenarios)` applied to the test below; confirm against
# version control.
_fixtures(WebFixture, QueryStringFixture, ResponsiveWidgetScenarios)
def test_focus_location_after_refresh_without_tabbing(web_fixture, query_string_fixture, responsive_widget_scenarios):
    """After a value change triggers a refresh, focus stays on the expected element."""
    fixture = responsive_widget_scenarios
    wsgi_app = web_fixture.new_wsgi_app(enable_js=True, child_factory=fixture.MainWidget.factory())
    web_fixture.reahl_server.set_app(wsgi_app)
    browser = web_fixture.driver_browser
    browser.open('/')
    # The page starts in the scenario's initial state.
    assert browser.wait_for(query_string_fixture.is_state_now, fixture.initial_state)
    # Trigger the change that causes the widget to refresh.
    fixture.change_value(browser)
    assert browser.wait_for(query_string_fixture.is_state_now, fixture.changed_state)
    # Focus must survive the refresh without the user tabbing.
    assert browser.is_focus_on(fixture.expected_focussed_element)
def init_distributed_mode(args):
    """Initialize torch.distributed from whichever launcher set the environment.

    Checks, in order: ITP/OpenMPI variables, torchrun/launch variables
    (RANK/WORLD_SIZE), then SLURM. When none are present, sets
    `args.distributed = False` and returns without initializing.
    """
    if args.dist_on_itp:
        # OpenMPI (ITP) launch: mirror the MPI variables into the standard
        # torch.distributed environment variables.
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
        os.environ['RANK'] = str(args.rank)
        os.environ['LOCAL_RANK'] = str(args.gpu)
        # NOTE(review): args.world_size is not assigned in this branch before
        # being read here — it must already exist on `args` (e.g. an argparse
        # default) or this raises AttributeError; confirm the caller sets it.
        os.environ['WORLD_SIZE'] = str(args.world_size)
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # Synchronize all ranks, then silence printing on non-master processes.
    torch.distributed.barrier()
    setup_for_distributed((args.rank == 0))
class Migration(migrations.Migration):
    """Adjust question/questionset model options and relax the width field."""

    dependencies = [('questions', '0076_questionset_remove_section')]

    operations = [
        migrations.AlterModelOptions(
            name='question',
            options={'ordering': ('uri',), 'verbose_name': 'Question', 'verbose_name_plural': 'Questions'},
        ),
        migrations.AlterModelOptions(
            name='questionset',
            options={'ordering': ('uri',), 'verbose_name': 'Question set', 'verbose_name_plural': 'Question set'},
        ),
        migrations.AlterField(
            model_name='question',
            name='width',
            field=models.IntegerField(blank=True, help_text='Width for the widget of this question (optional, full width: 12).', null=True, verbose_name='Width'),
        ),
    ]
class DebertaV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2 models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported model inputs.

        Defined as a property: `inputs` is a property on the OnnxConfig
        base class, so a bare method here would shadow it with a callable
        and break `config.inputs` attribute access.
        """
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # token_type_ids only exist when the model has a token-type vocab.
        if self._config.type_vocab_size > 0:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required for DeBERTa-v2 export."""
        return 12

    def generate_dummy_inputs(self, preprocessor: Union[('PreTrainedTokenizerBase', 'FeatureExtractionMixin')], batch_size: int=(- 1), seq_length: int=(- 1), num_choices: int=(- 1), is_pair: bool=False, framework: Optional['TensorType']=None, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[(str, Any)]:
        """Produce dummy inputs for export, dropping token_type_ids when the
        model has no token-type vocabulary."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if (self._config.type_vocab_size == 0) and ('token_type_ids' in dummy_inputs):
            del dummy_inputs['token_type_ids']
        return dummy_inputs
def voc_eval(result_file, dataset, iou_thr=0.5):
    """Evaluate Pascal-VOC-style detection results and print the mAP summary.

    Args:
        result_file: path to the serialized detection results (mmcv format).
        dataset: dataset providing `get_ann_info` (and optionally a `year`
            attribute selecting the VOC07 11-point metric).
        iou_thr: IoU threshold for a detection to count as a true positive.
    """
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            # Mark the appended ignore boxes so eval_map can exclude them.
            # (np.bool was removed in NumPy >= 1.24; use the builtin bool.)
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=bool),
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        # Previously `gt_ignore = gt_ignore`, a no-op. Pass None so eval_map
        # knows there are no ignore annotations at all.
        gt_ignore = None
    if hasattr(dataset, 'year') and (dataset.year == 2007):
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(det_results, gt_bboxes, gt_labels, gt_ignore=gt_ignore, scale_ranges=None, iou_thr=iou_thr, dataset=dataset_name, print_summary=True)
class Effect4044(BaseEffect):
    """Projected/passive effect that multiplies 'overloadSpeedFactorBonus'
    on matching modules by the source module's 'overloadBonusMultiplier'."""

    # Applied during the 'early' phase of effect processing.
    runTime = 'early'
    # Effect categories this handler participates in.
    type = ('projected', 'passive')

    # NOTE(review): `handler` takes no `self`/`cls` — presumably meant to be
    # a @staticmethod (decorator possibly lost in extraction); confirm how
    # callers invoke it before relying on binding behavior.
    def handler(fit, module, context, projectionRange, **kwargs):
        fit.modules.filteredItemMultiply((lambda mod: ('overloadSpeedFactorBonus' in mod.itemModifiedAttributes)), 'overloadSpeedFactorBonus', module.getModifiedItemAttr('overloadBonusMultiplier'), **kwargs)
def load_archive_file(archive_file):
    """Resolve `archive_file` (path or URL) via the download cache and, when
    it is a tar archive rather than a directory, extract it in place.

    Returns the path to the extracted directory, or None when the name
    cannot be resolved.
    """
    try:
        # Resolve remote URLs to a cached local copy; local paths pass through.
        resolved_archive_file = cached_path(archive_file, cache_dir=None)
    except EnvironmentError:
        logger.info("Archive name '{}' was not found in archive name list. We assumed '{}' was a path or URL but couldn't find any file associated to this path or URL.".format(archive_file, archive_file))
        return None
    if (resolved_archive_file == archive_file):
        logger.info('loading archive file {}'.format(archive_file))
    else:
        logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
    tempdir = None
    if (not os.path.isdir(resolved_archive_file)):
        # Not yet extracted: unpack to a temp dir, then replace the archive
        # file with its extracted top-level directory.
        tempdir = tempfile.mkdtemp()
        logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
        # Derive the tar mode from the file extension, e.g. 'gz' -> 'r:gz'.
        ext = os.path.splitext(archive_file)[1][1:]
        with tarfile.open(resolved_archive_file, ('r:' + ext)) as archive:
            # SECURITY: extractall() trusts member paths; a malicious archive
            # could write outside tempdir (path traversal). Only use with
            # trusted archives, or pass filter='data' on Python >= 3.12.
            top_dir = os.path.commonprefix(archive.getnames())
            archive.extractall(tempdir)
        os.remove(resolved_archive_file)
        shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
        shutil.rmtree(tempdir)
    return resolved_archive_file
def annotate_value(origin: Value, metadata: Sequence[Union[(Value, Extension)]]) -> Value:
    """Attach `metadata` to `origin`, merging and deduplicating.

    Metadata already present on an AnnotatedValue is preserved and kept
    first; hashable entries are deduplicated in insertion order, while
    entries that cannot be hashed/compared are appended untouched.
    """
    if not metadata:
        return origin
    # Merge with any annotations already present on the value.
    if isinstance(origin, AnnotatedValue):
        metadata = (*origin.metadata, *metadata)
        origin = origin.value
    deduped = {}  # dict preserves insertion order; used as an ordered set
    unhashable = []
    for entry in metadata:
        try:
            if entry not in deduped:
                deduped[entry] = None
        except Exception:
            # Membership test raised (unhashable / broken __hash__): keep as-is.
            unhashable.append(entry)
    metadata = (*deduped, *unhashable)
    return AnnotatedValue(origin, metadata)
def main(opt):
    """Train (or evaluate) a model according to `opt`.

    Handles dataset/model creation, optional multi-GPU DataParallel, the
    train/validation loop with checkpointing, and stepwise LR decay.
    """
    if opt.disable_cudnn:
        torch.backends.cudnn.enabled = False
        print('Cudnn is disabled.')
    logger = Logger(opt)
    opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))
    Dataset = dataset_factory[opt.dataset]
    # task_factory yields the (train_step, val_step) pair for the task.
    (train, val) = task_factory[opt.task]
    (model, optimizer, start_epoch) = create_model(opt)
    if (len(opt.gpus) > 1):
        model = torch.nn.DataParallel(model, device_ids=opt.gpus).cuda(opt.device)
    else:
        model = model.cuda(opt.device)
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'), batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
    if opt.test:
        # Evaluation-only mode: run validation once and dump predictions.
        (log_dict_train, preds) = val(0, opt, val_loader, model)
        sio.savemat(os.path.join(opt.save_dir, 'preds.mat'), mdict={'preds': preds})
        return
    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'), batch_size=(opt.batch_size * len(opt.gpus)), shuffle=True, num_workers=opt.num_workers, pin_memory=True)
    best = (- 1)
    for epoch in range(start_epoch, (opt.num_epochs + 1)):
        # 'last' is overwritten each epoch unless every checkpoint is kept.
        mark = (epoch if opt.save_all_models else 'last')
        (log_dict_train, _) = train(epoch, opt, train_loader, model, optimizer)
        for (k, v) in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if ((opt.val_intervals > 0) and ((epoch % opt.val_intervals) == 0)):
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)), epoch, model, optimizer)
            (log_dict_val, preds) = val(epoch, opt, val_loader, model)
            for (k, v) in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            # Track the best validation metric; note the optimizer state is
            # not saved with the 'best' checkpoint.
            if (log_dict_val[opt.metric] > best):
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch, model, optimizer)
        logger.write('\n')
        if (epoch in opt.lr_step):
            # Decay LR by 10x at each configured step epoch.
            lr = (opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1)))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
def test_fib_ycombinator():
    """Fixed-point (Y combinator) evaluation of fib and factorial bodies."""
    y_comb = '\n (lambda (f)\n ((lambda (x) (x x))\n (lambda (g)\n (f (lambda (z) ((g g) z))))))\n'
    fac_body = '\n (lambda (f)\n (lambda (x)\n (if (< x 2)\n 1\n (* x (f (- x 1))))))\n '
    fib_body = '\n (lambda (f)\n (lambda (x)\n (if (< x 2)\n x\n (+ (f (- x 1)) (f (- x 2))))))\n'
    # fib(2) == 1 and fac(2) == 2.
    run_fix('((%s %s) 2)' % (y_comb, fib_body), 1)
    run_fix('((%s %s) 2)' % (y_comb, fac_body), 2)
class PancakeHouseMenu(Menu):
    """Breakfast menu backed by a plain list of MenuItem objects."""

    menuItems: List[MenuItem]

    def __init__(self):
        self.menuItems = []
        # (name, description, vegetarian, price)
        for entry in (
            ("K&B's Pancake Breakfast", 'Pancakes with scrambled eggs and toast', True, 2.99),
            ('Regular Pancake Breakfast', 'Pancakes with fried eggs, sausage', False, 2.99),
            ('Blueberry Pancakes', 'Pancakes made with fresh blueberries and blueberry syrup', True, 3.49),
            ('Waffles', 'Waffles with your choice of blueberries or strawberries', True, 3.59),
        ):
            self.addItem(*entry)

    def addItem(self, name: str, description: str, vegetarian: bool, price: float) -> None:
        """Append a new MenuItem built from the given fields."""
        self.menuItems.append(MenuItem(name, description, vegetarian, price))

    def getMenuItems(self) -> List[MenuItem]:
        return self.menuItems

    def createIterator(self) -> Iterator[MenuItem]:
        """Return an iterator over the menu items."""
        return PancakeHouseMenuIterator(self.menuItems)

    def __str__(self) -> str:
        return 'Objectville Pancake House Menu'
class Kernel(W):
    """Spatial weights based on kernel functions over nearest-neighbor
    bandwidths (fixed or adaptive).

    NOTE(review): `from_shapefile`, `from_array` and `from_dataframe` take
    `cls` as their first parameter — presumably @classmethod alternate
    constructors whose decorators were lost; confirm before calling them
    on instances.
    """

    def __init__(self, data, bandwidth=None, fixed=True, k=2, function='triangular', eps=1.0000001, ids=None, diagonal=False, distance_metric='euclidean', radius=None, **kwargs):
        """Build kernel-based weights.

        data: (n, 2) point coordinates, or a prebuilt KDTree.
        bandwidth: scalar or per-observation bandwidths; derived from
            k-nearest-neighbor distances when omitted.
        fixed: one global bandwidth (True) or adaptive per-point (False).
        k: neighbor count used to derive the bandwidth.
        function: kernel shape — 'triangular', 'uniform', 'quadratic',
            'quartic' or 'gaussian'.
        eps: inflation factor applied to the maximum neighbor distance.
        diagonal: force each observation's self-weight to exactly 1.0.
        radius: when given, switches to arc (spherical) distances.
        """
        if (radius is not None):
            distance_metric = 'arc'
        if isKDTree(data):
            # Reuse an existing tree; its stored coordinates are canonical.
            self.kdtree = data
            self.data = self.kdtree.data
            data = self.data
        else:
            self.kdtree = KDTree(data, distance_metric=distance_metric, radius=radius)
            self.data = self.kdtree.data
        # +1 because each point is returned as its own nearest neighbor.
        self.k = (k + 1)
        self.function = function.lower()
        self.fixed = fixed
        self.eps = eps
        if bandwidth:
            try:
                bandwidth = np.array(bandwidth)
                bandwidth.shape = (len(bandwidth), 1)
            except:
                # Scalar bandwidth: broadcast to one value per observation.
                bandwidth = (np.ones((len(data), 1), 'float') * bandwidth)
            self.bandwidth = bandwidth
        else:
            self._set_bw()
        self._eval_kernel()
        (neighbors, weights) = self._k_to_W(ids)
        if diagonal:
            # Overwrite the self-neighbor weight with exactly 1.0.
            for i in neighbors:
                weights[i][neighbors[i].index(i)] = 1.0
        W.__init__(self, neighbors, weights, ids, **kwargs)

    # NOTE(review): presumably a @classmethod (decorator lost) — confirm.
    def from_shapefile(cls, filepath, idVariable=None, **kwargs):
        """Alternate constructor from a point shapefile."""
        points = get_points_array_from_shapefile(filepath)
        ids = (get_ids(filepath, idVariable) if (idVariable is not None) else None)
        return cls.from_array(points, ids=ids, **kwargs)

    # NOTE(review): presumably a @classmethod (decorator lost) — confirm.
    def from_array(cls, array, **kwargs):
        """Alternate constructor from an (n, 2) coordinate array."""
        return cls(array, **kwargs)

    # NOTE(review): presumably a @classmethod (decorator lost) — confirm.
    def from_dataframe(cls, df, geom_col=None, ids=None, use_index=True, **kwargs):
        """Alternate constructor from a dataframe of point geometries."""
        if (geom_col is None):
            geom_col = df.geometry.name
        pts = get_points_array(df[geom_col])
        if ((ids is None) and use_index):
            ids = df.index.tolist()
        elif isinstance(ids, str):
            ids = df[ids].tolist()
        return cls(pts, ids=ids, **kwargs)

    def _k_to_W(self, ids=None):
        """Convert evaluated kernel rows into W-style neighbor/weight dicts."""
        allneighbors = {}
        weights = {}
        ids = (np.array(ids) if ids else np.arange(len(self.data)))
        for (i, _) in enumerate(self.kernel):
            if (len(self.neigh[i]) == 0):
                # Isolated observation: no neighbors within bandwidth.
                allneighbors[ids[i]] = []
                weights[ids[i]] = []
            else:
                allneighbors[ids[i]] = list(ids[self.neigh[i]])
                weights[ids[i]] = self.kernel[i].tolist()
        return (allneighbors, weights)

    def _set_bw(self):
        """Derive bandwidths from k-nearest-neighbor distances."""
        (dmat, neigh) = self.kdtree.query(self.data, k=self.k)
        if self.fixed:
            # Single global bandwidth: the largest nn distance, inflated.
            bandwidth = (dmat.max() * self.eps)
            n = len(dmat)
            self.bandwidth = (np.ones((n, 1), 'float') * bandwidth)
        else:
            # Adaptive: each point's own k-th neighbor distance, inflated.
            self.bandwidth = (dmat.max(axis=1) * self.eps)
            self.bandwidth.shape = (self.bandwidth.size, 1)
        # Second query records the neighbor indices used downstream.
        nnq = self.kdtree.query(self.data, k=self.k)
        self.neigh = nnq[1]

    def _eval_kernel(self):
        """Evaluate the kernel function over every observation's neighbors."""
        if (not hasattr(self, 'neigh')):
            # Bandwidths were supplied directly: neighbors are all points
            # within each observation's bandwidth radius.
            kdtq = self.kdtree.query_ball_point
            neighbors = [kdtq(self.data[i], r=bwi[0]) for (i, bwi) in enumerate(self.bandwidth)]
            self.neigh = neighbors
        bw = self.bandwidth
        kdtq = self.kdtree.query
        z = []
        for (i, nids) in enumerate(self.neigh):
            (di, ni) = kdtq(self.data[i], k=len(nids))
            if (not isinstance(di, np.ndarray)):
                # A single-neighbor query returns scalars; normalize to arrays.
                di = np.asarray(([di] * len(nids)))
                ni = np.asarray(([ni] * len(nids)))
            # Map each neighbor id to its distance, normalized by bandwidth.
            zi = (np.array([dict(list(zip(ni, di, strict=True)))[nid] for nid in nids]) / bw[i])
            z.append(zi)
        zs = z
        # Kernel shapes evaluated on the normalized distances.
        if (self.function == 'triangular'):
            self.kernel = [(1 - zi) for zi in zs]
        elif (self.function == 'uniform'):
            self.kernel = [(np.ones(zi.shape) * 0.5) for zi in zs]
        elif (self.function == 'quadratic'):
            self.kernel = [((3.0 / 4) * (1 - (zi ** 2))) for zi in zs]
        elif (self.function == 'quartic'):
            self.kernel = [((15.0 / 16) * ((1 - (zi ** 2)) ** 2)) for zi in zs]
        elif (self.function == 'gaussian'):
            c = (np.pi * 2)
            c = (c ** (- 0.5))
            self.kernel = [(c * np.exp(((- (zi ** 2)) / 2.0))) for zi in zs]
        else:
            print(('Unsupported kernel function', self.function))
class CenterCrop(object):
    """Crop the central `size` region of a PIL-style image."""

    def __init__(self, size):
        # Accept a single number (square crop) or an explicit (h, w) pair.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img):
        width, height = img.size
        target_h, target_w = self.size
        left = int(round((width - target_w) / 2.0))
        top = int(round((height - target_h) / 2.0))
        return img.crop((left, top, left + target_w, top + target_h))

    def randomize_parameters(self):
        """Deterministic transform: nothing to (re)sample."""
        pass
class PreOCIModel(RepoEmailDataInterface):
    """Pre-OCI data-model adapter for repository email authorizations."""

    def get_email_authorized_for_repo(self, namespace_name, repository_name, email):
        """Look up an existing email authorization, returning None on miss."""
        lookup = model.repository.get_email_authorized_for_repo
        return _return_none_or_data(lookup, namespace_name, repository_name, email)

    def create_email_authorization_for_repo(self, namespace_name, repository_name, email):
        """Create a new email authorization record for the repository."""
        create = model.repository.create_email_authorization_for_repo
        return _return_none_or_data(create, namespace_name, repository_name, email)
def encode_path(value):
    """Coerce `value` to a unicode path string.

    None passes through; bytes are decoded with the filesystem encoding;
    any other non-string value is replaced by the repr of its class (or of
    the value itself when it already is a class) so arbitrary object
    contents are not leaked.
    """
    if value is None:
        return None
    if not isinstance(value, (str, bytes)):
        value = repr(value) if isinstance(value, type) else repr(type(value))
    if isinstance(value, bytes):
        value = value.decode(sys.getfilesystemencoding())
    return value
class CifarResNet(nn.Module):
    """ResNet for CIFAR (depth = 6n+2): a 3x3 stem followed by three stages
    of n residual blocks each, returning stage feature maps and the pooled
    feature vector."""

    def __init__(self, block, depth, channels=3):
        super(CifarResNet, self).__init__()
        assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
        layer_blocks = (depth - 2) // 6
        self.conv_1_3x3 = nn.Conv2d(channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_1 = nn.BatchNorm2d(16)
        self.inplanes = 16
        self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
        self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
        self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, last_phase=True)
        self.avgpool = nn.AvgPool2d(8)
        self.out_dim = 64 * block.expansion
        # He initialization for convs; BatchNorm starts as identity.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
        """Build one stage; the first block may downsample via DownsampleB."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = DownsampleB(self.inplanes, planes * block.expansion, stride)
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        if last_phase:
            # Final stage: flag the very last block with last=True.
            for _ in range(1, blocks - 1):
                layers.append(block(self.inplanes, planes))
            layers.append(block(self.inplanes, planes, last=True))
        else:
            for _ in range(1, blocks):
                layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        stem = F.relu(self.bn_1(self.conv_1_3x3(x)), inplace=True)
        x_1 = self.stage_1(stem)
        x_2 = self.stage_2(x_1)
        x_3 = self.stage_3(x_2)
        pooled = self.avgpool(x_3)
        features = pooled.view(pooled.size(0), -1)
        return {'fmaps': [x_1, x_2, x_3], 'features': features}

    def last_conv(self):
        """Return the final block's second conv (used e.g. for attention maps)."""
        return self.stage_3[-1].conv_b
class UDPServer(Server):
    """RPC server over UDP: one datagram per call, reply sent to the sender."""

    def __init__(self, host, prog, vers, port):
        Server.__init__(self, host, prog, vers, port)
        self.connect()

    def connect(self):
        """Create the UDP socket and bind it to (host, port)."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.prot = IPPROTO_UDP
        self.sock.bind((self.host, self.port))

    def loop(self):
        """Serve forever, handling one request per iteration."""
        while 1:
            self.session()

    def session(self):
        """Receive a single call, dispatch it, and send any reply back.

        At most 8192 bytes of the incoming datagram are read.
        """
        (call, host_port) = self.sock.recvfrom(8192)
        reply = self.handle(call)
        # handle() may return None for calls that warrant no response.
        if (reply is not None):
            _sendto(self.sock, reply, host_port)
class Isothermal(BaseThermal):
    """Isothermal thermal submodel: temperature equals ambient everywhere."""

    def __init__(self, param, options=None):
        super().__init__(param, options=options)

    def get_fundamental_variables(self):
        """Build temperature variables, all pinned to the ambient temperature."""
        spatial = pybamm.standard_spatial_vars
        T_amb = self.param.T_amb(spatial.y, spatial.z, pybamm.t)
        T_dict = {
            'negative current collector': T_amb,
            'positive current collector': T_amb,
            'x-averaged cell': T_amb,
            'volume-averaged cell': T_amb,
        }
        # Electrode/separator temperatures are broadcasts of the ambient value.
        for domain in ['negative electrode', 'separator', 'positive electrode']:
            T_dict[domain] = pybamm.PrimaryBroadcast(T_amb, domain)
        return self._get_standard_fundamental_variables(T_dict)

    def get_coupled_variables(self, variables):
        """Add heat-source variables, or zero them out if not requested."""
        if self.options['calculate heat source for isothermal models'] == 'true':
            variables.update(self._get_standard_coupled_variables(variables))
        else:
            # All heating terms are identically zero in the isothermal model.
            zero = pybamm.Scalar(0)
            heating_vars = ['Ohmic heating [W.m-3]', 'X-averaged Ohmic heating [W.m-3]', 'Volume-averaged Ohmic heating [W.m-3]', 'Irreversible electrochemical heating [W.m-3]', 'X-averaged irreversible electrochemical heating [W.m-3]', 'Volume-averaged irreversible electrochemical heating [W.m-3]', 'Reversible heating [W.m-3]', 'X-averaged reversible heating [W.m-3]', 'Volume-averaged reversible heating [W.m-3]', 'Total heating [W.m-3]', 'X-averaged total heating [W.m-3]', 'Volume-averaged total heating [W.m-3]']
            variables.update({name: zero for name in heating_vars})
        return variables
def test_demand_saving_with_indexed_array_from_hdf():
    """Demand-saving factors from an HDF-backed indexed array are applied."""
    model = load_model('demand_saving_hdf.json')
    model.timestepper.end = pd.Timestamp('2016-01-31')
    rec_demand = NumpyArrayNodeRecorder(model, model.nodes['Demand'])
    rec_storage = NumpyArrayStorageRecorder(model, model.nodes['Reservoir'])
    model.check()
    model.run()
    max_volume = model.nodes['Reservoir'].max_volume
    demand_baseline = 50.0
    # Expected saving factor applied at each (timestep, scenario 0) pair.
    for timestep, saving in [(0, 1.0), (11, 0.8), (12, 0.5), (13, 0.25)]:
        assert_allclose(rec_demand.data[timestep, 0], demand_baseline * saving)
# NOTE(review): the bare '.parametrize(...)' lines were stripped decorators
# (a syntax error as written); restored as '@pytest.mark.parametrize', which
# matches the parameter names of the test function below.
@pytest.mark.parametrize('x_val, unique_axis, repeats, repeat_axis', [(np.array([[(- 10), (- 3)], [(- 10), 2]], dtype=np.int64), None, (1, 2), 0)])
@pytest.mark.parametrize('return_index', [False])
@pytest.mark.parametrize('return_counts', [False])
@pytest.mark.parametrize('return_inverse', [False])
def test_local_Unique_Repeat(x_val, unique_axis, repeats, repeat_axis, return_index, return_counts, return_inverse):
    """local_Unique_Repeat_lift must drop the Repeat feeding a Unique."""
    x = as_tensor_variable(x_val).type()
    y = unique(repeat(x, tuple(repeats), axis=repeat_axis), return_index=return_index, return_counts=return_counts, return_inverse=return_inverse, axis=unique_axis)
    # unique() may return a list when extra outputs are requested.
    if isinstance(y, list):
        (y, *_) = y
    y_fg = FunctionGraph(outputs=[y], copy_inputs=False)
    y_rewritten_fg = rewrite_graph(y_fg, clone=False, include=['canonicalize', 'local_Unique_Repeat_lift'], exclude=['local_Unique_scalar'])
    y_rewritten = y_rewritten_fg.outputs[0]
    y_rewritten_start = y_rewritten
    # After the rewrite, Unique is applied directly to x and no Repeat remains.
    assert isinstance(y_rewritten_start.owner.op, Unique)
    assert (y_rewritten_start.owner.inputs[0] == x)
    assert (not any((isinstance(node.op, Repeat) for node in y_rewritten_fg.apply_nodes)))
    # Compile with the rewrite disabled so both graphs can be compared at runtime.
    default_mode = get_default_mode()
    rewrite_mode = default_mode.excluding('local_Unique_Repeat_lift')
    y_fn = function([x], [y, y_rewritten], mode=rewrite_mode)
    assert any((isinstance(node.op, Repeat) for node in y_fn.maker.fgraph.apply_nodes))
    (y_exp_val, y_val) = y_fn(x_val)
    assert np.array_equal(y_exp_val, y_val)
class TestClientSubscription(ClientTestCase):
    """Subscription-endpoint tests, run against `responses`-mocked HTTP calls."""

    def setUp(self):
        super(TestClientSubscription, self).setUp()
        self.base_url = '{}/subscriptions'.format(self.base_url)
        self.subscription_id = 'sub_8RlLljfA4AnDVx'

    def _stub(self, method, url, fixture_name):
        # Load a fixture payload and register it as the mocked HTTP response.
        payload = mock_file(fixture_name)
        responses.add(method, url, status=200, body=json.dumps(payload), match_querystring=True)
        return payload

    def test_subscription_fetch_all(self):
        result = self._stub(responses.GET, self.base_url, 'subscription_collection')
        self.assertEqual(self.client.subscription.all(), result)

    def test_subscription_fetch(self):
        url = '{}/{}'.format(self.base_url, 'fake_subscription_id')
        result = self._stub(responses.GET, url, 'fake_subscription')
        self.assertEqual(self.client.subscription.fetch('fake_subscription_id'), result)

    def test_subscription_create(self):
        init = mock_file('init_subscription')
        result = self._stub(responses.POST, self.base_url, 'fake_subscription')
        self.assertEqual(self.client.subscription.create(init), result)

    def test_subscription_cancel(self):
        url = '{}/{}/cancel'.format(self.base_url, self.subscription_id)
        self._stub(responses.POST, url, 'fake_subscription_cancelled')
        response = json.loads(self.client.subscription.cancel(self.subscription_id))
        self.assertEqual(response['id'], self.subscription_id)
        self.assertEqual(response['entity'], 'subscription')
        self.assertEqual(response['status'], 'cancelled')

    def test_subscription_cancel_scheduled_changes(self):
        url = '{}/{}/cancel_scheduled_changes'.format(self.base_url, self.subscription_id)
        self._stub(responses.POST, url, 'fake_subscription_resumed')
        response = json.loads(self.client.subscription.cancel_scheduled_changes(self.subscription_id))
        self.assertEqual(response['id'], self.subscription_id)
        self.assertEqual(response['entity'], 'subscription')

    def test_subscription_create_addon(self):
        url = '{}/{}/addons'.format(self.base_url, self.subscription_id)
        self._stub(responses.POST, url, 'fake_subscription_addon')
        addon = {'item': {'name': 'Extra Chair', 'amount': 30000, 'currency': 'INR'}, 'quantity': 2}
        response = json.loads(self.client.subscription.createAddon(self.subscription_id, addon))
        self.assertEqual(response['subscription_id'], self.subscription_id)
        self.assertEqual(response['entity'], 'addon')
        self.assertEqual(response['item']['name'], 'Extra Chair')
        self.assertEqual(response['item']['amount'], 30000)

    def test_subscription_edit(self):
        # The literal 'subscription_id' path (not self.subscription_id) is
        # stubbed here, matching the id passed to edit() below.
        url = '{}/{}'.format(self.base_url, 'subscription_id')
        result = self._stub(responses.PATCH, url, 'fake_subscription')
        param = {'quantity': 2, 'schedule_change_at': 'cycle_end'}
        self.assertEqual(self.client.subscription.edit('subscription_id', param), result)

    def test_subscription_pending_update(self):
        url = '{}/{}/retrieve_scheduled_changes'.format(self.base_url, 'fake_subscription_id')
        result = self._stub(responses.GET, url, 'fake_subscription')
        self.assertEqual(self.client.subscription.pending_update('fake_subscription_id'), result)

    def test_subscription_pause(self):
        url = '{}/{}/pause'.format(self.base_url, self.subscription_id)
        self._stub(responses.POST, url, 'fake_subscription_paused')
        response = json.loads(self.client.subscription.pause(self.subscription_id))
        self.assertEqual(response['id'], self.subscription_id)
        self.assertEqual(response['entity'], 'subscription')
        self.assertEqual(response['status'], 'paused')

    def test_subscription_resume(self):
        url = '{}/{}/resume'.format(self.base_url, self.subscription_id)
        self._stub(responses.POST, url, 'fake_subscription_resumed')
        response = json.loads(self.client.subscription.resume(self.subscription_id))
        self.assertEqual(response['id'], self.subscription_id)
        self.assertEqual(response['entity'], 'subscription')
        self.assertEqual(response['status'], 'active')

    def test_subscription_delete_offer(self):
        url = '{}/{}/{}'.format(self.base_url, 'sub_8kip7ybbcOyc9J', 'offer_IjA06IHSz33cw2')
        self._stub(responses.DELETE, url, 'fake_subscription')
        response = json.loads(self.client.subscription.delete_offer('sub_8kip7ybbcOyc9J', 'offer_IjA06IHSz33cw2'))
        self.assertEqual(response['id'], 'sub_8kip7ybbcOyc9J')
        self.assertEqual(response['entity'], 'subscription')
class SawyerButtonPressTopdownEnvV2(SawyerXYZEnv):
    """Sawyer task: press a button downward from above.

    Restored stripped decorators: ``model_name`` must be a @property because
    ``self.model_name`` is read as a value in ``__init__`` below (a plain
    method would pass a bound method to ``super().__init__``);
    ``_target_site_config`` is likewise a property in the SawyerXYZEnv API,
    and ``evaluate_state`` carries the ``_assert_task_is_set`` guard.
    """

    def __init__(self):
        # Workspace bounds for the gripper and the randomized button position.
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.8, 0.115)
        obj_high = (0.1, 0.9, 0.115)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.8, 0.115], dtype=np.float32), 'hand_init_pos': np.array([0, 0.4, 0.2], dtype=np.float32)}
        self.goal = np.array([0, 0.88, 0.1])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        goal_low = self.hand_low
        goal_high = self.hand_high
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        """Path to the MuJoCo XML that describes this scene."""
        return full_v2_path_for('sawyer_xyz/sawyer_button_press_topdown.xml')

    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Score an observation/action pair; returns (reward, info dict)."""
        (reward, tcp_to_obj, tcp_open, obj_to_target, near_button, button_pressed) = self.compute_reward(action, obs)
        info = {'success': float((obj_to_target <= 0.02)), 'near_object': float((tcp_to_obj <= 0.05)), 'grasp_success': float((tcp_open > 0)), 'grasp_reward': near_button, 'in_place_reward': button_pressed, 'obj_to_target': obj_to_target, 'unscaled_reward': reward}
        return (reward, info)

    @property
    def _target_site_config(self):
        # No extra target sites need repositioning for this task.
        return []

    def _get_id_main_object(self):
        return self.unwrapped.model.geom_name2id('btnGeom')

    def _get_pos_objects(self):
        # Button position, offset to the pressable surface.
        return (self.get_body_com('button') + np.array([0.0, 0.0, 0.193]))

    def _get_quat_objects(self):
        return self.sim.data.get_body_xquat('button')

    def _set_obj_xyz(self, pos):
        # The button is a single sliding joint at qpos/qvel index 9.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand and (optionally randomized) button box; return first obs."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self.obj_init_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
        self._target_pos = self._get_site_pos('hole')
        # Initial press distance, used to normalize the pressing reward.
        self._obj_to_target_init = abs((self._target_pos[2] - self._get_site_pos('buttonStart')[2]))
        return self._get_obs()

    def compute_reward(self, action, obs):
        """Dense reward: approach the button with a closed gripper, then press."""
        del action
        obj = obs[4:7]
        tcp = self.tcp_center
        tcp_to_obj = np.linalg.norm((obj - tcp))
        tcp_to_obj_init = np.linalg.norm((obj - self.init_tcp))
        obj_to_target = abs((self._target_pos[2] - obj[2]))
        tcp_closed = (1 - obs[3])
        near_button = reward_utils.tolerance(tcp_to_obj, bounds=(0, 0.01), margin=tcp_to_obj_init, sigmoid='long_tail')
        button_pressed = reward_utils.tolerance(obj_to_target, bounds=(0, 0.005), margin=self._obj_to_target_init, sigmoid='long_tail')
        reward = (5 * reward_utils.hamacher_product(tcp_closed, near_button))
        # Pressing reward only activates once the gripper is at the button.
        if (tcp_to_obj <= 0.03):
            reward += (5 * button_pressed)
        return (reward, tcp_to_obj, obs[3], obj_to_target, near_button, button_pressed)
# NOTE(review): the bare '_cache()' below looks like a stripped decorator
# (possibly '@functools.lru_cache()' as in detectron2-style loggers) — confirm
# against the original source before relying on it.
_cache()
def setup_logger(output=None, distributed_rank=0, *, color=True, name='imagenet', abbrev_name=None):
    """Configure and return a logger named *name*.

    Rank 0 logs to stdout (optionally colorized); every rank can additionally
    log to a file when *output* is given (non-zero ranks get a
    ``.rank{N}`` suffix so files do not collide).

    Args:
        output: file path ('.txt'/'.log' used as-is) or directory
            (a 'log.txt' is created inside); None disables file logging.
        distributed_rank: rank of this process; only rank 0 writes to stdout.
        color: use the colorized formatter for the stdout handler.
        name: logger name.
        abbrev_name: abbreviation of *name* for the colorful formatter;
            defaults to *name* itself.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Don't double-log through ancestor loggers.
    logger.propagate = False
    if (abbrev_name is None):
        abbrev_name = name
    plain_formatter = logging.Formatter('[%(asctime)s.%(msecs)03d]: %(message)s', datefmt='%m/%d %H:%M:%S')
    # stdout handler: only on the main (rank 0) process.
    if (distributed_rank == 0):
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter((colored('[%(asctime)s.%(msecs)03d]: ', 'green') + '%(message)s'), datefmt='%m/%d %H:%M:%S', root_name=name, abbrev_name=str(abbrev_name))
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    # Optional file handler (all ranks; non-zero ranks write rank-suffixed files).
    if (output is not None):
        if (output.endswith('.txt') or output.endswith('.log')):
            filename = output
        else:
            filename = os.path.join(output, 'log.txt')
        if (distributed_rank > 0):
            filename = (filename + f'.rank{distributed_rank}')
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
    return logger
class SupportsCompositeMetricCompute(Protocol):
    """Structural interface for objects that compute a composite metric
    from previously computed base metrics and validator outputs."""

    # Name under which this composite metric's result is reported.
    composite_metric_name: str
    # Names of the base metrics whose results compute() requires.
    requires_metric: List[str]
    # Names of the validators whose outputs compute() requires.
    requires_validator: List[str]

    def compute(self, metric_results: Dict[(str, torch.Tensor)], validation_results: Dict[(str, validators.ValidatorOutput)], simulation_output: SimulationOutputCLE) -> float:
        """Combine the required metric/validator results into a single score.

        Implementations receive only results keyed by the names declared in
        ``requires_metric`` / ``requires_validator``.
        """
        raise NotImplementedError
class MNISTInstance(datasets.MNIST):
    """MNIST dataset that also returns the sample index with each item."""

    def __getitem__(self, index):
        """Return ``(image, target, index)`` for the given sample."""
        if self.train:
            data, labels = self.train_data, self.train_labels
        else:
            data, labels = self.test_data, self.test_labels
        img, target = data[index], labels[index]
        # Wrap the raw tensor as a grayscale PIL image so transforms apply.
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, index
def create_generators(args):
    """Build (train_generator, validation_generator) for the configured dataset.

    Raises:
        ValueError: if ``args.dataset_type`` is not 'pascal', 'csv' or 'coco'.
    """
    common_args = {'batch_size': args.batch_size, 'phi': args.phi, 'detect_text': args.detect_text, 'detect_quadrangle': args.detect_quadrangle}
    # Augmentation effects are only created when --random-transform is set.
    if args.random_transform:
        misc_effect, visual_effect = MiscEffect(), VisualEffect()
    else:
        misc_effect = visual_effect = None
    if args.dataset_type == 'pascal':
        # Generator modules are imported lazily so unused backends stay optional.
        from generators.pascal import PascalVocGenerator
        train_generator = PascalVocGenerator(args.pascal_path, 'trainval', skip_difficult=True, misc_effect=misc_effect, visual_effect=visual_effect, **common_args)
        validation_generator = PascalVocGenerator(args.pascal_path, 'val', skip_difficult=True, shuffle_groups=False, **common_args)
    elif args.dataset_type == 'csv':
        from generators.csv_ import CSVGenerator
        train_generator = CSVGenerator(args.annotations_path, args.classes_path, misc_effect=misc_effect, visual_effect=visual_effect, **common_args)
        # A validation CSV is optional.
        if args.val_annotations_path:
            validation_generator = CSVGenerator(args.val_annotations_path, args.classes_path, shuffle_groups=False, **common_args)
        else:
            validation_generator = None
    elif args.dataset_type == 'coco':
        from generators.coco import CocoGenerator
        train_generator = CocoGenerator(args.coco_path, 'train2017', misc_effect=misc_effect, visual_effect=visual_effect, group_method='random', **common_args)
        validation_generator = CocoGenerator(args.coco_path, 'val2017', shuffle_groups=False, **common_args)
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
    return (train_generator, validation_generator)
def compose_transform(R=None, t=None):
    """Compose a single transform from rotation ``R`` and translation ``t``.

    Missing pieces default to identity rotation / zero translation. The batch
    dimension is added for ``compose_transform_function`` and stripped again
    before returning; gradients are not tracked.
    """
    xp = cuda.get_array_module(R, t)
    Rs = xp.eye(3)[None] if R is None else R[None]
    ts = xp.zeros((1, 3)) if t is None else t[None]
    with chainer.no_backprop_mode():
        T = compose_transform_function(Rs, ts).array[0]
    return T
class TCOMM(TestCase):
    """Tests for the ID3 COMM (comment) frame."""

    def test_default(self):
        """A bare COMM gets UTF-16 encoding, 'XXX' language, empty desc/text."""
        frame = COMM()
        self.assertEqual(frame.encoding, 1)
        self.assertEqual(frame.lang, u'XXX')
        self.assertEqual(frame.desc, u'')
        self.assertEqual(frame.text, [])

    def test_hash(self):
        """HashKey depends on desc and lang, but not on the comment text."""
        frame = COMM(encoding=0, lang='foo', desc='d')
        self.assertEqual(frame.HashKey, 'COMM:d:foo')
        frame._pprint()
        # Fixed: assertEquals/assertNotEquals are deprecated aliases that were
        # removed in Python 3.12; use the canonical names.
        self.assertEqual(COMM(text='a').HashKey, COMM(text='b').HashKey)
        self.assertNotEqual(COMM(desc='a').HashKey, COMM(desc='b').HashKey)
        self.assertNotEqual(COMM(lang='abc').HashKey, COMM(lang='def').HashKey)

    def test_bad_unicodedecode(self):
        """Malformed UTF-16 data raises ID3JunkFrameError instead of crashing."""
        data = b'\x01\x00\x00\x00\xff\xfe\x00\xff\xfeh\x00'
        # `_24` is presumably a module-level ID3v2.4 header fixture defined
        # elsewhere in this file.
        self.assertRaises(ID3JunkFrameError, COMM._fromData, _24, 0, data)
class _EnsurePackagesDiscovered(_expand.EnsurePackagesDiscovered):
    """Context manager that seeds the distribution with pyproject-derived
    defaults before package discovery runs, and writes the discovery
    results back into the setuptools config on exit."""

    def __init__(self, distribution: 'Distribution', project_cfg: dict, setuptools_cfg: dict):
        super().__init__(distribution)
        self._project_cfg = project_cfg
        self._setuptools_cfg = setuptools_cfg

    def __enter__(self):
        dist = self._dist
        cfg = self._setuptools_cfg
        # The config and the distribution must share the same package-dir
        # object so later discovery updates are seen by both.
        package_dir: Dict[(str, str)] = cfg.setdefault('package-dir', {})
        package_dir.update(dist.package_dir or {})
        dist.package_dir = package_dir
        dist.set_defaults._ignore_ext_modules()
        # Only fill values the user has not already set on the distribution.
        if dist.metadata.name is None:
            dist.metadata.name = self._project_cfg.get('name')
        if dist.py_modules is None:
            dist.py_modules = cfg.get('py-modules')
        if dist.packages is None:
            dist.packages = cfg.get('packages')
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        # Propagate what discovery found back into the setuptools config.
        self._setuptools_cfg.setdefault('packages', self._dist.packages)
        self._setuptools_cfg.setdefault('py-modules', self._dist.py_modules)
        return super().__exit__(exc_type, exc_value, traceback)
def check_unix_fs_mocked(tmpdir: Any, mocker: MockerFixture) -> Callable[([Any, Any], None)]:
    """Return a checker asserting UnixFS delegates to (mocked) os functions."""

    def check(mocked_rm, mocked_ls):
        # The fixtures must have patched the real os functions in place.
        assert mocked_rm is os.remove
        assert mocked_ls is os.listdir
        target = tmpdir / 'foo.txt'
        target.ensure()
        # While mocked: rm is recorded but the file survives on disk.
        UnixFS.rm(str(target))
        mocked_rm.assert_called_once_with(str(target))
        assert os.path.isfile(str(target))
        # While mocked: ls returns the canned listing.
        mocked_ls.return_value = ['bar.txt']
        assert UnixFS.ls(str(tmpdir)) == ['bar.txt']
        mocked_ls.assert_called_once_with(str(tmpdir))
        # After stopping the mocks, the real filesystem is visible again.
        mocker.stopall()
        assert UnixFS.ls(str(tmpdir)) == ['foo.txt']
        UnixFS.rm(str(target))
        assert not os.path.isfile(str(target))

    return check
def test_python_nodes_are_unique(tmp_path):
    """Same-named task modules in different dirs must yield distinct DAG nodes."""
    for subdir, default in (('a', 1), ('b', 2)):
        pkg = tmp_path.joinpath(subdir)
        pkg.mkdir()
        pkg.joinpath('task_example.py').write_text(f'def task_example(a={default}): pass')
    session = build(paths=tmp_path)
    assert session.exit_code == ExitCode.OK
    # Two tasks plus their two (distinct) default-argument nodes.
    assert len(session.dag.nodes) == 4
def test_return_on_hover(page: Page):
    """Hovering a marker updates the popup/tooltip outputs when enabled."""
    popup_link = page.get_by_role('link', name='simple popup')
    # Two clicks, exactly as in the original flow — presumably needed to get
    # the app into its initial state; preserved as-is.
    popup_link.click()
    popup_link.click()
    expect(page.get_by_text('Popup: None')).to_be_visible()
    expect(page.get_by_text('Tooltip: None')).to_be_visible()
    page.get_by_text('Return on hover?').click()
    marker = page.frame_locator('iframe[title="streamlit_folium\\.st_folium"]').get_by_role('img').nth(1)
    marker.hover()
    try:
        expect(page.get_by_text('Popup: Popup 2!')).to_be_visible()
        expect(page.get_by_text('Tooltip: Tooltip 2!')).to_be_visible()
    except Exception as e:
        # Capture the page state for debugging before re-raising.
        page.screenshot(path='screenshot-popup2.png')
        raise e
class Effect7062(BaseEffect):
    """Apply the beacon's warfare-buff command bonuses to the fit."""

    runTime = 'early'
    type = ('projected', 'passive', 'gang')

    # NOTE(review): restored @staticmethod — handler takes no `self`, so the
    # bare def only worked as a stripped decorator; generated effect classes
    # declare handler static. Confirm against the upstream generated source.
    @staticmethod
    def handler(fit, beacon, context, projectionRange, **kwargs):
        # Beacons can carry up to two warfare buffs (slots 1 and 2).
        for slot in range(1, 3):
            # Fetch the buff ID once (the original fetched it twice) and
            # avoid shadowing the builtin `id`.
            buff_id = beacon.getModifiedItemAttr('warfareBuff{}ID'.format(slot))
            if buff_id:
                value = beacon.getModifiedItemAttr('warfareBuff{}Value'.format(slot))
                fit.addCommandBonus(buff_id, value, beacon, kwargs['effect'], 'early')
def test_joined_validators():
    """A joined validator accepts values passing either sub-validator."""
    validator = joined_validators(strict_discrete_set, strict_range)
    values = [['ON', 'OFF'], range(10)]
    # Values valid under either sub-validator pass through unchanged.
    for candidate in (5, 5.1, 'ON'):
        assert validator(candidate, values) == candidate
    # Values rejected by both sub-validators raise ValueError.
    for bad in ('OUT', 20):
        with pytest.raises(ValueError):
            validator(bad, values)
class CalcToggleCommandFitStatesCommand(wx.Command):
    """Undoable wx command that toggles the active state of command fits.

    Do() either applies explicit per-fit states (``forceStates``) or flips
    every involved command fit based on the main command fit's current
    state; Undo() re-runs the command with the states captured before the
    change.
    """

    def __init__(self, fitID, mainCommandFitID, commandFitIDs, forceStates=None):
        wx.Command.__init__(self, True, 'Toggle Command Fit States')
        # Fit that receives the command bonuses.
        self.fitID = fitID
        # Fit whose current state decides the toggle direction.
        self.mainCommandFitID = mainCommandFitID
        self.commandFitIDs = commandFitIDs
        # Optional explicit {commandFitID: bool} states (used by Undo).
        self.forceStates = forceStates
        # Snapshot of pre-change states, filled in by Do().
        self.savedStates = None

    def Do(self):
        """Apply the toggle; return False when nothing could be changed."""
        pyfalog.debug('Doing toggling of command fit {}/{} state for fit {}'.format(self.mainCommandFitID, self.commandFitIDs, self.fitID))
        sFit = Fit.getInstance()
        # Work on a copy so the stored ID list is not mutated.
        commandFitIDs = self.commandFitIDs[:]
        if (self.mainCommandFitID not in commandFitIDs):
            commandFitIDs.append(self.mainCommandFitID)
        # Resolve command info for every fit that is still available.
        commandInfos = {}
        for commandFitID in commandFitIDs:
            commandFit = sFit.getFit(commandFitID)
            if (commandFit is None):
                pyfalog.debug('Command fit is not available')
                continue
            commandInfo = commandFit.getCommandInfo(self.fitID)
            if (commandInfo is None):
                pyfalog.warning('Fit command info is not available')
                continue
            commandInfos[commandFitID] = commandInfo
        if (len(commandInfos) == 0):
            return False
        # Capture current states so Undo() can restore them later.
        self.savedStates = {cfid: ci.active for (cfid, ci) in commandInfos.items()}
        mainCommandInfo = commandInfos.get(self.mainCommandFitID)
        if (self.forceStates is not None):
            # Explicit states requested: apply them directly.
            for (commandFitID, state) in self.forceStates.items():
                commandInfo = commandInfos.get(commandFitID)
                if (commandInfo is None):
                    continue
                commandInfo.active = state
        elif ((mainCommandInfo is not None) and mainCommandInfo.active):
            # Main fit currently active -> deactivate all involved fits.
            for commandInfo in commandInfos.values():
                commandInfo.active = False
        elif ((mainCommandInfo is not None) and (not mainCommandInfo.active)):
            # Main fit currently inactive -> activate all involved fits.
            for commandInfo in commandInfos.values():
                commandInfo.active = True
        else:
            return False
        return True

    def Undo(self):
        """Undo by re-running the command with the saved states forced."""
        pyfalog.debug('Undoing toggling of command fit {}/{} state for fit {}'.format(self.mainCommandFitID, self.commandFitIDs, self.fitID))
        cmd = CalcToggleCommandFitStatesCommand(fitID=self.fitID, mainCommandFitID=self.mainCommandFitID, commandFitIDs=self.commandFitIDs, forceStates=self.savedStates)
        return cmd.Do()
class KsymAdaptedKRKS(krks.KRKS, khf_ksymm.KRHF):
    """KRKS specialized for k-point symmetry (IBZ k-points with weights)."""

    # Delegate symmetry-aware machinery to the KsymAdaptedKSCF implementations.
    get_veff = get_veff
    get_rho = get_rho
    kpts = khf_ksymm.KsymAdaptedKSCF.kpts
    get_ovlp = khf_ksymm.KsymAdaptedKSCF.get_ovlp
    get_hcore = khf_ksymm.KsymAdaptedKSCF.get_hcore
    get_jk = khf_ksymm.KsymAdaptedKSCF.get_jk
    get_occ = khf_ksymm.KsymAdaptedKSCF.get_occ
    init_guess_by_chkfile = khf_ksymm.KsymAdaptedKSCF.init_guess_by_chkfile
    dump_chk = khf_ksymm.KsymAdaptedKSCF.dump_chk
    eig = khf_ksymm.KsymAdaptedKSCF.eig
    get_orbsym = khf_ksymm.KsymAdaptedKSCF.get_orbsym
    orbsym = khf_ksymm.KsymAdaptedKSCF.orbsym
    _finalize = khf_ksymm.KsymAdaptedKSCF._finalize
    get_init_guess = khf_ksymm.KRHF.get_init_guess

    def __init__(self, cell, kpts=libkpts.KPoints(), xc='LDA,VWN', exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald'), **kwargs):
        # NOTE(review): the KPoints() default is a shared mutable instance
        # (evaluated once at import time); kept for interface compatibility.
        khf_ksymm.KRHF.__init__(self, cell, kpts, exxdiv=exxdiv, **kwargs)
        rks.KohnShamDFT.__init__(self, xc)

    def dump_flags(self, verbose=None):
        khf_ksymm.KRHF.dump_flags(self, verbose)
        rks.KohnShamDFT.dump_flags(self, verbose)
        return self

    def energy_elec(self, dm_kpts=None, h1e_kpts=None, vhf=None):
        """Electronic energy; returns (total_electronic, ecoul + exc)."""
        if (h1e_kpts is None):
            h1e_kpts = self.get_hcore(self.cell, self.kpts)
        if (dm_kpts is None):
            dm_kpts = self.make_rdm1()
        if ((vhf is None) or (getattr(vhf, 'ecoul', None) is None)):
            vhf = self.get_veff(self.cell, dm_kpts)
        # IBZ k-point weights replace the usual uniform 1/Nk factor.
        weight = self.kpts.weights_ibz
        e1 = np.einsum('k,kij,kji', weight, h1e_kpts, dm_kpts)
        ecoul = vhf.ecoul
        tot_e = ((e1 + ecoul) + vhf.exc)
        self.scf_summary['e1'] = e1.real
        self.scf_summary['coul'] = ecoul.real
        self.scf_summary['exc'] = vhf.exc.real
        logger.debug(self, 'E1 = %s Ecoul = %s Exc = %s', e1, ecoul, vhf.exc)
        # BUG FIX: the original wrote `abs((ecoul.imag > thresh))`, i.e. abs()
        # of a boolean — a large *negative* imaginary part never triggered the
        # warning. Compare the magnitude of the imaginary part instead.
        if (khf.CHECK_COULOMB_IMAG and (abs(ecoul.imag) > (self.cell.precision * 10))):
            logger.warn(self, 'Coulomb energy has imaginary part %s. Coulomb integrals (e-e, e-N) may not converge !', ecoul.imag)
        return (tot_e.real, (vhf.ecoul + vhf.exc))

    def to_hf(self):
        """Convert to the corresponding symmetry-adapted KRHF object."""
        from pyscf.pbc.scf.khf_ksymm import KRHF
        return self._transfer_attrs_(KRHF(self.cell, self.kpts))
def test_slope_aware_backtracking():
    """Cross-axis-tilt-aware single-axis tracking matches reference angles."""
    index = pd.date_range('2019-01-01T08:00', '2019-01-01T17:00', freq='h')
    index = index.tz_localize('Etc/GMT+5')
    expected_data = pd.DataFrame(index=index, data=[(2.404287, 122.79177, (- 84.44), (- 10.899)), (11.263058, 133.288729, (- 72.604), (- 25.747)), (18.733558, 145.285552, (- 59.861), (- 59.861)), (24.109076, 158.939435, (- 45.578), (- 45.578)), (26.810735, 173.931802, (- 28.764), (- 28.764)), (26.482495, 189.371536, (- 8.475), (- 8.475)), (23.170447, 204.13681, 15.12, 15.12), (17.296785, 217.446538, 39.562, 39.562), (9.461862, 229.102218, 61.587, 32.339), (0.524817, 239.330401, 79.53, 5.49)], columns=['ApparentElevation', 'SolarAzimuth', 'TrueTracking', 'Backtracking'])
    expected_axis_tilt = 9.666
    expected_slope_angle = (- 2.576)
    (slope_azimuth, slope_tilt) = (180.0, 10.0)
    axis_azimuth = 195.0
    axis_tilt = tracking.calc_axis_tilt(slope_azimuth, slope_tilt, axis_azimuth)
    assert np.isclose(axis_tilt, expected_axis_tilt, rtol=0.001, atol=0.001)
    cross_axis_tilt = tracking.calc_cross_axis_tilt(slope_azimuth, slope_tilt, axis_azimuth, axis_tilt)
    assert np.isclose(cross_axis_tilt, expected_slope_angle, rtol=0.001, atol=0.001)
    sat = tracking.singleaxis((90.0 - expected_data['ApparentElevation']), expected_data['SolarAzimuth'], axis_tilt, axis_azimuth, max_angle=90.0, backtrack=True, gcr=0.5, cross_axis_tilt=cross_axis_tilt)
    # FIX: check_less_precise was deprecated in pandas 1.1 and removed in 2.0.
    # check_less_precise=True compared 3 decimal places (abs diff < 1.5e-3),
    # so explicit tolerances reproduce the old behavior.
    assert_series_equal(sat['tracker_theta'], expected_data['Backtracking'].rename('tracker_theta'), check_exact=False, rtol=0, atol=1.5e-3)
    truetracking = tracking.singleaxis((90.0 - expected_data['ApparentElevation']), expected_data['SolarAzimuth'], axis_tilt, axis_azimuth, max_angle=90.0, backtrack=False, gcr=0.5, cross_axis_tilt=cross_axis_tilt)
    assert_series_equal(truetracking['tracker_theta'], expected_data['TrueTracking'].rename('tracker_theta'), check_exact=False, rtol=0, atol=1.5e-3)
class TestKeyedOptimizer(unittest.TestCase):
    """Tests for KeyedOptimizer state-dict round-tripping and state init."""

    def _assert_state_dict_equals(self, dict1: Dict[(str, Any)], dict2: Dict[(str, Any)]) -> None:
        """Deep-compare two optimizer state dicts, including tensors nested in
        dictionaries, optimizer modules, and sharded tensors."""
        self.assertEqual(dict1['param_groups'], dict2['param_groups'])
        self.assertEqual(dict1['state']['param_2'], dict2['state']['param_2'])
        torch.testing.assert_close(dict1['state']['param_1']['tensor'], dict2['state']['param_1']['tensor'])
        torch.testing.assert_close(dict1['state']['param_1']['nested_dictionary']['tensor'], dict2['state']['param_1']['nested_dictionary']['tensor'])
        torch.testing.assert_close(dict1['state']['param_1']['optimizer_module']['tensor'], dict2['state']['param_1']['optimizer_module']['tensor'])
        # Sharded tensors are compared through their local shard (world_size=1).
        torch.testing.assert_close(dict1['state']['param_1']['sharded_tensor'].local_shards()[0].tensor, dict2['state']['param_1']['sharded_tensor'].local_shards()[0].tensor)

    def test_load_state_dict(self) -> None:
        """state_dict() reflects construction state, and load_state_dict()
        applies mutated values back onto the optimizer."""
        # Single-process gloo group: required because the test state contains
        # a sharded tensor.
        os.environ['MASTER_ADDR'] = str('localhost')
        os.environ['MASTER_PORT'] = str(get_free_port())
        dist.init_process_group('gloo', rank=0, world_size=1)
        (param_1_t, param_2_t) = (torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0]))
        (param_1, param_2) = (Variable(param_1_t), Variable(param_2_t))
        # Optimizer state covers scalars, tensors, sharded tensors, nested
        # dicts and optimizer modules to exercise every save/load path.
        keyed_optimizer = KeyedOptimizer({'param_1': param_1, 'param_2': param_2}, {param_1: {'one': 1.0, 'tensor': torch.tensor([5.0, 6.0]), 'sharded_tensor': sharded_tensor.full(sharding_spec.ChunkShardingSpec(dim=0, placements=['rank:0/cpu']), (4,), fill_value=1.0), 'nested_dictionary': {'tensor': torch.tensor([7.0, 8.0])}, 'optimizer_module': DummyOptimizerModule(torch.tensor([9.0, 10.0]))}, param_2: {'two': 2.0}}, [{'params': [param_1], 'param_group_val_0': 3.0, 'param_group_val_1': 4.0}, {'params': [param_2], 'param_group_val_0': 5.0, 'param_group_val_1': 6.0}])
        keyed_optimizer.save_param_groups(True)
        state: Dict[(str, Any)] = {'param_1': {'one': 1.0, 'tensor': torch.tensor([5.0, 6.0]), 'sharded_tensor': sharded_tensor.full(sharding_spec.ChunkShardingSpec(dim=0, placements=['rank:0/cpu']), (4,), fill_value=1.0), 'nested_dictionary': {'tensor': torch.tensor([7.0, 8.0])}, 'optimizer_module': {'tensor': torch.tensor([9.0, 10.0])}}, 'param_2': {'two': 2.0}}
        param_groups: List[Dict[(str, Any)]] = [{'params': ['param_1'], 'param_group_val_0': 3.0, 'param_group_val_1': 4.0}, {'params': ['param_2'], 'param_group_val_0': 5.0, 'param_group_val_1': 6.0}]
        expected_state_dict = {'state': state, 'param_groups': param_groups}
        self._assert_state_dict_equals(expected_state_dict, keyed_optimizer.state_dict())
        # Mutate every kind of entry, load it back, and verify round-trip.
        expected_state_dict['state']['param_1']['one'] = 10.0
        expected_state_dict['state']['param_1']['tensor'] = torch.tensor([50.0, 60.0])
        expected_state_dict['state']['param_1']['sharded_tensor'] = sharded_tensor.full(sharding_spec.ChunkShardingSpec(dim=0, placements=['rank:0/cpu']), (4,), fill_value=10.0)
        expected_state_dict['state']['param_1']['nested_dictionary']['tensor'] = torch.tensor([70.0, 80.0])
        expected_state_dict['state']['param_1']['optimizer_module']['tensor'] = torch.tensor([90.0, 100.0])
        expected_state_dict['param_groups'][0]['param_group_val_0'] = 8.0
        expected_state_dict['param_groups'][1]['param_group_val_1'] = 9.0
        keyed_optimizer.load_state_dict(expected_state_dict)
        self._assert_state_dict_equals(expected_state_dict, keyed_optimizer.state_dict())
        dist.destroy_process_group()

    def test_non_param_state_key(self) -> None:
        """Constructing with a state key that is not a param must raise."""
        with self.assertRaisesRegex(ValueError, 'All state keys must be params.'):
            param_1_t = torch.tensor([1.0, 2.0])
            param_1 = Variable(param_1_t)
            KeyedOptimizer({'param_1': param_1}, {param_1: 1.0, 'non_param_state_key': 2.0}, [{'params': [param_1], 'param_group_val_0': 3.0}])

    def test_init_state(self) -> None:
        """init_state() creates (sparse where requested) grads and momentum."""
        dense = torch.nn.Parameter(torch.ones((2, 3), dtype=torch.float))
        sparse = torch.nn.Parameter(torch.ones((1, 4), dtype=torch.float))
        opt = KeyedOptimizerWrapper({'dense': dense, 'sparse': sparse}, (lambda params: torch.optim.SGD(params, lr=0.1)))
        opt.init_state({'sparse'})
        self.assertTrue((dense.grad is not None))
        self.assertFalse(dense.grad.is_sparse)
        self.assertTrue(('momentum_buffer' in opt.state_dict()['state']['dense']))
        self.assertTrue((sparse.grad is not None))
        self.assertTrue(sparse.grad.is_sparse)
        self.assertTrue(('momentum_buffer' in opt.state_dict()['state']['sparse']))

    def test_pickle(self) -> None:
        """The optimizer survives a torch.save/torch.load round trip."""
        dense = torch.nn.Parameter(torch.ones((2, 3), dtype=torch.float))
        sparse = torch.nn.Parameter(torch.ones((1, 4), dtype=torch.float))
        opt = KeyedOptimizerWrapper({'dense': dense, 'sparse': sparse}, (lambda params: torch.optim.SGD(params, lr=0.1)))
        opt.init_state({'sparse'})
        bytesIO = io.BytesIO()
        torch.save(opt, bytesIO)
        bytesIO.seek(0)
        reload_opt = torch.load(bytesIO)
        for k in reload_opt.state_dict():
            self.assertEqual(opt.state_dict()[k], reload_opt.state_dict()[k])
def has_aer():
    """Return True when qiskit-aer is importable; the probe result is cached
    on ``_PROVIDER_CHECK`` so the import is attempted at most once."""
    if _PROVIDER_CHECK.checked_aer:
        return _PROVIDER_CHECK.has_aer
    try:
        # Import probe only; the imported name itself is not used.
        from qiskit.providers.aer import AerProvider
        _PROVIDER_CHECK.has_aer = True
    except Exception as ex:
        # Best-effort: any failure just means Aer is unavailable.
        _PROVIDER_CHECK.has_aer = False
        logger.debug("AerProvider not loaded: '%s'", str(ex))
    _PROVIDER_CHECK.checked_aer = True
    return _PROVIDER_CHECK.has_aer
class TestBmshj2018Factorized():
    """Tests for the bmshj2018-factorized model factory."""

    def test_params(self):
        """Quality 1-5 uses N=128 encoder channels; 6-8 uses N=192."""
        for i in range(1, 6):
            net = bmshj2018_factorized(i, metric='mse')
            assert isinstance(net, FactorizedPrior)
            assert (net.state_dict()['g_a.0.weight'].size(0) == 128)
            assert (net.state_dict()['g_a.6.weight'].size(0) == 192)
        for i in range(6, 9):
            net = bmshj2018_factorized(i, metric='mse')
            assert isinstance(net, FactorizedPrior)
            assert (net.state_dict()['g_a.0.weight'].size(0) == 192)

    def test_invalid_params(self):
        """Out-of-range qualities and unknown metrics must raise ValueError."""
        with pytest.raises(ValueError):
            bmshj2018_factorized((- 1))
        with pytest.raises(ValueError):
            bmshj2018_factorized(10)
        with pytest.raises(ValueError):
            bmshj2018_factorized(10, metric='ssim')
        with pytest.raises(ValueError):
            bmshj2018_factorized(1, metric='ssim')

    # NOTE(review): the bare '.slow'/'.pretrained'/'.parametrize' lines were
    # stripped decorators; restored as pytest marks — confirm against upstream.
    @pytest.mark.slow
    @pytest.mark.pretrained
    @pytest.mark.parametrize('metric', [('mse',), ('ms-ssim',)])
    def test_pretrained(self, metric):
        """Pretrained weights load with the expected channel counts."""
        # parametrize supplies 1-tuples; unwrap the metric name.
        metric = metric[0]
        for i in range(1, 6):
            net = bmshj2018_factorized(i, metric=metric, pretrained=True)
            assert (net.state_dict()['g_a.0.weight'].size(0) == 128)
            assert (net.state_dict()['g_a.6.weight'].size(0) == 192)
        for i in range(6, 9):
            net = bmshj2018_factorized(i, metric=metric, pretrained=True)
            assert (net.state_dict()['g_a.0.weight'].size(0) == 192)
            assert (net.state_dict()['g_a.6.weight'].size(0) == 320)
def usymeig(A: LinearOperator, neig: Optional[int]=None, M: Optional[LinearOperator]=None, bck_options: Mapping[(str, Any)]={}, method: Union[(str, Callable, None)]=None, **fwd_options) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Compute the uppermost eigenpairs of ``A`` (generalized with ``M`` if given).

    Thin wrapper over ``symeig`` with the mode fixed to ``'uppest'``; all
    other arguments are forwarded unchanged.
    """
    # NOTE(review): the mutable default ``bck_options={}`` is shared across
    # calls; it is only forwarded here, never mutated — but confirm symeig
    # does not mutate it before changing the default.
    return symeig(A, neig, 'uppest', M, method=method, bck_options=bck_options, **fwd_options)
# NOTE(review): the bare '(*specs)' line was a SyntaxError as written and is
# almost certainly a stripped decorator; restored as a pytest parametrization
# over `specs` — TODO confirm the exact form against the original source.
@pytest.mark.parametrize(*specs)
def test_env_semantics(spec):
    """Replay a recorded rollout and check the env reproduces it exactly."""
    with open(ROLLOUT_FILE) as data_file:
        rollout_dict = json.load(data_file)
    if (spec.id not in rollout_dict):
        if ((not spec.nondeterministic) or should_skip_env_spec_for_tests(spec)):
            logger.warn('Rollout does not exist for {}, run generate_json.py to generate rollouts for new envs'.format(spec.id))
            # Missing rollout for a deterministic/skipped env: nothing to check.
            # (Nondeterministic, non-skipped envs fall through and will fail
            # below on the missing key — presumably intentional; confirm.)
            return
    logger.info('Testing rollout for {} environment...'.format(spec.id))
    (observations_now, actions_now, rewards_now, dones_now) = generate_rollout_hash(spec)
    assert (rollout_dict[spec.id]['observations'] == observations_now), 'Observations not equal for {}'.format(spec.id)
    assert (rollout_dict[spec.id]['actions'] == actions_now), 'Actions not equal for {}'.format(spec.id)
    assert (rollout_dict[spec.id]['rewards'] == rewards_now), 'Rewards not equal for {}'.format(spec.id)
    assert (rollout_dict[spec.id]['dones'] == dones_now), 'Dones not equal for {}'.format(spec.id)
def ranolazine_mpo() -> GoalDirectedBenchmark:
    """Goal-directed benchmark: a multi-property objective around ranolazine.

    Combines Tanimoto similarity to ranolazine with logP, TPSA and
    fluorine-count terms via a geometric mean.
    """
    ranolazine = 'COc1ccccc1OCC(O)CN2CCN(CC(=O)Nc3c(C)cccc3C)CC2'
    # Similarity term, clipped so anything above 0.7 scores as perfect.
    similarity_term = TanimotoScoringFunction(
        ranolazine, fp_type='AP',
        score_modifier=ClippedScoreModifier(upper_x=0.7))
    logp_term = RdkitScoringFunction(
        descriptor=logP, score_modifier=MaxGaussianModifier(mu=7, sigma=1))
    tpsa_term = RdkitScoringFunction(
        descriptor=tpsa, score_modifier=MaxGaussianModifier(mu=95, sigma=20))
    fluorine_term = RdkitScoringFunction(
        descriptor=AtomCounter('F'),
        score_modifier=GaussianModifier(mu=1, sigma=1.0))
    objective = GeometricMeanScoringFunction(
        [similarity_term, logp_term, fluorine_term, tpsa_term])
    return GoalDirectedBenchmark(
        name='Ranolazine MPO',
        objective=objective,
        contribution_specification=uniform_specification(1, 10, 100),
        starting_population=[ranolazine])
class SPP(nn.Module):
    """Spatial pyramid pooling block.

    Concatenates the input with three stride-1 max-pools (kernel sizes
    5, 9 and 13) along the channel axis, quadrupling the channel count
    while leaving the spatial dimensions unchanged.
    """

    def __init__(self):
        super(SPP, self).__init__()

    def forward(self, x):
        # padding = k // 2 keeps H and W identical for each pool size.
        pooled = [
            torch.nn.functional.max_pool2d(x, k, stride=1, padding=k // 2)
            for k in (5, 9, 13)
        ]
        return torch.cat([x] + pooled, dim=1)
def test_create():
    """A Nintendont builder reports its ip and produces a NintendontExecutor."""
    ip = '102.168.1.1'
    builder = NintendontConnectorBuilder(ip)

    # Builder metadata reflects the configured address.
    assert builder.configuration_params() == {'ip': ip}
    assert builder.connector_builder_choice == ConnectorBuilderChoice.NINTENDONT
    assert builder.pretty_text == 'Nintendont: 102.168.1.1'

    # The executor it creates targets the same address.
    executor = builder.create_executor()
    assert isinstance(executor, NintendontExecutor)
    assert executor.ip == ip
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.