code |
|---|
class HfArgumentParserTest(unittest.TestCase):
def test_set_level(self):
logger = logging.get_logger()
level_origin = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity(level_origin)
def test_integration(self):
level_origin = logging.get_verbosity()
logger = logging.get_logger('transformers.models.bart.tokenization_bart')
msg = 'Testing 1, 2, 3'
if (level_origin <= logging.WARNING):
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, (msg + '\n'))
logging.set_verbosity_error()
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, '')
logging.set_verbosity_warning()
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, (msg + '\n'))
logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY='error')
def test_env_override(self):
transformers.utils.logging._reset_library_root_logger()
_ = logging.get_logger('transformers.models.bart.tokenization_bart')
env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
env_level = logging.log_levels[env_level_str]
current_level = logging.get_verbosity()
self.assertEqual(env_level, current_level, f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}')
os.environ['TRANSFORMERS_VERBOSITY'] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error')
def test_env_invalid_override(self):
transformers.utils.logging._reset_library_root_logger()
logger = logging.logging.getLogger()
with CaptureLogger(logger) as cl:
logging.get_logger('transformers.models.bart.tokenization_bart')
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)
def test_advisory_warnings(self):
logger = logging.get_logger('transformers.models.bart.tokenization_bart')
msg = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, '')
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, (msg + '\n')) |
class LLTM(torch.nn.Module):
def __init__(self, input_features, state_size):
super(LLTM, self).__init__()
self.input_features = input_features
self.state_size = state_size
self.weights = torch.nn.Parameter(torch.Tensor((3 * state_size), (input_features + state_size)))
self.bias = torch.nn.Parameter(torch.Tensor(1, (3 * state_size)))
self.reset_parameters()
def reset_parameters(self):
stdv = (1.0 / math.sqrt(self.state_size))
for weight in self.parameters():
weight.data.uniform_((- stdv), (+ stdv))
def forward(self, input, state):
(old_h, old_cell) = state
X = torch.cat([old_h, input], dim=1)
gate_weights = F.linear(X, self.weights, self.bias)
gates = gate_weights.chunk(3, dim=1)
input_gate = torch.sigmoid(gates[0])
output_gate = torch.sigmoid(gates[1])
candidate_cell = F.elu(gates[2])
new_cell = (old_cell + (candidate_cell * input_gate))
new_h = (torch.tanh(new_cell) * output_gate)
return (new_h, new_cell) |
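# Usage sketch (not part of the original snippet): exercising the LLTM cell above
# with random tensors to show the expected input/state shapes. Sizes are arbitrary
# illustrative values; the class itself relies on the module-level imports of
# torch, math and torch.nn.functional as F.
import torch
batch_size, input_features, state_size = 4, 32, 128
cell = LLTM(input_features, state_size)
x = torch.randn(batch_size, input_features)
h = torch.randn(batch_size, state_size)
c = torch.randn(batch_size, state_size)
new_h, new_c = cell(x, (h, c))
print(new_h.shape, new_c.shape)  # torch.Size([4, 128]) torch.Size([4, 128]) |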
def _extract_tar_info(tarfile, class_to_idx=None, sort=True):
files = []
labels = []
for ti in tarfile.getmembers():
if (not ti.isfile()):
continue
(dirname, basename) = os.path.split(ti.path)
label = os.path.basename(dirname)
ext = os.path.splitext(basename)[1]
if (ext.lower() in IMG_EXTENSIONS):
files.append(ti)
labels.append(label)
if (class_to_idx is None):
unique_labels = set(labels)
sorted_labels = list(sorted(unique_labels, key=natural_key))
class_to_idx = {c: idx for (idx, c) in enumerate(sorted_labels)}
tarinfo_and_targets = [(f, class_to_idx[l]) for (f, l) in zip(files, labels) if (l in class_to_idx)]
if sort:
tarinfo_and_targets = sorted(tarinfo_and_targets, key=(lambda k: natural_key(k[0].path)))
return (tarinfo_and_targets, class_to_idx) |
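# Usage sketch (not part of the original snippet): scanning an ImageNet-style tar of
# class folders with the helper above. IMG_EXTENSIONS and natural_key are assumed to
# be defined in the same module; 'imagenet_train.tar' is a hypothetical path.
import tarfile
with tarfile.open('imagenet_train.tar') as tf:
    samples, class_to_idx = _extract_tar_info(tf)
    print(len(samples), 'images across', len(class_to_idx), 'classes') |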
class StaffGeoReportView(AllReportMixin, GeoReportMixin, BaseReportView):
fieldnames = ['index', 'views', 'clicks', 'ctr', 'ecpm', 'revenue', 'our_revenue']
impression_model = GeoImpression
force_revshare = 70.0
report = PublisherGeoReport
template_name = 'adserver/reports/staff-geos.html' |
def init_dist(launcher, backend='nccl', **kwargs):
if (mp.get_start_method(allow_none=True) is None):
mp.set_start_method('spawn')
if (launcher == 'pytorch'):
_init_dist_pytorch(backend, **kwargs)
elif (launcher == 'mpi'):
_init_dist_mpi(backend, **kwargs)
elif (launcher == 'slurm'):
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError('Invalid launcher type: {}'.format(launcher)) |
def get_gold_word_buget(sum_file, buget_file):
bugets = []
with open(buget_file, 'w', encoding='utf8', buffering=1) as fout:
for line in open(sum_file, encoding='utf8'):
nword = len(line.strip().split())
if (nword == 0):
nsent = 1
fout.write('{}\n'.format(nword))
bugets.append(nword)
import numpy
bugets = numpy.asarray(bugets)
print('word buget: min {} max {} avg {} std {}'.format(bugets.min(), bugets.max(), bugets.mean(), bugets.std())) |
class TLowercase(TFilter, TFilterMixin):
Kind = Lowercase
def test_conv(self):
empty = fsnative('')
v = self.c.filter(empty, fsnative('foobar baz'))
self.assertEqual(v, fsnative('foobar baz'))
self.assertTrue(isinstance(v, fsnative))
v = self.c.filter(empty, fsnative('Foobar.BAZ'))
self.assertEqual(v, fsnative('foobar.baz'))
self.assertTrue(isinstance(v, fsnative)) |
class ParameterSet(NamedTuple):
values: Sequence[Union[(object, NotSetType)]]
marks: Collection[Union[('MarkDecorator', 'Mark')]]
id: Optional[str]
@classmethod
def param(cls, *values: object, marks: Union[('MarkDecorator', Collection[Union[('MarkDecorator', 'Mark')]])]=(), id: Optional[str]=None) -> 'ParameterSet':
if isinstance(marks, MarkDecorator):
marks = (marks,)
else:
assert isinstance(marks, collections.abc.Collection)
if (id is not None):
if (not isinstance(id, str)):
raise TypeError(f'Expected id to be a string, got {type(id)}: {id!r}')
id = ascii_escaped(id)
return cls(values, marks, id)
@classmethod
def extract_from(cls, parameterset: Union[('ParameterSet', Sequence[object], object)], force_tuple: bool=False) -> 'ParameterSet':
if isinstance(parameterset, cls):
return parameterset
if force_tuple:
return cls.param(parameterset)
else:
return cls(parameterset, marks=[], id=None)
@staticmethod
def _parse_parametrize_args(argnames: Union[(str, Sequence[str])], argvalues: Iterable[Union[('ParameterSet', Sequence[object], object)]], *args, **kwargs) -> Tuple[(Sequence[str], bool)]:
if isinstance(argnames, str):
argnames = [x.strip() for x in argnames.split(',') if x.strip()]
force_tuple = (len(argnames) == 1)
else:
force_tuple = False
return (argnames, force_tuple)
@staticmethod
def _parse_parametrize_parameters(argvalues: Iterable[Union[('ParameterSet', Sequence[object], object)]], force_tuple: bool) -> List['ParameterSet']:
return [ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues]
@classmethod
def _for_parametrize(cls, argnames: Union[(str, Sequence[str])], argvalues: Iterable[Union[('ParameterSet', Sequence[object], object)]], func, config: Config, nodeid: str) -> Tuple[(Sequence[str], List['ParameterSet'])]:
(argnames, force_tuple) = cls._parse_parametrize_args(argnames, argvalues)
parameters = cls._parse_parametrize_parameters(argvalues, force_tuple)
del argvalues
if parameters:
for param in parameters:
if (len(param.values) != len(argnames)):
msg = '{nodeid}: in "parametrize" the number of names ({names_len}):\n {names}\nmust be equal to the number of values ({values_len}):\n {values}'
fail(msg.format(nodeid=nodeid, values=param.values, names=argnames, names_len=len(argnames), values_len=len(param.values)), pytrace=False)
else:
mark = get_empty_parameterset_mark(config, argnames, func)
parameters.append(ParameterSet(values=((NOTSET,) * len(argnames)), marks=[mark], id=None))
return (argnames, parameters) |
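# Usage sketch (not part of the original snippet): building a parameter set the way
# pytest.param does; ascii_escaped and the mark types come from the surrounding module.
ps = ParameterSet.param(1, 'a', id='case-1')
print(ps.values, ps.marks, ps.id)  # (1, 'a') () case-1 |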
class PreciseBN(HookBase):
def __init__(self, period, model, data_loader, num_iter):
self._logger = logging.getLogger(__name__)
if (len(get_bn_modules(model)) == 0):
self._logger.info('PreciseBN is disabled because model does not contain BN layers in training mode.')
self._disabled = True
return
self._model = model
self._data_loader = data_loader
self._num_iter = num_iter
self._period = period
self._disabled = False
self._data_iter = None
def after_step(self):
next_iter = (self.trainer.iter + 1)
is_final = (next_iter == self.trainer.max_iter)
if (is_final or ((self._period > 0) and ((next_iter % self._period) == 0))):
self.update_stats()
def update_stats(self):
if self._disabled:
return
if (self._data_iter is None):
self._data_iter = iter(self._data_loader)
def data_loader():
for num_iter in itertools.count(1):
if ((num_iter % 100) == 0):
self._logger.info('Running precise-BN ... {}/{} iterations.'.format(num_iter, self._num_iter))
(yield next(self._data_iter))
with EventStorage():
self._logger.info(('Running precise-BN for {} iterations... '.format(self._num_iter) + 'Note that this could produce different statistics every time.'))
update_bn_stats(self._model, data_loader(), self._num_iter) |
def can_perform_action(action, o1_id, agent_id, graph, object_restrictions=None, teleport=True):
if (action == 'no_action'):
return None
obj2_str = ''
obj1_str = ''
id2node = {node['id']: node for node in graph['nodes']}
o1 = id2node[o1_id]['class_name']
num_args = (0 if (o1 is None) else 1)
if (num_args != args_per_action(action)):
return None
grabbed_objects = [edge['to_id'] for edge in graph['edges'] if ((edge['from_id'] == agent_id) and (edge['relation_type'] in ['HOLDS_RH', 'HOLD_LH']))]
close_edge = (len([edge['to_id'] for edge in graph['edges'] if ((edge['from_id'] == agent_id) and (edge['to_id'] == o1_id) and (edge['relation_type'] == 'CLOSE'))]) > 0)
if (action == 'grab'):
if (len(grabbed_objects) > 0):
return None
if action.startswith('walk'):
if (o1_id in grabbed_objects):
return None
if (o1_id == agent_id):
return None
if ((action in ['grab', 'open', 'close']) and (not close_edge)):
return None
if (action == 'open'):
if (object_restrictions is not None):
if (id2node[o1_id]['class_name'] not in object_restrictions['objects_inside']):
return None
if (('OPEN' in id2node[o1_id]['states']) or ('CLOSED' not in id2node[o1_id]['states'])):
return None
if (action == 'close'):
if (object_restrictions is not None):
if (id2node[o1_id]['class_name'] not in object_restrictions['objects_inside']):
return None
if (('CLOSED' in id2node[o1_id]['states']) or ('OPEN' not in id2node[o1_id]['states'])):
return None
if ('put' in action):
if (len(grabbed_objects) == 0):
return None
else:
o2_id = grabbed_objects[0]
if (o2_id == o1_id):
return None
o2 = id2node[o2_id]['class_name']
obj2_str = f'<{o2}> ({o2_id})'
if (o1 is not None):
obj1_str = f'<{o1}> ({o1_id})'
if (o1_id in id2node.keys()):
if (id2node[o1_id]['class_name'] == 'character'):
return None
if action.startswith('put'):
if (object_restrictions is not None):
if (id2node[o1_id]['class_name'] in object_restrictions['objects_inside']):
action = 'putin'
if (id2node[o1_id]['class_name'] in object_restrictions['objects_surface']):
action = 'putback'
elif ('CONTAINERS' in id2node[o1_id]['properties']):
action = 'putin'
elif ('SURFACES' in id2node[o1_id]['properties']):
action = 'putback'
if (action.startswith('walk') and teleport):
action = 'walkto'
action_str = f'[{action}] {obj2_str} {obj1_str}'.strip()
return action_str |
class SiamRPN(nn.Module):
def __init__(self, size=2, feature_out=512, anchor=5):
configs = [3, 96, 256, 384, 384, 256]
configs = list(map((lambda x: (3 if (x == 3) else (x * size))), configs))
feat_in = configs[(- 1)]
super(SiamRPN, self).__init__()
self.featureExtract = nn.Sequential(nn.Conv2d(configs[0], configs[1], kernel_size=11, stride=2), nn.BatchNorm2d(configs[1]), nn.MaxPool2d(kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.Conv2d(configs[1], configs[2], kernel_size=5), nn.BatchNorm2d(configs[2]), nn.MaxPool2d(kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.Conv2d(configs[2], configs[3], kernel_size=3), nn.BatchNorm2d(configs[3]), nn.ReLU(inplace=True), nn.Conv2d(configs[3], configs[4], kernel_size=3), nn.BatchNorm2d(configs[4]), nn.ReLU(inplace=True), nn.Conv2d(configs[4], configs[5], kernel_size=3), nn.BatchNorm2d(configs[5]))
self.anchor = anchor
self.feature_out = feature_out
self.conv_r1 = nn.Conv2d(feat_in, ((feature_out * 4) * anchor), 3)
self.conv_r2 = nn.Conv2d(feat_in, feature_out, 3)
self.conv_cls1 = nn.Conv2d(feat_in, ((feature_out * 2) * anchor), 3)
self.conv_cls2 = nn.Conv2d(feat_in, feature_out, 3)
self.regress_adjust = nn.Conv2d((4 * anchor), (4 * anchor), 1)
self.r1_kernel = []
self.cls1_kernel = []
self.cfg = {}
def forward(self, x):
x_f = self.featureExtract(x)
return (self.regress_adjust(F.conv2d(self.conv_r2(x_f), self.r1_kernel)), F.conv2d(self.conv_cls2(x_f), self.cls1_kernel)) |
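# Note (not part of the original snippet): forward() above assumes self.r1_kernel and
# self.cls1_kernel were already populated elsewhere, typically by a separate "template"
# pass that runs conv_r1/conv_cls1 on the exemplar image and reshapes the result into
# per-anchor kernels; calling forward() straight after construction would fail because
# both attributes are still empty lists. |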
class EfficientNetConfig(PretrainedConfig):
model_type = 'efficientnet'
def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: List[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: List[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: List[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int]=[], strides: List[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, dropout_rate: float=0.5, drop_connect_rate: float=0.2, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = (sum(num_block_repeats) * 4) |
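# Usage sketch (not part of the original snippet): instantiating the config with its
# defaults (width 2.0 / depth 3.1 / image size 600, i.e. roughly the B7 scaling) and
# reading the derived num_hidden_layers = sum(num_block_repeats) * 4.
config = EfficientNetConfig()
print(config.hidden_dim, config.num_hidden_layers)  # 2560 64 |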
def create_model(opt):
model = None
if (opt.model == 'motion'):
assert (opt.dataset_mode == 'aligned')
from .motion_model import Motion_Model
model = Motion_Model()
elif (opt.model == 'test'):
assert (opt.dataset_mode == 'single')
from .test_model import TestModel
model = TestModel()
else:
raise NotImplementedError(('model [%s] not implemented.' % opt.model))
model.initialize(opt)
print(('model [%s] was created' % model.name()))
return model |
class Popup(Element):
_template = Template("\n var {{this.get_name()}} = L.popup({{ this.options|tojson }});\n\n {% for name, element in this.html._children.items() %}\n {% if this.lazy %}\n {{ this._parent.get_name() }}.once('click', function() {\n {{ this.get_name() }}.setContent($(`{{ element.render(**kwargs).replace('\\n',' ') }}`)[0]);\n });\n {% else %}\n var {{ name }} = $(`{{ element.render(**kwargs).replace('\\n',' ') }}`)[0];\n {{ this.get_name() }}.setContent({{ name }});\n {% endif %}\n {% endfor %}\n\n {{ this._parent.get_name() }}.bindPopup({{ this.get_name() }})\n {% if this.show %}.openPopup(){% endif %};\n\n {% for name, element in this.script._children.items() %}\n {{element.render()}}\n {% endfor %}\n ")
def __init__(self, html: Union[(str, Element, None)]=None, parse_html: bool=False, max_width: Union[(str, int)]='100%', show: bool=False, sticky: bool=False, lazy: bool=False, **kwargs: TypeJsonValue):
super().__init__()
self._name = 'Popup'
self.header = Element()
self.html = Element()
self.script = Element()
self.header._parent = self
self.html._parent = self
self.script._parent = self
script = (not parse_html)
if isinstance(html, Element):
self.html.add_child(html)
elif isinstance(html, str):
html = escape_backticks(html)
self.html.add_child(Html(html, script=script))
self.show = show
self.lazy = lazy
self.options = parse_options(max_width=max_width, autoClose=(False if (show or sticky) else None), closeOnClick=(False if sticky else None), **kwargs)
def render(self, **kwargs) -> None:
for (name, child) in self._children.items():
child.render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), 'You cannot render this Element if it is not in a Figure.'
figure.script.add_child(Element(self._template.render(this=self, kwargs=kwargs)), name=self.get_name()) |
class GuiImportImplantsCommand(wx.Command):
def __init__(self, fitID, implants):
wx.Command.__init__(self, True, 'Import Implants')
self.internalHistory = InternalCommandHistory()
self.fitID = fitID
self.implants = set((i[0] for i in implants))
def Do(self):
if (not self.implants):
return False
sFit = Fit.getInstance()
fit = sFit.getFit(self.fitID)
if (fit.implantSource != ImplantLocation.FIT):
cmd = CalcChangeImplantLocationCommand(fitID=self.fitID, source=ImplantLocation.FIT)
successSource = self.internalHistory.submit(cmd)
else:
successSource = False
resultsImplants = []
for itemID in self.implants:
cmd = CalcAddImplantCommand(fitID=self.fitID, implantInfo=ImplantInfo(itemID=itemID))
resultsImplants.append(self.internalHistory.submit(cmd))
successImplants = any(resultsImplants)
success = (successSource or successImplants)
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success
def Undo(self):
success = self.internalHistory.undoAll()
eos.db.flush()
sFit = Fit.getInstance()
sFit.recalc(self.fitID)
sFit.fill(self.fitID)
eos.db.commit()
wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
return success |
class NumpyDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]']
self.vocab_file = os.path.join(self.tmpdirname, 'vocab.txt')
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{'label': i, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 6))
features = [{'label_ids': [0, 1, 2], 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].tolist(), ([[0, 1, 2]] * 8))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 6))
features = [{'label': i, 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 10))
features = [{'label': np.array(i), 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['labels'].tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 10))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': i} for i in range(4)]
batch = data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].dtype, np.int64)
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': float(i)} for i in range(4)]
batch = data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].dtype, np.float32)
def test_default_with_no_labels(self):
features = [{'label': None, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertTrue(('labels' not in batch))
self.assertEqual(batch['inputs'].shape, (8, 6))
features = [{'label_ids': None, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertTrue(('labels' not in batch))
self.assertEqual(batch['inputs'].shape, (8, 6))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 6))
self.assertEqual(batch['input_ids'][0].tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
data_collator = DataCollatorWithPadding(tokenizer, padding='max_length', max_length=10, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2], 'labels': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5], 'labels': [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 6))
self.assertEqual(batch['input_ids'][0].tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
self.assertEqual(batch['labels'].shape, (2, 6))
self.assertEqual(batch['labels'][0].tolist(), ([0, 1, 2] + ([(- 100)] * 3)))
data_collator = DataCollatorForTokenClassification(tokenizer, padding='max_length', max_length=10, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
self.assertEqual(batch['labels'].shape, (2, 8))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=(- 1), return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 6))
self.assertEqual(batch['input_ids'][0].tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
self.assertEqual(batch['labels'].shape, (2, 6))
self.assertEqual(batch['labels'][0].tolist(), ([0, 1, 2] + ([(- 1)] * 3)))
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
batch = data_collator(pad_features, return_tensors='np')
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
batch = data_collator(pad_features, return_tensors='np')
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors='np')
with self.assertRaises(ValueError):
data_collator(pad_features)
set_seed(42)
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
batch = data_collator(pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
batch = data_collator(pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['perm_mask'].shape, (2, 10, 10))
self.assertEqual(batch['target_mapping'].shape, (2, 10, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['perm_mask'].shape, (2, 10, 10))
self.assertEqual(batch['target_mapping'].shape, (2, 10, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2, 3, 4], 'token_type_ids': [0, 1, 2, 3, 4], 'next_sentence_label': i} for i in range(2)]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 5))
self.assertEqual(batch['token_type_ids'].shape, (2, 5))
self.assertEqual(batch['labels'].shape, (2, 5))
self.assertEqual(batch['next_sentence_label'].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
self.assertEqual(batch['token_type_ids'].shape, (2, 8))
self.assertEqual(batch['labels'].shape, (2, 8))
self.assertEqual(batch['next_sentence_label'].shape, (2,))
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': np.array([0, 1, 2, 3, 4]), 'token_type_ids': np.array([0, 1, 2, 3, 4]), 'sentence_order_label': i} for i in range(2)]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 5))
self.assertEqual(batch['token_type_ids'].shape, (2, 5))
self.assertEqual(batch['labels'].shape, (2, 5))
self.assertEqual(batch['sentence_order_label'].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
self.assertEqual(batch['token_type_ids'].shape, (2, 8))
self.assertEqual(batch['labels'].shape, (2, 8))
self.assertEqual(batch['sentence_order_label'].shape, (2,)) |
def collect_data(gamefiles, args):
print('Using {} processes.'.format(args.nb_processes))
desc = 'Extracting data from {} games'.format(len(gamefiles))
pbar = tqdm.tqdm(total=len(gamefiles), desc=desc)
outfile = open(args.output, 'w')
outfile.write('[\n')
def _assemble_results(args):
(gamefile, data) = args
pbar.set_postfix_str(gamefile)
pbar.update()
outfile.write((',\n'.join((json.dumps(d) for d in data)) + ',\n'))
if (args.nb_processes > 1):
pool = multiprocessing.Pool(args.nb_processes)
results = []
for gamefile in gamefiles:
result = pool.apply_async(collect_data_from_game, (gamefile,), callback=_assemble_results)
results.append(result)
for result in results:
result.get()
pool.close()
pool.join()
else:
for (i, gamefile) in enumerate(gamefiles):
data = collect_data_from_game(gamefile)
_assemble_results((gamefile, data))
pbar.close()
outfile.seek((outfile.tell() - 2), os.SEEK_SET)
outfile.write('\n]')
outfile.close() |
def pblock_043(content):
stage_number = (- 1)
pzs = sxml.PolesZeros(pz_transfer_function_type=ptftype(get1(content, b'05')), input_units=sxml.Units(name=punit(get1(content, b'06'))), output_units=sxml.Units(name=punit(get1(content, b'07'))), normalization_factor=float(get1(content, b'08')), normalization_frequency=sxml.Frequency(value=float(get1(content, b'09'))), zero_list=list(map(ppolezero, getn(content, b'11-14'))), pole_list=list(map(ppolezero, getn(content, b'16-19'))))
for (i, x) in enumerate(pzs.zero_list):
x.number = i
for (i, x) in enumerate(pzs.pole_list):
x.number = i
return (stage_number, pzs) |
def main():
parser = ArgumentParser()
parser.add_argument('--dgx', type=bool, default=True, help='whether it is on dgx')
parser.add_argument('--resume', default='/cluster/work/cvl/cany/lanefinder/polyline', help='path to an experiment to resume')
parser.add_argument('--exp', default='/cluster/home/cany/lanefinder_github/baseline/Experiments/mle.json', help='path to an experiment to resume')
parser.add_argument('--only_big', type=bool, default=False, help='whether it is on dgx')
parser.add_argument('--use_gt', type=bool, default=use_gt, help='whether it is on dgx')
parser.add_argument('--split_pe', type=bool, default=split_pe, help='whether it is on dgx')
parser.add_argument('--only_bev_pe', type=bool, default=only_bev_pe, help='whether it is on dgx')
parser.add_argument('--bev_pe', type=bool, default=apply_bev_pe, help='whether it is on dgx')
parser.add_argument('--abs_bev', type=bool, default=abs_bev, help='whether it is on dgx')
parser.add_argument('--apply_poly_loss', type=bool, default=apply_poly_loss, help='whether it is on dgx')
parser.add_argument('--objects', type=bool, default=True, help='whether estimate objects')
parser.add_argument('--num_object_queries', default=100, type=int, help='Number of query slots')
parser.add_argument('--num_object_classes', default=8, type=int, help='Num object classes')
parser.add_argument('--num_spline_points', default=3, type=int, help='Number of spline points')
parser.add_argument('--frozen_weights', type=str, default=None, help='Path to the pretrained model. If set, only the mask head will be trained')
parser.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
parser.add_argument('--dilation', default=True, help='If true, we replace stride with dilation in the last convolutional block (DC5)')
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
parser.add_argument('--dim_feedforward', default=256, type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
parser.add_argument('--hidden_dim', default=256, type=int, help='Size of the embeddings (dimension of the transformer)')
parser.add_argument('--dropout', default=0.1, type=float, help='Dropout applied in the transformer')
parser.add_argument('--nheads', default=4, type=int, help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int, help='Number of query slots')
parser.add_argument('--pre_norm', action='store_true')
parser.add_argument('--masks', default=False, help='Train segmentation head if the flag is provided')
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', help='Disables auxiliary decoding losses (loss at each layer)')
parser.add_argument('--set_obj_cost_class', default=2, type=float, help='Class coefficient in the matching cost')
parser.add_argument('--set_obj_cost_center', default=3, type=float, help='Center coefficient in the matching cost')
parser.add_argument('--set_obj_cost_len', default=0.5, type=float, help='Length coefficient in the matching cost')
parser.add_argument('--set_obj_cost_orient', default=1, type=float, help='Orientation coefficient in the matching cost')
parser.add_argument('--set_cost_class', default=2, type=float, help='Class coefficient in the matching cost')
parser.add_argument('--set_cost_bbox', default=1, type=float, help='L1 box coefficient in the matching cost')
parser.add_argument('--set_cost_end', default=1, type=float, help='L1 endpoint coefficient in the matching cost')
parser.add_argument('--set_cost_giou', default=1, type=float, help='giou box coefficient in the matching cost')
parser.add_argument('--object_detection_loss_coef', default=4, type=float)
parser.add_argument('--object_center_loss_coef', default=3, type=float)
parser.add_argument('--object_len_loss_coef', default=0.5, type=float)
parser.add_argument('--object_orient_loss_coef', default=0.5, type=float)
parser.add_argument('--polyline_loss_coef', default=2, type=float)
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--assoc_loss_coef', default=1, type=float)
parser.add_argument('--detection_loss_coef', default=3, type=float)
parser.add_argument('--endpoints_loss_coef', default=2, type=float)
parser.add_argument('--bbox_loss_coef', default=2, type=float)
parser.add_argument('--focal_loss_coef', default=0.1, type=float)
parser.add_argument('--init_points_loss_coef', default=1, type=float)
parser.add_argument('--loss_end_match_coef', default=1, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--visible_loss_coef', default=1, type=float)
parser.add_argument('--eos_coef', default=0.01, type=float, help='Relative classification weight of the no-object class')
parser.add_argument('--object_eos_coef', default=0.1, type=float, help='Relative classification weight of the no-object class')
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
parser.add_argument('--eval', default=False, action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
args = parser.parse_args()
print('GOT ARGS ')
logging.error(str(args))
config = get_configuration(args)
opts = json.load(open(args.exp, 'r'))
logdir = create_experiment(config, args.resume)
config.save_logdir = logdir
config.n_control_points = args.num_spline_points
config.freeze()
device = torch.device(args.device)
(model, criterion, postprocessors) = build(args, config, opts)
model.to(device)
if (config.train_dataset == 'nuscenes'):
(train_loader, train_dataset, val_loader, val_dataset) = data_factory.build_nuscenes_dataloader(config, args, val=True)
else:
(train_loader, train_dataset, val_loader, val_dataset) = data_factory.build_argoverse_dataloader(config, args, val=True)
(epoch, best_iou, iteration) = load_checkpoint(os.path.join('/cluster/work/cvl/cany/simplice/ckpts/poly-base-False', 'polyrnn-base.pth'), model)
logging.error('LOADED MY CHECKPOINT')
freeze_backbone_layers(model)
thresh = 0.3
val_con = evaluate(val_loader, model, criterion, postprocessors, BinaryConfusionMatrix(1, args.num_object_classes), config, args, thresh)
(static_res_dict, object_res_dict) = val_con.get_res_dict
file1 = open(os.path.join(logdir, (('val_res_thresh_' + str(thresh)) + '.txt')), 'a')
for k in static_res_dict.keys():
logging.error(((str(k) + ' : ') + str(static_res_dict[k])))
file1.write((((str(k) + ' : ') + str(static_res_dict[k])) + ' \n'))
for k in object_res_dict.keys():
logging.error(((str(k) + ' : ') + str(object_res_dict[k])))
file1.write((((str(k) + ' : ') + str(object_res_dict[k])) + ' \n'))
file1.close() |
def main():
args = parse_args()
accelerator = Accelerator()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger.info(accelerator.state)
logger.setLevel((logging.INFO if accelerator.is_local_main_process else logging.ERROR))
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
if (args.seed is not None):
set_seed(args.seed)
if accelerator.is_main_process:
if args.push_to_hub:
if (args.hub_model_id is None):
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif (args.output_dir is not None):
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
if (args.task_name is not None):
raw_datasets = load_dataset('glue', args.task_name)
else:
data_files = {}
if (args.train_file is not None):
data_files['train'] = args.train_file
if (args.validation_file is not None):
data_files['validation'] = args.validation_file
extension = (args.train_file if (args.train_file is not None) else args.valid_file).split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files)
if (args.task_name is not None):
is_regression = (args.task_name == 'stsb')
if (not is_regression):
label_list = raw_datasets['train'].features['label'].names
num_labels = len(label_list)
else:
num_labels = 1
else:
is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
if is_regression:
num_labels = 1
else:
label_list = raw_datasets['train'].unique('label')
label_list.sort()
num_labels = len(label_list)
config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer))
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
if (args.task_name is not None):
(sentence1_key, sentence2_key) = task_to_keys[args.task_name]
else:
non_label_column_names = [name for name in raw_datasets['train'].column_names if (name != 'label')]
if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
(sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
elif (len(non_label_column_names) >= 2):
(sentence1_key, sentence2_key) = non_label_column_names[:2]
else:
(sentence1_key, sentence2_key) = (non_label_column_names[0], None)
label_to_id = None
if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (args.task_name is not None) and (not is_regression)):
label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
logger.info(f'The configuration of the model provided the following label correspondence: {label_name_to_id}. Using it!')
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
elif (args.task_name is None):
label_to_id = {v: i for (i, v) in enumerate(label_list)}
if (label_to_id is not None):
model.config.label2id = label_to_id
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
elif ((args.task_name is not None) and (not is_regression)):
model.config.label2id = {l: i for (i, l) in enumerate(label_list)}
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
padding = ('max_length' if args.pad_to_max_length else False)
def preprocess_function(examples):
texts = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)
if ('label' in examples):
if (label_to_id is not None):
result['labels'] = [label_to_id[l] for l in examples['label']]
else:
result['labels'] = examples['label']
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names, desc='Running tokenizer on dataset')
train_dataset = processed_datasets['train']
eval_dataset = processed_datasets[('validation_matched' if (args.task_name == 'mnli') else 'validation')]
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
if args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
(model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if (args.max_train_steps is None):
args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
else:
args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
if (args.task_name is not None):
metric = load_metric('glue', args.task_name)
else:
metric = load_metric('accuracy')
total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for (step, batch) in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = (loss / args.gradient_accumulation_steps)
accelerator.backward(loss)
if (((step % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if (completed_steps >= args.max_train_steps):
break
model.eval()
for (step, batch) in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = (outputs.logits.argmax(dim=(- 1)) if (not is_regression) else outputs.logits.squeeze())
metric.add_batch(predictions=accelerator.gather(predictions), references=accelerator.gather(batch['labels']))
eval_metric = metric.compute()
logger.info(f'epoch {epoch}: {eval_metric}')
if (args.push_to_hub and (epoch < (args.num_train_epochs - 1))):
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
if (args.output_dir is not None):
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
if (args.task_name == 'mnli'):
eval_dataset = processed_datasets['validation_mismatched']
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
eval_dataloader = accelerator.prepare(eval_dataloader)
model.eval()
for (step, batch) in enumerate(eval_dataloader):
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=(- 1))
metric.add_batch(predictions=accelerator.gather(predictions), references=accelerator.gather(batch['labels']))
eval_metric = metric.compute()
logger.info(f'mnli-mm: {eval_metric}') |
def setup_experiments(auto_var):
exp_name = 'experiment01'
mkdir_p(f'./results/{exp_name}')
auto_var.register_experiment(f'{exp_name}', run_experiment01, {'file_format': 'pickle', 'result_file_dir': f'./results/experiment02'})
exp_name = 'restrictedImgnet'
mkdir_p(f'./results/{exp_name}')
auto_var.register_experiment(f'{exp_name}', run_restrictedImgnet, {'file_format': 'pickle', 'result_file_dir': f'./results/restrictedImgnet3/'})
exp_name = 'hypo'
mkdir_p(f'./results/{exp_name}')
auto_var.register_experiment(f'{exp_name}', run_hypo, {'file_format': 'pickle', 'result_file_dir': f'./results/hypo/'})
exp_name = 'restrictedImgnetHypo'
mkdir_p(f'./results/{exp_name}')
auto_var.register_experiment(f'{exp_name}', run_restrictedImgnetHypo, {'file_format': 'pickle', 'result_file_dir': f'./results/restrictedImgnetHypo/'}) |
class Lookahead(Optimizer):
def __init__(self, optimizer, la_steps=5, la_alpha=0.8, pullback_momentum='none'):
self.optimizer = optimizer
self._la_step = 0
self.la_alpha = la_alpha
self._total_la_steps = la_steps
pullback_momentum = pullback_momentum.lower()
assert (pullback_momentum in ['reset', 'pullback', 'none'])
self.pullback_momentum = pullback_momentum
self.state = defaultdict(dict)
for group in optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['cached_params'] = torch.zeros_like(p.data)
param_state['cached_params'].copy_(p.data)
if (self.pullback_momentum == 'pullback'):
param_state['cached_mom'] = torch.zeros_like(p.data)
def __getstate__(self):
return {'state': self.state, 'optimizer': self.optimizer, 'la_alpha': self.la_alpha, '_la_step': self._la_step, '_total_la_steps': self._total_la_steps, 'pullback_momentum': self.pullback_momentum}
def zero_grad(self):
self.optimizer.zero_grad()
def get_la_step(self):
return self._la_step
def state_dict(self):
return self.optimizer.state_dict()
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def _backup_and_load_cache(self):
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['backup_params'] = torch.zeros_like(p.data)
param_state['backup_params'].copy_(p.data)
p.data.copy_(param_state['cached_params'])
def _clear_and_load_backup(self):
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
p.data.copy_(param_state['backup_params'])
del param_state['backup_params']
@property
def param_groups(self):
return self.optimizer.param_groups
def step(self, closure=None):
loss = self.optimizer.step(closure)
self._la_step += 1
if (self._la_step >= self._total_la_steps):
self._la_step = 0
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(param_state['cached_params'], alpha=(1.0 - self.la_alpha))
param_state['cached_params'].copy_(p.data)
if (self.pullback_momentum == 'pullback'):
internal_momentum = self.optimizer.state[p]['momentum_buffer']
self.optimizer.state[p]['momentum_buffer'] = internal_momentum.mul_(self.la_alpha).add_(param_state['cached_mom'], alpha=(1.0 - self.la_alpha))
param_state['cached_mom'] = self.optimizer.state[p]['momentum_buffer']
elif (self.pullback_momentum == 'reset'):
self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p.data)
return loss |
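# Usage sketch (not part of the original snippet): wrapping a plain SGD optimizer with
# the Lookahead class above; model, data and hyperparameters are illustrative only.
import torch
model = torch.nn.Linear(10, 2)
base_optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
optimizer = Lookahead(base_optimizer, la_steps=5, la_alpha=0.8)
x, y = torch.randn(16, 10), torch.randn(16, 2)
for _ in range(10):
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step() |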
def target_df_fixed_agg(spark_context, spark_session):
data = [{'id': 1, 'timestamp': '2016-04-11 11:31:11', 'feature1': 200, 'feature2': 200, 'feature1__avg_over_2_minutes_fixed_windows': 200, 'feature1__avg_over_15_minutes_fixed_windows': 200}, {'id': 1, 'timestamp': '2016-04-11 11:44:12', 'feature1': 300, 'feature2': 300, 'feature1__avg_over_2_minutes_fixed_windows': 300, 'feature1__avg_over_15_minutes_fixed_windows': 250}, {'id': 1, 'timestamp': '2016-04-11 11:46:24', 'feature1': 400, 'feature2': 400, 'feature1__avg_over_2_minutes_fixed_windows': 400, 'feature1__avg_over_15_minutes_fixed_windows': 350}, {'id': 1, 'timestamp': '2016-04-11 12:03:21', 'feature1': 500, 'feature2': 500, 'feature1__avg_over_2_minutes_fixed_windows': 500, 'feature1__avg_over_15_minutes_fixed_windows': 500}]
df = spark_session.read.json(spark_context.parallelize(data, 1))
df = df.withColumn(TIMESTAMP_COLUMN, df.timestamp.cast(DataType.TIMESTAMP.spark))
return df |
def build_parser():
parser = argparse.ArgumentParser(usage='Convert ATAC h5ad to inferred gene activity scores', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input_h5ad', type=str, nargs='*', help='ATAC h5ad to infer gene activity scores for')
parser.add_argument('output_h5ad', type=str, help='h5ad file to write gene activity scores to')
parser.add_argument('--genome', '-g', choices=ANNOTATION_DICT.keys(), default='hg38', help='Genome annotation to use')
parser.add_argument('--raw', action='store_true', help='Use raw attribute of input')
parser.add_argument('--sizenorm', action='store_true', help='Normalize gene activity scores by span of gene')
parser.add_argument('--naive', action='store_true', help='Use naive method instead of archr-derived method')
return parser |
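# Usage sketch (not part of the original snippet): parsing illustrative arguments; the
# file names are hypothetical and ANNOTATION_DICT must be defined in this module.
args = build_parser().parse_args(['atac.h5ad', 'gene_activity.h5ad', '--raw'])
print(args.input_h5ad, args.output_h5ad, args.genome, args.raw)  # ['atac.h5ad'] gene_activity.h5ad hg38 True |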
def get_module_from_file(module_file: Union[(str, os.PathLike)]) -> str:
full_module_path = Path(module_file).absolute()
module_parts = full_module_path.with_suffix('').parts
idx = (len(module_parts) - 1)
while ((idx >= 0) and (module_parts[idx] != 'transformers')):
idx -= 1
if (idx < 0):
raise ValueError(f'{module_file} is not a transformers module.')
return '.'.join(module_parts[idx:]) |
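# Usage sketch (not part of the original snippet): the path below is hypothetical and
# only illustrates how the helper maps a file path onto a dotted module name rooted at
# the 'transformers' package directory.
print(get_module_from_file('/repo/src/transformers/models/bert/modeling_bert.py'))
# transformers.models.bert.modeling_bert |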
def create_vocab_dict(dict_path):
with open(dict_path, 'r', encoding='utf-8') as f:
lines = f.readlines()
words = [line.split(' ')[0] for line in lines]
num_words = len(words)
vocab_dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
vocab_dict.update(dict(zip(words, range(4, (num_words + 4)))))
return vocab_dict |
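# Usage sketch (not part of the original snippet): create_vocab_dict expects a
# fairseq-style dictionary file with one "<token> <count>" pair per line; the four
# special tokens are prepended at indices 0-3. The file name is hypothetical.
with open('dict.ltr.txt', 'w', encoding='utf-8') as f:
    f.write('E 51860\nT 38431\nA 33152\n')
print(create_vocab_dict('dict.ltr.txt'))
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'E': 4, 'T': 5, 'A': 6} |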
def _resnetv2(layers=(3, 4, 9), **kwargs):
padding_same = kwargs.get('padding_same', True)
stem_type = ('same' if padding_same else '')
conv_layer = (partial(StdConv2dSame, eps=1e-08) if padding_same else partial(StdConv2d, eps=1e-08))
if len(layers):
backbone = ResNetV2(layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), preact=False, stem_type=stem_type, conv_layer=conv_layer)
else:
backbone = create_resnetv2_stem(kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer)
return backbone |
def save_features(features, res_path):
print('Saving to {}'.format(os.path.abspath(res_path)))
with open(os.path.abspath(res_path), 'w') as feat_file:
feat_writer = csv.writer(feat_file)
feat_writer.writerow(['commit', 'number_of_cruical_files', 'number_of_moderate_risk_cruical_files', 'number_of_high_risk_cruical_files', 'number_of_non_modified_change_couplings'])
for feature in features:
feat_writer.writerow(feature) |
class GPUUsageHandler(APIHandler):
@web.authenticated
def get(self):
memory_usage = [pynvml.nvmlDeviceGetMemoryInfo(handle).used for handle in gpu_handles]
total_memory = [pynvml.nvmlDeviceGetMemoryInfo(handle).total for handle in gpu_handles]
self.finish(json.dumps({'memory_usage': memory_usage, 'total_memory': total_memory})) |
def propose_interpreters(spec, try_first_with, app_data, env=None):
env = (os.environ if (env is None) else env)
for py_exe in try_first_with:
path = os.path.abspath(py_exe)
try:
os.lstat(path)
except OSError:
pass
else:
(yield (PythonInfo.from_exe(os.path.abspath(path), app_data, env=env), True))
if (spec.path is not None):
try:
os.lstat(spec.path)
except OSError:
if spec.is_abs:
raise
else:
(yield (PythonInfo.from_exe(os.path.abspath(spec.path), app_data, env=env), True))
if spec.is_abs:
return
else:
(yield (PythonInfo.current_system(app_data), True))
if IS_WIN:
from .windows import propose_interpreters
for interpreter in propose_interpreters(spec, app_data, env):
(yield (interpreter, True))
paths = get_paths(env)
tested_exes = set()
for (pos, path) in enumerate(paths):
path_str = str(path)
logging.debug(LazyPathDump(pos, path_str, env))
for (candidate, match) in possible_specs(spec):
found = check_path(candidate, path_str)
if (found is not None):
exe = os.path.abspath(found)
if (exe not in tested_exes):
tested_exes.add(exe)
interpreter = PathPythonInfo.from_exe(exe, app_data, raise_on_error=False, env=env)
if (interpreter is not None):
(yield (interpreter, match)) |
class IRCBot(irc.IRCClient, Session):
lineRate = 1
nickname = None
logger = None
factory = None
channel = None
sourceURL = ''  # original URL not preserved in this snippet
def signedOn(self):
self.join(self.channel)
self.stopping = False
self.factory.bot = self
address = ('%s@%s' % (self.channel, self.network))
self.init_session('ircbot', address, self.factory.sessionhandler)
self.uid = int(self.factory.uid)
self.logged_in = True
self.factory.sessionhandler.connect(self)
logger.log_info(("IRC bot '%s' connected to %s at %s:%s." % (self.nickname, self.channel, self.network, self.port)))
def disconnect(self, reason=''):
self.sessionhandler.disconnect(self)
self.stopping = True
self.transport.loseConnection()
def at_login(self):
pass
def privmsg(self, user, channel, msg):
if (channel == self.nickname):
user = user.split('!', 1)[0]
self.data_in(text=msg, type='privmsg', user=user, channel=channel)
elif (not msg.startswith('***')):
user = user.split('!', 1)[0]
user = ansi.raw(user)
self.data_in(text=msg, type='msg', user=user, channel=channel)
def action(self, user, channel, msg):
if (not msg.startswith('**')):
user = user.split('!', 1)[0]
self.data_in(text=msg, type='action', user=user, channel=channel)
def get_nicklist(self):
if (not self.nicklist):
self.sendLine(('NAMES %s' % self.channel))
def irc_RPL_NAMREPLY(self, prefix, params):
channel = params[2].lower()
if (channel != self.channel.lower()):
return
self.nicklist += params[3].split(' ')
def irc_RPL_ENDOFNAMES(self, prefix, params):
channel = params[1].lower()
if (channel != self.channel.lower()):
return
self.data_in(text='', type='nicklist', user='server', channel=channel, nicklist=self.nicklist)
self.nicklist = []
def pong(self, user, time):
self.data_in(text='', type='ping', user='server', channel=self.channel, timing=time)
def data_in(self, text=None, **kwargs):
self.sessionhandler.data_in(self, bot_data_in=[parse_irc_to_ansi(text), kwargs])
def send_channel(self, *args, **kwargs):
text = (args[0] if args else '')
if text:
text = parse_ansi_to_irc(text)
self.say(self.channel, text)
def send_privmsg(self, *args, **kwargs):
text = (args[0] if args else '')
user = kwargs.get('user', None)
if (text and user):
text = parse_ansi_to_irc(text)
self.msg(user, text)
def send_request_nicklist(self, *args, **kwargs):
self.get_nicklist()
def send_ping(self, *args, **kwargs):
self.ping(self.nickname)
def send_reconnect(self, *args, **kwargs):
self.factory.reconnect()
def send_default(self, *args, **kwargs):
pass |
class CloseObjectAction(BaseAction):
valid_actions = {'CloseObject'}
def get_reward(self, state, prev_state, expert_plan, goal_idx, low_idx=None):
if (low_idx is None):
subgoal = expert_plan[goal_idx]['planner_action']
else:
subgoal = expert_plan[goal_idx]['planner_action']['parameter'][low_idx]
(reward, done) = (self.rewards['negative'], False)
target_recep = get_object(subgoal['objectId'], state.metadata)
if (target_recep is not None):
is_target_closed = (not target_recep['isOpen'])
(reward, done) = ((self.rewards['positive'], True) if is_target_closed else (self.rewards['negative'], False))
return (reward, done) |
class FiveHundredPxOAuth(BaseOAuth1):
name = '500px'
AUTHORIZATION_URL = '
REQUEST_TOKEN_URL = '
ACCESS_TOKEN_URL = '
def get_user_details(self, user):
(fullname, first_name, last_name) = self.get_user_names(user.get('fullname'))
return {'username': (user.get('username') or user.get('id')), 'email': user.get('email'), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
response = self.get_json(' auth=self.oauth_auth(access_token))
return response.get('user') |
def fetch_system_diagnostics_multi_linac(machine_ip_map, storage_directory, to_be_indexed='to_be_indexed', already_indexed='already_indexed'):
for (machine, ip) in machine_ip_map.items():
print('\nFetching diagnostic zip files from {} {}'.format(machine, ip))
machine_storage_directory = os.path.join(storage_directory, to_be_indexed, machine)
already_indexed_directory = os.path.join(storage_directory, already_indexed, machine)
pathlib.Path(machine_storage_directory).mkdir(parents=True, exist_ok=True)
fetch_system_diagnostics(ip, machine_storage_directory, already_indexed_directory)
print('') |
def is_expr_literal_type(node: Expression) -> bool:
if isinstance(node, IndexExpr):
base = node.base
return (isinstance(base, RefExpr) and (base.fullname in LITERAL_TYPE_NAMES))
if isinstance(node, NameExpr):
underlying = node.node
return (isinstance(underlying, TypeAlias) and isinstance(get_proper_type(underlying.target), LiteralType))
return False |
class AnsibleRole(models.Model):
role_name = models.CharField(max_length=100, verbose_name='role', unique=True)
role_file = models.FileField(upload_to='roles/')
role_user = models.ForeignKey('users.UserProfile', verbose_name='', on_delete=models.CASCADE)
role_time = models.DateTimeField(auto_now_add=True, verbose_name='')
role_desc = models.TextField(verbose_name='role', null=True, blank=True)
class Meta():
db_table = 'ops_ansible_role'
verbose_name = 'AnsibleRole'
verbose_name_plural = 'AnsibleRole' |
def set_elements(obj, ypath, value, validate=False, regularize=False):
ynames = ypath.split('.')
try:
d = _parse_yname(ynames[(- 1)])
if ynames[:(- 1)]:
it = iter_elements(obj, ynames[:(- 1)])
else:
it = [obj]
for sobj in it:
if (d['name'] not in sobj.T.propnames):
raise AttributeError(d['name'])
if ('index' in d):
ssobj = getattr(sobj, d['name'])
ssobj[d['index']] = value
elif ('slice' in d):
ssobj = getattr(sobj, d['name'])
for i in range(*slice(*d['slice']).indices(len(ssobj))):
ssobj[i] = value
else:
setattr(sobj, d['name'], value)
if regularize:
sobj.regularize()
if validate:
sobj.validate()
except (AttributeError, IndexError, YPathError) as e:
raise YPathError(('Invalid ypath: "%s" (%s)' % (ypath, str(e)))) |
class XGLMConverter(SpmConverter):
def vocab(self, proto):
vocab = [('<s>', 0.0), ('<pad>', 0.0), ('</s>', 0.0), ('<unk>', 0.0)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [('<madeupword0>', 0.0), ('<madeupword1>', 0.0), ('<madeupword2>', 0.0), ('<madeupword3>', 0.0), ('<madeupword4>', 0.0), ('<madeupword5>', 0.0), ('<madeupword6>', 0.0)]
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(single='</s> $A', pair='</s> $A </s> </s> $B', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))]) |
def test_serviceinfo_address_updates():
type_ = '_homeassistant._tcp.local.'
name = 'MyTestHome'
with pytest.raises(TypeError):
info_service = ServiceInfo(type_, f'{name}.{type_}', 80, 0, 0, {'path': '/~paulsm/'}, 'ash-2.local.', addresses=[socket.inet_aton('10.0.1.2')], parsed_addresses=['10.0.1.2'])
info_service = ServiceInfo(type_, f'{name}.{type_}', 80, 0, 0, {'path': '/~paulsm/'}, 'ash-2.local.', addresses=[socket.inet_aton('10.0.1.2')])
info_service.addresses = [socket.inet_aton('10.0.1.3')]
assert (info_service.addresses == [socket.inet_aton('10.0.1.3')]) |
@DATASETS.register_module()
class MoshDataset(Dataset, metaclass=ABCMeta):
def __init__(self, ann_file, pipeline, test_mode=False):
self.ann_file = ann_file
self.pipeline = pipeline
self.test_mode = test_mode
self.db = self._get_db(ann_file)
self.pipeline = Compose(self.pipeline)
def _get_db(self, ann_file):
data = np.load(ann_file)
_betas = data['shape'].astype(np.float32)
_poses = data['pose'].astype(np.float32)
tmpl = dict(pose=None, beta=None)
gt_db = []
dataset_len = len(_betas)
for i in range(dataset_len):
newitem = cp.deepcopy(tmpl)
newitem['pose'] = _poses[i]
newitem['beta'] = _betas[i]
gt_db.append(newitem)
return gt_db
def __len__(self):
return len(self.db)
def __getitem__(self, idx):
item = cp.deepcopy(self.db[idx])
(trivial, pose, beta) = (np.zeros(3, dtype=np.float32), item['pose'], item['beta'])
results = {'mosh_theta': np.concatenate((trivial, pose, beta), axis=0).astype(np.float32)}
return self.pipeline(results) |
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
layout = QVBoxLayout()
self.editor = QPlainTextEdit()
fixedfont = QFontDatabase.systemFont(QFontDatabase.FixedFont)
fixedfont.setPointSize(12)
self.editor.setFont(fixedfont)
self.path = None
layout.addWidget(self.editor)
container = QWidget()
container.setLayout(layout)
self.setCentralWidget(container)
self.status = QStatusBar()
self.setStatusBar(self.status)
file_toolbar = QToolBar('File')
file_toolbar.setIconSize(QSize(14, 14))
self.addToolBar(file_toolbar)
file_menu = self.menuBar().addMenu('&File')
open_file_action = QAction(QIcon(os.path.join('images', 'blue-folder-open-document.png')), 'Open file...', self)
open_file_action.setStatusTip('Open file')
open_file_action.triggered.connect(self.file_open)
file_menu.addAction(open_file_action)
file_toolbar.addAction(open_file_action)
save_file_action = QAction(QIcon(os.path.join('images', 'disk.png')), 'Save', self)
save_file_action.setStatusTip('Save current page')
save_file_action.triggered.connect(self.file_save)
file_menu.addAction(save_file_action)
file_toolbar.addAction(save_file_action)
saveas_file_action = QAction(QIcon(os.path.join('images', 'disk--pencil.png')), 'Save As...', self)
saveas_file_action.setStatusTip('Save current page to specified file')
saveas_file_action.triggered.connect(self.file_saveas)
file_menu.addAction(saveas_file_action)
file_toolbar.addAction(saveas_file_action)
print_action = QAction(QIcon(os.path.join('images', 'printer.png')), 'Print...', self)
print_action.setStatusTip('Print current page')
print_action.triggered.connect(self.file_print)
file_menu.addAction(print_action)
file_toolbar.addAction(print_action)
edit_toolbar = QToolBar('Edit')
edit_toolbar.setIconSize(QSize(16, 16))
self.addToolBar(edit_toolbar)
edit_menu = self.menuBar().addMenu('&Edit')
undo_action = QAction(QIcon(os.path.join('images', 'arrow-curve-180-left.png')), 'Undo', self)
undo_action.setStatusTip('Undo last change')
undo_action.triggered.connect(self.editor.undo)
edit_menu.addAction(undo_action)
redo_action = QAction(QIcon(os.path.join('images', 'arrow-curve.png')), 'Redo', self)
redo_action.setStatusTip('Redo last change')
redo_action.triggered.connect(self.editor.redo)
edit_toolbar.addAction(redo_action)
edit_menu.addAction(redo_action)
edit_menu.addSeparator()
cut_action = QAction(QIcon(os.path.join('images', 'scissors.png')), 'Cut', self)
cut_action.setStatusTip('Cut selected text')
cut_action.triggered.connect(self.editor.cut)
edit_toolbar.addAction(cut_action)
edit_menu.addAction(cut_action)
copy_action = QAction(QIcon(os.path.join('images', 'document-copy.png')), 'Copy', self)
copy_action.setStatusTip('Copy selected text')
copy_action.triggered.connect(self.editor.copy)
edit_toolbar.addAction(copy_action)
edit_menu.addAction(copy_action)
paste_action = QAction(QIcon(os.path.join('images', 'clipboard-paste-document-text.png')), 'Paste', self)
paste_action.setStatusTip('Paste from clipboard')
paste_action.triggered.connect(self.editor.paste)
edit_toolbar.addAction(paste_action)
edit_menu.addAction(paste_action)
select_action = QAction(QIcon(os.path.join('images', 'selection-input.png')), 'Select all', self)
select_action.setStatusTip('Select all text')
select_action.triggered.connect(self.editor.selectAll)
edit_menu.addAction(select_action)
edit_menu.addSeparator()
wrap_action = QAction(QIcon(os.path.join('images', 'arrow-continue.png')), 'Wrap text to window', self)
wrap_action.setStatusTip('Toggle wrap text to window')
wrap_action.setCheckable(True)
wrap_action.setChecked(True)
wrap_action.triggered.connect(self.edit_toggle_wrap)
edit_menu.addAction(wrap_action)
self.update_title()
self.show()
def dialog_critical(self, s):
dlg = QMessageBox(self)
dlg.setText(s)
dlg.setIcon(QMessageBox.Critical)
dlg.show()
def file_open(self):
(path, _) = QFileDialog.getOpenFileName(self, 'Open file', '', 'Text documents (*.txt);;All files (*.*)')
if path:
try:
with open(path, 'r') as f:
text = f.read()
except Exception as e:
self.dialog_critical(str(e))
else:
self.path = path
self.editor.setPlainText(text)
self.update_title()
def file_save(self):
if (self.path is None):
return self.file_saveas()
self._save_to_path(self.path)
def file_saveas(self):
(path, _) = QFileDialog.getSaveFileName(self, 'Save file', '', 'Text documents (*.txt);;All files (*.*)')
if (not path):
return
self._save_to_path(path)
def _save_to_path(self, path):
text = self.editor.toPlainText()
try:
with open(path, 'w') as f:
f.write(text)
except Exception as e:
self.dialog_critical(str(e))
else:
self.path = path
self.update_title()
def file_print(self):
dlg = QPrintDialog()
if dlg.exec_():
self.editor.print_(dlg.printer())
def update_title(self):
self.setWindowTitle(('%s - No2Pads' % (os.path.basename(self.path) if self.path else 'Untitled')))
def edit_toggle_wrap(self):
self.editor.setLineWrapMode((1 if (self.editor.lineWrapMode() == 0) else 0)) |
class TestTraceOption():
def test_trace_sets_breakpoint(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile('\n def test_1():\n assert True\n\n def test_2():\n pass\n\n def test_3():\n pass\n ')
child = pytester.spawn_pytest(('--trace ' + str(p1)))
child.expect('test_1')
child.expect('Pdb')
child.sendline('c')
child.expect('test_2')
child.expect('Pdb')
child.sendline('c')
child.expect('test_3')
child.expect('Pdb')
child.sendline('q')
child.expect_exact('Exit: Quitting debugger')
rest = child.read().decode('utf8')
assert ('= 2 passed in' in rest)
assert ('reading from stdin while output' not in rest)
assert ('Exit: Quitting debugger' not in child.before.decode('utf8'))
TestPDB.flush(child)
def test_trace_with_parametrize_handles_shared_fixtureinfo(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile('\n import pytest\n @pytest.mark.parametrize(\'myparam\', [1,2])\n def test_1(myparam, request):\n assert myparam in (1, 2)\n assert request.function.__name__ == "test_1"\n @pytest.mark.parametrize(\'func\', [1,2])\n def test_func(func, request):\n assert func in (1, 2)\n assert request.function.__name__ == "test_func"\n @pytest.mark.parametrize(\'myparam\', [1,2])\n def test_func_kw(myparam, request, func="func_kw"):\n assert myparam in (1, 2)\n assert func == "func_kw"\n assert request.function.__name__ == "test_func_kw"\n ')
child = pytester.spawn_pytest(('--trace ' + str(p1)))
for (func, argname) in [('test_1', 'myparam'), ('test_func', 'func'), ('test_func_kw', 'myparam')]:
child.expect_exact('> PDB runcall (IO-capturing turned off) >')
child.expect_exact(func)
child.expect_exact('Pdb')
child.sendline('args')
child.expect_exact(f'''{argname} = 1
''')
child.expect_exact('Pdb')
child.sendline('c')
child.expect_exact('Pdb')
child.sendline('args')
child.expect_exact(f'''{argname} = 2
''')
child.expect_exact('Pdb')
child.sendline('c')
child.expect_exact('> PDB continue (IO-capturing resumed) >')
rest = child.read().decode('utf8')
assert ('= 6 passed in' in rest)
assert ('reading from stdin while output' not in rest)
assert ('Exit: Quitting debugger' not in child.before.decode('utf8'))
TestPDB.flush(child) |
def _sampling_negative_items(user_pos_len, neg_num, item_num, user_pos_dict):
if (neg_num <= 0):
raise ValueError("'neg_num' must be a positive integer.")
(users, n_pos) = list(zip(*user_pos_len))
users_n_pos = DataIterator(users, n_pos, batch_size=1024, shuffle=False, drop_last=False)
neg_items_list = []
for (bat_user, batch_num) in users_n_pos:
batch_num = [(num * neg_num) for num in batch_num]
exclusion = [user_pos_dict[u] for u in bat_user]
bat_neg_items = batch_randint_choice(item_num, batch_num, replace=True, exclusion=exclusion)
for (user, neg_items, n_item) in zip(bat_user, bat_neg_items, batch_num):
if isinstance(neg_items, Iterable):
if (neg_num > 1):
neg_items = np.reshape(neg_items, newshape=[(- 1), neg_num])
neg_items_list.extend(neg_items)
else:
neg_items_list.append(neg_items)
return neg_items_list |
class MediaPartner(BaseDbModel):
class Meta():
table = 'tm.media_partners'
channel_id = fields.BigIntField(pk=True, generated=False)
tourney_id = fields.IntField()
slots: fields.ManyToManyRelation['PartnerSlot'] = fields.ManyToManyField('models.PartnerSlot')
@property
def channel(self) -> Optional[discord.TextChannel]:
return self.bot.get_channel(self.channel_id) |
def test_json_parse_not_mapping_at_root(fs):
in_path = './tests/testfiles/singleliteral.json'
fs.create_file(in_path, contents='123')
with pytest.raises(TypeError) as err_info:
pypyr.parser.jsonfile.get_parsed_context([in_path])
assert (str(err_info.value) == 'json input should describe an object at the top level. You should have something like\n{\n"key1":"value1",\n"key2":"value2"\n}\nat the json top-level, not an [array] or literal.') |
def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
assert ((taps % 2) == 0), 'The number of taps must be an even number.'
assert (0.0 < cutoff_ratio < 1.0), 'Cutoff ratio must be > 0.0 and < 1.0.'
omega_c = (np.pi * cutoff_ratio)
with np.errstate(invalid='ignore'):
h_i = (np.sin((omega_c * (np.arange((taps + 1)) - (0.5 * taps)))) / (np.pi * (np.arange((taps + 1)) - (0.5 * taps))))
h_i[(taps // 2)] = (np.cos(0) * cutoff_ratio)
w = kaiser((taps + 1), beta)
h = (h_i * w)
return h |
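# Illustrative usage sketch: with the defaults above, taps + 1 = 63 filter
# coefficients are returned (numpy and the kaiser window function are assumed imported
# as in the function body).
proto = design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0)
assert proto.shape == (63,)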
@patch('pypyr.venv.EnvBuilderWithExtraDeps')
def test_venv_dsl_list_of_str_and_mapping_list_no_pip(mock_builder):
context = get_simple_context()
mocked_builder = mock_builder.return_value
mocked_builder.context = context
step = VenvCreatorStep.from_context(Context({'venv': ['/arb1', {'path': ['/arb2', '/arb3'], 'with_pip': False}]}))
expected_path1 = str(Path('/arb1').expanduser().resolve())
expected_path2 = str(Path('/arb2').expanduser().resolve())
expected_path3 = str(Path('/arb3').expanduser().resolve())
assert (len(step.venvs) == 3)
venv_creator = step.venvs[0]
assert (venv_creator.path == expected_path1)
venv_creator = step.venvs[1]
assert (venv_creator.path == expected_path2)
venv_creator = step.venvs[2]
assert (venv_creator.path == expected_path3)
assert (mock_builder.call_count == 3)
step.run_step()
assert (mocked_builder.create.call_count == 3)
assert (sorted(mocked_builder.create.mock_calls) == sorted([call(expected_path1), call(expected_path2), call(expected_path3)]))
mocked_builder.upgrade_dependencies.assert_called_once_with(context)
mocked_builder.pip_install_extras.assert_not_called() |
def manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-05):
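# Computes per-channel batch statistics in float32 over the (N, H, W) axes of an NCHW
# tensor, then delegates the actual normalization to fused_bn (assumed defined elsewhere).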
float_x = x.float()
m = torch.mean(float_x, [0, 2, 3], keepdim=True)
m2 = torch.mean((float_x ** 2), [0, 2, 3], keepdim=True)
var = (m2 - (m ** 2))
var = var.type(x.type())
m = m.type(x.type())
if return_mean_var:
return (fused_bn(x, m, var, gain, bias, eps), m.squeeze(), var.squeeze())
else:
return fused_bn(x, m, var, gain, bias, eps) |
def train(epochs, decay=0, threshold=0.0):
model.train()
pbar = tqdm(range(epochs), total=epochs)
for epoch in pbar:
for (batch_idx, (data, target)) in enumerate(train_loader):
(data, target) = (data.to(device), target.to(device))
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
reg = 0.0
if decay:
reg = 0.0
for (name, param) in model.named_parameters():
if (param.requires_grad and ('weight' in name) and (torch.sum(torch.abs(param)) > 0)):
if (args.reg_type == 2):
reg += (((torch.sum(torch.sqrt(torch.sum((param ** 2), 0))) ** 2) + (torch.sum(torch.sqrt(torch.sum((param ** 2), 1))) ** 2)) / torch.sum((param ** 2)))
elif (args.reg_type == 1):
reg += (torch.sum(torch.sqrt(torch.sum((param ** 2), 0))) + torch.sum(torch.sqrt(torch.sum((param ** 2), 1))))
else:
reg = 0.0
total_loss = (loss + (decay * reg))
total_loss.backward()
optimizer.step()
if ((batch_idx % args.log_interval) == 0):
done = (batch_idx * len(data))
percentage = ((100.0 * batch_idx) / len(train_loader))
pbar.set_description(f'Train Epoch: {epoch} [{done:5}/{len(train_loader.dataset)} ({percentage:3.0f}%)] Loss: {loss.item():.3f} Reg: {reg:.3f}') |
def _make_triple_iff(first, second, third):
return InputShape(constructor=stub_constructor, kwargs=None, fields=(InputField(id='a', type=int, default=NoDefault(), is_required=True, metadata={}, original=None), InputField(id='b', type=int, default=NoDefault(), is_required=False, metadata={}, original=None), InputField(id='c', type=int, default=NoDefault(), is_required=True, metadata={}, original=None)), params=(Param(field_id='a', name='a', kind=first), Param(field_id='b', name='b', kind=second), Param(field_id='c', name='c', kind=third)), overriden_types=frozenset({'a', 'b', 'c'})) |
class AdamW(Optimizer):
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False, warmup=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, use_variance=True, warmup=warmup)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
(beta1, beta2) = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
exp_avg.mul_(beta1).add_((1 - beta1), grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
if (group['warmup'] > state['step']):
scheduled_lr = (1e-08 + ((state['step'] * group['lr']) / group['warmup']))
else:
scheduled_lr = group['lr']
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
if (group['weight_decay'] != 0):
p_data_fp32.add_(((- group['weight_decay']) * scheduled_lr), p_data_fp32)
p_data_fp32.addcdiv_((- step_size), exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss |
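# Illustrative usage sketch: a single optimization step with a hypothetical toy model;
# assumes a PyTorch version that still accepts the legacy add_/addcmul_/addcdiv_ call
# signatures used in step() above.
import torch
toy_model = torch.nn.Linear(10, 1)
optimizer = AdamW(toy_model.parameters(), lr=0.001, weight_decay=0.01, warmup=100)
loss = torch.nn.functional.mse_loss(toy_model(torch.randn(8, 10)), torch.randn(8, 1))
loss.backward()
optimizer.step()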
def test_list_short(pipx_temp_env, monkeypatch, capsys):
assert (not run_pipx_cli(['install', PKG['pycowsay']['spec']]))
assert (not run_pipx_cli(['install', PKG['pylint']['spec']]))
captured = capsys.readouterr()
assert (not run_pipx_cli(['list', '--short']))
captured = capsys.readouterr()
assert ('pycowsay 0.0.0.2' in captured.out)
assert ('pylint 2.3.1' in captured.out) |
class Model(ModelBase):
def __init__(self, *args, **kwargs):
logger.debug('Initializing %s: (args: %s, kwargs: %s)', self.__class__.__name__, args, kwargs)
self.configfile = kwargs.get('configfile', None)
kwargs['input_shape'] = (self.config['input_size'], self.config['input_size'], 3)
super().__init__(*args, **kwargs)
logger.debug('Initialized %s', self.__class__.__name__)
@property
def architecture(self):
return self.config['architecture'].lower()
@property
def use_mask(self):
return (self.config.get('mask_type', None) is not None)
@property
def ae_dims(self):
retval = self.config['autoencoder_dims']
if (retval == 0):
retval = (256 if (self.architecture == 'liae') else 512)
return retval
@property
def multiscale_count(self):
retval = (3 if self.config['multiscale_decoder'] else 1)
return retval
def add_networks(self):
logger.debug('Adding networks')
self.add_network('encoder', None, getattr(self, 'encoder_{}'.format(self.architecture))())
if (self.architecture == 'liae'):
self.add_network('intermediate', 'b', self.inter_liae())
self.add_network('intermediate', None, self.inter_liae())
decoder_sides = ([None] if (self.architecture == 'liae') else ['a', 'b'])
for side in decoder_sides:
self.add_network('decoder', side, self.decoder(), is_output=True)
logger.debug('Added networks')
def build_autoencoders(self, inputs):
logger.debug('Initializing model')
getattr(self, 'build_{}_autoencoder'.format(self.architecture))(inputs)
logger.debug('Initialized model')
def build_liae_autoencoder(self, inputs):
for side in ('a', 'b'):
encoder = self.networks['encoder'].network(inputs[0])
if (side == 'a'):
intermediate = Concatenate()([self.networks['intermediate'].network(encoder), self.networks['intermediate'].network(encoder)])
else:
intermediate = Concatenate()([self.networks['intermediate_b'].network(encoder), self.networks['intermediate'].network(encoder)])
output = self.networks['decoder'].network(intermediate)
autoencoder = KerasModel(inputs, output)
self.add_predictor(side, autoencoder)
def build_df_autoencoder(self, inputs):
for side in ('a', 'b'):
logger.debug('Adding Autoencoder. Side: %s', side)
decoder = self.networks['decoder_{}'.format(side)].network
output = decoder(self.networks['encoder'].network(inputs[0]))
autoencoder = KerasModel(inputs, output)
self.add_predictor(side, autoencoder)
def encoder_df(self):
input_ = Input(shape=self.input_shape)
dims = (self.input_shape[(- 1)] * self.config['encoder_dims'])
lowest_dense_res = (self.input_shape[0] // 16)
var_x = input_
var_x = self.blocks.conv(var_x, dims)
var_x = self.blocks.conv(var_x, (dims * 2))
var_x = self.blocks.conv(var_x, (dims * 4))
var_x = self.blocks.conv(var_x, (dims * 8))
var_x = Dense(self.ae_dims)(Flatten()(var_x))
var_x = Dense(((lowest_dense_res * lowest_dense_res) * self.ae_dims))(var_x)
var_x = Reshape((lowest_dense_res, lowest_dense_res, self.ae_dims))(var_x)
var_x = self.blocks.upscale(var_x, self.ae_dims)
return KerasModel(input_, var_x)
def encoder_liae(self):
input_ = Input(shape=self.input_shape)
dims = (self.input_shape[(- 1)] * self.config['encoder_dims'])
var_x = input_
var_x = self.blocks.conv(var_x, dims)
var_x = self.blocks.conv(var_x, (dims * 2))
var_x = self.blocks.conv(var_x, (dims * 4))
var_x = self.blocks.conv(var_x, (dims * 8))
var_x = Flatten()(var_x)
return KerasModel(input_, var_x)
def inter_liae(self):
input_ = Input(shape=self.networks['encoder'].output_shapes[0][1:])
lowest_dense_res = (self.input_shape[0] // 16)
var_x = input_
var_x = Dense(self.ae_dims)(var_x)
var_x = Dense((((lowest_dense_res * lowest_dense_res) * self.ae_dims) * 2))(var_x)
var_x = Reshape((lowest_dense_res, lowest_dense_res, (self.ae_dims * 2)))(var_x)
var_x = self.blocks.upscale(var_x, (self.ae_dims * 2))
return KerasModel(input_, var_x)
def decoder(self):
if (self.architecture == 'liae'):
input_shape = (np.array(self.networks['intermediate'].output_shapes[0][1:]) * (1, 1, 2))
else:
input_shape = self.networks['encoder'].output_shapes[0][1:]
input_ = Input(shape=input_shape)
outputs = list()
dims = (self.input_shape[(- 1)] * self.config['decoder_dims'])
var_x = input_
var_x1 = self.blocks.upscale(var_x, (dims * 8), res_block_follows=True)
var_x1 = self.blocks.res_block(var_x1, (dims * 8))
var_x1 = self.blocks.res_block(var_x1, (dims * 8))
if (self.multiscale_count >= 3):
outputs.append(self.blocks.conv2d(var_x1, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out_32'))
var_x2 = self.blocks.upscale(var_x1, (dims * 4), res_block_follows=True)
var_x2 = self.blocks.res_block(var_x2, (dims * 4))
var_x2 = self.blocks.res_block(var_x2, (dims * 4))
if (self.multiscale_count >= 2):
outputs.append(self.blocks.conv2d(var_x2, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out_64'))
var_x3 = self.blocks.upscale(var_x2, (dims * 2), res_block_follows=True)
var_x3 = self.blocks.res_block(var_x3, (dims * 2))
var_x3 = self.blocks.res_block(var_x3, (dims * 2))
outputs.append(self.blocks.conv2d(var_x3, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out_128'))
if self.use_mask:
var_y = input_
var_y = self.blocks.upscale(var_y, (self.config['decoder_dims'] * 8))
var_y = self.blocks.upscale(var_y, (self.config['decoder_dims'] * 4))
var_y = self.blocks.upscale(var_y, (self.config['decoder_dims'] * 2))
var_y = self.blocks.conv2d(var_y, 1, kernel_size=5, padding='same', activation='sigmoid', name='mask_out')
outputs.append(var_y)
return KerasModel(input_, outputs=outputs) |
class uvm_subscriber(uvm_component):
class uvm_AnalysisImp(uvm_analysis_export):
def __init__(self, name, parent, write_fn):
super().__init__(name, parent)
self.write_fn = write_fn
def write(self, tt):
self.write_fn(tt)
def __init__(self, name, parent):
super().__init__(name, parent)
self.analysis_export = self.uvm_AnalysisImp('analysis_export', self, self.write)
def write(self, tt):
raise error_classes.UVMFatalError(f'You must override the write() method in uvm_subscriber {self.get_full_name()}') |
def test_round_trip_binary_no_presets(multiworld_rdvgame):
layout = LayoutDescription.from_json_dict(multiworld_rdvgame)
presets = [VersionedPreset.with_preset(preset) for preset in layout.all_presets]
encoded = layout.as_binary(include_presets=False)
assert (LayoutDescription.from_bytes(encoded, presets=presets) == layout) |
def Voronoi(points, criterion='rook', clip='ahull', **kwargs):
from ..cg.voronoi import voronoi_frames
(region_df, _) = voronoi_frames(points, clip=clip)
if (criterion.lower() == 'queen'):
cls = Queen
elif (criterion.lower() == 'rook'):
cls = Rook
else:
raise ValueError(f"Contiguity criterion {criterion} not supported. Only 'rook' and 'queen' are supported.")
return cls.from_dataframe(region_df, **kwargs) |
class Effect6173(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Hybrid Turret')), 'maxRange', ship.getModifiedItemAttr('roleBonusCBC'), **kwargs)
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Hybrid Turret')), 'falloff', ship.getModifiedItemAttr('roleBonusCBC'), **kwargs) |
def export_assets_as_zipfile(ModelAdmin, request, queryset):
if (not queryset.exists()):
ModelAdmin.message_user(request, f'You have to select at least one asset to export.', messages.WARNING)
return redirect(request.path)
assets_without_values = [asset for asset in queryset if (not asset.has_value)]
if any(assets_without_values):
ModelAdmin.message_user(request, f"{len(assets_without_values)} assets from the selection don't have data to export. Please review your selection!", messages.WARNING)
return redirect(request.path)
buffer = io.BytesIO()
zip_file = zipfile.ZipFile(buffer, 'w')
for asset in queryset:
zipdir = 'unknown'
if asset.from_sponsorship:
zipdir = asset.content_object.sponsor.name
elif asset.from_sponsor:
zipdir = asset.content_object.name
if (not asset.is_file):
zip_file.writestr(f'{zipdir}/{asset.internal_name}.txt', asset.value)
else:
suffix = ('.' + asset.value.name.split('.')[(- 1)])
prefix = asset.internal_name
temp_file = NamedTemporaryFile(suffix=suffix, prefix=prefix)
temp_file.write(asset.value.read())
zip_file.write(temp_file.name, arcname=f'{zipdir}/{prefix}{suffix}')
zip_file.close()
response = HttpResponse(buffer.getvalue())
response['Content-Type'] = 'application/x-zip-compressed'
response['Content-Disposition'] = 'attachment; filename=assets.zip'
return response |
class INFO(Page):
def __init__(self, stdscr, jetson):
super(INFO, self).__init__('INFO', stdscr, jetson)
self._hide_serial_number = None
hardware = self.jetson.board['hardware']
if ('Serial Number' in hardware):
if self.jetson.board['hardware']['Serial Number']:
serial_number = hardware['Serial Number']
self._hide_serial_number = HideButton(stdscr, 's', serial_number)
def draw(self, key, mouse):
(_, width, first) = self.size_page()
start_pos = (first + 3)
self.stdscr.move(0, 0)
self.stdscr.clrtoeol()
string_author = 'jtop {version} - {copyright} [{email}]'.format(version=get_var(VERSION_RE), copyright=get_var(COPYRIGHT_RE), email=get_var(EMAIL_RE))
self.stdscr.addstr(first, 0, string_author, curses.A_BOLD)
self.stdscr.addstr((first + 1), 0, 'Website: curses.A_BOLD)
(platform_size_y, platform_size_x) = plot_dictionary(self.stdscr, start_pos, 1, 'Platform', self.jetson.board['platform'])
(libraries_size_y, libraries_size_x) = plot_libraries(self.stdscr, ((start_pos + platform_size_y) + 1), 1, self.jetson.board['libraries'])
size_hardware_x = ((width - platform_size_x) - 2)
offset_y_sn = 0
if ('Serial Number' in self.jetson.board['hardware']):
if self.jetson.board['hardware']['Serial Number']:
try:
self.stdscr.addstr(start_pos, ((1 + platform_size_x) + 1), 'Serial Number:', curses.A_BOLD)
except curses.error:
pass
self._hide_serial_number.update(start_pos, ((1 + platform_size_x) + 16), key=key, mouse=mouse)
offset_y_sn = 1
(hardware_size_y, hardware_size_x) = plot_hardware(self.stdscr, (start_pos + offset_y_sn), ((1 + platform_size_x) + 1), self.jetson.board['hardware'], size_hardware_x)
hardware_size_y += offset_y_sn
interfaces = self.jetson.local_interfaces['interfaces']
hostname = self.jetson.local_interfaces['hostname']
max_size_x = max(platform_size_x, libraries_size_x)
plot_name_info(self.stdscr, ((start_pos + hardware_size_y) + 1), (2 + max_size_x), 'Hostname', hostname)
(interfaces_size_y, interfaces_size_x) = plot_dictionary(self.stdscr, ((start_pos + hardware_size_y) + 2), (2 + max_size_x), 'Interfaces', interfaces) |
def setUpModule():
global mol, mol1
mol = gto.Mole()
mol.atom = '\nO 0 0 0\nH 0. -0.757 0.587\nH 0. 0.757 0.587'
mol.spin = None
mol.basis = 'sto3g'
mol.verbose = 7
mol.output = '/dev/null'
mol.build()
mol1 = gto.M(verbose=0, atom='\nO 0 0 0\nH 0. -0.757 0.587\nH 0. 0.757 0.587', charge=1, spin=1, basis='sto3g') |
class EagerModel(Model):
def __init__(self, assignment, environment=None):
if (environment is None):
environment = get_env()
Model.__init__(self, environment)
self.environment = environment
self.assignment = dict(assignment)
self.completed_assignment = dict(self.assignment)
def get_value(self, formula, model_completion=True):
if model_completion:
syms = formula.get_free_variables()
self._complete_model(syms)
r = formula.substitute(self.completed_assignment)
else:
r = formula.substitute(self.assignment)
res = r.simplify()
if (not res.is_constant()):
raise PysmtTypeError(('Was expecting a constant but got %s' % res))
return res
def _complete_model(self, symbols):
undefined_symbols = (s for s in symbols if (s not in self.completed_assignment))
mgr = self.environment.formula_manager
for s in undefined_symbols:
if (not s.is_symbol()):
raise PysmtTypeError(('Was expecting a symbol but got %s' % s))
if s.symbol_type().is_bool_type():
value = mgr.Bool(False)
elif s.symbol_type().is_real_type():
value = mgr.Real(0)
elif s.symbol_type().is_int_type():
value = mgr.Int(0)
elif s.symbol_type().is_bv_type():
value = mgr.BVZero(s.bv_width())
else:
raise PysmtTypeError(('Unhandled type for %s: %s' % (s, s.symbol_type())))
self.completed_assignment[s] = value
def iterator_over(self, language):
for x in language:
(yield (x, self.get_value(x, model_completion=True)))
def __iter__(self):
return iter(self.assignment.items())
def __contains__(self, x):
return (x in self.assignment) |
def get_current_balanceproof(end_state: NettingChannelEndState) -> BalanceProofData:
balance_proof = end_state.balance_proof
if balance_proof:
locksroot = balance_proof.locksroot
nonce = end_state.nonce
transferred_amount = balance_proof.transferred_amount
locked_amount = get_amount_locked(end_state)
else:
locksroot = Locksroot(LOCKSROOT_OF_NO_LOCKS)
nonce = Nonce(0)
transferred_amount = TokenAmount(0)
locked_amount = LockedAmount(0)
return (locksroot, nonce, transferred_amount, locked_amount) |
@pytest.mark.parametrize('inverter_model', ['sandia', 'adr', 'pvwatts', 'sandia_multi', 'pvwatts_multi'])
def test_ac_models(sapm_dc_snl_ac_system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system, cec_dc_snl_ac_arrays, pvwatts_dc_pvwatts_ac_system_arrays, location, inverter_model, weather, mocker):
ac_systems = {'sandia': sapm_dc_snl_ac_system, 'sandia_multi': cec_dc_snl_ac_arrays, 'adr': cec_dc_adr_ac_system, 'pvwatts': pvwatts_dc_pvwatts_ac_system, 'pvwatts_multi': pvwatts_dc_pvwatts_ac_system_arrays}
inverter_to_ac_model = {'sandia': 'sandia', 'sandia_multi': 'sandia', 'adr': 'adr', 'pvwatts': 'pvwatts', 'pvwatts_multi': 'pvwatts'}
ac_model = inverter_to_ac_model[inverter_model]
system = ac_systems[inverter_model]
mc_inferred = ModelChain(system, location, aoi_model='no_loss', spectral_model='no_loss')
mc = ModelChain(system, location, ac_model=ac_model, aoi_model='no_loss', spectral_model='no_loss')
assert (mc_inferred.ac_model.__name__ == mc.ac_model.__name__)
m = mocker.spy(inverter, inverter_model)
mc.run_model(weather)
assert (m.call_count == 1)
assert isinstance(mc.results.ac, pd.Series)
assert (not mc.results.ac.empty)
assert (mc.results.ac.iloc[1] < 1) |
(scope='session')
@pytest.mark.parametrize('script', scripts)
def test_script_execution(script):
'Run all examples in `radis/examples <
import matplotlib.pyplot as plt
plt.ion()
runpy.run_path(script, init_globals=locals())
plt.close('all')
if exists((Path.cwd() / 'SpecDatabase_Test')):
shutil.rmtree((Path.cwd() / 'SpecDatabase_Test')) |
def test_initial_state_coords_passed(tmpdir):
with tmpdir.as_cwd():
mol = Ligand.from_file(get_data('ethane.sdf'))
bond = mol.find_rotatable_bonds()[0]
dihedral = mol.dihedrals[bond.indices][0]
tdriver = TorsionDriver()
dihedral_data = TorsionScan(torsion=dihedral, scan_range=((- 165), 180))
coords = [np.random.random(size=(mol.n_atoms, 3)) for _ in range(4)]
td_state = tdriver._create_initial_state(molecule=mol, dihedral_data=dihedral_data, qc_spec=QCOptions(), seed_coordinates=coords)
assert (len(td_state['init_coords']) == 4)
for i in range(4):
assert np.allclose((coords[i] * constants.ANGS_TO_BOHR), td_state['init_coords'][i]) |
def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):
world_size = (n_machine * n_gpu_per_machine)
if (world_size > 1):
if (dist_url == 'auto'):
if (n_machine != 1):
raise ValueError('dist_url="auto" not supported in multi-machine jobs')
port = find_free_port()
dist_url = f'tcp://127.0.0.1:{port}'
print('dist_url ', dist_url)
print('n_machine ', n_machine)
print('args ', args)
print('world_size ', world_size)
print('machine_rank ', machine_rank)
if ((n_machine > 1) and dist_url.startswith('file://')):
raise ValueError('file:// is not a reliable init method in multi-machine jobs. Prefer tcp://')
mp.spawn(distributed_worker, nprocs=n_gpu_per_machine, args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args), daemon=False)
else:
local_rank = 0
fn(local_rank, *args) |
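# Illustrative usage sketch: with one machine and one GPU the world size is 1, so
# launch() calls the worker directly in-process; _toy_worker and its argument are hypothetical.
def _toy_worker(local_rank, message):
print('rank', local_rank, message)
launch(_toy_worker, n_gpu_per_machine=1, n_machine=1, args=('hello',))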
def test_nested_struct(do_test):
class inner_struct():
foo: Bits32
class struct():
bar: Bits32
inner: inner_struct
class A(Component):
def construct(s):
s.in_ = [InPort(struct) for _ in range(2)]
a = A()
inner = rdt.Struct(inner_struct, {'foo': rdt.Vector(32)})
st = rdt.Struct(struct, {'bar': rdt.Vector(32), 'inner': inner})
a._ref_ports = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['in_'], 'in_', rt.Array([2], rt.Port('input', st)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0)]
a._ref_ports_yosys = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['in_[0].bar'], 'in___0__bar', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].inner.foo'], 'in___0__inner__foo', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].bar'], 'in___1__bar', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].inner.foo'], 'in___1__inner__foo', rt.Port('input', rdt.Vector(32)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0)]
do_test(a) |
def test_variational_model():
from bioptim.examples.discrete_mechanics_and_optimal_control import example_variational_integrator_pendulum as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
biorbd_model_path = (bioptim_folder + '/models/pendulum.bioMod')
model = VariationalBiorbdModel(biorbd_model_path)
q = MX([3.0, 4.0])
qdot = MX([1.0, 2.0])
TestUtils.assert_equal(model.lagrangian(q, qdot), (- 4.))
time_step = MX(0.5)
TestUtils.assert_equal(model.discrete_lagrangian(q, qdot, time_step), 1.1629533)
model_mid_point = VariationalBiorbdModel(biorbd_model_path, discrete_approximation=QuadratureRule.MIDPOINT)
TestUtils.assert_equal(model_mid_point.discrete_lagrangian(q, qdot, time_step), (- 4.))
model_right = VariationalBiorbdModel(biorbd_model_path, discrete_approximation=QuadratureRule.RECTANGLE_RIGHT)
TestUtils.assert_equal(model_right.discrete_lagrangian(q, qdot, time_step), 1.)
model_left = VariationalBiorbdModel(biorbd_model_path, discrete_approximation=QuadratureRule.RECTANGLE_LEFT)
TestUtils.assert_equal(model_left.discrete_lagrangian(q, qdot, time_step), 0.)
control0 = MX([10.0, 5.0])
control1 = MX([3.0, 9.0])
TestUtils.assert_equal(model.control_approximation(control0, control1, time_step), [2.5, 1.25])
model_linear = VariationalBiorbdModel(biorbd_model_path, control_type=ControlType.LINEAR_CONTINUOUS)
TestUtils.assert_equal(model_linear.control_approximation(control0, control1, time_step), [1.625, 1.75])
model_linear_left = VariationalBiorbdModel(biorbd_model_path, control_type=ControlType.LINEAR_CONTINUOUS, control_discrete_approximation=QuadratureRule.RECTANGLE_LEFT)
TestUtils.assert_equal(model_linear_left.control_approximation(control0, control1, time_step), [2.5, 1.25])
model_linear_right = VariationalBiorbdModel(biorbd_model_path, control_type=ControlType.LINEAR_CONTINUOUS, control_discrete_approximation=QuadratureRule.RECTANGLE_RIGHT)
TestUtils.assert_equal(model_linear_right.control_approximation(control0, control1, time_step), [0.75, 2.25])
q_prev = MX([1.0, 2.0])
q_cur = MX([3.0, 4.0])
q_next = MX([5.0, 6.0])
control_prev = MX([7.0, 8.0])
control_cur = MX([9.0, 10.0])
control_next = MX([11.0, 12.0])
time_step_sym = MX.sym('time_step', 1, 1)
q_prev_sym = MX.sym('q_prev', model.nb_q, 1)
q_cur_sym = MX.sym('q_cur', model.nb_q, 1)
q_next_sym = MX.sym('q_next', model.nb_q, 1)
control_prev_sym = MX.sym('control_prev', model.nb_tau, 1)
control_cur_sym = MX.sym('control_cur', model.nb_tau, 1)
control_next_sym = MX.sym('control_next', model.nb_tau, 1)
qdot_sym = MX.sym('qdot', model.nb_qdot, 1)
discrete_ele = Function('discrete_euler_lagrange_equations', [time_step_sym, q_prev_sym, q_cur_sym, q_next_sym, control_prev_sym, control_cur_sym, control_next_sym], [model.discrete_euler_lagrange_equations(time_step_sym, q_prev_sym, q_cur_sym, q_next_sym, control_prev_sym, control_cur_sym, control_next_sym)])
TestUtils.assert_equal(discrete_ele(time_step, q_prev, q_cur, q_next, control_prev, control_cur, control_next), [1.2098695, 11.])
compute_initial_states = Function('compute_initial_states', [time_step_sym, q_cur_sym, qdot_sym, q_next_sym, control_cur_sym, control_next_sym], [model.compute_initial_states(time_step_sym, q_cur_sym, qdot_sym, q_next_sym, control_cur_sym, control_next_sym)])
TestUtils.assert_equal(compute_initial_states(time_step, q_cur, qdot, q_next, control_cur, control_next), [(- 2.), 4.])
compute_final_states = Function('compute_final_states', [time_step_sym, q_prev_sym, q_cur_sym, qdot_sym, control_prev_sym, control_cur_sym], [model.compute_final_states(time_step_sym, q_prev_sym, q_cur_sym, qdot_sym, control_prev_sym, control_cur_sym)])
TestUtils.assert_equal(compute_final_states(time_step, q_prev, q_cur, qdot, control_prev, control_cur), [3., 7.])
biorbd_model_path = (bioptim_folder + '/models/pendulum_holonomic.bioMod')
holonomic_model = VariationalBiorbdModel(biorbd_model_path)
(constraints_func, constraints_jacobian_func, constraints_double_derivative_func) = HolonomicConstraintsFcn.superimpose_markers(holonomic_model, marker_1='marker_1', index=slice(2, 3))
holonomic_model._add_holonomic_constraint(constraints_func, constraints_jacobian_func, constraints_double_derivative_func)
q_prev = MX([1.0, 2.0, 3.0])
q_cur = MX([3.0, 4.0, 5.0])
q_next = MX([5.0, 6.0, 7.0])
control_prev = MX([7.0, 8.0, 9.0])
control_cur = MX([9.0, 10.0, 11.0])
control_next = MX([11.0, 12.0, 13.0])
qdot = MX([1.0, 2.0, 3.0])
lambdas = MX([1.0])
q_prev_sym = MX.sym('q_prev', holonomic_model.nb_q, 1)
q_cur_sym = MX.sym('q_cur', holonomic_model.nb_q, 1)
q_next_sym = MX.sym('q_next', holonomic_model.nb_q, 1)
control_prev_sym = MX.sym('control_prev', holonomic_model.nb_tau, 1)
control_cur_sym = MX.sym('control_cur', holonomic_model.nb_tau, 1)
control_next_sym = MX.sym('control_next', holonomic_model.nb_tau, 1)
qdot_sym = MX.sym('qdot', holonomic_model.nb_qdot, 1)
lambdas_sym = MX.sym('lambda', holonomic_model.nb_holonomic_constraints, 1)
holonomic_discrete_constraints_jacobian = Function('holonomic_discrete_constraints_jacobian', [time_step_sym, q_cur_sym], [holonomic_model.discrete_holonomic_constraints_jacobian(time_step, q_cur_sym)])
TestUtils.assert_equal(holonomic_discrete_constraints_jacobian(time_step, q_cur), [0.0, 0.5, 0.0])
discrete_ele = Function('discrete_euler_lagrange_equations', [time_step_sym, q_prev_sym, q_cur_sym, q_next_sym, control_prev_sym, control_cur_sym, control_next_sym, lambdas_sym], [holonomic_model.discrete_euler_lagrange_equations(time_step_sym, q_prev_sym, q_cur_sym, q_next_sym, control_prev_sym, control_cur_sym, control_next_sym, lambdas_sym)])
TestUtils.assert_equal(discrete_ele(time_step, q_prev, q_cur, q_next, control_prev, control_cur, control_next, lambdas), [0.7429345, (- 2.), 14., 6.0])
compute_initial_states = Function('compute_initial_states', [time_step_sym, q_cur_sym, qdot_sym, q_next_sym, control_cur_sym, control_next_sym, lambdas_sym], [holonomic_model.compute_initial_states(time_step_sym, q_cur_sym, qdot_sym, q_next_sym, control_cur_sym, control_next_sym, lambdas_sym)])
TestUtils.assert_equal(compute_initial_states(time_step, q_cur, qdot, q_next, control_cur, control_next, lambdas), [(- 1.), (- 4.), 5., 6.0, 4.0, 2.0])
compute_final_states = Function('compute_final_states', [time_step_sym, q_prev_sym, q_cur_sym, qdot_sym, control_prev_sym, control_cur_sym, lambdas_sym], [holonomic_model.compute_final_states(time_step_sym, q_prev_sym, q_cur_sym, qdot_sym, control_prev_sym, control_cur_sym, lambdas_sym)])
TestUtils.assert_equal(compute_final_states(time_step, q_prev, q_cur, qdot, control_prev, control_cur, lambdas), [2., 2., 8., 2.0]) |
@pytest.fixture(scope='module')
def sample_data_bundle(analysis_element_1, analysis_element_2):
parent_function_1 = analysis_element_1['parent']
parent_function_2 = analysis_element_2['parent']
report_dict = {parent_function_1: {'The Crime'}, parent_function_2: {'Another Crime'}}
reference_dict = {parent_function_1: set(), parent_function_2: {parent_function_1}}
return (report_dict, reference_dict) |
class FilterMenu():
MENU = "\n <menu action='Filters'>\n <menuitem action='FilterGenre' always-show-image='true'/>\n <menuitem action='FilterArtist' always-show-image='true'/>\n <menuitem action='FilterAlbum' always-show-image='true'/>\n <separator/>\n <menuitem action='RandomGenre' always-show-image='true'/>\n <menuitem action='RandomArtist' always-show-image='true'/>\n <menuitem action='RandomAlbum' always-show-image='true'/>\n <separator/>\n <menuitem action='All' always-show-image='true'/>\n <menuitem action='PlayedRecently' always-show-image='true'/>\n <menuitem action='AddedRecently' always-show-image='true'/>\n <menuitem action='TopRated' always-show-image='true'/>\n </menu>"
__OUTER_MENU = ("\n <ui>\n <menubar name='Menu'>\n %s\n </menubar>\n </ui>" % MENU)
def __init__(self, library, player, ui=None):
self._browser = None
self._library = library
self._player = player
self._standalone = (not ui)
ag = Gtk.ActionGroup.new('QuodLibetFilterActions')
for (name, icon_name, label, cb) in [('Filters', '', _('_Filters'), None), ('PlayedRecently', Icons.EDIT_FIND, _('Recently _Played'), self.__filter_menu_actions), ('AddedRecently', Icons.EDIT_FIND, _('Recently _Added'), self.__filter_menu_actions), ('TopRated', Icons.EDIT_FIND, _('_Top 40'), self.__filter_menu_actions), ('All', Icons.EDIT_FIND, _('All _Songs'), self.__filter_menu_actions)]:
action = Action(name=name, icon_name=icon_name, label=label)
if cb:
action.connect('activate', cb)
ag.add_action(action)
for (tag_, lab) in [('genre', _('On Current _Genre(s)')), ('artist', _('On Current _Artist(s)')), ('album', _('On Current Al_bum'))]:
act = Action(name=('Filter%s' % util.capitalize(tag_)), label=lab, icon_name=Icons.EDIT_SELECT_ALL)
act.connect('activate', self.__filter_on, tag_, None, player)
ag.add_action(act)
for (tag_, accel, label) in [('genre', 'G', _('Random _Genre')), ('artist', 'T', _('Random _Artist')), ('album', 'M', _('Random Al_bum'))]:
act = Action(name=('Random%s' % util.capitalize(tag_)), label=label, icon_name=Icons.DIALOG_QUESTION)
act.connect('activate', self.__random, tag_)
ag.add_action_with_accel(act, ('<Primary>' + accel))
if self._standalone:
ui = Gtk.UIManager()
ui.add_ui_from_string(self.__OUTER_MENU)
ui.insert_action_group(ag, (- 1))
self._ui = ui
self._get_child_widget('TopRated').set_tooltip_text(_("The 40 songs you've played most (more than 40 may be chosen if there are ties)"))
menu_item = self._get_child_widget('/Menu/Filters')
if isinstance(menu_item, Gtk.ImageMenuItem):
menu_item.set_image(None)
self._player_id = player.connect('song-started', self._on_song_started)
self.set_song(player.song)
self._hide_menus()
def destroy(self):
if self._player:
self._player.disconnect(self._player_id)
self._player = None
self._browser = None
self._library = None
def _on_song_started(self, player, song):
self.set_song(song)
def __random(self, item, key):
self._browser.filter_random(key)
def __filter_on(self, action, header, songs, player):
if (songs is None):
if player.song:
songs = [player.song]
else:
return
self._browser.filter_on(songs, header)
def __filter_menu_actions(self, menuitem):
name = menuitem.get_name()
if (name == 'PlayedRecently'):
self._make_query('#(lastplayed < 7 days ago)')
elif (name == 'AddedRecently'):
self._make_query('#(added < 7 days ago)')
elif (name == 'TopRated'):
bg = background_filter()
songs = ((bg and filter(bg, self._library)) or self._library)
songs = [song.get('~#playcount', 0) for song in songs]
if (len(songs) == 0):
return
songs.sort()
if (len(songs) < 40):
self._make_query(f'#(playcount > {(songs[0] - 1):d})')
else:
self._make_query(f'#(playcount > {(songs[(- 40)] - 1):d})')
elif (name == 'All'):
self._browser.unfilter()
def _make_query(self, query):
assert isinstance(query, str)
if self._browser.can_filter_text():
self._browser.filter_text(query)
self._browser.activate()
def _hide_menus(self):
menus = {'genre': ['FilterGenre', 'RandomGenre'], 'artist': ['FilterArtist', 'RandomArtist'], 'album': ['FilterAlbum', 'RandomAlbum'], None: ['PlayedRecently', 'AddedRecently', 'TopRated', 'All']}
for (key, widget_names) in menus.items():
if self._browser:
can_filter = self._browser.can_filter(key)
else:
can_filter = False
for name in widget_names:
self._get_child_widget(name).set_property('visible', can_filter)
def set_browser(self, browser):
self._browser = browser
self._hide_menus()
def set_song(self, song):
for wid in ['FilterAlbum', 'FilterArtist', 'FilterGenre']:
self._get_child_widget(wid).set_sensitive(bool(song))
if song:
for h in ['genre', 'artist', 'album']:
widget = self._get_child_widget(('Filter%s' % h.capitalize()))
widget.set_sensitive((h in song))
def _get_child_widget(self, name=None):
path = ('/Menu%s/Filters' % ('' if self._standalone else '/Browse'))
if name:
path += ('/' + name)
return self._ui.get_widget(path)
def get_widget(self):
path = ('/Menu' if self._standalone else '/Menu/Browse')
return self._ui.get_widget(path)
def get_accel_group(self):
return self._ui.get_accel_group() |
def train(args, model, meta_optimizer, dataloader):
model.train()
for (batch_idx, batch) in enumerate(dataloader):
model.zero_grad()
batch['train'][0] = batch['train'][0].view(args.batch_size, (- 1), 6, 36, 36)
batch['test'][0] = batch['test'][0].view(args.batch_size, (- 1), 6, 36, 36)
batch['train'][1] = batch['train'][1].view(args.batch_size, (- 1), 1)
batch['test'][1] = batch['test'][1].view(args.batch_size, (- 1), 1)
(train_inputs, train_targets) = batch['train']
train_inputs = train_inputs.to(device=args.device)
train_targets = train_targets.to(device=args.device)
(test_inputs, test_targets) = batch['test']
test_inputs = test_inputs.to(device=args.device)
test_targets = test_targets.to(device=args.device)
inner_optimiser = torch.optim.SGD(model.parameters(), lr=args.inner_step_size)
for (task_idx, (train_input, train_target, test_input, test_target)) in enumerate(zip(train_inputs, train_targets, test_inputs, test_targets)):
with higher.innerloop_ctx(model, inner_optimiser, copy_initial_weights=False) as (fmodel, diffopt):
for step in range(args.num_adapt_steps):
train_logit = fmodel(train_input)
inner_loss = F.mse_loss(train_logit, train_target)
diffopt.step(inner_loss)
test_logit = fmodel(test_input)
outer_loss = F.mse_loss(test_logit, test_target)
outer_loss.backward()
meta_optimizer.step() |
def performer_set(id3, key, value):
wanted_role = key.split(':', 1)[1]
try:
mcl = id3['TMCL']
except KeyError:
mcl = mutagen.id3.TMCL(encoding=3, people=[])
id3.add(mcl)
mcl.encoding = 3
people = [p for p in mcl.people if (p[0] != wanted_role)]
for v in value:
people.append((wanted_role, v))
mcl.people = people |
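# Illustrative usage sketch: attaches a TMCL "involved people" frame to an empty
# in-memory ID3 tag; the role and performer name below are hypothetical.
tags = mutagen.id3.ID3()
performer_set(tags, 'performer:violin', ['Alice Example'])
assert any(role == 'violin' for (role, name) in tags['TMCL'].people)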
def read_lab(tsv_path, lab_path, pad_len=0, upsample=1):
with open(tsv_path) as f:
f.readline()
uids = [op.splitext(op.basename(line.rstrip().split()[0]))[0] for line in f]
with open(lab_path) as f:
labs_list = [pad(line.rstrip().split(), pad_len).repeat(upsample) for line in f]
assert (len(uids) == len(labs_list))
return dict(zip(uids, labs_list)) |
class LidGroups(object):
def __init__(self, model):
if (not model._model.fileLoaded):
raise PYSWMMException('SWMM Model Not Open')
self._model = model._model
self._cuindex = 0
self._nLidGroups = self._model.getProjectSize(ObjectType.SUBCATCH.value)
def __len__(self):
return self._model.getProjectSize(ObjectType.SUBCATCH.value)
def __contains__(self, subcatchmentid):
return self._model.ObjectIDexist(ObjectType.SUBCATCH.value, subcatchmentid)
def __getitem__(self, subcatchmentid):
if self.__contains__(subcatchmentid):
return LidGroup(self._model, subcatchmentid)
else:
raise PYSWMMException('Subcatchment ID Does not Exist')
def __iter__(self):
return self
def __next__(self):
if (self._cuindex < self._nLidGroups):
lidgroupobject = self.__getitem__(self._subcatchmentid)
self._cuindex += 1
return lidgroupobject
else:
raise StopIteration()
@property
def _subcatchmentid(self):
return self._model.getObjectId(ObjectType.SUBCATCH.value, self._cuindex) |
def serial_options(options):
values = {}
values['port'] = options.get('port', (config.get('serial_driver', 'port') or '/dev/ttyS0'))
values['baudrate'] = options.get('baudrate', config.getint('serial_driver', 'baudrate'))
values['bytesize'] = options.get('bytesize', config.getint('serial_driver', 'bytesize'))
values['parity'] = options.get('parity', config.get('serial_driver', 'parity'))
values['stopbits'] = options.get('stopbits', config.getint('serial_driver', 'stopbits'))
values['rtscts'] = options.get('rtscts', config.getboolean('serial_driver', 'rtscts'))
values['xonxoff'] = options.get('xonxoff', config.getboolean('serial_driver', 'xonxoff'))
values['timeout'] = options.get('timeout', config.getint('serial_driver', 'timeout'))
values['eol_cr'] = options.get('eol_cr', config.getboolean('serial_driver', 'eol_cr'))
values['eol_lf'] = options.get('eol_lf', config.getboolean('serial_driver', 'eol_lf'))
data = options.get('data', '')
if values['eol_cr']:
data += serial.CR
if values['eol_lf']:
data += serial.LF
return (values, data) |
class BlankPage(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, size=(0, 0))
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.parent = parent
self.parent.Bind(EVT_NOTEBOOK_PAGE_CHANGED, self.pageChanged)
self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=()))
def Destroy(self):
wx.Panel.Destroy(self)
def pageChanged(self, event):
if self.parent.IsActive(self):
fitID = None
wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(fitID,)))
event.Skip() |
def evaluation(file_name):
gt_path = os.path.join(args.gt_path, file_name)
pre_path = os.path.join(args.pre_path, file_name)
assert os.path.exists(gt_path)
assert os.path.exists(pre_path)
gt_points = np.loadtxt(gt_path)
pre_points = np.loadtxt(pre_path)
(gt2pre, _) = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(pre_points).kneighbors(gt_points)
(pre2gt, _) = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(gt_points).kneighbors(pre_points)
return (np.squeeze(gt2pre), np.squeeze(pre2gt), emd_samples(gt_points, pre_points)) |
def zero_shot_classifier(model, classnames, templates, args):
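# Builds one text embedding per class: every prompt template is encoded for the class,
# the embeddings are L2-normalized and averaged, then re-normalized; the stacked result
# has one column per class.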
with torch.no_grad():
zeroshot_weights = []
for classname in tqdm(classnames):
texts = [template(classname) for template in templates]
texts = tokenize(texts).to(args.device)
if (args.distributed and (not args.horovod)):
class_embeddings = model.module.encode_text(texts)
else:
class_embeddings = model.encode_text(texts)
class_embedding = F.normalize(class_embeddings, dim=(- 1)).mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device)
return zeroshot_weights |
def get_dataset(data_cfg):
if (data_cfg['type'] == 'RepeatDataset'):
return RepeatDataset(get_dataset(data_cfg['dataset']), data_cfg['times'])
if isinstance(data_cfg['ann_file'], (list, tuple)):
ann_files = data_cfg['ann_file']
num_dset = len(ann_files)
else:
ann_files = [data_cfg['ann_file']]
num_dset = 1
if ('proposal_file' in data_cfg.keys()):
if isinstance(data_cfg['proposal_file'], (list, tuple)):
proposal_files = data_cfg['proposal_file']
else:
proposal_files = [data_cfg['proposal_file']]
else:
proposal_files = ([None] * num_dset)
assert (len(proposal_files) == num_dset)
if isinstance(data_cfg['img_prefix'], (list, tuple)):
img_prefixes = data_cfg['img_prefix']
else:
img_prefixes = ([data_cfg['img_prefix']] * num_dset)
assert (len(img_prefixes) == num_dset)
dsets = []
for i in range(num_dset):
data_info = copy.deepcopy(data_cfg)
data_info['ann_file'] = ann_files[i]
data_info['proposal_file'] = proposal_files[i]
data_info['img_prefix'] = img_prefixes[i]
dset = obj_from_dict(data_info, datasets)
dsets.append(dset)
if (len(dsets) > 1):
dset = ConcatDataset(dsets)
else:
dset = dsets[0]
return dset |
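A hedged example of the kind of config dict this helper expects when several annotation files share one pipeline; the dataset type and paths below are placeholders, only the key names mirror the accesses above:

data_cfg = dict(
    type='CocoDataset',  # resolved through obj_from_dict(data_info, datasets)
    ann_file=['annotations/train_part1.json', 'annotations/train_part2.json'],
    img_prefix=['images/part1/', 'images/part2/'],
    # proposal_file omitted: the helper fills in one None per annotation file
)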
def generate_pk_hash_column(tables: List[pa.Table], primary_keys: Optional[List[str]]=None, requires_sha1: bool=False) -> List[pa.Table]:
def _generate_pk_hash(table: pa.Table) -> pa.Array:
pk_columns = []
for pk_name in primary_keys:
pk_columns.append(sliced_string_cast(table[pk_name]))
pk_columns.append(PK_DELIMITER)
hash_column = pc.binary_join_element_wise(*pk_columns)
return hash_column
def _generate_uuid(table: pa.Table) -> pa.Array:
hash_column = pa.array([uuid.uuid4().hex for _ in range(len(table))], pa.string())
return hash_column
start = time.monotonic()
hash_column_list = []
can_sha1 = False
if primary_keys:
hash_column_list = [_generate_pk_hash(table) for table in tables]
can_sha1 = (requires_sha1 or _is_sha1_desired(hash_column_list))
else:
hash_column_list = [_generate_uuid(table) for table in tables]
logger.info(f'can_generate_sha1={can_sha1} for the table and requires_sha1={requires_sha1}')
result = []
total_len = 0
total_size = 0
for (index, table) in enumerate(tables):
if can_sha1:
table = _append_sha1_hash_to_table(table, hash_column_list[index])
else:
table = table.append_column(sc._PK_HASH_STRING_COLUMN_FIELD, hash_column_list[index])
total_len += len(table)
total_size += hash_column_list[index].nbytes
result.append(table)
end = time.monotonic()
logger.info(f'Took {(end - start)}s to generate pk hash of len: {total_len} for size: {total_size} bytes')
return result |
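For reference, a small standalone sketch of the delimiter-joined key string that _generate_pk_hash builds; the column names and the '|' delimiter are invented for illustration, and the last argument to binary_join_element_wise acts as the separator:

import pyarrow as pa
import pyarrow.compute as pc

table = pa.table({'region': ['us', 'eu'], 'order_id': ['1001', '1002']})
joined = pc.binary_join_element_wise(table['region'], table['order_id'], '|')
print(joined.to_pylist())  # ['us|1001', 'eu|1002']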
class SingleFileDirectoryFormatType(FormatType):
def __init__(self, view_type):
self._wrapped_view_type = view_type.file.format
super().__init__(view_type)
def _get_transformer_to(self, other):
(transformer, record) = self._wrap_transformer(self, other)
if (transformer is not None):
return (transformer, record)
(transformer, record) = self._wrap_transformer(self, other, wrap_input=True)
if (transformer is not None):
return (transformer, record)
if (type(other) is type(self)):
(transformer, record) = self._wrap_transformer(self, other, wrap_input=True, wrap_output=True)
if (transformer is not None):
return (transformer, record)
return other._get_transformer_from(self)
def _get_transformer_from(self, other):
return self._wrap_transformer(other, self, wrap_output=True)
def _wrap_transformer(self, in_, out_, wrap_input=False, wrap_output=False):
input = (in_._wrapped_view_type if wrap_input else in_._view_type)
output = (out_._wrapped_view_type if wrap_output else out_._view_type)
(transformer, record) = self._lookup_transformer(input, output)
if (transformer is None):
return (None, None)
if wrap_input:
transformer = in_._wrap_input(transformer)
if wrap_output:
transformer = out_._wrap_output(transformer)
return (transformer, record)
def _wrap_input(self, transformer):
def wrapped(view):
return transformer(view.file.view(self._wrapped_view_type))
return wrapped
def _wrap_output(self, transformer):
def wrapped(view):
new_view = self._view_type()
file_view = transformer(view)
if (transformer is not identity_transformer):
self.set_user_owned(file_view, False)
new_view.file.write_data(file_view, self._wrapped_view_type)
return new_view
return wrapped |
def _to_utc_date_string(ts):
if isinstance(ts, datetime):
try:
ts = ts.astimezone(utc_tz)
except:
import tzlocal
ts = ts.replace(tzinfo=tzlocal.get_localzone())
mindate = datetime.min.replace(tzinfo=utc_tz)
maxdate = datetime.max.replace(tzinfo=utc_tz)
if ((mindate + ts.tzinfo.utcoffset(ts)) > ts):
logging.error('Cannot coerce datetime %s to UTC. Changed to min-date.', ts)
ts = mindate
elif (ts > (maxdate - ts.tzinfo.utcoffset(ts))):
logging.error('Cannot coerce datetime %s to UTC. Changed to max-date.', ts)
ts = maxdate
else:
ts = ts.astimezone(utc_tz)
return ts.strftime('%Y%m%dT%H%M%SZ') |
class CocoaScreenMode(ScreenMode):
def __init__(self, screen, cgmode):
super(CocoaScreenMode, self).__init__(screen)
quartz.CGDisplayModeRetain(cgmode)
self.cgmode = cgmode
self.width = int(quartz.CGDisplayModeGetWidth(cgmode))
self.height = int(quartz.CGDisplayModeGetHeight(cgmode))
self.depth = self.getBitsPerPixel(cgmode)
self.rate = quartz.CGDisplayModeGetRefreshRate(cgmode)
def __del__(self):
quartz.CGDisplayModeRelease(self.cgmode)
self.cgmode = None
def getBitsPerPixel(self, cgmode):
IO8BitIndexedPixels = 'PPPPPPPP'
IO16BitDirectPixels = '-RRRRRGGGGGBBBBB'
IO32BitDirectPixels = 'RRRRRRRRGGGGGGGGBBBBBBBB'
cfstring = c_void_p(quartz.CGDisplayModeCopyPixelEncoding(cgmode))
pixelEncoding = cfstring_to_string(cfstring)
cf.CFRelease(cfstring)
if (pixelEncoding == IO8BitIndexedPixels):
return 8
if (pixelEncoding == IO16BitDirectPixels):
return 16
if (pixelEncoding == IO32BitDirectPixels):
return 32
return 0 |
def pytest_ignore_collect(path, config):
has_ro = config.getoption('--ro-functional')
has_rw = config.getoption('--rw-functional')
base = os.path.basename(str(path))
is_ro = (base == 'test_ro_functional.py')
is_rw = (base == 'test_rw_functional.py')
if (is_ro and (not has_ro)):
return True
if (is_rw and (not has_rw)):
return True |
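The hook above assumes the two command-line flags are registered elsewhere in the plugin; a minimal sketch of the matching pytest_addoption hook, with option names mirrored from the calls above and defaults assumed:

def pytest_addoption(parser):
    # Opt-in switches for the functional test modules skipped by pytest_ignore_collect.
    parser.addoption('--ro-functional', action='store_true', default=False,
                     help='run read-only functional tests')
    parser.addoption('--rw-functional', action='store_true', default=False,
                     help='run read/write functional tests')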
class IndexOptions(object):
__slots__ = ('min_variable_heavies', 'max_variable_heavies', 'min_variable_ratio', 'max_variable_ratio', 'min_radius', 'max_radius', 'symmetric', 'max_heavies_transf', 'max_frac_trans', 'smallest_transformation_only')
def __init__(self, min_variable_heavies=None, max_variable_heavies=None, min_variable_ratio=None, max_variable_ratio=None, max_heavies_transf=None, max_frac_trans=None, min_radius=0, max_radius=5, symmetric=False, smallest_transformation_only=False):
assert ((min_variable_heavies is None) or (min_variable_heavies >= 0)), min_variable_heavies
self.min_variable_heavies = min_variable_heavies
assert ((max_variable_heavies is None) or ((min_variable_heavies is None) and (max_variable_heavies >= 0)) or (min_variable_heavies <= max_variable_heavies)), max_variable_heavies
self.max_variable_heavies = max_variable_heavies
assert ((min_variable_ratio is None) or (0.0 <= min_variable_ratio <= 1.0)), min_variable_ratio
self.min_variable_ratio = min_variable_ratio
assert ((max_variable_ratio is None) or ((min_variable_ratio is None) and (max_variable_ratio <= 1.0)) or (min_variable_ratio <= max_variable_ratio <= 1.0))
self.max_variable_ratio = max_variable_ratio
assert ((max_heavies_transf is None) or (max_heavies_transf >= 0)), max_heavies_transf
self.max_heavies_transf = max_heavies_transf
assert ((max_frac_trans is None) or (max_frac_trans >= 0)), max_frac_trans
self.max_frac_trans = max_frac_trans
assert (min_radius <= max_radius), (min_radius, max_radius)
assert (min_radius >= 0), min_radius
self.min_radius = min_radius
assert (max_radius >= 0), max_radius
self.max_radius = max_radius
assert isinstance(symmetric, bool)
self.symmetric = symmetric
assert isinstance(smallest_transformation_only, bool)
self.smallest_transformation_only = smallest_transformation_only
def to_dict(self):
d = {}
for name in IndexOptions.__slots__:
value = getattr(self, name)
if (value is not None):
d[name] = value
return d
def get_fragment_filter(self):
from . import index_algorithm
filters = []
if (self.min_variable_heavies is not None):
filters.append(index_algorithm.MinVariableHeaviesFilter(self.min_variable_heavies))
if (self.max_variable_heavies is not None):
filters.append(index_algorithm.MaxVariableHeaviesFilter(self.max_variable_heavies))
if (self.min_variable_ratio is not None):
filters.append(index_algorithm.MinVariableRatioFilter(self.min_variable_ratio))
if (self.max_variable_ratio is not None):
filters.append(index_algorithm.MaxVariableRatioFilter(self.max_variable_ratio))
if (not filters):
return index_algorithm.MultipleFilters([])
elif (len(filters) == 1):
return filters[0]
else:
return index_algorithm.MultipleFilters(filters) |
def generate_data():
data_dir = os.path.expanduser(FLAGS.data_dir)
tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
tf.gfile.MakeDirs(data_dir)
tf.gfile.MakeDirs(tmp_dir)
problem = list(sorted(_SUPPORTED_PROBLEM_GENERATORS))[0]
set_random_seed()
training_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]
tf.logging.info('Generating training data for %s.', problem)
train_output_files = generator_utils.generate_files(training_gen(), ((problem + UNSHUFFLED_SUFFIX) + '-train'), FLAGS.data_dir, FLAGS.num_shards, FLAGS.max_cases)
train_output_files = []
output_dir = FLAGS.data_dir
for shard in xrange(FLAGS.num_shards):
output_filename = ('%s-%.5d-of-%.5d' % ('translation-unshuffled-train', shard, FLAGS.num_shards))
output_file = os.path.join(output_dir, output_filename)
train_output_files.append(output_file)
tf.logging.info('Shuffling data...')
for fname in train_output_files:
records = generator_utils.read_records(fname)
random.shuffle(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, '')
generator_utils.write_records(records, out_fname)
tf.gfile.Remove(fname)
tf.logging.info('Data Process Over') |
class TestSparseMultiheadAttention(unittest.TestCase):
def test_sparse_multihead_attention(self):
attn_weights = torch.randn(1, 8, 8)
bidirectional_sparse_mask = torch.tensor([[0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]])
bidirectional_attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=True)
bidirectional_attention_sparse_mask = bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
self.assertTrue(torch.all(torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)))
sparse_mask = torch.tensor([[0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, float('-inf'), float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]])
attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
self.assertTrue(torch.all(torch.eq(attention_sparse_mask, sparse_mask))) |
class StatisticsRPaths():
def __init__(self, rbdir):
self.rbdir = rbdir
self.session_rps = self.get_sorted_inc_rps('session_statistics')
self.filestat_rps = self.get_sorted_inc_rps('file_statistics')
self.combined_pairs = self.get_combined_pairs()
def get_sorted_inc_rps(self, prefix):
incs = self.rbdir.append(prefix).get_incfiles_list()
if begin_time:
incs = filter((lambda i: (i.getinctime() >= begin_time)), incs)
if end_time:
incs = filter((lambda i: (i.getinctime() <= end_time)), incs)
incs = list(incs)
incs.sort(key=(lambda i: i.getinctime()))
return incs
def get_combined_pairs(self):
session_dict = {}
for inc in self.session_rps:
session_dict[inc.getinctime()] = inc
filestat_dict = {}
for inc in self.filestat_rps:
filestat_dict[inc.getinctime()] = inc
result = []
keylist = list(session_dict)
keylist.sort()
for time in keylist:
if (time in filestat_dict):
result.append((session_dict[time], filestat_dict[time]))
else:
sys.stderr.write("No file_statistics to match '{rp}'".format(rp=session_dict[time]))
return result |
class FollowTopic(Mutation):
class Arguments():
pk = ID()
feedback = String()
@login_required
def mutate(_root, info, pk):
topic = get_object_or_404(Topic, id=pk)
following = info.context.user.following_topics
if following.filter(pk=pk).exists():
following.remove(topic)
return FollowTopic(feedback=_('you no longer follow this topic'))
following.add(topic)
return FollowTopic(feedback=_('you are now following this topic')) |
def _sample_smc_int(draws, kernel, start, model, random_seed, chain, progressbar=None, **kernel_kwargs):
in_out_pickled = (type(model) == bytes)
if in_out_pickled:
(draws, kernel, start, model) = map(cloudpickle.loads, (draws, kernel, start, model))
kernel_kwargs = {key: cloudpickle.loads(value) for (key, value) in kernel_kwargs.items()}
smc = kernel(draws=draws, start=start, model=model, random_seed=random_seed, **kernel_kwargs)
if progressbar:
progressbar.comment = f"{getattr(progressbar, 'base_comment', '')} Stage: 0 Beta: 0"
progressbar.update_bar((getattr(progressbar, 'offset', 0) + 0))
smc._initialize_kernel()
smc.setup_kernel()
stage = 0
sample_stats = defaultdict(list)
while (smc.beta < 1):
smc.update_beta_and_weights()
if progressbar:
progressbar.comment = f"{getattr(progressbar, 'base_comment', '')} Stage: {stage} Beta: {smc.beta:.3f}"
progressbar.update_bar((getattr(progressbar, 'offset', 0) + int((smc.beta * 100))))
smc.resample()
smc.tune()
smc.mutate()
for (stat, value) in smc.sample_stats().items():
sample_stats[stat].append(value)
stage += 1
results = (smc._posterior_to_trace(chain), sample_stats, smc.sample_settings())
if in_out_pickled:
results = cloudpickle.dumps(results)
return results |
class MovieMainPage(KinopoiskPage):
url = '/film/{id}/'
main_persons = {'': 'directors', '': 'screenwriters', '': 'producers', '': 'operators', '': 'composers', '': 'art_direction_by', '': 'editing_by'}
main_profits = {'': 'budget', '': 'marketing', ' ': 'profit_usa', ' ': 'profit_russia', ' ': 'profit_world'}
xpath = {'url': './/meta[="og:url"]/', 'title': './/h1/span/text()', 'title_en': './/span[="styles_originalTitle__19q6I"]/text()', 'plot': './/p[="styles_paragraph__2Otvx"]/text()', 'rating': '(.//span[contains(,"film-rating-value")])[1]/text()', 'votes': '(.//span[="styles_count__3hSWL"])[2]/text()', 'imdb': './/div[contains(," film-sub-rating")]/span[1]/text()[3]', 'imdb2': './/div[contains(," film-sub-rating")]/span[2]/text()'}
regex = {'trailers': re.compile('GetTrailerPreview\\(([^)]+)\\)'), 'imdb': re.compile('^IMDb: ([0-9.]+) \\(([0-9 ]+)\\)$')}
def parse(self):
content_info = BeautifulSoup(self.content, 'html.parser')
table_info = content_info.find('div', {'data-test-id': 'encyclopedic-table'})
if table_info:
self.parse_table_info(table_info)
trailers = self.regex['trailers'].findall(self.content)
if len(trailers):
self.instance.add_trailer(json.loads(trailers[0].replace("'", '"')))
self.parse_actors(content_info)
self.content = html.fromstring(self.content)
self.instance.id = self.prepare_int(self.extract('url').split('/')[(- 2)].split('-')[(- 1)])
self.instance.title = self.extract_title()
self.instance.title_en = self.extract('title_en', to_str=True)
self.instance.plot = self.extract('plot', to_str=True)
self.instance.plot = re.sub('\\s+', ' ', self.instance.plot)
try:
self.instance.rating = self.extract('rating', to_float=True)
except ValueError:
pass
self.instance.votes = self.extract('votes', to_int=True)
self.instance.imdb_rating = self.extract('imdb', to_float=True)
imdb_votes = self.extract('imdb2')
if imdb_votes:
if ('K' in imdb_votes):
imdb_votes = int(float(imdb_votes.replace('K', '')) * 1000)
self.instance.imdb_votes = self.prepare_int(imdb_votes)
self.instance.set_source('main_page')
def parse_table_info(self, table_info):
for row in table_info.findChildren('div', recursive=False):
pairs = row.findChildren('div')
name = pairs[0].text
value = pairs[1]
self.set_value(name, value)
def parse_main_profit(self, field_name, value):
setattr(self.instance, field_name, self.find_profit(value))
def parse_actors(self, content_info):
container = content_info.find('div', {'class': re.compile('film-crew')}).find('ul')
if container:
actors = container.parent
if (actors and actors.ul):
self.parse_persons('actors', [li.a for li in actors.ul.findAll('li')])
def parse_persons(self, field_name, links):
from kinopoisk.person import Person
for link in links:
if (isinstance(link, Tag) and (link.text != '...')):
person = Person.get_parsed('short_link', link.decode())
getattr(self.instance, field_name).append(person)
def set_value(self, name, value_tds):
value = value_tds.text
if (value == '-'):
return
name = name.lower()
if (name == ''):
self.instance.tagline = self.prepare_str(value)
elif (name == ''):
if (value != ''):
self.instance.runtime = self.prepare_int(value.split(' ')[0])
elif (name in ['', ' ']):
try:
self.instance.year = self.prepare_int(value.split('(')[0])
except ValueError:
pass
self.instance.series = ('' in value)
elif (name == ''):
for item in value.split(', '):
self.instance.countries.append(self.prepare_str(item))
elif (name == ''):
genres = value.split(', ')
for genre in genres:
self.instance.genres.append(self.prepare_str(genre.replace('', '')))
elif (name in self.main_profits):
self.parse_main_profit(self.main_profits[name], value_tds)
elif (name in self.main_persons):
self.parse_persons(self.main_persons[name], value_tds.contents) |
class _NodeNameCollector(ast.RopeNodeVisitor):
def __init__(self, levels=None):
self.names = []
self.levels = levels
self.index = 0
def _add_node(self, node):
new_levels = []
if (self.levels is not None):
new_levels = list(self.levels)
new_levels.append(self.index)
self.index += 1
self._added(node, new_levels)
def _added(self, node, levels):
if hasattr(node, 'id'):
self.names.append((node.id, levels))
def _Name(self, node):
self._add_node(node)
def _ExceptHandler(self, node):
self.names.append((node.name, []))
def _Tuple(self, node):
new_levels = []
if (self.levels is not None):
new_levels = list(self.levels)
new_levels.append(self.index)
self.index += 1
visitor = _NodeNameCollector(new_levels)
for child in ast.iter_child_nodes(node):
visitor.visit(child)
self.names.extend(visitor.names)
def _Subscript(self, node):
self._add_node(node)
def _Attribute(self, node):
self._add_node(node)
def _Slice(self, node):
self._add_node(node) |
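A rough stand-in, using only the standard ast module, for the index paths ("levels") the collector records when a target unpacks nested tuples; this simplification is illustrative and is not the rope visitor itself:

import ast

tree = ast.parse('a, (b, c) = value')
target = tree.body[0].targets[0]

def walk(node, levels):
    # Prints a -> [0], b -> [1, 0], c -> [1, 1]
    if isinstance(node, ast.Name):
        print(node.id, levels)
    elif isinstance(node, (ast.Tuple, ast.List)):
        for i, child in enumerate(node.elts):
            walk(child, levels + [i])

walk(target, [])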
class Solution():
def largestTriangleArea(self, points: List[List[int]]) -> float:
max_ = 0
for i in range(0, (len(points) - 2)):
for j in range(1, (len(points) - 1)):
for k in range(2, len(points)):
a = abs(math.sqrt((((points[i][0] - points[j][0]) ** 2) + ((points[i][1] - points[j][1]) ** 2))))
b = abs(math.sqrt((((points[i][0] - points[k][0]) ** 2) + ((points[i][1] - points[k][1]) ** 2))))
c = abs(math.sqrt((((points[k][0] - points[j][0]) ** 2) + ((points[k][1] - points[j][1]) ** 2))))
s = (((a + b) + c) / 2)
area_tri = round(math.sqrt(abs((((s * (s - a)) * (s - b)) * (s - c)))), 1)
if (area_tri > max_):
max_ = area_tri
return max_ |
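The per-triangle computation above is Heron's formula; an equivalent and numerically simpler alternative is the cross-product (shoelace) form, sketched here for comparison rather than as a replacement:

def triangle_area(p, q, r):
    # Half the absolute cross product of (q - p) and (r - p).
    return 0.5 * abs((q[0] - p[0]) * (r[1] - p[1]) - (q[1] - p[1]) * (r[0] - p[0]))

# A right triangle with legs 2 and 3 has area 3.0.
assert triangle_area((0, 0), (2, 0), (0, 3)) == 3.0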
class DecoderNet(nn.Module):
def __init__(self, layers):
super(DecoderNet, self).__init__()
self.de_layer5 = self.make_decoder_layer(resDecoderBlock, 512, 512, layers[4], stride=2)
self.de_layer4 = self.make_decoder_layer(resDecoderBlock, 512, 256, layers[3], stride=2)
self.de_layer3 = self.make_decoder_layer(resDecoderBlock, 256, 128, layers[2], stride=2)
self.de_layer2 = self.make_decoder_layer(resDecoderBlock, 128, 64, layers[1], stride=2)
self.de_layer1 = self.make_decoder_layer(plainDecoderBlock, 64, 64, layers[0], stride=1)
self.conv_end = nn.Conv2d(64, 2, kernel_size=3, stride=1, padding=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def make_decoder_layer(self, block, inChannel, outChannel, block_num, stride):
layers = []
for i in range(0, (block_num - 1)):
layers.append(block(inChannel, inChannel, stride=1))
layers.append(block(inChannel, outChannel, stride=stride))
return nn.Sequential(*layers)
def forward(self, x):
x = self.de_layer5(x)
x = self.de_layer4(x)
x = self.de_layer3(x)
x = self.de_layer2(x)
x = self.de_layer1(x)
x = self.conv_end(x)
return x |
def get_cls_from_str(cls_str: str, source: object, fork_inst) -> type:
cls = getattr(builtins, cls_str, None)
if cls:
return cls
if (('[' in cls_str) and (']' in cls_str)):
return _get_generic_cls_from_str(cls_str, source, fork_inst)
try:
splitted = cls_str.split('.')
module_name = '.'.join(splitted[:(- 1)])
cls_name = splitted[(- 1)]
cls_module = import_module(module_name)
cls = getattr(cls_module, cls_name)
except (ImportError, AttributeError, ValueError):
cls = _lookup_announced_class(cls_str, source, fork_inst)
return cls |
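The dotted-path branch above boils down to splitting on the final dot and importing the module; a self-contained sketch of just that step, without the builtin, generic, or announced-class fallbacks:

from importlib import import_module

def resolve_dotted(path: str) -> type:
    # Everything before the last dot is the module, the final segment the attribute.
    module_name, _, attr = path.rpartition('.')
    return getattr(import_module(module_name), attr)

print(resolve_dotted('datetime.datetime'))       # <class 'datetime.datetime'>
print(resolve_dotted('collections.OrderedDict'))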
def find_symbol_tables_recursive(prefix: str, symbols: SymbolTable) -> dict[(str, SymbolTable)]:
result = {}
result[prefix] = symbols
for (name, node) in symbols.items():
if (isinstance(node.node, TypeInfo) and node.node.fullname.startswith((prefix + '.'))):
more = find_symbol_tables_recursive(((prefix + '.') + name), node.node.names)
result.update(more)
return result |
class HadamardMultiplier(nn.Module):
def __init__(self, group, dim, learnable):
super(HadamardMultiplier, self).__init__()
self.group = group
self.dim = dim
H_group = self.constructH(group)
self.H = nn.Parameter(H_group.repeat((dim // group), 1, 1), requires_grad=learnable)
def constructH(self, group):
H = torch.ones(1, 1).cuda()
for i in range(int(math.log2(group))):
H = (torch.cat((torch.cat([H, H], 1), torch.cat([H, (- H)], 1)), 0) / math.sqrt(2))
assert (H.shape[0] == group)
return H
def forward(self, x):
x_shape2 = x.shape
x = x.reshape((- 1), x.shape[(- 1)])
x = x.reshape((- 1), (self.dim // self.group), self.group).transpose(0, 1)
x = torch.bmm(x, self.H).transpose(0, 1)
x = x.reshape(x_shape2)
return x |
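constructH is Sylvester's recursive construction of a normalized Hadamard matrix, which is what makes the per-group torch.bmm an orthogonal mixing step. A small CPU-only check of that property, independent of the module and its .cuda() call:

import math
import torch

def sylvester_hadamard(group: int) -> torch.Tensor:
    H = torch.ones(1, 1)
    for _ in range(int(math.log2(group))):
        H = torch.cat((torch.cat([H, H], 1), torch.cat([H, -H], 1)), 0) / math.sqrt(2)
    return H

H = sylvester_hadamard(8)
assert torch.allclose(H @ H.t(), torch.eye(8), atol=1e-6)  # orthogonal: H @ H.T is the identity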
def test_timestamp(tmp_path_factory, wheel_path, monkeypatch):
build_dir = tmp_path_factory.mktemp('build')
for filename in ('one', 'two', 'three'):
build_dir.joinpath(filename).write_text((filename + '\n'), encoding='utf-8')
monkeypatch.setenv('SOURCE_DATE_EPOCH', '')
with WheelFile(wheel_path, 'w') as wf:
wf.write_files(str(build_dir))
with ZipFile(wheel_path, 'r') as zf:
for info in zf.infolist():
assert (info.date_time[:3] == (1980, 1, 1))
assert (info.compress_type == ZIP_DEFLATED) |
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_blocked_lookup_symbol_query(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
nsids = (SQLITE_MAX_VARIABLE_NUMBER + 10)
sids = range(nsids)
frame = pd.DataFrame.from_records([{'sid': sid, 'symbol': ('TEST.%d' % sid), 'start_date': as_of.value, 'end_date': as_of.value, 'exchange': uuid.uuid4().hex} for sid in sids])
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records([{'sid': i, 'symbol': ('TEST.%d' % i), 'company_name': ('company%d' % i), 'start_date': as_of.value, 'end_date': as_of.value, 'exchange': uuid.uuid4().hex} for i in range(3)])
self.write_assets(equities=frame)
finder = self.asset_finder
(asset_0, asset_1, asset_2) = (finder.retrieve_asset(i) for i in range(3))
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('', as_of)
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(asset_1, finder.lookup_symbol(('TEST%s1' % fuzzy_char), as_of))
def test_lookup_symbol_fuzzy(self):
metadata = pd.DataFrame.from_records([{'symbol': 'PRTY_HRD', 'exchange': 'TEST'}, {'symbol': 'BRKA', 'exchange': 'TEST'}, {'symbol': 'BRK_A', 'exchange': 'TEST'}])
self.write_assets(equities=metadata)
finder = self.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol_change_ticker(self):
T = partial(pd.Timestamp, tz='utc')
metadata = pd.DataFrame.from_records([{'symbol': 'A', 'asset_name': 'Asset A', 'start_date': T('2014-01-01'), 'end_date': T('2014-01-05'), 'exchange': 'TEST'}, {'symbol': 'B', 'asset_name': 'Asset B', 'start_date': T('2014-01-06'), 'end_date': T('2014-01-10'), 'exchange': 'TEST'}, {'symbol': 'C', 'asset_name': 'Asset C', 'start_date': T('2014-01-01'), 'end_date': T('2014-01-05'), 'exchange': 'TEST'}, {'symbol': 'A', 'asset_name': 'Asset A', 'start_date': T('2014-01-06'), 'end_date': T('2014-01-10'), 'exchange': 'TEST'}], index=[0, 0, 1, 1])
self.write_assets(equities=metadata)
finder = self.asset_finder
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('A', T('2013-12-31'))
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('C', T('2013-12-31'))
for asof in pd.date_range('2014-01-01', '2014-01-05', tz='utc'):
A_result = finder.lookup_symbol('A', asof)
assert_equal(A_result, finder.retrieve_asset(0), msg=str(asof))
assert_equal(A_result.symbol, 'B')
assert_equal(A_result.asset_name, 'Asset B')
C_result = finder.lookup_symbol('C', asof)
assert_equal(C_result, finder.retrieve_asset(1), msg=str(asof))
assert_equal(C_result.symbol, 'A')
assert_equal(C_result.asset_name, 'Asset A')
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('B', T('2014-01-05'))
assert_equal(finder.lookup_symbol('C', T('2014-01-07')), finder.retrieve_asset(1))
for asof in pd.date_range('2014-01-06', '2014-01-11', tz='utc'):
B_result = finder.lookup_symbol('B', asof)
assert_equal(B_result, finder.retrieve_asset(0), msg=str(asof))
assert_equal(B_result.symbol, 'B')
assert_equal(B_result.asset_name, 'Asset B')
A_result = finder.lookup_symbol('A', asof)
assert_equal(A_result, finder.retrieve_asset(1), msg=str(asof))
assert_equal(A_result.symbol, 'A')
assert_equal(A_result.asset_name, 'Asset A')
def test_lookup_symbol(self):
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records([{'sid': i, 'symbol': 'existing', 'start_date': date.value, 'end_date': (date + timedelta(days=1)).value, 'exchange': 'NYSE'} for (i, date) in enumerate(dates)])
self.write_assets(equities=df)
finder = self.asset_finder
for _ in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for (i, date) in enumerate(dates):
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_fail_to_write_overlapping_data(self):
df = pd.DataFrame.from_records([{'sid': 1, 'symbol': 'multiple', 'start_date': pd.Timestamp('2010-01-01'), 'end_date': pd.Timestamp('2012-01-01'), 'exchange': 'NYSE'}, {'sid': 2, 'symbol': 'multiple', 'start_date': pd.Timestamp('2010-01-01'), 'end_date': pd.Timestamp('2013-01-01'), 'exchange': 'NYSE'}, {'sid': 3, 'symbol': 'multiple', 'start_date': pd.Timestamp('2011-01-01'), 'end_date': pd.Timestamp('2012-01-01'), 'exchange': 'NYSE'}])
with self.assertRaises(ValueError) as e:
self.write_assets(equities=df)
expected_error_msg = "Ambiguous ownership for 1 symbol, multiple assets held the following symbols:\nMULTIPLE (??):\n intersections: (('2010-01-01 00:00:00', '2012-01-01 00:00:00'), ('2011-01-01 00:00:00', '2012-01-01 00:00:00'))\n start_date end_date\n sid \n 1 2010-01-01 2012-01-01\n 2 2010-01-01 2013-01-01\n 3 2011-01-01 2012-01-01"
self.assertEqual(str(e.exception), expected_error_msg)
def test_lookup_generic(self):
cases = build_lookup_generic_cases()
self.add_instance_callback(cases.close)
for (finder, inputs, reference_date, country, expected) in cases:
(results, missing) = finder.lookup_generic(inputs, reference_date, country)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_none_raises(self):
with self.assertRaises(TypeError):
self.asset_finder.lookup_symbol(None, pd.Timestamp('2013-01-01'))
def test_lookup_mult_are_one(self):
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records([{'sid': 1, 'symbol': symbol, 'start_date': date.value, 'end_date': (date + timedelta(days=30)).value, 'exchange': 'NYSE'} for symbol in ('FOOB', 'FOO_B')])
self.write_assets(equities=df)
finder = self.asset_finder
result = finder.lookup_symbol('FOO/B', (date + timedelta(1)), fuzzy=True)
self.assertEqual(result.sid, 1)
def test_endless_multiple_resolves(self):
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records([{'sid': 1, 'symbol': 'FOOB', 'start_date': date.value, 'end_date': date.max.value, 'exchange': 'NYSE'}, {'sid': 1, 'symbol': 'FOO_B', 'start_date': (date + timedelta(days=31)).value, 'end_date': (date + timedelta(days=60)).value, 'exchange': 'NYSE'}, {'sid': 2, 'symbol': 'FOO_B', 'start_date': (date + timedelta(days=61)).value, 'end_date': date.max.value, 'exchange': 'NYSE'}])
self.write_assets(equities=df)
finder = self.asset_finder
result = finder.lookup_symbol('FOO/B', (date + timedelta(days=90)), fuzzy=True)
self.assertEqual(result.sid, 2)
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records([{'sid': 0, 'symbol': 'real', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 1, 'symbol': 'also_real', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 2, 'symbol': 'real_but_old', 'start_date': pd.Timestamp('2002-1-1', tz='UTC'), 'end_date': pd.Timestamp('2003-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 3, 'symbol': 'real_but_in_the_future', 'start_date': pd.Timestamp('2014-1-1', tz='UTC'), 'end_date': pd.Timestamp('2020-1-1', tz='UTC'), 'exchange': 'THE FUTURE'}])
self.write_assets(equities=data)
finder = self.asset_finder
(results, missing) = finder.lookup_generic(['REAL', 1, 'FAKE', 'REAL_BUT_OLD', 'REAL_BUT_IN_THE_FUTURE'], pd.Timestamp('2013-02-01', tz='UTC'), country_code=None)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'REAL')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'ALSO_REAL')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'REAL_BUT_OLD')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'FAKE')
self.assertEqual(missing[1], 'REAL_BUT_IN_THE_FUTURE')
def test_lookup_generic_multiple_symbols_across_countries(self):
data = pd.DataFrame.from_records([{'sid': 0, 'symbol': 'real', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'US_EXCHANGE'}, {'sid': 1, 'symbol': 'real', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'CA_EXCHANGE'}])
exchanges = pd.DataFrame.from_records([{'exchange': 'US_EXCHANGE', 'country_code': 'US'}, {'exchange': 'CA_EXCHANGE', 'country_code': 'CA'}])
self.write_assets(equities=data, exchanges=exchanges)
with self.assertRaises(SameSymbolUsedAcrossCountries):
self.asset_finder.lookup_generic('real', as_of_date=pd.Timestamp('2014-1-1', tz='UTC'), country_code=None)
with self.assertRaises(SameSymbolUsedAcrossCountries):
self.asset_finder.lookup_generic('real', as_of_date=None, country_code=None)
(matches, missing) = self.asset_finder.lookup_generic('real', as_of_date=pd.Timestamp('2014-1-1', tz='UTC'), country_code='US')
self.assertEqual([matches], [self.asset_finder.retrieve_asset(0)])
self.assertEqual(missing, [])
(matches, missing) = self.asset_finder.lookup_generic('real', as_of_date=pd.Timestamp('2014-1-1', tz='UTC'), country_code='CA')
self.assertEqual([matches], [self.asset_finder.retrieve_asset(1)])
self.assertEqual(missing, [])
def test_security_dates_warning(self):
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol='TESTEQ', end_date=eq_end, exchange_info=ExchangeInfo('TEST', 'TEST', '??'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category, DeprecationWarning))
def test_compute_lifetimes(self):
assets_per_exchange = 4
trading_day = self.trading_calendar.day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
equities = pd.concat([make_rotating_equity_info(num_assets=assets_per_exchange, first_start=first_start, frequency=trading_day, periods_between_starts=3, asset_lifetime=5, exchange=exchange) for exchange in ('US_EXCHANGE_1', 'US_EXCHANGE_2', 'CA_EXCHANGE', 'JP_EXCHANGE')], ignore_index=True)
equities['symbol'] = list(string.ascii_uppercase[:len(equities)])
sids = np.arange(len(equities))
np.random.RandomState(1337).shuffle(sids)
equities.index = sids
permute_sid = dict(zip(sids, range(len(sids)))).__getitem__
exchanges = pd.DataFrame.from_records([{'exchange': 'US_EXCHANGE_1', 'country_code': 'US'}, {'exchange': 'US_EXCHANGE_2', 'country_code': 'US'}, {'exchange': 'CA_EXCHANGE', 'country_code': 'CA'}, {'exchange': 'JP_EXCHANGE', 'country_code': 'JP'}])
sids_by_country = {'US': equities.index[:(2 * assets_per_exchange)], 'CA': equities.index[(2 * assets_per_exchange):(3 * assets_per_exchange)], 'JP': equities.index[(3 * assets_per_exchange):]}
self.write_assets(equities=equities, exchanges=exchanges)
finder = self.asset_finder
all_dates = pd.date_range(start=first_start, end=equities.end_date.max(), freq=trading_day)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(shape=(len(dates), assets_per_exchange), fill_value=False, dtype=bool)
expected_no_start_raw = full(shape=(len(dates), assets_per_exchange), fill_value=False, dtype=bool)
for (i, date) in enumerate(dates):
it = equities.iloc[:4][['start_date', 'end_date']].itertuples(index=False)
for (j, (start, end)) in enumerate(it):
if (start <= date <= end):
expected_with_start_raw[(i, j)] = True
if (start < date):
expected_no_start_raw[(i, j)] = True
for country_codes in powerset(exchanges.country_code.unique()):
expected_sids = pd.Int64Index(sorted(concat((sids_by_country[country_code] for country_code in country_codes))))
permuted_sids = [sid for sid in sorted(expected_sids, key=permute_sid)]
tile_count = (len(country_codes) + ('US' in country_codes))
expected_with_start = pd.DataFrame(data=np.tile(expected_with_start_raw, tile_count), index=dates, columns=pd.Int64Index(permuted_sids))
result = finder.lifetimes(dates, include_start_date=True, country_codes=country_codes)
assert_equal(result.columns, expected_sids)
result = result[permuted_sids]
assert_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(data=np.tile(expected_no_start_raw, tile_count), index=dates, columns=pd.Int64Index(permuted_sids))
result = finder.lifetimes(dates, include_start_date=False, country_codes=country_codes)
assert_equal(result.columns, expected_sids)
result = result[permuted_sids]
assert_equal(result, expected_no_start)
def test_sids(self):
self.write_assets(equities=make_simple_equity_info([0, 1, 2], pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-02')))
self.assertEqual({0, 1, 2}, set(self.asset_finder.sids))
def test_lookup_by_supplementary_field(self):
equities = pd.DataFrame.from_records([{'sid': 0, 'symbol': 'A', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 1, 'symbol': 'B', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 2, 'symbol': 'C', 'start_date': pd.Timestamp('2013-7-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}])
equity_supplementary_mappings = pd.DataFrame.from_records([{'sid': 0, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2013-6-28', tz='UTC')}, {'sid': 1, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC')}, {'sid': 0, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-7-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC')}, {'sid': 2, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-7-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC')}])
self.write_assets(equities=equities, equity_supplementary_mappings=equity_supplementary_mappings)
af = self.asset_finder
dt = pd.Timestamp('2013-6-28', tz='UTC')
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '', dt)
self.assertEqual(asset_0.sid, 0)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '', dt)
self.assertEqual(asset_1.sid, 1)
with self.assertRaisesRegex(ValueNotFoundForField, "Value '{}' was not found for field '{}'.".format('', 'ALT_ID')):
af.lookup_by_supplementary_field('ALT_ID', '', dt)
dt = pd.Timestamp('2014-01-02', tz='UTC')
asset_2 = af.lookup_by_supplementary_field('ALT_ID', '', dt)
self.assertEqual(asset_2.sid, 2)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '', dt)
self.assertEqual(asset_1.sid, 1)
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '', dt)
self.assertEqual(asset_0.sid, 0)
expected_in_repr = "Multiple occurrences of the value '{}' found for field '{}'.".format('', 'ALT_ID')
with self.assertRaisesRegex(MultipleValuesFoundForField, expected_in_repr):
af.lookup_by_supplementary_field('ALT_ID', '', None)
def test_get_supplementary_field(self):
equities = pd.DataFrame.from_records([{'sid': 0, 'symbol': 'A', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 1, 'symbol': 'B', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}, {'sid': 2, 'symbol': 'C', 'start_date': pd.Timestamp('2013-7-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC'), 'exchange': 'TEST'}])
equity_supplementary_mappings = pd.DataFrame.from_records([{'sid': 0, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2013-6-28', tz='UTC')}, {'sid': 1, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-1-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC')}, {'sid': 0, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-7-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC')}, {'sid': 2, 'field': 'ALT_ID', 'value': '', 'start_date': pd.Timestamp('2013-7-1', tz='UTC'), 'end_date': pd.Timestamp('2014-1-1', tz='UTC')}])
self.write_assets(equities=equities, equity_supplementary_mappings=equity_supplementary_mappings)
finder = self.asset_finder
dt = pd.Timestamp('2013-6-28', tz='UTC')
for (sid, expected) in [(0, ''), (1, '')]:
self.assertEqual(finder.get_supplementary_field(sid, 'ALT_ID', dt), expected)
with self.assertRaisesRegex(NoValueForSid, "No '{}' value found for sid '{}'.".format('ALT_ID', 2)):
(finder.get_supplementary_field(2, 'ALT_ID', dt),)
dt = pd.Timestamp('2014-01-02', tz='UTC')
for (sid, expected) in [(0, ''), (1, ''), (2, '')]:
self.assertEqual(finder.get_supplementary_field(sid, 'ALT_ID', dt), expected)
with self.assertRaisesRegex(MultipleValuesFoundForSid, "Multiple '{}' values found for sid '{}'.".format('ALT_ID', 0)):
(finder.get_supplementary_field(0, 'ALT_ID', None),)
def test_group_by_type(self):
equities = make_simple_equity_info(range(5), start_date=pd.Timestamp('2014-01-01'), end_date=pd.Timestamp('2015-01-01'))
futures = make_commodity_future_info(first_sid=6, root_symbols=['CL'], years=[2014])
queries = [([0, 1, 3], [6, 7]), ([0, 2, 3], [7, 10]), (list(equities.index), list(futures.index))]
self.write_assets(equities=equities, futures=futures)
finder = self.asset_finder
for (equity_sids, future_sids) in queries:
results = finder.group_by_type((equity_sids + future_sids))
self.assertEqual(results, {'equity': set(equity_sids), 'future': set(future_sids)})
@parameterized.expand([(Equity, 'retrieve_equities', EquitiesNotFound), (Future, 'retrieve_futures_contracts', FutureContractsNotFound)])
def test_retrieve_specific_type(self, type_, lookup_name, failure_type):
equities = make_simple_equity_info(range(5), start_date=pd.Timestamp('2014-01-01'), end_date=pd.Timestamp('2015-01-01'))
max_equity = equities.index.max()
futures = make_commodity_future_info(first_sid=(max_equity + 1), root_symbols=['CL'], years=[2014])
equity_sids = [0, 1]
future_sids = [(max_equity + 1), (max_equity + 2), (max_equity + 3)]
if (type_ == Equity):
success_sids = equity_sids
fail_sids = future_sids
else:
fail_sids = equity_sids
success_sids = future_sids
self.write_assets(equities=equities, futures=futures)
finder = self.asset_finder
lookup = getattr(finder, lookup_name)
for _ in range(2):
results = lookup(success_sids)
self.assertIsInstance(results, dict)
self.assertEqual(set(results.keys()), set(success_sids))
self.assertEqual(valmap(int, results), dict(zip(success_sids, success_sids)))
self.assertEqual({type_}, {type(asset) for asset in itervalues(results)})
with self.assertRaises(failure_type):
lookup(fail_sids)
with self.assertRaises(failure_type):
lookup([success_sids[0], fail_sids[0]])
def test_retrieve_all(self):
equities = make_simple_equity_info(range(5), start_date=pd.Timestamp('2014-01-01'), end_date=pd.Timestamp('2015-01-01'))
max_equity = equities.index.max()
futures = make_commodity_future_info(first_sid=(max_equity + 1), root_symbols=['CL'], years=[2014])
self.write_assets(equities=equities, futures=futures)
finder = self.asset_finder
all_sids = finder.sids
self.assertEqual(len(all_sids), (len(equities) + len(futures)))
queries = [(), tuple(equities.index[:2]), tuple(futures.index[:3]), (tuple(equities.index[2:]) + tuple(futures.index[3:])), (tuple(equities.index[2:]) + tuple(futures.index[3:])), all_sids, all_sids]
for sids in queries:
equity_sids = [i for i in sids if (i <= max_equity)]
future_sids = [i for i in sids if (i > max_equity)]
results = finder.retrieve_all(sids)
self.assertEqual(sids, tuple(map(int, results)))
self.assertEqual(([Equity for _ in equity_sids] + [Future for _ in future_sids]), list(map(type, results)))
self.assertEqual((list(equities.symbol.loc[equity_sids]) + list(futures.symbol.loc[future_sids])), list((asset.symbol for asset in results)))
@parameterized.expand([(EquitiesNotFound, 'equity', 'equities'), (FutureContractsNotFound, 'future contract', 'future contracts'), (SidsNotFound, 'asset', 'assets')])
def test_error_message_plurality(self, error_type, singular, plural):
try:
raise error_type(sids=[1])
except error_type as e:
self.assertEqual(str(e), 'No {singular} found for sid: 1.'.format(singular=singular))
try:
raise error_type(sids=[1, 2])
except error_type as e:
self.assertEqual(str(e), 'No {plural} found for sids: [1, 2].'.format(plural=plural)) |
class Name(Model):
table_name = 'names'
schema = {'name': 'TEXT', 'module': 'TEXT', 'package': 'TEXT', 'source': 'INTEGER', 'type': 'INTEGER'}
columns = list(schema.keys())
objects = Query(table_name, columns)
@classmethod
def create_table(cls, connection):
super().create_table(connection)
connection.execute('CREATE INDEX IF NOT EXISTS name ON names(name)')
connection.execute('CREATE INDEX IF NOT EXISTS module ON names(module)')
connection.execute('CREATE INDEX IF NOT EXISTS package ON names(package)')
search_submodule_like = objects.where('module LIKE ("%." || ?)')
search_module_like = objects.where('module LIKE (?)')
import_assist = objects.where("name LIKE (? || '%')")
search_by_name_like = objects.where('name LIKE (?)')
search_by_name = objects.where('name IS (?)')
delete_by_module_name = objects.where('module = ?').delete_from() |